\begin{document}
\author{Gergely Harcos}
\title{An additive problem in the Fourier coefficients of cusp forms}
\address{Department of Mathematics, Princeton University, Fine Hall,
Washington Road, Princeton, NJ 08544, USA}
\email{[email protected]}
\subjclass[2000]{Primary 11F30, 11F37; Secondary 11M41.}
\begin{abstract}We establish an estimate on sums of shifted
products of Fourier coefficients coming from holomorphic or Maass
cusp forms of arbitrary level and nebentypus. These sums are
analogous to the binary additive divisor sum which has been
studied extensively. As an application we derive, extending work
of Duke, Friedlander and Iwaniec, a subconvex estimate on the
critical line for $L$-functions associated to character twists of
these cusp forms.
\end{abstract}
\maketitle
\section{Introduction and statement of results}\label{sect6}
In the analytic theory of automorphic $L$-functions one often
encounters sums of the form
\begin{equation}\label{eq24}
D_f(a,b;h)=\sum_{am\pm bn=h} \lambda_\phi(m)\lambda_\psi(n)f(am,bn),
\end{equation}
where $a$, $b$, $h$ are positive integers, $\lambda_\phi(m)$ (resp.
$\lambda_\psi(n)$) are the normalized Fourier coefficients of a
holomorphic or Maass cusp form $\phi$ (resp. $\psi$) coming from
an automorphic representation of $GL(2)$ over $\mathbb{Q}$ and $f$ is
some nice weight function on $(0,\infty)\times (0,\infty)$. These
sums have been studied extensively beginning with Selberg
\cite{Se} (see also Good \cite{G}) and are analogous to the
generalized binary additive divisor sum where $\lambda_\phi(m)$ and
$\lambda_\psi(n)$ are replaced by values of the divisor function:
\[D_f^\tau(a,b;h)=\sum_{am\pm bn=h} \tau(m)\tau(n)f(am,bn).\]
The analogy is deeper than formal, because $\tau(n)$ appears as
the $n$-th Fourier coefficient of the modular form
$\frac{\partial}{\partial s}E(z,s)\big|_{s=1/2}$ where $E(z,s)$ is
the Eisenstein series for $SL_2(\mathbb{Z})$. In general one tries to
deduce good estimates for these sums assuming the parameters $a$,
$b$, $h$ are of considerable size.
The binary additive divisor problem has an extensive history and
we refer the reader to \cite{DFI} for a short introduction. Let us
just mention that in the special case $a=b=1$ one can derive very
sharp results by employing the spectral theory of automorphic
forms for the full modular group \cite{Mo}. This approach is hard
to generalize for larger values of $a$, $b$ as one faces
difficulties with small Laplacian eigenvalues for the congruence
subgroup $\Gamma_0(ab)$ as well as uniformity issues.
The idea of Duke, Friedlander and Iwaniec
\cite{DFI} is to combine the more elementary $\delta$-method (a
variant of Kloosterman's refinement of the classical circle method)
with a Voronoi-type
summation formula for the divisor function and then apply Weil's
estimate for the individual Kloosterman sums
\[S(m,n;q)=\csillag\sum_{d\smod{q}}e_q\bigl(dm+{\bar
d}n\bigr)\]
that arise.
Assuming $a$, $b$ are coprime and the partial derivatives of the
weight function $f$ satisfy the estimate
\begin{equation}\label{eq1}
x^iy^jf^{(i,j)}(x,y)\ll_{i,j}
\left(1+\frac{x}{X}\right)^{-1}\left(1+\frac{y}{Y}\right)^{-1}P^{i+j}
\end{equation}
with some $P,X,Y\geq 1$ for all $i,j\geq 0$, they were able to deduce
\[D_f^\tau(a,b;h)=\int_0^\infty g(x,\mp x\pm h)\,dx+
O\bigl(P^{5/4}(X+Y)^{1/4}(XY)^{1/4+\epsilon}\bigr),\]
where the implied constant depends on $\epsilon$ only,
\[g(x,y)=f(x,y)\sum_{q=1}^\infty \frac{(ab,q)}{abq^2}c_q(h)
(\log x-\lambda_{aq})(\log y-\lambda_{bq}),\]
$c_q(h)=S(h,0;q)$ denotes Ramanujan's sum and
$\lambda_{aq}$, $\lambda_{bq}$ are constants given by
\[\lambda_{aq}=2\gamma+\log\frac{aq^2}{(a,q)^2}.\]
As was pointed out in \cite{DFI} the error term is smaller than
the main term whenever
\[ab\ll P^{-5/4}(X+Y)^{-5/4}(XY)^{3/4-\epsilon}.\]
The case $N=a=b=1$ of the sum $D_f(a,b;h)$ has been discussed in
detail via spectral theory by Jutila \cite{Ju1,Ju2}. This approach
is hard to generalize to $Nab>1$ because one faces the same
difficulties of uniformity and of small Laplacian eigenvalues as in
the additive divisor problem. A
different spectral approach was developed by Sarnak for all levels.
Using his estimates for triple products of eigenfunctions
\cite{Sa4} (see also \cite{P,BR}) he recently established quite
strong uniform bounds for $D_f(a,b;h)$ at least when the forms $\phi$
and $\psi$ are holomorphic. This method has the big advantage
of generalizing naturally to number fields \cite{Sa1,Sa3}. Our
aim here is to emphasize the Maass case and establish,
uniformly for all $h>0$, a nontrivial estimate on
$D_f(a,b;h)$ in the spirit of Duke, Friedlander and Iwaniec.
\begin{theorem}\label{Th1} Let $\lambda_\phi(m)$ (resp. $\lambda_\psi(n)$)
be the normalized Fourier coefficients of a holomorphic or Maass
cusp form $\phi$ (resp. $\psi$) of arbitrary level and nebentypus
and suppose that $f$ satisfies (\ref{eq1}). Then for coprime $a$
and $b$ we have
\[D_f(a,b;h)\ll P^{11/10}(ab)^{-1/10}(X+Y)^{1/10}(XY)^{2/5+\epsilon},\]
where the implied constant depends only on $\epsilon$ and the forms
$\phi$, $\psi$.
\end{theorem}
See the next section for a precise definition of the notions in
the theorem. We note that the theorem supersedes the trivial upper
bound $(XY/ab)^{1/2}$ (following from Cauchy's inequality, see
Section~\ref{sect2}) whenever
\begin{equation}\label{eq17}
ab\ll P^{-11/4}(X+Y)^{-1/4}(XY)^{1/4-\epsilon}.
\end{equation}
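Indeed, Theorem~\ref{Th1} improves on the bound $(XY/ab)^{1/2}$ exactly when
\[P^{11/10}(ab)^{-1/10}(X+Y)^{1/10}(XY)^{2/5+\epsilon}\ll(XY/ab)^{1/2},\]
that is, when $(ab)^{2/5}\ll P^{-11/10}(X+Y)^{-1/10}(XY)^{1/10-\epsilon}$;
raising this to the power $5/2$ (and renaming $\epsilon$) gives (\ref{eq17}).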
As an application we prove a subconvex estimate on the critical
line for $L$-functions associated to character twists of a fixed
holomorphic or Maass cusp form $\phi$ of arbitrary level and
nebentypus. We shall assume that $\phi$ is a \emph{primitive form}, i.e.
a newform in the sense of \cite{AL,Li,ALi} normalized so that
$\lambda_\phi(1)=1$. Then $\lambda_\phi(m)$ $(m\geq 1)$ defines a
character of the corresponding Hecke algebra while
$\lambda_\phi(-m)=\pm\lambda_\phi(m)$ (with a constant sign) when $\phi$ is a
Maass form. In other words, $\phi$ defines a cuspidal automorphic
representation of $GL(2)$ over $\mathbb{Q}$. The contragredient
representation corresponds to the primitive cusp form
$\tilde\phi(z)=\bar\phi(-\bar z)$ with Fourier coefficients
$\lambda_{\tilde\phi}(m)={\bar\lambda}_\phi(m)$. If $q$ is an integer prime to
the level and $\chi$ is a primitive Dirichlet character modulo $q$
then to the twisted primitive cusp form $\phi\otimes\chi$ is
attached the $L$-function
\[L(s,\phi\otimes\chi)=\sum_{m=1}^\infty\frac{\lambda_\phi(m)\chi(m)}{m^s}\]
which is absolutely convergent for $\Re s>1$ and has an Euler
product over the prime numbers. It has an analytic continuation to
an entire function, as shown by Hecke, and satisfies a functional
equation of the standard type. It follows from the
Phragm\'en--Lindel\"of convexity principle that for a fixed point
on the critical line $\Re s=1/2$ we have a bound
\[L(s,\phi\otimes\chi)\ll_{\epsilon,s,\phi}q^{1/2+\epsilon}.\]
By a subconvexity estimate we mean one which replaces the
convexity exponent $1/2$ by any smaller absolute constant. Under
the Generalized Riemann Hypothesis we would have the Generalized
Lindel\"of Hypothesis which asserts that any positive exponent is
permissible. For the philosophy of breaking convexity in the
analytic theory of $L$-functions and its importance for arithmetic
we refer the reader to the excellent discussion by Iwaniec and
Sarnak \cite{IS}.
\begin{theorem}\label{Th2} Suppose that $\phi$ is a
primitive holomorphic or Maass cusp form of arbitrary level and
nebentypus. Let $\Re s=1/2$ and $q$ be an integer prime to the
level. If $\chi$ is a primitive Dirichlet character modulo $q$
then
\begin{equation}\label{eq19}
L(s,\phi\otimes\chi)\ll q^{1/2-1/54+\epsilon},
\end{equation}
where the implied constant depends only on $\epsilon$, $s$ and the form
$\phi$.
\end{theorem}
This estimate with exponent $1/2-1/22$ has been proved for holomorphic
forms of full level in \cite{DFI2} and the improved exponent
$1/2-7/130$ follows for holomorphic forms of arbitrary level as
a special case of Theorem 1 in \cite{Sa3}.
Duke, Friedlander and Iwaniec anticipated that their
method would extend to more general $L$-functions of rank two,
and the present paper is indeed such an extension of their work.
Combining the estimate (\ref{eq19}) at the central point $s=1/2$
with Waldspurger's theorem \cite{Wa} (see also \cite{K}) we get
the bound
\[c(q)\ll_\epsilon q^{1/4-1/108+\epsilon},\quad\text{$q$ square-free}\]
for the normalized Fourier coefficients of half-integral weight
forms of arbitrary level. Such a nontrivial bound is the key step
in the solution of the general ternary Linnik problem given by
Duke and Schulze-Pillot \cite{D,D-SP}.
The proof of Theorem~\ref{Th1} is presented in Sections~\ref{sect1}
through \ref{sect2} and closely follows \cite{DFI}. The heart of
the argument is again a Voronoi-type formula (see
Section~\ref{sect1}) for transforming certain exponential sums
defined by the coefficients $\lambda_\phi(m)$ and $\lambda_\psi(n)$ but this time
the level of the forms imposes some restriction on the frequencies
in the formula. As the $\delta$-method uses information at all
frequencies (and in this sense it corresponds to the classical
Farey dissection of the unit circle) we replace it (in
Section~\ref{sect3}) with another variant of the circle method
(given by Jutila \cite{Ju3}) which is more flexible in the choice
of frequencies. After the transformations we shall encounter
twisted Kloosterman sums
\[S_\chi(m,n;q)=\csillag\sum_{d\smod{q}}
\chi(d)e_q\bigl(dm+{\bar d}n\bigr),\] where $\chi$ is a Dirichlet
character mod $q$. We shall make use of the usual Weil--Estermann
bound \begin{equation}\label{eq10}\bigl|S_\chi(m,n;q)\bigr|\leq
(m,n,q)^{1/2}q^{1/2}\tau(q)\end{equation} which holds true for
these sums as well (the original proofs \cite{W,E} carry over with
minor modifications).
Sections~\ref{sect4} through \ref{sect5} are devoted to the proof
of Theorem~\ref{Th2}. In Section~\ref{sect4} we reduce
(\ref{eq19}), via an approximate functional equation, to an
inequality about certain finite sums involving about $q$ terms. In
order to prove this inequality we use the amplification method
which was introduced in \cite{DFI2}. The idea is to consider a
suitably weighted second moment of the finite sums arising from the family
$\phi\otimes\chi$ of cusp forms ($\chi$ varies, $\phi$ is fixed).
We choose the weights (called amplifiers) in such a way that one of the characters
$\chi$ is emphasized while the second moment average is still of moderate size.
This forces, by positivity, $L(s,\phi\otimes\chi)$ to be small. In
the course of evaluating the amplified second moment we
encounter diagonal and off-diagonal terms and it is the
off-diagonal contribution where Theorem~\ref{Th1} enters.
\noindent {\bf Acknowledgements.} I am grateful to Peter
Sarnak for calling my attention to this problem (a question
originally raised by Atle Selberg, cf. \cite{Se}), for his
comments about the paper and for many valuable discussions on
related topics. I also thank the referee for a careful reading and
for suggesting that certain points in the argument be clarified.
\noindent {\bf Addendum (October 2001).} A preliminary
draft of this paper was completed in January 2001 and posted to
the \emph{e-Print archive} as
\href{http://arxiv.org/abs/math.NT/0101096}{math.NT/0101096}. Later I
learned about the work of Kowalski, Michel and VanderKam
\cite{KMV} establishing subconvexity bounds for various families
of Rankin--Selberg $L$-functions. This work involves a more
elaborate version of the summation formula Proposition~\ref{Prop1}
below. The ultimate generalization (depending heavily on
Atkin--Lehner theory) appears in an unpublished complement
\cite{Mi} to \cite{KMV}. As pointed out on p.10 of \cite{Mi}, this
suffices, via the $\delta$-method, to establish Theorems~\ref{Th1}
and \ref{Th2} even in slightly stronger forms. In particular, the
original subconvexity exponent $1/2-1/22$ of \cite{DFI2} applies
for the general setting as well. However, the present paper is
technically simpler (e.g. it requires the theory of newforms only
to have the relevant $L$-functions at hand) and the simplification
is achieved by using Jutila's method of overlapping intervals in
place of the $\delta$-method.
\noindent {\bf Addendum (December 2002).} It is
straightforward to see from the argument given below that the
implied constants of Theorems 1 and 2 depend polynomially on $|s|$
and the levels of the forms involved. Some additional estimates on
Bessel functions establish polynomial dependence on the
Archimedean parameters (weight or Laplacian eigenvalue) as well.
The details are worked out for a special case in a recent paper by
Michel \cite{Mi2} where such a dependence turns out to be crucial.
\cite{Mi2} also supersedes the unpublished complement \cite{Mi}.
\section{Summation formula for the Fourier
coefficients}\label{sect1}
We define the normalized Fourier coefficients of cusp forms as
follows. Let $\phi(z)$ be a cusp form of level $N$ and nebentypus
$\chi$, that is, a holomorphic cusp form of some integral weight
$k$ or a real-analytic Maass cusp form of some nonnegative Laplacian
eigenvalue $1/4+\mu^2$. By definition, $\chi$ is a Dirichlet
character mod $N$ and the form $\phi$ satisfies a transformation
rule with respect to the Hecke congruence subgroup $\Gamma_0(N)$:
\[\phi\circ[\gamma]=\chi(d)\phi,\qquad
\gamma=\begin{pmatrix}a&b\\c&d\end{pmatrix}\in\Gamma_0(N),\]
where
\[\phi\circ[\gamma](z)=\begin{cases}
\phi(\gamma z)(cz+d)^{-k}&\text{if $\phi$ is holomorphic,}\\
\phi(\gamma z)&\text{if $\phi$ is real-analytic,}
\end{cases}\]
and $\Gamma_0(N)$ acts on the upper half-plane $\mathcal{H}=\{z:\Im z>0\}$ by
fractional linear transformations. Also, the form $\phi$ is
holomorphic or real-analytic on $\mathcal{H}$ and decays exponentially to
zero at each cusp. Any such $\phi$ admits the Fourier expansion
\[\phi(z)=\sum_{m\neq 0}{\hat\lambda}_\phi(m)W(mz),\]
where
\[W(z)=\begin{cases}
e(z)&\text{if $\phi$ is holomorphic,}\\
|y|^{1/2}K_{i\mu}\bigl(2\pi|y|\bigr)e(x) &\text{if
$\phi$ is real-analytic.}
\end{cases}\]
Here $e(z)=e^{2\pi iz}$, $z=x+iy$ and $K_{i\mu}$ is the
MacDonald-Bessel function. If $\phi$ is holomorphic,
${\hat\lambda}_\phi(m)$ vanishes for $m\leq 0$. We define the
\emph{normalized Fourier coefficients} of $\phi$ as
\[\lambda_\phi(m)=\begin{cases}
{\hat\lambda}_\phi(m)m^\frac{1-k}{2}&\text{if $\phi$ is holomorphic,}\\
{\hat\lambda}_\phi(m)|m|^\frac{1}{2}&\text{if $\phi$ is
real-analytic.}
\end{cases}\]
This normalization corresponds to the Ramanujan Conjecture which
asserts that
\[\lambda_\phi(m)\ll_{\epsilon,\phi}m^\epsilon.\]
Rankin--Selberg theory implies that the conjecture holds on
average in the form
\begin{equation}\label{eq22}
\sum_{1\leq m\leq x}|\lambda_\phi(m)|^2\ll_\phi x.
\end{equation}
Various Voronoi-type summation formulas are fulfilled by these
coefficients. In the case of full level ($N=1$) Duke and Iwaniec
\cite{DI} established such a formula for holomorphic cusp forms
and Meurman \cite{M} for Maass cusp forms. These can be
generalized to arbitrary level and nebentypus with obvious minor
modifications as follows.
\begin{proposition}\label{Prop1}
Let $d$ and $q$ be coprime integers such that $N\mid q$, and let
$g$ be a smooth, compactly supported function on $(0,\infty)$. If
$\phi$ is a holomorphic cusp form of level $N$, nebentypus $\chi$
and integral weight $k$ then
\[\chi(d)\sum_{m=1}^\infty\lambda_\phi(m)e_q(dm)g(m)=
\sum_{m=1}^\infty\lambda_\phi(m)e_q\bigl(-{\bar d}m\bigr){\hat g}(m),\]
where
\[{\hat g}(y)=\frac{2\pi i^k}{q}\int_0^\infty g(x)
J_{k-1}\left(\frac{4\pi\sqrt{xy}}{q}\right)\,dx.\] If $\phi$ is a
real-analytic Maass cusp form of level $N$, nebentypus $\chi$ and
nonnegative Laplacian eigenvalue $1/4+\mu^2$ then
\[\chi(d)\sum_{m=1}^\infty\lambda_\phi(m)e_q(dm)g(m)=
\sum_{\pm}\sum_{m=1}^\infty\lambda_\phi(\mp m)e_q\bigl(\pm\bar d
m\bigr)g^{\pm}(m),\] where
\[\begin{split}
g^-(y)&=-\frac{\pi}{q\cosh \pi\mu}\int_0^\infty g(x)
\{Y_{2i\mu}+Y_{-2i\mu}\}\left(\frac{4\pi\sqrt{xy}}{q}\right)\,dx,\\\\
g^+(y)&=\frac{4\cosh \pi\mu}{q}\int_0^\infty g(x)
K_{2i\mu}\left(\frac{4\pi\sqrt{xy}}{q}\right)\,dx.
\end{split}\]
Here $\bar d$ is a multiplicative inverse of $d\bmod q$,
$e_q(x)=e(x/q)=e^{2\pi ix/q}$ and $J_{k-1}$, $Y_{\pm 2i\mu}$,
$K_{2i\mu}$ are Bessel functions.
\end{proposition}
The proof for the holomorphic case \cite{DI} is a straightforward
application of Laplace transforms. Meurman's proof for the
real-analytic case \cite{M} is more involved, but only because he
considers a wider class of test functions $g$ and has to deal with
delicate convergence issues. For smooth, compactly supported
functions $g$ as in our formulation these difficulties do not
arise and one can give a much simpler proof based on Mellin
transformation, the functional equations of the $L$-series
attached to additive twists of $\phi$ (see \cite{M}), and Barnes'
formulas for the gamma function. Indeed, Lemma 5 in \cite{St} (a
special case of Meurman's summation formula) has been proved by
such an approach. We expressed the formula for the non-holomorphic
case in terms of $K$ and $Y$ Bessel functions in order to
emphasize the analogy with the Voronoi-type formula for the
divisor function (where one has $\mu=0$) as derived by Jutila
\cite{Ju4,Ju5}.
\section{Setting up the circle method}\label{sect3}
For the sake of exposition we shall present only the case of
real-analytic Maass forms and the equation $am-bn=h$. The other
cases follow along the same lines by changing Bessel functions and
signs at relevant places of the argument. In our inequalities
$\epsilon$ will always denote a small positive number whose actual
value is allowed to change at each occurrence. Furthermore, unless
otherwise indicated, implied constants will depend on $\epsilon$ and
the cusp forms only (including dependence on the level, nebentypus
characters and Laplacian eigenvalues).
Let $\phi(z)$ (resp. $\psi(z)$) be a Maass cusp form of level $N$,
nebentypus $\chi$ (resp. $\omega$) and Laplacian eigenvalue
$1/4+\mu^2\geq 0$ (resp. $1/4+\nu^2\geq 0$) whose normalized
Fourier coefficients are $\lambda_\phi(m)$ (resp. $\lambda_\psi(n)$), i.e.,
\[\begin{split}
\phi(x+iy)&=\sqrt{y}\sum_{m\neq 0}\lambda_\phi(m)K_{i\mu}\bigl(2\pi|m|y\bigr)e(mx),\\\\
\psi(x+iy)&=\sqrt{y}\sum_{n\neq
0}\lambda_\psi(n)K_{i\nu}\bigl(2\pi|n|y\bigr)e(nx).
\end{split}\]
We shall first investigate $D_g(a,b;h)$ for smooth test functions
$g(x,y)$ which are supported in a box $[A,2A]\times[B,2B]$ and
have partial derivatives bounded by
\begin{equation}\label{eq27}g^{(i,j)}\ll_{i,j}A^{-i}B^{-j}P^{i+j}.
\end{equation} Our aim is to prove the estimate
\begin{equation}\label{eq29}D_g(a,b;h)\ll P^{11/10}(ab)^{-1/10}(A+B)^{1/10}(AB)^{2/5+\epsilon}.\end{equation}
In Section~\ref{sect2} we shall deduce Theorem~\ref{Th1} from this
bound by employing a partition of unity and decomposing
appropriately any smooth test function $f(x,y)$ satisfying
(\ref{eq1}). In fact, (\ref{eq29}) is a special case of
Theorem~\ref{Th1}, as can be seen upon setting $X=A$, $Y=B$, and
$f(x,y)=g(x,y)$. It supersedes the trivial upper bound
\[D_g(a,b;h)\ll(AB/ab)^{1/2}\] whenever
\begin{equation}\label{eq28}
ab\ll P^{-11/4}(A+B)^{-1/4}(AB)^{1/4-\epsilon}.
\end{equation}
The trivial bound is a consequence of $g\ll 1$, Cauchy's
inequality, and the Rankin--Selberg estimate (\ref{eq22}) applied
to the forms $\phi$ and $\psi$.
As $g(x,y)$ is supported in $[A,2A]\times[B,2B]$, we can assume
that $A,B\geq 1/2$, and also that
\begin{equation}\label{eq11}
h\leq 2(A+B),
\end{equation}
for otherwise $D_g(a,b;h)$ vanishes trivially. We shall attach, as
in \cite{DFI}, a redundant factor $w(x-y-h)$ to $g(x,y)$ where
$w(t)$ is a smooth function supported on $|t|<\delta^{-1}$ such that
$w(0)=1$ and $w^{(i)}\ll_i\delta^i$. This, of course, does not alter
$D_g(a,b;h)$. We choose
\begin{equation}\label{eq2}
\delta=P\frac{A+B}{AB},
\end{equation}
so that, by (\ref{eq27}), the new function
\[F(x,y)=g(x,y)w(x-y-h)\]
has partial derivatives bounded by
\begin{equation}\label{eq5}
F^{(i,j)}\ll_{i,j}\delta^{i+j}.
\end{equation}
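Indeed, by the Leibniz rule $F^{(i,j)}$ is a linear combination (with bounded coefficients)
of the products $g^{(i',j')}(x,y)\,w^{(i-i'+j-j')}(x-y-h)$ with $i'\leq i$, $j'\leq j$,
and since $P/A\leq\delta$ and $P/B\leq\delta$ by (\ref{eq2}), each such product is
\[\ll_{i,j}A^{-i'}B^{-j'}P^{i'+j'}\cdot\delta^{(i-i')+(j-j')}
\leq\delta^{i'+j'}\cdot\delta^{i+j-i'-j'}=\delta^{i+j}.\]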
We apply the Hardy--Littlewood method to detect the equation $am-bn=h$,
that is, we express $D_F(a,b;h)$ as the integral of a certain
exponential sum over the unit interval $[0,1]$. We get
\begin{equation}\label{eq30}D_g(a,b;h)=D_F(a,b;h)=\int_0^1
G(\alpha)\,d\alpha,\end{equation} where
\[G(\alpha)=\sum_{m,n}\lambda_\phi(m)\lambda_\psi(n)F(am,bn)e\bigl((am-bn-h)\alpha\bigr).\]
We shall approximate this integral by the following proposition of Jutila
(a consequence of the main theorem in \cite{Ju3}).
\begin{proposition}[Jutila]\label{Prop2}
Let $\mathcal{Q}$ be a nonempty set of integers $q$ with $Q\leq q\leq 2Q$, where $Q\geq 1$.
Let $Q^{-2}\leq\delta\leq Q^{-1}$ and for
each fraction $d/q$ in lowest terms with $q\in\mathcal{Q}$ denote by
$I_{d/q}(\alpha)$ the characteristic function of the interval
$\left[d/q-\delta,d/q+\delta\right]$. Write $L$ for the
number of such intervals, i.e.,
\[L=\sum_{q\in\mathcal{Q}}\varphi(q),\]
and put
\[\tilde{I}(\alpha)=\frac{1}{2\delta L}\sum_{q\in\mathcal{Q}}\ \
\csillag\sum_{d\smod{q}}I_{d/q}(\alpha).\] If $I(\alpha)$ is the
characteristic function of the unit interval $[0,1]$ then
\[\int_{-\infty}^\infty\bigl(I(\alpha)-\tilde{I}(\alpha)\bigr)^2 \,d\alpha\ll
\delta^{-1}L^{-2}Q^{2+\epsilon},\]
where the implied constant depends on $\epsilon$ only.
\end{proposition}
We shall choose some $Q$ and apply the proposition with a set of
denominators of the form
\[\mathcal{Q}=\bigl\{q\in[Q,2Q]:Nab\mid q\text{ and
}(h,q)=(h,Nab)\bigr\}.\] By a result of Jacobsthal \cite{Ja} the
largest gap between reduced residue classes mod $h$ is of size
$\ll h^\epsilon$, whence, by (\ref{eq11}),
\begin{equation}\label{eq18}
|\mathcal{Q}|\gg\frac{Q(AB)^{-\epsilon}}{ab},
\end{equation}
assuming the right hand side exceeds some positive constant
$c=c(\epsilon,N)$. Moreover, we shall assume that
\begin{equation}\label{eq3}
Q^{-2}\leq\delta\leq Q^{-1},
\end{equation}
so that also
\begin{equation}\label{eq12}
1\leq Q\leq AB,
\end{equation}
whence (\ref{eq18}) yields
\begin{equation}\label{eq6}
L\gg\frac{Q^2(AB)^{-\epsilon}}{ab}.
\end{equation}
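Indeed, every $q\in\mathcal{Q}$ satisfies $\varphi(q)\gg q^{1-\epsilon}\geq Q^{1-\epsilon}$,
so that, by (\ref{eq18}) and (\ref{eq12}),
\[L=\sum_{q\in\mathcal{Q}}\varphi(q)\gg Q^{1-\epsilon}\cdot\frac{Q(AB)^{-\epsilon}}{ab}
\geq\frac{Q^2(AB)^{-2\epsilon}}{ab},\]
which is (\ref{eq6}) after renaming $\epsilon$.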
We clearly have
\begin{equation}\label{eq4}
|D_F(a,b;h)-\tilde{D}_F(a,b;h)|\leq\|G\|_\infty\|I-\tilde{I}\|_1,
\end{equation}
where
\[\begin{split}
\tilde{D}_F(a,b;h)&=\int_{-\infty}^\infty
G(\alpha)\tilde{I}(\alpha)\,d\alpha =\frac{1}{2\delta L}\sum_{q\in\mathcal{Q}}\ \
\csillag\sum_{d\smod{q}}
\int_{-\infty}^\infty G(\alpha)I_{d/q}(\alpha)\,d\alpha\\\\
&=\frac{1}{2\delta L}\sum_{q\in\mathcal{Q}}\ \ \csillag\sum_{d\smod{q}}
\int_{-\delta}^\delta G(d/q+\beta)\,d\beta =\frac{1}{2\delta
L}\sum_{q\in\mathcal{Q}}\ \ \csillag\sum_{d\smod{q}}\mathfrak{I}_{d/q},
\end{split}\]
say. To derive an upper estimate for $G(\alpha)$ we express it as
\[G(\alpha)=\int_0^\infty\int_0^\infty F(x,y)e(-h\alpha)\,dS(x/a)\,dT(y/b),\]
where
\[S(x)=\sum_{1\leq m\leq x}\lambda_\phi(m)e(am\alpha),\quad
T(y)=\sum_{1\leq n\leq y}\lambda_\psi(n)e(-bn\alpha).\] Then, integrating by
parts,
\[G(\alpha)=\int_0^\infty\int_0^\infty F^{(1,1)}(x,y)e(-h\alpha)S(x/a)T(y/b)\,dx\,dy,\]
therefore (\ref{eq5}) combined with Wilton's classical estimate
\[S(x)\ll x^{1/2}\log(2x),\quad T(y)\ll y^{1/2}\log(2y)\]
yields
\[\|G\|_\infty\ll\frac{(AB)^{1/2+\epsilon}}{(ab)^{1/2}}\|F^{(1,1)}\|_1
\ll\frac{\delta(AB)^{3/2+\epsilon}}{(ab)^{1/2}(A+B)}.\] Also, by
(\ref{eq6}) and Proposition~\ref{Prop2} we get
\[\|I-\tilde{I}\|_1\leq 3\|I-\tilde{I}\|_2\ll \frac{ab(AB)^\epsilon}{\delta^{1/2}Q},\]
so that (\ref{eq4}) becomes
\begin{equation}\label{eq15}
D_F(a,b;h)-\tilde{D}_F(a,b;h)\ll
\frac{(ab)^{1/2}\delta^{1/2}}{Q}\cdot\frac{(AB)^{3/2+\epsilon}}{A+B}.
\end{equation}
\section{Transforming exponential sums}
The contribution of the interval $[d/q-\delta,d/q+\delta]$ can be
expressed as
\[\mathfrak{I}_{d/q}=\int_{-\delta}^\delta
G(d/q+\beta)\,d\beta=e_q(-dh)\sum_{m,n}\lambda_\phi(m)\lambda_\psi(n)e_q\bigl(d(am-bn)\bigr)E(m,n),\]
where
\[E(x,y)=F(ax,by)\int_{-\delta}^\delta
e\bigl((ax-by-h)\beta\bigr)\,d\beta.\] Using (\ref{eq5}) we
clearly have
\[E^{(i,j)}\ll_{i,j}\delta^{i+j+1}a^ib^j,\]
and we also record, for further reference, that
\begin{equation}\label{eq7}
\|E^{(i,j)}\|_1\ll_{i,j}\delta^{i+j}a^{i-1}b^{j-1}\frac{AB}{A+B}.
\end{equation}
We assume that $q\in\mathcal{Q}$, hence $Nab\mid q$ and we can apply
Proposition~\ref{Prop1} to yield
\[\mathfrak{I}_{d/q}=\overline{\chi\omega}(d)e_q(-dh)\sum_{\pm\pm}\,\sum_{m,n\geq
1}\lambda_\phi(\mp m)\lambda_\psi(\mp n)e_q\bigl({\bar d}(\pm am\mp
bn)\bigr)E^{\pm\pm}(m,n),\] where the corresponding signs must be
matched and
\[E^{\pm\pm}(m,n)=\frac{ab}{q^2}\int_0^\infty\int_0^\infty
E(x,y)M^{\pm}_{2i\mu}\left(\frac{4\pi a\sqrt{mx}}{q}\right)
M^{\pm}_{2i\nu}\left(\frac{4\pi b\sqrt{ny}}{q}\right)\,dx\,dy,\]
\[M^+_{2ir}=(4\cosh\pi r)K_{2ir},\quad
M^-_{2ir}=-\frac{\pi}{\cosh\pi r}\{Y_{2ir}+Y_{-2ir}\}.\] By
summing over the residue classes we get
\begin{equation}\label{eq9}
\csillag\sum_{d\smod{q}}\mathfrak{I}_{d/q}= \sum_{\pm\pm}\,\sum_{m,n\geq
1}\lambda_\phi(\mp m)\lambda_\psi(\mp n)S_{\overline{\chi\omega}}(-h,\pm am\mp
bn;q)E^{\pm\pm}(m,n).
\end{equation}
In order to estimate the twisted Kloosterman sum we observe that
the greatest common divisor $(-h,\pm am\mp bn,q)$ divides
$N(h,n,a)(h,m,b)$ as follows from the relations $(a,b)=1$ and
$(h,q)=(h,Nab)$, therefore (\ref{eq10}) and (\ref{eq12}) imply
that
\begin{equation}\label{eq14}
S_{\overline{\chi\omega}}(-h,\pm am\mp bn;q)\ll
(h,m)^{1/2}(h,n)^{1/2}Q^{1/2}(AB)^\epsilon.
\end{equation}
We estimate $E^{\pm\pm}(m,n)$ by successive applications of
integration by parts and the relations
\[\frac{d}{dz}\bigl(z^sK_s(z)\bigr)=-z^sK_{s-1}(z),
\quad\frac{d}{dz}\bigl(z^sY_s(z)\bigr)=z^sY_{s-1}(z);\]
\[K_s(z)\ll_s z^{-1/2},\quad Y_s(z)\ll_s z^{-1/2},\qquad z>0.\]
We get, for any integers $i,j\geq 0$,
\begin{multline*}E^{\pm\pm}(m,n)\ll_{i,j}
\frac{ab}{Q^2}\left(\frac{Q}{a\sqrt{m}}\right)^{i+\frac{1}{2}}
\left(\frac{Q}{b\sqrt{n}}\right)^{j+\frac{1}{2}}\\
\times\max_{\substack{0\leq k\leq i\\0\leq l\leq j}}
\left(\frac{A}{a}\right)^{k-\frac{i}{2}-\frac{1}{4}}
\left(\frac{B}{b}\right)^{l-\frac{j}{2}-\frac{1}{4}}
\|E^{(k,l)}\|_1,\end{multline*} i.e., by (\ref{eq7}),
\begin{multline*}E^{\pm\pm}(m,n)\ll_{i,j}\frac{1}{Q^2}
\left(\frac{Q}{a\sqrt{m}}\right)^{i+\frac{1}{2}}
\left(\frac{Q}{b\sqrt{n}}\right)^{j+\frac{1}{2}}\\
\times\left(\frac{A}{a}\right)^{-\frac{i}{2}-\frac{1}{4}}
\left(\frac{B}{b}\right)^{-\frac{j}{2}-\frac{1}{4}} \frac{AB}{A+B}
\max_{\substack{0\leq k\leq i\\0\leq l\leq
j}}(A\delta)^k(B\delta)^l.\end{multline*} Therefore
\begin{equation}\label{eq13}
E^{\pm\pm}(m,n)\ll_{i,j} \frac{(AB)^{1/2}}{\delta Q^2(A+B)}
\left(\frac{(\delta Q)^2A}{am}\right)^{\frac{i}{2}+\frac{1}{4}}
\left(\frac{(\delta Q)^2B}{bn}\right)^{\frac{j}{2}+\frac{1}{4}},
\end{equation}
suggesting that we can neglect the contribution to (\ref{eq9}) of
those pairs $(m,n)$ for which $am/A$ or $bn/B$ is $>(\delta
Q)^2(AB)^\epsilon$. Indeed, if we apply (\ref{eq22}) to $\phi$ and
$\psi$ to see that
\[\sum_{1\leq m\leq x}|\lambda_\phi(\mp m)|(h,m)^{1/2}\ll x\tau^{1/2}(h),\qquad
\sum_{1\leq n\leq y}|\lambda_\psi(\mp n)|(h,n)^{1/2}\ll y\tau^{1/2}(h)\]
then we can specify $i$ and $j$ large enough (in terms of $\epsilon$)
to deduce from (\ref{eq14}) and (\ref{eq13}) that the contribution
to (\ref{eq9}) of those terms with $m$ or $n$ large is
\[\ll\tau(h)\frac{\delta^3Q^{5/2}(AB)^{-100}}{ab(A+B)},\]
say, while the choice $i=j=0$ in (\ref{eq13}) shows that the
remaining terms (for which $am/A$ and $bn/B$ are at most $(\delta
Q)^2(AB)^\epsilon$) contribute
\[\ll\tau(h)\frac{\delta^3Q^{5/2}(AB)^{3/2+\epsilon}}{ab(A+B)}.\]
Hence, by (\ref{eq6}),
\begin{equation}\label{eq16}
\tilde{D}_F(a,b;h)=\frac{1}{2\delta L}\sum_{q\in\mathcal{Q}}\ \
\csillag\sum_{d\smod{q}}\mathfrak{I}_{d/q}
\ll\frac{\delta^2Q^{3/2}}{ab}\cdot\frac{(AB)^{3/2+\epsilon}}{A+B}.
\end{equation}
Inequalities (\ref{eq15}) and (\ref{eq16}) show that the optimal
balance is achieved when
\[\delta^3Q^5(ab)^{-3}\asymp 1.\]
A natural choice is given by
\[\delta^3Q^5=(cab)^3,\]
where $c$ is the constant appearing in the remark after
(\ref{eq18}). By (\ref{eq30}), this choice proves (\ref{eq29})
whenever the conditions of Proposition~\ref{Prop2} are satisfied,
that is, when $Q\geq cab(AB)^\epsilon$ and (\ref{eq3}) hold
simultaneously. It turns out that this is the case whenever
\[cab\leq P^{-2/3}(A+B)^{-2/3}(AB)^{2/3-\epsilon},\]
in particular, whenever (\ref{eq28}) is true. However, when
(\ref{eq28}) fails, (\ref{eq29}) follows from the Cauchy bound
$(AB/ab)^{1/2}$, as we already pointed out in Section~\ref{sect3}.
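When (\ref{eq28}) does hold, the computation behind (\ref{eq29}) is the following: with
$\delta^3Q^5=(cab)^3$, i.e.\ $Q\asymp(ab)^{3/5}\delta^{-3/5}$, and with
$\delta=P(A+B)/(AB)$ from (\ref{eq2}), both (\ref{eq15}) and (\ref{eq16}) become
\[\ll\delta^{11/10}(ab)^{-1/10}\,\frac{(AB)^{3/2+\epsilon}}{A+B}
=P^{11/10}(ab)^{-1/10}(A+B)^{1/10}(AB)^{2/5+\epsilon},\]
which is exactly the right-hand side of (\ref{eq29}).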
\section{Concluding Theorem~\ref{Th1}}\label{sect2}
Our aim is to prove Theorem~\ref{Th1} for all test functions
$f(x,y)$ satisfying (\ref{eq1}). We fix an arbitrary smooth
function
\[\rho:(0,\infty)\to\mathbb{R}\]
whose support lies in $[1,2]$ and which satisfies the following
identity on the positive axis:
\[\sum_{k=-\infty}^\infty\rho\bigl(2^{-k/2}x\bigr)=1.\]
To obtain such a function, we take an arbitrary smooth
$\eta:(0,\infty)\to\mathbb{R}$ which is identically $0$ on $(0,1)$ and
identically $1$ on $(\sqrt{2},\infty)$, and then define $\rho$ as
\[\rho(x)=\begin{cases}
\eta(x)&\text{if $0<x\leq\sqrt{2}$,}\\
1-\eta(x/\sqrt{2})&\text{if $\sqrt{2}<x<\infty$.}
\end{cases}\]
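To check the identity, note that for $x\in\bigl[2^{k_0/2},2^{(k_0+1)/2}\bigr)$ only the
terms $k=k_0$ and $k=k_0-1$ are nonzero, and
\[\rho\bigl(2^{-k_0/2}x\bigr)+\rho\bigl(2^{-(k_0-1)/2}x\bigr)
=\eta\bigl(2^{-k_0/2}x\bigr)+1-\eta\bigl(2^{-k_0/2}x\bigr)=1.\]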
According to this partition of unity we decompose $f(x,y)$ as
\[f(x,y)=\sum_{k=-\infty}^\infty\sum_{l=-\infty}^\infty f_{k,l}(x,y),\]
\[f_{k,l}(x,y)=f(x,y)\rho\left(\frac{x}{2^{k/2}X}\right)
\rho\left(\frac{y}{2^{l/2}Y}\right).\] Observe that
\begin{equation}\label{eq8}\text{supp}\,f_{k,l}\subseteq
[A_k,2A_k]\times[B_l,2B_l],\quad A_k=2^{k/2}X,\quad
B_l=2^{l/2}Y,\end{equation} whence (\ref{eq1}) and $P\geq 1$ show
that
\[\bigl(1+2^{k/2}\bigr)\bigl(1+2^{l/2}\bigr)f_{k,l}^{(i,j)}\ll_{i,j}
A_k^{-i}B_l^{-j}P^{i+j}.\] In other words, the bound (\ref{eq29})
applies uniformly to each function
\[g_{k,l}(x,y)=\bigl(1+2^{k/2}\bigr)\bigl(1+2^{l/2}\bigr)f_{k,l}(x,y)\]
with the corresponding parameters $A=A_k$, $B=B_l$:
\[D_{g_{k,l}}(a,b;h)\ll
P^{11/10}(ab)^{-1/10}(A_k+B_l)^{1/10}(A_kB_l)^{2/5+\epsilon}.\] This
implies, for $\epsilon<1/10$,
\[D_{f_{k,l}}(a,b;h)\ll
2^{-|k|/5}2^{-|l|/5}P^{11/10}(ab)^{-1/10}(X+Y)^{1/10}(XY)^{2/5+\epsilon}.\]
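Indeed, $A_k+B_l\leq\bigl(1+2^{k/2}\bigr)\bigl(1+2^{l/2}\bigr)(X+Y)$ and
$A_kB_l=2^{(k+l)/2}XY$, so relative to the right-hand side with $X$, $Y$ in place of
$A_k$, $B_l$ we pick up at most the factor
\[\bigl(1+2^{k/2}\bigr)^{-9/10}2^{k(1/5+\epsilon/2)}\cdot
\bigl(1+2^{l/2}\bigr)^{-9/10}2^{l(1/5+\epsilon/2)}\leq 2^{-|k|/5}2^{-|l|/5}\]
when $\epsilon<1/10$ (treat the cases $k\geq0$ and $k<0$ separately, and likewise for $l$).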
Finally,
\[D_f(a,b;h)=\sum_{k=-\infty}^\infty\sum_{l=-\infty}^\infty
D_{f_{k,l}}(a,b;h)\] completes the proof of Theorem~\ref{Th1}.
It should be noted that the trivial upper bound
\[D_f(a,b;h)\ll(XY/ab)^{1/2}\] mentioned in Section~\ref{sect6}
follows by a similar reduction technique from the Cauchy bounds
$D_{g_{k,l}}(a,b;h)\ll(A_kB_l/ab)^{1/2}$ of Section~\ref{sect3}.
\section{Approximate functional equation}\label{sect4}
Let $\phi$ be a primitive holomorphic or Maass cusp
form of arbitrary level and nebentypus, $\Re s=1/2$, and $\chi$ a
primitive character modulo $q$ where $q$ is prime to the level.
Using the functional equation of the $L$-function attached to the
twisted primitive cusp form $\phi\otimes\chi$ and a standard
technique involving Mellin transforms we can express the special
value $L(s,\phi\otimes\chi)$ as a sum of two Dirichlet series of
essentially $\sqrt{C}$ terms where $C=C(s,\phi\otimes\chi)$ is the
analytic conductor as defined in \cite{IS}. More precisely, $C\asymp
q^2$ where the implied constants depend only on $s$ and $\phi$,
therefore a special case of Theorem~2.1 in \cite{H} gives the
following
\begin{proposition}\label{Prop3}There is a smooth function
$f:(0,\infty)\to\mathbb{C}$ and a complex number $\lambda$ of modulus 1 such
that
\[L(s,\phi\otimes\chi)=\sum_{m=1}^\infty\frac{\lambda_\phi(m)\chi(m)}{m^{1/2}}
f\left(\frac{m}{q}\right)+
\lambda\sum_{m=1}^\infty\frac{{\bar\lambda}_\phi(m)\bar{\chi}(m)}{m^{1/2}}
\bar{f}\left(\frac{m}{q}\right).\] The function $f$ and its
derivatives $f^{(j)}$ $(j=1,2,\dots)$ satisfy the
following uniform growth estimates at $0$ and infinity:
\[f(x)=\begin{cases}1+O(x^{\sigma}),&\quad
0<\sigma<1/5;\\
O(x^{-\sigma}),&\quad \sigma>0.
\end{cases}\]
\[f^{(j)}(x)\ll x^{-\sigma},\quad \sigma>j-1/5.\]
The implied constants depend only on $\sigma$, $j$, $s$ and the form
$\phi$.
\end{proposition}
For any positive numbers $A$ and $\epsilon$ we obtain, using
(\ref{eq22}), an expression
\[L(s,\phi\otimes\chi)=T+\lambda\bar
T+O_{A,\epsilon,s,\phi}\left(q^{-A}\right),\] where
\[T=\sum_{1\leq m\leq q^{1+\epsilon}}
\frac{\lambda_\phi(m)\chi(m)g(m)}{m^{1/2}},\] and $g:(0,\infty)\to\mathbb{C}$ is
a smooth function satisfying
\[g^{(j)}(x)\ll_{j,s,\phi} x^{-j}.\]
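For instance, one may take $g(x)=f(x/q)$ here, so that
$g^{(j)}(x)=q^{-j}f^{(j)}(x/q)\ll_{j,s,\phi}x^{-j}$ by the case $\sigma=j$ of
Proposition~\ref{Prop3}; the discarded tail of the first sum is then
\[\sum_{m>q^{1+\epsilon}}\frac{|\lambda_\phi(m)|}{m^{1/2}}
\left|f\left(\frac{m}{q}\right)\right|
\ll_\sigma q^\sigma\sum_{m>q^{1+\epsilon}}\frac{|\lambda_\phi(m)|}{m^{1/2+\sigma}}
\ll q^{1/2+\epsilon-\epsilon\sigma}\]
by (\ref{eq22}) and partial summation, and this is $\ll q^{-A}$ once $\sigma$ is chosen
large enough in terms of $A$ and $\epsilon$; the second sum is treated in the same way.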
Therefore, applying partial summation and a smooth dyadic
decomposition we can reduce Theorem~\ref{Th2} to the following
\begin{proposition}\label{Prop4}
Let $1\leq M\leq q^{1+\epsilon}$ and $k$ be a smooth function supported
in $[M,2M]$ such that $k^{(j)}\ll_j M^{-j}$. Then
\[\sum_{m=1}^\infty\lambda_\phi(m)\chi(m)k(m)\ll q^{17/54+\epsilon}M^{2/3},\]
where the implied constant depends only on $\epsilon$ and the form
$\phi$.
\end{proposition}
\section{Amplification}
Our purpose is to prove Proposition~\ref{Prop4}. As in \cite{DFI2}
we shall estimate in two ways the amplified second moment
\[S=\csillag\sum_{\omega\bmod{q}}
\left|\sum_{1\leq l\leq L}\bar\chi(l)\omega(l)\right|^2|S_\omega|^2,\]
where $\omega$ runs through the primitive characters modulo $q$, $L$
is a parameter to be chosen later in terms of $M$ and $q$, and
\[S_\omega=\sum_{m=1}^\infty\lambda_\phi(m)\omega(m)k(m).\]
Assuming $L\geq c(\epsilon)q^\epsilon$ (indeed this will be the case
whenever $\epsilon<1/27$, cf. (\ref{eq26})) it follows, using the
result of Jacobsthal \cite{Ja} that the largest gap between
reduced residue classes mod $q$ is of size $\ll q^\epsilon$, that
\begin{equation}\label{eq20}
S\gg q^{-\epsilon}L^2|S_\chi|^2.
\end{equation}
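Indeed, all terms in $S$ are nonnegative, so keeping only the term $\omega=\chi$ and noting
that, by Jacobsthal's bound, the number of $1\leq l\leq L$ with $(l,q)=1$ is $\gg Lq^{-\epsilon}$
once $L\geq c(\epsilon)q^{\epsilon}$, we obtain
\[S\geq\Biggl(\,\sum_{\substack{1\leq l\leq L\\(l,q)=1}}1\Biggr)^2|S_\chi|^2
\gg q^{-2\epsilon}L^2|S_\chi|^2,\]
which is (\ref{eq20}) after renaming $\epsilon$.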
On the other hand, expanding each primitive $\omega$ in $S$ using
Gauss sums and then extending the resulting summation to all
characters mod $q$, we get by orthogonality,
\[S\leq\frac{\varphi(q)}{q}\csillag\sum_{d\smod{q}}
\left|\sum_n a(n)e_q(dn)\right|^2,\] where
\[a(n)=\sum_{\substack{lm=n\\1\leq l\leq
L}}\bar\chi(l)\lambda_\phi(m)k(m).\] It is clear that the coefficients
$a(n)$ are supported in the interval $[1,N]$ where $N=2LM$.
Extending the summation to all residue classes $d$ the previous
inequality becomes
\begin{equation}\label{eq21}
S\leq\varphi(q)\sum_{h\equiv 0\smod{q}}D(h),
\end{equation}
where
\[D(h)=\sum_{n_1-n_2=h}a(n_1)\bar a(n_2).\]
Using the Rankin--Selberg bound (\ref{eq22}) it is simple to
estimate the diagonal contribution $D(0)$. Indeed, by $k\ll 1$ we
get
\[\begin{split}
D(0)&=\sum_n|a(n)|^2\ll\sum_{\substack{l_1m_1=l_2m_2\\1\leq
l_1,l_2\leq L\\M\leq m_1,m_2\leq 2M}}|\lambda_\phi(m_1)\lambda_\phi(m_2)|\\\\
&\ll\sum_{\substack{1\leq l\leq L\\M\leq m\leq
2M}}|\lambda_\phi(m)|^2\tau(ml)\ll N^\epsilon L\sum_{M\leq m\leq
2M}|\lambda_\phi(m)|^2,
\end{split}\]
whence
\begin{equation}\label{eq23}
D(0)=\sum_n|a(n)|^2\ll N^{1+\epsilon}.
\end{equation}
In order to estimate the non-diagonal terms $D(h)$ $(h\neq 0)$ we
shall refer to Theorem~\ref{Th1}. Clearly, we can rewrite each
term as
\[D(h)=\sum_{1\leq l_1,l_2\leq L}\bar\chi(l_1)\chi(l_2)
\sum_{l_1m_1-l_2m_2=h}\lambda_\phi(m_1){\bar\lambda}_\phi(m_2)k(m_1)\bar k(m_2).\] The
inner sum is of type (\ref{eq24}), because ${\bar\lambda}_\phi(m)$ is just the
$m$-th normalized Fourier coefficient of the contragredient cusp
form $\tilde\phi(z)=\bar\phi(-\bar z)$. For each pair $(l_1,l_2)$
we shall apply Theorem~\ref{Th1} with $a=l_1/(l_1,l_2)$,
$b=l_2/(l_1,l_2)$, $X=aM$ and $Y=bM$ to conclude that
\begin{equation}\label{eq25}
D(h)\ll L^2(a+b)^{1/10}(ab)^{3/10+\epsilon}M^{9/10+\epsilon}\ll
L^{27/10+\epsilon}M^{9/10+\epsilon}.
\end{equation}
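Here, for each pair $(l_1,l_2)$, write $g=(l_1,l_2)$; the inner sum vanishes unless
$g\mid h$, in which case it equals $D_f(a,b;h/g)$ with $f(x,y)=k(x/a)\bar k(y/b)$.
This $f$ is supported in $[aM,2aM]\times[bM,2bM]$ and satisfies (\ref{eq1}) with
$X=aM$, $Y=bM$ and $P\ll1$, so that Theorem~\ref{Th1} bounds each inner sum by
\[\ll(ab)^{-1/10}\bigl((a+b)M\bigr)^{1/10}\bigl(abM^2\bigr)^{2/5+\epsilon}
\ll(a+b)^{1/10}(ab)^{3/10+\epsilon}M^{9/10+\epsilon};\]
summing over the at most $L^2$ pairs and using $a,b\leq L$ then gives (\ref{eq25}).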
\section{Concluding Theorem~\ref{Th2}}\label{sect5}
Inserting the bounds (\ref{eq23}) and (\ref{eq25}) into
(\ref{eq21}) it follows that
\[S\ll N^\epsilon\varphi(q)\left(N+\frac{N}{q}L^{27/10}M^{9/10}\right).\]
This shows that the optimal choice for $L$ is provided by
\begin{equation}\label{eq26}
q=L^{27/10}M^{9/10},
\end{equation}
whence (\ref{eq20}) yields
\[S_\chi\ll q^\epsilon L^{-1}|S|^{1/2}\ll
(qN)^{1/2+\epsilon}L^{-1}\ll (qM/L)^{1/2+\epsilon}.\] Substituting
(\ref{eq26}) we get
\[S_\chi\ll (qMq^{-10/27}M^{1/3})^{1/2+\epsilon}\ll q^{17/54+\epsilon}M^{2/3},\]
which is precisely the conclusion of Proposition~\ref{Prop4}. The
proof of Theorem~\ref{Th2} is complete.
\end{document}
\begin{document}
\begin{abstract}
We prove a two-parameter family of $q$-hypergeometric congruences modulo
the fourth power of a cyclotomic polynomial. Crucial ingredients in our proof
are George Andrews'
multiseries extension of the Watson transformation, and a Karlsson--Minton type summation for very-well-poised basic hypergeometric series due to George Gasper.
The new family of $q$-congruences is then used to prove two conjectures
posed earlier by the authors.
\end{abstract}
\maketitle
\section{Introduction}
In 1914, Ramanujan \cite{Ramanujan} presented a number of fast
approximations of $1/\pi$, including
\begin{align}
\sum_{k=0}^{\infty}(6k+1)\frac{(\frac{1}{2})_k^3}{k!^3 4^k}
=\frac{4}{\pi}, \label{eq:ram}
\end{align}
where $(a)_n=a(a+1)\cdots(a+n-1)$ denotes the rising factorial.
In 1997, Van Hamme \cite{Hamme} proposed 13 interesting $p$-adic
analogues of Ramanujan-type formulas, such as
\begin{align}
\sum_{k=0}^{(p-1)/2}(6k+1)\frac{(\frac{1}{2})_k^3}{k!^3 4^k}
&\equiv p(-1)^{(p-1)/2}\pmod{p^4}, \label{eq:pram}
\end{align}
where $p>3$ is a prime. Van Hamme's supercongruence
\eqref{eq:pram} was first proved by Long \cite{Long}.
It should be pointed out that all of the 13 supercongruences have
been proved by different techniques (see \cite{OZ,Swisher}). For
some background on Ramanujan-type supercongruences,
the reader is referred to Zudilin's paper \cite{Zud2009}.
In 2016, Long and Ramakrishna~\cite[Thm.~2]{LR} proved the following
supercongruence:
\begin{equation}
\sum_{k=0}^{p-1} (6k+1) \frac{(\frac{1}{3})_k^6}{k!^6} \equiv
\begin{cases} -p\displaystyle \Gamma_p\bigg(\frac{1}{3}\bigg)^9
\pmod{p^6}, &\text{if $p\equiv 1\pmod 6$,}\\[10pt]
-\frac{p^4}{27}\displaystyle \Gamma_p\bigg(\frac{1}{3}\bigg)^9\pmod{p^6},
&\text{if $p\equiv 5\pmod 6$,}
\end{cases} \label{eq:d2}
\end{equation}
where $\Gamma_p(x)$ is the $p$-adic Gamma function.
This result for $p\equiv 1\pmod 6$ confirms the (D.2) supercongruence
of Van Hamme, which asserts a congruence modulo $p^4$.
During the past few years, many congruences and supercongruences
were generalized to the $q$-setting by various authors (see, for instance,
\cite{Gorodetsky,Guo2018,Guo2,Guo-gz,Guo-fac,Guo-m3,Guo-par,
GJZ,GS19,GSdiff,GS0,GS,GW2,GuoZu,NP,NP2,Straub,Tauraso2}).
In particular, the authors \cite[Thm.~2.3]{GS} proposed the following
partial $q$-analogue of Long and Rama\-krishna's
supercongruence \eqref{eq:d2}:
\begin{equation}
\sum_{k=0}^{n-1} [6k+1]\frac{(q;q^3)_k^6}{(q^3;q^3)_k^6} q^{3k}
\equiv
\begin{cases} 0 \pmod{[n]}, &\text{if $n\equiv 1\pmod 3$,}\\[10pt]
0 \pmod{[n]\Phi_n(q)}, &\text{if $n\equiv 2\pmod 3$.}
\end{cases} \label{eq;3rd-noa}
\end{equation}
Here and throughout the paper, we adopt the standard $q$-notation
(cf.\ \cite{GR}):
For an indeterminate $q$, let
\begin{equation*}
(a;q)_n=(1-a)(1-aq)\cdots (1-aq^{n-1})
\end{equation*}
be the {\em $q$-shifted factorial}.
For convenience, we compactly write
\begin{equation*}
(a_1,a_2,\ldots,a_m;q)_n=(a_1;q)_n (a_2;q)_n\cdots (a_m;q)_n.
\end{equation*}
Moreover,
$
[n]=[n]_q=1+q+\cdots+q^{n-1}
$
denotes the {\em $q$-integer}
and $\Phi_n(q)$ the $n$-th {\em cyclotomic polynomial} in $q$,
which may be defined as
\begin{align*}
\Phi_n(q)=\prod_{\substack{1\leqslant k\leqslant n\\ \gcd(n,k)=1}}(q-\zeta^k),
\end{align*}
where $\zeta$ is a primitive $n$-th root of unity.
The authors \cite[Conjectures 12.10 and 12.11]{GS} also proposed the
following conjectures, the first one generalizing
the $q$-congruence \eqref{eq;3rd-noa} for $n\equiv 2\pmod 3$.
\begin{conjecture}\label{conj-1} Let $d\geqslant 3$ and $n$ be positive
integers with $n\equiv -1\pmod{d}$. Then
\begin{equation*}
\sum_{k=0}^{M}[2dk+1]
\frac{(q;q^d)_k^{2d}}{(q^d;q^d)_k^{2d}}q^{d(d-2)k} \equiv 0 \pmod{[n]\Phi_{n}(q)^3},
\end{equation*}
where $M=((d-1)n-1)/d$ or $n-1$.
\end{conjecture}
\begin{conjecture}\label{conj-2} Let $d\geqslant 3$ and $n>1$ be integers
with $n\equiv 1\pmod{d}$. Then
\begin{equation*}
\sum_{k=0}^{M}[2dk-1]
\frac{(q^{-1};q^d)_k^{2d}}{(q^d;q^d)_k^{2d}}q^{d^2 k} \equiv 0 \pmod{[n]\Phi_{n}(q)^3},
\end{equation*}
where $M=((d-1)n+1)/d$ or $n-1$.
\end{conjecture}
Note that Conjecture \ref{conj-1} does not hold for $d=2$ while
Conjecture \ref{conj-2} is still true for $d=2$.
In fact, the first author and Wang \cite{GW2} proved that
\begin{equation*}
\sum_{k=0}^{(n-1)/2}[4k+1]\frac{(q;q^2)_k^4}{(q^2;q^2)_k^4}
\equiv q^{(1-n)/2}[n]+\frac{(n^2-1)(1-q)^2}{24}q^{(1-n)/2}[n]^3
\pmod{[n]\Phi_n(q)^3}
\end{equation*}
for odd $n$, and the authors \cite{GSdiff} showed that
\begin{equation*}
\sum_{k=0}^{(n+1)/2}[4k-1]\frac{(q^{-1};q^2)_k^4}{(q^2;q^2)_k^4} q^{4k}
\equiv -(1+3q+q^2)[n]^4 \pmod{[n]^4\Phi_n(q)}
\end{equation*}
for odd $n>1$.
The last two $q$-congruences are quite special, as they are rare examples of
$q$-hyper\-geometric congruences that were rigorously shown
to hold modulo a high (fourth and even fifth) power
of a cyclotomic polynomial.
The main purpose of this paper is to add a complete two-parameter family of
$q$-hypergeometric congruences to the list of such $q$-congruences (see Theorem~\ref{thm:1}).
We shall also prove that Conjectures~\ref{conj-1} and \ref{conj-2} are true.
Our proof relies on the following result:
\begin{theorem}\label{thm:1}
Let $d,r,n$ be integers satisfying $d\geqslant 3$, $r\leqslant d-2$
(in particular, $r$ may be negative), and $n\geqslant d-r$, such that
$d$ and $r$ are coprime, and $n\equiv-r\pmod{d}$.
Then
\begin{equation}
\sum_{k=0}^{n-1}[2dk+r]
\frac{(q^r;q^d)_k^{2d}}{(q^d;q^d)_k^{2d}}q^{d(d-1-r)k} \equiv 0 \pmod{\Phi_{n}(q)^4}.
\label{eq:thm1}
\end{equation}
\end{theorem}
This result is similar in nature to the two-parameter result in \cite[Thm.~1.1]{Guo-fac}
which, however, only concerned a $q$-congruence modulo $\Phi_n(q)^2$.
Note that the $q$-congruence \eqref{eq:thm1} is still true when the sum
is over $k$ from $0$ to $((d-1)n-r)/d$, since
$(q^r;q^d)_k/(q^d;q^d)_k \equiv 0\pmod{\Phi_n(q)}$ for
$((d-1)n-r)/d<k\leqslant n-1$.
(Also, we must have $((d-1)n-r)/d\le n-1$ since $n\geqslant d-r$.)
Thus, Theorem \ref{thm:1} implies that Conjectures~\ref{conj-1} and \ref{conj-2}
hold modulo $\Phi_n(q)^4$.
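For a small illustration of \eqref{eq:thm1}, take the smallest admissible case
$d=3$, $r=1$, $n=2$. The sum then equals
\[1+[7]\,q^3\,\frac{(q;q^3)_1^6}{(q^3;q^3)_1^6}
=\frac{(1+q+q^2)^6+[7]\,q^3}{(1+q+q^2)^6},\]
and the numerator is indeed divisible by $(1+q)^4=\Phi_2(q)^4$ (its Taylor expansion at
$q=-1$ begins with the fourth-order term), while the denominator is coprime to $\Phi_2(q)$.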
To prove that Conjectures~\ref{conj-1} and \ref{conj-2} also hold modulo $[n]$
(which in conjunction with Theorem~\ref{thm:1} would fully establish the
validity of the conjectures),
we need to prove the following result.
\begin{theorem}\label{thm:2} Let $d\geqslant 3$ and $n$ be positive
integers with $\gcd(d,n)=1$. Then
\begin{align}
\sum_{k=0}^{n-1}[2dk+1]
\frac{(q;q^d)_k^{2d}}{(q^d;q^d)_k^{2d}}q^{d(d-2)k} &\equiv 0 \pmod{\Phi_n(q)}, \label{eq:first-1}\\%[5pt]
\intertext{and}
\sum_{k=0}^{n-1}[2dk-1]
\frac{(q^{-1};q^d)_k^{2d}}{(q^d;q^d)_k^{2d}}q^{d^2 k} &\equiv 0 \pmod{\Phi_n(q)}. \label{eq:first-2}
\end{align}
\end{theorem}
We shall prove Theorem~\ref{thm:1}
in Section~\ref{sec:thm1}
by making a careful use of Andrews' multiseries
generalization \eqref{andrews} of the Watson
transformation~\cite[Theorem~4]{Andrews75},
combined with a special case of Gasper's very-well-poised
Karlsson--Minton type summation \cite[Eq.~(5.13)]{Gasper}.
We point out that Andrews' transformation
plays an important role in combinatorics and number theory. For example,
this transformation was utilized by Zudilin \cite{Zu} to solve a problem
of Asmus Schmidt. It was also used by Krattenthaler and Rivoal \cite{KR}
to provide an
alternative proof of a result by Zudilin that relates a very-well-poised
hypergeometric series with a Vasilenko--Vasilev-type multiple integral,
the latter serving as a tool in the study of the arithmetic behaviour
of values of the Riemann zeta function at integers.
Andrews' transformation was also used by the first author,
Jouhet and Zeng \cite{GJZ} to prove some $q$-congruences involving
$q$-binomial coefficients.
The Hessami Pilehroods \cite{HH} used this transformation
to give a short proof of a theorem of Zagier.
Recently, the present authors \cite{GS19,GS0} applied Andrews' transformation
to establish some $q$-congruences for truncated basic hypergeometric series.
We shall prove Theorem \ref{thm:2} in Section~\ref{sec:thm2}.
The proof of Conjectures~\ref{conj-1} and \ref{conj-2} will be given in Section~\ref{sec:conjectures}.
We conclude this short paper by Section~\ref{sec:final}, where we state
an open problem involving a $q$-hypergeometric congruence modulo the
fifth power of a cyclotomic polynomial.
\section{Proof of Theorem \ref{thm:1}}\label{sec:thm1}
We first give a simple $q$-congruence modulo $\Phi_n(q)^2$, which was
already used in \cite{GS0}.
\begin{lemma}\label{lem:mod-square}
Let $\alpha$ and $r$ be integers, and let $d$, $n$ be positive integers. Then, for all integers $k\geqslant 0$,
\begin{equation}
(q^{r-\alpha n},q^{r+\alpha n};q^d)_k \equiv (q^r;q^d)_k^2 \pmod{\Phi_n(q)^2}.
\label{eq:mod-square}
\end{equation}
\end{lemma}
\begin{proof}
For any integer $j$, it is easy to check that
\begin{equation*}
(1-q^{\alpha n-dj+d-r})(1-q^{\alpha n+dj-d+r})+(1-q^{dj-d+r})^2
q^{\alpha n-dj+d-r}=(1-q^{\alpha n})^2
\end{equation*}
and $1-q^{\alpha n}\equiv 0\pmod{\Phi_n(q)}$, and so
\begin{equation*}
(1-q^{\alpha n-dj+d-r})(1-q^{\alpha n+dj-d+r})\equiv -(1-q^{dj-d+r})^2
q^{\alpha n-dj+d-r}\pmod{\Phi_n(q)^2}.
\end{equation*}
The proof then follows easily from the above $q$-congruence.
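Indeed, since $1-q^{r-\alpha n+d(j-1)}=-q^{-(\alpha n-dj+d-r)}\bigl(1-q^{\alpha n-dj+d-r}\bigr)$,
the above congruence gives, for every $j\geqslant 1$,
\[\bigl(1-q^{r-\alpha n+d(j-1)}\bigr)\bigl(1-q^{r+\alpha n+d(j-1)}\bigr)
\equiv\bigl(1-q^{r+d(j-1)}\bigr)^2\pmod{\Phi_n(q)^2},\]
and multiplying these congruences over $1\leqslant j\leqslant k$ yields \eqref{eq:mod-square}.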
\end{proof}
We will make use of a powerful transformation formula due to
Andrews \cite[Theorem~4]{Andrews75}, which can be stated as follows:
\begin{align}
\sum_{k\geqslant 0}\frac{(a,q\sqrt{a},-q\sqrt{a},b_1,c_1,\dots,b_m,c_m,q^{-N};q)_k}
{(q,\sqrt{a},-\sqrt{a},aq/b_1,aq/c_1,\dots,aq/b_m,aq/c_m,aq^{N+1};q)_k}
\left(\frac{a^mq^{m+N}}{b_1c_1\cdots b_mc_m}\right)^k &\notag\\[5pt]
=\frac{(aq,aq/b_mc_m;q)_N}{(aq/b_m,aq/c_m;q)_N}
\sum_{j_1,\dots,j_{m-1}\geqslant 0}
\frac{(aq/b_1c_1;q)_{j_1}\cdots(aq/b_{m-1}c_{m-1};q)_{j_{m-1}}}
{(q;q)_{j_1}\cdots(q;q)_{j_{m-1}}} \notag\\[5pt]
\times\frac{(b_2,c_2;q)_{j_1}\dots(b_m,c_m;q)_{j_1+\dots+j_{m-1}}}
{(aq/b_1,aq/c_1;q)_{j_1}
\dots(aq/b_{m-1},aq/c_{m-1};q)_{j_1+\dots+j_{m-1}}}& \notag\\[5pt]
\times\frac{(q^{-N};q)_{j_1+\dots+j_{m-1}}}
{(b_mc_mq^{-N}/a;q)_{j_1+\dots+j_{m-1}}}
\frac{(aq)^{j_{m-2}+\dots+(m-2)j_1} q^{j_1+\dots+j_{m-1}}}
{(b_2c_2)^{j_1}\cdots(b_{m-1}c_{m-1})^{j_1+\dots+j_{m-2}}}&. \label{andrews}
\end{align}
This transformation actually constitutes a multiseries generalization
of Watson's
$_8\phi_7$ transformation formula (see \cite[Appendix (III.18)]{GR})
which we state here in standard notation for basic hypergeometric series
\cite[Section 1]{GR}:
\begin{align}
& _{8}\phi_{7}\!\left[\begin{array}{cccccccc}
a,& qa^{\frac{1}{2}},& -qa^{\frac{1}{2}}, & b, & c, & d, & e, & q^{-n} \\
& a^{\frac{1}{2}}, & -a^{\frac{1}{2}}, & aq/b, & aq/c, & aq/d, & aq/e, & aq^{n+1}
\end{array};q,\, \frac{a^2q^{n+2}}{bcde}
\right] \notag\\[5pt]
&\quad =\frac{(aq, aq/de;q)_n}
{(aq/d, aq/e;q)_n}
\,{}_{4}\phi_{3}\!\left[\begin{array}{c}
aq/bc,\ d,\ e,\ q^{-n} \\
aq/b,\, aq/c,\, deq^{-n}/a
\end{array};q,\, q
\right]. \label{eq:8phi7}
\end{align}
Next, we recall the following very-well-poised Karlsson--Minton type summation
by Gasper~\cite[Eq.~(5.13)]{Gasper} (see also \cite[Ex.~2.33 (i)]{GR}).
\begin{align}
\sum_{k=0}^\infty\frac{(a,q\sqrt{a},-q\sqrt{a},b,a/b,d,e_1,aq^{n_1+1}/e_1,\dots,
e_m,aq^{n_m+1}/e_m;q)_k}{(q,\sqrt{a},-\sqrt{a},aq/b,bq,aq/d,aq/e_1,e_1q^{-n_1},
\dots,aq/e_m,e_mq^{-n_m};q)_k}\left(\frac{q^{1-\nu}}d\right)^k&\notag\\
=\frac{(q,aq,aq/bd,bq/d;q)_\infty}{(bq,aq/b,aq/d,q/d;q)_\infty}
\prod_{j=1}^m\frac{(aq/be_j,bq/e_j;q)_{n_j}}{(aq/e_j,q/e_j;q)_{n_j}}&,
\label{eq:gasper}
\end{align}
where $n_1,\dots,n_m$ are nonnegative integers,
$\nu=n_1+\cdots+n_m$, and $|q^{1-\nu}/d|<1$
when the series does not terminate.
For an elliptic extension of the terminating $d=q^{-\nu}$ case of
\eqref{eq:gasper}, see \cite[Eq.~(1.7)]{RS}.
In particular, we note that for $d=bq$ the right-hand side of
\eqref{eq:gasper} vanishes.
Putting in addition $b=q^{-N}$ we obtain the following terminating summation:
\begin{equation}\label{eq:vwp-km}
\sum_{k=0}^N\frac{(a,q\sqrt{a},-q\sqrt{a},e_1,aq^{n_1+1}/e_1,\dots,
e_m,aq^{n_m+1}/e_m,q^{-N};q)_k}{(q,\sqrt{a},-\sqrt{a},aq/e_1,e_1q^{-n_1},
\dots,aq/e_m,e_mq^{-n_m},aq^{N+1};q)_k}q^{(N-\nu)k}=0,
\end{equation}
valid for $N>\nu=n_1+\cdots+n_m$.
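To spell out how \eqref{eq:vwp-km} arises from \eqref{eq:gasper}: with $d=bq$ the factor
$(bq/d;q)_\infty=(1;q)_\infty=0$ makes the right-hand side of \eqref{eq:gasper} vanish,
and putting in addition $b=q^{-N}$ we get
\[\frac{(b,a/b,d;q)_k}{(aq/b,bq,aq/d;q)_k}
=\frac{(q^{-N},aq^N,q^{1-N};q)_k}{(aq^{N+1},q^{1-N},aq^N;q)_k}
=\frac{(q^{-N};q)_k}{(aq^{N+1};q)_k},
\qquad\frac{q^{1-\nu}}{d}=q^{N-\nu},\]
so that the series terminates at $k=N$ and takes the form stated in \eqref{eq:vwp-km}.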
By suitably combining \eqref{andrews} with \eqref{eq:vwp-km},
we obtain the following multiseries summation formula:
\begin{lemma}\label{lem:ms=0}
Let $m\geqslant 2$. Let $q$, $a$ and $e_1,\dots,e_{m+1}$ be arbitrary
parameters with
$e_{m+1}=e_1$, and let $n_1,\dots,n_m$ and $N$ be nonnegative integers
such that $N>n_1+\cdots+n_m$. Then
\begin{align}
0=\sum_{j_1,\dots,j_{m-1}\geqslant 0}
\frac{(e_1q^{-n_1}/e_2;q)_{j_1}\cdots(e_{m-1}q^{-n_{m-1}}/e_m;q)_{j_{m-1}}}
{(q;q)_{j_1}\cdots(q;q)_{j_{m-1}}} \notag\\[5pt]
\times\frac{(aq^{n_2+1}/e_2,e_3;q)_{j_1}\dots
(aq^{n_m+1}/e_m,e_{m+1};q)_{j_1+\dots+j_{m-1}}}
{(e_1q^{-n_1},aq/e_2;q)_{j_1}
\dots(e_{m-1}q^{-n_{m-1}},aq/e_m;q)_{j_1+\dots+j_{m-1}}}& \notag\\[5pt]
\times\frac{(q^{-N};q)_{j_1+\dots+j_{m-1}}}
{(e_1q^{n_m-N+1}/e_m;q)_{j_1+\dots+j_{m-1}}}
\frac{(aq)^{j_{m-2}+\dots+(m-2)j_1} q^{j_1+\dots+j_{m-1}}}
{(aq^{n_2+1}e_3/e_2)^{j_1}\cdots
(aq^{n_{m-1}+1}e_m/e_{m-1})^{j_1+\dots+j_{m-2}}}&. \label{mkm0}
\end{align}
\end{lemma}
\begin{proof}
By specializing the parameters in the multisum transformation \eqref{andrews}
by $b_i\mapsto aq^{n_i+1}/e_i$, $c_i\mapsto e_{i+1}$, for $1\le i\le m$
(where $e_{m+1}=e_1$), and dividing both sides of
the identity by the prefactor of the multisum,
we obtain that the series on the right-hand side of \eqref{mkm0} equals
\begin{align*}
&\frac{(e_mq^{-n_m},aq/e_1;q)_N}{(aq,e_mq^{-n_m}/e_1;q)_N}\\&\times
\sum_{k=0}^N\frac{(a,q\sqrt{a},-q\sqrt{a},e_1,aq^{n_1+1}/e_1,\dots,
e_m,aq^{n_m+1}/e_m,q^{-N};q)_k}{(q,\sqrt{a},-\sqrt{a},aq/e_1,e_1q^{-n_1},
\dots,aq/e_m,e_mq^{-n_m},aq^{N+1};q)_k}q^{(N-\nu)k},
\end{align*}
with $\nu=n_1+\cdots+n_m$.
Now the last sum vanishes by the special case of Gasper's summation stated in
\eqref{eq:vwp-km}.
\end{proof}
We have now collected enough ingredients to prove Theorem~\ref{thm:1}.
\begin{proof}[Proof of Theorem \ref{thm:1}]
The left-hand side of \eqref{eq:thm1} can be written as the following
multiple of a terminating $_{2d+4}\phi_{2d+3}$ series:
\begin{align*}
\frac{1-q^r}{1-q}
\sum_{k=0}^{((d-1)n-r)/d}\frac{(q^r,q^{d+\frac{r}{2}},-q^{d+\frac{r}{2}},
\overbrace{q^r,\ldots,q^r}^{\text{$(2d-1)$ times}},q^{d+(d-1)n},q^{r-(d-1)n};q^d)_k}
{(q^d,q^{\frac{r}{2}},-q^{\frac{r}{2}},q^d,\ldots,q^d,q^{r-(d-1)n},q^{d+(d-1)n};q^d)_k}
q^{d(d-1-r)k}.
\end{align*}
Now, by the $m=d$ case of Andrews' transformation \eqref{andrews},
we can write the above expression as
\begin{align}
\frac{(1-q^r)(q^{d+r},q^{-(d-1)n};q^d)_{((d-1)n-r)/d}}
{(1-q)(q^d,q^{r-(d-1)n};q^d)_{((d-1)n-r)/d}}
\sum_{j_1,\dots,j_{d-1}\geqslant 0}
\frac{(q^{d-r};q^d)_{j_1}\cdots(q^{d-r};q^d)_{j_{d-1}}}
{(q^d;q^d)_{j_1}\cdots(q^d;q^d)_{j_{d-1}}}& \notag\\[5pt]
\times\frac{(q^r,q^r;q^d)_{j_1}\dots (q^r,q^r;q^d)_{j_1+\dots+j_{d-2}}
(q^r,q^{d+(d-1)n};q^d)_{j_1+\dots+j_{d-1}}}
{(q^d,q^d;q^d)_{j_1}
\dots(q^d,q^d;q^d)_{j_1+\dots+j_{d-1}}}& \notag\\[5pt]
\times\frac{(q^{r-(d-1)n};q^d)_{j_1+\dots+j_{d-1}}}
{(q^{d+r};q^d)_{j_1+\dots+j_{d-1}}}
\frac{q^{(d+r)(j_{d-2}+\dots+(d-2)j_1)} q^{d(j_1+\dots+j_{d-1})}}
{q^{2rj_1}\cdots q^{2r(j_1+\dots+j_{d-2})}}&. \label{eq:multi}
\end{align}
It is easy to see that the $q$-shifted factorial $(q^{d+r};q^d)_{((d-1)n-r)/d}$
contains the factor $1-q^{(d-1)n}$ which is a multiple of $1-q^n$.
Similarly, the $q$-shifted factorial $(q^{-(d-1)n};q^d)_{((d-1)n-r)/d}$
contains the factor $1-q^{-(d-1)n}$ (again being a multiple of $1-q^n$)
since $((d-1)n-r)/d\geqslant 1$ holds due to the conditions
$d\geqslant 3$, $r\leqslant d-2$, and $n\geqslant d-r$.
This means that the $q$-factorial $(q^{d+r},q^{-(d-1)n};q^d)_{((d-1)n-r)/d}$
in the numerator of the fraction before the multisummation is divisible by $\Phi_n(q)^2$.
Moreover, it is easily seen that the $q$-factorial $(q^d,q^{r-(d-1)n};q^d)_{((d-1)n-r)/d}$
in the denominator is coprime with $\Phi_n(q)$.
Note that the non-zero terms in the multisummation in \eqref{eq:multi}
are those indexed by $(j_1,\ldots,j_{d-1})$ that satisfy
$j_1+\dots+j_{d-1}\leqslant ((d-1)n-r)/d$ because of the appearance of the
factor $(q^{r-(d-1)n};q^d)_{j_1+\dots+j_{d-1}}$ in the numerator.
None of the factors appearing in the denominator
of the multisummation of \eqref{eq:multi} contain a
factor of the form $1-q^{\alpha n}$ (and are therefore coprime with $\Phi_n(q)$),
except for $(q^{d+r};q^d)_{j_1+\dots+j_{d-1}}$
when $j_1+\dots+j_{d-1}=((d-1)n-r)/d$.
(In this case, the factor $1-q^{(d-1)n}$ appears
in the numerator.) Writing $n=ad-r$ (with $a\geqslant 1$),
we have $j_1+\dots+j_{d-1}=a(d-1)-r$.
Since $r\le d-2$, there must be an $i$ with $j_i\geqslant a$.
Then $(q^{d-r};q^d)_{j_i}$ contains the factor $1-q^{d-r+d(a-1)}=1-q^n$
which is a multiple of $\Phi_n(q)$.
So the denominator of the reduced form of the multisum in
\eqref{eq:multi} is coprime with $\Phi_n(q)$.
What remains is to show that the multisum in \eqref{eq:multi},
without the prefactor, is divisible by $\Phi_n(q)^2$, i.e. vanishes
modulo $\Phi_n(q)^2$.
By repeated application of Lemma~\ref{lem:mod-square},
the multisum in \eqref{eq:multi},
without the prefactor, is congruent modulo $\Phi_n(q)^2$ to
\begin{align*}
\sum_{j_1,\dots,j_{d-1}\geqslant 0}
\frac{(q^{d-r};q^d)_{j_1}\cdots(q^{d-r};q^d)_{j_{d-1}}}
{(q^d;q^d)_{j_1}\cdots(q^d;q^d)_{j_{d-1}}}& \notag\\[5pt]
\times\frac{(q^{r-(d-2)n},q^{r+(d-2)n};q^d)_{j_1}\dots
(q^{r-n},q^{r+n};q^d)_{j_1+\dots+j_{d-2}}
(q^r,q^{d+(d-1)n};q^d)_{j_1+\dots+j_{d-1}}}
{(q^{d+(d-1)n},q^{d-(d-1)n};q^d)_{j_1}
\dots(q^{d+2n},q^{d-2n};q^d)_{j_1+\dots+j_{d-2}}
(q^{d+n},q^{d-n};q^d)_{j_1+\dots+j_{d-1}}}& \notag\\[5pt]
\times\frac{(q^{r-(d-1)n};q^d)_{j_1+\dots+j_{d-1}}}
{(q^{d+r};q^d)_{j_1+\dots+j_{d-1}}}
\frac{q^{(d+r)(j_{d-2}+\dots+(d-2)j_1)} q^{d(j_1+\dots+j_{d-1})}}
{q^{2rj_1}\cdots q^{2r(j_1+\dots+j_{d-2})}}&.
\end{align*}
However, this sum vanishes due to the $m=d$, $q\mapsto q^d$, $a\mapsto q^r$,
$e_1\mapsto q^{d+(d-1)n}$, $e_i\mapsto q^{r+(d-i+1)n}$, $n_1=0$,
$n_i\mapsto (n+r-d)/d$, $2\leqslant i\leqslant d$, $N=((d-1)n-r)/d$, case
of Lemma~\ref{lem:ms=0}.
\end{proof}
\section{Proof of Theorem \ref{thm:2}}\label{sec:thm2}
We first give the following result, which is a generalization of \cite[Lemma 3.1]{GS}.
\begin{lemma}\label{lem:2.1}
Let $d$, $m$ and $n$ be positive integers with $m\leqslant n-1$ and $dm\equiv -1\pmod{n}$.
Then, for $0\leqslant k\leqslant m$, we have
\begin{equation*}
\frac{(aq;q^d)_{m-k}}{(q^d/a;q^d)_{m-k}}
\equiv (-a)^{m-2k}\frac{(aq;q^d)_k}{(q^d/a;q^d)_k} q^{m(dm-d+2)/2+(d-1)k}
\pmod{\Phi_n(q)}.
\end{equation*}
\end{lemma}
\begin{proof}In view of $q^n\equiv 1\pmod{\Phi_n(q)}$, we have
\begin{align}\label{aqcong}
\frac{(aq;q^d)_{m} }{(q^d/a;q^d)_{m}}
&=\frac{(1-aq)(1-aq^{d+1})\cdots (1-aq^{dm-d+1})}
{(1-q^d/a)(1-q^{2d}/a)\cdots (1-q^{dm}/a)} \notag\\[5pt]
&\equiv \frac{(1-aq)(1-aq^{d+1})\cdots (1-aq^{dm-d+1})}
{(1-q^{d-dm-1}/a)(1-q^{2d-dm-1}/a)\cdots (1-q^{-1}/a)}\notag\\[5pt]
&=(-a)^{m}q^{m(dm-d+2)/2} \pmod{\Phi_n(q)}.
\end{align}
Furthermore, modulo $\Phi_n(q)$, we get
\begin{align*}
\frac{(aq;q^d)_{m-k}}{(q^d/a;q^d)_{m-k}}
&=\frac{(aq;q^d)_{m}}{(q^d/a;q^d)_{m}}
\frac{(1-q^{dm-dk+d}/a)(1-q^{dm-dk+2d}/a)\cdots (1-q^{dm}/a)}
{(1-aq^{dm-dk+1})(1-aq^{dm-dk+d+1})\cdots (1-aq^{dm-d+1})}
\\[5pt]
&\equiv \frac{(aq;q^d)_{m}}{(q^d/a;q^d)_{m}}
\frac{(1-q^{d-dk-1}/a)(1-q^{2d-dk-1}/a)\cdots (1-q^{-1}/a)}
{(1-aq^{-dk})(1-aq^{d-dk})\cdots (1-aq^{-d})} \\[5pt]
&=\frac{(aq;q^d)_{m}}{(q^d/a;q^d)_{m}} \frac{(aq;q^d)_k}{(q^d/a;q^d)_k} a^{-2k} q^{(d-1)k},
\end{align*}
which together with \eqref{aqcong} establishes the assertion.
\end{proof}
Similarly, we have the following $q$-congruence.
\begin{lemma}\label{lem:2.2}
Let $d$, $m$ and $n$ be positive integers with $m\leqslant n-1$ and $dm\equiv 1\pmod{n}$.
Then, for $0\leqslant k\leqslant m$, we have
\begin{equation*}
\frac{(aq^{-1};q^d)_{m-k}}{(q^d/a;q^d)_{m-k}}
\equiv (-a)^{m-2k}\frac{(aq^{-1};q^d)_k}{(q^d/a;q^d)_k}
q^{m(dm-d-2)/2+(d+1)k}\pmod{\Phi_n(q)}.
\end{equation*}
\end{lemma}
The proof of Lemma~\ref{lem:2.2} is completely analogous to that of
Lemma~\ref{lem:2.1} and thus omitted.
\begin{proof}[Proof of Theorem \ref{thm:2}]
Since $\gcd(d,n)=1$, there exists a positive integer $m\leqslant n-1$ such that
$dm\equiv -1\pmod{n}$.
By the $a=1$ case of Lemma~\ref{lem:2.1} one sees that,
for $0\leqslant k\leqslant m$,
the $k$-th and $(m-k)$-th terms on the left-hand side of \eqref{eq:first-1}
cancel each other modulo $\Phi_n(q)$, i.e.,
\begin{align*}
[2d(m-k)+1]
\frac{(q;q^d)_{m-k}^{2d}}{(q^d;q^d)_{m-k}^{2d}}q^{d(d-2)(m-k)}
\equiv -[2dk+1]
\frac{(q;q^d)_k^{2d}}{(q^d;q^d)_k^{2d}}q^{d(d-2)k}
\pmod{\Phi_n(q)}.
\end{align*}
This proves that
\begin{equation}
\sum_{k=0}^{m}[2dk+1]
\frac{(q;q^d)_k^{2d}}{(q^d;q^d)_k^{2d}}q^{d(d-2)k} \equiv 0 \pmod{\Phi_n(q)}. \label{eq:symmetry-1}
\end{equation}
Moreover, since $dm\equiv -1\pmod{n}$, the expression $(q;q^d)_k$ contains a factor of the form $1-q^{\alpha n}$ for $m< k\leqslant n-1$, and is therefore
congruent to $0$ modulo $\Phi_n(q)$. At the same time the expression $(q^d;q^d)_k$ is relatively prime to $\Phi_n(q)$ for $m< k\leqslant n-1$.
Therefore, each summand in \eqref{eq:first-1} with $k$ in the range $m< k\leqslant n-1$ is congruent to $0$ modulo $\Phi_n(q)$.
This together with \eqref{eq:symmetry-1} establishes the $q$-congruence \eqref{eq:first-1}.
Similarly, we can use Lemma~\ref{lem:2.2} to prove \eqref{eq:first-2}.
The proof of the theorem is complete.
\end{proof}
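For instance, for $d=2$ and $n=3$ (so that $m=1$) the congruence \eqref{eq:first-1} can be verified by hand: the summand with $k=2$ is divisible by $\Phi_3(q)$ because of the factor $1-q^3$ in $(q;q^2)_2$, while (noting that $q^{d(d-2)k}=1$ here), modulo $\Phi_3(q)$,
\begin{equation*}
\sum_{k=0}^{1}[4k+1]\frac{(q;q^2)_k^{4}}{(q^2;q^2)_k^{4}}
=1+\frac{[5]}{(1+q)^4}
\equiv 1+\frac{1+q}{q^2}
=\frac{q^2+q+1}{q^2}\equiv 0,
\end{equation*}
where we have used $[5]\equiv 1+q$ and $(1+q)^4\equiv q^2\pmod{\Phi_3(q)}$.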
\section{Proof of Conjectures~\ref{conj-1} and \ref{conj-2}}\label{sec:conjectures}
As mentioned in the introduction, we only need to show that Conjectures~\ref{conj-1} and \ref{conj-2} are also true modulo $[n]$.
We first give a detailed proof of the $q$-congruences modulo $[n]$ in Conjecture~\ref{conj-1}.
\begin{proof}[Proof of Conjecture~\ref{conj-1}]
We need to show that
\begin{align}
\sum_{k=0}^{((d-1)n-1)/d}[2dk+1]
\frac{(q;q^d)_k^{2d}}{(q^d;q^d)_k^{2d}}q^{d(d-2)k} \equiv 0 \pmod{[n]}, \label{conj1-a}\\
\intertext{and}
\sum_{k=0}^{n-1}[2dk+1]
\frac{(q;q^d)_k^{2d}}{(q^d;q^d)_k^{2d}}q^{d(d-2)k} \equiv 0 \pmod{[n]}. \label{conj1-b}
\end{align}
Let $\zeta\ne1$ be an $n$-th root of unity, not
necessarily primitive. Clearly, $\zeta$ is a primitive root of unity
of degree $s$ with $s\mid n$ and $s>1$. Let $c_q(k)$ denote the $k$-th term on the
left-hand side of \eqref{conj1-a} or \eqref{conj1-b}, i.e.,
\begin{equation*}
c_q(k)=[2dk+1]
\frac{(q;q^d)_k^{2d}}{(q^d;q^d)_k^{2d}}q^{d(d-2)k}.
\end{equation*}
The $q$-congruences \eqref{eq:symmetry-1} and \eqref{eq:first-1} with
$n\mapsto s$ imply that
\begin{equation*}
\sum_{k=0}^{m}c_\zeta(k)=\sum_{k=0}^{s-1}c_\zeta(k)=0,
\end{equation*}
where $dm\equiv -1\pmod{s}$ and $1\leqslant m\leqslant s-1$.
Observing that
\begin{equation}
\frac{c_\zeta(\ell s+k)}{c_\zeta(\ell s)}
=\lim_{q\to\zeta}\frac{c_q(\ell s+k)}{c_q(\ell s)}
=c_\zeta(k), \label{eq:conj1-aa}
\end{equation}
we have
\begin{equation}
\sum_{k=0}^{n-1}c_\zeta(k)=\sum_{\ell=0}^{n/s-1}
\sum_{k=0}^{s-1}c_\zeta(\ell s+k)
=\sum_{\ell=0}^{n/s-1}c_\zeta(\ell s) \sum_{k=0}^{s-1}c_\zeta(k)=0, \label{eq:conj1-bb}
\end{equation}
and
\begin{equation*}
\sum_{k=0}^{((d-1)n-1)/d}c_\zeta(k)
=\sum_{\ell=0}^{N-1} c_\zeta(\ell s)
\sum_{k=0}^{s-1}c_\zeta(k)+c_\zeta(N s)\sum_{k=0}^{m}c_\zeta(k)=0,
\end{equation*}
where
$$
N=\frac{(d-1)n-dm-1}{ds}.
$$
(It is easy to check that $N$ is a positive integer.)
This means that the sums $\sum_{k=0}^{n-1}c_q(k)$ and $\sum_{k=0}^{((d-1)n-1)/d}c_q(k)$
are both divisible by the cyclotomic polynomial $\Phi_s(q)$.
Since this is true for any divisor $s>1$ of $n$, we deduce that they
are divisible by
\begin{equation*}
\prod_{s\mid n,\, s>1}\Phi_s(q)=[n],
\end{equation*}
thus establishing the $q$-congruences \eqref{conj1-a} and \eqref{conj1-b}.
\end{proof}
Similarly, we can prove Conjecture~\ref{conj-2}.
\begin{proof}[Proof of Conjecture~\ref{conj-2}]
This time we need to show that
\begin{align}
\sum_{k=0}^{((d-1)n+1)/d}
[2dk-1]\frac{(q^{-1};q^d)_k^{2d}}{(q^d;q^d)_k^{2d}}q^{d^2 k} \equiv 0 \pmod{[n]}, \label{conj2-a}\\
\intertext{and}
\sum_{k=0}^{n-1}
[2dk-1]\frac{(q^{-1};q^d)_k^{2d}}{(q^d;q^d)_k^{2d}}q^{d^2 k} \equiv 0 \pmod{[n]}. \label{conj2-b}
\end{align}
Again, let $\zeta$ be a primitive root of unity
of degree $s$ with $s\mid n$ and $s>1$, and let
\begin{equation*}
c_q(k)=
[2dk-1]\frac{(q^{-1};q^d)_k^{2d}}{(q^d;q^d)_k^{2d}}q^{d^2 k}.
\end{equation*}
Just like before, we have
\begin{equation*}
\sum_{k=0}^{m}c_\zeta(k)=\sum_{k=0}^{s-1}c_\zeta(k)=0,
\end{equation*}
where $dm\equiv 1\pmod{s}$ and $1\leqslant m\leqslant s-1$.
Furthermore, we also have \eqref{eq:conj1-aa}, \eqref{eq:conj1-bb}, and
\begin{equation*}
\sum_{k=0}^{((d-1)n+1)/d}c_\zeta(k)
=\sum_{\ell=0}^{N-1} c_\zeta(\ell s)
\sum_{k=0}^{s-1}c_\zeta(k)+c_\zeta(N s)\sum_{k=0}^{m}c_\zeta(k)=0,
\end{equation*}
where
$
N=\frac{(d-1)n-dm+1}{ds}
$
this time. The rest is exactly the same as in the proof of Conjecture~\ref{conj-1} and
is omitted here.
\end{proof}
\section{An open problem}\label{sec:final}
Recently, the first author \cite[Theorem 5.4]{Guo-m3} proved that
\begin{align}
\sum_{k=0}^{M}[4k-1]_{q^2}[4k-1]^2\frac{(q^{-2};q^4)_k^4}{(q^4;q^4)_k^4}q^{4k}
\equiv 0 \pmod{[n]_{q^2}\Phi_n(q^2)^2},\label{last-1}
\end{align}
where $n$ is odd and $M=(n+1)/2$ or $n-1$. We take this opportunity to
propose a unified generalization of \cite[Conjectures 6.3 and 6.4]{Guo-m3},
involving a remarkable $q$-hypergeometric congruence modulo the fifth
power of a cyclotomic polynomial:
\begin{conjecture}
Let $n>1$ be an odd integer. Then
\begin{align*}
\sum_{k=0}^{M}[4k-1]_{q^2}[4k-1]^2\frac{(q^{-2};q^4)_k^4}{(q^4;q^4)_k^4}q^{4k}
\equiv (2q+2q^{-1}-1)[n]_{q^2}^4 \pmod{[n]_{q^2}^4\Phi_n(q^2)},
\end{align*}
where $M=(n+1)/2$ or $n-1$.
\end{conjecture}
\end{document}
\begin{document}
\title [ The addition and multiplication theorems]
{On the addition and multiplication theorems}
\author{K. A. Makarov}
\address{Department of Mathematics, University of Missouri, Columbia, MO 63211, USA}
\email{[email protected]}
\author{E. Tsekanovski\u{i} }
\address{
Department of Mathematics, Niagara University, P.O. Box 2044,
NY 14109, USA }
\email{[email protected]}
\dedicatory
{Dedicated with great pleasure to Lev Aronovich Sakhnovich
on the occasion of his 80th birthday anniversary}
\subjclass[2010]{Primary: 81Q10, Secondary: 35P20, 47N50}
\keywords{Deficiency indices, quasi-self-adjoint extensions,
Liv\v{s}ic functions, characteristic functions}
\begin{abstract} We discuss the classes $\mathfrak{C}$, $\mathfrak{M}$, and $\mathfrak{S}$ of analytic functions that can be realized as the Liv\v{s}ic characteristic functions of a symmetric densely defined operator $\dot A$ with deficiency indices $(1,1)$, the Weyl-Titchmarsh functions associated with the pair $(\dot A, A)$ where $A$ is a self-adjoint extension of $\dot A$, and the characteristic functions of a maximal dissipative extension $\widehat A$ of $\dot A$, respectively. We show that the class $\mathfrak{M}$ is a convex set, both of the classes $\mathfrak{S}$ and $\mathfrak{C}$ are
closed under multiplication and, moreover, $\mathfrak{C}\subset \mathfrak{S}$ is a double sided ideal in the sense that $\mathfrak{S}\cdot \mathfrak{C}=\mathfrak{C}\cdot \mathfrak{S}\subset \mathfrak{C}$.
The goal of this paper is to obtain these analytic results by providing explicit constructions for the corresponding operator realizations.
In particular, we introduce the concept of an operator coupling of two unbounded
maximal dissipative operators and establish an analog
of the Liv\v{s}ic-Potapov multiplication theorem \cite{LP} for the operators associated with the function classes $\mathfrak{C}$
and $\mathfrak{S}$. We also establish that the modulus of the von Neumann parameter characterizing the domain of $\widehat A$ is a multiplicative functional with respect to the operator coupling.
\end{abstract}
\maketitle
\section{Introduction}
In 1946, M. Liv\v{s}ic \cite{L}
introduced fundamental concepts of the {\it characteristic functions} of a densely defined symmetric operator $\dot A$ with deficiency indices $(1, 1)$, and of its maximal non-self-adjoint extension $\widehat A$. Under the hypothesis that the symmetric operator $\dot A$ is prime\footnote{Recall that a closed symmetric operator $\dot A$ is called a prime operator if $\dot A$ does not have invariant subspaces where the corresponding restriction of $\dot A$ is self-adjoint},
a cornerstone result \cite[Theorem 13]{L} (also see \cite{AkG} and \cite{AWT}) states that the characteristic function (modulo inessential constant unimodular factor) determines the operator up to unitary equivalence.
In 1965, in an attempt to characterize self-adjoint extensions $A$ of a symmetric operator $\dot A$,
Donoghue \cite{D} introduced the Weyl-Titchmarsh function associated with the pair $(\dot A, A)$ and showed that
the Weyl-Titchmarsh function determines the pair $(\dot A, A)$ up to unitary equivalence whenever $\dot A$ is a prime symmetric operator with deficiency indices $(1,1)$.
In our recent paper \cite{MT}, we brought into play an auxiliary self-adjoint (reference)
extension $A$ of $\dot A$ and
suggested to define
the characteristic functions of a symmetric operator and of its dissipative extension as the functions associated with the pairs $(\dot A, A)$ and $(\widehat A, A)$, rather than with the single operators $\dot A$ and $\widehat A$, respectively. Honoring M. Liv\v{s}ic's fundamental contributions to the theory of non-self-adjoint operators and also taking into account the crucial role that the characteristic function of a symmetric operator plays in the theory, we suggested to
call the characteristic function associated with the pair $(\dot A, A)$ the Liv\v{s}ic function.
For a detailed treatment of the aforementioned concepts of the Liv\v{s}ic, Weyl-Titchmarsh, and the characteristic functions including the discussion of their interrelations we refer to \cite{MT}.
The main goal of this paper is to obtain the following two principal results.
Our first result states that given two
Weyl-Titchmarsh functions $M_1=M(\dot A_1, A_1)$ and $M_2=M(\dot A_2, A_2)$,
any convex combination
$pM_1+qM_2$ can also be realized as the Weyl-Titchmarsh function associated with a pair $(\dot A, A_1\oplus A_2)$, where
$\dot A$ stands for some special
symmetric extension with deficiency indices $(1,1)$ of the direct orthogonal sum of $\dot A_1$ and $\dot A_2$
(see Theorem \ref{additionth}).
Our second result concerns the computation of the characteristic function
of an operator coupling $\widehat A=\widehat A_1\uplus \widehat A_2$ of two dissipative operators $\widehat A_1$ and $ \widehat A_2$, acting in the Hilbert spaces ${\mathcal H}_1$ and ${\mathcal H}_2$, defined as
a dissipative extension of $\widehat A_1$ outgoing from the Hilbert space ${\mathcal H}_1$ to the direct sum of the Hilbert spaces ${\mathcal H}_1\oplus {\mathcal H}_2$ satisfying the constraint
$$
\widehat A|_{\mathrm{Dom}(\widehat A)\cap \mathrm{Dom}((\widehat A)^*)}\subset \widehat A_1\oplus (\widehat A_2)^*.
$$
This result, called the multiplication theorem (see Theorem \ref{opcoup}),
states that the product $S_1\cdot S_2$ of the characteristic functions $S_1$ and $S_2$ associated with the pairs $(\widehat A_1, A_1)$ and
$(\widehat A_2, A_2)$ coincides with the
characteristic function of the operator coupling $\widehat A=\widehat A_1\uplus \widehat A_2$ relative to an appropriate reference self-adjoint operator.
It is important to mention that the multiplication theorem substantially relies on the multiplicativity of the absolute value $\widehat \kappa (\cdot)$ of the von Neumann extension parameter
of a maximal dissipative extension of $\dot A$ established in Theorem \ref{nachalo}:
\begin{equation}\label{vnkappa}
\widehat \kappa(\widehat A_1\uplus \widehat A_2)=\widehat \kappa(\widehat A_1)\cdot \widehat \kappa(\widehat A_2).
\end{equation}
Introducing the analytic function classes $\mathfrak{C}$ and $\mathfrak{M}$, elements of which
can be realized as the Liv\v{s}ic and Weyl-Titchmarsh functions associated with a pair
$(\dot A, A)$, respectively,
along with the analytic function class $\mathfrak{S}$ consisting of all characteristic functions associated with all possible pairs
$(\widehat A, A)$,
as a corollary of our geometric considerations we obtain that
\begin{itemize}
\item[(i)] The class $\mathfrak{M}$ is a convex set with respect to addition;
\item[(ii)] The class $\mathfrak{S}$ is closed with respect to multiplication,
$\mathfrak{S}\cdot \mathfrak{S}\subset \mathfrak{S};
$
\item[(iii)] The subclass $\mathfrak{C}\subset \mathfrak{S}$ is a (double sided) ideal\footnote{We borrow this term from ring theory. However, it is perhaps worth mentioning that the function class $\mathfrak{S}$ as an
algebraic structure is not a ring.} in the sense that
$$
\mathfrak{C}\cdot \mathfrak{S}=\mathfrak{S}\cdot \mathfrak{C}\subset \mathfrak{C};
$$
\item[(iv)] The class $\mathfrak{C}$ is closed with respect to multiplication:
$\mathfrak{C}\cdot\mathfrak{C}\subset \mathfrak{C}.$
\end{itemize}
The closedness of the class $\mathfrak{S}$ under multiplication (ii) is a scalar variant of the multiplication theorem in the unbounded setting.
The multiplication theorem for bounded operators was originally obtained in 1950 by
M.~S.~Liv\v{s}ic and V.~ P.~Potapov \cite{LP}, who in particular
established that the
product of two characteristic matrix-valued functions of bounded operators coincides with the
matrix-valued characteristic function of a bounded
operator. Subsequently, the result was extended
to the case of operator colligations (systems) \cite {ABT}, \cite{Br}, \cite {Brod}, \cite{BL58}, \cite{L2}, \cite{LYa}.
The paper is organized as follows.
In Section 2, we recall the definitions and briefly discuss various properties of the Liv\v{s}ic, Weyl-Titchmarsh and the characteristic functions.
In Section 3, we introduce a coupling of two symmetric operators defined as a
symmetric extension with deficiency indices $(1,1)$ of the direct sum of two symmetric operators $\dot A_1$ and $\dot A_2$ acting in the Hilbert spaces ${\mathcal H}_1$ and ${\mathcal H}_2$
and then we explicitly compute the Liv\v{s}ic function of the coupling (see Theorem \ref{technich0}).
In Section 4, we prove the {\it Addition Theorem for the Weyl-Titchmarsh functions} (see Theorem \ref{additionth}).
In Section 5, we develop a variant of the extension theory with constraints, introduce the concept of the operator coupling of two unbounded dissipative operators, discuss its properties,
and prove the {\it Multiplicativity of the von Neumann extension parameter} (see Theorem \ref{nachalo}).
In Section 6, we prove the {\it Multiplication Theorem for the characteristic functions} (see Theorem \ref{opcoup}).
We also illustrate the corresponding geometric constructions by an example of the differentiation operator on a finite interval
(see Example \ref{exam}).
In Appendix $A$, a differentiation operator on a finite interval is treated in detail (also see \cite{AkG} for a related exposition).
\section{Preliminaries}
Throughout this paper we assume the following hypothesis.
\begin{hypothesis}\label{hyp}
Suppose that $\dot A$ is a densely defined symmetric operator with deficiency indices $(1,1)$ and that $A$ is a self-adjoint extension of $\dot A$.
Assume that the deficiency elements $g_\pm\in \Ker( (\dot A)^*\mp iI)$ are chosen in such a way that $\|g_+\|=\|g_-\|=1$
and that
\begin{equation}\label{ddff}
g_{+}-g_-\in \mathrm{Dom}(A).
\end{equation}
\end{hypothesis}
\subsection{The Liv\v{s}ic function and the class $\mathfrak{C}$}
Under Hypothesis \ref{hyp}, introduce the Liv\v{s}ic function $s=s(\dot A, A)$ of the symmetric operator $\dot A$ relative to the self-adjoint extension $A$ by
\begin{equation}\label{charfunk}
s(\dot A, A )(z)=\frac{z-i}{z+i}\cdot \frac{(g_z, g_-)}{(g_z, g_+)}, \quad z\in \mathbb C_+,
\end{equation}
where $g_z$, $z\in \mathbb C_+$, is an arbitrary deficiency element, $0\ne g_z\in \Ker((\dot A)^*-zI )$.
We remark that from the definition it follows that the dependence of
the Liv\v{s}ic function
$s(\dot A, A)$ on the reference (self-adjoint) operator
$A$
reduces to multiplication by a $z$-independent
unimodular factor whenever $A$ changes. That is,
\begin{equation}\label{transs}
s(\dot A, A_\alpha)=e^{-2i\alpha}s(\dot A, A), \quad \alpha \in [0,\pi),
\end{equation}whenever the self-adjoint reference extension $A_\alpha$ of $\dot A$ has the property
\begin{equation}\label{dom1}
g_+-e^{2i\alpha}g_-\in \mathrm{Dom} (A_\alpha).
\end{equation}
Denote by $\mathfrak{C}$
the class of all analytic mappings from $\mathbb C_+$ into the unit disk $\mathbb D$ that can be realized as the Liv\v{s}ic function
associated with some pair $(\dot A, A)$.
The class $\mathfrak{C}$ can be characterized as follows (see \cite{L}). An
analytic mapping $s$ from the upper-half plane into the unit disk
belongs to the class $\mathfrak{C}$, $s\in \mathfrak{C}$, if and only if
\begin{equation}\label{vsea0}
s(i)=0\quad \text{and}\quad \lim_{z\to \infty}
z(s(z)-e^{2i\alpha})=\infty \quad \text{for all} \quad \alpha\in
[0, \pi),
\end{equation}
the limit being taken as $z\to\infty$ inside any sector
$$
0< \varepsilon \le \text{arg} (z)\le \pi -\varepsilon.
$$
\subsection{The Weyl-Titchmarsh function and the class $\mathfrak{M}$}
Define
the Weyl-Titchmarsh function $M(\dot A, A)$
associated with the pair $(\dot A, A )$ as
\begin{equation}\label{1}
M(\dot A, A)(z)=
\left ((Az+I)(A-zI)^{-1}g_+,g_+\right ), \quad z\in \mathbb C_+.
\end{equation}
Denote by $\mathfrak{M}$ the class of all analytic mappings from $\mathbb C_+$ into itself that can be realized as the Weyl-Titchmarsh function $M(\dot A, A)$ associated with a pair $(\dot A, A)$.
As for the characterization of the class $\mathfrak{M}$, we recall that $M\in \mathfrak{M}$ if and only if $M$ admits the representation (see \cite{D}, \cite{FKE}, \cite{GT},
\cite {MT})
\begin{equation}\label{hernev0}
M(z)=\int_\mathbb R \left
(\frac{1}{\lambda-z}-\frac{\lambda}{1+\lambda^2}\right )
d\mu,
\end{equation}
where $\mu$ is an infinite Borel measure and
\begin{equation}\label{hernev01}
\int_\mathbb R\frac{d\mu(\lambda)}{1+\lambda^2}=1\,,\quad\text{equivalently,}\quad M(i)=i.
\end{equation}
It is worth mentioning (see, e.g., \cite{MT}) that the Liv\v{s}ic and Weyl-Titchmarsh functions are related by the Cayley transform
\begin{equation}\label{blog}
s(\dot A, A)(z)=\frac{M(\dot A, A)(z)-i}{M(\dot A,A)(z)+i},\quad z\in \mathbb C_+.
\end{equation}
Taking this into account, one can show that the properties \eqref{vsea0} and \eqref{hernev0},
\eqref{hernev01} imply one another (see, e.g., \cite{MT}).
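For instance, the constant function $M(z)\equiv i$ admits the representation \eqref{hernev0}--\eqref{hernev01} with $d\mu=\pi^{-1}d\lambda$ (a standard computation), and hence $M\in \mathfrak{M}$; by \eqref{blog} the corresponding Liv\v{s}ic function is $s(z)\equiv 0$, which obviously satisfies \eqref{vsea0}.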
Combining \eqref{transs}, \eqref{dom1} and \eqref{blog} shows that
the corresponding transformation law for the Weyl-Titchmarsh functions reads as (see \cite{D}, \cite{FKE}, \cite{GT})
\begin{equation}\label{transm}
M(\dot A, A_\alpha)=\frac{\cos \alpha \, M(\dot A, A)-\sin \alpha}{
\cos\alpha +\sin \alpha \,M(\dot A, A)},
\quad \alpha \in [0,\pi).
\end{equation}
In view of \eqref{blog}, the function classes $\mathfrak{C}$ and $\mathfrak{M}$ are related by the Cayley transform,
$$\mathfrak{C}=K\circ\mathfrak{M},
$$
where
$$
K(z)=\frac{z-i}{z+i}, \quad z\in \mathbb C.
$$That is,
$$\mathfrak{C}=\{K\circ M\, |\, M\in \mathfrak{M}\},
$$ where $ K\circ M$ denotes the composition of the functions $K$ and $M$.
Moreover, the transformation law \eqref{transs} shows that the class $\mathfrak{C}$ is closed under multiplication by a unimodular constant,
\begin{equation}\label{theta}\theta \cdot \mathfrak{C} = \mathfrak{C},\quad |\theta|=1.
\end{equation}
Accordingly, from \eqref{transm} one concludes that the class $\mathfrak{M}$ is closed under the action of a one parameter
subgroup of $SL(2, \mathbb R)$ of linear-fractional transformations
$$
K_\alpha\circ \mathfrak{M}=\mathfrak{M},
\quad \mathbb R\ni \alpha \mapsto K_\alpha,$$
given by $$
K _\alpha(z)= \frac{\cos \alpha \, z-\sin \alpha}{
\cos\alpha +\sin \alpha \,z} .
$$
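For example, the value $\alpha=\frac\pi2$ corresponds to $K_{\pi/2}(z)=-1/z$, so that $M\in \mathfrak{M}$ implies $-1/M\in \mathfrak{M}$.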
\subsection{The von Neumann extension parameter of a dissipative operator}
Denote by $\mathfrak{D}$
the set of all maximal dissipative unbounded operators $\widehat A$ such that the restriction $\dot A$ of $\widehat A$ onto $ \mathrm{Dom}(\widehat A)\cap\mathrm{Dom}((\widehat A)^*)$ is a densely defined symmetric operator with deficiency indices $(1,1)$.
Given $\widehat A\in \mathfrak{D}$ and a self-adjoint (reference) extension $A$ of the underlying symmetric operator $\dot A=\widehat A|_{\mathrm{Dom}(\widehat A)\cap\mathrm{Dom}((\widehat A)^*)}$, assume that the pair $(\dot A, A)$ satisfies Hypothesis \ref{hyp}
with some $g_\pm$ taken from the corresponding deficiency subspaces,
so that
$$
g_+-g_-\in \mathrm{Dom} ( A).$$
In this case,
\begin{equation}\label{unkappa}
g_+-\kappa g_-\in \mathrm{Dom} ( \widehat A)
\quad \text{for some } \quad \kappa \in \mathbb D.
\end{equation}
\begin{definition}
We call $\kappa=\kappa(\widehat A, A)$ the von Neumann extension parameter of the dissipative operator $\widehat A\in \mathfrak{D}$ relative to the reference self-adjoint operator $A$.
\end{definition}
\subsection{The characteristic function of a dissipative operator and the class $\mathfrak{S}$}
Suppose that $\widehat A\in \mathfrak{D}$ is a maximal dissipative operator, $\dot A=\widehat A|_{\mathrm{Dom}(\widehat A)\cap\mathrm{Dom}((\widehat A)^*)}$ its symmetric restriction, and $A$ is a reference self-adjoint extension of $\dot A$.
Following \cite{L} (also see \cite{AkG}, \cite{MT}) we define
the characteristic
function $S=S(\widehat A, A)$ of the dissipative operator $\widehat A$ relative to the reference self-adjoint operator $A$ as
\begin{equation}\label{ch1}
S(z)=\frac{s(z)-\kappa}
{\overline{ \kappa }\,s(z)-1},\quad z\in \mathbb C_+,
\end{equation}
where $s=s(\dot A, A)$ is the Liv\v{s}ic function associated with the pair $(\dot A, A)$ and the complex number $\kappa=\kappa(\widehat A, A)$ is the von Neumann extension parameter of $\widehat A$ (relative to $A$).
We stress that for a dissipative operator $\widehat A\in \mathfrak{D}$ one always has that
\begin{equation}\label{dota}
\mathrm{Dom}(\widehat A)\ne \mathrm{Dom}((\widehat A)^*)
\end{equation}
and, moreover, the underlying densely defined symmetric operator $\dot A$ can uniquely be recovered by restricting $\widehat A$
on $$\mathrm{Dom} (\dot A)=\mathrm{Dom} (\widehat A)\cap \mathrm{Dom}\left ( ( \widehat A)^*\right ).$$
This explains why it is more natural to associate the characteristic function with the pair
$(\widehat A, A)$ rather than with the triple $(\dot A, \widehat A, A)$ which would perhaps be more pedantic.
The class of all analytic mappings from $\mathbb C_+$ into the unit disk consisting of all the characteristic functions
$S(\widehat A, A)$ associated with arbitrary pairs $ (\widehat A, A)$, with $\widehat A\in \mathfrak{D}$ and $A$ a reference self-adjoint extension of the underlying symmetric operator $\dot A$, will be denoted by $\mathfrak{S}$.
As in the case of the class $\mathfrak{C}$, the class $\mathfrak{S}$ is also closed under multiplication by a constant unimodular factor (cf. \eqref{theta}), that is,
$$
\theta \cdot \mathfrak{S}=\mathfrak{S},\quad |\theta|=1.
$$
Indeed, if $S\in \mathfrak{S}$, then
$$
S=\frac{s-\kappa}
{\overline{ \kappa }\,s-1} \quad \text{for some } s\in \mathfrak{C}, \,\,\kappa\in \mathbb D.
$$
Therefore, $$
\theta \cdot S=\frac{\theta \cdot s- \theta \kappa}
{\overline{ \theta \kappa }\,\theta \cdot s-1}, \quad |\theta|=1.
$$
Since the class $\mathfrak{C}$ is closed under multiplication by a constant unimodular factor, $\theta\cdot s\in \mathfrak{C}$ and since $|\theta \kappa|<1$,
by definition \eqref{ch1}, the function
$\theta \cdot S$
belongs to the class $\mathfrak{S}$ as well.
Now it is easy to see that the class $\mathfrak{S}$ coincides with the orbit of the class $\mathfrak{C}$ under the action
of the group of automorphisms $\text{Aut} (\mathbb D)$ of the complex unit disk. That is,
\begin{equation}\label{kcirc}
K\circ \mathfrak{C}=\mathfrak{S},
\quad K\in\text{Aut} (\mathbb D).
\end{equation}
In particular, one obtains that
\begin{equation}\label{subsub}
\mathfrak{C}\subset \mathfrak{S}.
\end{equation}
From \eqref{kcirc} and \eqref{subsub} it follows that the class $\mathfrak{S}$ is closed under the action of the
group $\text{Aut} (\mathbb D)$, that is,
$$
K\circ \mathfrak{S}=\mathfrak{S},
\quad K\in\text{Aut} (\mathbb D). $$
\subsection{The unitary invariant $\widehat \kappa:\mathfrak{D}\to [0,1)$}\label{ssylka} Combining \eqref{vsea0} and \eqref{ch1} shows that the value of the von Neumann extension parameter $\kappa (\widehat A, A)$ can also be recognized as the value of the characteristic function at the point $z=i$, that is,
\begin{equation}\label{kappa1}\kappa=\kappa(\widehat A, A)=S(\widehat A, A)(i).
\end{equation}
Since by the Liv\v{s}ic theorem \cite[Theorem 13]{L} the characteristic function $S(\widehat A, A)$ determines the pair $(\widehat A, A)$ up to unitary equivalence provided that the underlying symmetric operator $\dot A$ is prime, cf. \cite{MT}, the parameter $\kappa$ is a unitary invariant of the pair $(\widehat A , A)$.
It is important to notice that
the absolute value $\widehat \kappa(\widehat A)=|\kappa(\widehat A, A)|$ of the von Neumann extension parameter
is independent of the choice of the reference self-adjoint extension $A$. Therefore,
the following functional
$$\widehat \kappa :\mathfrak{D}\to [0,1)
$$
of the form
$$ \widehat \kappa=\widehat \kappa(\widehat A)=|\kappa(\widehat A, A)|, \quad \widehat A\in \mathfrak{D},
$$ is well defined as one of the geometric unitary invariants of a dissipative operator from the class $\mathfrak{D}$.
The kernel of the functional $\widehat \kappa$ can be characterized as follows.
The inclusion \eqref{subsub} shows that
any Liv\v{s}ic function $s(\dot A, A)$ can be identified with the characteristic function $S(\widehat A,A')$ associated with some pair $(\widehat A, A')$ where $\widehat A\in \mathfrak{D}$ and $A'$ is an appropriate self-adjoint reference extension of the symmetric operator $\dot A
=\widehat A|_{\mathrm{Dom}(\widehat A)\cap \mathrm{Dom} ((\widehat A)^*)}$.
To be more specific, it suffices to take
the maximal dissipative extension $\widehat A$ of $\dot A$
with the domain
\begin{equation}\label{vanish}
\mathrm{Dom}(\widehat A)=\mathrm{Dom}(\dot A)\dot +\Ker ((\dot A)^*-iI)
\end{equation}
and to choose the reference self-adjoint operator
$A'$ in such a way that
$$
s(\dot A, A)=-s(\dot A, A').
$$
This is always possible due to \eqref{transs}. Since $\widehat \kappa(\widehat A)=0$ (combine \eqref{unkappa} and \eqref{vanish}), it is easy to see that
$$
S (\widehat A, A')=-s(\dot A, A')=s(\dot A,A)
$$
which proves the claim.
The subclass of maximal dissipative extensions $\widehat A$ with the property \eqref{vanish} will be denoted by $\dot \mathfrak{D}$. That is,
\begin{equation}\label{dotD}
\dot \mathfrak{D}=\{\widehat A\in \mathfrak{D}\,|\, \widehat \kappa(\widehat A)=0\}\subset \mathfrak{D},
\end{equation}
and, therefore,
$$
\dot \mathfrak{D}=\Ker ( \widehat \kappa).
$$
\section{Symmetric extensions of the direct sum of symmetric operators}
Suppose that $\dot A_1$ and
$\dot A_2$ are densely defined symmetric operators with deficiency indices $(1,1)$ acting in the Hilbert spaces ${\mathcal H}_1$ and ${\mathcal H}_2, $
respectively.
In accordance with the von Neumann extensions theory,
the set of all symmetric extensions $\dot A$
with deficiency indices $(1,1)$
of the direct sum of the symmetric operators $\dot A_1\oplus\dot A_2$
is in one-to-one correspondence with the set of
one-dimensional {\it neutral}
subspaces ${\mathcal L}$ of the quotient space
$\mathrm{Dom} ((\dot A_1\oplus\dot A_2)^*)/
\mathrm{Dom} (\dot A_1\oplus\dot A_2)$
such that the adjoint operator
$(\dot A_1\oplus\dot A_2)^*$ restricted on ${\mathcal L}$ is symmetric,
that is,
$$
{\ensuremath{\mathrm{Im}}} ((\dot A_1\oplus \dot A_2)^*f, f)=0, \quad f\in {\mathcal L}.
$$
The above mentioned correspondence can be established in the following way: given ${\mathcal L}$, the corresponding symmetric operator
$
\dot A
$
is determined by the restriction of
$(\dot A_1\oplus \dot A_2)^*
$ on
$$
\mathrm{Dom}(\dot A)=\mathrm{Dom}(\dot A_1)\oplus \mathrm{Dom} (\dot A_2)\dot +{\mathcal L},
$$ and vice versa.
Our main technical result describes
the geometry of the deficiency subspaces of the
symmetric extensions $\dot A$ associated with a two-parameter family of
neutral subspaces ${\mathcal L}$. We also explicitly obtain the Liv\v{s}ic function of these symmetric extensions $\dot A$ relative to an
appropriate self-adjoint extension of $\dot A$.
\begin{theorem}\label{technich0} Assume that $\dot A_k$, $k=1,2$,
are closed
symmetric operators with deficiency indices $(1,1)$ in the Hilbert spaces
${\mathcal H}_k$, $k=1,2$.
Suppose that
$
g_\pm^k\in \Ker ((\dot A_k)^*\mp iI)$, $\|g_\pm^k\|=1$, $k=1,2$.
Introduce the one-dimensional
subspace ${\mathcal L}\subset {\mathcal H}_1\oplus {\mathcal H}_2$ by
$$
{\mathcal L}
=\mathrm{lin\ span}\left \{ (\sin \alpha g_+^1-\sin \beta g_-^1)
\oplus(\cos \alpha
g_+^2- \cos\beta g_-^2 )
\right \},
$$
$$\alpha, \beta\in [0, \pi).
$$
Then \begin{itemize}
\item[(i)]
the linear set ${\mathcal L}$ is a neutral subspace of the quotient space
$$\mathrm{Dom} ((\dot A_1\oplus\dot A_2)^*)/
\mathrm{Dom} (\dot A_1\oplus\dot A_2),$$
\item[(ii)] the restriction $\dot A$ of
the operator $(\dot A_1\oplus \dot A_2)^*$
on the domain
$$
\mathrm{Dom}(\dot A)=\mathrm{Dom} (\dot A_1)\oplus \mathrm{Dom} (\dot A_2)\dot +{\mathcal L}
$$
is a symmetric operator with deficiency indices $(1,1)$
and the deficiency subspaces of $\dot A$ are given by
$$
\Ker ((\dot A)^*\mp iI)=\mathrm{lin\ span} \{ G_\pm \},
$$
where
\begin{equation}\label{G+G-}
G_+= \cos \alpha g_+^1-\sin \alpha g_+^2
\,\, \text{ and } \,\,
G_-= \cos \beta g_-^1- \sin \beta g^2_-,
\end{equation}
$$\|G_\pm\|=1;
$$
\item[(iii)] the Liv\v{s}ic function
$s=s(\dot A, A)$ associated with the pair $(\dot A, A)$, where $A$ is a reference self-adjoint extension of $\dot A$
such that
$$G_+-G_-\in \mathrm{Dom}(A),
$$
admits the representation
\begin{equation}\label{formula}
s(z)=\frac{\cos \alpha \cos \beta\, s_1(z)- s_1(z)s_2(z)
+ \sin \alpha \sin \beta\, s_2(z)
}
{1 - (\sin \alpha \sin \beta\, s_1(z)+
\cos \alpha \cos \beta\, s_2(z) ) },\quad z\in \mathbb C_+.
\end{equation}
Here
$s_k=s(\dot A_k, A_k)
$
are the Liv\v{s}ic functions associated with the pairs $(\dot A_k, A_k)$, $k=1,2.$
\end{itemize}
\end{theorem}
\begin{proof} (i).
First we note that the element $f\in {\mathcal L}\mathfrak{su}bset \mathrm{Dom} (\dot A)$ given by
\begin{equation}\label{stali1}
f=(\sin \alpha g_+^1-\sin \beta g_-^1)
+(\cos \alpha
g_+^2- \cos\beta g_-^2 )
\end{equation}
belongs to $ \mathrm{Dom} ((\dot A_1\oplus \dot A_2)^*)$
and that
\begin{equation}\label{stali2}
(\dot A_1\oplus \dot A_2)^*f=i(\sin \alpha g_+^1+\sin \beta g_-^1
+\cos \alpha
g_+^2+\cos\beta g_-^2 ).
\end{equation}
Combining \eqref{stali1} and \eqref{stali2}, one obtains
\begin{align*}
((\dot A_1\oplus \dot A_2)^*f,f)=&i(\sin^2\alpha-\sin^2\beta+\cos^2\alpha-\cos^2\beta)
\\ \,\, &
+i\sin\alpha \sin \beta( (g^1_-, g^1_+) -(g^1_+, g^1_-))
\\ \,\, &+i
\cos \alpha \cos \beta( (g^2_-, g^2_+) -(g^2_+, g^2_-)).
\end{align*}
Hence,
$$
{\ensuremath{\mathrm{Im}}} ((\dot A_1\oplus \dot A_2)^*f,f)=0
,\quad f\in {\mathcal L},$$
and therefore
$$
{\ensuremath{\mathrm{Im}}} ((\dot A_1\oplus \dot A_2)^*f,f)=0,\quad \text{ for all } \,\,
f\in \mathrm{Dom}(\dot A),
$$
which proves that the operator $\dot A$ is symmetric and $(i)$ follows.
(ii). Let us show that
$$
\Ker ((\dot A)^*-iI)=\mathrm{lin\ span} \{ G_+ \}.
$$
We need to check that
$$
((\dot A+iI)y,G_+ )=0 \quad \text{for all}\quad y\in\mathrm{Dom}(\dot A).
$$
Take a $y\in \mathrm{Dom}(\dot A)$. Then $y$ can be decomposed as
$$
y=h_1+h_2+Cf,
$$
where $h_k\in \mathrm{Dom} (\dot A_k)$, $k=1,2$, $C\in \mathbb C$, and
\begin{equation}\label{fff}
f=(\sin \alpha g_+^1-\sin \beta g_-^1)
\oplus(\cos \alpha
g_+^2- \cos\beta g_-^2 )\in {\mathcal L}.
\end{equation}
Next,
\begin{align}
((\dot A+iI)y,G_+ )&=((\dot A+iI)(h_1+h_2+Cf), G_+)
\label{okm1}\\&=
((\dot A_1+iI)h_1\oplus(\dot A_2+iI)h_2,
G_+)
\nonumber \\&+C((\dot A+iI)f,G_+).\nonumber
\end{align}
On the other hand, since $g_+^k\in \Ker ((\dot A_k)^*-iI)$, $k=1,2,$
\begin{align}
((\dot A_1+iI)h_1\oplus(\dot A_2+iI)h_2,
G_+)&=
\cos \alpha((\dot A_1+iI)h_1,g_+^1)
\label{okm2}\\&-\sin \alpha
((\dot A_2+iI)h_2,g_+^2)=0.\nonumber
\end{align}
Now we can prove that
\begin{equation}\label{okm3}
((\dot A+iI)f,G_+)=0,\quad f\in {\mathcal L}.
\end{equation}
Indeed,
\begin{align*}
(\dot A+iI)f&=(\dot A+iI)((\sin \alpha g_+^1-\sin \beta g_-^1)
+(\cos \alpha
g_+^2- \cos\beta g_-^2 ))\\&=2i(\sin \alpha g_+^1
+\cos \alpha
g_+^2 )
\end{align*}
and since
\begin{equation}\label{g+g+}
G_+= \cos \alpha g_+^1-\sin \alpha g_+^2,
\end{equation}
we have
$$
((\dot A+iI)f,G_+)
=(2i(\sin \alpha g_+^1
+\cos \alpha
g_+^2 ),\cos \alpha g_+^1-\sin \alpha g_+^2)=0.
$$
Combining \eqref{okm1}, \eqref{okm2} and \eqref{okm3} proves that
$$
((\dot A+iI)y,G_+ )=0 \quad\text{for all} \quad y\in \mathrm{Dom}(\dot A).
$$
Therefore,
$$
G_+\in \Ker ((\dot A)^*-iI).
$$
In a similar way it follows that $G_-$ given by
\begin{equation}\label{g-g-}G_-= \cos \beta g_-^1- \sin \beta g^2_-
\end{equation}
generates the deficiency subspace $\Ker ((\dot A)^*+iI)$.
Since $\|g_\pm^{1}\|=\|g_\pm^2\|=1$ and the elements $g_\pm^1$ and $g_\pm^2$ are orthogonal to each other,
\eqref{g+g+}
and \eqref{g-g-} yield
\begin{equation}\label{normm} \|G_\pm \|=1.
\end{equation}
(iii). In order to evaluate the Liv\v{s}ic function associated with the pair $(\dot A, A)$,
choose nontrivial elements $g_z^k\in \Ker ( (\dot A_k)^*-zI)$, $k=1,2$, $z\in \mathbb C_+$.
Suppose that for $z\in \mathbb C_+$ an element $G_z\ne 0$ belongs to the deficiency subspace $\Ker ( (\dot A)^*-zI)$.
Since $\dot A \mathfrak{su}bset (\dot A_1\oplus \dot A_2)^*$, one gets that
$$
G_z=g_z^1+T(z)g_z^2\in \Ker ( (\dot A_1\oplus \dot A_2)^*-zI)
$$
for some function $T(z)$ (to be determined later).
Therefore, the Liv\v{s}ic function $s(z)=s(\dot A, A)(z)$ associated with the pair $(\dot A, A)$ admits the representation
\begin{align}
s(z)&=\frac{z-i}{z+i}\cdot \frac{(G_z, G_-)}{(G_z, G_+)}
=\frac{z-i}{z+i}\cdot \frac{(g_z^1+ T(z)g_z^2, \cos\beta g_-^1- \sin \beta g^2_-)}
{(g_z^1+ T(z)g_z^2, \cos \alpha g_+^1-\sin \alpha g_+^2) }
\label{inis}\\
&=\frac{z-i}{z+i}\cdot
\frac{ \cos \beta (g^1_z, g^1_-)-T(z)\sin \beta (g_z^2, g_-^2)
}
{ \cos \alpha (g^1_z, g^1_+)-T(z) \sin \alpha ( g_z^2, g_+^2)}.\nonumber
\end{align}
Since $G_z\in \Ker ( (\dot A)^*-zI)$ implies that
\begin{equation}\label{G_Z}
(G_z, (\dot A-\overline{z}I)f)=0,
\end{equation}
where the element $f\in {\mathcal L}$ is given by \eqref{fff},
the equation \eqref{G_Z} yields the following equation for determining the function $T(z)$:
\begin{equation}\label{texn}
\left (g_z^1+T(z)g_z^2, (\dot A-\overline{z}I)\left (
(\sin \alpha g_+^1-\sin \beta g_-^1) \oplus
(\cos \alpha g_+^2- \cos \beta g_-^2)\right ) \right )=0.
\end{equation}
Since
$$\dot A \subset (\dot A_1\oplus \dot A_2)^*\quad \text{and}\quad
(\dot A_k)^* g^k_\pm =\pm i g^k_\pm, \quad k=1,2,
$$
from \eqref{texn} one gets that
\begin{align*}
&(-i-z) \sin \alpha (g_z^1, g_+^1)-(i-z) \sin \beta(g_z^1, g_-^1)
\\
&+T(z)\left [(-i-z)\cos
\alpha (g_z^2, g_+^2) - (i-z)\cos \beta (g_z^2, g_-^2)\right ]=0.
\end{align*}
Solving for $T(z)$, we have
\begin{align}
T(z)&=- \frac{(-i-z)\sin \alpha
(g_z^1, g_+^1)-(i-z)
\sin \beta (g_z^1, g_-^1)}{(-i-z)
\cos \alpha (g_z^2, g_+^2) - (i-z)\cos \beta (g_z^2, g_-^2)}
\label{TTT}
\\
&
=- \frac{(g_z^1, g_+^1)}{(g_z^2, g_+^2)}\cdot
\frac{
\sin \alpha -\sin \beta \frac{z-i}{z+i}\cdot
\frac{ (g_z^1, g_-^1)}{(g_z^1, g_+^1)}}
{\cos \alpha
-\cos \beta\frac{z-i}{z+i}\cdot
\frac{ (g_z^2, g_-^2)}{(g_z^2, g_+^2)}}
\nonumber \\&
=- \frac{(g_z^1, g_+^1)}{(g_z^2, g_+^2)}\cdot
\frac{
\sin \alpha -\sin \beta s_1(z)}
{\cos \alpha
-\cos \beta s_2(z)}. \nonumber
\end{align}
Therefore, taking into account \eqref{inis} and \eqref{TTT}, one arrives at the representation
\begin{align*}
s(z)&=\frac{z-i}{z+i}\cdot
\frac{ \cos \beta (g^1_z, g^1_-)+ \frac{(g_z^1, g_+^1)}{(g_z^2, g_+^2)}\cdot
\frac{
\sin \alpha -\sin \beta s_1(z)}
{\cos \alpha
-\cos \beta s_2(z)}\sin \beta (g_z^2, g_-^2)
}
{ \cos \alpha (g^1_z, g^1_+)+ \frac{(g_z^1, g_+^1)}{(g_z^2, g_+^2)}\cdot
\frac{
\sin \alpha -\sin \beta s_1(z)}
{\cos \alpha
-\cos \beta s_2(z)}\sin \alpha ( g_z^2, g_+^2)}
\end{align*}
which, after a direct computation, yields \eqref{formula}.
The proof is complete.
\end{proof}
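As a quick consistency check of \eqref{formula}, observe that for $\alpha=\beta=0$ the right-hand side reduces to
$\frac{s_1(z)-s_1(z)s_2(z)}{1-s_2(z)}=s_1(z)$.
This is to be expected: in this case ${\mathcal L}=\mathrm{lin\ span}\{0\oplus(g_+^2-g_-^2)\}$, the deficiency elements are $G_\pm=g_\pm^1$, and the second summand is extended to a self-adjoint operator, so the pair $(\dot A, A)$ carries no information beyond that of the pair $(\dot A_1, A_1)$.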
\begin{remark}\label{kogdage} A straightforward computation using \eqref{formula}
shows that representation \eqref{formula} is a particular case (for $k=0$) of a more general equality
\begin{equation}\label{formula1}
\frac{s(z)-k}{
ks(z)-1}=\frac{a_1 s_1(z)+a_2 s_2(z)-
s_1(z)s_2(z)
-k}
{a_2 s_1(z)+a_1 s_2(z)-ks_1(z)s_2(z)-
1},\quad k\in [0,1).
\end{equation}
Here
$$
a_1=\cos \alpha \cos \beta +k\sin \alpha \sin \beta,
$$
$$
a_2=\sin \alpha \sin \beta+k\cos \alpha \cos \beta.
$$
\end{remark}
\section{The addition theorem}
As the first application of Theorem \ref{technich0} we obtain the following addition theorem for the Weyl-Titchmarsh functions.
\begin{theorem}[{\bf The Addition Theorem}]\label{additionth}
Assume the hypotheses of Theorem \ref{technich0} with $\alpha=\beta.$
Suppose that $\dot A$ is the symmetric operator referred
to in Theorem
\ref{technich0}.
Then
the Weyl-Titchmarsh function $M$ associated with the pair
$(\dot A, A_1\oplus A_2)$
is a convex combination
of the Weyl-Titchmarsh functions $M_k$
associated with the pairs $(\dot A_k, A_k)$, $k=1,2$, which is given by
\begin{equation}\label{vypvyp}
M (z)=\cos^2 \alpha \,\,M_1(z)+\sin^2 \alpha \,\,M_2(z),
\quad z\in \mathbb C_+.
\end{equation}
\end{theorem}
\begin{proof} Since by hypothesis $\alpha=\beta$,
one concludes that
$$
G_+-G_-\in \mathrm{Dom} (A_1\oplus A_2),
$$
where $G_\pm$ are the deficiency elements of $\dot A$ from
Theorem \ref{technich0} given by \eqref{G+G-}.
So, one can apply
Theorem \ref{technich0} with the self-adjoint reference operator
$
A=A_1\oplus A_2
$ to conclude that
$$
s (z)=
\frac{\cos^2 \alpha \, s_1(z)-s_1(z)s_2(z)
+ \sin^2 \alpha \, s_2(z)
}
{1 - (\sin^2 \alpha s_1(z)+
\cos^2 \alpha s_2(z) ) },
$$
where
$$
s(z)=\frac{M(z)-i}{M (z)+i}\quad \text{ and }\quad
s_k=\frac{M_k(z)-i}{M_k(z)+i}, \quad k=1,2.
$$
Thus, to prove \eqref{vypvyp} it remains to check the equality
\begin{align*}
&\frac{\cos^2 \alpha \,\,M_1(z)+\sin^2 \alpha \,\,M_2(z)-i}{\cos^2 \alpha \,\,M_1(z)+\sin^2 \alpha \,\,M_2(z)+i}
\\
&=\frac{\cos^2 \alpha \frac{M_1(z)-i}{M_1(z)+i}-
\frac{M_1(z)-i}{M_1(z)+i}\frac{M_2(z)-i}{M_2(z)+i}
+ \sin^2 \alpha \frac{M_2(z)-i}{M_2(z)+i}
}
{1 - \left (\sin^2 \alpha \frac{M_1(z)-i}{M_1(z)+i}+
\cos^2 \alpha \frac{M_2(z)-i}{M_2(z)+i} \right ) }
\end{align*}
which can be directly verified.
\end{proof}
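We remark that, given $p\in[0,1]$, choosing $\alpha\in[0,\pi/2]$ with $\cos^2\alpha=p$ in Theorem \ref{additionth} realizes the convex combination $pM_1+(1-p)M_2$ as the Weyl-Titchmarsh function associated with the pair $(\dot A, A_1\oplus A_2)$; in particular, the choice $\alpha=\frac\pi4$ produces the arithmetic mean $\frac12(M_1+M_2)$.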
\section{An operator coupling of dissipative operators}
We now introduce the concept of the operator coupling of two dissipative unbounded operators.
\begin{definition}
Suppose that
$\widehat A_1\in \mathfrak{D}({\mathcal H}_1)$ and $\widehat A_2\in \mathfrak{D}({\mathcal H}_2)$ are maximal dissipative unbounded operators acting in the Hilbert spaces ${\mathcal H}_1$ and ${\mathcal H}_2$, respectively.
We say that a maximal dissipative operator $\widehat A\in \mathfrak{D}({\mathcal H}_1\oplus{\mathcal H}_2)$ is an operator coupling of $\widehat A_1$ and $\widehat A_2$,
in writing, $$\widehat A=\widehat A_1\uplus \widehat A_2,$$ if
\begin{itemize}
\item[(i)] the Hilbert space ${\mathcal H}_1$ is invariant for $\widehat A$ and
the restriction of $\widehat A$ on ${\mathcal H}_1$ coincides with the dissipative operator $\widehat A_1$, that is,
$$
\mathrm{Dom}(\widehat A)\cap {\mathcal H}_1=\mathrm{Dom} (\widehat A_1),
$$
$$
\widehat A|_{{\mathcal H}_1\cap \mathrm{Dom}(\widehat A_1)}=\widehat A_1,
$$
\end{itemize}
and
\begin{itemize}
\item[(ii)]the symmetric operator $\dot A= \widehat A|_{\mathrm{Dom}(\widehat A)\cap \mathrm{Dom} ((\widehat A)^*)}$ has the property
$$
\dot A\subset \widehat A_1\oplus (\widehat A_2)^*.
$$
\end{itemize}
\end{definition}
To justify the existence of an operator coupling of two dissipative operators and discuss properties of the concept
we proceed with preliminary considerations.
Assume the following hypothesis.
\begin{hypothesis}\label{hyphyp} Suppose that
$\widehat A_1\in \mathfrak{D}({\mathcal H}_1)$ and $\widehat A_2\in \mathfrak{D}({\mathcal H}_2)$ are maximal dissipative unbounded operators acting in the Hilbert spaces ${\mathcal H}_1$ and ${\mathcal H}_2$, respectively.
Assume, in addition, that
$$
\dot A_j=\widehat A_j|_{\mathrm{Dom}(\widehat A_j)\cap\mathrm{Dom}((\widehat A_j)^*)},\quad j=1,2,
$$
are the corresponding underlying symmetric operators.
\end{hypothesis}
First we show that under Hypothesis \ref{hyphyp} the following
{\bf extension problem with a constraint} admits a one-parameter family of solutions.
This problem is:
\begin{itemize}
\item[]
{\it Find a closed symmetric operator $\dot A$ with deficiency indices $(1,1)$ such that }
\begin{equation}\label{EPC}
\dot A_1\oplus \dot A_2\subset \dot A
\quad \text{and }\quad
\dot A\subset \widehat A_1\oplus (\widehat A_2)^*
\end{equation}
\end{itemize}
The lemma below justifies the solvability of the extension problem with a constraint.
\begin{lemma}\label{dlinnaia}
Assume Hypothesis \ref{hyphyp}. Then
\begin{itemize}
\item[(i)] there exists a one parameter family $[0, 2\pi)\ni \theta\mapsto \dot A_\theta$ of symmetric restrictions with deficiency indices $(1,1)$ of the operator $(\dot A_1\oplus\dot A_2)^*$ such that
$$
\dot A_1\oplus \dot A_2\subset \dot A_\theta\subset \widehat A_1\oplus (\widehat A_2)^*,\quad \theta\in [0,2\pi);
$$
\item[(ii)] if $\dot A$ is a closed symmetric operator with deficiency indices $(1,1)$ such that
$$
\dot A_1\oplus \dot A_2\subset \dot A\subset \widehat A_1\oplus (\widehat A_2)^*,
$$
then there exists a $\theta\in [0, 2\pi)$ such that
$$\dot A=\dot A_\theta.
$$
\end{itemize}
\end{lemma}
\begin{proof} We first introduce the necessary notation.
Let $\kappa_j$, $ 0\le \kappa_j<1$, $j=1,2, $ stand for the absolute value of the von Neumann parameter of $\widehat A_j$,
$$\kappa_j=\widehat \kappa (\widehat A_j), \quad j=1,2.
$$
Fix a basis
$g_\pm^j \in \Ker ((\dot A_j)^*\mp iI)$, $\|g_\pm^j\|=1$, $j=1,2$, in the corresponding deficiency subspaces
such that
$$
g_+^j-\kappa_jg_-^j\in \mathrm{Dom} (\widehat A_j),
\quad j=1,2.
$$
(i). To show that there exists at least one symmetric extension $\dot A_0$ with deficiency indices $(1,1)$ of $\dot A_1\oplus\dot A_2$
such that
$$
\dot A_0\subset \widehat A_1\oplus (\widehat A_2)^*,
$$
suppose that $\alpha, \beta \in \big [0, \frac{\pi}{2} \big )$ are chosen in such a way that
\begin{equation}\label{tana1}
\alpha=\begin{cases}
\arctan \frac{1}{\kappa_2}\sqrt{\frac{1-\kappa_2^2}{1-\kappa_1^2}},&\text{if } \kappa_2\ne 0\\
\frac\pi2,&\text{if }\kappa_2=0
\end{cases}
\end{equation}
and
\begin{equation}\label{tana2}
\beta = \begin{cases}
\arctan (\kappa_1\kappa_2 \tan \alpha),&\text{if } \kappa_2\ne 0\\
\arctan\frac{\kappa_1}{\sqrt{1-\kappa_1^2}},&\text{if } \kappa_2= 0
\end{cases}.
\end{equation}
By Theorem \ref{technich0} (i),
the one-dimensional subspace
\begin{equation}\label{netral}
{\mathcal L}_0
=\mathrm{lin\ span}\left \{ (\sin \alpha g_+^1-\sin \beta g_-^1)
\oplus(\cos \alpha
g_+^2- \cos\beta g_-^2 )
\right \}
\end{equation}
is a neutral subspace of the quotient space
$$\mathrm{Dom} ((\dot A_1\oplus\dot A_2)^*) / \mathrm{Dom} (\dot A_1\oplus\dot A_2).
$$
By Theorem \ref{technich0} (ii), the restriction $\dot A_0$ of
the operator $(\dot A_1\oplus \dot A_2)^*$
on the domain
\begin{equation}\label{vbnm}
\mathrm{Dom}(\dot A_0)=\mathrm{Dom} (\dot A_1)\oplus \mathrm{Dom} (\dot A_2)\dot +{\mathcal L}_0
\end{equation}
is a symmetric operator with deficiency indices $(1,1)$.
Taking into account the relations (see \eqref{tana1}, \eqref{tana2})
$$\sin \beta =\kappa_1\sin\alpha \quad \text{and }\quad \cos \beta=\frac{1}{\kappa_2}\cos \alpha,\quad \kappa_2\ne 0,
$$ and
$$\sin \beta =\kappa_1 \quad \text{and }\quad \cos \beta=\sqrt{1-\kappa_1^2},\quad \kappa_2= 0,$$
from \eqref{netral} one obtains that the subspace ${\mathcal L}_0$ admits the representation
$${\mathcal L}_0=
\begin{cases}
\mathrm{lin\ span}\left \{ \sin \alpha \,\, (g_+^1-\kappa_1 g_-^1)
\oplus\cos \alpha \left (
g_+^2- \frac{1}{\kappa_2}g_-^2 \right )\right \},&\text{if }\kappa_2\ne 0,
\\
\mathrm{lin\ span}\left \{ (g_+^1-\kappa_1 g_-^1)
\oplus
\left (-\sqrt{1-\kappa_1^2}g_-^2 \right )\right \},& \text{if } \kappa_2=0.
\end{cases}
$$
It follows that
$${\mathcal L}_0\subset \mathrm{Dom} (\widehat A_1\oplus (\widehat A_2)^*).
$$
From \eqref{vbnm} one concludes that the symmetric operator $\dot A_0$ has the property
\begin{equation}\label{111}\dot A_0\subset \widehat A_1\oplus (\widehat A_2)^*.
\end{equation}
Clearly, for any $\theta\in [0,2\pi)$ the subspace
\begin{equation}\label{sravni}
{\mathcal L}_\theta=
\begin{cases}
\mathrm{lin\ span}\left \{ e^{i\theta}\sin \alpha \,\, (g_+^1-\kappa_1 g_-^1)
\oplus\cos \alpha \left (
g_+^2- \frac{1}{\kappa_2}g_-^2 \right )\right \},&\text{if }\kappa_2\ne 0,
\\
\mathrm{lin\ span}\left \{ e^{i\theta} (g_+^1-\kappa_1 g_-^1)
\oplus
\left (-\sqrt{1-\kappa_1^2}g_-^2 \right )\right \},& \text{if } \kappa_2=0.
\end{cases}
\end{equation}
is also a neutral subspace of the quotient space
$$\mathrm{Dom} ((\dot A_1\oplus\dot A_2)^*) / \mathrm{Dom} (\dot A_1\oplus\dot A_2).
$$
Therefore, the symmetric operator $\dot A_\theta$ defined as the restrictions of $(\dot A_1\oplus\dot A_2)^*$
on
$$\mathrm{Dom}(\dot A_\theta)=\mathrm{Dom} (\dot A_1\oplus \dot A_2)\dot + {\mathcal L}_\theta, \quad \theta\in [0,2\pi),
$$has deficiency indices $(1,1) $ and
$$
(\dot A_1\oplus \dot A_2)\subset \dot A_\theta \subset (\widehat A_1\oplus (\widehat A_2)^*)\subset (\dot A_1\oplus \dot A_2)^*, \quad \theta\in
[0,2\pi),
$$
proving the claim (i).
(ii). Introduce the elements
\begin{equation}\label{eqref1}
f^1=g_+^1-\kappa_1g_-^1\in \mathrm{Dom} (\widehat A_1)
\subset {\mathcal H}_1
\end{equation}
and
\begin{equation}\label{eqref2}
f^2=g_+^2-\kappa_2^{-1}g_-^2 \in
\mathrm{Dom} ((\widehat A_2)^*)\subset {\mathcal H}_2 \quad (\kappa_2\ne0).
\end{equation}
If $\kappa_2=0$, then we take
\begin{equation}\label{eqref3}
f^2=-\sqrt{1-\kappa_1^2}\,g_-^2 \in
\mathrm{Dom} ((\widehat A_2)^*)\subset {\mathcal H}_2.
\end{equation}
A simple computation shows that
\begin{equation}\label{fedor}
{\ensuremath{\mathrm{Im}}} (\widehat A_1 f^1,f^1)=(1-\kappa_1^2)>0\end{equation}
and that
\begin{equation}\label{fedor2}
{\ensuremath{\mathrm{Im}}} ((\widehat A_2)^* f^2,f^2)=
\begin{cases}
1-\kappa_2^{-2},&\text{if } \kappa_2\ne 0\\
\kappa_1^{2}-1,&\text{if } \kappa_2=0
\end{cases}.
\end{equation}
Hence,
$
{\ensuremath{\mathrm{Im}}} ((\widehat A_2)^* f^2,f^2)<0
$.
Therefore, if
$
f=af^1+bf^2$, $a,b\in \mathbb C$,
then
$$
{\ensuremath{\mathrm{Im}}}((\dot A_1\oplus \dot A_2)^*f,f)=
\begin{cases}
|a|^2(1-\kappa_1^2)+|b|^2(1-\kappa_2^{-2}),& \kappa_2\ne0\\
|a|^2(1-\kappa_1^2)-|b|^2(1-\kappa_1^{2}),& \kappa_2=0
\end{cases}.
$$
This means that a one-dimensional subspace
$${\mathcal L} \subset\mathrm{lin\ span} \{f^1,f^2\}\subset \mathrm{Dom} (\widehat A_1)\oplus \mathrm{Dom}((\widehat A_2)^*)$$
is a neutral (Lagrangian) subspace for the symplectic form
$$\omega(h,g)=((\dot A_1\oplus \dot A_2)^*h,g)-(h,(\dot A_1\oplus \dot A_2)^*g), \quad h,g\in \mathrm{Dom}((\dot A_1\oplus \dot A_2)^*)
$$
if and only if ${\mathcal L}$ admits the representation
\begin{equation}\label{cl}{\mathcal L}=\mathrm{lin\ span}\{e^{i\theta}\sin \alpha f^1\oplus\cos \alpha f^2\} ,
\end{equation}
for some $\theta\in [0,2\pi)$ where
$$\tan^2 \alpha =\frac{\kappa_2^{-2}-1}{1-\kappa_1^2},\quad\text{that is,}\quad \tan \alpha=\frac{1}{\kappa_2}\sqrt{\frac{1-\kappa_2^2}{1-\kappa_1^2}},$$
if $\kappa_2\ne 0$, and
\begin{equation}\label{cl1}
{\mathcal L}=\mathrm{lin\ span}\{e^{i\theta}f^1\oplus f^2\},
\end{equation}
if $\kappa_2=0$.
Taking into account \eqref{eqref1}--\eqref{eqref3} and comparing \eqref{cl} and \eqref{cl1} with \eqref{sravni}, one concludes that
\begin{equation}\label{vso}
{\mathcal L}={\mathcal L}_\theta.
\end{equation}
By hypothesis (ii), $\dot A$ is a closed symmetric operator with deficiency indices $(1,1)$ and
$$
\dot A_1\oplus \dot A_2\subset \dot A\subset \widehat A_1\oplus (\widehat A_2)^*.
$$
Therefore, the subspace
$$
\mathrm{Dom}(\dot A)\cap\mathrm{Dom}( \widehat A_1\oplus (\widehat A_2)^*)
$$
is a neutral subspace. Hence, by \eqref{vso},
$$
\mathrm{Dom}(\dot A)=\mathrm{Dom}(\dot A_1\oplus \dot A_2)\dot +{\mathcal L}_\theta \quad\text{for some }\quad \theta\in[0,2\pi)
$$
which means that
$$\dot A=\dot A_\theta
$$
proving the claim (ii).
The proof is complete.
\end{proof}
Our next result, on the one hand, shows that given a solution $\dot A$ of the extension problem with a constraint \eqref{EPC}, there exists a unique
operator coupling $\widehat A_1\uplus\widehat A_2$ of $\widehat A_1$ and
$\widehat A_2$ such that
$$\dot A\subset \widehat A_1\uplus\widehat A_2.
$$
On the other hand, this result justifies that
the functional
$$\widehat \kappa :\mathfrak{D}\to [0,1)$$
introduced in subsection \ref{ssylka} is multiplicative with respect to
the operator coupling operation.
\begin{theorem}[{\bf Multiplicativity of the extension parameter}]\label{nachalo}
Assume Hypothesis \ref{hyphyp}.
Suppose, in addition, that $\dot A$ is a solution of the extension problem with a constraint \eqref{EPC}.
Then
\begin{itemize}
\item[(i)] there exists a unique operator coupling $\widehat A=\widehat A_1\uplus \widehat A_2\in \mathfrak{D}({\mathcal H}_1\oplus {\mathcal H}_2)$ such that
$$
\widehat A|_{\mathrm{Dom} (\widehat A)\cap\mathrm{Dom}(( \widehat A)^*)}=\dot A;
$$
\item[(ii)] for any operator coupling $\widehat A$ of $\widehat A_1$ and $\widehat A_2$, the multiplication rule
\begin{equation}\label{multkappa1}\widehat \kappa(\widehat A)=\widehat \kappa (\widehat A_1)\cdot \widehat \kappa(\widehat A_2)
\end{equation}
holds.
Here $\widehat \kappa(\cdot )$ stands for the absolute value of the von Neumann parameter of a dissipative operator.
\end{itemize}
\end{theorem}
\begin{proof} (i).
As in the proof of Lemma \ref{dlinnaia}, start with a basis
$g_\pm^j \in \Ker ((\dot A_j)^*\mp iI)$, $\|g_\pm^j\|=1$, $j=1,2$, in the corresponding deficiency subspaces
such that
\begin{equation}\label{bubu}
g_+^j-\kappa_jg_-^j\in \mathrm{Dom} (\widehat A_j),
\quad j=1,2,
\end{equation}
where $\kappa_j$ stands for the absolute value of the von Neumann parameter of $\widehat A_j$,
$$\kappa_j=\widehat \kappa (\widehat A_j), \quad j=1,2.
$$
By Lemma \ref{dlinnaia}, the domain of $\dot A$ admits the representation
$$\mathrm{Dom} (\dot A)=\mathrm{Dom}(\dot A_1\oplus \dot A_2)\dot +{\mathcal L}_\theta,
$$
where
\begin{equation}\label{sravnieche}{\mathcal L}_\theta=
\begin{cases}
\mathrm{lin\ span}\left \{ e^{i\theta}\sin \alpha \,\, (g_+^1-\kappa_1 g_-^1)
\oplus\cos \alpha \left (
g_+^2- \frac{1}{\kappa_2}g_-^2 \right )\right \},&\text{if } \kappa_2\ne 0
\\
\mathrm{lin\ span}\left \{ e^{i\theta} (g_+^1-\kappa_1 g_-^1)
\oplus
\left (-\sqrt{1-\kappa_1^2}g_-^2 \right )\right \},& \text{if } \kappa_2=0
\end{cases}
\end{equation}
and
\begin{equation}\label{tana11}
\tan \alpha=\frac{1}{\kappa_2}\sqrt{\frac{1-\kappa_2^2}{1-\kappa_1^2}}, \quad \kappa_2\ne 0.
\end{equation}
Without loss of generality one may assume that $\theta=0$. Indeed, instead of taking the basis $g_\pm^1 \in\Ker ((\dot A_1)^*\mp iI)$, one can start with the basis
$ e^{i\theta}g_\pm^1 \in\Ker ((\dot A_1)^*\mp iI)$ without changing the von Neumann extension parameter $\kappa_1$ that characterizes
the domain of $ \widehat A_1$ (see eq. \eqref{bubu}).
Taking into account the relations $$\sin \beta =\kappa_1\sin\alpha \quad \text{and }\quad \cos \beta=\frac{1}{\kappa_2}\cos \alpha,
\quad \text{if } \kappa_2\ne 0,
$$ and
$$\sin \beta =\kappa_1 \quad \text{and }\quad \cos \beta=\sqrt{1-\kappa_1^2},\quad \text{if } \kappa_2= 0,$$
it is easy to see that
\begin{equation}\label{netral1}
{\mathcal L}_0
=\mathrm{lin\ span}\left \{ (\sin \alpha g_+^1-\sin \beta g_-^1)
\oplus(\cos \alpha
g_+^2- \cos\beta g_-^2 )
\right \}.
\end{equation}
In accordance with Theorem \ref{technich0}, introduce the maximal dissipative extension $\widehat A$ of $\dot A$
defined as the restriction of $(\dot A_1\oplus\dot A_2)^*$ on
$$\mathrm{Dom} (\widehat A )=\mathrm{Dom} (\dot A)\dot +\mathrm{lin\ span} \left \{ G_+-\kappa_1\kappa_2 G_-\right \},
$$
where the deficiency elements $G_\pm$ of $\dot A$ are given by
\eqref{G+G-}. That is,
\begin{equation}\label{GGG}
G_+= \cos \alpha \, g_+^1-\sin \alpha \,g_+^2,
\end{equation}
$$
G_-= \cos \beta \, g_-^1- \sin \beta \,g^2_-.
$$
By construction,
\begin{equation}\label{222}\dot A=\widehat A|_{\mathrm{Dom} (\widehat A)\cap \mathrm{Dom}((\widehat A)^*)}.
\end{equation}
Clearly,
\begin{align*}
G_+-\kappa_1\kappa_2 G_-&=(\cos \alpha \, g_+^1 -\kappa_1\kappa_2 \cos \beta \, g_-^1)\oplus (-\sin \alpha \,g_+^2+\kappa_1\kappa_2 \sin \beta \,g^2_-)\\
&=\begin{cases}
\cos \alpha \, ( g_+^1 -\kappa_1 g_-^1)\oplus (- \sin \alpha) \, (g_+^2-\kappa_1^2\kappa_2 g_-^2),&\text{if } \kappa_2\ne 0\\
0\oplus (-g_+^2),&\text{if } \kappa_2= 0
\end{cases}.
\end{align*}
Therefore,
$$
\text{Proj}_{{\mathcal H}_1}( G_+-\kappa_1\kappa_2 G_-)\in \mathrm{Dom} (\widehat A_1),
$$
where $\text{Proj}_{{\mathcal H}_1}$ denotes the orthogonal projection of ${\mathcal H}_1\oplus {\mathcal H}_2$ onto ${\mathcal H}_1$. Hence,
the subspace ${\mathcal H}_1$ is invariant for the dissipative operator $\widehat A$ and \begin{equation}\label{333}
\widehat A|_{{\mathcal H}_1\cap \mathrm{Dom}(\widehat A_1)}=\widehat A_1.
\end{equation}
Combining \eqref{111}, \eqref{222} and \eqref{333} shows that the dissipative extension $\widehat A$ is an operator coupling of $\widehat A_1$ and $\widehat A_2$ such that $\dot A\subset \widehat A_1\uplus
\widehat A_2
$,
which proves the existence part of the assertion.
To prove the uniqueness of the operator coupling $\widehat A$ extending $\dot A$ and satisfying the property \eqref{333},
one observes that since $\widehat A\in \mathfrak{D}({\mathcal H}_1\oplus{\mathcal H}_2)$, there exists some $\kappa$ with $|\kappa|<1$ such that
$$
\mathrm{Dom} (\widehat A)=\mathrm{Dom} (\dot A)\dot +\mathrm{lin\ span} \left \{ G_+-\kappa G_-\right \}.
$$
In particular,
\begin{equation}\label{means}
G_+-\kappa G_-\in \mathrm{Dom} (\widehat A).
\end{equation}
If $\kappa_2\ne 0$, from \eqref{333} it follows that
\eqref{means} holds if and only if
$$
\text{Proj}_{{\mathcal H}_1}(G_+-\kappa G_-)=\cos \alpha g_+^1-\kappa \cos \begin{equation}ta g_-^1=\cos \alpha \left (g_+^1-\frac{\kappa}{\kappa_2} g_-^1\mathrm{i}ght )\in \mathrm{Dom} (\widehat A_1)
$$
which is only possible if
$$
\frac{\kappa}{\kappa_2}=\kappa_1.
$$
If $\kappa_2=0$, so that $G_+=-g_+^2$ (see \eqref{GGG} with $\alpha=\frac\pi2$),
one computes
$$\text{Proj}_{{\mathcal H}_1}(G_+-\kappa G_-)=-\kappa \cos\beta\, g_-^1,
$$
which belongs to $\mathrm{Dom} (\widehat A_1)$ only if $\kappa=0$. Hence \eqref{333} and \eqref{means} hold if and only if $\kappa=\kappa_2=0$.
In particular, we have shown that in either case
\begin{equation}\label{prodprod}\kappa=\kappa_1\kappa_2.
\end{equation}
(ii).
By definition of the von Neumann parameter associated with a pair of operators, equality \eqref{prodprod} means that
$$
\kappa(\widehat A, A)=\kappa(\widehat A_1, A_1)\cdot \kappa(\widehat A_2, A_2),
$$
where $A$ and $A_j$, $j=1,2$, are self-adjoint reference extensions of $\dot A$ and $\dot A_j$, $j=1,2$, such that
$$G_+-G_-\in \mathrm{Dom} (A)
$$
and
$$
g_+^j-g_-^j\in \mathrm{Dom} (A_j),\quad j=1,2,
$$
which proves the remaining assertion \eqref{multkappa1}.
The proof is complete.
\end{proof}
\section{The multiplication theorem}
Now, we are ready to state the central result of this paper.
\begin{theorem}[{\bf The Multiplication Theorem}]\label{opcoup}
Suppose that $\widehat A=\widehat A_1\uplus \widehat A_2$ is an operator coupling of two maximal dissipative operators
$\widehat A_k \in \mathfrak{D}({\mathcal H}_k)$, $k=1,2$. Denote by $\dot A $, $\dot A_1$ and $\dot A_2$ the corresponding underlying symmetric operators with deficiency indices $(1,1)$, respectively.
That is, $$\dot A=\widehat A|_{\mathrm{Dom}(\widehat A)\cap\mathrm{Dom}((\widehat A)^*)}
$$
and
$$
\dot A_k=\widehat A_k|_{\mathrm{Dom}(\widehat A_k)\cap\mathrm{Dom}((\widehat A_k)^*)}, \quad k=1,2.
$$
Then there exist self-adjoint reference operators $A$, $A_1$, and $A_2$, extending $\dot A$, $\dot A_1$ and $\dot A_2$, respectively, such that
\begin{equation}\label{proizvas}S(\widehat A_1\uplus \widehat A_2, A)=S(\widehat A_1,A_1)\cdot S(\widehat A_2, A_2).
\end{equation}
\end{theorem}
\begin{proof}
As in the proof of Theorem \ref{nachalo}, one can always find a basis
$$g_\pm^j \in \Ker ((\dot A_j)^*\mp iI),\quad \|g_\pm^j\|=1,\quad j=1,2, $$
such that
$$
g_+^j-\kappa_jg_-^j\in \mathrm{Dom} (\widehat A_j),
\quad \text{with} \quad \kappa_j=\widehat \kappa (\widehat A_j),\quad j=1,2,
$$
and that
$$\mathrm{Dom} (\dot A)=\mathrm{Dom} (\dot A_1\oplus\dot A_2)\dot +{\mathcal L}_0.
$$
Here
$$
{\mathcal L}_0
=\mathrm{lin\ span}\left \{ (\sin \alpha g_+^1-\sin \beta g_-^1)
\oplus(\cos \alpha
g_+^2- \cos\beta g_-^2 )\right \}
$$
and
\begin{equation}\label{tana111}
\alpha=\arctan \frac{1}{\kappa_2}\sqrt{\frac{1-\kappa_2^2}{1-\kappa_1^2}}
\quad \left (\alpha=\frac\pi2 \quad \text{if } \quad \kappa_2=0\right ),\end{equation}
\begin{equation}\label{sinsin}
\sin \beta =\kappa_1
\begin{cases}
\sin\alpha,&\text{if } \kappa_2\ne0\\
1,&\text{if } \kappa_2=0
\end{cases},
\end{equation}
\begin{equation}\label{coscos}
\cos \beta=
\begin{cases}
\frac{1}{\kappa_2}\cos \alpha,&\text{if } \kappa_2\ne 0\\
\sqrt{1-\kappa_1^2},&\text{if } \kappa_2= 0
\end{cases}.
\end{equation}
By Theorem \ref{technich0}, the deficiency elements $G_\pm$ of $\dot A$ are given by
\eqref{G+G-},
\begin{equation}\label{GGGG}
G_+= \cos \alpha \, g_+^1-\sin \alpha \,g_+^2,
\end{equation}
$$
G_-= \cos \beta \, g_-^1- \sin \beta \,g^2_-.
$$
Introducing self-adjoint reference extensions $A$ and $A_j$, $j=1,2$, of the symmetric operators $\dot A$ and $\dot A_j$, $j=1,2$, such that
$$G_+-G_-\in \mathrm{Dom} (A)\quad \text{
and}\quad
g_+^j-g_-^j\in \mathrm{Dom} (A_j),\quad j=1,2,
$$
one can apply Theorem \ref{technich0} to conclude that the Liv\v{s}ic function of $\dot A$ relative to $A$ admits the representation
\begin{equation}\label{sysy11}
s(z)=s(\dot A, A)(z)=\frac{\cos \alpha \cos \beta s_1(z)-s_1(z)s_2(z)
+ \sin \alpha \sin \beta s_2(z)
}
{1 - (\sin \alpha \sin \beta s_1(z)+
\cos \alpha \cos \beta s_2(z) ) }.
\end{equation}
Here
$$s_k(z)=s(\dot A_k, A_k)(z)
$$
are the Liv\v{s}ic functions associated with the pairs $ (\dot A_k, A_k)$, $ k=1,2$.
Denote the operator coupling $\widehat A_1\uplus\widehat A_2$ by $\widehat A$.
By Theorem \ref{nachalo},
\begin{equation}\label{nuii}
G_+-\kappa_1\kappa_2G_-\in \mathrm{Dom} (\widehat A).
\end{equation}
Therefore, from \eqref{nuii} it follows that
the characteristic function $S(\widehat A, A)$ of the dissipative extension $\widehat A$ relative to the reference self-adjoint operator $A$
has the form
$$
S(\widehat A, A)(z)=\frac{s(z)-\kappa_1\kappa_2}{
\kappa_1\kappa_2s(z)-1}.
$$
By Remark \eqref{kogdage} with $\kappa=\kappa_1\kappa_2$, one gets that
$$
\frac{s(z)-\kappa_1\kappa_2}{
\kappa_1\kappa_2s(z)-1}=\frac{a_1 s_1(z)+a_2 s_2(z)-
s_1(z)s_2(z)
-\kappa_1\kappa_2}
{a_2 s_1(z)+a_1 s_2(z)-\kappa_1\kappa_2s_1(z)s_2(z)-
1},
$$
where
$$
a_1=\cos \alpha \cos \beta +\kappa_1\kappa_2\sin \alpha \sin \beta,
$$
$$
a_2=\sin \alpha \sin \beta+\kappa_1\kappa_2\cos \alpha \cos \beta.
$$
From the relations \eqref{tana111}, \eqref{sinsin} and \eqref{coscos} it follows that $a_1=\kappa_2$ and $a_2=\kappa_1$ and hence
\begin{align}
\frac{s(z)-\kappa_1\kappa_2}{
\kappa_1\kappa_2s(z)-1}&=\frac{\kappa_2 s_1(z)+\kappa_1 s_2(z)-s_1(z)s_2(z)
-\kappa_1\kappa_2 }
{\kappa_1s_1(z)+\kappa_2s_2(z)-\kappa_1\kappa_2s_1(z)s_2(z)-
1}
\nonumber \\
&=
\frac{s_1(z)-\kappa_1}
{\kappa_1 s_1(z)-1}
\cdot \frac{s_2(z)-\kappa_2}
{\kappa_2 s_2(z)-1}.\label{nunu}
\end{align}
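For completeness, the identities $a_1=\kappa_2$ and $a_2=\kappa_1$ used above can be checked directly. If $\kappa_2\ne 0$, relation \eqref{tana111} gives
$$
\cos^2\alpha=\frac{\kappa_2^2(1-\kappa_1^2)}{1-\kappa_1^2\kappa_2^2}\quad \text{and}\quad \sin^2\alpha=\frac{1-\kappa_2^2}{1-\kappa_1^2\kappa_2^2},
$$
so that, by \eqref{sinsin} and \eqref{coscos},
$$
a_1=\frac{\cos^2\alpha}{\kappa_2}+\kappa_1^2\kappa_2\sin^2\alpha=\frac{\kappa_2(1-\kappa_1^2)+\kappa_1^2\kappa_2(1-\kappa_2^2)}{1-\kappa_1^2\kappa_2^2}=\kappa_2
\quad\text{and}\quad
a_2=\kappa_1(\sin^2\alpha+\cos^2\alpha)=\kappa_1,
$$
while for $\kappa_2=0$ (so that $\alpha=\frac\pi2$, $\sin\beta=\kappa_1$ and $\cos\beta=\sqrt{1-\kappa_1^2}$) both identities are immediate.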
Thus,
\begin{equation}\label{prozv}
S(\widehat A, A)(z)= S(\widehat A_1, A_1)(z)\cdot S(\widehat A_2, A_2)(z),
\quad z\in \mathbb C_+.
\end{equation}
The proof is complete.
\end{proof}
The following example illustrates the Multiplication Theorem \ref{opcoup} for a differentiation operator on a finite interval.
\begin{example}\label{exam} For a finite interval $\delta=[\alpha, \beta]$, denote by $\widehat D_\delta$
the first order differentiation operator
in the Hilbert space $L^2(\delta)$
given by the differential expression
$$
\tau=-\frac{1}{i} \frac{d}{dx}
$$
on
$$
\mathrm{Dom}(\widehat D_\delta )=\left \{f\in W_2^1((\alpha, \beta )),\,\, f(\alpha)=0\right \}
.
$$
It is easy to see that if $\gamma\in (\alpha, \beta)$, so that $$ \delta=\delta_1\cup \delta_2$$
with $\delta_1=[\alpha,\gamma]$ and $\delta_2=[\gamma, \beta]$, then
\begin{equation}\label{ccoo}
\widehat D_\delta= \widehat D_{\delta_1\cup\delta_2}=\widehat D_{\delta_1}\uplus \widehat D_{\delta_2},
\end{equation}
where $\widehat D_{\delta_1}\uplus \widehat D_{\delta_2}$ stands for the dissipative operator coupling of $\widehat D_{\delta_1}$ and $\widehat D_{\delta_2}$.
Indeed, by construction, $\widehat D_\delta$ is a maximal dissipative extension of $\widehat D_{\delta_1}$ outgoing from the Hilbert space ${\mathcal H}_1=L^2(\delta_1)$ to the Hilbert space
${\mathcal H}={\mathcal H}_1\oplus{\mathcal H}_2=L^2(\delta)$, where ${\mathcal H}_2=L^2(\delta_2)$. Moreover, since
$$
\mathrm{Dom}((\widehat D_\delta)^*)=\left \{f\in W_2^1((\alpha, \beta )),\,\, f(\beta)=0\right \},
$$
the restriction $\dot D_\delta$ of $\widehat D_\delta $ on
\begin{equation}\label{domu}
\mathrm{Dom} (\dot D_\delta)=\mathrm{Dom}(\widehat D_\delta)\cap\mathrm{Dom}( (\widehat D_\delta)^*)
\end{equation}
is a symmetric operator with deficiency indices $(1,1)$ given by the same differential expression $\tau$ on
$$
\mathrm{Dom} (\dot D_\delta)=\left \{f\in W_2^1((\alpha, \beta )),\,\,f(\alpha)= f(\beta)=0\right \}.
$$
On the other hand,
$$
\mathrm{Dom}((\widehat D_{\delta_2})^*)=\left \{f\in W_2^1((\gamma, \beta )),\,\, f(\beta)=0\right \}.
$$
Therefore,
\begin{equation}\label{bkluch}\dot D_\delta\subset \widehat D_{\delta_1}\oplus (\widehat D_{\delta_2})^*.
\end{equation}
Combining \eqref{domu} and \eqref{bkluch} shows that $\widehat D_\delta$ coincides with the dissipative operator coupling of $\widehat D_{\delta_1}$ and $\widehat D_{\delta_2}$. That is,
\eqref{ccoo} holds.
By Lemma \ref{app} (see Appendix A), the characteristic function associated with the pair $(\widehat D_\delta, D_\delta)$ is of the form
$$S(\widehat D_\delta, D_\delta)(z)=\exp ( i |\delta|z), \quad z\in \mathbb C_+,
$$
where $|\cdot|$ stands for Lebesgue measure of a Borel set and $ D_\delta$ is the self-adjoint reference differentiation operator with antiperiodic boundary conditions defined on
$$
\mathrm{Dom} (D_\delta)=\left \{f\in W_2^1((\alpha, \beta )),\,\,f(\alpha)= -f(\beta)\right \}.
$$
Therefore, taking into account that
$$
\exp ( i |\delta|z)=\exp\left (i(|\delta_1|+|\delta_2|)z\right )=\exp ( i |\delta_1|z)\cdot \exp ( i |\delta_2|z),
$$
one obtains that
$$
S(\widehat D_\delta, D_\delta)(z)=S(\widehat D_{\delta_1}, D_{\delta_1})(z)\cdot S(\widehat D_{\delta_2}, D_{\delta_2})(z),
$$
which illustrates the statement of Theorem \ref{opcoup}.
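Note also that, by Lemma \ref{app}\,(iii), the von Neumann parameters in this example are $e^{-|\delta|}$, $e^{-|\delta_1|}$ and $e^{-|\delta_2|}$, so the elementary factorization $e^{-|\delta|}=e^{-|\delta_1|}\cdot e^{-|\delta_2|}$ illustrates, in this concrete setting, the multiplication rule \eqref{multkappa1} for the von Neumann parameters as well.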
\end{example}
We conclude this section with the following purely analytic result.
\begin{theorem} \label{mainan}Let $\mathfrak{M}$, $\mathfrak{C}$, and $\mathfrak{S}$ be the function classes of Weyl-Titchmarsh, Liv\v{s}ic, and characteristic functions, respectively.
Then,
\begin{itemize}
\item[(i)] The class $\mathfrak{M}$ is a convex set with respect to addition;
\item[(ii)] The class $\mathfrak{S}$ is closed under multiplication,
$$\mathfrak{S}\cdot \mathfrak{S}\subset \mathfrak{S};
$$
\item[(iii)] The subclass $\mathfrak{C}\subset \mathfrak{S}$ is a (two-sided) ideal under multiplication in the sense that
$$
\mathfrak{C}\cdot \mathfrak{S}=\mathfrak{S}\cdot \mathfrak{C}\subset \mathfrak{C};
$$
\item[(iv)] The class $\mathfrak{C}$ is closed under multiplication:
$$\mathfrak{C}\cdot\mathfrak{C}\subset \mathfrak{C}.$$
\end{itemize}
\end{theorem}
\begin{proof} One notices that (i) is a corollary of Theorem \ref{additionth},
(ii) follows from Theorem \ref{opcoup}, and
(iv) follows from (iii). Therefore, it remains to prove (iii).
(iii). Suppose that $S_1\in \mathfrak{C}$ and $S_2\in \mathfrak{S}$. Since $\mathfrak{C}\subset \mathfrak{S}$ and $\dot \mathfrak{D}=\ker \widehat \kappa$,
$S_1$ is the characteristic function of a dissipative operator $\widehat A_1$ from $\dot \mathfrak{D}$ (see \eqref{dotD})
relative to some self-adjoint reference operator $A_1$. Since $S_2\in \mathfrak{S}$, the function $S_2$ is the characteristic function of a dissipative operator
$\widehat A_2\in \mathfrak{D}$
relative to some self-adjoint reference operator $A_2$. By Theorem \ref{opcoup}, the product $S_1\cdot S_2$ is
the characteristic function of an operator coupling $\widehat A_1\uplus \widehat A_2$ relative to an appropriate reference self-adjoint operator.
Since $S_1\in \mathfrak{C}$, and therefore $\widehat \kappa (\widehat A_1)=0$, it follows from Theorem \ref{opcoup} that
$\widehat \kappa(\widehat A_1\uplus\widehat A_2)=0$ and hence the product $S_1\cdot S_2$ belongs to the class $\mathfrak{C}$.
\end{proof}
\begin{remark} Recall that the subclass $\dot \mathfrak{D}$ of $\mathfrak{D}$ has been defined as the set of all dissipative operators from $\mathfrak{D}$ with zero value of the corresponding von Neumann parameter (see \eqref{dotD}). To put it differently, the characteristic functions of the operators from $\dot \mathfrak{D}$ are exactly those
that belong to the class $\mathfrak{C}$.
Having this in mind, a non-commutative version of the ``absorption principle'' (iii) can be formulated as follows.
Suppose that $\widehat A\in \dot \mathfrak{D}({\mathcal H}_1)\subset \mathfrak{D}({\mathcal H}_1)$ and $\widehat B\in \mathfrak{D}({\mathcal H}_2)$. Then
$$
\widehat A\uplus \widehat B\in \dot \mathfrak{D}({\mathcal H}_1\oplus {\mathcal H}_2)
$$
and
$$
\widehat B\uplus \widehat A\in \dot \mathfrak{D}({\mathcal H}_2\oplus {\mathcal H}_1).
$$
\end{remark}
\begin{appendix}
\section{The differentiation on a finite interval}
In this Appendix we collect some known results, see, e.g., \cite{AkG}, regarding the maximal and minimal differentiation operators on a finite interval.
Here we present them in a version adapted to the notation of the current paper.
\begin{lemma}\label{app}
Let $\widehat D$ be the first order differentiation operator
in the Hilbert space $L^2(0, \ell)$
given by the differential expression
$$
\tau=-\frac{1}{i} \frac{d}{dx}
$$
on
$$
\mathrm{Dom}(\widehat D )=\left \{f\in W_2^1((0, \ell)),\,\, f(0)=0\right \}
$$ and $D$ the self-adjoint realization of $\tau$ on
$$
\mathrm{Dom}( D )=\left \{f\in W_2^1((0, \ell)),\,\, f(\ell)=-f(0)\right \}.
$$
Then \begin{itemize}
\item[(i)]
the restriction $\dot D$ of the operator $\widehat D$ on
\begin{align}
\mathrm{Dom} (\dot D)&=\mathrm{Dom} (\widehat D)\cap \mathrm{Dom}(\widehat D^*) \label{dommm}
\end{align}
is a symmetric operator with deficiency indices $(1, 1)$;
\item[(ii)]
the Liv\v{s}ic function $s=s(\dot D, D)$
of the symmetric operator $\dot D$ relative to the self-adjoint reference operator $D$
is of the form
\begin{equation}\label{chrff3}
s(z)=
\frac{e^{i\ell z}-e^{-\ell}}{e^{-\ell}e^{i\ell z}-1};
\end{equation}
\item[(iii)] the von Neumann parameter $\kappa(\dot D, \widehat D)$ associated with the pair $(\dot D, \widehat D) $ is given by
$$
\kappa(\dot D, \widehat D)=e^{-\ell};
$$
\item[(iv)] the characteristic function $S=S(\widehat D,D)$ of the dissipative operator $\widehat D$ relative to $D$ is an inner singular function given by
$$
S(z)=e^{i\ell z}, \quad z\in \mathbb C_+.$$
\end{itemize}
\end{lemma}
\begin{proof} It is straightforward to conclude that
$$
\mathrm{Dom}(\widehat D^*)=\left \{f\in W_2^1((0, \ell)),\,\, f(\ell)=0\right \}
$$
and therefore
\begin{align}
\mathrm{Dom} (\dot D)=\mathrm{Dom} (\widehat D)\cap \mathrm{Dom}(\widehat D^*)
=\left \{f\in W_2^1((0,\ell)),\,\,f(0)= f(\ell)=0\right \}\nonumber.
\end{align}
Clearly,
$
\Ker ((\dot D)^*-z I)=\mathrm{lin\ span}\{ g_z\},
$
where
$$
g_z(x)=e^{-izx}, \quad x\in [0, \ell], \quad z\in \mathbb C,
$$
which proves (i).
To compute the Liv\v{s}ic function, one observes that
$
\Ker ((\dot D)^*\mp iI)=\mathrm{lin\ span}\{ g_\pm\},
$
where
$$
g_+(x)=\frac{\sqrt{2}}{\sqrt{e^{2\ell}-1}}e^{ x}
\quad \text{ and } \quad
g_-(x)=\frac{\sqrt{2}}{\sqrt{1-e^{-2\ell}}}e^{ -x}, \quad x\in [0, \ell].
$$
Obviously,
$\|g_\pm \|=1.
$
Since
$$g_+(0)-g_-(0)=\frac{\sqrt{2}}{\sqrt{e^{2\ell}-1}}-\frac{\sqrt{2}}{\sqrt{1-e^{-2\ell}}}=\frac{\sqrt{2}}{\sqrt{e^{2\ell}-1}}\left (1-e^{\ell}\right )
$$
and
$$\quad\quad\quad\,\, \,\,g_+(\ell)-g_-(\ell)=\frac{\sqrt{2}}{\sqrt{e^{2\ell}-1}}e^\ell-\frac{\sqrt{2}}{\sqrt{1-e^{-2\ell}}}e^{-\ell}=-\frac{\sqrt{2}}{\sqrt{e^{2\ell}-1}}\left (1-e^{\ell}\right ),
$$
one observes that
$
g_+(0)-g_-(0)=-(g_+(\ell)-g_-(\ell))
$ which proves that
\begin{equation}\label{propp}
g_+-g_-\in \mathrm{Dom}(D).
\end{equation}
Now, since \eqref{propp} holds, in accordance with the definition,
the Liv\v{s}ic function $s=s(\dot D, D)$
of the symmetric operator $\dot D$ relative to the self-adjoint reference operator $D$ can be evaluated as
\begin{align*}
s(z)&=\frac{z-i}{z+i}\cdot \frac{(g_z,g_-)}{(g_z,g_+)}=\sqrt{\frac{e^{2\ell}-1}{1-e^{-2\ell}}}\cdot
\frac{z-i}{z+i}\cdot\frac{\int\limits_0^\ell e^{(-iz-1)x}dx}{\int\limits_0^\ell e^{(-iz+1)x}dx}
\\&
=\sqrt{\frac{e^{2\ell}-1}{1-e^{-2\ell}}}\cdot\frac{e^{(-iz-1)\ell}-1}{e^{(-iz+1)\ell}-1}=
e^{\ell} \frac{e^{(-iz-1)\ell}-1}{e^{(-iz+1)\ell}-1}
\\ &=\frac{e^{-iz\ell}-e^\ell}{e^\ell e^{-iz\ell}-1},\quad z\in\mathbb C_+.
\end{align*}
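Here the elementary factor $\frac{-iz+1}{-iz-1}=\frac{z+i}{z-i}$ produced by the two integrations cancels the prefactor $\frac{z-i}{z+i}$, which explains why that prefactor disappears in the second line of the computation.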
Thus,
$$
s(z)=
\frac{e^{i\ell z}-e^{-\ell}}{e^{-\ell}e^{i\ell z}-1},\quad z\in \mathbb C_+,
$$
which proves the representation (ii).
Next, since
$
g_+(0)=e^{-\ell}g_-(0),
$
one also obtains that
$$
g_+-e^{-\ell} g_-\in \mathrm{Dom} (\widehat D)
$$
which proves the assertion (iii) taking into account \eqref{propp}.
Finally, one concludes that
the characteristic function $S=S(\widehat D, D)$ of the dissipative operator $\widehat D$ relative to the self-adjoint reference operator $D$
is given by
\begin{equation}\label{kapling3}
S(z)=\frac{s(z)-e^{-\ell}}{e^{-\ell}s(z)-1}=\frac{\frac{e^{i\ell z}-e^{-\ell}}{e^{-\ell}e^{i\ell z}-1}-e^{-\ell}}{e^{-\ell} \frac{e^{i\ell z}-e^{-\ell}}{e^{-\ell}e^{i\ell z}-1}-1}
=
e^{i\ell z}, \quad z\in \mathbb C_+,
\end{equation}
which proves (iv).
The proof is complete.
\end{proof}
\end{appendix}
\begin{thebibliography}{99}
\bibitem{ABT}
Yu.~Arlinskii, S.~Belyi, E.~Tsekanovskii,
Conservative realizations of Herglotz-Nevanlinna functions,
Operator Theory: Adv. and Appl.,
{\bf 217}, Birkh\"auser/Springer Basel AG, Basel,
2011.
\bibitem{AkG}
N.~I.~Akhiezer, I.~M.~Glazman,
Theory of Linear Operators in
Hilbert Space, Dover, New York, 1993.
\bibitem{AWT}
A.~ Aleman, R.~T.~W.~Martin, W.~T. ~Ross,
{\it On a theorem of Liv\v{s}ic},
J. Funct. Anal., {\bf 264}, 999--1048 (2013).
\bibitem{Br}
M.~S.~Brodskii,
Triangular and Jordan representations of linear operators,
Translations of Mathematical Monographs,
Vol. 32.
Amer. Math. Soc., Providence, R.I., 1971.
\bibitem {Brod}
M.~S.~ Brodskii,
\textit{Unitary operator colligations and their characteristic functions},
Uspechi Mat. Nauk {\bf 33}, no. 4(202), 142--168 (1978), (Russian).
English transl.:
Russ. Math. Surveys, {\bf 33} (4), 159--191 (1978).
\bibitem{BL58}
M.~S.~Brodskii, M.~S.~Liv\v{s}ic,
\textit{Spectral analysis of
non-self-adjoint operators and intermediate systems},
Uspehi Mat. Nauk (N.S.), {\bf 13}, no. 1(79), 3--85 (1958), (Russian).
English transl.: Amer. Math. Soc. Transl., (2), {\bf 13}, 265--346
(1960).
\bibitem{D}
W.~F.~Donoghue,
\textit{On perturbation of spectra},
Commun. Pure and Appl. Math.,
{\bf 18}, 559--579 (1965).
\bibitem{FKE}
F.~Gesztesy, K.~A.~Makarov, E.~Tsekanovskii,
\textit{An addendum to Krein's formula},
J. Math. Anal. Appl.,
{\bf 222}, 594--606 (1998).
\bibitem{GT}
F.~Gesztesy, E.~Tsekanovskii,
\textit{On Matrix-Valued Herglotz Functions},
Math. Nachr., {\bf 218}, 61--138 (2000).
\bibitem{L}
M.~S.~Liv\v{s}ic,
\textit{On a class of linear operators in Hilbert space},
Mat. Sbornik , (2), {\bf 19}, 239--262 (1946),
(Russian).
English transl.: Amer. Math. Soc. Transl., (2), {\bf 13}, 61--83
(1960).
\bibitem{L1}
M.~S.~Liv\v{s}ic,
\textit{On spectral decomposition of linear non-self-adjoint operators},
Mat. Sbornik , (76), {\bf 34}, 145--198 (1954),
(Russian).
English transl.: Amer. Math. Soc. Transl. (2), {\bf 5}, 67--114 (1957).
\bibitem{L2}
M.~S.~Liv\v{s}ic,
Operators, Oscillations, Waves (Open systems),
Nauka, Moscow, 1966, (Russian).
English transl.: Transl. Math. Monograph, {\bf 34}, AMS, Providence R.~ I., 1973.
\bibitem{LYa}
M.~S.~Liv\v{s}ic, A.~A.~Yantsevich,
Operator colligations in Hilbert spaces,
Winston, 1979.
\bibitem{LP}
M.~S.~Liv\v{s}ic, V.~P.~Potapov,
\textit{A theorem on the multiplication of characteristic matrix-functions},
Dokl. Acad. Nauk SSSR, {\bf 72}, 625--628 (1950), (Russian).
\bibitem{MT}
K.~ A.~ Makarov, E.~Tsekanovskii,
\textit{ On the Weyl Titchmarsh and Liv\v{s}ic functions},
Proceedings of Symposia in Pure Mathematics, Amer. Math. Soc., {\bf 87}, 291--313 (2013).
\bibitem{NF}
B.~Sz.-Nagy, C.~Foias,
Harmonic analysis of operators on Hilbert space,
North-Holland, Amsterdam, 1970.
\end{thebibliography}
\end{document}
\begin{document}
\title{Supplementary Materials: \ Detection of multiple perturbations in multi-omics biological networks}
\section{Software}
An R package {\bf mapggm} containing methods for multi-attribute network estimation and perturbation detection is available at
\url{https://github.com/paulajgriffin/mapggm}. To use this package, install the {\bf devtools} package from CRAN and run:
\begin{Verbatim}
library(devtools)
install_github('paulajgriffin/mapggm')
\end{Verbatim}
\noindent
Other supplementary files, including the simulation pipeline and TCGA scripts/processed data, may be found at \url{https://github.com/paulajgriffin/mapggm\_supplemental}.
\section{Properties of sequential tests}
We prove the following theorems, presented in Section 3.2 of the paper.
\begin{theorem}{\label{th:expnest}}
Given a set of nodes already found to have nonzero mean in steps $1, \ldots, s$, consider testing for a perturbation at an additional node $i$ in step $s+1$. Denote the indices in $Z=\Omega Y$ corresponding to the nodes found in steps $1, \ldots, s$ as $S$.
We can write the expected difference between the original test statistic and the test statistic adjusted for perturbations in $S$ as
\begin{equation*}
E(T_i - T^{[s+1]}_i) = \mu_i^T\left(\Sigma_{i,S}\Sigma_{S,i} \right) \mu_i +
2 \mu_i^T \left(\Sigma_{i,S} \right) \mu_S +
\mu_S^T \left(\Sigma_{S,i} \Sigma_{i,S} \right) \mu_S
\enskip .
\end{equation*}
In the special case that $\mu_i = 0$,
\begin{align*}
E(T_i - T^{[s+1]}_i | \mu_i = 0 ) \geq 0 \enskip.
\end{align*}
\end{theorem}
\noindent
{\bf Proof of Theorem~\ref{th:expnest}.}\quad First, recall the form of $T_j$, the unadjusted test statistic for testing the alternative hypothesis that $\mu_j \neq 0$ and $\mu_{(-j)}=0$ against a null of $\mu=0$. We can rewrite the test statistic in terms of the reordered unfiltered data $y = \left[y_j, y_{(-j)}\right]^T$ and obtain
\begin{align*}
T_j &= n \left(\bar{z}^T \Sigma \bar{z} - \bar{z}_{(-j)}^T \left(\Sigma_{(-j),(-j)}- \Sigma_{(-j),j}\Sigma_{j,j}^{-1}\Sigma_{j,(-j)}\right)\bar{z}_{(-j)} \right) \\
&= n \left( \bar{z}^T \Sigma \bar{z}-
\bar{z}^T\left( \begin{array}{cc}
0 & 0 \\
0 & \Sigma_{(-j),(-j)}- \Sigma_{(-j),j}\Sigma_{j,j}^{-1}\Sigma_{j,(-j)} \\
\end{array} \right) \bar{z}
\right) \\
&= n \left( \bar{y}^T \Omega \Sigma \Omega \bar{y}-
\bar{y}^T \Omega \left( \begin{array}{cc}
0 & 0 \\
0 & \Omega_{(-j),(-j)}^{-1}\\
\end{array} \right) \Omega \bar{y}
\right) \\
&= n \left( \bar{y}^T \Omega \bar{y}-
\bar{y}^T \left( \begin{array}{cc}
\Omega_{j,(-j)}\Omega_{(-j),(-j)}^{-1} \Omega_{(-j),j} & \Omega_{j,(-j)} \\
\Omega_{(-j),j} & \Omega_{(-j),(-j)} \\
\end{array} \right) \bar{y}
\right) \\
&= n \left(
\bar{y}^T \left( \begin{array}{cc}
\Omega_{j,j} - \Omega_{j,(-j)}\Omega_{(-j),(-j)}^{-1} \Omega_{(-j),j} & 0 \\
0 & 0 \\
\end{array} \right) \bar{y}
\right) \\
&= n \left(
\bar{y}^T \left( \begin{array}{cc}
\Sigma_{j,j}^{-1} & 0 \\
0 & 0 \\
\end{array} \right) \bar{y}
\right)
\enskip.
\end{align*}
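\noindent
As a small illustration, the final expression above can be computed directly from the sample mean of the unfiltered data. The helper below is a sketch for exposition only and is not part of the {\bf mapggm} package; the argument names are ours.
\begin{Verbatim}
# Sketch: unadjusted statistic T_j = n * ybar_j' Sigma_{jj}^{-1} ybar_j,
# where idx holds the attribute indices of node j and ybar is the sample
# mean of the unfiltered data y.
unadjusted_stat <- function(ybar, Sigma, idx, n) {
  yj <- ybar[idx]
  drop(n * t(yj) %*% solve(Sigma[idx, idx, drop = FALSE]) %*% yj)
}
\end{Verbatim}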
The mean of the unfiltered data has distribution $\bar{y} \sim N (\Sigma \mu, \Sigma/n)$. Taking the expectation of our test statistic $T_j$, we obtain
\begin{align*}
E(T_j) =& n E(\bar{y})^T \left( \begin{array}{cc}
\Sigma_{j,j}^{-1} & 0 \\
0 & 0 \\
\end{array} \right) E(\bar{y}) +
\mbox{Tr} \left(
\left( \begin{array}{cc}
\Sigma_{j,j}^{-1} & 0 \\
0 & 0 \\
\end{array} \right) \Sigma
\right)\\
=& n
\left( \begin{array}{c}
\Sigma_{j,j}\mu_j + \Sigma_{j,(-j)}\mu_{(-j)} \\
\Sigma_{(-j),j}\mu_j + \Sigma_{(-j),(-j)}\mu_{(-j)} \\
\end{array} \right)^T
\left( \begin{array}{cc}
\Sigma_{j,j}^{-1} & 0 \\
0 & 0 \\
\end{array} \right)
\left( \begin{array}{c}
\Sigma_{j,j}\mu_j + \Sigma_{j,(-j)}\mu_{(-j)} \\
\Sigma_{(-j),j}\mu_j + \Sigma_{(-j),(-j)}\mu_{(-j)} \\
\end{array} \right)
\\
&+
\mbox{Tr} \left(
\left( \begin{array}{cc}
I & \Sigma_{j,j}^{-1}\Sigma_{j,(-j)} \\
0 & 0 \\
\end{array} \right)
\right)\\
=& n \left(\mu_j^T \Sigma_{j,j} \mu_j +
2\mu_j^T\Sigma_{j,(-j)}\mu_{(-j)} +
\mu_{(-j)}^T \Sigma_{(-j),j} \Sigma_{j,(-j)} \mu_{(-j)}
\right) + k_j \enskip,
\end{align*}
where $k_j$ indicates the number of attributes for node $j$.
Denote the indices in $Z=\Omega Y$ corresponding to the nodes found in steps $1, \ldots, s$ as $S$, and the indices corresponding to the node currently under consideration as $i$.
Denote all other indices $X$.
In the sequential testing procedure, we test the alternative hypothesis of $\mu_i \neq 0$, $\mu_S \neq 0$, $\mu_X =0$ against the null that $\mu_S \neq 0$, $\mu_i=0$, $\mu_X=0$. We can write the adjusted test statistic $T_i^{[s+1]}$ as the difference of two unadjusted test statistics
\begin{equation}
T_i^{[s+1]} = T_{(i,S)} - T_S \enskip.
\end{equation}
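\noindent
In code, the adjusted statistic can be obtained from the same helper (again a sketch, not part of the package), applied to the index sets $(i,S)$ and $S$:
\begin{Verbatim}
# Sketch: adjusted statistic T_i^[s+1] = T_(i,S) - T_S, built from the
# unadjusted_stat() helper above; idx_i and idx_S are the attribute
# indices of node i and of the previously detected nodes S.
adjusted_stat <- function(ybar, Sigma, idx_i, idx_S, n) {
  unadjusted_stat(ybar, Sigma, c(idx_i, idx_S), n) -
    unadjusted_stat(ybar, Sigma, idx_S, n)
}
\end{Verbatim}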
We are interested in $E(T_i - T_i^{[s+1]})$, the expected difference between the original and adjusted test statistic.
\begin{alignat*}{2}
E(T_i - T_i^{[s+1]}) =& &&E(T_i) + E(T_S) - E(T_{(i,S)}) \\
=& && n \left(
\mu_i^T \Sigma_{ii} \mu_i + 2\mu_i^T \Sigma_{i,S} \mu_S + 2 \mu_i^T \Sigma_{i,X} \mu_X + \mu_S^T \Sigma_{S,i} \Sigma_{i,S} \mu_S + \mu_X^T \Sigma_{X,i}\Sigma_{i,X} \mu_X
\right) + k_i \\
&+ &&n \left( \mu_S^T \Sigma_{S,S} \mu_S + 2 \mu_S^T \Sigma_{S,i} \mu_i +
2\mu_S^T \Sigma_{S,X} \mu_X+
\mu_i^T \Sigma_{i,S}\Sigma_{S,i} \mu_i + \right. \\
& && \left. \mu_X^T \Sigma_{X,S} \Sigma_{S,X} \mu_X
\right) + \sum_{j \in S}k_j \\
&- &&n \left(
\mu_i^T \Sigma_{i,i} \mu_i + 2\mu_i^T \Sigma_{i,S} \mu_S + \mu_S^T \Sigma_{S,S} \mu_S
+ 2\mu_i^T \Sigma_{i,X}\mu_X + 2\mu_S^T \Sigma_{S,X} \mu_X\right. \\
& &&+ \left.
\mu_X^T \Sigma_{X,i}\Sigma_{i,X} \mu_X + \mu_X^T \Sigma_{X,S} \Sigma_{S,X} \mu_X
\right) + \left(k_i + \sum_{j \in S}k_j \right)
\end{alignat*}
\noindent
By gathering common terms, we obtain
\begin{equation*}
E(T_i - T^{[s+1]}_i) = \mu_i^T\left(\Sigma_{i,S}\Sigma_{S,i} \right) \mu_i +
2 \mu_i^T \left(\Sigma_{i,S} \right) \mu_S +
\mu_S^T \left(\Sigma_{S,i} \Sigma_{i,S} \right) \mu_S
\enskip .
\end{equation*}
\noindent
In the special case that $\mu_i = 0$ (no perturbation exists at the node under consideration),
\begin{align*}
E(T_i - T^{[s+1]}_i | \mu_i = 0 ) = \mu_S^T \left(\Sigma_{S,i} \Sigma_{i,S} \right) \mu_S \geq 0 \enskip,
\end{align*}
since $\left(\Sigma_{S,i} \Sigma_{i,S}\right)$ is by definition positive semi-definite.
\begin{theorem}{\label{th:nested}}
Under the same conditions outlined in the general case of Theorem~\ref{th:expnest}, if $\Sigma_{i,S} = 0$, then
\begin{equation*}
T^{[s+1]}_i= T_i \enskip .
\end{equation*}
\end{theorem}
\noindent
{\bf Proof of Theorem~\ref{th:nested}.}\quad
Denote the indices in $Z=\Omega Y$ corresponding to the nodes found in steps $1, \ldots, s$ as $S$, and the indices corresponding to the node currently under consideration as $i$. Denote all other indices $X$.
For any $\Sigma_{S,i}$ we can write the test statistic $T_i$ for the unconditional test as
\begin{align*}
T_i &= n(\bar{z} - \hat{\mu}_A)^T \Sigma (\bar{z} - \hat{\mu}_A) - n(\bar{z} - \hat{\mu}_0)^T \Sigma (\bar{z} - \hat{\mu}_0)\\
&= n\left((\bar{z} - \hat{\mu}_A) - (\bar{z} - \hat{\mu}_0) \right)^T \Sigma \left((\bar{z} - \hat{\mu}_A) + (\bar{z} - \hat{\mu}_0) \right)
\enskip,
\end{align*}
where $\hat{\mu}_0$ and $\hat{\mu}_A$ denote the maximum likelihood estimators for $\mu$ under the null and alternative hypotheses, respectively. Without loss of generality, we reorder the filtered data so that $Z = \left(Z_i', Z_S', Z_X'\right)$.
Following formula (7) in the main paper, for the unconditional test, we have
\begin{align*}
\hat{\mu}_0 &= \left(\begin{array}{c} 0 \\ 0\\ 0 \\ \end{array} \right) &\enskip,
\end{align*}
and
\begin{align*}
\hat{\mu}_A &= \left( \begin{array}{c}
\bar{z}_i+\Sigma_{i,i}^{-1}\Sigma_{i,(SX)}\bar{z}_{(SX)} \\
0\\
0 \\ \end{array} \right) &\enskip.
\end{align*}
\noindent
Similarly, a nested likelihood ratio test that conditions on the presence of nonzero mean values for indices $S$ has the form
\begin{align*}
T_i^{[s+1]} &= n(\bar{z} - \hat{\mu}_A^{[s+1]})^T \Sigma (\bar{z} - \hat{\mu}_A^{[s+1]}) - n(\bar{z} - \hat{\mu}_0^{[s+1]})^T \Sigma (\bar{z} - \hat{\mu}_0^{[s+1]})\\
&= n\left((\bar{z} - \hat{\mu}_A^{[s+1]}) - (\bar{z} - \hat{\mu}_0^{[s+1]}) \right)^T \Sigma \left((\bar{z} - \hat{\mu}_A^{[s+1]}) + (\bar{z} - \hat{\mu}_0^{[s+1]}) \right)
\enskip,
\end{align*}
\noindent
with restricted MLEs
\begin{align*}
\hat{\mu}_0^{[s+1]} &= \left(\begin{array}{c}
0 \\
\bar{z}_S + \Sigma_{S,S}^{-1} \Sigma_{S, (iX)} \bar{z}_{(iX)}\\
0 \\ \end{array} \right) &\enskip, \mbox{ and} \\
\hat{\mu}_A^{[s+1]} &= \left( \begin{array}{c}
\bar{z}_{(iS)}+\Sigma_{(iS),(iS)}^{-1}\Sigma_{(iS),X}\bar{z}_{X} \\
0 \\ \end{array} \right) &\enskip.
\end{align*}
Recall $\Sigma_{i,S}=0$ by assumption. We may rewrite $\hat{\mu}_A$, $\hat{\mu}_0^{[s+1]}$, and $\hat{\mu}_A^{[s+1]}$ as
\begin{align*}
\hat{\mu}_A
&= \left( \begin{array}{c}
\bar{z}_i+\Sigma_{i,i}^{-1}(\Sigma_{i,S}\bar{z}_S+ \Sigma_{i,X}\bar{z}_X) \\
0\\
0 \\ \end{array} \right) \\
&= \left( \begin{array}{c}
\bar{z}_i+\Sigma_{i,i}^{-1}\Sigma_{i,X}\bar{z}_X \\
0\\
0 \\ \end{array} \right) \\
\hat{\mu}_0^{[s+1]}
&= \left(\begin{array}{c}
0 \\
\bar{z}_S + \Sigma_{S,S}^{-1} (\Sigma_{S,i} \bar{z}_i + \Sigma_{S,X}\bar{z}_X)\\
0 \\ \end{array} \right) \\
&= \left(\begin{array}{c}
0 \\
\bar{z}_S + \Sigma_{S,S}^{-1}\Sigma_{SX}\bar{z}_X\\
0 \\ \end{array} \right) \\
\hat{\mu}_A^{[s+1]}
&= \left( \begin{array}{c}
\bar{z}_{(iS)} + \Sigma_{(iS),(iS)}^{-1}\Sigma_{(iS),X}\bar{z}_{X} \\
0 \\ \end{array} \right) \\
&= \left( \begin{array}{c}
\bar{z}_{i}+\Sigma_{i,i}^{-1}\Sigma_{i,X}\bar{z}_{X} \\
\bar{z}_{S}+\Sigma_{S,S}^{-1}\Sigma_{S,X}\bar{z}_{X} \\
0 \\ \end{array} \right) \enskip.
\end{align*}
\noindent
Our unadjusted test yields
\begin{align*}
T_i
&= n\left(\left(\begin{array}{c}
-\Sigma_{i,i}^{-1}\Sigma_{i,X}\bar{z}_X \\
\bar{z}_S\\
\bar{z}_X \\ \end{array}\right) -
\left(\begin{array}{c}
\bar{z}_i\\
\bar{z}_S\\
\bar{z}_X\\ \end{array}\right) \right)^T
\Sigma
\left(\left(\begin{array}{c}
-\Sigma_{i,i}^{-1}\Sigma_{i,X}\bar{z}_X \\
\bar{z}_S\\
\bar{z}_X \\ \end{array}\right) +
\left(\begin{array}{c}
\bar{z}_i\\
\bar{z}_S\\
\bar{z}_X\\ \end{array}\right) \right) \\
&= n\left(\begin{array}{c}
-\bar{z}_i -\Sigma_{i,i}^{-1}\Sigma_{i,X}\bar{z}_X \\
0 \\
0 \\ \end{array}\right)^T
\Sigma
\left(\begin{array}{c}
\bar{z}_i-\Sigma_{i,i}^{-1}\Sigma_{i,X}\bar{z}_X \\
2\bar{z}_S\\
2\bar{z}_X \\ \end{array} \right)
\end{align*}
\noindent
By a similar process, the adjusted test statistic is
\begin{align*}
T_i^{[s+1]}
&= n\left(\begin{array}{c}
-\bar{z}_i -\Sigma_{i,i}^{-1}\Sigma_{i,X}\bar{z}_X \\
0 \\
0 \\ \end{array}\right)^T
\Sigma
\left(\begin{array}{c}
\bar{z}_i-\Sigma_{i,i}^{-1}\Sigma_{i,X}\bar{z}_X \\
2\bar{z}_S -2\Sigma_{S,S}^{-1}\Sigma_{SX}\bar{z}_X \\
2\bar{z}_X \\ \end{array} \right) \enskip.
\end{align*}
\noindent
Note that both of these statistics have the form
\begin{align*}
T &= n d' \Sigma a \\
&= n \left( \begin{array}{c}
d_i \\
d_S \\
d_X \\ \end{array}
\right)^T \left(
\begin{array}{ccc}
\Sigma_{ii}& 0& \Sigma_{iX} \\
0& \Sigma_{SS}& \Sigma_{SX} \\
\Sigma_{Xi}& \Sigma_{XS}& \Sigma_{XX} \\
\end{array}
\right)
\left(\begin{array}{c}
a_i \\
a_S \\
a_X \\ \end{array} \right)\\
&= n\, d_i^T \left(\Sigma_{ii} a_i + \Sigma_{iX}a_X\right)
+ n\, d_S^T \left(\Sigma_{SS} a_S + \Sigma_{SX}a_X\right)
+ n\, d_X^T \left(\Sigma_{Xi} a_i + \Sigma_{XS} a_S + \Sigma_{XX}a_X\right) \\
&= n\, d_i^T \left(\Sigma_{ii} a_i + \Sigma_{iX}a_X\right) \enskip,
\end{align*}
\noindent
since $d_S=0$ and $d_X=0$ in both cases. In both $T_i$ and $T_i^{[s+1]}$, we have $d_i=-\bar{z}_i -\Sigma_{i,i}^{-1}\Sigma_{i,X}\bar{z}_X$, $a_i = \bar{z}_i-\Sigma_{i,i}^{-1}\Sigma_{i,X}\bar{z}_X$, and $a_X=2\bar{z}_X$. Therefore, $T_i^{[s+1]} = T_i$.
\section{Bounds on error in test statistic}
We prove the following theorem, presented in Section 3.3 of the paper.
\begin{theorem}{\label{th:errbound}}
Under the conditions above, the discrepancy $T_1-\tilde T_1$ is equal in distribution to a linear combination
of mutually independent, noncentral chisquare random variables,
\begin{equation}
\sum_{k=1}^s a_k \chi^2_{r_k}\left(\delta_k\right) \enskip ,
\label{eq:main.decomp}
\end{equation}
where
$$\delta_k = (n/2) \mu^T \Sigma_{\cdot 1} E_k \Sigma_{11}^{-1} \Sigma_{1\cdot}\mu \enskip .$$
Accordingly,
\begin{equation}
E\left[ T_1 - \tilde T_1\right] = tr\left(D\Sigma_{11}\right) + \frac{1}{2}n\mu^T \Sigma_{\cdot 1} D\Sigma_{1\cdot} \mu
\label{eq:mean}
\end{equation}
and
\begin{equation}
\hbox{Var}\left(T_1 - \tilde T_1\right) = 2 tr\left( (D\Sigma_{11})^2\right)
+ 2n \mu^T \Sigma_{\cdot 1} D \Sigma_{11} D \Sigma_{1\cdot} \mu \enskip .
\label{eq:var}
\end{equation}
\end{theorem}
\noindent
{\bf Proof of Theorem~\ref{th:errbound}.}\quad
Begin by noting that $T_1 - \tilde T_1 = X^T D X$, where
$$D = \Omega_{11} - \Omega_{1\cdot}\Omega_{\cdot\cdot}^{-1}\Omega_{\cdot 1} -
\left(\tilde\Omega_{11} - \tilde\Omega_{1\cdot}\tilde\Omega_{\cdot\cdot}^{-1}\tilde\Omega_{\cdot 1} \right) \enskip ,$$
as defined in the paper, and $X$ is a multivariate normal random variable with mean
$n^{1/2} \Sigma_{1\cdot}\mu$ and covariance $\Sigma_{11}$. Since $D$ is symmetric and $\Sigma_{11}$ is symmetric and positive definite
(the latter by assumption), it follows from Lemma 1 of \cite{baldessari1967distribution} that $D\Sigma_{11}$ has spectral decomposition
$$D\Sigma_{11} = \sum_{k=1}^s a_k E_k\enskip ,$$
such that $\hbox{rank}(E_k) = r_k$ (corresponding to the multiplicity of the eigenvalue $a_k$) and $\sum_{k=1}^s r_k = K$.
By direct application of Theorem 1 of \cite{baldessari1967distribution}, the expression in (\ref{eq:main.decomp}) then follows.
As for the mean and variance expressions in (\ref{eq:mean}) and (\ref{eq:var}), we see that
\begin{eqnarray*}
E\left[ T_1 - \tilde T_1\right] & = & E\left[ \sum_{k=1}^s a_k \chi^2_{r_k}(\delta_k)\right] \\
& = & \sum_{k=1}^s a_k(r_k + \delta_k) \\
& = & \sum_{k=1}^s a_k r_k + \sum_{k=1}^s a_k\delta_k \\
& = & tr(D\Sigma_{11}) + \frac{n}{2} \mu^T \Sigma_{\cdot 1} D\Sigma_{1\cdot} \mu \enskip ,
\end{eqnarray*}
and similarly,
\begin{eqnarray*}
\hbox{Var}\left(T_1 - \tilde T_1\right) & = & \hbox{Var}\left(\sum_{k=1}^s a_k \chi^2_{r_k}(\delta_k)\right) \\
& = & \sum_{k=1}^s a_k^2( 2r_k + 4\delta_k) \\
& = & 2\sum_{k=1}^s a_k^2 r_k + 4 \sum_{k=1}^s a_k^2 \delta_k \\
& = & 2 tr\left( (D\Sigma_{11})^2\right)
+ 2n \mu^T \Sigma_{\cdot 1} D \Sigma_{11} D \Sigma_{1\cdot} \mu \enskip ,
\end{eqnarray*}
where we have exploited independence among the chisquare random variables in both cases.
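We have also used the standard moments of a noncentral chisquare variable, namely $E[\chi^2_{r}(\delta)] = r + \delta$ and $\hbox{Var}\left(\chi^2_{r}(\delta)\right) = 2r + 4\delta$.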
The following corollary was also provided in Section 3.3 of the paper.
\begin{corollary}{\label{cor:meanvar}}
Let $||\cdot ||_2$ denote the spectral norm. Then
$$E\left[T_1 - \tilde T_1\right] = O\left(||\Delta||_2\right) \quad\hbox{and}\quad
\hbox{Var}\left(T_1 - \tilde T_1\right) = O\left(||\Delta||^2_2\right) \enskip .$$
\end{corollary}
\noindent {\bf Proof of Corollary~\ref{cor:meanvar}.}\quad
The statements in this corollary follow through application of bounds on the trace of matrix products and repeated application
of Cauchy-Schwarz, coupled with an appeal to the Lipschitz smoothness of the mapping between $\Omega$ and
the expression $ \Omega_{11} - \Omega_{1\cdot}\Omega_{\cdot\cdot}^{-1}\Omega_{\cdot 1}$. The latter follows from a
straightforward Taylor series argument and the continuity of matrix inversion.
\cite{wang1986trace} establish that for two matrices $M$ and $N$, with $N$ symmetric and positive semidefinite,
$|tr(MN)|\le ||M||_2 \, tr(N)$. For the mean, therefore, we have that $tr(D\Sigma_{11}) \le ||D||_2\, tr(\Sigma_{11})$. At the same time,
$$\big\vert \frac{n}{2}\mu^T \Sigma_{\cdot 1} D\Sigma_{1\cdot} \mu \big\vert
\le \frac{n}{2} ||\Sigma_{1\cdot} \mu||^2_2 ||D||_2 \enskip .$$
As a result, we find that $E[T_1 - \tilde T_1] = O(||D||_2)$.
Similarly, for the variance
$$2 tr\left( (D\Sigma_{11})^2 \right) \le 2 tr\left( D^2 \Sigma^2_{11}\right)
\le 2 ||D^2||_2 \, tr(\Sigma^2_{11}) \le 2 ||D||^2_2\, tr(\Sigma^2_{11}) \enskip ,$$
where the first inequality follows from Theorem 1 of \cite{chang1999matrix}. Additionally,
$$|2n \mu^T \Sigma_{\cdot 1} D \Sigma_{11} D \Sigma_{1\cdot} \mu | \le
2n ||\Sigma_{1\cdot}\mu||^2_2 \, ||\Sigma_{11}||_2 \, ||D||^2_2 \enskip .$$
Hence, $\hbox{Var}(T_1 - \tilde T_1) = O(||D||^2_2)$.
Recall that the quantity $||D||_2$ depends upon our choice of $j=1$. In order to have a general result,
applicable to $T_j - \tilde T_j$ for all $j$, we prefer a bound in terms of the overall error $\Delta = \tilde \Omega - \Omega$.
Without loss of generality, define a function $f(\Omega) = \Omega_{11} - \Omega_{1\cdot}\Omega_{\cdot\cdot}^{-1}\Omega_{\cdot 1}$.
That this function is Lipschitz smooth is straightforward to show, as mentioned previously. As a result,
$$||D||_2 = ||f(\Omega) - f(\tilde\Omega)||_2 \le K ||\Omega - \tilde \Omega||_2 = K ||\Delta ||_2 \enskip .$$
The results of the corollary then follow.
\section{Additional simulations}
Additional simulations are provided to demonstrate predictive ability at lower signal-to-noise (SNR) thresholds. Comparisons across values of $\rho_{in}$ and $\rho_{out}$ in the main paper were shown with $\mbox{SNR}=0.20$; these additional simulations show $\mbox{SNR}=0.10$ and $\mbox{SNR}=0.05$.
\begin{figure}
\caption{Single-site recovery from a stochastic block model simulation with $p=20$ nodes, $n=50$ cases and controls, and $\mbox{SNR}=0.10$.}
\label{fig:snr10}
\end{figure}
\begin{table}[ht]
\centering
\caption{Probability that the top-ranked site is the true perturbation site and (AUC) for simulations shown in Figure~\ref{fig:snr10}. ($\mbox{SNR}=0.10$)}
\label{tab:snr10}
\begin{tabular}{ccccccc}
\hline
& & \multicolumn{2}{c}{LRT methods} & \multicolumn{2}{c}{Differential expression} & SSEM-lasso \\
$\rho_{in}$ & $\rho_{out}$ & Multi-att. & Single-att. & Multi-att. & Single-att. & Single-att.\\
\hline
0.8 & 0.2 & 0.14 (0.61) & 0.12 (0.61) & 0.10 (0.58) & 0.11 (0.55) & 0.11 (0.60) \\
& 0.4 & 0.21 (0.65)& 0.14 (0.63) & 0.12 (0.59) & 0.12 (0.57) & 0.12 (0.62) \\
& 0.6 & 0.16 (0.68) & 0.14 (0.63) & 0.09 (0.58) & 0.08 (0.55) & 0.14 (0.62) \\ \hline
0.6 & 0.2 & 0.21 (0.70) & 0.15 (0.65) & 0.14 (0.64) & 0.14 (0.58) & 0.17 (0.65) \\
& 0.4 & 0.09 (0.64)& 0.11 (0.64) & 0.09 (0.58) & 0.10 (0.57) & 0.11 (0.63) \\
& 0.6 & 0.15 (0.67) & 0.11 (0.64) & 0.07 (0.55) & 0.09 (0.55) & 0.12 (0.63) \\
\hline
\end{tabular}
\end{table}
\begin{figure}
\caption{Single-site recovery from a stochastic block model simulation with $p=20$ nodes, $n=50$ cases and controls, and $\mbox{SNR}=0.05$.}
\label{fig:snr05}
\end{figure}
\begin{table}[ht]
\centering
\caption{Probability that the top-ranked site is the true perturbation site and (AUC) for simulations shown in Figure~\ref{fig:snr05}. ($\mbox{SNR}=0.05$)}
\label{tab:snr05}
\begin{tabular}{ccccccc}
\hline
& & \multicolumn{2}{c}{LRT methods} & \multicolumn{2}{c}{Differential expression} & SSEM-lasso \\
$\rho_{in}$ & $\rho_{out}$ & Multi-att. & Single-att. & Multi-att. & Single-att. & Single-att.\\
\hline
0.8 & 0.2 & 0.09 (0.52) & 0.09 (0.51) & 0.05 (0.52) & 0.06 (0.52) & 0.07 (0.51) \\
& 0.4 & 0.09 (0.55) & 0.08 (0.55) & 0.07 (0.54) & 0.05 (0.55) & 0.07 (0.54) \\
& 0.6 & 0.07 (0.55) & 0.08 (0.54) & 0.08 (0.48) & 0.05 (0.48) & 0.07 (0.54) \\ \hline
0.6 & 0.2 & 0.10 (0.56) & 0.08 (0.57) & 0.07 (0.52) & 0.06 (0.52) & 0.09 (0.56) \\
& 0.4 & 0.06 (0.55) & 0.09 (0.52) & 0.08 (0.54) & 0.07 (0.56) & 0.07 (0.52) \\
& 0.6 & 0.09 (0.56) & 0.10 (0.53) & 0.07 (0.52) & 0.06 (0.52) & 0.10 (0.54) \\
\hline
\end{tabular}
\end{table}
\end{document}
\begin{document}
\title[Geometric phases and criticality in spin systems]
{Geometric phases and criticality in spin systems}
\author[Jiannis K. Pachos and Angelo C. M. Carollo]
{Jiannis K. Pachos and Angelo C. M. Carollo}
\affiliation{Department of Applied Mathematics and Theoretical Physics, \\
University of Cambridge, Wilberforce Road, Cambridge CB3 0WA, UK}
\label{firstpage}
\maketitle
\begin{abstract}{Berry phases, critical phenomena, XY model}
A general formalism of the relation between geometric phases produced by
circularly evolving interacting spin systems and their criticality behavior
is presented. This opens up the way for the use of geometric phases as a
tool to study regions of criticality without having to undergo a quantum
phase transition. As a concrete example a spin-1/2 chain with XY
interactions is presented and the corresponding geometric phases are
analyzed. The generalization of these results to the case of an arbitrary
spin system provides an explanation for the existence of such a relation.
\end{abstract}
\section{Introduction}
Few conceptual advances in quantum physics have been as exciting or as
broadly studied as geometric phases. They lie within the heart of quantum
mechanics giving a surprising connection between the geometric properties of
evolutions and their dynamics. Historically, the first such effect was
presented by Aharonov \& Bohm (1959), where the quantum state of a charged
particle acquires a phase factor when it moves along a closed path in the
presence of a magnetic field. Since then a series of similar effects have
been considered and experimentally verified. A more intriguing case where a
quantum state is changed in a circular fashion was studied by Berry (1984)
which led to the generation of the celebrated Berry phase. This effect does
not need the presence of electromagnetic interactions and due to its
abstract and general nature has found many applications~(Shapere {\it et
al.} 1989; Bohm {\it et al.} 2003).
A characteristic that all non-trivial geometric evolutions have in common is
the presence of non-analytic points in the energy spectrum. At these points
the state of the system is not well defined due to their degenerate nature.
One could say that the generation of a geometric phase is a witness of such
singular points. Indeed, the presence of degeneracy at some point is
accompanied by curvature in its immediate neighborhood and a state that is
evolved along a closed path is able to detect it. These points or regions,
are of great interest to condensed matter or molecular physicists as they
determine, to a large degree, the behavior of complex quantum systems. The
geometric phases are already used in molecular physics to probe the presence
of degeneracy in the electronic spectrum of complex molecules. Initial
considerations by Herzberg and Longuet-Higgins (1963) revealed a sign
reversal when a real Hamiltonian is continuously transported around a
degenerate point. Its generalization to the complex case was derived by
Stone (1976) and an optimization of the real Hamiltonian case was performed
by Johansson \& Sj\"oqvist (2004).
Geometric phases have been associated with a variety of condensed matter and
solid state phenomena (Thouless {\it et al.} 1982; Resta 1994; Nakamura \&
Todo 2002; Ryu \& Hatsugai 2006). Nevertheless, their connection to quantum
phase transitions has only been shown recently by Carollo \& Pachos (2005).
It was further elaborated by Zhu (2005), where the critical exponents were
evaluated from the scaling behavior of geometric phases, and by Hamma (2006),
who showed that geometric phases can be used as a topological test to reveal
quantum phase transitions. In essence, quantum phase transitions describe
the abrupt changes on the macroscopic behavior of a system resulting from
small variations of external parameters. These critical changes are caused
by the presence of degeneracies in the energy spectrum and are characterized
by long range quantum correlations. This is an exciting area of research
that considers a variety of effects such as the quantum Hall effect and high
$T_c$ superconductivity.
Here we exploit geometric phases as a tool to probe quantum phase
transitions in many-body systems. This provides the means to detect, not
only theoretically, but also experimentally the presence of criticalities.
Apart from the academic interest this approach may have certain advantages.
In particular, the geometric evolutions do not take the system through a
quantum phase transition. The latter is hard to physically implement as it
is accompanied by multiple degeneracies that can take the system away from
its ground state. Hence, they provide a way to probe criticalities in a
physically appealing way. Moreover, the geometric phases provide a non-local
object that might be useful to probe critical phenomena that are
undetectable by local order parameters. The latter constitute an exciting field
of current research (Wen 2002).
As an explicit example we employ the one dimensional XY model in the
presence of a magnetic field. This model is analytically solvable and it
offers enough control parameters to support geometric evolutions. By
explicit calculations we observe that an excitation of the model obtains a
non-trivial geometric phase if and only if it circulates a region of
criticality. The generation of this phase can be traced down to the presence
of degeneracy of the energy at the critical point in a similar way used in
molecular systems. The geometric phase can be used to extract information on
the critical exponents that completely characterize the critical behavior. A
generalization of the results to the case of an arbitrary spin system is
demonstrated. Finally, a physical implementation of the XY model and its
corresponding geometric evolutions is proposed with ultra-cold atoms
superposed by optical lattices~(Pachos \& Rico 2005). The independence of
the generated phase from the number of atoms, its topological nature and its
resilience against control errors makes the proposal appealing for
experimental realization.
\section{Geometric phases}
Historically, the definition of geometric phase was originally introduced by
Berry in a context of closed, adiabatic, Schr\"odinger evolutions. What
Berry showed in his seminal paper~(Berry 1984) was that a quantum system
subjected to a slowly varying Hamiltonian manifests in its phase a geometric
behavior.
Let us summarize the derivation of the Berry phase. Consider a Hamiltonian
$H(\lambda)$ depending on some external parameters
$\lambda=(\lambda_1,\lambda_2,\dots, \lambda_m)$, and suppose that these
parameters can be varied arbitrarily inside a parameter space $\mathcal{M}$.
Assume that for each value of $\lambda$ the Hamiltonian has a completely
discrete spectrum of eigenvalues,
\begin{equation}\label{eq:spectr}
H(\lambda)\ket{n(\lambda)}=E_n(\lambda)\ket{n(\lambda)},
\end{equation}
where $\ket{n(\lambda)}$ and $E_n(\lambda)$ are eigenstates and eigenvalues,
respectively, of $H(\lambda)$. Suppose that the values of $\lambda$ change
slowly, along a smooth path in $\mathcal{M}$. Under the adiabatic
approximation, a system initially prepared in an eigenstate
$|n(\lambda)\rangle$ remains in the corresponding instantaneous
eigenspace.
In the simplest case of a non-degenerate eigenvalue, the evolution of the
eigenstate is specified by the spectral decomposition~(\ref{eq:spectr}) up
to a phase factor. This phase factor can be evaluated by solving the
Schr\"odinger equation under the constraint of the adiabatic approximation,
yielding
\begin{equation}\label{esp:state}
\ket{\psi(t)_n}= e^{-i\beta}\exp\left\{i\oint_C\mathbf{A}\cdot
d\lambda\right\}
\ket{n(\lambda(t))},
\end{equation}
where $\beta=\int_0^T E_n(t)\, dt$ is the usual dynamical phase, and the
extra phase factor is the geometric phase. This phase has the form of a path
integral of a vector potential $\mathbf{A}$ (analogous to the
electromagnetical vector potential) called the Berry connection, whose
components are
\begin{equation}
A_i=i\bra{n(\lambda)}\frac{\partial}{\partial
\lambda_i}\ket{n(\lambda)}.
\label{connection}
\end{equation}
Berry was the first to recognize that this additional phase factor has an
inherent geometrical meaning: it cannot be expressed as a single-valued
function of $\lambda$, but it is a function of the path followed by the
state during its evolution. Surprisingly, the value of this phase depends
only on the geometry of the path, and not on the rate at which it is
traversed. Hence the name ``geometric phase''.
\begin{figure}
\begin{center}
\includegraphics[width=0.45\textwidth]{Deg.eps}
\caption{The geometric phase is proportional to the solid angle
spanned by the Hamiltonian with respect to its degeneracy point.}
\label{Bloch}
\end{center}
\end{figure}
The simplest, but still significant example of geometric phase, is the one
obtained for a two-level system, such as a spin-1/2 particle in the presence
of a magnetic field. Its Hamiltonian is given by
\begin{equation} H(\theta,\phi)=\mathbf{B}(\theta,\phi)\cdot
\mbox{\boldmath{$\sigma$}} =|\mathbf{B}| U(\theta,\phi) \sigma^z
U^\dagger(\theta,\phi),
\end{equation}
where $(\theta, \phi)$ determine the orientation of the magnetic field,
$U(\theta,\phi)=e^{-i\sigma^z\phi/2}e^{-i\sigma^y\theta/2}$ is an $SU(2)$
transformation which rotates the operator
$\mathbf{B}\cdot\mbox{\boldmath{$\sigma$}}$ to the $z$-direction and
$\mbox{\boldmath{$\sigma$}}=(\sigma^x,\sigma^y, \sigma^z)$ is the vector of
Pauli operators, given by
\begin{equation}
\sigma^x =
\left(\begin{array}{cc}
0 & 1 \\
1 & 0
\end{array}\right) ,\,\,\,\,\,\,
\sigma^y =
\left(\begin{array}{cc}
0 & -i \\
i & 0
\end{array}\right) ,\,\,\,\,\,\,
\sigma^z =
\left(\begin{array}{cc}
1 & 0 \\
0 & -1
\end{array}\right).
\end{equation}
With this parametrization the Hamiltonian can be represented as a vector on
a sphere, centered at the point of degeneracy of the Hamiltonian
($|\mathbf{B}|=0$), as seen in Figure~\ref{Bloch}.
For $\theta=\phi=0$ we have that $U=\mbox{$1 \hspace{-1.0mm} {\bf l}$}$ and the two eigenstates of the
system are given by $|+\rangle=(1,0)^T$ and $|-\rangle=(0,1)^T$ with
corresponding eigenvalues $E_\pm=\pm|\mathbf{B}|/2$. Let us consider the
evolution resulting when a closed path $C$ is spanned adiabatically on the
sphere. Following the previous general consideration it is easy to show that
the Berry connection components corresponding, e.g. to the $|+\rangle$
state, are given by
\begin{equation}
A_\theta =0 ,\,\,\, A_{\phi}=\frac{1}{2}\left(1-\cos\theta \right)
\end{equation}
that leads to the geometric phase
\begin{equation}\label{spinphase}
\gamma(C)=\oint_{C}\mathbf{A}\cdot d\mathbf{r}=\frac{1}{2}
\int_{\Sigma(\theta,\phi)}\!\!\!\sin{\theta}\;d\theta
d\phi=\frac{\Omega}{2}.
\end{equation}
The geometric phase that corresponds to the $|-\rangle$ state is given by
$\gamma(C)=-\Omega/2$. Here $\Omega=\int\!\!\int_{\Sigma}\sin\theta \,d\theta \,d
\phi$ is the solid angle enclosed by the loop, as seen from the degeneracy
point. In this expression the geometric origin of the geometric phase $\gamma(C)$
is evident. Its value depends only on the way in which these parameters are
changed in relation to the degeneracy point of the Hamiltonian.
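As a concrete instance, for a loop spanned at a fixed polar angle $\theta$ by a full rotation in $\phi$, the enclosed solid angle is $\Omega=2\pi(1-\cos\theta)$, so the $|+\rangle$ state acquires the geometric phase $\pi(1-\cos\theta)$; this single-spin expression is precisely the building block that reappears, mode by mode, in the many-body result \eqref{geomphase} below.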
A particularly interesting case is the one in which the Hamiltonian can be
cast in a real form, corresponding to $\theta=\pi/2$. In this case the
phase becomes $\gamma(C)=\pi$, reproducing the sign change of the eigenstate when
it circulates a point of degeneracy ($|\mathbf{B}|=0$), in agreement with
the Longuet-Higgins theorem.
\section{The XY model and its criticality}
In order to illustrate the connection between geometric phases and critical
spin systems we shall consider the concrete example of a chain of spin-1/2
particles subject to XY interactions. This is a one dimensional model with
nearest-neighbor spin-spin interactions, where we allow the presence of an
external magnetic field oriented along the $z$-direction. The Hamiltonian is
given by
\begin{equation}
\label{HXYModel}
H = -\sum_{l=-M}^{M} \left(\frac{1 + \gamma}{2}\sigma^x_l
\sigma^x_{l+1} + \frac{1 - \gamma}{2}\sigma^y_l \sigma^y_{l+1}
+\frac{\lambda}{2}\sigma^z_l \right),
\end{equation}
where $\sigma^\mu_l$ are the Pauli matrices at site $l$, $\gamma$ is the x-y
anisotropy parameter and $\lambda$ is the strength of the magnetic field.
This model was first explicitly solved by Lieb {\it et al.} (1961) and by
Katsura (1962). Since the XY model is exactly solvable and still presents a
rich structure it offers a benchmark to test the properties of geometric
phases in the proximity of criticalities.
In particular, we are interested in a generalization of Hamiltonian
(\ref{HXYModel}) obtained by applying to each spin a rotation with angle
$\phi$ around the $z$-direction
\begin{equation}
\label{HXYphi}
H(\phi) = U(\phi)H U^\dag (\phi)\quad \text{ with }\quad U(\phi) =
\prod_{l=-M}^{M} e^{i\sigma^z_l\phi/2},
\end{equation}
in the same way as we did for the single spin-1/2 particle. The family of
Hamiltonians generated by varying $\phi$ is clearly isospectral and,
therefore, has the same energy spectrum as the initial Hamiltonian. In
addition, due to the bilinear form of the interaction term we have that
$H(\phi)$ is $\pi$-periodic in $\phi$. The Hamiltonian $H(\phi)$ can be
diagonalized by a standard procedure based on the Jordan-Wigner
transformation and the Bogoliubov transformation (Carollo
\& Pachos 2005). From this procedure one can obtain the ground state,
$\ket{g}$, which is given by
\begin{equation}\label{groundstate}
\ket{g}\!=\!\!\!\prod_{\otimes k>0}\!\!\Big(\!\cos {\theta_k \over 2}
\ket{0}_{\!k}\ket{0}_{\!\!-k}
\!\!-ie^{2i\phi} \sin {\theta_k \over 2}
\ket{1}_{\!k} \ket{1}_{\!\!-k}\!\Big),
\end{equation}
where $\ket{0}_{k}$ and $\ket{1}_k$ are the vacuum and single
fermionic excitation of the k-th momentum mode. The angle $\theta_k$
is defined by $\cos\theta_k=\epsilon_k/\Lambda_k$ with
$\epsilon_k=\cos{2\pi k \over N}-\lambda$ and the energy gap above
the ground state is given by $\Lambda_k=\sqrt{\epsilon_k^2+ \gamma^2
\sin^2{2\pi k \over N}}$. It is remarkable that by inspection of
(\ref{groundstate}) the ground state can be interpreted as the direct
product of $N$ spins each one having its own orientation given by the
direction $(2\phi,\theta_k)$. As we will see in the next section this
observation will make the evaluation of the ground state geometric phase a
simple task. Before investigating the geometric properties of the XY model
we will first consider the behavior of the spectrum as a function of the
external parameters $\gamma$, $\lambda$ and $\phi$.
Let us first review the concept of quantum phase transitions. A many body
system, driven by a parameter $g$, undergoes a quantum phase transition at a
point $g=g_c$ when the energy density of the ground state at $g=g_c$ is
non-analytic. This point is associated with a crossing or an avoiding of the
energy eigenvalues. It is characterized by either a discontinuity in the
first derivative of the ground state energy density (first-order phase
transition) or by discontinuity or divergence in the second derivative of
the ground state energy density (second-order quantum phase transition)
assuming that the first derivative is continuous. In particular, the energy
gap $\Delta$ between the ground and the first excited states vanishes like
$\Delta\propto |g-g_c|^{z\nu}$ as $g$ approaches $g_c$ creating a point of
degeneracy that we will call critical. Moreover, the non-analyticity of the
energy eigenvalues is related to abrupt changes in the ground state
properties. As these transitions occur at zero temperature they are driven
purely by quantum fluctuations~(Sachdev 2001). It can be shown that the
length of the associated quantum correlations, $\xi$, diverges like
$\xi^{-1}\propto |g-g_c|^\nu$ as $g$ approaches the critical point $g_c$.
The parameters $z$ and $\nu$ are called the critical exponents and their
values are universal, independent of most of the microscopic details of the
system Hamiltonian.
For the case of the XY model one can identify the critical points by finding
the regions where the energy gap $\Lambda_k$ vanishes. Indeed, there are two
regions in the $\lambda$, $\gamma$ space that are critical. When $\gamma=0$
we have $\Lambda_k=0$ for $-1<\lambda<1$, which is a first order phase
transition with an actual energy crossing and critical exponents $z=2$ and
$\nu=1/2$. The other critical region is given by $\lambda=\pm 1$ where we
have $\Lambda_k=0$ for all $\gamma$. This is a second order quantum phase
transition with energy level avoiding. When $\gamma=1$ and $\lambda =\pm1$
we obtain the Ising critical model with critical exponents $z=1$ and
$\nu=1$.
Finally, let us consider the criticality behavior of the rotated
Hamiltonian, $H(\partialhi)$. The energy gap, $\Lambdambda_k$ does not depend on the
angle $\partialhi$, as this parameter is related to an isospectral transformation.
Hence, the criticality region for the rotated Hamiltonian, $H(\partialhi)$, is
obtained just by a rotation of the critical points of the XY Hamiltonian
around the $\lambdambda$ axis. This is illustrated in Figure~\ref{criticality},
where the Ising type criticality corresponds now to two planes at $\lambdambda
=1 $ and $\lambdambda=-1$ and the XX criticality remains along the $\lambdambda$
axis for $|\lambdambda|<1$.
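The location of these critical regions can be read off directly from the
definition of $\Lambda_k$. As a minimal consistency check (a verification of
the statements above rather than a new result), note that the gap can only
close when both terms under the square root vanish simultaneously,
\begin{equation}
\Lambda_k=0 \quad\Longleftrightarrow\quad \cos{2\pi k \over N}=\lambda
\;\;\hbox{and}\;\; \gamma\sin{2\pi k \over N}=0 ,
\end{equation}
so that, in the thermodynamic limit, for $\gamma=0$ a gapless mode exists
whenever $|\lambda|\leq 1$, while for $\gamma\neq 0$ the gap can close only
at $k=0$ or $k=N/2$, i.e. at $\lambda=\pm 1$. In the Ising case $\gamma=1$
one finds $\Lambda_k^2=1-2\lambda\cos{2\pi k \over N}+\lambda^2$, whose
minimum $|1-|\lambda||$ vanishes linearly as $\lambda\to\pm 1$, consistent
with the relation $z\nu=1$ for the Ising criticality quoted above.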
\begin{figure}
\begin{center}
\includegraphics[width=4in]{criticality.eps}
\caption{(a) The regions of criticality of the XY Hamiltonian are
presented as a function of the parameters $ \lambda $ and $\gamma$ and (b)
the corresponding ones for the Hamiltonian $H(\phi)$ where $\phi$
parameterizes a rotation around the $\lambda$ axis. Possible paths for the
geometric evolutions are depicted, spanned by varying the parameter $\phi$.}
\label{criticality}
\end{center}
\end{figure}
\section{Geometric phases and XY criticalities}
Figure~\ref{criticality} depicts the critical points of the XY model. Now we
are interested in spanning looping trajectories in the space of the
Hamiltonian parameters $\lambda$, $\gamma$ and $\phi$. The aim is to
determine the geometric evolutions corresponding to these paths and relate
them to the criticality regions. A special family of paths is of interest,
namely those that encircle the $\lambda$ axis just by varying the $\phi$
parameter from zero to $\pi$. Indeed, these paths enclose the XX criticality
only when $-1<\lambda<1$. As we shall see in the following, it is possible
to evaluate the corresponding geometric phases of the ground and the first
excited states as a function of $\lambda$ and $\gamma$.
\begin{figure}
\begin{center}
\includegraphics[width=4.5in]{berry.eps}
\caption{The geometric phase corresponding to the ground state (a)
and the relative one between the ground and first excited state (b) as a
function of the path parameters $\lambda $ and $\gamma$. Each point of the
surface corresponds to the geometric phase for a path that is spanned by
varying $\phi$ from $0$ to $\pi$ for certain $\lambda$ and $\gamma$. The
values of the geometric phase corresponding to the loops $C_1$, $C_2$ and
$C_3$ in Figure~\ref{criticality} are also indicated.}
\label{berry}
\end{center}
\end{figure}
Using the standard formula it is easy to show that the geometric phase of
the ground state $\ket{g}$ is given by
\begin{equation}
\label{geomphase}
\varphi_g = -i\int_0^\pi\bra{g}\frac{\partial}{\partial\phi}
\ket{g}\, d\phi = \sum_{k>0}\pi(1-\cos\theta_k).
\end{equation}
This result can be understood by considering the form of $\ket{g}$, which is
a tensor product of states, each lying in the two dimensional Hilbert space
spanned by $\ket{0}_{k}\ket{0}_{-k}$ and $\ket{1}_k\ket{1}_{-k}$. For each
value of $k>0$, the state in each of these two-dimensional Hilbert spaces
can be represented as a Bloch vector with coordinates $(2\phi,\theta_k)$. A
change in the parameter $\phi$ determines a rotation of each Bloch vector
about the $z$-direction. A closed circle will, therefore, produce an overall
phase given by the sum of the individual phases as given in
(\ref{geomphase}) and illustrated in Figure \ref{berry}(a).
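For completeness, the mode-by-mode calculation behind (\ref{geomphase}) is
elementary; the following one-line sketch uses only the form of the $k$-th
factor of the ground state (\ref{groundstate}), denoted here $\ket{g_k}$,
and is included merely as a verification of the Bloch-vector picture above:
\begin{equation}
-i\bra{g_k}\frac{\partial}{\partial\phi}\ket{g_k}
=-i\Big(ie^{-2i\phi}\sin {\theta_k \over 2}\Big)
\Big(2e^{2i\phi}\sin {\theta_k \over 2}\Big)
=2\sin^2 {\theta_k \over 2}=1-\cos\theta_k .
\end{equation}
This is independent of $\phi$, so the integration over $\phi\in[0,\pi]$
contributes a factor $\pi$ for each momentum pair and the sum over $k>0$
reproduces (\ref{geomphase}).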
Of particular interest is the relative geometric phase between the first
excited and ground states given by the difference of the geometric phases
acquired by these two states. The first excited state is given by
\begin{equation}\label{excitedstate}
\ket{e_{k_0}} =\!\!\!\ket{1}_{\!k_0}\ket{0}_{\!\!-k_0}
\prod_{\otimes k>0,\,\,k\neq \pm k_0}\!\!\Big(\!\cos {\theta_k \over 2}
\ket{0}_{\!k}\ket{0}_{\!\!-k} \!\!-ie^{2i\phi} \sin {\theta_k \over 2}
\ket{1}_{\!k} \ket{1}_{\!\!-k}\!\Big),
\end{equation}
with $k_0$ corresponding to the minimum value of the energy $\Lambda_k$. The
behavior of this state is similar to a direct product of only $N-1$ spins
oriented along $(2\phi,\theta_k)$, where the state of the spin corresponding
to momentum $k_0$ no longer contributes to the geometric phase. Thus
the relative geometric phase between the ground and the excited states
becomes
\begin{equation}
\label{connectionExcited}
\varphi_{eg} \equiv \varphi_e-\varphi_g =
-\pi (1-\cos \theta_{k_0}).
\end{equation}
In the thermodynamical limit ($N \to \infty$), $\varphi_{eg}$ takes the form
\begin{equation}
\label{GPExcGrndSmallGamma}
\varphi_{eg}=\left\{\begin{array}{cl}
0, & \text{for $|\lambda |>1-\gamma^2$} \\
-\pi+{\pi \lambda \gamma \over
\sqrt{(1-\gamma^2)(1-\gamma^2-\lambda^2)}}, & \text{for
$|\lambda |<1-\gamma^2$}
\end{array}\right.
\end{equation}
where the condition $|\lambda | > 1-\gamma^2$ constrains the excited state
to be completely oriented along the $z$-direction, resulting in a zero
geometric phase. As can be seen from Figure~\ref{berry}(b), the most
interesting behavior of $\varphi_{eg}$ is obtained in the case of $\gamma$
small compared to $\lambda$. In this case $\varphi_{eg}$ behaves as a step
function, giving either $\pi$ or $0$ phase, depending on whether
$|\lambda|<1$ or $|\lambda|>1$, respectively. This behavior is precisely
determined by whether the corresponding loop encloses a critical point or
not and can be used as a witness of its presence. In particular, in the
$|\lambda |<1-\gamma^2$ case the first term corresponds to a purely
topological phase, while the second is a geometric contribution. Indeed, the
first part gives rise to a phase which depends solely on the topological
character of the trajectory traced by the $(2\phi,\theta_k)$ coordinates. In
particular, if $n$ circulations are performed then the topological phase is
$n\pi$, where $n$ is the winding number. The second term is geometric in
nature and it can be made arbitrarily small by tuning appropriately the
couplings $\lambda$ or $\gamma$. This idea is illustrated in
Figure~\ref{conical}, where the energy surface of the ground and first
excited state is depicted. The point of degeneracy is the intersection of
the two surfaces. This is the point where the energy density is not
analytic. Consider the case of a family of loops converging to a point. In
the trivial case where the limiting point does not coincide with a
degeneracy, the corresponding geometric phase converges to zero. If,
instead, the degeneracy point is included, the geometric phase tends to a
finite value~(Hamma 2006).
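For the reader's convenience we sketch how the closed form in
(\ref{GPExcGrndSmallGamma}) follows from (\ref{connectionExcited}); this is
only a verification of the quoted expression. In the thermodynamic limit,
writing $x=\cos{2\pi k \over N}\in[-1,1]$, the gap satisfies
\begin{equation}
\Lambda^2=(x-\lambda)^2+\gamma^2(1-x^2)=(1-\gamma^2)x^2-2\lambda x
+\lambda^2+\gamma^2 ,
\end{equation}
which, for $|\lambda|<1-\gamma^2$, is minimized in the interior of the
interval at $x_0=\lambda/(1-\gamma^2)$, giving
\begin{equation}
\epsilon_{k_0}={\lambda \gamma^2 \over 1-\gamma^2},\qquad
\Lambda_{k_0}=\gamma\sqrt{1-\gamma^2-\lambda^2 \over 1-\gamma^2},\qquad
\cos\theta_{k_0}={\lambda \gamma \over
\sqrt{(1-\gamma^2)(1-\gamma^2-\lambda^2)}},
\end{equation}
and substituting into (\ref{connectionExcited}) reproduces the second line
of (\ref{GPExcGrndSmallGamma}). For $|\lambda|>1-\gamma^2$ the minimum sits
at an endpoint $x_0=\pm 1$, where $\sin{2\pi k_0 \over N}=0$ and
$\cos\theta_{k_0}=\pm 1$; the Bloch vector of the $k_0$ mode then lies on
the $z$ axis and the accumulated relative phase is trivial (zero modulo
$2\pi$), which is the first line.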
To better understand the properties of the relative geometric phase, we
focus on the region of parameters with $\gamma\ll 1$. In this case, it can
be shown (Carollo \& Pachos 2005) that the Hamiltonian, when restricted to
its lowest energy modes, can be cast in a {\em real} form and, for
$|\lambda|<1$, its eigenvalues present a {\em conical intersection} centered
at $\gamma=0$.
\begin{figure}
\begin{center}
\includegraphics[width=3.3in]{conical2.eps}
\caption{The conical intersection between the two lowest energy levels
of the Hamiltonian as a function of its parameters. A contractible loop,
i.e. a loop that can be continuously deformed to a point of the domain,
produces a zero geometric phase. A non-trivial geometric phase is obtained
from non-contractible loops.}
\label{conical}
\end{center}
\end{figure}
It is well known that when a closed path is taken within the real domain of
a Hamiltonian, a topological phase shift $\pi$ occurs only when a conical
intersection is enclosed. In the present case, the conical intersection
corresponds to a point of degeneracy where the XX criticality occurs and it
is revealed by the topological term in the relative geometric phase
$\varphi_{eg}$. It is worth noticing that the presence of a conical
intersection indicates that the energy gap scales linearly with respect to
the coupling $\gamma$ when approaching the degeneracy point. This implies
that the critical exponents of the energy, $z$, and of the correlation
length, $\nu$, satisfy the relation $z\nu=1$, which is indeed the case for
the XX criticality~(Sachdev 2001). In the following we shall see that
geometric phases are sufficient to determine the exact values of the
critical exponents and thus provide a complete characterization of the
criticality behavior.
\section{The general case}
We shall show here that the vacuum expectation value of a hermitian
operator, $O$, can be written in terms of a geometric phase. This is a
rather general result that can be used to study critical models, usually
probed by the behavior of vacuum expectation values of observables, just by
considering geometric phases. We assume, first, that $O$ does not commute
with the Hamiltonian, a requirement satisfied for the case of a
non-degenerate spectrum and, second, that $O$ can transform the ground state
in a cyclic fashion. The latter provides the looping trajectories of the
geometric evolutions.
To show this, let us extend the initial Hamiltonian, $H_0$, of the model in
the following way
\begin{equation}
H(\lambda) = H_0 +\lambda O .
\end{equation}
Turning to the interaction picture with respect to $O$ we obtain
\begin{equation}
H_\text{int}(\lambda) = U(\lambda t)H_0 U^\dagger(\lambda t) ,
\end{equation}
where $U(\lambda t) = \exp(-i \lambda O t)$. From the cyclicity requirement
there exists a time $T$ such that the unitary rotation $U(\lambda T)$ takes
the ground state $\ket{\psi}$ to itself, i.e. $U(\lambda T)\ket{\psi}
=\ket{\psi}$. Hence, the desired cyclic evolution is obtained by a rotation
generated by $O$. The geometric phase that results from the cyclic evolution
is given by~(\ref{esp:state}) and, thus, we have
\begin{equation}
\varphi = \lambda T \bra{\psi} O \ket{\psi} .
\label{Relation}
\end{equation}
Hence, the expectation value of an operator that can generate circulations
of the ground state is expressible in terms of a geometric phase.
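The relation (\ref{Relation}) can be checked directly once the cyclic path
is written out. The following sketch assumes the standard convention in
which the geometric phase of a closed path $\ket{\psi(t)}$, $0\leq t\leq T$,
with $\ket{\psi(T)}=\ket{\psi(0)}$, is $\varphi=\int_0^T\bra{\psi(t)}\,
i\frac{d}{dt}\,\ket{\psi(t)}\, dt$, the dynamical contribution having been
removed by passing to the interaction picture as above. For the path
generated by $O$,
\begin{equation}
\ket{\psi(t)}=U(\lambda t)\ket{\psi}=e^{-i\lambda O t}\ket{\psi}
\quad\Longrightarrow\quad
\bra{\psi(t)}\, i\frac{d}{dt}\,\ket{\psi(t)}=\lambda\bra{\psi}O\ket{\psi},
\end{equation}
which is time independent, so the integral over $[0,T]$ gives
(\ref{Relation}).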
One can easily verify this relation for the simple case of a spin-1/2
particle in a magnetic field. When the direction of the magnetic field is
changed adiabatically and isospectrally then the state of the spin is guided
in a cyclic path around the $z$-direction. The generated phase is given by
$\varphi = \pi(1- \cos \theta)$, where $\theta$ is the fixed direction of
the magnetic field with respect to the $z$-direction. On the other hand, one
can easily evaluate that the expectation value of the operator
$(1-\sigma^z)/2$ that generates the cyclic evolution is given by
$\bra{\psi}(1-\sigma^z)/2\ket{\psi} =(1-\cos\theta)/2$ which verifies
relation (\ref{Relation}) as for this example $\lambda T=2\pi$.
This connection has far-reaching consequences. It is expected that intrinsic
properties of the state will be reflected in the properties of the geometric
phases. The latter, as they result from a physical evolution, can be
obtained and measured in a conceptually straightforward way. Here, we are
interested in employing geometric phases to probe critical phenomena of spin
systems. Indeed, from the particular example of the XY model we saw that the
presence of critical points can be detected by the behavior of specific
geometric evolutions and the corresponding critical exponents can be
extracted. This comes as no surprise as one can choose geometric phases that
correspond to the classical correlations of the system (expectation values,
e.g. of $\sigma^z_1\sigma^z_L$) from which the correlation length and the
critical behavior can be obtained.
Let us apply this idea to the XY model we studied earlier. There the
rotations are generated by the operator $O= \sum_l \sigma^z_l$.
Hence, the resulting geometric phase is proportional to the total
magnetization
\begin{equation}
M_z = \bra{\psi} \sum_l \sigma^z_l \ket{\psi} .
\end{equation}
It is well known (Sachdev 2001) that the magnetization $M_z$ can serve as an
order parameter, from which one can derive all the critical properties of
the XY model just by considering its scaling behavior. Indeed, Zhu (2006)
has considered the scaling of the ground state geometric phase of the XY
model, from which he evaluated the Ising critical exponents. As has been
shown here, this is a general property that can be applied to any critical
system.
\section{Physical implementation with optical lattices}
This construction, apart from its theoretical interest, offers a possible
experimental method to detect critical regions without the need to cross
them. When a physical system is forced to go through a critical region then
excited states may become populated due to the vanishing energy gap, thus
undermining the identification of the system state. Hence, being able to
probe the critical properties of a physical system just by evolving it
around the critical area is of much interest to experimentalists, as the
energy gap can be kept at a finite value.
In particular, we shall implement this model with optical lattices, a system
that has proven versatile in the field of quantum simulations. To this end,
consider two bosonic species labelled by $\sigma=a, b$ that can be given by
two hyperfine levels of an atom. Each one can be trapped by a laser field
configured as a standing wave that is heavily detuned from any atomic
transitions. Thus, the atom acts as a dipole in the presence of a periodic
sinusoidal trapping potential that can generate one-, two- or
three-dimensional lattices. Here we will restrict ourselves to the case
where the atoms, in an arbitrary superposition of states $a$ and $b$, are
confined in a one-dimensional array with the help of two in-phase optical
lattices. The tunnelling of atoms between neighboring sites is described by
\begin{equation}
V=-\sum_{l\sigma} (J_\sigma a_{l\sigma}^\dagger a_{(l+1)
\sigma} +\text{H.c.}) ,
\end{equation}
where $a_\sigma$ and $a^\dagger_\sigma$ are the annihilation and creation
operators of particles $\sigma$ and $J_\sigma$ their corresponding
tunnelling coupling. When two or more atoms are present in the same site,
they experience collisions given by
\begin{equation}
H^{(0)}=
\sum_{l\sigma \sigma'} {U_{\sigma \sigma'} \over 2}
a^\dagger_{l\sigma} a^\dagger_{l\sigma'} a_{l\sigma'} a_{l\sigma} ,
\end{equation}
where $U_{\sigma \sigma'}$ are the collisional couplings between atom
species $\sigma$ and $\sigma'$. We shall consider the limit $J \ll U$ where
the system is in the Mott insulator regime with one atom per lattice site
(Kastberg {\em et al.} 1995; Raithel {\em et al.} 1998). In this regime, the
effective evolution is obtained by adiabatic elimination of the states with
a population of two or more atoms per site, which are energetically
unfavorable. Hence, to describe the Hilbert space of interest, we can employ
the pseudospin basis of $|\!\!\uparrow\rangle\equiv |n_l^a=1,n_l^b=0\rangle$
and $|\!\!\downarrow\rangle\equiv |n_l^a=0,n_l^b=1\rangle $, for lattice
site $l$, and the effective evolution can be expressed in terms of the
corresponding Pauli (spin) operators.
It is easily verified by perturbation theory that when the tunnelling
coupling of both atomic species is activated, the following exchange
interaction is realized between neighboring sites~(Kuklov \& Svistunov 2003;
Duan {\em et al.} 2003)
\begin{equation}
H_1=-\frac{J_a J_b}{U_{ab}} \sum_l \left(
\sigma^x_l\sigma^x_{l+1}+\sigma^y_l\sigma^y_{l+1}\right).
\label{Ham1}
\end{equation}
In order to create an anisotropy between the $x$ and $y$ spin directions, we
activate a tunnelling by means of Raman couplings (Duan {\em et al.} 2003).
Application of two standing lasers $L_1$ and $L_2$, with zeros of their
intensities at the lattice sites and with phase difference $\phi$, can
induce tunnelling of the state $|+\rangle
\equiv (e^{-i\phi /2} |a\rangle + e^{i\phi /2} |b\rangle) / \sqrt{2}$.
The resulting tunnelling term is given by $V_c=J_c \sum_l (c^\dagger_l
c^{}_{l+1} +\text{H.c.})$, where $c_l$ is the annihilation operator of
$|+\rangle$ state particles. The tunnelling coupling, $J_c$, is determined
by the potential barrier of the initial optical lattice combined with the
potential reduction due to the Raman transition. The resulting evolution is
dominated by an effective Hamiltonian given, up to a readily compensated
Zeeman term, by
\begin{equation}
H_2=-{1 \over 2}{J_c^2 \over U_{ab}} \sum_l
U(\phi)\sigma^x_l\sigma^x_{l+1}U^\dagger(\phi) ,
\label{Ham0}
\end{equation}
where $U(\phi)$ was defined in (\ref{HXYphi}). Combining the rotationally
invariant Heisenberg interaction $H_1$ with $H_2$ gives the rotated XY
Hamiltonian described by equation~(\ref{HXYphi}), where the parameter
$\gamma$ is given by $J_c^2/(2\epsilon U_{ab})$ and $\epsilon = (2J_a J_b
+J_c^2/2)/U_{ab}$ is the overall energy scale multiplying the
Hamiltonian~(\ref{HXYModel}). The magnetic field term $\lambda\sum_l
\sigma^z_l$ is easily produced by a homogeneous and heavily detuned
laser radiation. If the radiation has amplitude $\Omega$ and detuning
$\Delta$ then the magnetic coupling is given by $\lambda=\Omega^2/\Delta$.
Finally, the angle $\phi$ of the rotated XY Hamiltonian is given by the
phase difference, $\phi$, of the lasers $L_1$ and $L_2$. Hence, the complete
control of the rotated XY Hamiltonian can be obtained by the optical lattice
configuration presented here and the geometric evolutions can be performed
by varying the phase $\phi$ from $0$ to $\pi$.
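As a small worked consequence of the couplings quoted above (a sketch for
orientation only, using nothing beyond the stated relations), eliminating
the overall scale $\epsilon$ gives the anisotropy directly in terms of the
tunnelling amplitudes,
\begin{equation}
\gamma={J_c^2 \over 2\epsilon U_{ab}}={J_c^2 \over 4J_a J_b + J_c^2},
\end{equation}
so that $\gamma$ ranges from $0$ (Raman tunnelling switched off, $J_c=0$)
up towards $1$ (Raman tunnelling dominant, $J_c^2\gg J_a J_b$), while
$\lambda$ and $\phi$ are controlled independently by the laser intensity and
the relative phase of $L_1$ and $L_2$.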
\section{Conclusions}
In this article, we have presented a method that theoretically, as well as
experimentally, allows for the detection of regions of criticality through
the geometric phase, without the need for the system to experience quantum
phase transitions. The latter is experimentally hard to realize as the
adiabaticity condition breaks down at the critical point and the state of
the system is no longer faithfully represented by the ground state.
Alternatively, the finite energy gap $\Lambda_k$ that is present along the
circular procedure is sufficient to adiabatically prevent unwanted
transitions between the ground state and the excited ones, even in the
thermodynamical limit.
The origin of the geometric phase can be ascribed to the existence of
degeneracy points in the parameter space of the Hamiltonian (Hamma 2006).
Hence, a criticality point can be detected by performing a looping
trajectory around it and detecting whether or not a non-zero geometric phase
has been generated. For the case of the XY model the topological nature of
the resulting phase, pinned to the value $\varphi_{eg}\approx \pi$, is
revealed by its resilience with respect to small deformations of the loop.
This characteristic results from the conical intersection structure of the
potential surfaces that is equivalent to having the critical exponents
satisfying $z\nu=1$. In addition, the critical exponents can be evaluated by
scaling arguments of the geometric phases (Zhu 2005). Hence, additional
information about the critical exponents can be deduced from the topological
nature and the exact value of the geometric phase. Moreover, topological
phases are inherently resilient against control errors, a property that can
prove to be of great advantage when considering many-body systems.
Such a study can be carried out theoretically on any system that can be
treated analytically, such as the cluster Hamiltonian~(Pachos
\& Plenio 2004), or numerically when analytic solutions are
not known. The generalization of these results to a wide variety of critical
phenomena and their relation to the critical exponents is a promising and
challenging question which deserves extensive future investigation.
\begin{thebibliography}{}
\item Aharonov, Y. \& Bohm, D. 1959 Phys. Rev. {\bf 115}, 485--491.
\item Berry, M. V. 1984 Proc. R. Soc. Lond. A {\bf 392}, 45.
\item Bohm, A., Mostafazadeh, A., Koizumi, H., Niu, Q. \& Zwanziger, J. 2003
The Geometric Phase in Quantum Systems, Ed. Springer.
\item Carollo, A. C. M. \& Pachos, J. K. 2005 Phys. Rev. Lett.
{\bf 95}, 157203.
\item Duan, L.-M., Demler, E. \& Lukin, M. D. 2003
Phys. Rev. Lett. {\bf 91}, 090402.
\item Hamma, A. 2006 quant-ph/0602091.
\item Herzberg, G. \& Longuet-Higgins, H. C. 1963 Discuss. Faraday
Soc, {\bf 35}, 77; Longuet-Higgins, H. C. 1975 Proc. R. Soc. London,
Ser. A {\bf 344}, 17.
\item Johansson, N. \& Sj\"oqvist, E. 2004 Phys. Rev. Lett. {\bf
92}, 060406.
\item Kastberg, A., {\em et al.} 1995 Phys. Rev. Lett. {\bf 74}, 1542.
\item Katsura, S. 1962 Phys. Rev. {\bf 127} 1508.
\item Kuklov, A. B. \& Svistunov, B. V. 2003
Phys. Rev. Lett. {\bf 90}, 100401.
\item Lieb, E., Schultz, T. \& Mattis, D. 1961 Annals of Phys. {\bf 16}
407.
\item Nakamura, M. \& Todo, S. 2002 Phys. Rev. Lett. {\bf 89}, 077204.
\item Pachos, J. K. \& Plenio, M. B. 2004 Phys. Rev. Lett. {\bf 93}, 056402.
\item Pachos, J. K. \& Rico, E. 2005 Phys. Rev. A {\bf 70}, 053620.
\item Raithel, G., {\em et al.} 1998 Phys. Rev. Lett. {\bf 81}, 3615.
\item Resta, R. 1994 Rev. Mod. Phys. {\bf 66}, 899.
\item Ryu, S. \& Hatsugai, Y. 2006 cond-mat/0601237 .
\item Sachdev, S. 2001 Quantum Phase Transitions, Cambridge University
Press, Cambridge, UK.
\item Shapere, A. \& Wilczek, F. 1989 Geometric phases in physics,
World Scientific, Singapore.
\item Stone, A. J. 1976 Proc. R. Soc. London, Ser. A {\bf 351}, 141.
\item Thouless, D. J., {\it et al.} 1982 Phys. Rev. Lett. {\bf 49}, 405.
\item Wen, X.-G. 2002 Quantum field theory of many-body systems,
Oxford University Press.
\item Zhu, S.-L. to appear in Phys. Rev. Lett, cond-mat/0511565.
\end{thebibliography}
\end{document}
\begin{document}
\title{On the structure of stationary sets\footnote{2000 AMS subject
classification 03E40, 03E65}}
\begin{abstract}
We isolate several classes of stationary sets of
$[\kappa]^{\omega}$ and investigate implications among them.
Under a large cardinal assumption, we prove a structure theorem for
stationary sets.
\end{abstract}
\section{Introduction}
We investigate stationary sets in the space $[\kappa]^{\omega}$ of countable subsets
of an uncountable cardinal. We concentrate on the following particular
classes of stationary sets:
\vskip5truept
\hskip4truecm full
\hskip2truecm {$\displaystyle{\nearrow\hskip4truecm \searrow}$}
\centerline{club $\to$ local club $\to$ reflective $\to$
projective stationary}
\hskip2truecm {$\displaystyle{\searrow\hskip4truecm \nearrow}$}
\hskip3.7truecm spanning
$$ \hbox{\rm Fig. 1.1}$$
In the diagram, the $\to$ represents implication. We show, among other
things, that under a suitable large cardinal assumption (e.g., under
Martin's Maximum), the diagram collapses to just two classes:
$$\left\{ \aligned
{\hbox{\rm club}}\cr
{\hbox{\rm local club}}\cr
{\hbox{\rm full}}\cr
\endaligned \right\}
\longrightarrow \left\{ \aligned
{\hbox{\rm projective stationary}}\cr
{\hbox{\rm spanning}}\cr
{\hbox{\rm reflective}}\cr
\endaligned\right\}$$
$$\hbox{\rm Fig.\ } 1.2$$
\noindent Under the same large cardinal assumption, we prove a structure
theorem for stationary sets: for every stationary set $S$ there exists a
stationary set $A\subset \omega_1$ such that $S$ is spanning above $A$
and nonstationary above $\omega_1-A$.
We also investigate the relation between some of the above properties of
stationary sets on the one hand, and properties of forcing on the other,
in particular the forcing that shoots a continuous $\omega_1$--chain
through a stationary set. We show that the equality of the classes of
projective stationary sets and spanning sets is equivalent to the
equality of the class of stationary--set--preserving forcings and the
class of semiproper forcings.
The work is in a sense a continuation of the previous work
\cite{FJ1} and \cite{FJ2} of the first two authors, and ultimately
of the groundbreaking work of \cite{FMSh} of Foreman, Magidor and
Shelah.
\section{Definitions}
We work in the spaces $[\kappa]^{\omega}$ and $[H_\lambda]^\omega$, where
$\kappa$ and $\lambda$ are uncountable cardinals. The concept of a
closed unbounded set and a stationary set has been generalized to
the context of these spaces (cf. \cite{J1}) and the generalization
gained considerable prominence following the work \cite{S} of
Shelah on proper forcing.
The space $[\kappa]^{\omega}$ is the set of all countable subsets of $\kappa$,
ordered by inclusion; similarly for ${[H_\lambda]^\omega}$, where $H_\lambda$ denotes
the set of all sets hereditarily of cardinality less than $\lambda$. A
set $C$ in this space is {\sl closed unbounded (club)} if it is closed
under unions of increasing countable chains, and cofinal in the ordering
by inclusion. A set $S$ is {\sl stationary} if it meets every club set.
We shall (with some exceptions) only consider $\kappa$ and $\lambda$
that are greater than $\omega_1$; note that the set $\omega_1$ is a club
in the space $[\omega_1]^\omega$ (which motivates the generalization).
In order to simplify some statements and some arguments, we shall only
consider those $x\in [\kappa]^{\omega}$ (those $M\in{[H_\lambda]^\omega}$) whose intersection with
$\omega_1$ is a countable ordinal (these objects form a club set); we
denote this countable ordinal by $\delta_x$ or $\delta_M$ respectively:
\vskip10truept \noindent$(2.1) \hskip 1truecm \delta_x = x\cap
\omega_1,\;\; \delta_M = M\cap \omega_1.$ \vskip10truept
The filter generated by the club sets in $[\kappa]^{\omega}$ is generated by the club
sets of the form
\vskip10truept
\noindent$(2.2) \hskip 1truecm C_F=\{x\;|\;x \hbox{\rm \ is closed under
} F\}$
\vskip10truept
\noindent where $F$ is an operation, $F : \kappa^{<\omega}\to\kappa;$
similarly for $H_\lambda$. In the case of $H_\lambda$, we consider only
those $M\in{[H_\lambda]^\omega}$ that are submodels of the model $(H_\lambda,\in,<)$,
where $<$ is some fixed well ordering; in particular, the $M$'s are
closed under the canonical Skolem functions obtained from the well
ordering.
For technical reasons, when considering continuous chains in $[\kappa]^{\omega}$ or
${[H_\lambda]^\omega}$, we always assume that when $\langle x_\alpha\;|\;\alpha <
\gamma\rangle$ is such a chain then for every $\alpha,\beta<\gamma$,
\vskip10truept
\noindent$(2.3)$ \hskip 1truecm if $\alpha<\beta$ then
$\delta_{x_\alpha}<\delta_{x_\beta}.$
\vskip10truept
\noindent The term $\omega_1$--{\sl chain} or $(\gamma+1)$--{\sl chain}, where
$\gamma<\omega_1$, is an abbreviation for ``a continuous
$\omega_1$--chain that satisfies $(2.3)$.''
We also note that in one instance we consider club (stationary) sets in
the spaces $[\kappa]^{\omega_1}$ (where $\kappa\geq \omega_2$); these are
defined appropriately.
Throughout the paper we employ the operations of {\sl projection} and
{\sl lifting}, that move sets between the spaces $[\kappa]^{\omega}$ for different
$\kappa$:
If $\kappa_1<\kappa_2$ and if $S$ is a set in $[\kappa_2]^\omega$, then
the projection of $S$ to $\kappa_1$ is the set
\vskip10truept
\noindent$(2.4) \hskip 1truecm \pi(S) = \{x\cap \kappa_1\;|\;x\in S\}.$
\vskip10truept
If $S$ is a set in $[\kappa_1]^\omega$ then the lifting of $S$ to
$\kappa_2$ is the set
\vskip10truept
\noindent$(2.5) \hskip1truecm \hat S = \{x\in
[\kappa_2]^\omega\;|\;x\cap\kappa_1\in S\}.$
\vskip10truept
We recall that the lifting of a club set is a club set and the
projection of a club set contains a club set. Hence, stationarity is
preserved under lifting and projection.
The special case of projection and lifting is when $\kappa = \omega_1$:
\vskip10truept
$\pi(S) = \{\delta_x\;|\;x\in S\},\hskip10truept \hat A = \{x\;|\;\delta_x\in A\}
{(A\subset \omega_1).}$
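As a brief worked instance of the preservation statement above (included
only for orientation; it merely unwinds the definitions), let $A\subset
\omega_1$ be stationary and let $C_F$ be a club in $[\kappa]^{\omega}$ of the form $(2.2)$.
The projection $\pi(C_F)=\{\delta_x\;|\;x\in C_F\}$ contains a club in
$\omega_1$, hence meets $A$, and any $x\in C_F$ with $\delta_x\in A$
witnesses that $\hat A\cap C_F\neq\emptyset$. Thus the lifting $\hat A$ of a
stationary $A\subset\omega_1$ is stationary in $[\kappa]^{\omega}$.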
\begin{definition}
A set $S\subset[\kappa]^{\omega}$ is a {\sl local club} if the set
$$\{X\in[\kappa]^{\aleph_1}\;|\;S\cap[X]^\omega \hbox{\rm \ contains a
club in } [X]^\omega\}$$
contains a club in $[\kappa]^{\aleph_1}.$
\end{definition}
\begin{definition}
A set $S\subset[\kappa]^{\omega}$ is {\sl full} if for every stationary
$A\subset\omega_1$ there exist a stationary $B\subset A$ and a
club $C$ in $[\kappa]^{\omega}$ such that
$$\{x\in C\;|\;\delta_x\in B\}\subset S.$$
(``$S$ contains a club above densely many stationary $B\subset
\omega_1$.'')
\end{definition}
\begin{definition}
A set $S\subset[\kappa]^{\omega}$ is {\sl projective stationary} if for every
stationary set $A\subset \omega_1$, the set $\{x\in S\;|\;\delta_x\in
A\}$ is stationary.
\noindent(``$S$ is stationary above every stationary $A\subset
\omega_1$.'')
\end{definition}
\begin{definition}
A set $S\subset[\kappa]^{\omega}$ is {\sl reflective} if for every club $C$ in
$[\kappa]^{\omega}$, $S\cap C$ contains an $\omega_1$--chain.
\end{definition}
\begin{definition}
If $x$ and $y$ are in $[\kappa]^\omega$, then $y$ is an
$\omega_1$--{\sl extension}
of $x$ if $x\subset y$ and $\delta_x = \delta_y$.
\end{definition}
\begin{definition}
A set $S\subset[\kappa]^{\omega}$ is {\sl spanning} if for every
$\lambda\geq\kappa$, for every club set $C$
in $[\lambda]^\omega$ there exists a
club $D$
in $[\lambda]^\omega$ such that every $x\in D$ has an
$\omega_1$--extension $y\in C$ such that $y\cap \kappa \in S.$
\end{definition}
Local clubs were defined in \cite{FJ1}. Projective stationary sets were
defined in \cite{FJ2}; so were full sets (without the name). Note that
all five properties defined are invariant under the equivalence mod
club filter. All five properties are also preserved under lifting
and projection. For instance, let $S\subset [\kappa_1]^\omega$ be
reflective and let us show that the lifting $\hat S$ to
$[\kappa_2]^\omega$ is reflective. Let $C$ be a club set in
$[\kappa_2]^\omega$ and let $F :\kappa_2^{<\omega}\to\kappa_2$ be such
that $C_F\subset C$. If we let for every $e\in [\kappa_1]^{<\omega}$,
$$f(e) = \kappa_1\cap cl_F(e),$$
where $cl_F(e)$ is the closure of $e$ under $F$, then $C_f$ is a club in
$[\kappa_1]^\omega$. Also for every $x\in C_f$, if $y$ is the closure of
$x$ under $F$ then $y\cap \kappa_1 = x$. Let $\langle
x_\alpha\;|\;\alpha<\omega_1\rangle$ be an $\omega_1$--chain in $S\cap
C_f$, we then let $y_\alpha$ be the closure of $x_\alpha$ under $F$,
then $\langle y_\alpha\;|\;\alpha<\omega_1\rangle$ is an
$\omega_1$--chain in $\hat S \cap C_F$.
The arguments are simpler for the other four properties as well as for
projection.
It is not difficult to see that all the implications in Fig.\ 1.1 hold.
For instance, to see that every spanning set is projective stationary,
note that the definition of projective stationary can be reformulated as
follows: for every club $C$ in $[\kappa]^{\omega}$, the projection of $S\cap C$ to
$\omega_1$ contains a club in $\omega_1$. So let $C$ be a club in
$[\kappa]^{\omega}$.
If $S$ is spanning, then there is a club $D$
in $[\kappa]^{\omega}$ such that all $x\in D$ have an $\omega_1$--extension in $
S \cap C$. Hence $\pi(D)\subset \pi(S\cap C)$, where $\pi$ denotes
the projection to $\omega_1$.
\section{Local clubs and full sets}
Local clubs form a $\sigma$--complete normal filter that extends the
club filter. Local clubs need not contain a club, but they do under the
large cardinal assumption {\sl Weak Reflection Principle} (WRP).
\begin{definition}\cite{FMSh} Weak Reflection Principle at $\kappa$: for
every stationary set $S\subset [\kappa]^{\omega}$ there exists a set $X$ of size
$\aleph_1$ such that $\omega_1\subset X$ and $S\cap [X]^\omega$ is
stationary in $[X]^\omega$ ($S$ reflects at $X$).
\end{definition}
It is not hard to show \cite{FJ1} that WRP at $\kappa$ implies a
stronger version, namely that for every stationary set $S\subset[\kappa]^{\omega}$,
the set of all $X\in [\kappa]^{\omega_1}$ at which $S$ reflects is
stationary in $[\kappa]^{\omega_1}$. In other words, every local club in
$[\kappa]^{\omega}$ contains a club.
Thus WRP is equivalent to the statement that every local club contains a
club. And clearly, WRP at $\lambda > \kappa$ implies WRP at $\kappa$.
The consistency strength of WRP at $\omega_2$ is exactly that of the
existence of a weakly compact cardinal; the consistency of full WRP is
considerably stronger but not known exactly at this time.
\begin{example}
For every ordinal $\eta$ such that $\omega_1\leq \eta<\omega_2$, let
$C_\eta$ be a club set of $[\eta]^\omega$ of order--type $\omega_1$
(therefore $|C_\eta| = \aleph_1$). Let
$\displaystyle{ S = \bigcup\{C_\eta\;|\;\omega_1\leq \eta<\omega_2\} }.$
Then $S$ is a local club in $[\omega_2]^\omega$ and has cardinality
$\aleph_2$. By a theorem of Baumgartner and Taylor \cite{B}, every club
set
in $[\omega_2]^\omega$ has size $\aleph_2^{\aleph_0}$. Therefore, WRP at
$\omega_2$ implies $2^{\aleph_0}\leq \aleph_2$, a result of
Todor\v{c}evi\'{c} \cite{T}.
\end{example}
Let $P$ be a notion of forcing and assume that $|P|\geq\aleph_1$. Let
$\lambda\geq|P|^+$ and consider the model $H_\lambda$ whose language has
predicates for forcing $P$ as well as the forcing relation. Note that
every countable ordinal has a $P$--name in $H_\lambda$.
If $M\in {[H_\lambda]^\omega}$, a condition $q$ is {\sl semi--generic} for $M$ if for
every name $\dot\alpha$ for a countable ordinal such that $\dot\alpha\in
M$ there exists some $\beta\in M$ such that $q\Vdashs \dot\alpha=\beta$.
The forcing $P$ is {\sl semiproper} (Shelah \cite{S}) if the set
\vskip10truept \noindent$(3.1)\hskip1truecm \{M\in{[H_\lambda]^\omega}\;|\;\forall
p\in M\;\exists q<p\; q \hbox{\rm \ is semigeneric for } M\}$
\vskip10truept
\noindent contains a club in ${[H_\lambda]^\omega}$.
In \cite{FJ1}, it is proved that $P$ preserves stationary sets (in
$\omega_1$) if and only if the set $(3.1)$ is a local club. Since
$|H_{|P|^+}| = 2^{|P|}$, we conclude that if $P$ is
stationary--set--preserving, then WRP at $2^{|P|}$ implies that $P$ is
semiproper. Consequently, we have
\begin{theorem}\cite{FMSh}
WRP implies that the class of stationary--set--preserving forcing
notions equals the class of semiproper forcing notions.
\end{theorem}
\begin{example}
Namba forcing \cite{N}. This is a forcing (of cardinality
$2^{\aleph_2}$) that adds a countable cofinal subset of $\omega_2$
without adding new reals (cf. \cite{jech}). It preserves
stationary subsets of $\omega_1$ and by Shelah \cite{S}, it is not
semiproper unless $0^{\#}$ exists.
\end{example}
We use the Namba forcing to get a partial converse of Theorem 3.3: if
stationary--set--preserving equals semiproper, then WRP holds at
$\omega_2$.
\begin{theorem}
If there exists a stationary set $S\subset[\omega_2]^\omega$ that does
not reflect, then the Namba forcing is not semiproper. Hence if every
stationary--set--preserving forcing of size $2^{\aleph_2}$ is
semiproper, then WRP holds at $\omega_2$, and every local club in
$[\omega_2]^\omega$ contains a club.
\end{theorem}
\begin{proof}
Let $S\subset [\omega_2]^\omega$ be a nonreflecting stationary set and
assume that the Namba forcing $P$ is semiproper.
Since $S$ does not reflect, there exists for each $\alpha$,
$\omega_1\leq\alpha<\omega_2$, an operation
$F_\alpha:\alpha^{<\omega}\to\alpha$ such that no $x\in S$ is closed
under $F_\alpha$.
Let $\lambda = (2^{\aleph_2})^+$. As the set $(3.1)$ contains a club,
there exists some $M\in {[H_\lambda]^\omega}$ such that $M\cap\omega_2\in S$,
$\langle F_\alpha\;|\;\omega_1\leq\alpha<\omega_2\rangle\in M$ and there
exists some $q\in P$ semigeneric for $M$.
Let $G$ be a $P$--generic filter (over $V$) such that $q\in G$. In
$V[G]$, look at $M[G]$, where $M[G] = \{\dot x/G\;|\;\dot x\in
M\}$. Since $G$ produces a countable cofinal subset of
$\omega_2^{V}$, $M[G]\cap \omega_2^{V}$ is cofinal in
$\omega_2^{V}$. Let $\alpha<\omega_2^{V}$ be the least ordinal in
$M[G]$ that is not in $M$. Since $G$ contains a semigeneric
condition for $M$, we have $M[G]\cap\omega_1=M\cap\omega_1$ and so
$\omega_1\leq\alpha<\omega_2^{V}$ and
$M[G]\cap\alpha=M\cap\alpha.$ Since $\alpha\in M[G]$, $F_\alpha\in
M[G]$. Hence $M[G]\cap\alpha$ is closed under $F_\alpha$. It
follows that $x= M\cap\alpha = M[G]\cap\alpha$ belongs to $S$ and
is closed under $F_\alpha$. This is a contradiction.
\end{proof}
Now we turn our attention to full sets. First we reformulate the
definition: $S\subset[\kappa]^{\omega}$ is full if and only if there exists a
maximal antichain $W$ of stationary subsets of $\omega_1$ such that for
every $A\in W$, there exists a club $C_A$ in $[\kappa]^{\omega}$ with $\hat A\cap
C_A\subset S$, where $\hat A$ is the lifting of $A$ from $\omega_1$ to
$[\kappa]^{\omega}$.
We remark that the full sets form a filter, not necessarily
$\sigma$--complete. It is proved in \cite{FJ2} that
$\sigma$--completeness of the filter of full sets is equivalent to the
presaturation of the nonstationary ideal on $\omega_1$. It is also known
that presaturation follows from WRP which shows that WRP is a large
cardinal assumption.
\begin{example}
Let $W$ be a maximal antichain of stationary subsets of $\omega_1$ and
consider the model $\langle H_\lambda,\in,<,\cdots\rangle$, whose
language has a predicate for $W$. Let
$$S_W=\{ M\in {[H_\lambda]^\omega}\;|\;(\exists A\in W\cap M)\;\delta_M\in A\}.$$
The clubs $C_A=\{M\in{[H_\lambda]^\omega}\;|\;A\in M\}$ for $A\in W$ witness that $S_W$
is full.
\end{example}
We will now show that the sets $S_W$ from Example 3.5 generate the
filter of full sets:
\begin{lemma}
Let $S$ be a full set in $[\kappa]^{\omega}$. There exists a model $\langle
H_\lambda,\in,<,\cdots\rangle$, where $\lambda = \kappa^+$, and a
maximal antichain $W$ of stationary subsets of $\omega_1$ such
that $S_W\subset \hat S.$
\end{lemma}
\begin{proof}
Let $S$ be full in $[\kappa]^{\omega}$. By the reformulation of full sets, let $W$ be
a maximal antichain and for each $A\in W$, let $F_A :
\kappa^{<\omega}\to \kappa$ be an operation such that
$$\{x\in C_{F_A}\;|\;\delta_x\in A\}\subset S.$$
Consider a model $\langle H_\lambda,\in,<,\cdots\rangle$, $\lambda
= \kappa^+$, whose language has a predicate for $W$ as well as for
the function assigning the operation $F_A$ to each $A\in W$. We
claim that for every $M\in S_W$, $M\cap\kappa\in S$. To see this,
let $M\in {[H_\lambda]^\omega}$ and let
$A\in W\cap M$ be such that $\delta_M\in A$. Then $M$ is closed under
$F_A$ and so $M\cap \kappa\in C_{F_A}$ and $\delta_{M\cap\kappa} =
\delta_M\in A$. Hence, $M\cap\kappa\in S$.
\end{proof}
Consequently, the filter of full sets on $[\kappa]^{\omega}$ is generated by
the projections of the sets $S_W$ on ${[H_\lambda]^\omega}$ with $\lambda =
\kappa^+$.
In \cite{FJ2}, it is proved that the statement that every full set
contains a club is equivalent to the saturation of the nonstationary
ideal on $\omega_1$ (and so is the statement that every full set
contains an $\omega_1$--chain). More precisely,
\begin{theorem}\cite{FJ2}
(a) If the nonstationary ideal on $\omega_1$ is saturated then for every
$\kappa\geq\omega_2$, every full set in $[\kappa]^{\omega}$ contains a club.
(b) If every full set in $[H_{\omega_2}]^\omega$ contains an
$\omega_1$--chain, then the ideal of nonstationary subsets of $\omega_1$
is saturated.
\end{theorem}
Consequently, ``every full set is reflective'' is equivalent to ``every
full set contains a club'' and follows from large cardinal assumptions
(such as MM). The consistency of ``full $=$ club'', being that of the
saturation of $NS_{\omega_1}$, is quite strong. Neither ``local club $=$
club'' nor ``full $=$ club'' implies the other: WRP holds in a model in
which $NS_{\omega_1}$ is not saturated, while the saturation of
$NS_{\omega_1}$ is consistent with $2^{\aleph_0}>\aleph_2$, which
contradicts WRP. Both are consequences of MM, which therefore implies
that ``club $=$ local club $=$ full''.
\section{Projective stationary and spanning sets}
In this section, we investigate projective stationary and spanning sets
and particularly a forcing notion associated with such sets. Among
others we show that WRP implies that every projective stationary set is
spanning (and then spanning $=$ projective stationary).
First we prove a theorem (that generalizes Baumgartner and Taylor's
result \cite{B}
on clubs) that shows that the equality does not hold in ZFC. Every
spanning subset of $[\omega_2]^\omega$ has size $\aleph_2^{\aleph_0}$
while Example 3.2 gives a projective stationary (even a local club) set
of $[\omega_2]^\omega$ of size $\aleph_2$. Thus the equality ``spanning
$=$ projective stationary'' implies $2^{\aleph_0}\leq \aleph_2$.
\begin{theorem}
Every spanning set in $[\omega_2]^\omega$ has size
$\aleph_2^{\aleph_0}$.
\end{theorem}
\begin{proof}
Let $S\subset [\omega_2]^\omega$ be spanning. We shall find
$2^{\aleph_0}$ distinct elements of $S$. Let $F : [\omega_2]^2\to
\omega_1$ be such that for each $\eta<\omega_2$, the function $F_\eta$,
defined by $F_\eta(\xi) = F(\{\xi,\eta\})$, is a one--to--one mapping
of $\eta$ to $\omega_1$. As $S$ is spanning, there exists an operation
$G$ on $\omega_2$ such that every
$M\in [\omega_2]^\omega$ closed under $G$ has an $\omega_1$--extension
$N$ that is closed under $F$ and $N\in S$.
We shall find models $M_f$, $f\in 2^\omega$, closed under $G$, and
$\delta<\omega_1$ such that
\vskip10truept
\noindent$(4.1)$
{(a)} $\delta_{M_f}\leq \delta$ for each $f$, and
\hskip8truept{(b)} if $f\not=g$ then there exist $\xi\in M_f$ and
$\eta\in M_g$ such that $F(\xi,\eta)\geq\delta.$
\vskip10truept
Now assume that we have models $M_f$ that satisfy $(4.1)$. If $f\not=g$
and if $x\in [\omega_2]^\omega$ is such that $M_f\cup
M_g\subset x$ and $x$ is closed under $F$, then
$\delta_x>\delta.$ Hence if $N_f$ and $N_g$ are $\omega_1$--extensions
of $M_f$ and $M_g$, respectively, and are closed under $F$, then
$N_f\not=N_g$. Thus we get $\{N_f\;|\;f\in
2^\omega\}$ such that the $N_f$'s are $2^{\aleph_0}$ elements
of $S$.
Toward the construction of the models $M_f$, let
$c_\alpha\subset\alpha$,
for each $\alpha<\omega_2$ of cofinality $\omega$, be a set of order
type $\omega$ with $\sup c_\alpha = \alpha$ and let $M_\alpha$ be the
closure of $c_\alpha$ under $G$. Let $Z\subset\omega_2$ and
$\delta<\omega_1$ be such that $Z$ is stationary and for each $\alpha\in
Z$, $M_\alpha\subset\alpha$ and
$\delta_{M_\alpha}=\delta.$
We shall find, for each $s\in 2^{<\omega}$ (the set of all finite
$0$--$1$--sequences), a stationary set $Z_s$ and an ordinal
$\xi_s<\omega_2$ such that
\vskip10truept
\noindent$(4.2)$
{(i)} if $s\subset t$, then $Z_t\subset Z_s$,
\hskip8truept{(ii)} $(\forall\;\alpha\in Z_s)\;\xi_s\in c_\alpha$,
\hskip8truept{(iii)} $\xi_{\langle s0\rangle} < \xi_{\langle
s1\rangle}$ and $F(\xi_{\langle s0\rangle}, \xi_{\langle
s1\rangle})\geq\delta.$
\vskip10truept
Once we have the ordinals $\xi_s$, we let, for each
$f\in 2^\omega$, $M_f$ be the closure under $G$ of the set
$\{\xi_{f\restriction n}\;|\;n<\omega\}$. Clearly,
$$M_f=\bigcup_{n=0}^{\infty}M_{f\restriction n},$$ where for each $s\in
2^{<\omega}$, $M_s$ is the closure under $G$ of
$\{\xi_{s\restriction 0},\cdots, \xi_s\}$. Since $M_s\subset
M_\alpha$ for $\alpha\in Z_s$, we have $\delta_{M_f}\leq \delta$
for every $f\in 2^\omega$. The condition $(4.2)(iii)$ guarantees
that the models $M_f$ satisfy $(4.1)$.
The $Z_s$ and $\xi_s$ are constructed by induction on $|s|$. Given
$Z_s$, there are $\aleph_2$ ordinals $\xi$ such that $S_\xi =
\{\alpha\in Z_s\;|\;\xi\in c_\alpha\}$ is stationary. Consider the first
$\omega_1+1$ of these $\xi$'s and let $\eta = \xi_{\langle s1\rangle}$
be the $\omega_1+1$st element, and $Z_{\langle s1\rangle} = S_\eta$.
Then find some $\xi < \eta$ among the first $\omega_1$ elements such
that $F_\eta(\xi)\geq\delta$ and let $\xi_{\langle s0\rangle}$ be such an
ordinal $\xi$ and let $Z_{\langle s0\rangle} = S_\xi.$
\end{proof}
In Definition 2.6, we defined spanning sets in $[\kappa]^{\omega}$ as satisfying a
certain condition at every $\lambda\geq\kappa$. The following lemma
shows that it is enough to consider the condition at $H_{\kappa^+}$.
\begin{lemma}
A set $S\subset[\kappa]^{\omega}$ is spanning if and only if for every club $C$ in
$[H_{\kappa^+}]^\omega$ there exists a club $D$ in
$[H_{\kappa^+}]^\omega$ such that every $M\in D$ has an
$\omega_1$--extension $N\in C$ such that $N\cap\kappa\in S$.
\end{lemma}
\begin{proof}
It is easy to verify that if the condition $\forall C\;\exists D $
etc. holds at some $\mu>\lambda$ then it holds at $\lambda$. Thus
assume that $\lambda\geq\kappa^+$ and the condition of the lemma
holds and let us prove that for every club $C$ in ${[H_\lambda]^\omega}$ there
exists a club $D$ in ${[H_\lambda]^\omega}$ such that every $M\in D$ has an
$\omega_1$--extension $N\in C$ such that $N\cap\kappa\in S$.
Let $C$ be a club in ${[H_\lambda]^\omega}$ and let $F$ be an operation on $H_\lambda$
such that $C_F\subset C$. Let $C_0$ be a club in
$[H_{\kappa^+}]^\omega$ such that $\hat C_0\subset C_F$. Let $D_0$
be a club in $[H_{\kappa^+}]^\omega$ such that every $M_0\in D_0$ has an
$\omega_1$--extension $N_0\in C_0$ with $N_0\cap\kappa\in S$. Let
$D=\hat D_0$ be the set of all $M\in {[H_\lambda]^\omega}$ such that $M\cap
H_{\kappa^+}\in D_0$. Let $M\in D$ and let $M_0 = M\cap H_{\kappa^+}$.
Then $M_0\in D_0$. Let $N_0\in C_0$ be an $\omega_1$--extension of $M_0$
such that $N_0\cap\kappa\in S$. We let $N$ be the $F$--closure of $M\cup
(N_0\cap\kappa)$ in $H_\lambda$. The model $N$ is in $C_F$. We claim
that $N\cap\kappa=N_0\cap\kappa$. This shall give us that
$N\cap\kappa\in S$ and $N$ is an $\omega_1$--extension of $M$.
Let $\alpha\in N\cap\kappa$. Let $\tau$ be a Skolem term in
$(H_\lambda,\in,<,F)$ and let $a\in M$ and $\alpha_0,\cdots,\alpha_n\in
N_0\cap \kappa$ be such that $\alpha =
\tau(a,\alpha_0,\cdots,\alpha_n).$
Define $h:[\kappa]^{n+1}\to\kappa$ by
$$h(\beta_0,\cdots,\beta_n) =
\begin{cases}
\tau(a,\beta_0,\cdots,\beta_n)&\hbox{\rm if }
\tau(a,\beta_0,\cdots,\beta_n)<\kappa,\\
0&\hbox{\rm otherwise.}
\end{cases}
$$
Then $h\in M$ and hence $h\in M\cap H_{\kappa^+} = M_0\subset N_0.$
Therefore,
$$\alpha =h(\alpha_0,\cdots,\alpha_n)\in N_0\cap\kappa.$$
\end{proof}
\begin{definition}
Let $S\subset [\kappa]^{\omega}$ be a stationary set. $P_S$ is the forcing notion
that shoots an $\omega_1$--chain through $S$: forcing conditions are
continuous $(\gamma+1)$--chains, $\langle
x_\alpha\;|\;\alpha\leq\gamma\rangle$, $\gamma<\omega_1$, such that
$x_\alpha\in S$ for each $\alpha$, and
$\delta_{x_\alpha}<\delta_{x_\beta}$ when $\alpha<\beta\leq\gamma.$ The
ordering is by extension.
\end{definition}
The forcing $P_S$ does not add new countable sets and so $\omega_1$ is
preserved. The generic $\omega_1$--chain is cofinal in $[\kappa]^{\omega}$ and so
$\kappa$ is collapsed to $\omega_1$.
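(Cofinality of the generic chain follows from a standard density argument,
which we sketch only for completeness: for each $\zeta<\kappa$ the set of
conditions whose top element contains $\zeta$ is dense, since a condition
with top element $x$ can be extended by appending any $y\in S$ with
$x\cup\{\zeta\}\subset y$ and $\delta_y>\delta_x$, and such a $y$ exists
because $S$ is stationary and the collection of all $y$ with these two
properties contains a club in $[\kappa]^{\omega}$.)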
The following theorem gives a characterization of projective stationary
sets and spanning sets in terms of the forcing $P_S$:
\begin{theorem}
{ (a)} A set $S\subset [\kappa]^{\omega}$ is projective stationary if and only
if the forcing $P_S$ preserves stationary subsets of $\omega_1$.
{ (b)} A set $S\subset[\kappa]^{\omega}$ is spanning if and only if the
forcing $P_S$ is semiproper.
\end{theorem}
\begin{proof}
(a) This equivalence was proved in \cite{FJ2}; we include the proof for
the sake of completeness.
Let $A$ be a stationary subset of $\omega_1$. We will show that $P_S$
preserves $A$ if and only if $\hat A \cap S$ is stationary.
First assume that $\hat A\cap S$ is nonstationary and let
$C\subset\omega_1$ be a club such that for every $x\in S$,
$\delta_x\not\in C\cap A$.
Let $\langle x_\alpha\;|\;\alpha<\omega_1\rangle$ be a generic
$\omega_1$--chain and let $D =
C\cap\{\delta_{x_\alpha}\;|\;\alpha<\omega_1\}.$ Then $D$ is a club in
$V[G]$ disjoint from $A$.
Conversely, assume that $\hat A\cap S$ is stationary. We will show that
$A$ remains stationary in $V[G]$. Let $\dot C$ be a name for a club in
$\omega_1$ and let $p$ be a condition. Let $\lambda$ be sufficiently
large. Since $\hat A\cap \hat S$ is stationary in ${[H_\lambda]^\omega}$, there exists a
countable model $M$ containing $\dot C$ and $p$ such that $\delta_M\in
A$ and $M\cap \kappa\in S$. Let $\langle
x_\alpha\;|\;\alpha<\delta_M\rangle$ be an $M$--generic
$\delta_M$--chain extending $p$. By genericity,
$\displaystyle{M\cap\kappa = \bigcup\{x_\alpha\;|\;\alpha<\delta_M\} }.$
Since $M\cap\kappa\in S$, it can be added on top of the chain $\langle
x_\alpha\;|\;\alpha<\delta_M\rangle$ to form a condition $q$. This
condition extends $p$ and forces that $\delta_M$ is a limit point of
$\dot C$, and hence $q$ forces that $\delta_M \in \dot C \cap A.$
Therefore, $A$ is stationary in $V[G]$.
(b) First let $S$ be a spanning set in $[\kappa]^{\omega}$. Let $\lambda\geq
(2^\kappa)^+$ (note that $|P_S|\leq 2^\kappa$) and let us prove that the
set $(3.1)$ contains a club in ${[H_\lambda]^\omega}$.
Let $C$ be the club of all models $N\in {[H_\lambda]^\omega}$ that contain $S$, the
forcing $P_S$ and the forcing relation. By definition $2.6$, there exists
a club $D$ in ${[H_\lambda]^\omega}$ such that every $M\in D$ has an
$\omega_1$--extension $N\in C$ such that $N\cap\kappa\in S$. We claim
that the set $(3.1)$ contains $D$.
Let $M\in D$ and $p\in M$. Let $N\in C$ be an $\omega_1$--extension of
$M$ such that $N\cap\kappa\in S$. We enumerate
all ordinals in $N\cap \kappa$ and
all names $\dot\alpha \in
N$ for ordinals.
Starting with $p_0=p$, construct a sequence of conditions
$p_0>p_1>\cdots > p_n>\cdots$ such that $p_n\in N$ for each $n$, and
for every $\dot\alpha\in N$ there are
some $p_n$ and $\beta\in N$ such that $p_n\Vdashs \dot\alpha = \beta$,
and that for every $\gamma\in N\cap\kappa$ there is some $p_n = \langle
x_\xi\;|\;\xi\leq\alpha\rangle$ such that $\gamma\in x_\alpha$. The
sequence produces a continuous chain whose limit is the set $N\cap
\kappa$. Since $N\cap \kappa\in S$, it can be put on top of this chain
to form a condition $q<p$ that decides every ordinal name in $N$
as an ordinal in $N$. Now since $N$ is an $\omega_1$--extension
of $M$, they have the same set of countable ordinals and it follows that
$q$ is semigeneric for $M$.
Conversely, assume that $P_S$ is semiproper. Let $\lambda\geq
(2^\kappa)^+$ and let $C$ be a club in ${[H_\lambda]^\omega}$. Let $F$ be an operation
on $H_\lambda$ such that $C_F\subset C$. Let $\mu>\lambda$ be such
that $F\in H_\mu$. Since $P_S$ is semiproper, there is a club
$D\subset{[H_\lambda]^\omega}$ such that every model in $D$ has the form $M\cap
H_\lambda$, where $F\in M\in [H_\mu]^\omega$, and there is a semigeneric
condition for $M$. We shall prove that every $M\cap H_\lambda\in D$ has
an $\omega_1$--extension $N$ in $C_F$ such that $N\cap \kappa\in S$.
Let $M\cap H_\lambda\in D$ and let $q$ be a semigeneric condition for
$M\in [H_\mu]^\omega$. Let $G$ be a generic filter on $P_S$ over $V$
such that $q\in G$. Working in $V[G]$,
let $M[G]$ be the set of all $\dot a/G$ for $\dot a \in M,$ and
let $N=M[G]\cap (H_\lambda)^V$.
Since $P_S$ does not add new countable sets, $N\in V$. Since $F\in
M[G]$, $M[G]$ is closed under $F$, and so is $N$. Hence $N\in C_F$.
Since
$q$ is semigeneric for $M$, $M[G]\cap\omega_1 = M\cap\omega_1$, and so
$N$ is an $\omega_1$--extension of $M\cap H_\lambda$. Since
the union of the generic $\omega_1$--chain $\langle
x_\alpha\;|\;\alpha<\omega_1\rangle$ is $\kappa$, we claim that the
union of $\langle
x_\alpha\;|\;\alpha<\delta_M\rangle$ is $M[G]\cap \kappa = N\cap\kappa$.
Granting this claim,
this union is $x_{\delta_M}$ and $\langle
x_\alpha\;|\;\alpha\leq\delta_M\rangle$ is a condition in $P_S$.
Therefore, $x_{\delta_M}\in S$, and hence $N\cap\kappa\in S$.
We now proceed to prove the claim.
We just need to check that $x_{\delta_M} = M[G]\cap \kappa.$
We have $M\subset M[G]$ and $G\in M[G]$.
In $V[G]$, $G$ defines a bijection $f : \omega_1\to\kappa$.
Let $\dot f\in M$ be a canonical name for this $f$.
We then have that
$$\Vdash \forall p\in \dot
G\;\exists\;\alpha<\omega_1\;\forall\;\gamma<\mathrm{dom}(p)\;p(\gamma)\subset
\dot f''\alpha$$ and
$$\Vdash\forall\;\alpha<\omega_1\;\exists\;p\in\dot
G\;\forall\gamma<\alpha\;\exists\;\beta<\mathrm{dom}(p)\;\dot
f(\gamma)\in p(\beta).$$
Also, $\dot f/G\in M[G]$ and
$\dot f/G\cap M[G] : \delta_M\to M[G]\cap\kappa$
is a bijection.
First we check that $M[G]\cap\kappa\subset x_{\delta_M}.$
Let $\alpha\in M[G]\cap\kappa$. Let $\dot\alpha\in M$ be a name such
that $\Vdashs \dot\alpha <\kappa$ and $\alpha = \dot\alpha/G$.
Then $$\Vdashs \exists p\in \dot G (\dot\alpha\in \bigcup p).$$
Hence $M\models \exists\;\xi<\omega_1\;\dot\alpha\in \dot x_{\xi}.$ Let
$\dot\xi\in M$ be a name for a countable ordinal such that
$$\Vdashs \dot\alpha\in \dot x_{\dot\xi}.$$
Since the semigeneric condition $q$ is in $G$, let $\xi<\delta_M$ be
such that $q\Vdashs \dot\alpha \in \dot x_\xi$. It follows that
$$\alpha = \dot\alpha/G \in (\dot x/G)_\xi\subset x_{\delta_M}.$$
Secondly, we check that $x_{\delta_M}\subset M[G]\cap\kappa.$
Let $\alpha < \delta_M$. Let $\beta\in x_\alpha$. We show that $\beta\in
M[G]$.
Let $p\in M[G]\cap G$ be such that $x_\alpha = p(\alpha)$.
Let $\dot p \in M$ be such that $\dot p/G = p$. Let $\dot \alpha\in M$
be such that $\dot \alpha/G = \alpha$. Let $\dot \xi\in M$ be such that
$$q\Vdash \dot p(\dot \alpha)\subset \dot f''\dot \xi.$$
It follows that $\beta\in M[G]\cap\kappa.$
\end{proof}
As a corollary, if stationary--set--preserving $=$ semiproper, then
projective stationary $=$ spanning. We shall prove the converse later in
this section.
It follows that WRP implies that projective stationary $=$ spanning.
More precisely,
\begin{corollary}
If every local club in $[H_{(2^\kappa)^+}]^\omega$ contains a club, then
every projective stationary set in $[\kappa]^{\omega}$ is spanning.
\end{corollary}
Looking at the proof of (b), we observe that the club $D$ in the
definition of spanning is the club that witnesses semiproperness of
$P_S$. If we replace ``club'' by ``local club'', the proof goes through
as before and we get the following characterization of projective
stationary sets.
\begin{lemma}
A set $S\subset[\kappa]^{\omega}$ is projective stationary if and only if for
every $\lambda\geq \kappa$, for every club $C\subset [\lambda]^\omega$,
there exists a local club $D$ in $[\lambda]^\omega$ such that every
$x\in
D$ has an $\omega_1$--extension $y$ in $C$ such that $y\cap\kappa\in S$.
\end{lemma}
The quantifier $\forall\; C$ in Definition 2.6 and Lemma 4.6 can
be removed by the following trick. Let $S$ be a stationary set in $[\kappa]^{\omega}$
and let $\lambda\geq \kappa^+$ and $\mu = \lambda^+$. Let
\vskip10truept
\noindent$(4.3)$ \hskip1truecm $S^*_\lambda = \{ M\cap
H_\lambda\;|\;M\in [H_\mu]^\omega,\; S\in
M\;\hbox{\rm and}\;M\cap\kappa\in S\}$, \vskip10truept
\noindent and
\vskip10truept
\noindent$(4.4)$ \hskip1truecm Sub($S^*_\lambda$) $=\{ M\in {[H_\lambda]^\omega}\;|\;
M$ has an $\omega_1$--extension $N\in S^*_\lambda\}$.
\vskip10truept
Here we assume that $H_\mu$ has Skolem functions and $M\in
[H_\mu]^\omega$ is an elementary submodel. The set $S^*_\lambda$ is a
stationary subset of ${[H_\lambda]^\omega}$ and is equivalent to the lifting of $S$.
\begin{lemma}
(a) $S$ is spanning if and only if Sub($S^*_\lambda$) contains a
club.
(b) $S$ is projective stationary if and only if Sub($S^*_\lambda$)
is a local club.
\end{lemma}
\begin{proof}
We prove (a) as (b) is proved similarly.
Let $\lambda\geq\kappa^+$ and $\mu=\lambda^+$.
First assume that $S$ is spanning. Let
$$C = \{ M\cap H_\lambda\;|\;M\in [H_\mu]^\omega\; \hbox{\rm
and}\;S\in M\}.$$ Let $D$ be a club in ${[H_\lambda]^\omega}$ such that every
$M\in D$ has an $\omega_1$--extension $N\in C$ with
$N\cap\kappa\in S$. Then $D\subset $ Sub($S^*_\lambda$).
Conversely, assume that $S$ is not spanning. Let $C = C_F$ be the
least counterexample. As $F$ is definable in $H_\mu$ from $S$, it
belongs to every elementary countable submodel $M$ of $H_\mu$ such
that $S\in M$. Hence every $N\in S^*_\lambda$ is closed under $F$
and it follows that $S^*_\lambda\subset C$. Therefore, every $M\in
$ Sub($S^*_\lambda$) has an $\omega_1$--extension $N\in C$ such
that $N\cap\kappa\in S$. Since $C$ is a counterexample,
Sub($S^*_\lambda$) does not contain a club.
\end{proof}
Now we prove that projective stationary $=$ spanning implies that
stationary--set--preserving $=$ semiproper. This is a consequence of the
following lemma.
\begin{lemma}
Let $P$ be a forcing ($|P|\geq\aleph_1$) and let $\lambda\geq |P|^+$.
(a) $P$ is semiproper if and only if the set $(3.1)$ is spanning.
(b) $P$ preserves stationary sets in $\omega_1$ if and only if the
set $(3.1)$ is projective stationary.
\end{lemma}
\begin{proof}
Both (a) and (b) have the same proof, using Definition 2.6 and Lemma
4.6. The left--to--right implications are obvious, as club implies
spanning and local club implies projective stationary. Thus assume (for
(a)) that the set (3.1) is spanning. It follows from Definition 2.6
that there exists a club $D$ in ${[H_\lambda]^\omega}$ such that every $M\in D$ has an
$\omega_1$--extension in the set (3.1). But since every condition that
is semigeneric for an $\omega_1$--extension of $M$ is semigeneric for
$M$, it follows that every $M\in D$ belongs to the set (3.1). Thus the
set (3.1) contains a club and $P$ is semiproper.
\end{proof}
\begin{corollary}
If every projective stationary set is spanning, then every forcing that
preserves stationary sets of $\omega_1$ is semiproper.
\end{corollary}
We conclude Section 4 with the following diagram describing the
implications under the assumption of WRP.
$$\begin{array}{ccccc}
&&{\hbox{\rm full}}&&\cr
&\nearrow&&\searrow&\cr
{\hbox{\rm club $=$ local club}}&&&&{\hbox{\rm projective stationary $=$
spanning}}\cr
&\searrow&&\nearrow&\cr
&&{\hbox{\rm reflective}}&&
\end{array}$$
$$\hbox{\rm Fig.\ } 4.1$$
\section{Strong reflection principle}
The Strong Reflection Principle (SRP) is the statement that every
projective stationary set contains an $\omega_1$--chain. Thus SRP
implies that every projective stationary set is reflective and that
every full set contains a club. As SRP implies WRP (cf. \cite{FJ1}) we
also have local club $=$ club and projective stationary $=$ spanning,
obtaining the diagram (Fig. 1.2) from the introduction.
We shall now look more closely at spanning sets and prove, among other
things, that if all spanning sets contain an $\omega_1$--chain then SRP holds.
\begin{definition}
For $X\subset [\kappa]^{\omega}$, let
$$X^{\bot} = \{ M\in [H_{\kappa^+}]^\omega\;|\;M \hbox{\rm \ has no
$\omega_1$--extension $N$ such that } N\cap \kappa\in X\}.$$
\end{definition}
The set $X^{\bot}$ is a subset of $[H_{\kappa^+}]^\omega$ and is
disjoint from $\hat X$. If $X$ is nonstationary, then $X^\bot$ contains
a club. Let us therefore restrict ourselves to stationary sets
$X\subset [\kappa]^{\omega}$.
\begin{lemma}
{(i)} If $S_1\subset S_2\subset[\kappa]^{\omega}$, then $S_2^\bot\subset
S_1^\bot.$
{(ii)} If $S_1\equiv S_2$ mod club filter, then $S_1^\bot\equiv
S_2^\bot$ mod club filter.
{(iii)} $\hat S\cup S^\bot$ is spanning (where $\hat S$ is the
lifting of $S$ to $H_{\kappa^+}$).
{(iv)} $S$ is spanning if and only if $S^\bot$ is nonstationary.
\end{lemma}
\begin{proof}
(i) is immediate from the definition.
(ii) Let $F : \kappa^{<\omega}\to \kappa$ be such that $S_1\cap C_F =
S_2\cap C_F$. Let $D = \{ M\in [H_{\kappa^+}]^\omega\;|\;F\in M\}.$ $D$ is a club in
$[H_{\kappa^+}]^\omega$. We claim that $S_1^\bot\cap D = S_2^\bot\cap D.$
If $M\in D$ and $M\not\in S_1^\bot$, then $M$ has an
$\omega_1$--extension $N$ such that $N\cap\kappa\in S_1$. Since $F\in
M\subset N$, $N\cap \kappa$ is closed under $F$. So $N\cap\kappa\in
S_2$. Hence $M\not\in S_2^\bot$. Similarly for the other direction, and
so we have $S_1^\bot\cap D = S_2^\bot\cap D.$
(iii) Let $\lambda\geq \kappa^+$ be arbitrary and let $C$ be a club in
${[H_\lambda]^\omega}$. Let $F : H_\lambda^{<\omega}\to H_\lambda$ be such that
$C_F\subset C$. We claim that every $M\in C_F$ has an
$\omega_1$--extension $N\in C$ such that $N\cap H_{\kappa^+}\in \hat
S\cup S^\bot$, i.e., either $N\cap \kappa\in S$ or $N\cap
H_{\kappa^+}\in S^\bot$.
Let $M\in C_F$. If $M\cap H_{\kappa^+}\in S^\bot$, then we are done.
Otherwise, let $M_0 = M\cap H_{\kappa^+}$. $M_0$ has an
$\omega_1$--extension $N_0\in [H_{\kappa^+}]^\omega$ such that $N_0\cap\kappa\in S$. Let
$N$ be the closure of $M\cup (N_0\cap\kappa)$ under $F$. We have that
$N\in C$ and $M\subset N$. By an argument exactly as in the proof of
Lemma 4.2, we conclude that $N\cap\kappa = N_0\cap\kappa.$ Hence $N$ is
an $\omega_1$--extension of $M$ and $N\cap\kappa\in S$.
(iv) If $S$ is spanning then by definition the set of all $M\in [H_{\kappa^+}]^\omega$
that do have an $\omega_1$--extension $N$ with $N\cap\kappa\in S$
contains a club, and hence $S^\bot$ is nonstationary. If $S^\bot$ is
nonstationary, then, since $\hat S\cup S^\bot$ is spanning, $\hat S$
must be spanning. Hence $S$ is spanning.
\end{proof}
\begin{theorem}
If every spanning set in $[H_{\kappa^+}]^\omega$ contains an $\omega_1$--chain, then
every projective stationary set in $[\kappa]^{\omega}$ contains an $\omega_1$--chain.
\end{theorem}
\begin{proof}
Let $S$ be a projective stationary set in $[\kappa]^{\omega}$. By Lemma 5.2(iii),
$\hat S\cup S^\bot$ is spanning in $[H_{\kappa^+}]^\omega$ and therefore contains an
$\omega_1$--chain $\langle M_\alpha\;|\;\alpha<\omega_1\rangle$. We
claim that
$\{\alpha<\omega_1\;|\;M_\alpha\cap\kappa\in S\}$ contains a club and
therefore $S$ contains an $\omega_1$--chain.
Suppose not. The set $A=\{\alpha<\omega_1\;|\;M_\alpha\in
S^\bot\;\hbox{\rm and}\;\alpha = \delta_{M_\alpha}\}$ is stationary. Let
$$C = \{N\in[H_{\kappa^+}]^\omega\;|\;\kappa\in N\;\hbox{\rm and}\;(\forall\;\beta\in
N\cap\omega_1)\;M_\beta\in N\}.$$
$C$ is a club in $[H_{\kappa^+}]^\omega$. Since $S$ is projective stationary, there
exists an $N\in C$ such that $\delta_N\in A$ and $N\cap\kappa\in S$.
For every $\alpha <\delta_N$ we have $M_\alpha\subset N$. Hence
$M_{\delta_N}\subset N$ and $M_{\delta_N}\cap\omega_1 =
N\cap\omega_1=\delta_N,$ so $N$ is an $\omega_1$--extension of $M_{\delta_N}$
with $N\cap\kappa\in S$. Therefore $M_{\delta_N}\not\in S^\bot$, contradicting
$\delta_N\in A$.
\end{proof}
\begin{corollary}
If every spanning set contains an $\omega_1$--chain, then SRP holds.
\end{corollary}
\section{A structure theorem}
The following definition relativizes projective stationary and spanning.
\begin{definition}
Let $A$ be a stationary set of countable ordinals and let $S\subset
[\kappa]^{\omega}$.
{(a)} $S$ is {\sl projective stationary above} $A$ if for every
stationary $B\subset A$, the set $\{x\in S\;|\;\delta_x\in B\}$ is
stationary.
{(b)} $S$ is {\sl spanning above} $A$ if for every club
$C\subset[H_{\kappa^+}]^\omega$ there exists a club $D$ in $[H_{\kappa^+}]^\omega$ such that every $M\in
D$ with $\delta_M\in A$ has an $\omega_1$--extension $N\in C$ such that
$N\cap\kappa\in S$.
\end{definition}
The following result is proved in \cite{FJ2}.
\begin{lemma}
If the nonstationary ideal on $\omega_1$ is saturated, then for every
stationary set $S\subset[\kappa]^{\omega}$ there exists a stationary
$A\subset\omega_1$ such that $S$ is projective stationary above $A$.
\end{lemma}
Notice that the conclusion of the lemma can be stated as: the complement
of $S$ is not full. Thus Lemma 6.2 is a reformulation of Theorem 3.8(a).
\begin{corollary}
If the nonstationary ideal on $\omega_1$ is saturated then for every
stationary $S\subset[\kappa]^{\omega}$ there exists a stationary $A\subset
\omega_1$ such that
{(i)} $S$ is projective stationary above $A$, and
{(ii)} $\{x\in S\;|\;\delta_x\not\in A\}$ is nonstationary.
\end{corollary}
\begin{proof}
Let $W$ be a maximal antichain of stationary sets $A\subset \omega_1$
such that $S$ is projective stationary above $A$. Since
$|W|\leq\aleph_1$, there exists a stationary $A_S$ such that
$$A_S = \Sigma\{A\;|\;A\in W\}$$
in the Boolean algebra $P(\omega_1)/{NS}$. It is easy to verify that
$A_S$ has the two properties.
\end{proof}
\begin{corollary}
SRP implies WRP. In fact, assuming SRP, for every stationary
$S\subset[\kappa]^{\omega}$ there exists a set $X$ of size $\aleph_1$ such that
$\omega_1\subset X$ and an $\omega_1$--chain $\langle
N_\alpha\;|\;\alpha<\omega_1\rangle$ with $\alpha = \delta_{N_\alpha}$
for all $\alpha<\omega_1$ such that $X =
\displaystyle{\bigcup_{\alpha<\omega_1} N_\alpha}$ and $N_\alpha\in S$
for every $\alpha\in A_S$.
\end{corollary}
\begin{proof}
The set $S\cup \{x\;|\;\delta_x\not\in A_S\}$ is projective stationary
and by SRP it contains an $\omega_1$--chain.
\end{proof}
The proof that WRP implies that projective stationary equals spanning
applies to the relativized notions, i.e., projective stationary above
$A$ equals spanning above $A$. Thus we obtain the following theorem.
\begin{theorem}
Assume SRP. Let $\kappa\geq\omega_2$ and let $S\subset[\kappa]^{\omega}$ be
stationary. There exists a stationary $A_S$ such that
{(i)} for almost all $x\in S,\;\delta_x\in A_S$, and
{(ii)} almost all $x$ with $\delta_x\in A_S$ have an
$\omega_1$--extension $y\in S$.
Moreover, the set $A_S$ is unique mod club filter and if $S_1\equiv
S_2$ then $A_{S_1} \equiv A_{S_2}$.
Also, a stronger version of (ii) holds: for every
$\lambda\geq\kappa$ and every model $(\lambda,\cdots)$, almost all
countable $M\prec (\lambda,\cdots)$ with $\delta_M\in A_S$ have an
$\omega_1$--extension $N\prec(\lambda,\cdots)$ such that
$N\cap\kappa\in S.$
\end{theorem}
\section{Order types and canonical functions}
Two functions $f, g :\omega_1\to\omega_1$ are equivalent (mod club
filter) if the set $\{\alpha<\omega_1\;|\;f(\alpha) = g(\alpha)\}$
contains a club. $f<g$ if and only if
$\{\alpha<\omega_1\;|\;f(\alpha)<g(\alpha)\}$ contains a club. Then $<$
is a well--founded partial order of the equivalence classes and every
function can be assigned a rank in this partial order. For all
$\eta<\omega_2$, there exists a {\sl canonical function} $f_\eta$ such that
$f_\eta$ has rank $\eta$ and, when $\eta$ is a limit ordinal,
$f_\eta$ is the least upper bound of $\{f_\xi\;|\;\xi<\eta\}$. The
canonical functions are unique and for $\omega_1\leq \eta<\omega_2$, if
$g_\eta$ is any one--to--one mapping of $\omega_1$ onto $\eta$, then for
almost all $\alpha<\omega_1$,
\vskip10truept
\noindent $(7.1)\hskip1truecm f_\eta(\alpha) = \hbox{\rm order type of }
\{g_\eta(\beta)\;|\;\beta<\alpha\}.$
\vskip10truept
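As an illustration of $(7.1)$ (a standard example, included here only for
orientation): for $\eta<\omega_1$ the canonical function $f_\eta$ is the
constant function with value $\eta$, and for $\eta=\omega_1$, if
$g_{\omega_1}$ is any one--to--one mapping of $\omega_1$ onto $\omega_1$,
then the set of $\alpha$ with $\{g_{\omega_1}(\beta)\;|\;\beta<\alpha\} =
\alpha$ contains a club, so $f_{\omega_1}(\alpha) = \alpha$ for almost all
$\alpha$; that is, $f_{\omega_1}$ is the identity function mod the club
filter.
\vskip10truept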
The {\sl Boundedness Principle} is the statement
\vskip10truept
\noindent$(7.2)\hskip1truecm (\forall\;g :
\omega_1\to\omega_1)(\exists\;\eta<\omega_2)\;g<f_\eta.$
\vskip10truept
This follows from the saturation of the nonstationary ideal on
$\omega_1$ (but the consistency strength is considerably less).
\begin{theorem}
The boundedness principle is equivalent to the following statement: for
every club $C\subset\omega_1$, the set
\vskip10truept
\noindent$(7.3)\hskip2truecm \{x\in [\omega_2]^\omega\;|\;\hbox{\rm
order--type}(x)\in C\}$
\noindent is a local club.
\end{theorem}
\begin{proof}
First assume that for every club $C$ the set $(7.3)$ is a local club.
Let $g : \omega_1\to\omega_1$ be an arbitrary function.
Let $C =
\{\gamma<\omega_1\;|\;(\forall\;\alpha<\gamma)\;g(\alpha)<\gamma\}.$
Let $\eta$ and
$\langle x_\alpha\;|\;\alpha<\omega_1\rangle$
be such that $\omega_1<\eta<\omega_2$ and
$\langle x_\alpha\;|\;\alpha<\omega_1\rangle$ is an $\omega_1$--chain
which is a club in $[\eta]^\omega$
and for all $\alpha<\omega_1$
order--type$(x_\alpha)\in C$. By our assumption, such $\eta$ exists.
We claim that $g<f_\eta$. By (7.1), $f_\eta(\alpha) = $
order--type$(x_\alpha)$ for almost all $\alpha<\omega_1$. Let
$$D=\{\alpha\in C\;|\;\alpha <f_\eta(\alpha) = \hbox{\rm
order--type}(x_\alpha)\}.$$
For each $\alpha\in D$ we have $f_\eta(\alpha)\in C$ and
$f_\eta(\alpha)>\alpha$, while $g(\alpha) <\alpha'$, where $\alpha'$ is
the least element of $C$ greater than $\alpha$; hence
$g(\alpha)<\alpha'\leq f_\eta(\alpha)$. Thus $g<f_\eta$,
witnessed by $D$.
Conversely, assume that for every $g : \omega_1\to \omega_1$, there
exists an $\eta<\omega_2$ such that $g<f_\eta$. Let
$C\subset\omega_1$ be a club. Consider the set
$$D=\{\eta<\omega_2\;|\;\{\alpha<\omega_1\;|\;f_\eta(\alpha)\in
C\}\hbox{\rm \ contains a club}\}.$$
Using canonicity, it is easy to verify that $D$ is closed. We claim that
$D$ is unbounded.
Let $\eta_0<\omega_2$. We construct a sequence of functions $\langle
g_k\;|\;k<\omega\rangle$ and a sequence of ordinals
$\langle\eta_k\;|\;k<\omega\rangle$ so that
$$f_{\eta_0}<g_0<f_{\eta_1}<g_1<\cdots$$
and that $g_k(\alpha)\in C$ for every $k$ and every $\alpha$. This can
be done since $C$ is unbounded and by our assumption. Let
$$\eta = \hbox{\rm lim}_k\;\eta_k.$$
Then for almost all $\alpha$,
$$f_\eta(\alpha) = \hbox{\rm lim}_{k}\; f_{\eta_k}(\alpha)
=\hbox{\rm lim}_{k}\; g_k(\alpha).$$ Since $C$ is closed, we have
$f_\eta(\alpha) \in C $ for almost all $\alpha$, and so $\eta\in D.$
Now if $\eta\in D$ and $\langle x_\alpha\;|\;\alpha<\omega_1\rangle$ is
a club in $[\eta]^\omega$, then by
(7.1) the order type of $x_\alpha$ is $f_\eta(\alpha)$
for almost all $\alpha<\omega_1$, and therefore
$$\{x\in [\eta]^\omega\;|\;\hbox{\rm order--type}(x)\in C\}$$
contains a club in $[\eta]^\omega$. Thus (7.3) is a local club.
\end{proof}
\begin{corollary}
If SRP holds then for every stationary set $S\subset [\kappa]^{\omega}$,
the set $\{\hbox{\rm order--type}(x\cap\omega_2)\;|\;x\in S\}$ is
stationary.
\end{corollary}
\begin{proof}
SRP implies both the boundedness principle and that local club $=$ club,
and so the set
$$\{x\in [\kappa]^{\omega}\;|\;\hbox{\rm order--type}(x\cap\omega_2)\in C\}$$
contains a club for every club $C\subset \omega_1.$ Since $S$ is stationary,
it meets each of these clubs, and therefore $\{\hbox{\rm
order--type}(x\cap\omega_2)\;|\;x\in S\}$ meets every club
$C\subset\omega_1$, i.e., it is stationary.
\end{proof}
\vskip30truept
\noindent Institute of Mathematics, AMSS, Chinese Academy of Sciences,
Zhong Guan Cun, Beijing 100080, China\\
\noindent{Email:} {\tt [email protected]}\\
\noindent and \\
\noindent Department of Mathematics,
National University of Singapore, 2 Science Drive 2,
Singapore 117543, Republic of Singapore\\
\noindent{Email:} {\tt [email protected]}
\vskip10truept
\noindent Mathematical Institute, The Academy of Sciences of the
Czech
Republic, \v Zitn\'a 25, 115 67 Praha 1, Czech Republic\\
\noindent{Email:} {\tt [email protected]}
\vskip10truept
\noindent Department of Mathematics, University of
Florida, Gainesville,
FL 32611, USA\\
\noindent{Email:} {\tt [email protected]}
\end{document}
|
\begin{document}
\title[Hadamard matrices of order $764$]
{Hadamard matrices of order $764$ exist}
\author[D.\v{Z}. {\hbox{D\kern-.8em\raise.15ex\hbox{--}\kern.35em}} okovi\'{c}]
{Dragomir \v{Z}. {\hbox{D\kern-.8em\raise.15ex\hbox{--}\kern.35em}} okovi\'{c}}
\address{Department of Pure Mathematics, University of Waterloo,
Waterloo, Ontario, N2L 3G1, Canada}
\email{[email protected]}
\thanks{
The author was supported by the NSERC Grant A-5285.}
\keywords{}
\date{}
\begin{abstract}
Two Hadamard matrices of order $764$ of Goethals--Seidel type
are constructed.
\end{abstract}
\maketitle
\subjclassname{ 05B20, 05B30 }
\vskip5mm
Recall that a Hadamard matrix of order $m$ is a $\{\pm1\}$-matrix
$A$ of size $m\times m$ such that $AA^T=mI_m$, where
$T$ denotes the transpose and $I_m$ the identity matrix.
We refer the reader to \cite{KH,SY} for surveys of
known results about Hadamard matrices.
In our previous note \cite{DZ}, written about 13 years ago,
we listed 17 integers $n\le500$ for which no Hadamard matrix
of order $4n$ was known at that time. Two of these integers were
removed in that note and the smallest one, $n=107$, was
removed recently by Kharaghani and Tayfeh-Rezaie \cite{KT}.
Among the remaining 14 integers $n$ only four are less than
1000. The problem of existence of Hadamard matrices of
these four orders, namely 668, 716, 764 and 892, has been
singled out as Research Problem 7 in the recent book \cite{KH}
by Kathy Horadam. In this note we shall remove the integer 764
from the mentioned list by constructing two examples of Hadamard
matrices of Goethals--Seidel type of that order.
(We have constructed a bunch of examples but we shall present
only two of them.)
Consequently, the revised list now consists of 13 integers:
\[ 167,\, 179,\, 223,\, 251,\, 283,\, 311,\, 347,\, 359,\,
419,\, 443,\, 479,\, 487,\, 491; \]
all of them primes congruent to $3 \pmod{4}$.
For the remainder of this note we set $n=191$.
Let $G$ be the multiplicative group of non-zero residue classes
modulo the prime $n=191$, a cyclic group of order $n-1=190$, and let
$H=\langle 39 \rangle =\{ 1,39,184,109,49 \}$ be its subgroup
of order 5. We choose the enumeration of the 38 cosets ${\alpha}_i$,
$0\le i\le 37$, of $H$ in $G$ so that ${\alpha}_{2i+1}=-1\cdot{\alpha}_{2i}$
for $0\le i\le 18$ and
\[
\begin{array}{lllll}
{\alpha}_0=H, \quad & {\alpha}_2=2H, \quad & {\alpha}_4=3H, \quad & {\alpha}_6=4H, \quad & {\alpha}_8=6H, \\
{\alpha}_{10}=8H, & {\alpha}_{12}=9H, & {\alpha}_{14}=11H, & {\alpha}_{16}=12H, & {\alpha}_{18}=13H, \\
{\alpha}_{20}=16H, & {\alpha}_{22}=17H, & {\alpha}_{24}=18H, & {\alpha}_{26}=19H, & {\alpha}_{28}=22H, \\
{\alpha}_{30}=32H, & {\alpha}_{32}=36H, & {\alpha}_{34}=38H, & {\alpha}_{36}=41H. &
\end{array}
\]
Define four index sets:
\begin{eqnarray*}
J_1 &=& \{ 1,7,9,10,11,13,17,18,25,26,30,31,33,34,35,36,37 \}, \\
J_2 &=& \{ 1,4,7,9,11,12,13,14,19,21,22,23,24,25,26,29,36,37 \}, \\
J_3 &=& \{ 0,3,4,5,7,8,9,16,17,19,24,25,29,30,31,33,35,37 \}, \\
J_4 &=& \{ 1,3,4,5,8,11,14,18,19,20,21,23,24,25,28,29,30,32,34,35 \}
\end{eqnarray*}
and introduce the following four sets of integers modulo 191:
\[ S_k = \bigcup_{i\in J_k} {\alpha}_i,\quad k=1,2,3,4. \]
Their cardinalities $n_k=|S_k|=5|J_k|$ are:
\[ n_1=85,\, n_2=n_3=90,\, n_4=100 \]
and we set
\[ {\lambda} = n_1+n_2+n_3+n_4-n=174. \]
For $r\in\{ 1,2,\ldots,190 \}$ let ${\lambda}_k(r)$ denote the number of
solutions of the congruence $i-j\equiv r \pmod{191}$ with
$\{i,j\}\subseteq S_k$. It is easy to verify (by using a computer) that
\[ {\lambda}_1(r)+{\lambda}_2(r)+{\lambda}_3(r)+{\lambda}_4(r)={\lambda} \]
is valid for all such $r$. Hence the sets
$S_1,S_2,S_3,S_4$ are supplementary
difference sets (SDS), with associated decomposition
\begin{eqnarray*}
4n=764 &=& 9^2+11^2+11^2+21^2 \\
&=& \sum_{k=1}^4 (n-2n_k)^2.
\end{eqnarray*}
Let $A_k$ be the $n\times n$ circulant matrix with first row
\[ a_{k,0},\, a_{k,1},\, \ldots ,\, a_{k,n-1} \]
where $a_{k,j}=-1$ if $j\in S_k$ and $a_{k,j}=1$ otherwise.
These $\{\pm1\}$-matrices satisfy the identity
\[ \sum_{k=1}^4 A_kA_k^T=4nI_n. \]
One can now plug in the matrices $A_k$ into the Goethals--Seidel
array to obtain a Hadamard matrix of order $4n=764$.
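The verification mentioned above is mechanical. The following short script
(ours, written only as an illustration; the variable names are not part of
the construction) rebuilds the sets $S_k$ from the cosets and index sets
listed above, checks the supplementary difference set condition, and
confirms the identity $\sum_{k=1}^4 A_kA_k^T=4nI_n$; the same check applies
verbatim to the second family of index sets given below.
\begin{verbatim}
import numpy as np

n, H = 191, [1, 39, 184, 109, 49]              # prime and subgroup <39>
reps = [1, 2, 3, 4, 6, 8, 9, 11, 12, 13,
        16, 17, 18, 19, 22, 32, 36, 38, 41]
cosets = []
for r in reps:                                 # alpha_{2i}=rH, alpha_{2i+1}=-rH
    cosets.append({r * h % n for h in H})
    cosets.append({(n - r) * h % n for h in H})

J = [{1,7,9,10,11,13,17,18,25,26,30,31,33,34,35,36,37},
     {1,4,7,9,11,12,13,14,19,21,22,23,24,25,26,29,36,37},
     {0,3,4,5,7,8,9,16,17,19,24,25,29,30,31,33,35,37},
     {1,3,4,5,8,11,14,18,19,20,21,23,24,25,28,29,30,32,34,35}]
S = [set().union(*(cosets[i] for i in Jk)) for Jk in J]

lam = sum(len(Sk) for Sk in S) - n             # should equal 174
for r in range(1, n):                          # SDS condition for every r
    hits = sum((i - j) % n == r for Sk in S for i in Sk for j in Sk)
    assert hits == lam

A = []
for Sk in S:                                   # circulant {+1,-1}-matrices
    row = np.array([-1 if j in Sk else 1 for j in range(n)])
    A.append(np.array([np.roll(row, i) for i in range(n)]))
assert np.array_equal(sum(Ak @ Ak.T for Ak in A), 4 * n * np.eye(n, dtype=int))
print("SDS and Goethals-Seidel conditions hold; lambda =", lam)
\end{verbatim}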
Our second example is constructed in the same way by using
the index sets:
\begin{eqnarray*}
J_1 &=& \{ 0,1,6,8,9,11,12,16,18,20,21,23,28,31,33,36,37 \}, \\
J_2 &=& \{ 0,1,3,4,10,12,13,17,20,22,24,31,32,33,34,35,36,37 \}, \\
J_3 &=& \{ 4,8,9,10,12,13,14,16,17,20,21,24,26,27,29,31,32,34 \}, \\
J_4 &=& \{ 1,7,9,10,11,12,14,15,16,17,20,22,23,25,28,29,32,33,34, \\
&& \quad 37 \}.
\end{eqnarray*}
The two solutions are not equivalent in the sense that the
two SDS's are not equivalent. (For the definition of equivalence
for SDS's see our note \cite{DZ}.)
\begin{thebibliography}{99}
\providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill}\thinspace}
\bibitem{DZ}
D.\v{Z}. {\hbox{D\kern-.8em\raise.15ex\hbox{--}\kern.35em}}okovi\'{c}, Two Hadamard matrices of order 956 of Goethals--Seidel type,
Combinatorica {\bf 14} (1994), 375--377.
\bibitem{KH}
K.J. Horadam, Hadamard Matrices and Their Applications,
Princeton University Press, 2007.
\bibitem{KT}
H. Kharaghani and B. Tayfeh-Rezaie, A Hadamard matrix of order 428,
J. Combin. Designs {\bf 13} (2005), 435--440.
\bibitem{SY}
J. Seberry and M. Yamada, Hadamard matrices, sequences and block designs,
in ``Contemporary Design Theory, A Collection of Surveys'',
J.H. Dinitz and D.R. Stinson, Eds., J. Wiley, New York, 1992.
\end{thebibliography}
\end{document}
|
\begin{document}
\title{Witnessing nonseparability of bipartite quantum operations}
\author{Sohail}
\email{[email protected]}
\affiliation{Quantum Information and Computation Group, Harish-Chandra Research Institute, HBNI, Jhunsi, Allahabad 211 019, India}
\author{Ujjwal Sen}
\email{[email protected]}
\affiliation{Quantum Information and Computation Group, Harish-Chandra Research Institute, HBNI, Jhunsi, Allahabad 211 019, India}
\begin{abstract}
We provide a method for witnessing nonseparability of quantum processes on shared systems, which uses the channel-state duality. The method uses a maximally entangled state as a resource. We show that using the resource provides significant advantage over the corresponding protocol for nonseparability detection without the resource.
\end{abstract}
\maketitle
\section{Introduction}
Creation and manipulation of entanglement \cite{RevModPhys.81.865} are among the basic necessities of quantum information tasks. Local quantum operations and classical communication between parts of a physical system cannot create entangled states between the parts. It is therefore important to identify quantum processes on shared systems that are not local. An efficient method for detecting entanglement of shared quantum states is by using entanglement witnesses \cite{woronowicz1976,CHOI1984462,1996PhLA..223....1H,1998quant.ph.10091T,PhysRevA.62.052310,2002JMOp...49.1399B,PhysRevA.66.062305,2003JMOp...50.1079G,PhysRevLett.92.087902,das2017separability}. We show that a similar technique can be utilized for witnessing nonseparability of quantum processes on shared systems.
\\
\\
Just like the concept of entanglement witnesses for states, the technique of nonseparability witnesses for quantum operations also uses the mathematics around the Hahn-Banach theorem of normed linear spaces \cite{smith_1970, loTa-kambal}. Additionally, we use the channel-state duality between quantum states and quantum operations \cite{CHOI1975285,article, kraus1983states,Sudarshan1985}. The proposed method requires a maximally entangled state as a resource.
\\
\\
We exemplify our method by considering the paradigmatic controlled-NOT (CNOT), square-root of swap, and Bell gates on two-qubit systems. We also analyze the noise threshold that our method can withstand by considering noisy versions of the gates, where the noise is modelled by the completely depolarizing channel. This also helps us to identify a trade-off between the noise threshold and the entanglement in the resource.
\\
\\
We begin in Section \ref{eibar-bachhadhan-chaT-bogole} with a short discussion on the Hahn-Banach theorem. This is followed by a short discussion on the channel-state duality in Section \ref{saRe-chuattar}. The results are presented in Section \ref{preme-paRa-baran}, followed by a conclusion in Section \ref{karane-akaran}.
\section{Gathering the tools}
\subsection{The Hahn-Banach theorem}
\label{eibar-bachhadhan-chaT-bogole}
The Hahn-Banach theorem \cite{smith_1970, loTa-kambal} is a very important and powerful tool in functional analysis. It also provides a key device for deciding whether a given quantum mechanical state of a multiparty physical system is entangled. Instead of the usual version, it is a corollary (Corollary 2 below, which has been referred to as the ``hyperplane separation theorem'' \cite{loTa-kambal}) of the theorem that is utilized for the purpose.
\\
\\
\textbf{Hahn-Banach theorem:} Let $M$ be a linear subspace of a normed linear space $N$, and let $f$ be a functional defined on $M$. Then $f$ can be extended to a functional $f_0$ defined on the whole space $N$ such that
$ \left\Vert f_0 \right\Vert = \left\Vert f \right\Vert $.
\\
\\
The following corollaries are consequences of the theorem.
\\
\\
\textbf{Corollary 1}: If $M$ is a closed linear subspace of a normed linear space $N$ and $x_0$ is a vector not in $M$, then there exists a functional $f^0$ in $N^*$ such that $f^0 (M)=0$ and $f^0 (x_0)\neq 0$, where $N^*$ is the dual space of $N$.
\\
\\
\textbf{Corollary 2}: Let $M$ be a convex compact set in a finite dimensional Banach space $X$. Let $\rho \notin M $ be a point in $X$. Then there exists a hyperplane that separates $\rho$ from $M$.
\\
\\
\textbf{Entanglement witness and Hahn-Banach theorem:}
It is known that the set of separable states of a bipartite quantum system is a convex compact subset of the set of quantum states \citep{PhysRevA.40.4277}. So it is clear from Corollary 2 that there exists a functional in the dual space of the Hilbert space of the physical system, which can distinguish an entangled state from the separable states. It can be constructed as a linear functional on $\mathcal{B}(H)$, where $H$ is the joint Hilbert space of the bipartite system, in such a way that it assigns positive real numbers to the separable states and a negative real number to the given entangled state. The linear functional is of the form tr$(W \cdot)$, where $W$ is an operator on $H$, called a ``witness operator''. It has been
shown \cite{das2017separability} that $W$ can be chosen as the partial transpose of the projector in the direction of an eigenvector, corresponding to a negative eigenvalue, of the partial transpose of the given entangled state, provided the partial transpose does have a negative eigenvalue.
\\
\subsection{The CJKS Isomorphism}
\label{saRe-chuattar}
Let $H_1={\mathbb{C}^n}$ and let $\phi : \mathcal{B}(H_1)\rightarrow \mathcal{B}(H_2)$ be a linear map, where $H_1$ and $H_2$ are two Hilbert spaces. Let $ \{e_{ij}\}$, $i,j=1,2,\ldots,n$, be a complete set of matrix units for $\mathcal{B}(H_1)$. Then the Choi-Jamio{\l}kowski-Kraus-Sudarshan (CJKS) matrix \cite{book,article,CHOI1975285,kraus1983states,Sudarshan1985} for $\phi$ is defined to be the operator
$\rho_\phi = \sum_{{i,j}=1}^{n} e_{ij} \otimes \phi(e_{ij}) \in \mathcal{B}(H_1)\otimes \mathcal{B}(H_2) $.
\\
\\The map $\phi \rightarrow \rho_\phi $ is linear and bijective. This map is called the CJKS isomorphism. Using this isomorphism, the concept of ``channel-state duality'' emerges. Here, a channel or quantum channel is a completely positive trace-preserving map, which acts on the space of bounded operators on a Hilbert space. To understand the ``channel-state duality'' we need to have a look at the CJKS theorem on completely positive maps.
\\
\\
\textbf{CJKS theorem on completely positive maps:} The CJKS matrix, $\rho_\phi = \sum_{{i,j}=1}^{n} e_{ij} \otimes \phi(e_{ij}) \in \mathcal{B}(H_1)\otimes \mathcal{B}(H_2) $, is positive if and only if the map $\phi : \mathcal{B}(H_1)\rightarrow \mathcal{B}(H_2)$ is completely positive.
\\
\\
The CJKS isomorphism, with the help of the CJKS theorem on completely positive maps, allows us to view completely positive trace-preserving linear maps acting on quantum states as a quantum state in a higher-dimensional Hilbert space. If we consider quantum states which are density matrices on an $n$-dimensional Hilbert space, then the completely positive trace-preserving map acting on them can be identified with a density matrix on an $n^2$-dimensional Hilbert space.
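As a small, self-contained illustration of this identification (the script and the completely depolarizing qubit channel chosen as the example are ours and are not part of the development above), the CJKS matrix $\rho_\phi=\sum_{i,j}e_{ij}\otimes\phi(e_{ij})$ of a channel can be built directly from its Kraus operators:
\begin{verbatim}
import numpy as np

def cjks_matrix(kraus, dim):
    """Unnormalized CJKS matrix of phi(X) = sum_k K X K^dagger."""
    rho = np.zeros((dim * dim, dim * dim), dtype=complex)
    for i in range(dim):
        for j in range(dim):
            e_ij = np.zeros((dim, dim), dtype=complex)
            e_ij[i, j] = 1.0
            phi_e = sum(K @ e_ij @ K.conj().T for K in kraus)
            rho += np.kron(e_ij, phi_e)
    return rho

# Completely depolarizing qubit channel D(X) = tr(X) I/2, with Kraus
# operators sigma_k / 2, k = 0,1,2,3 (identity and the Pauli matrices).
I = np.eye(2)
X = np.array([[0, 1], [1, 0]])
Y = np.array([[0, -1j], [1j, 0]])
Z = np.diag([1, -1])
rho_D = cjks_matrix([P / 2 for P in (I, X, Y, Z)], 2)
print(np.allclose(rho_D, np.eye(4) / 2))      # True: CJKS matrix of D is I_4/2
\end{verbatim}
For a trace-preserving map the CJKS matrix has trace $\dim H_1$, so dividing by that trace yields the corresponding CJKS state.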
\\
\\
\textbf{Lemma:} The CJKS isomorphism is continuous.
\\
\\
\textbf{Proof:} Let $\phi \in \mathcal{L}(\mathcal{B}(H_1),\mathcal{B}(H_2))$, where $\mathcal{L}(\mathcal{B}(H_1),\mathcal{B}(H_2))$ is the set of all linear maps from $\mathcal{B}(H_1)$ to $\mathcal{B}(H_2)$, for two Hilbert spaces $H_1$ and $H_2$. The map $f:\mathcal{L}(\mathcal{B}(H_1),\mathcal{B}(H_2)) \rightarrow \mathcal{B}(H_1)\otimes \mathcal{B}(H_2) $, defined by
$f(\phi)=\rho_\phi=\sum_{{i,j}=1}^{n} e_{ij} \otimes \phi(e_{ij})$, is the CJKS isomorphism. Let $\{\phi_n\}$ be a sequence in $\mathcal{L}(\mathcal{B}(H_1),\mathcal{B}(H_2))$ which converges to $\phi$ in $\mathcal{L}(\mathcal{B}(H_1),\mathcal{B}(H_2))$. Then
\(\left\Vert \rho_{\phi_n}-\rho_{\phi} \right\Vert\)
\(=\left\Vert \sum_{{i,j}=1}^{n} e_{ij} \otimes \phi_n(e_{ij})-\sum_{{i,j}=1}^{n} e_{ij} \otimes \phi(e_{ij}) \right\Vert\)
\(=\left\Vert\sum_{{i,j}=1}^{n} e_{ij} \otimes (\phi_n-\phi)(e_{ij}) \right\Vert
\)
\(\leq \sum_{{i,j}=1}^{n} \left\Vert e_{ij} \right\Vert \left \Vert (\phi_n-\phi)(e_{ij}) \right\Vert\)
\(\leq \left(\sum_{{i,j}=1}^{n} \left\Vert e_{ij} \right\Vert \right)^2 \left \Vert (\phi_n-\phi)\right\Vert\).
So, the sequence $\rho_{\phi_n}$ converges to $\rho_\phi$. This implies that the map $f$, the CJKS isomorphism, is continuous.
\(\square\)
\section{Nonseparability witness for quantum operations}
\label{preme-paRa-baran}
We will in this section provide a general method for witnessing nonseparability of bipartite quantum operations.
\\
\\
\textbf{Local operation and classical communication (LOCC):} The concept of local quantum operations and classical communication is a very useful one in quantum information. In an LOCC protocol, a quantum mechanical operation is performed on one of the parts of a bipartite system and the result of the operation, if it is a measurement, is communicated classically to the other part, where another operation is performed, based on the communicated results. This to and fro communication, interspersed with local quantum operations, may be repeated as many times as needed. The mathematical characterization of LOCC is a difficult one. Recently it was shown that the set of LOCC protocols is not topologically closed but the set of finite-round LOCC is a compact subset of quantum operations \cite{2014CMaPh.328..303C}.
\\
\\
\textbf{Separable operators:} An operation on $\mathcal{L}(\mathcal{B}(H_A),\mathcal{B}(H_B))$ that takes an element $\rho \in \mathcal{L}(\mathcal{B}(H_A),\mathcal{B}(H_B))$ to $\sum_{{i}} (A_i \otimes B_i)\rho (A_i \otimes B_i)^ \dagger$, where $A_i$ and $B_i$ are operators on the Hilbert spaces $H_A$ and $H_B$ respectively, is called separable. An LOCC is certainly a separable operation. The set of separable operations is clearly convex. It is also closed, as shown in the following statement.
\\
\\
\textbf{Statement:} The set of separable operators is closed in the norm topology.
\\
\\
\textbf{Proof:} It is known that separable operators correspond to separable states under the CJKS isomorphism \cite{2001PhRvL..86..544C}. The set of separable states is closed and, as we have seen in the Lemma above, the CJKS isomorphism is continuous. This implies that the inverse image of the set of separable states under the CJKS map is closed, i.e., the set of separable operators is closed.
\(\square\)
\\
\\
In the case of constructing a witness for an entangled state, we use the fact that the set of separable states is convex and closed. The witness is constructed under the belief that the corresponding experimental set-up creates that entangled state. The witness, then, detects the state not only if the set-up is perfect but also if there are imperfections present. Notwithstanding any such noise in the set-up, provided the noise is not higher than a certain threshold, the witness guarantees the presence of entanglement in the state created by the set-up.
\\
\\In a similar way, let $\Lambda$ be a physical operator on density matrices in $\mathcal{L}(\mathcal{B}(H_A),\mathcal{B}(H_B))$ and consider an experimental apparatus that we believe to be implementing the operator. Ideally, we would like to find a witness to detect if the apparatus is implementing a map that is outside the LOCC class. This is a difficult problem. We however use the fact that the closed and convex set of separable physical operations on $\mathcal{L}(\mathcal{B}(H_A),\mathcal{B}(H_B))$ is mapped onto the closed and convex set of separable density matrices on $H_1 \otimes H_2$ and vice-versa, via the CJKS isomorphism, where $H_1=H_A \otimes H_{\tilde{A}}$, $H_2=H_B \otimes H_{\tilde{B}}$, with $\dim H_{\tilde{A}}= \dim H_A$, $\dim H_{\tilde{B}}= \dim H_B$ \cite{2001PhRvL..86..544C}. Therefore, given an apparatus that claims to implement $\Lambda$, we apply $\Lambda$ on the $H_A \otimes H_B$ part of the state (unnormalized) $\sum_{{i}=1}^{\dim H_A} \sum_{{j}=1}^{\dim H_B} \ket{ij}_{AB}\ket{ij}_{\tilde{A}\tilde{B}} $, assuming the latter to be available as a resource.
If the output state, $\rho_{AB\tilde{A}\tilde{B}}$, is non-separable as a density matrix in the $A\tilde{A}:B\tilde{B}$ partition, this will imply that the apparatus is implementing an operation that is non-separable in the $A:B$ partition. The non-separability of $\rho_{AB\tilde{A}\tilde{B}}$ in the $A\tilde{A}:B\tilde{B}$ partition can now be checked by using the concept of entanglement witness for bipartite states. In particular, if $\rho_{AB\tilde{A}\tilde{B}}^{T_{A\tilde{A}}}$ has a negative eigenvalue, then the partial transpose of the projector of a corresponding eigenvector can act as a witness. Since the LOCC maps are contained within the set of separable operations, $\Lambda$ will of course be non-LOCC. Moreover, the non-LOCC-ness is being detected here not just for $\Lambda$, but for any other operation that is sufficiently close to $\Lambda$, as we have already seen that the set of separable operations forms a closed set. We therefore have a method for witnessing the nonseparability of an arbitrary apparatus that acts on bipartite quantum states, provided the corresponding CJKS state is entangled. Let us now exemplify the method by using an apparatus that claims to implement the CNOT gate for $\mathbb{C}^2 \otimes \mathbb{C}^2$.
\\
\\
\textbf{Witness operator for CNOT gate:} The CNOT gate is a paradigmatic non-LOCC operator acting on $\mathbb{C}^2 \otimes \mathbb{C}^2$, that has been implemented in several physical systems. The CJKS state corresponding to the CNOT gate is $\rho_{CNOT}$; its partial transpose in the $A\tilde{A}:B\tilde{B}$ partition is given by
$4\rho_{CNOT}^{T_{A\tilde{A}}}=$
\begin{equation}
\label{noteq}
\left( {\begin{array}{cccccccccccccccc}
1 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 1\\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 1\\
1 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\
0 & 1 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 1 & 0\\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 1 & 0\\
0 & 1 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0
\end{array}}\right),
\end{equation}
where we have used the computational basis in $AB\tilde{A}\tilde{B}$ to express the matrix, and where we have adopted the convention in which the identity superoperator in the CJKS isomorphism is applied onto the second system. $\rho_{CNOT}^{T_{A\tilde{A}}}$ has a single negative eigenvalue and it is non-degenerate. The partial transpose in the $A\tilde{A}:B\tilde{B}$ partition of the projection on the corresponding eigenvector is given by
\\
\begin{equation}
W=\left(
\begin{array}{cccccccccccccccc}
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0& -1 & 0 & 0 & -1 &0 \\
0 & 1 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0& -1 & 0 & 0 & -1 &0 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 1 \\
-1& 0 & 0 & 0 & 0 & -1& 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
-1& 0 & 0 & 0 & 0 & -1& 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 1 \\
\end{array}
\right),
\end{equation}
where again we have used the computational basis in $AB\tilde{A}\tilde{B}$ to express the matrix. This operator can now act as a witness operator for detecting the non-separability of $\rho_{CNOT}$ in the $A\tilde{A}:B\tilde{B}$ partition, thereby implying the non-separability (i.e., being not a separable operator) of $\Lambda$ on $H_A \otimes H_B$. This in turn will imply that $\Lambda$ is not an LOCC on $H_A \otimes H_B$.
The decomposition of the witness operator $W$ is given by
\begin{align}
W=\sum_{{i,j,k,l}=1}^{4} w_{ijkl}(-1)^\alpha \mu_i \otimes \mu_j \otimes \mu_k \otimes \mu_l ,
\end{align}
where $\mu_1=I+\sigma_z $, $\mu_2=I-\sigma_z $, $\mu_3=\sigma_x+i\sigma_y $,$\mu_4=\sigma_x-i\sigma_y $.
$\alpha$ depends on $i,j,k,l$, and is equal to the total number of appearances of the indices 3 and 4 in the corresponding term in the summation. There are only 16 non-zero terms in the summation, viz. for \begin{eqnarray}
&&\nonumber(i,j,k,l)=\\
&&\nonumber(1,1,1,2),(1,4,1,3),(4,1,4,4),(4,4,4,1),\\
&&\nonumber(1,3,1,4),(1,2,1,1),(4,3,4,2),(4,2,4,3),\\
&&\nonumber(3,1,3,3),(3,4,3,2),(2,1,2,1),(2,4,2,4),\\
&&\nonumber(3,3,3,1),(3,2,3,4),(2,3,2,3),(2,2,2,2),
\end{eqnarray}
and $w_{ijkl}=1$ for these combinations, being vanishing for others. Since W has been decomposed into a sum over local operations on $H_A \otimes H_{\tilde{A}} \otimes H_B \otimes H_{\tilde{B}}$, we have a local strategy for measuring $W$.
\\
\\
\textbf{Noise analysis:} We now quantify the amount of noise that can be inflicted on the CNOT gate while its nonseparability can still be detected by the witness. For this purpose, we use the completely depolarizing noise, i.e., for a given map $\Lambda$, we consider the noisy map $p\Lambda + (1-p)D$, where $D$ is the completely depolarizing map and $0 \leq p \leq 1$. By considering the CJKS state corresponding to the noisy map, we find that the nonseparability is still detected provided $p > \frac{1}{9}$.
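These numbers can be reproduced with a short computation. The sketch below is ours (the basis ordering, the helper \texttt{pt\_AAt} and all variable names are our own choices for the illustration): it builds the normalized CJKS state of the CNOT channel, takes the partial transpose over $A\tilde{A}$, reads off the witness from the negative eigenvector, and recovers the threshold $p>1/9$; replacing the CNOT matrix by the square root of swap or by the Bell gate allows the thresholds quoted below to be checked in the same way.
\begin{verbatim}
import numpy as np

CNOT = np.array([[1, 0, 0, 0],
                 [0, 1, 0, 0],
                 [0, 0, 0, 1],
                 [0, 0, 1, 0]], dtype=float)

def pt_AAt(rho):
    # partial transpose over the A and ~A qubits, basis ordered A,B,~A,~B
    r = rho.reshape([2] * 8)             # axes (a,b,at,bt, a',b',at',bt')
    return r.transpose(4, 1, 6, 3, 0, 5, 2, 7).reshape(16, 16)

maxent = np.eye(4).reshape(16)           # sum_x |x>_AB |x>_~A~B (unnormalized)
psi = np.kron(CNOT, np.eye(4)) @ maxent  # apply the gate to the AB half
rho = np.outer(psi, psi) / 4             # normalized CJKS state

vals, vecs = np.linalg.eigh(pt_AAt(rho))
lam_min, v = vals[0], vecs[:, 0]         # single negative eigenvalue -1/2
W = pt_AAt(np.outer(v, v))               # witness operator

t_gate = np.trace(W @ rho)               # = lam_min
t_dep = np.trace(W) / 16                 # CJKS state of D is I/16
print("negative eigenvalue:", lam_min)
print("noise threshold p >", t_dep / (t_dep - t_gate))   # 1/9
\end{verbatim}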
\\
\\
\textbf{Comparison with situation when resource is absent:} The nonseparability of the CNOT gate can of course be detected even when the maximally entangled state in $AB:\tilde{A}\tilde{B}$ is unavailable. In this case, we can try to detect the nonseparability of the CNOT gate by applying it on $\ket{+}_A \ket{0}_B$, whereby a maximally entangled two-qubit state is created, where $\ket{+}$ and $\ket{0}$ are respectively eigenstates of $\sigma_x$ and $\sigma_z$ corresponding to the eigenvalue $+1$. The nonseparability is then detected by witnessing the entanglement in the output state. To compare this method with the CJKS-based method above, we again consider the noisy CNOT map, $p\Lambda_{CNOT} + (1-p)D$, apply it on $\ket{+}_A \ket{0}_B$, and try to witness the entanglement in the output state. In this case, the nonseparability can be detected provided $p> \frac{1}{3}$. We therefore find that there is a clear trade-off between the efficiency of the nonseparability detection, as quantified by the amount of noise that the detection process can withstand, and the resource available. Provided that a resource, in the form of a maximally entangled state in $AB:\tilde{A}\tilde{B}$, is available, the noise tolerance is much higher.
\\
\\
\noindent \textbf{Witness operator for the square root of swap gate:} We now repeat the above procedure for the square root of the swap gate, where the swap gate is defined as a two-party gate (whose two-qubit version we are concerned with here) that interchanges the states of any product input, with the action on other states being defined linearly. The witness operator for this gate, corresponding to the negative eigenvalue, $-\frac{\sqrt{5}}{8}$, of the partial transpose of the CJKS state of this gate is given by
\begin{align}
W_{\sqrt{swap}}=\sum_{{i,j,k,l}=1}^{4} w^s_{ijkl} \mu_i \otimes \mu_j \otimes \mu_k \otimes \mu_l ,
\end{align}
where $w^s_{ijkl}=1$ for
\begin{eqnarray}
&&(i,j,k,l)=\nonumber\\
&&(1,1,1,2),(1,1,2,1),(1,1,2,2),(3,4,1,2),\nonumber
\\
&&(3,4,2,1),(1,2,1,1),(1,2,3,4),(1,2,4,3),\nonumber \\
&&(1,2,2,2),(2,1,1,1),(2,1,3,4),(2,1,4,3),\nonumber \\
&&(2,1,2,2),(4,3,1,2),(4,3,2,1),(2,2,1,1),\nonumber \\&&(2,2,1,2),
(2,2,2,1), \nonumber
\end{eqnarray}
$w^s_{ijkl}=-1$ for
\begin{eqnarray}
&&\nonumber (i,j,k,l)=\\
&&\nonumber (1,4,4,1),(1,4,2,4),(4,2,4,1),(4,2,2,4),\\
&& \nonumber (3,1,1,3),(3,1,3,2),(2,3,1,3),(2,3,3,2),\\
&& \nonumber (3,4,4,3),(1,3,3,1),(1,3,2,3),(3,2,3,1),\\
&& \nonumber (3,2,2,3),(4,1,1,4),(4,1,4,2),(2,4,1,4),\\&& \nonumber (2,4,4,2),(4,3,3,4) ,
\end{eqnarray}
and $w^s_{ijkl}=0$ for the
remaining combinations of $(i,j,k,l)$.
\\
\\
\noindent \textbf{Noise analysis for the square root of swap gate and comparison with resource-free case:}
Just like for the CNOT gate, we again perform a noise analysis here by considering admixture with the completely depolarizing channel. We find that when the maximally entangled state is available as a resource, the nonseparability of the square root of the swap gate can be detected
when $p>\frac{1}{1+2\sqrt{5}} \approx 0.18$. For the case when the resource is not available, the nonseparability can be detected
when $p>\frac{1}{3}$, corresponding to the situation when the square root of the swap gate is applied to the state \(|01\rangle\), which produces a maximally entangled two-qubit state.
\\
\\
\textbf{Witness operator for the Bell gate:} Let us now consider yet another example of an entangling gate, viz. the Bell gate \cite{nahoi-mon-ditei-tumi-parona, ei-akash-natun}, defined as one that respectively takes the states of the standard biorthogonal two-qubit product basis consisting of the states \(|00\rangle\), \(|11\rangle\), \(|01\rangle\), \(|10\rangle\),
to those of the Bell basis consisting of the states \((|00\rangle + |11\rangle)/\sqrt{2}\), \((|00\rangle - |11\rangle)/\sqrt{2}\),
\((|01\rangle + |10\rangle)/\sqrt{2}\), \((|01\rangle - |10\rangle)/\sqrt{2}\).
The decomposition of the witness operator corresponding to the eigenvalue $-\frac{1}{2}$ of the partial transpose of the CJKS state corresponding to this gate is given by
\begin{align}
W_{Bell}=\sum_{{i,j,k,l}=1}^{4} w^B_{ijkl} \mu_i \otimes \mu_j \otimes \mu_k \otimes \mu_l ,
\end{align} \\
where $w^B_{ijkl}=1$ for
\begin{eqnarray}
&&\nonumber(i,j,k,l)=\\
&&\nonumber(1,1,1,2),(1,1,2,1),(1,4,1,3),(1,4,2,4),\\
&&\nonumber(3,1,4,1),(3,1,2,4),(3,4,4,4),(3,4,2,1),\\
&&\nonumber(1,3,1,4),(1,3,2,3),(1,2,1,1),(1,2,2,2),\\
&&\nonumber(3,3,4,3),(3,3,2,2),(3,2,4,2),(3,2,2,3),\\
&&\nonumber(4,1,3,1),(4,1,2,3),(4,4,3,4),(4,4,2,2),\\
&&\nonumber(2,1,1,1),(2,1,3,4),(2,1,4,3),(2,1,2,2),\\
&&\nonumber(2,4,1,4),(2,4,3,1),(2,4,4,2),(2,4,2,2),\\
&&\nonumber(4,3,3,3),(4,3,2,1),(4,2,3,2),(4,2,2,4),\\
&&\nonumber(2,3,1,3),(2,3,3,2),(2,3,4,1),(2,3,2,4),\\
&&\nonumber(2,2,1,2),(2,2,3,3),(2,2,4,4),(2,2,2,1),
\end{eqnarray}
$w^B_{ijkl}=-1$ for
\begin{eqnarray}
&&\nonumber(i,j,k,l)=\\
&&\nonumber(1,1,3,3),(1,1,4,4),(1,4,3,2),(1,4,4,1),\\
&&\nonumber(3,1,1,3),(3,1,3,2),(3,4,1,2),(3,4,3,3),\\
&&\nonumber(1,3,3,1),(1,3,4,2),(1,2,3,4),(1,2,4,3),\\
&&\nonumber(3,3,1,1),(3,3,3,4),(3,2,1,4),(3,2,3,1),\\
&&\nonumber(4,1,1,4),(4,1,4,2),(4,4,1,1),(4,4,4,3),\\
&&\nonumber(4,3,1,2),(4,3,4,4),(4,2,1,3),(4,2,4,1),
\end{eqnarray}
and $w^B_{ijkl}=0$ for the rest of the different values of $(i,j,k,l)$.
\\
\\
\noindent \textbf{Noise analysis for the Bell gate and comparison with resource-free case:}
In this case, the nonseparability is detected via the CJKS approach when $p>\frac{1}{9}$ in the noisy case, and it is detected without the resource when $p>\frac{1}{3}$, in the situation where the Bell gate is applied on \(|00\rangle\).
\section{Conclusion}
\label{karane-akaran}
The concept of entanglement witness forms an efficient method for detecting entangled states. Entangled states are created by quantum processes that cannot be written in separable form. We show that it is possible to conceptualize a parallel witnessing method for nonseparability of quantum processes. The method utilizes the channel-state duality between quantum channels and states. The method uses a maximally entangled state as a resource. We find that considering the witness for noisy maps provides us with a trade-off between the threshold amount of noise that the method can withstand and the entanglement in the resource.
\section*{Acknowledgments}
We acknowledge useful discussions with Aditi Sen(De).
\end{document}
|
\begin{document}
\begin{titlepage}
\title{Breaking Quadratic Time for Small Vertex Connectivity and an Approximation Scheme}
\pagenumbering{roman}
\begin{abstract}
Vertex connectivity is a classic, extensively-studied problem. Given an integer $k$, its goal is to decide if an $n$-node $m$-edge graph can be disconnected by removing $k$ vertices.
Although a linear-time algorithm has been conjectured since 1974 [Aho, Hopcroft, and Ullman], and despite its sibling problem of edge connectivity being resolved over two decades ago [Karger STOC'96], so far no vertex connectivity algorithm is faster than $O(n^2)$ time even for $k=4$ and $m=O(n)$.
In the simplest case where $m=O(n)$ and $k=O(1)$, the $O(n^2)$ bound dates five decades back to [Kleitman IEEE Trans. Circuit Theory'69].
For higher $m$, $O(m)$ time is known for $k\leq 3$ [Tarjan FOCS'71; Hopcroft, Tarjan SICOMP'73]; the first $O(n^2)$ time is from [Kanevsky, Ramachandran, FOCS'87] for $k=4$ and from [Nagamochi, Ibaraki, Algorithmica'92] for $k=O(1)$.
For general $k$ and $m$, the best bound is $\tilde{O}(\min(kn^2, n^\omega+nk^\omega))$ [Henzinger, Rao, Gabow FOCS'96; Linial, Lov{\'{a}}sz, Wigderson FOCS'86] where $\tilde O$ hides polylogarithmic terms and $\omega<2.38$ is the matrix multiplication exponent.
In this paper, we present a randomized Monte Carlo algorithm with $\tilde{O}(m+k^{7/3}n^{4/3})$ time for any $k=O(\sqrt{n})$. This
gives the {\em first subquadratic time} bound for any $4\leq k \leq o(n^{2/7})$ (subquadratic time refers to $O(m)+o(n^2)$ time) and improves all above classic bounds for all $k\le n^{0.44}$.
We also present a new randomized Monte Carlo $(1+\epsilon)$-approximation algorithm that is strictly faster than Henzinger's previous 2-approximation algorithm [J. Algorithms'97] and than all previous exact algorithms.
The story is the same for the {\em directed} case, where our exact $\tilde{O}( \min\{km^{2/3}n, km^{4/3}\} )$-time algorithm for any $k = O(\sqrt{n})$ and our $(1+\epsilon)$-approximation algorithm improve all previous classic bounds. Additionally, ours is the first approximation algorithm for directed graphs.
The key to our results is to avoid computing single-source connectivity, which was needed by all previous exact algorithms and is not known to admit $o(n^2)$ time. Instead, we design the first {\em local algorithm} for computing vertex connectivity; without reading the whole graph, our algorithm can find a separator of size at most $k$ or certify that there is no separator of size at most $k$ ``near'' a given seed node.
\end{abstract}
\setcounter{tocdepth}{2}
\tableofcontents
\end{titlepage}
\pagenumbering{arabic}
\section{Introduction}
Vertex connectivity is a central concept in graph theory. The vertex
connectivity $\kappa_{G}$ of a graph $G$ is the minimum number
of nodes whose removal disconnects some remaining node
from another remaining node. (When $G$ is directed, this means that there is no directed path from some node $u$ to some node $v$ in the remaining graph.)
Since 1969, there has been a long line
of research on efficient algorithms \cite{Kleitman1969methods,Podderyugin1973algorithm,EvenT75,Even75,Galil80,EsfahanianH84,Matula87,BeckerDDHKKMNRW82,LinialLW88,CheriyanT91,NagamochiI92,CheriyanR94,Henzinger97,HenzingerRG00,Gabow06,Censor-HillelGK14}
for \emph{deciding $k$-connectivity }(i.e. deciding if $\kappa_{G}\ge k$)
or \emph{computing the connectivity} $\kappa_{G}$ (see
\Cref{tab:compare} for details).
For the undirected case, Aho, Hopcroft, and Ullman \cite[Problem 5.30]{AhoHU74} conjectured in 1974 that there exists an
$O(m)$-time algorithm for computing $\kappa_{G}$ on a graph with
$n$ nodes and $m$ edges. However, no algorithms to date are faster than $O(n^2)$ time even for $k=4$.
On undirected graphs, the first $O(n^2)$ bound for the simplest case, where $m=O(n)$ and $k=O(1)$, dates back to five decades ago:
Kleitman \cite{Kleitman1969methods} in 1969 presented an algorithm for deciding $k$-connectivity with running time
$O(kn\cdot\operatorname{VC}_{k}(n,m))$ where $\operatorname{VC}_{k}(n,m)$ is the time needed
for deciding if the minimum size $s$-$t$ vertex-cut is of size at
least $k$, for fixed $s,t$. Although the running time bound
was not explicitly stated, it was known that $\operatorname{VC}_{k}(n,m)=O(mk)$
by the Ford-Fulkerson algorithm \cite{ford1956maximal}. This gives $O(k^{2}nm)$, which is $O(n^{2})$
when $m=O(n)$ and $k=O(1)$; combined with the 1992 sparsification result of Nagamochi and Ibaraki \cite{NagamochiI92}, the same $O(n^2)$ bound holds for $k=O(1)$ and any $m$.
Subsequently, Tarjan~\cite{Tarjan72} and Hopcroft and Tarjan~\cite{HopcroftT73} presented $O(m)$-time algorithms when $k$ is $2$ and $3$ respectively.
All subsequent works
improved Kleitman's bound for larger $k$ and $m$, but none went below $O(n^{2})$ time. For $k=4$ and any $m$, the first $O(n^2)$ bound was by Kanevsky and Ramachandran~\cite{KanevskyR91}. The first $O(n^2)$ bound for any $k=O(1)$ (and any $m$) was by Nagamochi and Ibaraki \cite{NagamochiI92}.
For general $k$ and $m$, the fastest running
times are $\tilde{O}(n^{\omega}+nk^{\omega})$ by Linial, Lov{\'a}sz and Wigderson \cite{LinialLW88} and $\tilde{O}(kn^{2})$ by Henzinger, Rao and Gabow \cite{HenzingerRG00}. Here, $\tilde O$ hides $\operatorname{polylog}(n)$ terms, and $\omega$ is the matrix multiplication exponent. Currently, $\omega<2.37287$ \cite{Gall14a}.
For directed graphs, an $O(m)$-time algorithm is known only for $k\le2$ by Georgiadis \cite{Georgiadis10}. For general $k$ and $m$, the fastest running times are $\tilde{O}(n^{\omega}+nk^{\omega})$
by Cheriyan and Reif \cite{CheriyanR94} and $\tilde{O}(mn)$ by Henzinger~et~al.~\cite{HenzingerRG00}.
All mentioned state-of-the-art algorithms for general $k$ and $m$, for both directed and undirected cases \cite{LinialLW88,CheriyanR94,HenzingerRG00}, are randomized and correct with high probability.
The fastest deterministic algorithm is by Gabow \cite{Gabow06} and has a slower running time.
Some {\em approximation algorithms} have also been developed. The first is the deterministic $2$-approximation $O(\min\{\sqrt{n},k\}n^{2})$-time algorithm by Henzinger \cite{Henzinger97}. The second is the recent randomized $O(\log n)$-approximation $\tilde O(m)$-time algorithm by Censor-Hillel, Ghaffari, and Kuhn \cite{Censor-HillelGK14}. Both algorithms work only on undirected graphs.
Besides a few $O(m)$-time algorithms for $k\leq 3$, all previous exact algorithms could not go below $O(n^2)$ time for a common reason: as a subroutine, they have to solve the following problem. For a pair of nodes $s$ and $t$, let $\kappa(s,t)$
denote the minimum number of nodes (excluding $s$ and $t$) required
to be removed so that there is no path from $s$ to $t$ in the remaining
graph. In all previous algorithms,
there is always some node $s$ such that these algorithms decide if
$\kappa(s,t)\ge k$ for all other nodes $t$ (and some algorithms
in fact compute $\kappa(s,t)$ for all $t$).
We call this problem \emph{single-source $k$-connectivity}.
To date, there is no $o(n^{2})$-time algorithm for this problem, even when $k=O(1)$ and $m=O(n)$.
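For concreteness, the subroutine behind $\operatorname{VC}_{k}(n,m)$ is the classic reduction from vertex cuts to maximum flow. The sketch below is ours and is meant only to illustrate that reduction (it aims at nothing beyond the $O(mk)$ Ford--Fulkerson bound): every node is split into an in/out pair joined by a unit-capacity edge, and the augmenting-path search stops after $k$ rounds, returning $\min\{\kappa(s,t),k\}$.
\begin{verbatim}
from collections import defaultdict, deque

def st_vertex_connectivity(adj, s, t, k):
    # adj maps every vertex to the list of its out-neighbours (directed)
    cap = defaultdict(int)                       # residual capacities
    nbrs = defaultdict(set)
    def add_edge(u, v, c):
        cap[(u, v)] += c
        nbrs[u].add(v); nbrs[v].add(u)
    for v in adj:                                # split v into v_in -> v_out
        add_edge(("in", v), ("out", v), k if v in (s, t) else 1)
    for u in adj:
        for v in adj[u]:                         # original edges never bind
            add_edge(("out", u), ("in", v), k)
    flow = 0
    while flow < k:
        parent, queue = {("out", s): None}, deque([("out", s)])
        while queue:                             # BFS for an augmenting path
            u = queue.popleft()
            for w in nbrs[u]:
                if w not in parent and cap[(u, w)] > 0:
                    parent[w] = u
                    queue.append(w)
        if ("in", t) not in parent:
            return flow                          # flow = kappa(s,t) < k
        node = ("in", t)
        while parent[node] is not None:          # push one unit of flow
            prev = parent[node]
            cap[(prev, node)] -= 1
            cap[(node, prev)] += 1
            node = prev
        flow += 1
    return flow                                  # kappa(s,t) >= k
\end{verbatim}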
\begin{table}
{\footnotesize
\begin{tabular}{|>{\raggedright}p{0.17\textwidth}|>{\centering}p{0.25\textwidth}|>{\centering}p{0.25\textwidth}|>{\raggedright}p{0.27\textwidth}|}
\hline
Reference & Directed & Undirected & Note\tabularnewline
\hline
\hline
Trivial & \multicolumn{2}{c|}{$O(n^{2}\cdot\operatorname{VC}(n,m))$} & \tabularnewline
\hline
\cite{Kleitman1969methods} & \multicolumn{2}{c|}{$O(kn\cdot\operatorname{VC}_{k}(n,m))$} & \tabularnewline
\hline
\cite{Podderyugin1973algorithm,EvenT75} & \multicolumn{2}{c|}{$O(kn\cdot\operatorname{VC}(n,m))$} & \tabularnewline
\hline
\cite{Even75} (cf. \cite{Galil80,EsfahanianH84,Matula87}) & \multicolumn{2}{c|}{$O((k^{2}+n)\cdot\operatorname{VC}_{k}(n,m))$} & \tabularnewline
\hline
\cite{BeckerDDHKKMNRW82} & \multicolumn{2}{c|}{$\tilde{O}(n\cdot\operatorname{VC}(n,m))$} & Monte Carlo\tabularnewline
\hline
\multirow{2}{0.2\textwidth}{\cite{LinialLW88} (\cite{CheriyanR94} for the directed case)} & \multicolumn{2}{c|}{$O((n^{\omega}+nk^{\omega})\log n)$} & Monte Carlo\tabularnewline
\cline{2-4}
& \multicolumn{2}{c|}{$O((n^{\omega}+nk^{\omega})k)$} & Las Vegas\tabularnewline
\hline
\cite{NagamochiI92,CheriyanT91} & - & $O(k^{3}n^{1.5}+k^{2}n^{2})$ & \tabularnewline
\hline
\cite{Henzinger97} & - & $O(\min\{\sqrt{n},k\}n^{2})$ & $2$-approx.\tabularnewline
\hline
\multirow{2}{0.2\textwidth}{\cite{HenzingerRG00}} & $O(mn\log n)$ & $O(kn^{2}\log n)$ & Monte Carlo\tabularnewline
\cline{2-4}
& $O(\min\{n,k^{2}\}km+mn)$ & $O(\min\{n,k^{2}\}k^{2}n+\kappa n^{2})$ & \tabularnewline
\hline
\cite{Gabow06} & $O(\min\{n^{3/4},k^{1.5}\}km+mn)$ & $O(\min\{n^{3/4},k^{1.5}\}k^{2}n+kn^{2})$ & \tabularnewline
\hline
\cite{Censor-HillelGK14} & - & $\tilde{O}(m)$ & Monte Carlo, $O(\log n)$-approx.\tabularnewline
\hline
\multirow{2}{0.2\textwidth}{\textbf{This
paper}
}
& $\tilde{O}(\min\{km^{2/3}n, km^{4/3}\}) $ & $\tilde{O}(m+k^{7/3}n^{4/3})$& Monte Carlo, for $k\le\sqrt{n}$\tabularnewline
\cline{2-4}
& $\tilde{O}(t_{\text{directed}}) $ & $\tilde{O}(t_{\text{undirected}})$ & Monte Carlo, $(1+\epsilon)$-approx. \tabularnewline
\hline
\end{tabular}
}
\caption{\label{tab:compare}List of running times $T(k,n,m)$ of previous algorithms
on a graph $G$ with $n$ nodes and $m$ edges for deciding if the
vertex connectivity $\kappa_{G}\ge k$. $\protect\operatorname{VC}(n,m)$ is the
time needed for finding the minimum size $s$-$t$ vertex cut for
fixed $s,t$. $\protect\operatorname{VC}_{k}(n,m)$ is the time needed for either
certifying that the minimum size $s$-$t$ vertex cut is of size at
least $k$, or returning such a cut. Currently, $\protect\operatorname{VC}_{k}(n,m)=O(\min\{k,\sqrt{n}\}m)$.
If $\kappa_{G}\le k$, all algorithms above can compute $\kappa_{G}$,
and most algorithms (except those in \cite{LinialLW88,CheriyanR94})
also return a corresponding
separator. $t_{\text{directed}} = \operatorname{poly}(1/\epsilon) \min(T_{\operatorname{flow}}(k,m,n), n^{\omega}) $ , and
$t_{\text{undirected} } = m+\operatorname{poly}(1/\epsilon) \min(
k^{4/3} n^{4/3}, k^{2/3} n^{5/3+o(1)}, n^{3+o(1)}/k,
n^{\omega} )$ where $T_{\operatorname{flow}}$ is defined in \Cref{eq:exact-directed-time-intro}.}
\end{table}
\subsection{Our Results}
In this paper, we present the first algorithms that break the $O(n^2)$ bound on both undirected and directed graphs, when $k$ is small. More precisely:
\begin{theorem}
\label{thm:exact} There are randomized (Monte Carlo)
algorithms that take as inputs an $n$-node $m$-edge graph $G=(V,E)$ and an integer $k=O(\sqrt{n})$, and can decide w.h.p.\footnote{We say that an event holds with high probability (w.h.p.) if it holds with probability at least $1-1/n^c$, where $c$ is an arbitrarily large constant.}
if $\kappa_{G}\ge k$. If $\kappa_{G}<k$,
then the algorithms also return the corresponding separator $S\subset V$,
i.e. a set $S$ where $|S|=\kappa_{G}$ and $G[V-S]$ is not
connected if $G$ is undirected and not strongly connected if
$G$ is directed. The algorithms take
$\tilde{O}(m+k^{7/3}n^{4/3})$ and $\tilde{O}(
\min(km^{2/3}n, km^{4/3} ))$ time on undirected and directed
graphs, respectively.
\end{theorem}
Our bounds are the {\em first} $o(n^2)$ bounds for the range $4\leq k \leq o(n^{2/7})$ on undirected graphs and the range $3\leq k \leq o(n/m^{2/3})$ on directed graphs.
Our algorithms are combinatorial, meaning that they do not rely on fast matrix multiplication.
For all range of $k$ that our algorithms support,
i.e. $k=O(\sqrt{n})$, our algorithms
improve upon the previous best combinatorial algorithms by Henzinger~et~al.~\cite{HenzingerRG00}, which take
time $\tilde{O}(kn^{2})$ on undirected graphs and $\tilde{O}(mn)$
on directed graphs\footnote{As $k\le\sqrt{n}$ and $m\ge nk$, we have
$k\le m^{1/3}$. So $km^{2/3}n\le mn$.}.
Comparing with the $\tilde{O}(n^{\omega}+nk^{\omega})$ bound based on algebraic techniques by Linial~et~al.~\cite{LinialLW88}
and Cheriyan and Reif~\cite{CheriyanR94}, our algorithms are faster on undirected graphs
when $k\le n^{3\omega/7-4/7}\approx n^{0.44}$.
For directed graphs, our algorithm is faster for a range of $k$
that depends on the graph density. For example, consider the interesting case where the graph is sparse but can still be $k$-connected, i.e., when $m = O(nk)$.
Then ours is faster than \cite{CheriyanR94} for any $k\le n^{0.44}$, as in the undirected case.
However, in the dense case when $m = \Omega(n^2)$, ours is faster than
\cite{CheriyanR94} for any $k \leq n^{\omega-7/3}\approx n^{0.039}$.
To conclude, our bounds are lower than all previous bounds when $4\leq
k \leq n^{0.44}$ for undirected graphs and $3\leq k \leq n^{0.44}$ for directed sparse graphs (i.e. when $m = O(nk)$).
All these bounds \cite{HenzingerRG00,LinialLW88,CheriyanR94} have not been broken for over $20$ years.
In the simplest case where $m = O(n)$ and, hence, $k= O(1)$, we break the 49-year-old $O(n^2)$ bound \cite{Kleitman1969methods}
down to $\tilde{O}(n^{4/3})$ for both undirected and directed graphs.
\paragraph{Approximation algorithms.}
We can adjust the same techniques to $(1+\epsilon)$-approximate
$\kappa_{G}$ with a faster
running time. In addition, we give another algorithm using a different technique that can $(1+\epsilon)$-approximate $\kappa_{G}$
in $\tilde{O}(n^{\omega}/\epsilon^{2})$ time.
We define the function $T_{\text{flow}}(k,m,n)$ as
\begin{equation} \label{eq:exact-directed-time-intro}
T_{\operatorname{flow}}(k,m,n) = \left\{ \begin{array}{rl}
\min(m^{4/3}, nm^{2/3}k^{1/2} , mn^{2/3+o(1)}/k^{1/3}, n^{7/3+o(1)}/k^{1/6} ) &\mbox{ if $ k \leq n^{4/5}$,} \\
n^{3+o(1)}/k &\mbox{ if $k > n^{4/5}$. }
\end{array} \right.
\end{equation}
\begin{theorem}[Approximation Algorithm]\label{thm:intro:approx}
There is a randomized (Monte Carlo)
algorithm that takes as input an $n$-node $m$-edge graph $G=(V,E)$ and w.h.p. outputs $\tilde{\kappa}$,
where $\kappa_{G}\le\tilde{\kappa}\le(1+\epsilon)\kappa_{G}$, in
$\tilde{O} ( m+\operatorname{poly}(1/\epsilon) \min( k^{4/3} n^{4/3}, k^{2/3}
n^{5/3+o(1)}, n^{3+o(1)}/k, n^{\omega} )) = \tilde{O}(\min\{n^{2.2},n^{\omega}\})$ time
for undirected graphs, and in $\tilde{O} (\operatorname{poly}(1/\epsilon)
\min(T_{\operatorname{flow}}(k,m,n), n^{\omega}) ) = \tilde{O}(\min\{n^{7/3},n^{\omega}\})$
time for directed graphs, where $T_{\operatorname{flow}}(k,m,n)$ is defined in
\Cref{eq:exact-directed-time-intro}.
The algorithm also returns a pair of nodes $x$ and $y$
where $\kappa(x,y)=\tilde{\kappa}$. Hence, with additional $O(m\min\{\sqrt{n},\tilde{\kappa}\})$ time, the algorithm can compute the corresponding separator.
\end{theorem}
As noted earlier, previous algorithms achieve $2$-approximation in $O(\min\{\sqrt{n},k\}n^{2})$-time \cite{Henzinger97} and $O(\log n)$-approximation in $\tilde O(m)$ time \cite{Censor-HillelGK14}. For all possible values of $k$, our algorithms are strictly faster than the $2$-approximation algorithm of \cite{Henzinger97}.
Our approximation algorithms are also strictly faster than all
previous exact algorithms with current matrix multiplication time (and
are never slower even if $\omega < 2.2$).
In particular, even when
$\epsilon = 1/n^{\gamma}$ for a small constant $\gamma>0$,
our algorithms are always polynomially faster than the exact algorithms
by \cite{HenzingerRG00} with running time $\tilde{O}(mn)$ and $\tilde{O}(kn^{2})$
on directed and undirected graphs, respectively. Compared with the bound $\tilde{O}(n^{\omega}+nk^{\omega})$
by \cite{LinialLW88} and \cite{CheriyanR94}, our bounds for undirected
and directed graphs are $\tilde{O}(\min\{n^{2.2},n^{\omega}\})$ and
$\tilde{O}(\min\{n^{7/3}, n^\omega \})$, respectively, for any density; both are below the current matrix multiplication time.
Finally, note that the previous approximation algorithms \cite{Henzinger97,Censor-HillelGK14} only work on undirected graphs, while our algorithms also handle directed graphs.
\subsection{The Key Technique}
At the heart of our main result in \Cref{thm:exact} is a new \emph{local
algorithm} for finding minimum vertex cuts. In general, we say that
an algorithm is \emph{local} if its running time does not depend
on the size of the whole input.
More concretely, let $G=(V,E)$ be a directed graph
where each node $u$ has out-degree $\deg^{\operatorname{out}}(u)$. Let $\deg_{\min}^{\operatorname{out}}=\min_{u}\deg^{\operatorname{out}}(u)$
be the minimum out-degree. For any set $S\subset V$, the \emph{out-volume}
of $S$ is $\operatorname{vol}^{\operatorname{out}}(S)=\sum_{u\in S}\deg^{\operatorname{out}}(u)$ and the set
of \emph{out-neighbors} of $S$ is $N^{\operatorname{out}}(S)=\{v\notin S\mid(u,v)\in E\}$.
We show the following algorithm (see \Cref{thm:local-vertex-connectivity} for more
detail):
\begin{theorem}
[Local vertex connectivity (informal)]\label{thm:local informal}
There is a deterministic algorithm that takes as inputs
a node $x$ in a graph $G$ and parameters $\nu$ and $k$ where $\nu,k$ are not too large, and in $\tilde{O}(\nu^{1.5}k)$ time
either
\begin{enumerate}
\item returns a set $S\ni x$ where
$|N^{\operatorname{out}}(S)|\le k$,
or
\item certifies that there is no set $S\ni x$ such that $\operatorname{vol}^{\operatorname{out}}(S)\le\nu$
and $|N^{\operatorname{out}}(S)|\le k$.
\end{enumerate}
\end{theorem}
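In code, the contract of such a procedure can be summarized as follows (a Python interface sketch only; the function name is ours and the body is a placeholder for the algorithm developed in \Cref{sec:local_flow}).
\begin{verbatim}
def local_vertex_cut(adj_out, x, nu, k):
    """Contract of the local procedure (interface sketch only).

    adj_out -- out-adjacency lists; only the explored part is read.
    x       -- seed node.
    nu, k   -- target volume and target cut size.

    Either returns a set S with x in S and |N_out(S)| <= k, or
    returns None, certifying that no S containing x satisfies both
    vol_out(S) <= nu and |N_out(S)| <= k.
    Intended running time: roughly O(nu^{1.5} * k) up to log factors.
    """
    raise NotImplementedError  # placeholder; see the local flow section
\end{verbatim}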
Our algorithm is the first local algorithm for finding small vertex
cuts (i.e. finding a small separator $N^{\operatorname{out}}(S)$). The algorithm
either finds a separator of size at most $k$, or certifies that no
separator of size at most $k$ exists ``near'' some node $x$.
Our algorithm is exact in the sense that there is no gap on the cut size $k$ in the two cases.
Previously, there was a rich literature on local algorithms for finding
\emph{low conductance cuts}\footnote{The conductance of a cut $(S,V-S)$ is defined as $\Phi(S)=\frac{|E(S,V-S)|}{\min\{\operatorname{vol}(S),\operatorname{vol}(V-S)\}}$.},
which is a different problem from ours. The study was initiated by Spielman
and Teng \cite{SpielmanT04} in 2004. Since then, deep techniques
have been further developed, such as \emph{spectral-based} techniques\footnote{They are algorithms based on some random-walk or diffusion process.}
(e.g.
\cite{SpielmanT13,AndersenCL06,AndersenL08,AndersenP09,GharanT12})
and newer \emph{flow-based} techniques \cite{OrecchiaZ14,HenzingerRW17,WangFHMR17,VeldtGM16}.
Applications of these techniques for finding low conductance cuts
are found in various contexts (e.g. balanced cuts \cite{SpielmanT13,SaranurakW19}),
edge connectivity \cite{KawarabayashiT15,HenzingerRW17}, and dynamically
maintaining expanders \cite{Wulff-Nilsen17,NanongkaiS17,NanongkaiSW17,SaranurakW19}.
It is not clear a priori that these previous techniques can be used
for proving \Cref{thm:local informal}. First of all, they were invented to
solve a different problem, and there are several small differences in
technical input-output constraints. More importantly, there is a conceptual difference, as follows. In most previous
algorithms, there is a ``gap'' between the two cases of the guarantees.
That is, if in one case the algorithm can return a cut $S\ni x$ whose conductance
is at most $\phi\in(0,1)$, then in the other case the algorithm can only guarantee that
there is no cut ``near'' $x$ with conductance $\alpha\phi$, for some $\alpha=o(1)$
(e.g. $\alpha = O(\phi)$ or $O(1/\log n)$)\footnote{The algorithms from \cite{KawarabayashiT15,HenzingerRW17} in fact do not
guarantee the non-existence of low conductance cuts in the second case;
their guarantee is instead about minimum cuts.}.
Because of these differences, not many existing techniques can be adapted to design a local algorithm for vertex connectivity. In fact, we are not aware of any spectral-based algorithms that can solve this problem, even when we can read the whole graph.
Fortunately, it turns out that \Cref{thm:local informal} can be proved
by adapting some recent flow-based techniques. In general, a challenge in designing flow-based algorithms is to achieve the following goals simultaneously.
\begin{enumerate}
\item Design some well-structured graph so that finding flows on this graph is useful for our application (proving \Cref{thm:local informal} in this case).
We call such a graph an \emph{augmented graph}.
\item At the same time, design a local flow-based algorithm that is fast when running on the augmented graph.
\end{enumerate}
For the first task, the design of the augmented graph requires some careful choices (see \Cref{sub:overview flow} for the high-level ideas and \Cref{sec:augmented graph} for details).
For the second task, it turns out that previous
flow-based local algorithms \cite{OrecchiaZ14,HenzingerRW17,WangFHMR17,VeldtGM16}
can be adjusted to give useful answers for our applications when run on our augmented graph.
However, these previous algorithms only give a slower running time of at least
$\tilde{O}((\nu k)^{1.5})$.
To obtain the $\tilde{O}(\nu^{1.5}k)$ bound, we first speed up
the Goldberg-Rao max flow algorithm \cite{GoldbergR98} from running time
$\tilde{O}(m\min\{\sqrt{m},n^{2/3}\})$ to $\tilde{O}(m\sqrt{n})$ when run on graphs with a certain structure.
Then, we ``localize'' this algorithm in a similar manner
as in \cite{OrecchiaZ14}, which completes our second task (see \Cref{sub:overview flow} for more discussion).\footnote{Update: In the earlier version, we claimed that we could achieve a deterministic algorithm for the $s$-$t$ vertex connectivity problem on weighted directed graphs as a by-product. This claim is not correct, but this does not affect our main result.}
Given the key local algorithm in \Cref{thm:local informal}, we obtain
\Cref{thm:exact,thm:intro:approx} by combining our local algorithms with other known techniques including
random sampling, the Ford-Fulkerson algorithm, Nagamochi and Ibaraki's connectivity
certificate \cite{NagamochiI92}, and convex embedding \cite{LinialLW88,CheriyanR94}.
We sketch how everything fits together in \Cref{sec:overview}.
\subsection{Updates}
After this paper first appeared in STOC 2019 \cite{NanongkaiSY-stoc19}, new local vertex connectivity algorithms were independently developed in \cite{NanongkaiSY-preSODA20} and \cite{ForsterY-preSODA20} (see \cite{ForsterNSYY-soda20} for the merged version). These algorithms are simpler than our local algorithm. Combining them with our framework leads to a nearly-linear time algorithm for the vertex connectivity problem when the connectivity is $O(\operatorname{poly}\log n)$. The new algorithm is still slower than the algorithm in this paper when the input graph is directed and dense and $k$ is moderately large; for $(1+\epsilon)$-approximation, the new algorithm is still slower when $k$ is moderately large for both undirected and directed graphs.
More recently, \cite{LiNPSY-stoc21} shows that vertex connectivity can be solved in roughly the time to compute a max-flow. This improves the algorithm of \cite{HenzingerRG00} when the vertex connectivity is high.
\section{Overview}
\label{sec:overview}
\subsection{Exact Algorithm}
\label{sub:overview exact}
To illustrate the main idea, let us sketch our algorithm with running time $\tilde{O}(m+n^{4/3})$ only on
an {\em undirected graph} with $m=O(n)$ and $k=O(1)$.
This regime is already very interesting, because the
best bound has been $\tilde{O}(n^{2})$ for nearly 50 years \cite{Kleitman1969methods}.
Throughout this section, $N(C)$ is the set of neighbors of nodes in $C\subseteq V$ that are not in $C$, and $E_G(S, T)$ is the set of edges between (not necessarily disjoint) vertex sets $S$ and $T$ in $G$ (the subscript is omitted when the context is clear). A vertex partition $(A,S,B)$ is called a {\em separation triple} if $A, B\neq \emptyset$ and there is no edge between $A$ and $B$, i.e., $N(A)=S=N(B)$.
Given a graph $G=(V,E)$ and a parameter $k$, our goal is to either return
a set $C\subset V$ where $|N(C)| < k$ or certify that $\kappa_G\ge k$.
Our first step is to find a sparse subgraph $H$ of $G$ where $\kappa_H=\min\{\kappa_G,k\}$
using the algorithm by Nagamochi and Ibaraki \cite{NagamochiI92}.
The nice property of $H$ is that it is formed by a union of $k$
edge-disjoint forests, i.e. $H$ has \emph{arboricity} at most $k$. In particular,
for any set of nodes $C$, we have $|E_{H}(C,C)|\le k|C|$.
As the algorithm only takes linear time, from
now on we treat $H$ as our input graph $G$.
The next step has three cases. First, suppose there is a separation triple $(A,S,B)$
where $|S| < k$ and $|A|,|B|\ge n^{2/3}$. Here, we sample $\tilde{O}(n^{1/3})$
many pairs $(x,y)$ of nodes uniformly at random.
With high probability, one of these pairs is such that $x\in A$ and
$y\in B$.
In this case, it is well known (e.g. \cite{Even75}) that deciding whether $\kappa(x,y)<k$ reduces to a max $xy$-flow computation on a suitably modified (split) graph.
Thus, for each pair $(x,y)$, we run the Ford-Fulkerson max-flow algorithm in time $O(k m)=O(n)$
to decide whether $\kappa(x,y)<k$ and, if so, return the corresponding cut.
So w.h.p. the algorithm returns set $C$ where $|N(C)| < k$
in total time $\tilde{O}(n^{1+1/3})$.
The next case is when all separation triples $(A,S,B)$ where $|S| < k$
are such that either $|A|<n^{2/3}$ or $|B|<n^{2/3}$. Suppose w.l.o.g.
that $|A|<n^{2/3}$. By guessing $|A|$ in powers of $2$ (an $O(\log n)$ overhead), we can assume that we know the
size $|A|$ up to a factor of $2$. Here, we sample $\tilde{O}(n/|A|)$
many nodes uniformly at random. For each node $x$, we run the local vertex connectivity subroutine from \Cref{thm:local informal} where the parameter $k$ in \Cref{thm:local informal} is set to be $k-1$.
Note that the volume of $A$ is
$$\operatorname{vol}(A)=2|E(A,A)|+|E(A,S)|=O(k|A|)=O(|A|)$$
where the second equality is because
$G$ has arboricity $k$ and $|S| < k$ (also recall that we only consider $m=O(n)$ and $k=O(1)$ in this subsection). We set the parameter
$\nu=\Theta(|A|)$. With high probability, we have
that one of the sampled nodes $x$ is inside $A$. For such an $x$, the local algorithm
cannot output the second case (the certificate) and must return a set $C$ where $|N(C)| < k$,
which implies that $\kappa_G < k$. The total running time is
$\tilde{O}(n/|A|)\times\tilde{O}(|A|^{1.5})=\tilde{O}(n^{1+1/3})$
because $|A|<n^{2/3}$.
The last case is when $\kappa_G \ge k$. Here, neither the Ford-Fulkerson
algorithm nor the local algorithm can ever return a set $C$
where $|N(C)|< k$. So we can correctly report that $\kappa_G \ge k$.
All of our techniques generalize to the case when $\kappa_G$ is not constant.
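For concreteness, the following Python sketch (ours, using \texttt{networkx}) mirrors the case analysis above in the toy regime of a connected undirected graph with $m=O(n)$ and $k=O(1)$. The local routine is passed in as a callback whose contract matches \Cref{thm:local informal}; the naive stand-in supplied for self-containedness ignores the volume bound (which only makes it stronger) and is not local, so the sketch reproduces the logic of the algorithm but not the claimed running time.
\begin{verbatim}
import math
import random
import networkx as nx

def neighbors_of_set(G, S):
    return {v for u in S for v in G.neighbors(u)} - set(S)

def naive_local_cut(G, x, nu, k):
    # Non-local stand-in: look for any vertex cut of size < k and
    # return the side containing x.  The volume parameter nu is ignored.
    for y in G.nodes:
        if y == x or G.has_edge(x, y):
            continue
        cut = nx.minimum_node_cut(G, x, y)
        if len(cut) < k:
            H = G.copy()
            H.remove_nodes_from(cut)
            return nx.node_connected_component(H, x)  # side containing x
    return None

def connectivity_at_least_k(G, k, local_cut=naive_local_cut, seed=0):
    n = G.number_of_nodes()
    rng = random.Random(seed)
    nodes = list(G.nodes)
    reps = math.ceil(math.log2(n)) + 1      # w.h.p. via O(log n) trials

    # Case 1: some separator of size < k has both sides of size >= n^(2/3).
    for _ in range(reps * math.ceil(n ** (1 / 3))):
        x, y = rng.sample(nodes, 2)
        if G.has_edge(x, y):
            continue
        cut = nx.minimum_node_cut(G, x, y)
        if len(cut) < k:
            return False, cut               # kappa(G) < k, separator found

    # Case 2: every separator of size < k has a side A with |A| < n^(2/3);
    # guess |A| up to a factor of 2 and sample seeds likely to land in A.
    size = 1
    while size < n ** (2 / 3):
        nu = 4 * k * size                   # vol(A) = O(k|A|) by arboricity
        for _ in range(reps * math.ceil(n / size)):
            x = rng.choice(nodes)
            S = local_cut(G, x, nu, k)
            if S is not None:
                return False, neighbors_of_set(G, S)
        size *= 2

    return True, None                       # report kappa(G) >= k (w.h.p.)
\end{verbatim}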
\subsection{Local Vertex Connectivity}
\label{sub:overview flow}
In this section, we give a high-level idea how to obtain our local vertex
connectivity algorithm in \Cref{thm:local informal}. Recall from the
introduction that there are two tasks which are to design an \emph{augmented
graph} and to devise a \emph{local flow-based algorithm} running on
such augmented graph. We have two goals: 1) the running time of our algorithm is \emph{local};
i.e., it does not depend on the size of the whole graph and
2) the local flow-based algorithm's output should be useful for our
application.
\paragraph{The local time principles.}
We first describe high-level principles on how to design the augmented
graph and the local flow-based algorithm so that the running time
is local\footnote{In fact, these are also principles behind all previous local flow-based
algorithms. To the best of our knowledge, these general principles
have not been stated. We hope that they explain previous seemingly
ad-hoc results.}.
\begin{enumerate}
\item \emph{Augmented graph is absorbing}: Each node $u$ of the augmented
graph is a \emph{sink} that can ``absorb'' flow proportional to
its degree $\deg(u)$. More formally, each node $u$ is connected
to a \emph{super-sink} $t$ with an edge $(u,t)$ of capacity $\alpha\deg(u)$
for some constant $\alpha$. In our case, $\alpha=1$.
\item \emph{Flow algorithm tries to absorb before forwarding}: Suppose that
a node $u$ has not fully absorbed the flow yet, i.e. $(u,t)$ is not saturated.
When flow is routed to $u$, the local flow-based algorithm must
first send flow from $u$ to $t$ until the sink at $u$ is saturated,
before forwarding flow to other neighbors of $u$. Moreover,
the absorbed flow at $u$ stays at $u$ forever.
\end{enumerate}
We give some intuition behind these principles. The second principle
resembles the following physical process. Imagine pouring water on
a compartment of an ice tray. There cannot be water flowing out of
an unsaturated compartment until that compartment is saturated. So
if the amount of initial water is small, the process will stop way before
the water reaches the whole ice tray. This explains in principle why
the algorithm need not read the whole graph.
The first principle allows us to argue why the cost of the algorithm
is proportional to the part of the graph that is read. Very roughly,
the total cost for forwarding the flow from a node $u$ to its neighbors
depends on $\deg(u)$, but we forward flow out of $u$ only after the sink at $u$ has been fully saturated.
This allows us to charge the total
cost to the total amount of absorbed flow, which in turn is small
if the initial amount of flow is small.
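To make the two principles concrete, the following toy Python routine (ours, and emphatically not the algorithm of this paper) grows a region around a seed $x$ while absorbing up to $\alpha\deg(u)$ units of an initial mass at each visited node $u$; the work is proportional to the amount of initial mass, independent of the size of the whole graph.
\begin{verbatim}
from collections import deque

def absorbed_region(adj, x, initial_mass, alpha=1.0):
    """Toy illustration of the two principles (not this paper's algorithm).
    Starting with `initial_mass` units at x, grow a region in BFS order,
    letting each visited node u absorb alpha*deg(u) units before any
    further node is explored.  The number of nodes and edges touched is
    proportional to initial_mass/alpha, independent of the graph size."""
    remaining = float(initial_mass)
    visited, queue = {x}, deque([x])
    while queue and remaining > 0:
        u = queue.popleft()
        remaining -= alpha * len(adj[u])   # absorb at u before forwarding
        for v in adj[u]:
            if v not in visited:
                visited.add(v)
                queue.append(v)
    return visited

# Example: on a long path, a small initial mass stays near the seed.
path = {i: [j for j in (i - 1, i + 1) if 0 <= j < 1001] for i in range(1001)}
print(len(absorbed_region(path, 500, 10.0)))   # only a handful of nodes
\end{verbatim}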
\paragraph{Augmented graph. }
Let us show how to design the augmented graph in the context of \emph{edge
connectivity} in undirected graphs first. The construction is simpler
than the case of vertex connectivity, but already captures the main
idea. We then sketch how to extend this idea to vertex connectivity.
Let $G=(V,E)$ be an undirected graph with $m$ edges and $x\in V$
be a node. Consider any numbers $\nu, k>0$ such that
\begin{align}
2\nu k+\nu+1&\le2m. \label{eq:intro:localcut_condition}
\end{align}
We construct an undirected graph $G'$ as follows. The
node set of $G'$ is $V(G')=\{s\}\cup V\cup\{t\}$ where $s$ and
$t$ are a super-source and a super-sink, respectively. For each node
$u$, add $(u,t)$ with capacity $\deg_{G}(u)$. (So, this satisfies
the first local time principle.)
For each edge $(u,v)\in E$, set
the capacity to be $2\nu$. Finally, add an edge $(s,x)$ with capacity
$2\nu k+\nu+1$.
\begin{theorem}\label{thm:intro:split_graph}
Let $F^{*}$ be the value of the s-t
max flow in $G'$. We have the following:
\begin{enumerate}
\item If $F^{*}=2\nu k+\nu+1$, then there is no vertex partition $(S,T)$ in $G$ where
$S\ni x$, $\operatorname{vol}(S)\le\nu$ and $|E(S,V-S)|\le k$.
\item If $F^{*}\le2\nu k+\nu$, then there is a vertex partition $(S,T)$ in $G$ where
$S\ni x$ and $|E(S,V-S)|\le k$.
\end{enumerate}
\end{theorem}
\begin{proof}
To see (1), suppose for a contradiction that there is such a partition $(S,T)$
where $S\ni x$. Let $(S',T')=(\{s\}\cup S,T\cup\{t\})$. The edges crossing from $S'$ to $T'$ are the original edges between $S$ and $T$ (each of capacity $2\nu$) together with the edges $(u,t)$ for $u\in S$ (of capacity $\deg_G(u)$), so their total capacity is
$$c(E_{G'}(S',T'))=2\nu|E_{G}(S,V-S)|+\operatorname{vol}_{G}(S)\le2\nu k+\nu.$$
So $F^*\leq 2\nu k+\nu$,
a contradiction. To see (2), let $(S',T')=(\{s\}\cup S,T\cup\{t\})$
be a minimum $st$-cut in $G'$ corresponding to the max flow; by the max-flow min-cut theorem, the edges between $S'$ and $T'$ have total capacity
\begin{align}\label{eq:intro:localcut_proof2}
c(E_{G'}(S',T'))\leq 2\nu k+\nu.
\end{align}
Observe that
$S'\neq\{s\}$ and $S\ni x$ because the edge $(s,x)$ has capacity
strictly more than $2\nu k+\nu$. Also, $T'\neq\{t\}$
because the edges between $\{s\}\cup V$ and $\{t\}$ have total capacity $\operatorname{vol}(V)=2m>2\nu k+\nu$ (the inequality holds because of \Cref{eq:intro:localcut_condition}).
So $(S,T)$ gives a cut in $G$ where $S\ni x$. Suppose that $|E_{G}(S,T)|\ge k+1$,
then $c(E_{G'}(S',T'))\ge2\nu(k+1)=2\nu k+2\nu>2\nu k+\nu$ which
contradicts \Cref{eq:intro:localcut_proof2}.
\end{proof}
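For small examples, the construction and the dichotomy above can be checked directly with an off-the-shelf max-flow routine; the following Python sketch (ours, using \texttt{networkx} and hence not local) does exactly that.
\begin{verbatim}
import networkx as nx

def augmented_graph(G, x, nu, k):
    # Build G' for an undirected graph G (networkx.Graph); assumes G's
    # nodes are not named "_s" or "_t".
    Gp = nx.DiGraph()
    s, t = "_s", "_t"
    for u in G.nodes:
        Gp.add_edge(u, t, capacity=G.degree(u))      # absorbing sink edges
    for u, v in G.edges:
        Gp.add_edge(u, v, capacity=2 * nu)           # each original edge
        Gp.add_edge(v, u, capacity=2 * nu)           # gets capacity 2*nu
    Gp.add_edge(s, x, capacity=2 * nu * k + nu + 1)  # source edge
    return Gp, s, t

def small_cut_near_x(G, x, nu, k):
    """True  => there is a cut (S, V-S) with x in S and |E(S,V-S)| <= k.
       False => there is no such cut that additionally has vol(S) <= nu."""
    assert 2 * nu * k + nu + 1 <= 2 * G.number_of_edges()  # condition above
    Gp, s, t = augmented_graph(G, x, nu, k)
    value, _ = nx.maximum_flow(Gp, s, t)
    return value <= 2 * nu * k + nu
\end{verbatim}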
Observe that the above theorem is similar to \Cref{thm:local informal}
except that it is about edge connectivity. To extend this idea to
vertex connectivity, we use a standard transformation as used in \cite{EvenT75,HenzingerRG00}
by constructing a so-called \emph{split graph}. In our split graph,
for each node $v$, we create two nodes $v_{\text{in}}$ and $v_{\operatorname{out}}$.
For each edge $(u,v)$, we create an edge $(u_{\operatorname{out}},v_{\text{in}})$
with infinite capacity. There is an edge $(v_{\text{in}},v_{\text{out}})$
for each node $v$ as well. Observe that a cut set with finite capacity
in the split graph corresponds to a set of nodes in the original graph.
Then, we create the augmented graph of the split graph in a similar manner as above, e.g. by adding nodes $s$ and $t$ and an edge
$(s,x)$ with capacity $2\nu k+\nu+1$. The important point is that we
set the capacity of each $(v_{\text{in}},v_{\text{out}})$ to be $2\nu$.
The proof of \Cref{thm:local informal} (except the statement about
the running time) is similar as above (see \Cref{sec:augmented graph}
for details).
\paragraph{Local flow-based algorithm.}
As discussed in the introduction, we can in fact adapt previous local flow-based algorithms
to run on our augmented graph and they can decide the two cases in \Cref{thm:local informal}
(i.e. whether there is a small vertex cut ``near'' a seed node $x$).
\Cref{thm:intro:split_graph} in fact already allows us to achieve this with slower running time than the desired $\tilde O(\nu^{1.5}k)$ by implementing existing local flow-based algorithms.
For example, the algorithm by \cite{OrecchiaZ14}, which is a ``localized'' version of Goldberg-Rao algorithm \cite{GoldbergR98},
can give a slower running time of $\tilde O((\nu k)^{1.5})$.
Other previous local flow-based algorithms that we are aware of (e.g. \cite{OrecchiaZ14,HenzingerRW17,WangFHMR17,VeldtGM16}) give even slower running time (even after appropriate adaptations).
We can speed up the time to $\tilde O(\nu^{1.5}k)$ by exploiting the fact that
our augmented graph is created from a split graph sketched above.
This improvement resembles the idea by Hopcroft and Karp \cite{HopcroftK73} (see also \cite{EvenT75,Karzanov1974determining}) which yields
an $O(m\sqrt{n})$-time algorithm for computing $s$-$t$ {\em unweighted} vertex connectivity.
The idea is to show that Dinic's algorithm with running time $O(m\min\{\sqrt{m},n^{2/3}\})$ on a general unit-capacity graph
can be sped up to $\tilde{O}(m\sqrt{n})$ when run on a special graph called ``unit network''.
It turns out that unit networks share some structures with our split graphs, allowing us to apply a similar idea.
Although our improvement is based on a similar idea, it is more complicated to implement this idea on our split graph since it is weighted.
Finally, we ``localize'' our improved algorithm by enforcing the second local time principle.
Our way to localize the algorithm goes hand in hand with the way Orecchia
and Zhu \cite{OrecchiaZ14} did to the standard Goldberg-Rao algorithm
(see \Cref{sec:local binary blocking flow} for details).
\section{Preliminaries}\label{sec:prelim}
\subsection{Directed Graph}
Let $G = (V,E)$ be a \textit{directed} graph where $|V| = n$ and $|E|
= m$. We assume that $G$ is strongly connected; otherwise $\kappa_G=0$, and we can detect this by listing
all strongly connected components in linear time with a standard
textbook algorithm. We also assume that $G$ is \textit{simple}. That is, $G$
does not have a duplicate edge. Otherwise, we can simplify the graph
in linear time by removing duplicate edges.
For any edge $(u,v)$, we denote $e^{R} = (v,u)$. For any directed
graph $G = (V,E)$, the {\em reverse graph} $G^{R}$ is $G^R = (V, E^R)$ where
$E^R = \{ e^R \colon e \in E \}$.
\begin{definition}[$\delta$, $\deg$, $\operatorname{vol}$, $N$]
The definitions below apply to any vertex $v$ of a graph $G$ and any subset of vertices $U \subseteq V$.
\begin{itemize}[noitemsep,nolistsep]
\item $\delta_G^{\text{in}}(v) = \{(u, v)\in E\} $ and
$\delta_G^{\text{in}}(U) = \{(x, y)\in E \colon x\notin U, y\in U\}$; i.e. they are the sets of edges entering $v$ and $U$, respectively.
\item Analogously, $\delta_G^{\text{out}}(v)$ and $\delta_G^{\text{out}}(U)$ are the sets of edges leaving $v$ and $U$ respectively.
\item $\deg_G^{\text{in}}(v) = |\delta_G^{\text{in}}(v)|$ and $\deg_G^{\text{out}}(v) = |\delta_G^{\text{out}}(v)|$; i.e. they are the numbers of edges entering and leaving $v$ respectively.
\item
$\text{vol}_G^{\text{out}}(U) = \sum_{v \in U}
\text{deg}_G^{\text{out}}(v)$ and $\text{vol}_G^{\text{in}}(U)= \sum_{v \in U}
\text{deg}_G^{\text{in}}(v)$. Note that $\text{vol}_G^{\text{in}}(V) =
\text{vol}_G^{\text{out}}(V) = m$.
\item $N_G^{\text{in}}(v) = \{u \colon (u,v)\in E\}$ and $N_G^{\text{out}}(v) = \{u \colon (v, u)\in E\}$; i.e. they are sets of in- and out-neighbors of $v$, respectively.
\item $N_G^{\text{in}}(U) = \bigcup_{v \in U}
N_G^{\text{in}}(v) \setminus U $ and $N_G^{\text{out}}(U) =
\bigcup_{v \in U} N_G^{\text{out}}(v) \setminus U$. Call
these sets {\em external in-neighborhood of $U$} and {\em
external out-neighborhood of $U$}, respectively. \qedhere
\end{itemize}
\end{definition}
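For example, with an out-adjacency-list representation these quantities can be computed as follows (a small illustrative Python sketch; the helper names are ours).
\begin{verbatim}
def out_deg(adj_out, v):
    return len(adj_out[v])                    # deg^out(v)

def out_vol(adj_out, U):
    return sum(len(adj_out[v]) for v in U)    # vol^out(U)

def out_neighborhood(adj_out, U):
    U = set(U)
    # external out-neighborhood N^out(U): out-neighbors of U outside U
    return {w for v in U for w in adj_out[v]} - U

# Example on a small directed graph given as a dict of out-neighbor lists.
adj_out = {1: [2, 3], 2: [3], 3: [1], 4: [1]}
assert out_vol(adj_out, {1, 2}) == 3
assert out_neighborhood(adj_out, {1, 2}) == {3}
\end{verbatim}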
\begin{definition}[Subgraphs]
For a set of vertices $U \subseteq
V$, we denote $G[U]$ as a subgraph of $G$ induced by $U$. Denote for
any vertex $v$, any subset of vertices $U \subseteq V$, any edge $e
\in E$, and any subset of edges $F \subseteq E$,
\begin{itemize}[noitemsep,nolistsep]
\item $G \setminus v = G[V \setminus \{ v\}]$,
\item $G \setminus U = G[V \setminus U]$,
\item $G
\setminus e = (V, E\setminus \{ e \}), $ and
\item $ G \setminus F = (V,
E\setminus F)$.
\end{itemize}
We say that these graphs \textit{arise} from $G$ by deleting
$v, U, e,$ and $F$, respectively.
\end{definition}
\begin{definition}[Paths and reachability]
For $s,t \in V$, we say a path $P$ is an {\em $(s,t)$-path}
if $P$ is a directed path
starting from $s$ and ending at $t$.
For any $S, T \subseteq V$, we say
$P$ is an {\em $(S,T)$-path} if $P$ starts with some vertex in $S$ and ends
at some vertex in $T$.
We say that a vertex $t$ is \textit{reachable} from
a vertex $s$ if there exists an $(s,t)$-path $P$.
Moreover, if a node $v$ lies on such a path $P$, then we say that $t$ is reachable from $s$ {\em via $v$}.
\end{definition}
\begin{definition}[Edge- and
Vertex-cuts] \label{def:edge-or-vertex-cuts}Let $s$ and $t$ be any
distinct vertices. Let $S, T \subset V$ be any disjoint non-empty subsets of vertices. We call any subset of edges $C \subseteq E$ (respectively any subset of vertices $U\subseteq V$):
\begin{itemize}[noitemsep,nolistsep]
\item an {\em $(S,T)$-edge-cut} (respectively an {\em
$(S,T)$-vertex-cut} ) if there is no $(S,T)$-path in $G\setminus
C$ (respectively if there is no $(S,T)$-path in $G\setminus U$ {\em
and} $S \cap U = \emptyset, T \cap U = \emptyset$),
\item an {\em $(s,t)$-edge-cut} (respectively an {\em $(s,t)$-vertex-cut} ) if there is no $(s,t)$-path in $G\setminus C$ (respectively if there is no $(s,t)$-path in $G\setminus U$ {\em and} $s, t\notin U$),
\item an {\em $s$-edge-cut} (respectively {\em $s$-vertex-cut}) if it is an $(s,t)$-edge-cut (respectively $(s,t)$-vertex-cut) for some vertex $t$, and
\item an {\em edge-cut} (respectively {\em vertex-cut}) if it is an $(s,t)$-edge-cut (respectively $(s,t)$-vertex-cut) for some distinct vertices $s$ and $t$. In other words, $G\setminus C$ (respectively $G\setminus U$) is not strongly connected.
\end{itemize}
If the
graph has capacity function $c : E \rightarrow
\mathbb{R}_{\geq 0}$ on edges, then $c(C)= \sum_{e \in C} c_e$ is the total
capacity of the cut $C$.
\end{definition}
\begin{definition} [Edge set]
We define $E(S,T)$ as the set of edges $\{ (u,v) \colon u \in S, v \in T\}$.
\end{definition}
\begin{definition}[Vertex partition]\label{def:vertex_partition}
Let $S, T \subset V$. We say that $(S,T)$ is a \textit{vertex
partition} if $S$ and $T$ are not empty, and $S \sqcup T = V$. In
particular, $E(S,T)$ is an $(x,y)$-edge-cut for some $x \in S, y \in
T$.
\end{definition}
\begin{definition}[Separation triple]\label{def:separation_triple}
We call $(L,S,R)$ a \textit{separation triple} if $L, S,$ and $R$
partition the vertex set $V$ of $G$, where $L$ and $R$ are non-empty and
there is no edge from $L$ to $R$.
\end{definition}
Note that, from the above definition, $S$ is an $(x,y)$-vertex-cut for
any $x \in L$ and $y \in R$.
\begin{definition}[Shore] \label{def:shore}
We call a set of vertices $S\subseteq V$ an {\em out-vertex shore} (respectively {\em in-vertex shore}) if
$N_G^{\text{out}}(S)$ (respectively $N_G^{\text{in}}(S)$) is a
vertex-cut.
\end{definition}
\begin{definition}[Vertex connectivity $\kappa$]\label{def:VertexConnectivity}
We define the vertex connectivity $\kappa_G$ as the
minimum cardinality of a vertex-cut, or $n-1$ if no vertex-cut exists. More
precisely, for distinct $x,y \in V$, define $\kappa_G(x,y)$ as the smallest cardinality of an $(x,y)$-vertex-cut if
one exists. Otherwise, we define $\kappa_G(x,y) = n-1$. Then, $\kappa _G=
\min \{ \kappa_G (x,y) \text{ } | \text{ } x,y \in V, x\neq y \}$. We
drop the subscript when $G$ is clear from the context.
\end{definition}
Let $d^{\operatorname{out}}_{\text{min}} = \min_v \text{deg}_G^{\text{out}}(v)$ and let $v_{\text{min}}$ be
any vertex whose out-degree is $d^{\operatorname{out}}_{\text{min}}$. If $d^{\operatorname{out}}_{\text{min}} = n - 1$, then $G$ is complete, meaning that $\kappa_G =
n-1$. Otherwise, $\delta_G^{\text{out}}(v_{\text{min}})$ is a
vertex-cut. Hence, $\kappa_G \leq |\delta_G^{\text{out}}(v_{\text{min}})| =
d^{\operatorname{out}}_{\text{min}}$. So, we have the following observation.
\begin{observation} \label{obs:kappa-degree}
$\kappa_G \leq d^{\operatorname{out}}_{\min}$.
\end{observation}
\begin{proposition} [\cite{HenzingerRG00}]
There exists an algorithm that takes as input a graph $G$, two vertices $x,y
\in V$ and an integer $k > 0$, and in $\tilde{O}(\min(k,\sqrt{n})\, m)$ time outputs either an
out-vertex shore $S$ containing $x$ with $|N_G^{\text{out}}(S)| =
\kappa_G(x,y) \leq k$ such that $y$ is in the corresponding in-vertex shore, or a ``$\perp$''
symbol indicating that no such shore exists and thus $\kappa_G(x,y) > k$.
\label{pro:easyff}
\end{proposition}
\subsection{Undirected Graph}
Let $G = (V,E)$ be an undirected graph. We assume that $G$ is simple,
and connected.
\begin{theorem} [\cite{NagamochiI92}] \label{thm:sparsification}
There exists an algorithm that takes as input an undirected graph $G =
(V,E)$, and in $O(m)$ time outputs a sequence of forests $F_1, F_2,
\ldots, F_n$ such that each subgraph $H_k= (V,
\bigcup_{i=1}^kF_i)$ is $k$-connected if $G$ is $k$-connected. $H_k$
has arboricity $k$. For any set of vertices $S$, we have $|E_{H_k}(S,S)|
\leq k|S|$. In particular, the number of edges in $H_k$ is at most $kn$.
\end{theorem}
To compute vertex connectivity in an undirected graph, we turn it into
a directed graph by adding edges in forward and backward directions
and run the directed vertex connectivity algorithm.
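In code, this transformation is immediate; for example, with \texttt{networkx}:
\begin{verbatim}
import networkx as nx

G = nx.path_graph(5)      # small undirected example
D = G.to_directed()       # each edge {u,v} becomes arcs (u,v) and (v,u)
assert D.number_of_edges() == 2 * G.number_of_edges()
\end{verbatim}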
\section{Local Vertex Connectivity}
\label{sec:local_flow}
Throughout this section, $G = (V,E)$ is a strongly connected directed graph with $|V| = n$ and $|E| = m$.
\begin{theorem}
There is an algorithm that takes as input a pointer to any vertex $x \in
V$ in
an adjacency list representing a strongly-connected directed graph $G=(V, E)$,
positive integer $\nu$ (``target volume''), positive integer $k$ (``target
$x$-vertex-cut size''), and positive real $\epsilon$ satisfying
\begin{align}\label{eq:local_condition}
\nu/ \epsilon + \nu < m, \quad (1+\epsilon)( \frac{2\nu}{\epsilon k}+ k)
< n \quad \mbox{and}\quad \deg_{\min}^{\operatorname{out}} \geq k
\end{align}
or,
\begin{align}\label{eq:local_condition_dense}
\nu/\epsilon + (1+\epsilon) nk < m, \quad \mbox{and}\quad \deg_{\min}^{\operatorname{out}} \geq
k \end{align}
and in $\tilde{O}( \frac{\nu^{3/2}}{\epsilon^{3/2} k^{1/2}})$ time outputs either
\begin{itemize}[noitemsep,nolistsep]
\item a vertex-cut $S$ corresponding to the separation triple $(L,S,R), x \in L$ such that
\begin{align}\label{eq:found_cut}
|S| \leq (1+\epsilon) k \quad \mbox{and}\quad
\operatorname{vol}_G^{\text{out}}(L) \leq \nu/\epsilon + \nu +1 , \mbox{or}
\end{align}
\item the ``$\perp$'' symbol indicating that there is no
separation triple $(L,S,R), x \in L$ such that
\begin{align}\label{eq:unfound_cut}
|S| \leq k \quad\mbox{and}\quad \operatorname{vol}_G^{\text{out}}(L) \leq \nu.
\end{align}
\end{itemize}
\label{thm:local-vertex-connectivity}
\end{theorem}
By setting $\epsilon = 1/(2k)$, we obtain an exact guarantee on the size
of the vertex-cut. Indeed, the bound in \Cref{eq:found_cut} becomes $|S|
\leq (1+ 1/(2k))k = k + 1/2$, so $|S| \leq k$ since $|S|$ and $k$ are
integers.
\begin{corollary} \label{cor:exact-local-vertex-connectivity}
There is an algorithm that takes as input a pointer to any vertex $x \in
V$ in
an adjacency list representing a strongly-connected directed graph $G=(V, E)$,
positive integer $\nu$ (``target volume''), and positive integer $k$ (``target
$x$-vertex-cut size'') satisfying
\Cref{eq:local_condition} or \Cref{eq:local_condition_dense} with
$\epsilon = 1/(2k)$,
and in $\tilde{O}( \nu^{3/2}k )$ time outputs either
\begin{itemize}[noitemsep,nolistsep]
\item a vertex cut $S$ corresponding to the separation triple $(L,S,R), x \in L$ such that
\begin{align}\label{eq:exact_found_cut}
|S| \leq k \quad \mbox{and}\quad
\operatorname{vol}_G^{\text{out}}(L) \leq 2\nu k + \nu +1 , \mbox{or}
\end{align}
\item the ``$\perp$'' symbol indicating that there is no
separation triple $(L,S,R), x \in L$ such that
\begin{align}\label{eq:exact_unfound_cut}
|S| \leq k \quad\mbox{and}\quad \operatorname{vol}_G^{\text{out}}(L) \leq \nu.
\end{align}
\end{itemize}
\end{corollary}
The rest of this section is devoted to proving the above theorem. For
the rest of this section, we fix $x$, $\nu$, $k$ and $\epsilon$ as in the theorem statement.
\subsection{Augmented Graph and Properties}
\label{sec:augmented graph}
\begin{definition} [Augmented Graph $G'$] \label{def:aug_graph}
Given a directed uncapacitated graph $G= (V,E)$, we define a directed capacitated graph $(G',c_{G'}) =
((V',E'), c_{G'})$ where
\begin{align}\label{eq:aug_graph}
V' = V_{\text{in}} \sqcup V_{\text{out}} \sqcup \{ s,t\} \quad\mbox{and}\quad E' = E_\nu \sqcup E_\infty \sqcup
E_{\text{deg}} \sqcup \{ (s,x_{\operatorname{out}}) \},
\end{align}
where $\sqcup$ denotes disjoint union of sets, $s$ and $t$ are additional vertices not in $G$, and sets in \Cref{eq:aug_graph} are defined as follows.
\begin{itemize}[noitemsep,nolistsep]
\item For each vertex $v \in V \setminus \{ x \} $, we create vertex
$v_{\text{in}}$ in set $V_{\text{in}}$ and $v_{\text{out}}$ in set $V_{\text{out}} $. For the vertex $x$, we add only $x_{\operatorname{out}}$ to $V_{\operatorname{out}}$.
\item $E_{\nu} = \{ (v_{\text{in}}, v_{\text{out}}) \colon v \in V \setminus \{ x \}\}$.
\item $E_{\infty} = \{ (v_{\text{out}}, w_{\text{in}}) \colon (v,w) \in E \} $.
\item $E_{\text{deg}} = \{ (v_{\text{out}},t) \colon v_{\text{out}} \in V_{\text{out}} \} $.
\end{itemize}
Finally, we define the capacity function $c_{G'} : E' \rightarrow
\mathbb{R}_{\geq 0} \cup \{ \infty \}$ as:
\begin{equation*}
c_{G'}(e) = \begin{cases}
\nu/ (\epsilon k) & \text{if } e = (v_{\text{in}}, v_{\text{out}}) \in E_\nu \\
\text{deg}_G^{\text{out}}(v) & \text{if }
e = (v_{\text{out}}, t) \in E_{\text{deg}} \\
\nu/\epsilon + \nu + 1 & \text{if } e = (s, x_{\operatorname{out}}) \\
\infty & \text{otherwise}
\end{cases}
\end{equation*}
\end{definition}
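For small instances, $(G',c_{G'})$ can be materialized directly as follows (a Python sketch using \texttt{networkx}; it builds the whole graph and therefore ignores locality, and is meant only for sanity-checking \Cref{lem:aug_graph_properties} below). Edges of $E_\infty$ are given no capacity attribute, which \texttt{networkx} treats as unbounded, and arcs entering the seed $x$ are dropped since $x$ has no in-copy (our reading of the definition).
\begin{verbatim}
import networkx as nx

def build_augmented_graph(adj_out, x, nu, k, eps):
    # adj_out: directed graph as a dict of out-neighbor lists.
    Gp = nx.DiGraph()
    s, t = "_s", "_t"
    vin = lambda v: ("in", v)
    vout = lambda v: ("out", v)
    for v, outs in adj_out.items():
        if v != x:
            Gp.add_edge(vin(v), vout(v), capacity=nu / (eps * k))  # E_nu
        Gp.add_edge(vout(v), t, capacity=len(outs))                # E_deg
        for w in outs:
            if w == x:
                continue   # x has no x_in; arcs into the seed are dropped
            Gp.add_edge(vout(v), vin(w))                           # E_infty
    Gp.add_edge(s, vout(x), capacity=nu / eps + nu + 1)
    return Gp, s, t

# Checking the dichotomy of the lemma below on a small instance:
#   Gp, s, t = build_augmented_graph(adj_out, x, nu, k, eps)
#   value, _ = nx.maximum_flow(Gp, s, t)
#   cut_near_x = (value <= nu / eps + nu)
\end{verbatim}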
\begin{lemma}\label{lem:aug_graph_properties}
Let $C^*$ be the minimum-capacity $(s,t)$-cut in $G'$. Recall
that $c_{G'}(C^*)$ is its capacity and $\nu$ and $k$ satisfy
\Cref{eq:local_condition} or \Cref{eq:local_condition_dense} .
\begin{enumerate}[noitemsep,nolistsep,label=(\Roman*)]
\item \label{item:aug_graph_properties_one} If there exists a
separation triple $(L,S,R), x \in L$ in $G$
satisfying \Cref{eq:unfound_cut},
then $c_{G'}(C^*) \leq \nu/\epsilon + \nu$.
\item \label{item:aug_graph_properties_two} If $c_{G'}(C^*)
\leq \nu / \epsilon + \nu$, then there exists a separation
triple $(L,S,R), x \in L$ in $G$ satisfying \Cref{eq:found_cut}.
\end{enumerate}
\end{lemma}
We prove \Cref{lem:aug_graph_properties} in the rest of this
subsection.
We first define some useful notation. For $U \subseteq V $ in $G$,
define $V_{\text{out}}(U) =\{ v_{\text{out}} \text{ } | \text { } v
\in U\} \subseteq V_{\text{out}}$ in $G'$. Similarly, we define
$V_{\text{in}}(U) = \{ v_{\text{in}} \text{ } | \text { } v
\in U\} \subseteq V_{\text{in}}$ in $G'$ .
We first introduce a standard \textit{split graph} $SG$ from $G'$.
\begin{definition} [Split graph $SG$]\label{def:sg}
Given $G'$, a \textit{split graph} $SG$ is an induced graph $SG = G'[W]$ where
$$W = V_{\text{in}} \sqcup V_{\text{out}} \sqcup \{ x \}, $$
with capacity function $c_G'(e)$ restricted to edges in $G'[W]$ where
the edge set of $G'[W]$ is $E_\nu \sqcup E_\infty$.
\end{definition}
\begin{proof}[Proof of
\Cref{lem:aug_graph_properties}\ref{item:aug_graph_properties_one}]
We fix a separation triple $(L,S,R)$ given in the statement. Since $x \in L$, $S$ is an
$(x,y)$-vertex-cut for some $y \in R$ by \Cref{def:separation_triple}.
Let $C = \{ (u_{\text{in}}, u_{\text{out}}) \colon u \in S \}$. It is easy to see that $C$ is an $(x_{\operatorname{out}},y_{\operatorname{in}})$-edge-cut in the split graph
$SG$. Since $S$ is an $(x,y)$-vertex-cut, there is no path from $x$
to $y$ in $G \setminus S$. In the transformation from $G$ to $G'$, the vertex
$y$ of $G$ becomes $y_{\operatorname{in}}$ and $y_{\operatorname{out}}$ in $G'$. Since $S$ separates $x$ and $y$ in $G$, by the
construction of $C$, $C$ must separate $x_{\operatorname{out}}$ and $y_{\operatorname{in}}$ in $G'$ and thus in $SG$. Therefore
there is no $(x_{\operatorname{out}},y_{\operatorname{in}})$-path in $SG \setminus C$, and
the claim follows.
In $G'$, we define an edge-set $C' = C \sqcup \{ (v,t) \colon v \in V_{\text{out}}(L) \}$. It is easy to see that $C'$ is an $(s,t)$-edge-cut in $G'$. Indeed, since $C$ is an
$(x_{\operatorname{out}},y_{\operatorname{in}})$-edge-cut in the split graph $SG$, the graph $ G'
\setminus C$ has no $(s,V_{\text{out}} ( S \sqcup R))$-path.
Since $G$ is strongly connected, the sink vertex
$t$ in $G' \setminus C$ is reachable from $s$ via only vertices in
$V_{\text{out}}(L)$. Hence, it is enough to remove the
edge-set $\{ (v,t) \colon v \in V_{\text{out}}(L) \}$ to
disconnect all $(s,t)$-paths in $G' \setminus C$, and the claim
follows.
We now compute the capacity of the cut $C'$.
\begin{align*}
c_{G'}(C') &= c_{G'}( C \sqcup \{ (v,t) \colon v \in V_{\text{out}} (L) \} ) \\
& = c_{G'}( C ) + c_{G'}( \{ (v,t) \colon v \in V_{\text{out}} (L) \}) \\
&= \nu |S|/ (\epsilon k) + \sum_{v \in L}
\text{deg}_G^{\text{out}}(v) \\
& = \nu |S| / (\epsilon k) + \text{vol}_G^{\text{out}}(L) \\
& \leq \nu/ \epsilon + \nu
\end{align*}
The last inequality follows from $|S| \leq k$ and $\text{vol}_G^{\text{out}}(L) \leq \nu$ (by \Cref{eq:unfound_cut}).
Hence, the capacity of the minimum $(s,t)$-cut $C^*$ is $ c_{G'}(C^*) \leq c_{G'}(C')
\leq \nu/\epsilon + \nu $.
\end{proof}
Before proving \Cref{lem:aug_graph_properties}\ref{item:aug_graph_properties_two}, we
observe structural properties of an $(s,t)$-edge-cut in $G'$.
\begin{definition}
Let $\mathcal{C}\xspace$ be the set of $(s,t)$-cuts of finite capacities in $G'$. We define three
subsets of $\mathcal{C}\xspace$ as,
\begin{itemize}[noitemsep,nolistsep]
\item $\mathcal{C}\xspace_1 = \{ C \colon C \in \mathcal{C}\xspace $, and one side of vertices in $ G'\setminus C $ contains
$ s $ or $ t $ as a singleton $ \} $.
\item $\mathcal{C}\xspace_2 = \{ C \colon C \in \mathcal{C}\xspace \setminus \mathcal{C}\xspace_1$,
and $ C $ is an $ (\{ s \}
\sqcup V_{\text{in}}, \{ t \})$-edge-cut$ \} $.
\item $\mathcal{C}\xspace_3 = \{ C \colon C \in \mathcal{C}\xspace \setminus \mathcal{C}\xspace_1 $,
and $ C $ is an $
(\{ s \}, \{v_{\text{in}},t \})\text{-edge-cut } $ for some $
v_{\text{in}} \in V_{\text{in}} \} $.
\end{itemize}
\label{def:cut-structure}
\end{definition}
Observe that the three classes in \Cref{def:cut-structure}
are pairwise disjoint and together cover $\mathcal{C}\xspace$, by
\Cref{def:edge-or-vertex-cuts} and the construction of $G'$.
\begin{observation}
\label{obs:3partitions}
$$ \mathcal{C}\xspace = \mathcal{C}\xspace_1 \sqcup \mathcal{C}\xspace_2 \sqcup \mathcal{C}\xspace_3 $$
\end{observation}
\begin{proposition} We have the following lower bounds on cut capacity
for cuts in $\mathcal{C}\xspace_1 \sqcup \mathcal{C}\xspace_2$.
\begin{itemize}[noitemsep,nolistsep]
\item For all $C \in \mathcal{C}\xspace_1$, $c_{G'}(C) \geq \min ( \nu/\epsilon + \nu
+1 , m )$.
\item For all $C \in \mathcal{C}\xspace_2$, $c_{G'}(C) \geq \min ( \nu/\epsilon + \nu
+1, \max((n-(1+\epsilon)k)k, m - (1+\epsilon)nk))$.
\end{itemize}
\label{pro:cut-structure}
\end{proposition}
\begin{proof}
By \Cref{def:cut-structure}, any $C \in \mathcal{C}\xspace_1$ contains $(s,x_{\operatorname{out}})$ or
$E_{\text{deg}}$. So, $C$ has capacity $c_{G'}(C) \geq
\min(\nu/\epsilon + \nu +1, \sum_{v \in V} \text{deg}_G^{\text{out}}(v))=
\min(\nu/\epsilon + \nu +1, m)$.
Next, we show that if $C \in \mathcal{C}\xspace_2$, then $c_{G'}(C) \geq \min (
\nu/\epsilon + \nu + 1, \max((n-(1+\epsilon)k)k, m - (1+\epsilon)nk))$. By \Cref{def:cut-structure}, $C$ has finite capacity. We can write $C = E_\nu^* \sqcup E^*_{\text{deg}}$ where $E_\nu^*
\subseteq E_\nu$ and $E^*_{\text{deg}} \subseteq
E_{\text{deg}} $.
If $|E^*_\nu| > (1+\epsilon)k$, then, by the construction of $G'$,
$c_{G'}(C) \geq \frac{\nu}{\epsilon k} |E_\nu^*| > \nu/\epsilon +
\nu$.
From now on, we assume that $|E^*_\nu| \leq (1+\epsilon)k$. We show two
inequalities:
\begin{align} \label{eq:ineqPart1}
c_{G'}(C) \geq (n-(1+\epsilon)k)k
\end{align}
and
\begin{align} \label{eq:ineqPart2}
c_{G'}(C) \geq m - nk(1+\epsilon).
\end{align}
We claim that $|E^*_{\text{deg}}| \geq n-(1+\epsilon)k$. Consider $G' \setminus C$. Since $C \notin \mathcal{C}\xspace_1$, we have $(s,x_{\operatorname{out}}) \notin C$, and since $C$ is an $ (\{ s \}
\sqcup V_{\text{in}}, \{ t \})$-edge-cut, neither $x_{\operatorname{out}}$ (which is reachable from $s$) nor any $v_{\text{in}}
\in V_{\text{in}}$ can reach $t$ in $G' \setminus C$. Hence $(x_{\operatorname{out}},t)\in C$, and for each of the $n-1$ vertices $v_{\text{in}} \in V_{\text{in}}$, we have $(v_{\text{in}}, v_{\text{out}}) \in C$ or
$(v_{\text{out}}, t) \in C$. Since
$|E^*_\nu| \leq (1+\epsilon)k$, at most $(1+\epsilon)k$ of these choices can be of the type
$(v_{\operatorname{in}}, v_{\operatorname{out}})$. Hence, the remaining
choices, together with $(x_{\operatorname{out}},t)$, lie in $E^*_{\text{deg}}$, and thus $|E^*_{\text{deg}}| \geq
n- (1+\epsilon)k$.
We now show \Cref{eq:ineqPart1}.
By \Cref{eq:local_condition} or \Cref{eq:local_condition_dense}, $\deg_{\min}^{\operatorname{out}} \geq k$.
Since $C = E^*_\nu \sqcup E^*_{\text{deg}}$, we have $c_{G'}(C) \geq
c_{G'}(E^*_{\text{deg}}) \geq |E^*_{\text{deg}}| \deg_{\min}^{\operatorname{out}} \geq (n-(1+\epsilon)k)k.$
Finally, we show \Cref{eq:ineqPart2}. Since $|E^*_{\text{deg}}| \geq
n-(1+\epsilon)k$, we have $|E_{\text{deg}} \setminus E^*_{\text{deg}} | \leq (1+\epsilon)k$, and thus
$c_{G'}(E_{\text{deg}} \setminus E^*_{\text{deg}}) \leq (1+\epsilon)nk$ (recall that
each vertex has out-degree at most $n-1$). Therefore,
$$c_{G'}(C) \geq c_{G'}(E^*_{\text{deg}}) = \sum_{v} \text{deg}_{G}^{\operatorname{out}}(v) -
c_{G'}(E_{\text{deg}} \setminus E^*_{\text{deg}}) \geq m - (1+\epsilon)nk. $$
\end{proof}
\begin{corollary} \label{cor:goodcut}
For all $C \in \mathcal{C}\xspace$, if $c_{G'}(C) \leq \nu/\epsilon + \nu$, then $C \in \mathcal{C}\xspace_3$.
\end{corollary}
\begin{proof}
By \Cref{obs:3partitions} and \Cref{pro:cut-structure}, it is enough
to show that $C \not \in \mathcal{C}\xspace_1$ and $C \not \in \mathcal{C}\xspace_2$ using \Cref{eq:local_condition} or
\Cref{eq:local_condition_dense}. By either \Cref{eq:local_condition} or
\Cref{eq:local_condition_dense}, $ \nu/\epsilon + \nu < m$, and thus $C \not
\in \mathcal{C}\xspace_1$. Next, we show that $C \not \in \mathcal{C}\xspace_2$. It is enough to
show that $\nu/\epsilon + \nu $ is smaller than one of the two terms in the
max. If \Cref{eq:local_condition} is satisfied, then $(1+\epsilon)(
2\nu/ (\epsilon k) + k) < n $, i.e. $(n-
(1+\epsilon)k) k > 2\nu(1+\epsilon)/\epsilon = 2\nu/\epsilon + 2\nu > \nu/\epsilon + \nu.$
If \Cref{eq:local_condition_dense} is satisfied, then we immediately
get $\nu/\epsilon + \nu < m - (1+\epsilon) nk $.
\end{proof}
We are now ready to prove
\Cref{lem:aug_graph_properties}\ref{item:aug_graph_properties_two}.
\begin{proof}[Proof of \Cref{lem:aug_graph_properties}\ref{item:aug_graph_properties_two}]
In $G$, we show the existence of a separation triple $(L,S,R)$ where $x
\in L$ and $|S| \leq (1+\epsilon)k$.
Since $c_{G'}(C^*) \leq \nu/\epsilon + \nu $, by \Cref{cor:goodcut} we have $C^* \in \mathcal{C}\xspace_3$; that is, the minimum $(s,t)$-cut $C^*$ in $G'$ is an $
(s, \{v_{\text{in}},t \})\text{-edge-cut } $ (with finite capacity) for some $
v_{\text{in}} \in V_{\text{in}} $.
We can write $C^*= E^*_{\text{deg}} \sqcup E^*_{\nu}$ where $\emptyset \not = E^*_{\text{deg}}
\subsetneq E_{\text{deg}} $ and $\emptyset \not = E^*_{\nu} \subsetneq E_{\nu}$ in
$G'$. To see that $ E^*_{\nu} \not = \emptyset $, suppose otherwise; then $ C^*
$ must be in $\mathcal{C}\xspace_1$, a contradiction.
It is easy to see that $E^*_\nu$ is an $(x_{\operatorname{out}},v_{\text{in}})$-edge-cut in
$SG$. First of all, $E^*_\nu$ is a subset of the edges of $SG$ by
\Cref{def:sg}. Since $v_{\text{in}}$ is not reachable from $s$ in $G' \setminus C^*$
and $(s,x_{\operatorname{out}}) \not \in C^*$, $x_{\operatorname{out}}$ cannot reach $v_{\text{in}}$ in $G'
\setminus C^*$. Observe that edges in $E_{\text{deg}}$ (and in
particular, $E^*_{\text{deg}}$) have
no effect on the reachability of $v_{\text{in}}$ from $x_{\operatorname{out}}$
in $G'$. Since $C^* = E^*_{\text{deg}} \sqcup E^*_{\nu}$, only
edges in $E^*_\nu$ can affect the reachability of
$v_{\text{in}}$ from $x_{\operatorname{out}}$ in $G'$. So, when restricting $G'$ to $SG$, $x_{\operatorname{out}}$ cannot reach
$v_{\text{in}}$ in $SG \setminus E^*_\nu$. Therefore, $E^*_\nu$ is an $(x_{\operatorname{out}},v_{\text{in}})$-edge-cut in
$SG$, and the claim follows.
To exhibit a separation triple $(L,S,R)$, it is enough to define $S$ and show
that $S$ is an $(x,y)$-vertex-cut where $x \in L$ and $y \in R$. This
is because $L$ and $R$ can then be found trivially after removing $S$
from $G$.
Let $S= \{ u \in V \colon (u_{\text{in}},u_{\text{out}}) \in E^*_{\nu} \}$, and let $y \in V$ be the vertex with $y_{\operatorname{in}} = v_{\text{in}}$. It is easy to see that $S$ is an $(x,y)$-vertex-cut in
$G$. Since $E^*_{\nu} \not = \emptyset$ is an
$(x_{\operatorname{out}},y_{\operatorname{in}})$-edge-cut in $SG$, $y_{\operatorname{in}}$ is not reachable from
$x_{\operatorname{out}}$ in $SG \setminus
E^*_{\nu}$. By the construction of $G'$ (\Cref{def:aug_graph}), the corresponding out-copy $y_{\operatorname{out}}$ of $y_{\operatorname{in}}$ has
its only incoming edge from $y_{\operatorname{in}}$. So, $y_{\operatorname{out}}$ is also not reachable from $x_{\operatorname{out}}$ in $SG \setminus
E^*_{\nu}$. Hence, in $SG \setminus
E^*_{\nu}$, neither $y_{\operatorname{in}}$ nor $y_{\operatorname{out}}$ is reachable from
$x_{\operatorname{out}}$. So, by the construction of $G'$, $y$ is
not reachable from $x$ in $G \setminus S$. Therefore, $S$ is an $(x,y)$-vertex-cut in $G$.
Next, $|S| \leq (1+\epsilon)k$ since otherwise $c_{G'}(C^*) >
(1+\epsilon)k \cdot \nu/(\epsilon k) = \nu/\epsilon+\nu $, contradicting the bound on the capacity of $C^*$.
We next show that $\operatorname{vol}_G^{\text{out}}(L) \leq \nu/\epsilon + \nu +
1$.
Let $E_{\text{deg}}(L) = \{ (v_{\text{out}},t) \colon v \in L \} $. We claim that $ E^*_{\text{deg}} =
E_{\text{deg}}(L)$. Since $E^*_{\nu}$ is an $(x_{\operatorname{out}},v_{\text{in}})$-cut in $SG$, the graph $G' \setminus
E^*_\nu$ has no $(s,V_{\text{out}} (S \sqcup R))$-path. Since $G$ is strongly
connected, the sink vertex $t$ in $G' \setminus E^*_\nu $ is
reachable from $s$ only via vertices in $V_{\text{out}}(L)$. On the one hand, taking $L$ to be the set of vertices reachable from $x$ in $G\setminus S$, for every $v\in L$ the vertex $v_{\operatorname{out}}$ remains reachable from $s$ in $G'\setminus C^*$ (edges in $E_{\text{deg}}$ do not affect this reachability), so $(v_{\operatorname{out}},t)$ must belong to $C^*$; hence $E_{\text{deg}}(L) \subseteq E^*_{\text{deg}}$. On the other hand, since $C^*$ is a minimum $(s,t)$-cut in $G'$, $E^*_{\text{deg}}$ contains no edges outside $E_{\text{deg}}(L)$. The claim follows.
We now show that $\operatorname{vol}_G^{\text{out}}(L) \leq \nu/\epsilon + \nu +1
$. By the previous claim, $c_{G'}(E^*_{\text{deg}}) = \sum_{v \in L}
\text{deg}_G^{\text{out}}(v) = \text{vol}_G^{\text{out}}(L).$ Also,
denote by $F^*$ the value of the maximum $(s,t)$-flow in $G'$. By
strong duality (the max-flow min-cut theorem), $c_{G'}(C^*) = F^*$. Note
that $F^* \leq \nu/\epsilon + \nu + 1$ since the single edge $(s,x_{\operatorname{out}})$ already forms an
$(s,t)$-edge-cut of this capacity. Hence,
\begin{align*}
\text{vol}_G^{\text{out}}(L) + c_{G'}(E^*_\nu) &=
c_{G'}(E^*_{\text{deg}})+ c_{G'}(E^*_\nu) \\
&= c_{G'}( E^*_{\text{deg}} \sqcup E^*_\nu) \\
&= c_{G'}(C^*) \\
& = F^* \\
& \leq \nu/\epsilon + \nu +1.
\end{align*}
Therefore, $\text{vol}_G^{\text{out}}(L) + c_{G'}(E^*_\nu) \leq
\nu/\epsilon + \nu +1$, and thus $\text{vol}_G^{\text{out}}(L) \leq
\nu/\epsilon + \nu +1$ as desired.
\end{proof}
\subsection{Preliminaries for Flow Network and Binary Blocking Flow}
We define notation related to flows on a capacitated directed graph $G = (V,E,c)$. We fix a vertex $s$ as the source and a vertex $t$ as the sink.
\begin{definition} [Flow]
For a capacitated graph $G=(V,E,c)$, a \textit{flow} $f$ is a function
$f : E \rightarrow \mathbb{R}$ satisfying two conditions:
\begin{itemize} [noitemsep, nolistsep]
\item For any $(v,w) \in E, f(v,w) \leq c(v,w)$, i.e., the flow on
each edge does not exceed its capacity.
\item For any vertex $v \in V \setminus \{s, t\}, \sum_{u: (u,v) \in E}
f(u,v) = \sum_{w: (v,w) \in E} f(v,w)$, i.e., for each vertex except
for $s$ or $t$, the amount of incoming flow is equal to the amount of outgoing flow.
\end{itemize}
We denote $|f| = \sum_{v : (v,t) \in E} f(v,t)$ as the value of flow $f$.
\end{definition}
\begin{definition}[Residual graph]
Given a capacitated graph $G = (V,E,c)$ and a flow function $f$, we define the
\textit{residual graph with respect to } $f$ as $(G,c,f) = (V, E_f, c_f)$
where $E_f$ contains all edges $(v,w) \in E$ with $c(v,w) - f(v,w) >
0$. Note that $f(v,w)$ can be negative if the actual flow goes from $w$
to $v$, and $E_f$ may contain reverse edge $e^R$ to the original graph
$G$. We call an edge in $E_f$ a \textit{residual edge} with
\textit{residual capacity} $c_f(v,w) = c(v,w) - f(v,w)$. An edge in
$E$ is not in $E_f$ when the amount of flow through this edge equals
its capacity. Such an edge is called a \textit{saturated edge}. We
sometimes use notation $G_f$ as the shorthand for the residual graph
$(G,c,f)$ when the context is clear.
\end{definition}
\begin{definition}[Blocking flow] \label{def:blockingflow}
Given a capacitated graph $G = (V,E,c)$, a \textit{blocking flow} is a
flow that saturates at least one edge on every $(s,t)$-path in $G$.
\end{definition}
We will use \Cref{def:blockingflow} mostly on the residual graph $G_f$.
Given a binary length function $\ell$ on $(G,c,f)$, we define
a natural distance function to each vertex in $(G,c,f)$ under
$\ell$.
\begin{definition} [Distance function] \label{def:distance-function}
Given a residual graph $G_f$, and binary length function $\ell$, a
function $d_{\ell} : V \rightarrow \mathbb{Z}_{\geq 0}$ is a distance
function if $d_{\ell}(v)$ is the length of the shortest $(s,v)$-path in $G_f$
under the binary length function $\ell$.
\end{definition}
For any $(v,w) \in E_f$, $d_{\ell}(v) +\ell(v,w) \geq d_{\ell}(w)$ by
\Cref{def:distance-function}. If $d_{\ell}(v) + \ell(v,w)= d_{\ell}(w)$, then
we call $(v,w)$ \textit{admissible edge} under length function
$\ell$.
We denote $E_a$ to be the set of admissible edges of $E_f$ in
$(G,c,f)$ under length function $\ell$.
\begin{definition}[Admissible graph]
Given a residual graph $(G,c,f)$ and a length function $\ell$, we
define an \textit{admissible graph} $A(G,c,f,\ell) = (G[E_a],c,f)$ to
be an induced subgraph of $(G,c,f)$ that contains only admissible
edges under length function $\ell$.
\end{definition}
\begin{definition}[$\Delta'$-or-blocking flow]
For any $\Delta' > 0$, a flow is called a $\Delta'$-\textit{or-blocking flow} if it is a flow of
value exactly $\Delta'$, or a blocking flow.
\end{definition}
\begin{definition} [Binary length function $\tilde \ell$] \label{def:gr-binary-length}
Given $\Delta > 0$, a capacitated graph $(G,c)$ and a flow $f$, we
define binary length functions $\hat \ell$ and $\tilde \ell$ for any edge $(u,v)$ in a residual graph $(G,c,f)$ as follows.
$$
\hat \ell(u,v) = \left\{ \begin{array}{rl}
0 &\mbox{ if residual capacity $c(u,v) - f(u,v) \geq \Delta$} \\
1 &\mbox{ otherwise}
\end{array} \right.
$$
Let $\hat d(v)$ be the shortest path distance between $s$ and $v$ under the
length function $\hat \ell$. We define \textit{special edge} $(u,v)$ to be
an edge $(u,v)$ such that $\hat d(u) = \hat d(v)$, $\Delta/2 \leq c(u,v) - f(u,v)
< \Delta$, and $c(v,u) - f(v,u) \geq \Delta$. We define the next
length function $\tilde \ell$.
$$
\tilde \ell(u,v) = \left\{ \begin{array}{rl}
0 &\mbox{ if $(u,v)$ is special} \\
\hat \ell(u,v) &\mbox{ otherwise}
\end{array} \right.
$$
\end{definition}
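The following short Python sketch (ours, not the implementation used in our analysis) shows how $\hat\ell$, $\hat d$ and $\tilde\ell$ can be computed from the residual capacities; since $\hat\ell$ is binary, $\hat d$ is obtained by a 0--1 BFS from the source $s$.
\begin{verbatim}
from collections import deque

def hat_length(cres, Delta):
    # hat_ell(u,v): 0 if residual capacity >= Delta, else 1.
    return {e: (0 if cres[e] >= Delta else 1) for e in cres}

def adjacency(arcs):
    adj = {}
    for (u, v) in arcs:
        adj.setdefault(u, []).append(v)
    return adj

def zero_one_bfs(adj, length, s):
    # Shortest-path distances from s under a 0/1 length function.
    dist = {s: 0}
    dq = deque([s])
    while dq:
        u = dq.popleft()
        for v in adj.get(u, ()):
            d = dist[u] + length[(u, v)]
            if d < dist.get(v, float("inf")):
                dist[v] = d
                # 0-length arcs go to the front, 1-length arcs to the back
                (dq.appendleft if length[(u, v)] == 0 else dq.append)(v)
    return dist

def tilde_length(cres, Delta, s):
    # cres maps an arc (u,v) to its residual capacity c(u,v) - f(u,v).
    hat = hat_length(cres, Delta)
    dhat = zero_one_bfs(adjacency(cres), hat, s)
    tilde = dict(hat)
    for (u, v) in cres:
        if (u in dhat and v in dhat and dhat[u] == dhat[v]
                and Delta / 2 <= cres[(u, v)] < Delta
                and cres.get((v, u), 0) >= Delta):
            tilde[(u, v)] = 0          # special edge
    return tilde
\end{verbatim}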
The classic near-linear time blocking flow algorithm of \cite{SleatorT83}
works only for acyclic admissible graphs. Note that an admissible graph
$A(G,c,f,\tilde \ell) $ may contain cycles since an edge-length can
be zero. To handle this issue, the key idea of \cite{GoldbergR98} is
to contract all strongly connected components and run the algorithm
of \cite{SleatorT83}. To route the flow, they construct a routing flow
network inside each strongly connected component using two directed
trees rooted at a fixed root of the component. One tree is for routing
in-flow, the other one is for routing out-flow of the component. The
internal routing network ensures that each edge inside is used at most
twice. Hence, by routing at most $\Delta/4$ units of flow, each
edge carries at most $\Delta/2$ units. Since each edge in the component has
length zero, it has residual capacity at least $\Delta$. So, the resulting flow augmentation
respects the edge capacities. Finally, special edges (with the condition involving $\Delta /2$) play an
important role in ensuring that a blocking flow augmentation strictly increases the distance $d_{\tilde \ell}(t)$.
The following lemma summarizes the aforementioned algorithm.
\begin{lemma} [\cite{GoldbergR98}] \label{lem:binaryblockingflowalg}
Let $A(G,c,f,\ell)$ be an admissible graph and $m_A$ be its number
of edges. Then, there exists an algorithm that takes as input
$A$ and $\Delta > 0$, and
in $O(m_A \log (m_A))$ time, outputs a $\Delta/4$-or-blocking flow.
We call this algorithm $\operatorname{BinaryBlockingFlow} (A(G,c,f,\ell), \Delta)$.
\end{lemma}
We now define the notion of {\em shortest-path flow}. Intuitively, it
is a union of shortest paths on admissible graphs. This is the flow resulting from, e.g., the Binary Blocking Flow algorithm \cite{GoldbergR98}.
\begin{definition} [Shortest-path flow]
Given a graph $(G,c)$ with a flow $f$ and a length function $\ell$,
let $G_f$ be the residual graph. A flow $f^*$ in $G_f$ is called a
\textit{shortest-path flow} if it can be decomposed into a set of
shortest paths under the length function $\ell$, i.e., $f^* = \sum_{i=1}^{b} f_i^*$ for
some integer $b > 0$, where $\operatorname{support}(f_i^*)$ is a shortest path in $G_f$ under the length function $\ell$.
\end{definition}
Observe that $\operatorname{BinaryBlockingFlow} (A(G,c,f,\ell), \Delta)$ always produces
a shortest-path flow.
\begin{comment}
That is,a flow
that is a union of a sequence of flows obtained from the shortest $(s,t)$-path in admissible graph in previous
round. We call this flow as \textit{current flow}.
\end{comment}
For the rest of this section, we fix an augmented graph $(G',c_{G'})$
(\Cref{def:aug_graph}) and a flow $f$.
Given the residual graph $G'_f$ and $d_{\ell}$, we can use
$\operatorname{BinaryBlockingFlow} (A(G',c_{G'},f, \tilde \ell),\Delta)$ to compute a
$\Delta/4\text{-or}$-binary blocking flow in $(G',c_{G'},f)$ in $\tilde{O}(m)$ time.
\begin{comment}
Given a shortest augmenting flow $f$ on the augmented graph $(G',c_{G'})$, and $d_{\ell}$, we can use
$\operatorname{BinaryBlockingFlow} (A(G',c_{G'},f, \tilde \ell),\mathcal{D}\xspaceelta)$ to compute a $\mathcal{D}\xspaceelta/4\text{-or}$-binary
blocking flow in $(G',c_{G'},f)$ in $\tilde{O}(m)$ time.
\end{comment}
\cite{OrecchiaZ14} provide a slightly different binary length function under which the
algorithm of \cite{GoldbergR98} has local running time.
Our goal in the next section is to output the same $\Delta/4\text{-or}$-binary
blocking flow in $G'_f$ in $\tilde{O}(\nu/\epsilon)$ time using a slight adjustment
of \cite{OrecchiaZ14}.
\subsection{Local Augmented Graph and Binary Blocking Flow in Local Time}
\label{sec:local binary blocking flow}
The goal in this section is to compute binary blocking flow on the
residual graph of the augmented graph $(G',c_{G'})$ with a flow $f$ in
``local'' time. To ensure local running time, we cannot construct the
augmented graph $G'$ explicitly. Instead, we compute binary blocking flow from a subgraph of $G'$ based on ``absorbed'' vertices.
\begin{definition} [Split-node-saturated set] \label{def:splitnodesauratedset}
Given a residual graph $(G',c_{G'},f)$, let $B_{\operatorname{out}}$ be the set of vertices $v \in V_{\operatorname{out}} \sqcup
\{ x \}$ in the residual graph $(G',c_{G'},f)$ whose edge to $t$ is
saturated. The \textit{split-node-saturated set} $B$ is defined as $$B = B_{\operatorname{out}} \sqcup \bigl( N_{G'}^{\operatorname{out}}(B_{\operatorname{out}})\setminus \{ t \} \bigr).$$
Note that $x$ is a fixed vertex as in \Cref{def:aug_graph}.
\end{definition}
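A direct way to read off $B$ from the residual graph is sketched below. The inputs (\texttt{v\_out\_plus\_x}, the residual capacities of the edges into $t$, and the out-neighborhood lists of $G'$) are assumptions of this illustrative sketch, not part of the definition.
\begin{verbatim}
# Illustrative sketch: B_out collects the vertices of V_out together with x
# whose edge to t is saturated (zero residual capacity); B additionally
# collects their out-neighbors in G', excluding t.

def split_node_saturated_set(v_out_plus_x, res_cap, out_neighbors, t):
    B_out = {v for v in v_out_plus_x if res_cap[(v, t)] == 0}
    B = set(B_out)
    for v in B_out:
        B.update(w for w in out_neighbors[v] if w != t)
    return B_out, B
\end{verbatim}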
\begin{definition} [Local binary length function] \label{def:local-length-function}
Fix a parameter $\Delta > 0$ to be selected later, and let $\tilde \ell$ be the
length function in \Cref{def:gr-binary-length} for the residual graph
$(G',c_{G'},f)$. For vertices $u,v$ in the residual graph, if $u,v \in
B$, we call the residual edge $(u,v)$ \textit{modern}; otherwise, we call
it \textit{classical}.
We define the \textit{local binary length} function $\ell$:
$$
\ell(u,v) = \left\{ \begin{array}{rl}
1 &\mbox{ if $(u,v)$ is classical} \\
\tilde \ell(u,v) &\mbox{ otherwise}
\end{array} \right.
$$
\end{definition}
\begin{definition} [Distance under local binary length $\ell$]
Define distance function $d(v)$ as the shortest path distance between the source vertex $s$
and vertex $v$ in the residual graph $(G',c_{G'},f)$ under the local length function $\ell$.
\end{definition}
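Because $\ell$ only takes values in $\{0,1\}$, the distances $d(v)$ can be computed with a 0--1 BFS (a deque-based variant of Dijkstra's algorithm) rather than a full Dijkstra. The sketch below is illustrative; the residual adjacency lists \texttt{adj} and the length function \texttt{ell} are assumed inputs.
\begin{verbatim}
# Illustrative 0-1 BFS: shortest-path distances from s when every edge
# length is 0 or 1.
from collections import deque

def binary_distances(adj, ell, s):
    dist = {s: 0}
    dq = deque([s])
    while dq:
        u = dq.popleft()
        for v in adj.get(u, ()):
            nd = dist[u] + ell(u, v)
            if v not in dist or nd < dist[v]:
                dist[v] = nd
                # zero-length edges go to the front, unit-length to the back
                if ell(u, v) == 0:
                    dq.appendleft(v)
                else:
                    dq.append(v)
    return dist
\end{verbatim}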
The following observations about structural properties of the residual graph
$G'_f$ follow immediately from the definition of the local length function $\ell$.
\begin{observation} \label{obs:locallength}
For a given residual graph $(G',c_{G'},f)$,
\begin{itemize}[noitemsep,nolistsep]
\item for any residual edge $(u,v) \in E_{\infty,f}$ that is modern, $\ell(u,v) = 0 $.
\item for any residual edge $(u,v) \in E_{\deg,f} \sqcup \{ (s,x) \}$,
$(u,v)$ is classical.
\item any residual edge with length zero is modern.
\end{itemize}
\end{observation}
\begin{definition} [Layers]
Given the distance function $d$ on the residual graph $(G',c_{G'},f)$, define
$L_j = \{ v \in G' \colon d(v) = j \}$ to be the $j^{\text{th}}$ layer with respect to the distance $d$. Define $d_{\text{max}} = d(t)$ to be the distance between $s$ and $t$ in $(G',c_{G'},f)$.
\end{definition}
The proof of the following lemma is similar to that in
\cite{OrecchiaZ14}, but here we work with the augmented graph
$(G',c_{G'},f)$. Recall the split-node-saturated set $B$ from \Cref{def:splitnodesauratedset}.
\begin{lemma} \label{lem:layerproperties}
If $d_{\text{max}} < \infty$ and $(x,t)$ is saturated, then we have:
\begin{enumerate}[noitemsep,nolistsep,label=(\Roman*)]
\item \label{item:layerproperties1} $d_{\text{max}} \geq 3$.
\item \label{item:layerproperties2} $L_0 = \{ s \}$.
\item \label{item:layerproperties3} $L_j \subseteq B$ for $1 \leq j \leq d_{\text{max}}-2$.
\item \label{item:layerproperties4} $L_j \subseteq B \cup N_{G'}^{\operatorname{out}}( B )$ for $ j = d_{\text{max}} -1$.
\end{enumerate}
\end{lemma}
\begin{proof}[Proof of
\Cref{lem:layerproperties}\ref{item:layerproperties1}]
First, $d_{\max} \geq 2$ since $(s,x)$ and
any $(v_{\operatorname{out}},t) \in E_{\text{deg},f}$ are classical by
\Cref{obs:locallength}. This means $d_{\max} \geq 3$ or $d_{\max} =
2$. We show that $d_{\max} = 2$ is not possible. Suppose for
contradiction that $d_{\max} = 2$. Then every intermediate edge of any shortest $(s,t)$-path, i.e.,
$s\rightarrow x\rightarrow v_{\operatorname{in}} \rightarrow v_{\operatorname{out}}
\rightarrow \ldots
\rightarrow w_{\operatorname{in}} \rightarrow w_{\operatorname{out}} \rightarrow t$, must have
zero length. Also, the path cannot be of the form $s \rightarrow x \rightarrow
t$ since $(x,t)$ is assumed to be saturated. In particular, $(w_{\operatorname{in}}, w_{\operatorname{out}})$ has zero
length. By \Cref{obs:locallength}, $(w_{\operatorname{in}}, w_{\operatorname{out}})$ must be
modern. This edge is modern only when $w_{\operatorname{out}} \in B$, and since $w_{\operatorname{out}}$ is an out-vertex this means $w_{\operatorname{out}} \in B_{\operatorname{out}}$, by definition of the
split-node-saturated set $B$. Therefore, $(w_{\operatorname{out}},t)$ is saturated,
contradicting that the path uses the residual edge $(w_{\operatorname{out}},t)$.
\end{proof}
\begin{proof}[Proof of
\Cref{lem:layerproperties}\ref{item:layerproperties2}]
The second item follows from the fact that $(s,x)$ is the only outgoing-edge from
$s$ and $(s,x)$ is classical and hence has length 1.
\end{proof}
\begin{proof}[Proof of
\Cref{lem:layerproperties}\ref{item:layerproperties3}]
For $1 \leq j \leq d_{\text{max}}-2$, if $v
\in L_j$, then we consider two types of $v$. If $v$ is an out-vertex $v_{\operatorname{out}}$, then
$d(v_{\operatorname{out}}) = j \leq d_{\max}-2$. Thus, $(v_{\operatorname{out}},t)$ must be saturated, since otherwise $(v_{\operatorname{out}},t)$ would be a residual edge of length at most one, contradicting $d(t) = d_{\text{max}} >
d_{\text{max}}-1 \geq d(v_{\operatorname{out}})+1$. Hence, $v_{\operatorname{out}} \in B_{\operatorname{out}} \subseteq B$.
If $v$ is an in-vertex $v_{\operatorname{in}}$, then there must be an out-vertex
$u_{\operatorname{out}}$ such that
\begin{equation} \label{eq:in-out-vertex1}
d(v_{\operatorname{in}})= d(u_{\operatorname{out}}) +\ell(u_{\operatorname{out}}, v_{\operatorname{in}})
\end{equation}
We consider two cases for $j$. We show that $v_{\operatorname{in}} \in B$ for either case.
\begin{itemize}
\item If $j = 1$, then $u_{\operatorname{out}}$ could also be $x$. Since $d_{\text{max}} \geq 3$,
the edge from $u_{\operatorname{out}} \in L_1$ to $t$ must be saturated, meaning that $u_{\operatorname{out}} \in B_{\operatorname{out}}$. Hence, $v_{\operatorname{in}}$ is an
out-neighbor of $u_{\operatorname{out}} \in B_{\operatorname{out}}$, and so $v_{\operatorname{in}} \in B$.
\item If $j \geq 2$, then we show that $1 \leq d(u_{\operatorname{out}}) \leq
d_{\text{max}}-2$. For the upper bound $d(u_{\operatorname{out}}) \leq
d_{\text{max}}-2$, rearrange \Cref{eq:in-out-vertex1} and use the
fact that $\ell$ is a binary function, i.e., $\ell(u_{\operatorname{out}}, v_{\operatorname{in}})
\in \{ 0, 1 \}$, to get $$d(u_{\operatorname{out}}) =d(v_{\operatorname{in}}) - \ell(u_{\operatorname{out}}, v_{\operatorname{in}}) \leq d(v_{\operatorname{in}}) = j \leq
d_{\text{max}}-2.$$
The lower bound $d(u_{\operatorname{out}}) \geq 1$ follows from $ d(v_{\operatorname{in}}) = j
\geq 2$, \Cref{eq:in-out-vertex1}, and $\ell$ is binary.
Since $1 \leq d(u_{\operatorname{out}}) \leq d_{\text{max}}-2$, by the previous
discussion, $u_{\operatorname{out}} \in B_{\operatorname{out}}$. Therefore, $v_{\operatorname{in}} \in B$ since
$v_{\operatorname{in}}$ is the out-neighbor of $u_{\operatorname{out}} \in B_{\operatorname{out}}$.
\end{itemize}
\end{proof}
\begin{proof}[Proof of
\Cref{lem:layerproperties}\ref{item:layerproperties4}]
For any $v \in L_{d_{\text{max}}-1}$, if $v
\in B$, then we are done. Now, assume that $v \not \in B$. Then, $v$
is either an in-vertex or out-vertex. We first show that $v$ cannot be
an in-vertex.
Suppose for contradiction that $v$ is an in-vertex $v_{\operatorname{in}} \not
\in B$, then there must be a vertex $u_{\operatorname{out}}$ such that $
d(v_{\operatorname{in}})= d(u_{\operatorname{out}}) +\ell(u_{\operatorname{out}}, v_{\operatorname{in}})$. Since
$v_{\operatorname{in}} \not \in B$, the residual edge $(u_{\operatorname{out}}, v_{\operatorname{in}})$ is classical. Then, $\ell(u_{\operatorname{out}}, v_{\operatorname{in}}) = 1$. So, $$d(u_{\operatorname{out}}) =
d(v_{\operatorname{in}}) - \ell(u_{\operatorname{out}}, v_{\operatorname{in}})= (d_{\text{max}} -1)- 1 \leq
d_{\max}-2$$ By
\Cref{lem:layerproperties}\ref{item:layerproperties3}, $u_{\operatorname{out}}$ is
in $B$, which means $u_{\operatorname{out}} \in B_{\operatorname{out}}$. Hence, $v_{\operatorname{in}}$ is an out-neighbor of $u_{\operatorname{out}}
\in B_{\operatorname{out}}$. So, $v_{\operatorname{in}} \in B$, a contradiction.
Finally, if $v = v_{\operatorname{out}} \not \in B$, then we show that $v_{\operatorname{out}} \in N_{G'}^{\operatorname{out}}( B
)$. There exists $u_{\operatorname{in}}$ such that $d(v_{\operatorname{out}}) = d(u_{\operatorname{in}}) +
\ell(u_{\operatorname{in}}, v_{\operatorname{out}})$. Since $v_{\operatorname{out}} \not \in B$,
the edge $(u_{\operatorname{in}},v_{\operatorname{out}})$ cannot be modern and is therefore
classical, so it has length $1$. Therefore, $u_{\operatorname{in}} \in L_j$ for $j \leq d_{\text{max}}
-2$. So, $u_{\operatorname{in}} \in B$ by
\Cref{lem:layerproperties}\ref{item:layerproperties3}, and
$v_{\operatorname{out}}$ is the out-neighbor of $u_{\operatorname{in}}$. Therefore, $v_{\operatorname{out}} \in N_{G'}^{\operatorname{out}}( B)$.
\end{proof}
\begin{definition}[Local graph, $LG$] \label{def:aug_graph_local}
Given the augmented graph $G' = (V',E')$ and split-node-saturated set $B$, we define the \textit{local graph}
$LG(G',B) = G'[ V'' ] = (V'', E'') $ as an induced subgraph of $G'$ where
\begin{align}\label{eq:aug_graph_local}
V'' = B \sqcup N_{G'}^{\operatorname{out}}(B) \sqcup \{ s , t \} \quad\mbox{and}\quad E'' = E''_\nu \sqcup E''_\infty \sqcup
E''_{\text{deg}} \sqcup \{ (s,x) \}
\end{align}
where the sets in \Cref{eq:aug_graph_local} are defined as follows.
\begin{itemize}[noitemsep,nolistsep]
\item $E''_{\nu} = \{ (v_{\operatorname{in}},v_{\operatorname{out}}) \colon v_{\operatorname{out}} \in
B_{\operatorname{out}} \sqcup N_{G'}^{\operatorname{out}}(B) , (v_{\operatorname{in}}, v_{\operatorname{out}}) \in E_{\nu} \}$.
\item $E''_{\infty} = \{(v_{\operatorname{out}}, w_{\operatorname{in}}) \colon v_{\operatorname{out}} \in B_{\operatorname{out}}, w_{\operatorname{in}} \in V', (v_{\operatorname{out}}, w_{\operatorname{in}}) \in E_{\infty} \}$.
\item $E''_{\text{deg}} = \{ (v_{\operatorname{out}},t) \colon v_{\operatorname{out}} \in
B_{\operatorname{out}} \sqcup N_{G'}^{\operatorname{out}}(B) \}$.
\end{itemize}
Using the same capacity and flow as in $G'$, the \textit{residual local graph} is $(LG(G',B), c_{LG},
f_{LG})$ where $c_{LG}$ and $f_{LG}$ are the same as $c_{G'}$
and $f_{G'}$, but restricted to the edges in $LG(G',B)$. The
local length function $\ell$ also applies to $LG(G',B)$.
\end{definition}
\begin{lemma} \label{lem:lg-size}Let $m'$ be the number of edges in
$LG(G',B)$, and $n' = |V''|$ be the number of vertices in
$LG(G',B)$. We have $$m' \leq 4 \nu /\epsilon \quad \mbox{ and } \quad n' \leq 8 \nu/ (\epsilon k). $$
\end{lemma}
\begin{proof}
For any out-vertex $v_{\operatorname{out}} \in B_{\operatorname{out}}$, its edge to $t$, which has capacity
$\text{deg}_{G}^{\operatorname{out}}(v)$, must be saturated before $v_{\operatorname{out}}$ is included in $B$. The edge $(s,x)$ is also an $(s,t)$-edge cut in $G'$ with capacity
$\nu/\epsilon + \nu + 1$. Hence, the maximum flow $F^*$ in $G'$ is at most
$\nu/\epsilon + \nu + 1 $. We have $$ \sum_{v_{\operatorname{out}} \in B_{\operatorname{out}}}
\text{deg}_{G}^{\operatorname{out}}(v) \leq F^* \leq \nu/\epsilon + \nu + 1.$$
By \Cref{lem:layerproperties} and \Cref{def:aug_graph_local}, $m' =
|E''_\nu|+ |E''_{\infty}|+|E''_{\deg}| +1$ where $ |E''_\nu| =
|B_{\operatorname{out}}|+|N_{G'}^{\operatorname{out}}(B)| -1, |E''_{\infty}| = \sum_{v_{\operatorname{out}} \in B_{\operatorname{out}}}
\text{deg}_{G}^{\operatorname{out}}(v), $ and $|E''_{\deg}| = |B_{\operatorname{out}}|
+|N_{G'}^{\operatorname{out}}(B)| $. By \Cref{def:aug_graph_local}, $ B_{\operatorname{out}}
\sqcup N_{G'}^{\operatorname{out}}(B)\subset V''$. Since $|V''| = n'$ and every
out-vertex has a corresponding in-vertex ($x$ has $s$), $|B_{\operatorname{out}}|
+ |N_{G'}^{\operatorname{out}}(B)| \leq n'/2 \leq \sum_{v_{\operatorname{out}} \in B_{\operatorname{out}}} \text{deg}_{G}^{\operatorname{out}}(v)$. So,
$$ m' \leq 2 \sum_{v_{\operatorname{out}} \in B_{\operatorname{out}}} \text{deg}_{G}^{\operatorname{out}}(v)
\leq 2(\nu/\epsilon + \nu + 1) \leq 4 \nu /\epsilon. $$
To bound $n'$, note that each $v_{\operatorname{out}}$ has at least
$d_{\text{min}}^{\operatorname{out}} \geq k $ edges. Therefore, counting the in-vertices $v_{\operatorname{in}}$ as well,
$n' \leq 2(m'/d_{\text{min}}^{\operatorname{out}}) \leq 2 m'/k \leq 8
\nu/(\epsilon k) $.
\end{proof}
\begin{corollary} \label{cor:construct-lg-local}
Given a residual graph $ (G',c_{G'},f) $, the split-node-saturated
set $B$, and a pointer to the vertex $x$, we can
construct $(LG(G',B), c_{LG}, f)$ in $O(m') = O(\nu / \epsilon)$ time.
\end{corollary}
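The corollary can be realized by touching only the edges incident to $B_{\operatorname{out}}$ and the split and degree edges of vertices in $B_{\operatorname{out}} \sqcup N_{G'}^{\operatorname{out}}(B)$, as in the illustrative sketch below. The helper \texttt{in\_copy} (mapping an out-vertex to its in-copy) and the adjacency structure \texttt{out\_edges} are assumptions of the sketch, and corner cases such as the vertex $x$ (whose in-copy role is played by $s$) are ignored.
\begin{verbatim}
# Illustrative sketch of constructing LG(G', B) in O(m') time.
# out_vertices_in_B: the out-vertices of B (i.e., B_out).
# N_out_B: the out-vertices contained in N_{G'}^{out}(B).
# out_edges[v_out]: in-vertices w_in with (v_out, w_in) in E_infinity.

def build_local_graph(out_vertices_in_B, N_out_B, out_edges, in_copy, s, x, t):
    core = set(out_vertices_in_B) | set(N_out_B)   # out-vertices of LG
    V = core | {s, t}
    E = [(s, x)]                                   # the single source edge
    for v_out in core:
        v_in = in_copy(v_out)                      # assumed helper
        V.add(v_in)
        E.append((v_in, v_out))                    # split edge, E''_nu
        E.append((v_out, t))                       # degree edge, E''_deg
    for v_out in set(out_vertices_in_B):
        for w_in in out_edges[v_out]:              # original edges, E''_infinity
            E.append((v_out, w_in))
            V.add(w_in)
    return V, E
\end{verbatim}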
\begin{comment}
\begin{definition}
For input residual graph $G_f$ with length function $\ell$ and
$\mathcal{D}\xspaceelta$, BinaryBlockingFlow$(G_f,\ell,\mathcal{D}\xspaceelta)$ outputs either a
blocking flow or a flow with value $\mathcal{D}\xspaceelta/4$.
\end{definition}
\end{comment}
The proof of the following lemma is a straightforward modification of the argument in \cite{OrecchiaZ14}.
\begin{lemma}\label{lem:localbinaryblockingflow}
Consider the local length function $\ell$ on both the residual augmented
graph $(G', c_{G'}, f) $
and the residual local graph $(LG, c_{LG}, f_{LG}) = (V'', E''_f,
c_{LG,f})$ (recall $f_{LG}$ from \Cref{def:aug_graph_local}). Let $f_1$ be the output of $\operatorname{BinaryBlockingFlow}
(A(G',c_{G'}, f, \ell), \Delta)$. Let $f_2$ be the output of\\
$\operatorname{BinaryBlockingFlow} (A(LG,c_{LG}, f_{LG}, \ell), \Delta)$. Then,
\begin{itemize}[noitemsep,nolistsep]
\item $ f_1 = z(f_2)$ where
$$
z (f_2) (e) = \left\{ \begin{array}{rl}
0 &\mbox{ if $e \not \in E''_f$,} \\
f_2 (e) &\mbox{ otherwise,}
\end{array} \right.
$$ i.e., $f_1$ and $f_2$ coincide.
\item $\operatorname{BinaryBlockingFlow}(A(LG,c_{LG}, f_{LG}, \ell), \Delta)$ takes $\tilde{O}(\nu /\epsilon)$ time.
\end{itemize}
\end{lemma}
\begin{proof}
We focus on proving the first item. For notational convenience,
denote $G'_f = (G', c_{G'}, f)$, and $LG_f = (LG, c_{LG}, f_{LG})$. We show that there is no
$(s,t)$-path in $G_f'$ if and only if there is no $(s,t)$-path in
$LG_f$. The forward direction follows immediately from the fact that
$LG_f$ is a subgraph of $G'_f$. Next, we show the backward
direction. Let $U$ be a subset of vertices in graph $LG_f$
such that $s \in U$ and $t \not \in U$ and there is no edge between
$U$ and $V_{LG_f} \setminus U$. By \Cref{def:aug_graph_local}, $U \subseteq
V_{LG_f} \setminus \{ t \} = B \sqcup N_{G'}^{\operatorname{out}}(B) \sqcup \{ s \}$. In fact, $U
\subseteq B \sqcup \{ s \}$ since vertices in $N_{G'}^{\operatorname{out}}(B)$ have residual edges to
the sink $t$ with positive residual capacity by the construction of
$LG_f$. Now, we claim that all edges at the boundary of $B \sqcup \{ s \}$ in $G'$ and
$LG$ are the same. Indeed, all edges at the
boundary of $B \sqcup \{ s \}$ have the form $(u,v)$ where $u \in B$
and $v \in N_{G'}^{\operatorname{out}}(B)$ and $B \sqcup N_{G'}^{\operatorname{out}}(B) \subseteq
V''$ in $LG_f$. Furthermore, there is no $(U,t)$ path in $LG_f$ where $U \ni s$. Therefore, there is no $(s,t)$ path in $G'_f$.
For the rest of the proof, we assume that there is an $(s,t)$ path,
i.e., $d(t) = d_{\text{max}} < \infty$.
We claim that a flow $f^*$ in $G'_f$ is a shortest-path flow if
and only if the same flow $f^*$, when restricted to the edges of $LG_f$, is
a shortest-path flow.
We show the forward direction. If $f^*$ in $G'_f$ is a
shortest-path flow, then by the definition of a shortest-path
flow, the support of $f^*$ contains only vertices with $d(v) <
d_{\text{max}}$, together with $t$. By \Cref{lem:layerproperties},
$$ \{ s \} \sqcup L_1 \sqcup \ldots \sqcup L_{d_{\text{max}}-1} \sqcup
\{ t \} \subseteq \{s,t\} \sqcup B \sqcup N_{G'}^{\operatorname{out}}(B)$$
We show that the support of $f^*$ forms a subgraph of $LG$. The edges are
either between consecutive layers $L_i, L_{i+1}$ or within a layer. We
can limit the edges using \Cref{lem:layerproperties}. From $s$ to vertices in $L_1$, there is only one edge
$(s,x)$. Edges from $L_i$ to $L_{i+1}$ for $1 \leq i \leq
d_{\text{max}}-2$ must be of the form $\{(v_{\operatorname{in}},v_{\operatorname{out}}),
(v_{\operatorname{out}},v_{\operatorname{in}}) \colon v_{\operatorname{in}}, v_{\operatorname{out}} \in B \}$
or $\{ (w_{\operatorname{in}}, w_{\operatorname{out}}) \colon w_{\operatorname{out}} \in
N_{G'}^{\operatorname{out}}(B), w_{\operatorname{in}} \in B\}$. From
$L_{d_{\text{max}}-1}$ to $L_{d_{\text{max}}}$, the edges must be of the form $\{ (v_{\operatorname{out}},t) \colon v_{\operatorname{out}} \in B_{\operatorname{out}} \}$. If the
edges are within a layer, then they must be modern since their length
is zero. These have the form $\{ (u,v) \in E' \colon u, v \in B
\}$.
Since the support of $f^*$ forms a subgraph of $LG$, we can restrict
$f^*$ to the graph $LG$, and we are done with the forward direction of
the claim.
We show the backward direction of the claim. Let $f'$ be a
shortest-path flow in $LG_f$. We can extend $f'$ to a flow
in $G'_f$ via the function $z(f')$: $$
z (f') (e) = \left\{ \begin{array}{rl}
0 &\mbox{ if $e \not \in E''_f$.} \\
f' (e) &\mbox{ otherwise}
\end{array} \right.
$$
The support of the flow function $z(f')$ in $G'_f$ contains
only vertices with $d(v) < d_{\text{max}}$, together with
$t$, since $f'$ is a shortest-path flow. Therefore, $z(f')$ is
a shortest-path flow in $G'_f$, and we are done with the
backward direction of the claim.
Finally, the first item of the lemma follows since $\operatorname{BinaryBlockingFlow}$
outputs a shortest-path flow.
The running time for the second item follows from the fact that the number
of edges $m'$ in $LG(G',B) $ is $O(\nu/\epsilon )$ by \Cref{lem:lg-size}. By
\Cref{lem:binaryblockingflowalg}, $\operatorname{BinaryBlockingFlow} (A(LG,c_{LG}, f, \ell), \Delta)$ can be
computed in $\tilde{O}(m') = \tilde{O}(\nu/\epsilon)$ time.
\end{proof}
\subsection{Local Goldberg--Rao Algorithm for the Augmented Graph}
\begin{theorem} \label{lem:local-gold-berg-rao}
Given graph $G$, we can compute the $(s,t)$ max-flow in $G'$ in $\tilde{O}
(\nu^{3/2}/(\epsilon^{3/2} \sqrt{k}))$ time.
\end{theorem}
\begin{algorithm}[H]
\KwIn{$x \in V, \nu, k$}
\KwOut{maximum $(s,t)$-flow and its corresponding minimum
$(s,t)$-edge-cut in $G'$}
\BlankLine
Let $G'$ be an \textit{implicit} augmented graph on $G$. \tcp*{No
need to construct explicitly.}
$\Lambda \gets \sqrt{8\nu/(\epsilon k)} $ \;
$F \leftarrow 2\nu k+ \nu + 1 -\text{deg}_{G}^{\operatorname{out}}(x) $ \tcp*{$F$ is an upper bound on
$(s,t)$-flow value in $G'$.}
\lIf{ $F \leq 0 $} { the minimum $(s,t)$-edge-cut is $(s,x)$, and return. }
$f \leftarrow $ a flow of value $\text{deg}_{G}^{\operatorname{out}}(x)$ through
$s-x-t$ path. \;
$B \leftarrow \{ x \} \sqcup N_{G'}^{\text{out}}(x)$ \tcp*{a set of
saturated vertices and out-neighbors.}
\While{ $F \geq 1$}
{
$\Delta \leftarrow F/(2\Lambda)$ \;
\For{ $i\leftarrow 1$ \KwTo $5\Lambda$} {
$LG \leftarrow $ local subgraph of $G'$ given $B$. \tcp*{ see \Cref{def:aug_graph_local}, \Cref{cor:construct-lg-local}}
$\ell \gets $ local length function on current flow $f$.\;
$f \leftarrow f + \operatorname{BinaryBlockingFlow}(A(LG,c_{LG}, f, \ell), \Delta)$. \;
$C \gets $ vertices in $N_{G'}^{\text{out}}(B)$ whose edges to sink
are saturated in the new flow. \;
$B \gets B \sqcup C \sqcup N_{G'}^{\text{out}}(C)$ \;
}
$F \gets F/2$ \;
}
\Return{maximum $(s,t)$-flow $f$ and its corresponding minimum
$(s,t)$-edge-cut $A$ in $G'$.}
\caption{LocalFlow$(G, x,\nu,k)$}
\label{alg:localgoldbergrao}
\end{algorithm}
\textbf{Correctness.} We show that $F$ is an upper bound on the
maximum flow value in $G'_f$ throughout the algorithm. We use induction on the iterations of the outer loop. Before
entering the outer loop for the first time, $F$ is set to the capacity
of the edge $(s,x)$ minus $\text{deg}_{G}^{\operatorname{out}}(x) $. Since $F$ is
positive, $F$ is a valid upper bound on the maximum flow value in $G'_f$. Now, we
consider the inner loop. After $5\Lambda$ iterations, either
\begin{itemize}[noitemsep, nolistsep]
\item we find a flow of value $\Delta/4$ at least $4\Lambda$ times, or
\item we find a blocking flow at least $\Lambda$ times.
\end{itemize}
If the first case holds, then we increase the flow by at least
$(\Delta/4)(4\Lambda) = F/2$. Hence, $F/2$ is a valid
upper bound on the remaining flow. For the second case, we need the following lemma, whose
proof is essentially the same as in the original analysis of the Goldberg--Rao algorithm:
\begin{lemma} \label{lem:block-flow-increase-dist}
A flow augmentation does not decrease the
distance $d(t)$. On the other hand, a blocking flow augmentation strictly increases $d(t)$.
\end{lemma}
\begin{proof}[Proof Sketch]
The only issue for a blocking flow augmentation is that the $s$-$t$ distance in the
residual graph may not increase if an edge length decreases from 1 to 0. This can happen only when
such an edge is modern, since classical edges have a constant length of
1. The proof that modern edges do not cause this issue follows exactly
the analysis of the classic Goldberg--Rao algorithm \cite{GoldbergR98}, using the
notion of special edges.
\end{proof}
If the second case holds, we claim:
\begin{claim}
If we find a blocking flow at least $\Lambda$ times, then there exists an $(s,t)$-edge cut of
capacity at most $\Delta \Lambda = F/2$, which is an upper bound on the remaining
flow to be augmented.
\end{claim}
\begin{proof}
Before entering the inner loop for the first time, by
\Cref{lem:layerproperties}, $d(t) = d_{\text{max}} \geq 3$. After
$\Lambda$ blocking flow augmentations, $d(t) \geq 3 + \Lambda$ by
\Cref{lem:block-flow-increase-dist}. Since the blocking flow computed in
$G'$ and the one computed in $LG$ coincide by \Cref{lem:localbinaryblockingflow}, we
always get the correct blocking flow augmentation.
Let $L_0, L_1, \ldots, L_{d_{\text{max}}}$ be the layers of vertices
with distance $0, 1, \ldots, d_{\text{max}} = d(t) \geq 3 +
\Lambda $. We focus on edges between layers $L_i, L_{i+1}$ for $1 \leq
i \leq d_{\text{max}}-2$. By \Cref{lem:layerproperties}, any two vertices $v_1 \in L_i,
v_2 \in L_{i+1}$ must be in $B$. Therefore, by definition of the local
length function $\ell$, all edges between $L_i, L_{i+1}$ must be
modern. Since any edge between $L_i$ and $L_{i+1}$ is modern and has
length 1, it must be of the form $(v_{\operatorname{in}}, v_{\operatorname{out}})$ or
$(v_{\operatorname{out}}, v_{\operatorname{in}})$ with residual capacity $\leq \Delta$ by
definition of the local length function (\Cref{def:local-length-function}).
Since there are at least $\Lambda = \sqrt{8\nu/(\epsilon k)} \geq \sqrt{n'}$
layers $L_i$ (by \Cref{lem:lg-size}) where $1 \leq
i \leq d_{\text{max}}-2$, by a counting argument, there must be a layer
$L'$ such that $|L'| \leq \sqrt{n'}$.
Next, for any vertex $v \in L'$, $v$ has either a single outgoing edge or a single incoming edge by
construction of $G'$, since this edge must be of the form $(v_{\operatorname{in}}, v_{\operatorname{out}})$ or
$(v_{\operatorname{out}}, v_{\operatorname{in}})$.
Therefore, we find an $(s,t)$-edge-cut consisting of the single
incoming or outgoing edge of each node in $L'$. This cut has
capacity at most $\Delta \sqrt{n'} \leq \Delta \sqrt{8\nu/(\epsilon
k)} = \Delta \Lambda = F/2$.
\end{proof}
The correctness follows since at the end of the loop we have $F < 1$.
\textbf{Running Time.} By \Cref{lem:localbinaryblockingflow}, we can
compute a $\Delta$-blocking flow in $LG$ with the local binary length function
$\ell$ in $\tilde{O}(\nu/\epsilon)$ time. This already includes the time to
read $LG$. The number of such computations is
$O(\Lambda \log (\nu /\epsilon) ) = O(\sqrt{\nu/(\epsilon k)} \log (m) ) = \tilde{O}(
\sqrt{\nu/(\epsilon k)})$. So the total running time is
$\tilde{O}(\nu^{3/2} / (\epsilon ^{3/2} k^{1/2}))$. This
completes the proof of \Cref{lem:local-gold-berg-rao}.
\subsection{Proof of \Cref{thm:local-vertex-connectivity}}
\begin{proof}[Proof of \Cref{thm:local-vertex-connectivity}]
Given $G, x, \nu, k, \epsilon$, by \Cref{lem:local-gold-berg-rao}, we compute the minimum
$(s,t)$-edge-cut $C^*$ in $G'$ in $ \tilde{O}(\nu^{3/2} / (\epsilon ^{3/2} k^{1/2}))$ time. If the
edge-cut $C^*$ has capacity $> \nu/\epsilon + \nu$, then by \Cref{lem:aug_graph_properties}\ref{item:aug_graph_properties_one}, we can
output $\perp$. Otherwise, $C^*$ has capacity at most $\nu/\epsilon +
\nu$, and by \Cref{lem:aug_graph_properties}\ref{item:aug_graph_properties_two} we can
output a separation triple $(L,S,R)$ with the properties in
\Cref{lem:aug_graph_properties}\ref{item:aug_graph_properties_two}.
\end{proof}
\section{Vertex Connectivity via Local Vertex Connectivity}
\begin{theorem} [Exact vertex connectivity]
\label{thm:exact_vertex_connectivity}
There exist randomized (Monte Carlo) algorithms that take as input a
graph $G$ and an integer $0 < k < O(\sqrt{n})$, and in $ \tilde{O} ( m + k^{7/3}n^{4/3} ) $ time for undirected graphs (and
in $\tilde{O} (\min( km^{2/3}n, km^{4/3})) $ time for directed graphs)
decide w.h.p. whether $\kappa_G \ge k$. If $\kappa_{G}<k$, then the
algorithms also return a corresponding vertex-cut.
\end{theorem}
We define the function $T(k,m,n)$ as
\begin{equation} \label{eq:exact-directed-time}
T(k,m,n)= \left\{ \begin{array}{rl}
\min\bigl(m^{4/3},\, nm^{2/3}k^{1/2},\, mn^{2/3+o(1)}/k^{1/3},\, n^{7/3+o(1)}/k^{1/6} \bigr) &\mbox{ if $ k \leq n^{4/5}$,} \\
n^{3+o(1)}/k &\mbox{ if $k > n^{4/5}.$ }
\end{array} \right.
\end{equation}
\begin{theorem} [Approximate vertex connectivity] \label{thm:approx_vertex_connectivity}
There exist randomized (Monte Carlo) algorithms that take as input a
graph $G$, a positive integer $k$, and a positive real $\epsilon < 1$,
and in $\tilde{O} ( m+\operatorname{poly}(1/\epsilon) \min( k^{4/3} n^{4/3}, k^{2/3}
n^{5/3+o(1)}, n^{3+o(1)}/k)) $
time for undirected graphs
(and in $\tilde{O} ( \operatorname{poly}(1/\epsilon) T(k,m,n))$ time for directed graphs,
where $T(k,m,n)$ is defined as in \Cref{eq:exact-directed-time})
w.h.p. return a vertex-cut of size at most
$(1+O(\epsilon))\kappa_G$ or certify that $\kappa_G \geq k$.
\end{theorem}
This section is devoted to proving \Cref{thm:exact_vertex_connectivity} and \Cref{thm:approx_vertex_connectivity}.
\subsection{Vertex Connectivity Algorithms}
\begin{algorithm}[H]
\KwIn{Sampling method, LocalVC, G = (V,E), k, a, $\epsilon$}
\KwOut{a vertex-cut $U$ such that $|U| \leq k$ or a symbol $\perp$.}
\BlankLine
If undirected, replace $E = \{ (u,v) , (v,u) \colon (u,v) \in
E({H_{k+1}}) \}$ where $H_{k+1}$ as in \Cref{thm:sparsification}. \;
\If{\normalfont{Sampling} method $ = $ vertex}
{
\For{ $i \leftarrow 1$ \KwTo $n/(\epsilon a)$ \normalfont{(use} $n/a$ for exact version) }
{
Sample a random pair of vertices $x, y \in V$. \;
\lIf{$k$ \normalfont{is not specified}}
{
compute approximate $\kappa_G(x,y)$.
}
\If{$\kappa_G(x,y) \leq (1+\epsilon)k$}
{
\Return{\normalfont{the} corresponding $(x,y)$-vertex-cut $U$.}
}
}
}
\If{\normalfont{Sampling} method $ = $ edge}
{
\For{ $i \leftarrow 1$ \KwTo $m/(\epsilon a)$ \normalfont{(use} $m/a$ for exact version) }
{
Sample a random pair of edges $(x_1,y_1), (x_2,y_2) \in E$. \;
\If{$k$ \normalfont{is not specified}}
{
compute approximate $\kappa_G(x_1,y_2), \kappa_G(x_1,x_2), \kappa_G(y_1,x_2),
\kappa_G(y_1,y_2)$.
}
\If{$\min(\kappa_G(x_1,y_2), \kappa_G(x_1,x_2), \kappa_G(y_1,x_2),
\kappa_G(y_1,y_2)) \leq (1+\epsilon)k$}
{
\Return{\normalfont{the} corresponding vertex-cut $U$.}
}
}
}
\If{\normalfont{LocalVC is not specified}} {
Let $x^*,y^*$ be vertices with minimum $\kappa_G(x^*,y^*)$ computed so far.\;
Let $W$ be the vertex-cut corresponding to $\kappa_G(x^*,y^*)$ \;
Let $v_{\text{min}}, u_{\text{min}} $ be the vertex with the minimum out-degree in $G$ and $G^R$ respectively. \;
\Return{ \normalfont{The smallest set among }$ \{ W,
N_G^{\operatorname{out}}(v_{\text{min}}),N_{G^R}^{\operatorname{out}}(u_{\text{min}}) \} $.} }
Let $\mathcal{L} = \{ 2^{\ell} \colon 1 \leq \ell \leq \lceil \log_2 a \rceil, \ \ell \in \mathbb{Z} \}$. \;
\If{\normalfont{Sampling method} $ = $ vertex}
{
\For{$ s \in \mathcal{L}$}
{
\For{ $i \gets 1 $ \KwTo $n/s$}
{
Sample a random vertex $x \in V$. \;
Let $\nu \leftarrow O(s(s+k))$. \;
\If{\normalfont{LocalVC}$(G,x,\nu,k,\epsilon)$ or \normalfont{LocalVC}$(G^R,x,\nu,k,\epsilon)$ outputs a vertex-cut $U$} {\Return{$U$.} }
}
}
}
\If{\normalfont{Sampling method} $ = $ edge}
{
\For{$ s \in \mathcal{L}$}
{
\For{ $i \gets 1 $ \KwTo $m/s$}
{
Sample a random edge $(x,y) \in E$. \;
Let $\nu \gets O(s),$ and $\mathcal{G} = \{ G, G^R \}$. \;
\For{$H \in \mathcal{G}, z \in \{x,y\}$}
{
\If{\normalfont{LocalVC}$(H,z,\nu,k,\epsilon)$ outputs a vertex-cut $U$}
{
\Return{$U$.}
}
}
}
}
}
\Return{$\perp$.}
\caption{VC$(\text{Sampling method}, \text{LocalVC}, \kappa(x,y); G, k, a, \epsilon)$}
\label{alg:vcframework}
\end{algorithm}
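For intuition, the following Python skeleton condenses the second phase of \Cref{alg:vcframework} with vertex sampling: the doubling over the scale $s$ and the LocalVC calls on both orientations of the graph. The routine \texttt{local\_vc}, the constants, and the data layout are assumptions of this sketch and not a faithful re-implementation of the pseudocode above.
\begin{verbatim}
# Illustrative skeleton of vertex-sampling with a hypothetical local_vc
# routine with interface local_vc(G, x, nu, k, eps) -> cut or None.
import math
import random

def vertex_sampling_with_local_vc(G, G_rev, vertices, local_vc, k, a, eps):
    for ell in range(1, math.ceil(math.log2(a)) + 1):
        s = 2 ** ell
        for _ in range(max(1, len(vertices) // s)):
            x = random.choice(vertices)
            nu = s * (s + k)                 # volume budget, as in the pseudocode
            for H in (G, G_rev):             # try both orientations
                cut = local_vc(H, x, nu, k, eps)
                if cut is not None:
                    return cut               # vertex-cut of size <= (1+eps)k
    return None                              # corresponds to returning the symbol bottom
\end{verbatim}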
\subsection{Correctness}
We can compute approximate vertex connectivity by a standard binary search on $k$ using the decision problem. We focus on the correctness of \Cref{alg:vcframework} for the approximate version. For the exact version, the same proof goes through when we use $\epsilon = 1/(2k)$ and $\kappa_G \leq \sqrt{n}/2$. Let $\Delta = \min(n/(1+\epsilon), (m/(1+\epsilon))^{1/2})$. For the purpose of the analysis of the decision problem, we make the following assumptions.
\begin{assumption} \label{ass:correctness}
If $k$ is specified in \Cref{alg:vcframework}, then
\begin{enumerate}[noitemsep,nolistsep,label=(\Roman*)]
\item \label{item:correct_one} $\deg_{\min}^{\operatorname{out}} \geq k$.
\item \label{item:correct_two} $k \leq \Delta$. We use $k \leq \sqrt{n}/2$ for exact vertex connectivity.
\item \label{item:correct_three} Local conditions in \Cref{thm:local-vertex-connectivity} are satisfied. We use exact version of local conditions for exact vertex connectivity.
\end{enumerate}
\end{assumption}
We justify the above assumptions. For
\Cref{ass:correctness}\ref{item:correct_one}, if it does not hold,
then we can trivially output the neighbors of the vertex with minimum
degree and we are done.
For \Cref{ass:correctness}\ref{item:correct_two}, if we can verify that $\kappa_G \geq k = \Delta$, then in \Cref{sec:high_vertex_conn} we show that the out-neighbors of the vertex with minimum out-degree form an approximate solution. For exact vertex connectivity, we either find a minimum vertex-cut or verify that $\kappa_G \geq \sqrt{n}/2$. For \Cref{ass:correctness}\ref{item:correct_three}, we can easily verify the parameters $a$ and $\nu,k,\epsilon$ supplied to LocalVC.
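Returning to the reduction mentioned at the beginning of this subsection, the search over $k$ can be organized as sketched below. Here \texttt{decide} is a hypothetical wrapper around \Cref{alg:vcframework} that either returns a vertex-cut of size at most $(1+\epsilon)k$ or certifies $\kappa_G \geq k$; the sketch is only illustrative of the reduction.
\begin{verbatim}
# Illustrative reduction from approximating kappa_G to the decision problem:
# grow k geometrically and return the first cut the decision routine reports.

def approx_connectivity(decide, n, eps):
    k, best_cut = 1, None
    while k <= n:
        cut = decide(k)                        # cut of size <= (1+eps)k, or None
        if cut is not None:
            best_cut = cut
            break
        k = max(k + 1, int(k * (1 + eps)))     # O(log n / eps) calls overall
    return best_cut
\end{verbatim}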
Ignoring running time, we classify \Cref{alg:vcframework} into four
algorithms depending on whether we sample edges or vertices, and whether we use LocalVC
or not. We omit edge-sampling without LocalVC since its running time
is subsumed by the vertex-sampling counterpart. We now prove the correctness of each of them.
\subsubsection{High Vertex Connectivity} \label{sec:high_vertex_conn}
We show that if we can verify that the graph has high vertex connectivity, then we can simply output the out-neighbors of the vertex with minimum out-degree to obtain a $(1+\epsilon)$-approximate solution.
\begin{proposition}
If $\kappa_G \geq \Delta$, then $\deg_{\min}^{\operatorname{out}} \leq (1+\epsilon)\kappa_G$.
\end{proposition}
\begin{proof}
We first show that if $\kappa_G \geq (m/(1+\epsilon))^{1/2}$, then $\kappa_G \geq n/(1+\epsilon)$. Since $\kappa_G \geq (m/(1+\epsilon))^{1/2}$, we have $\kappa_G^2 \geq m/(1+\epsilon)$. Therefore, we obtain
$ \kappa_G \deg_{\min}^{\operatorname{out}} \geq \kappa_G^2 \geq m/(1+\epsilon) \geq n \deg_{\min}^{\operatorname{out}} /(1+\epsilon).$
The first inequality follows from \Cref{obs:kappa-degree}, which states that $\deg_{\min}^{\operatorname{out}} \geq \kappa_G$. The second inequality follows from the above discussion. The third inequality follows since each vertex has at least $\deg_{\min}^{\operatorname{out}}$ outgoing edges. Therefore, $ \kappa_G \geq n/(1+\epsilon)$.
Now, we show that if $\kappa_G \geq n/(1+\epsilon)$, then $ \deg_{\min}^{\operatorname{out}} \leq (1+\epsilon)\kappa_G$. We have
$ (1+\epsilon)\kappa_G \geq n \geq \deg_{\min}^{\operatorname{out}} \geq \kappa_G.$ The first inequality follows from the condition above. The second inequality follows since the out-degree of any vertex is less than $n$. The third inequality follows from \Cref{obs:kappa-degree}.
\end{proof}
\subsubsection{Edge-Sampling with LocalVC}
\begin{lemma} \label{lem:edge_sampling_localvc_correct}
\Cref{alg:vcframework} with edge-sampling and $\operatorname{LocalVC}$ w.h.p. correctly outputs a vertex-cut of
size $\leq (1+\epsilon)k$ if $\kappa_G \leq k$, and the symbol $\perp$ if $\kappa_G > k $.
\end{lemma}
We introduce notation for edge sets arising from a separation triple
$(L,S,R)$ in $G$. Let $ E^*(L,S) = E(L,L) \sqcup E(L,S) \sqcup E(S,L)$
and $ E^*(S,R) = E(R,R) \sqcup E(S,R) \sqcup E(R,S)$.
\begin{definition} [$L$-volume, and $R$-volume of the separation
triple] \label{def:lrvol}
For a separation triple $(L,S,R)$, we denote $\operatorname{vol}_G^*(L) = \sum_{v \in L} \deg_G^{\operatorname{out}}(v) + |E(S,L)|$ and
$\operatorname{vol}_G^*(R) = \sum_{v \in R} \deg_G^{\operatorname{out}}(v) + |E(S,R)|$.
\end{definition}
It is easy to see that $\operatorname{vol}_G^*(L) = |E^*(L,S)|$ and $\operatorname{vol}_G^*(R) = |E^*(S,R)|$.
The following observations follow immediately from the definition of
$E^*(L,S)$ and $E^*(S,R)$, and a separation triple $(L,S,R)$.
\begin{observation} \label{obs:edge_star}
We can partition edges in $G$ according to $(L,S,R)$ separation triple as $$E
= E^*(L,S) \sqcup E(S,S) \sqcup E^*(S,R)$$
And,
\begin{itemize}[nolistsep,noitemsep]
\item For any edge $ (x,y) \in E^*(L,S), x \in L $ or $ y \in L$.
\item For any edge $ (x,y) \in E^*(S,R), x \in R $ or $ y \in R$.
\end{itemize}
Furthermore, $$ m = \operatorname{vol}_G^*(L) + |E(S,S)| + \operatorname{vol}_G^*(R)$$
\end{observation}
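The volumes and the edge partition above are straightforward to compute from a separation triple, as in the illustrative sketch below; the adjacency lists \texttt{out\_neighbors} and the sets \texttt{L} and \texttt{R} are assumed inputs, and $S$ is implicit as the remaining vertices.
\begin{verbatim}
# Illustrative sketch: classify each edge of G according to (L, S, R) and
# accumulate vol*_G(L) = |E*(L,S)| and vol*_G(R) = |E*(S,R)|.

def star_volumes(out_neighbors, L, R):
    vol_L, vol_R, within_S = 0, 0, 0
    for u, nbrs in out_neighbors.items():
        for v in nbrs:
            if u in L or v in L:          # edge of E*(L, S)
                vol_L += 1
            elif u in R or v in R:        # edge of E*(S, R)
                vol_R += 1
            else:                         # both endpoints lie in S
                within_S += 1
    return vol_L, vol_R, within_S         # vol_L + within_S + vol_R = m
\end{verbatim}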
We now proceed with the proof. There are three cases for the set of all
separation triples in $G$. The first case is that there exists a separation triple $(L,S,R)$ such
that $|S| \leq k$, $\operatorname{vol}^*_G(L) \geq a$, and $\operatorname{vol}^*_G(R) \geq a$. We show that w.h.p. \Cref{alg:vcframework} outputs a vertex-cut of size at most $(1+\epsilon)k$.
\begin{lemma} \label{lem:balanced_edge}
If $G$ has a separation triple $(L,S,R)$ such that $ |S| \leq k$,
$\operatorname{vol}^*_G(L) \geq a$, and $\operatorname{vol}^*_G(R) \geq a $, then w.h.p. \Cref{alg:vcframework} outputs a vertex-cut of size at most $(1+\epsilon)k$.
\end{lemma}
\begin{proof}
We show that the first loop (with the edge-sampling method) of \Cref{alg:vcframework} finds a vertex-cut of size at most $(1+\epsilon)k$.
We sample two edges $e_1 = (x_1,y_1), e_2 = (x_2,y_2) \in
E$ uniformly at random. The probability that $e_1 \in E^*(L,S)$ and $e_2 \in
E^*(S,R)$ is $$ P(e_1 \in E^*(L,S), e_2 \in E^*(S,R)) = P(e_1
\in E^*(L,S)) P( e_2 \in E^*(S, R)), $$ since the
two events are independent.
By \Cref{ass:correctness}\ref{item:correct_two}, $k \leq \Delta$, which means $k^2 \leq m/(1+\epsilon)$. For exact vertex connectivity, we have $k^2 \leq n/4 \leq m/4$. For generality, we write $k^2 \leq m/c$, where $c = 1+\epsilon$ for approximate vertex connectivity and $c = 4$ for the exact version.
We claim $\operatorname{vol}^*_G(L) + \operatorname{vol}^*_G(R) = \Omega((1-1/c)m)$. Indeed, by \Cref{obs:edge_star}, $\operatorname{vol}^*_G(L) + \operatorname{vol}^*_G(R) = m - |E(S,S)|$, and we have $|E(S,S)| \leq k^2 \leq m/c$.
If $\operatorname{vol}^*_G(R) = \Omega((1-1/c) m)$, then $ P(e_1 \in E^*(L,S), e_2 \in E^*(S,R)) = P(e_1 \in E^*(L,S)) P( e_2 \in E^*(S, R)) \geq \operatorname{vol}^*_G(R) a/ m^2 = \Omega( (1-1/c)a/m) $.
Otherwise, $\operatorname{vol}^*_G(L) = \Omega((1-1/c)m)$, and similarly we get $ P(e_1 \in E^*(L,S), e_2 \in E^*(S,R)) = \Omega( (1-1/c)a/m) $.
Therefore, it is enough to sample $O(m/(\epsilon a))$ times ($O(m/a)$ times for exact vertex connectivity) to get w.h.p. at least
one trial where $e_1 = (x_1, y_1) \in E^*(L,S)$ and $e_2 = (x_2,y_2)
\in E^*(S,R)$. From now on we assume $e_1 = (x_1, y_1) \in E^*(L,S)$ and $e_2 = (x_2,y_2) \in E^*(S,R)$.
Finally, we show that the first loop of \Cref{alg:vcframework}
outputs a vertex-cut of size at most $(1+\epsilon)k$. By \Cref{obs:edge_star},
at least one vertex of $(x_1,y_1)$ is in $L$, and at least one vertex of $(x_2,y_2)$ is in
$R$. Therefore, we find a separation triple corresponding to $\min(\kappa_G(x_1,y_2), \kappa_G(x_1,x_2), \kappa_G(y_1,x_2), \kappa_G(y_1,y_2)) \leq (1+\epsilon)k$.
\end{proof}
The second case is that there exists a separation triple $(L,S,R)$ such that
$|S| \leq k$ and either $\operatorname{vol}^*_G(L) < a $ or $\operatorname{vol}^*_G(R) < a$. We show that
w.h.p. \Cref{alg:vcframework} outputs a vertex-cut of size at most
$(1+\epsilon)k$.
\begin{lemma}\label{lem:imbalanced_edge}
If $G$ has a separation triple $(L,S,R)$ such that $|S| \leq k$ and either $\operatorname{vol}^*_G(L) < a $ or $\operatorname{vol}^*_G(R) < a$,
then w.h.p. \Cref{alg:vcframework} outputs a vertex-cut of size at most $(1+\epsilon)k$.
\end{lemma}
\begin{proof}
We show that the second loop (LocalVC with the edge-sampling method) of \Cref{alg:vcframework} finds a vertex-cut of size at most $(1+\epsilon)k$.
We focus on the case $\operatorname{vol}^*_G(L) < a$. The case $\operatorname{vol}^*_G(R) < a$ is similar, except that we need to compute local vertex connectivity on the reverse graph instead.
We show that w.h.p. there is an event $e = (x,y) \in E^*(L,S)$. Since $\operatorname{vol}^*_G(L) < a$, there exists an integer $\ell$ in the range $1 \leq \ell \leq \lceil \log_2 a
\rceil$ such that $ 2^{\ell -1} \leq \operatorname{vol}^*_G(L) \leq 2^{\ell}$. That is, $s/2
\leq \operatorname{vol}^*_G(L) \leq s$ for $s = 2^{\ell}$. The probability that $e
\in E^*(L,S)$ is $\operatorname{vol}^*_G(L)/m \geq s/(2m)$. Hence, it is enough to
sample $O(m/s)$ edges to get an event $e \in E^*(L,S)$ w.h.p.
From now on we assume that $\operatorname{vol}^*_G(L) \leq s$ and that $e = (x,y) \in
E^*(L,S)$. By \Cref{def:lrvol}, $\operatorname{vol}_G^*(L) =\sum_{v \in L}
\deg_G^{\operatorname{out}}(v) + |E(S,L)| \leq s$. Therefore, $\operatorname{vol}_G^{\operatorname{out}}(L) = \sum_{v \in L} \deg_G^{\operatorname{out}}(v) \leq s$.
By \Cref{obs:edge_star}, $x \in L$ or $y \in L$. We assume
WLOG that $x \in L$ (\Cref{alg:vcframework} runs LocalVC on both $x$
and $y$).
Hence, we have verified the following conditions for the parameters $x, \nu,
k$ for LocalVC$(G,x,\nu,k)$:
\begin{itemize}[nolistsep, noitemsep]
\item Local conditions are satisfied by \Cref{ass:correctness}\ref{item:correct_three}.
\item $x \in L$.
\item $|S| \leq k$.
\item $\operatorname{vol}_G^{\operatorname{out}}(L) \leq \nu$ and we use $\nu = s$.
\end{itemize}
By \Cref{thm:local-vertex-connectivity}, LocalVC outputs a vertex-cut of size at most $(1+\epsilon)k$.
\end{proof}
The final case is when every separation triple $(L,S,R)$ in $G$ has $|S| >
k$. In other words, $\kappa_G > k$. If \Cref{alg:vcframework} outputs a vertex-cut, then it is a $(1+\epsilon)$-approximate vertex-cut. Otherwise, \Cref{alg:vcframework} correctly outputs $\perp$.
\begin{comment}
\begin{lemma}\label{lem:null_edge}
If $\kappa_G > k$, then \Cref{alg:edge_sampling} outputs $\perp$.
\end{lemma}
\begin{proof}
In the first loop, we always outputs $\perp$ since $\kappa(x,y) \geq \kappa > k$, and
FoldFulkerson max-flow can verify that $\kappa_G > k$. In the second
loop, recall that \Cref{eq:local_condition} or \Cref{eq:local_condition_dense} is
satisfied by \Cref{pro:nuk_sat_vertex}. Furthermore, every separation triple $(L,S,R)$ in $G$, $|S| >
k$. Therefore, by \Cref{thm:local-vertex-connectivity}, LocalVC
always output $\perp$ for any sample vertex $x$.
\end{proof}
\end{comment}
The proof of \Cref{lem:edge_sampling_localvc_correct} is complete since
\Cref{lem:edge_sampling_localvc_correct} follows from
\Cref{lem:balanced_edge}, \Cref{lem:imbalanced_edge}, and the case $\kappa_G > k$, corresponding to the three cases of the set of separation triples in $G$.
\subsubsection{Vertex-Sampling with LocalVC}
\begin{lemma} \label{lem:vertex_sampling_localvc_correct}
\Cref{alg:vcframework} with vertex-sampling and $\operatorname{LocalVC}$ w.h.p. correctly outputs a vertex-cut of
size $\leq (1+\epsilon)k$ if $\kappa_G \leq k$, and the symbol $\perp$ if $\kappa_G > k $.
\end{lemma}
We consider three cases for the set of all separation triples in $G$.
The first case is that there exists a separation triple $(L,S,R)$ such
that $|S| \leq k$, $|L| \geq a$, and $ |R| \geq a$. We show that w.h.p.
\Cref{alg:vcframework} outputs a vertex-cut of size at most $(1+\epsilon)k$.
\begin{lemma} \label{lem:balanced_v}
If $G$ has a separation triple $(L,S,R)$ such that $|S| \leq k$, $|L|
\geq a$, and $ |R| \geq a$, then w.h.p. \Cref{alg:vcframework} outputs a
vertex-cut of size at most $(1+\epsilon)k$.
\end{lemma}
\begin{proof}
We show that the first loop of \Cref{alg:vcframework} finds a
vertex-cut of size at most $(1+\epsilon)k$.
We sample two vertices $x,y \in V$ independently. Since the
two events $x \in L$ and $y \in R$ are independent, the probability that
$x \in L$ and $y \in R$ is $P(x \in L, y \in R) = P(x \in L) P(y \in R)$.
By \Cref{ass:correctness}\ref{item:correct_two}, $k \leq \Delta$, which means $k \leq n/(1+\epsilon)$. For exact vertex connectivity, we have $k \leq \sqrt{n}/2 \leq n/2$. For generality, we write $k \leq n/c$, where $c = 1+\epsilon$ for approximate vertex connectivity and $c = 2$ for the exact version.
Since $k \leq n/c$, we have $|L|+|R| = n - |S| \geq n - k \geq n - n/c = (1-1/c)n$.
If $|R| = \Omega((1-1/c)n)$, then $P(x \in L, y \in R) = P(x \in L) P(y \in
R) \geq |R| a/n^2 = \Omega( (1-1/c)a/n) $. Otherwise, $|L| = \Omega((1-1/c)n)$, and with a similar argument we get $P(x \in L, y \in R) = \Omega( (1-1/c)a/n) $.
\begin{comment}
we have $|L|+|R| = \Omega(n)$. If $|R| =
\Omega(n)$, then $$P(x \in L, y \in R) = P(x \in L) P(y \in
R) \geq |R| n^{a}/n^2 = \Omega( n^{a-1}) $$
Otherwise, $|L| = \Omega(n)$, then $$P(x \in L, y \in R) = P(x \in L) P(y \in
R) \geq |L| n^{a}/n^2 = \Omega( n^{a-1}) $$
\end{comment}
Therefore, it is enough to sample $O(n/(a\epsilon))$ times (and $O(n/a)$ times for the exact version) to get at least one trial corresponding to the event
$x \in L$ and $y \in R$ w.h.p. With that event, we can find a
separation triple corresponding to $\kappa(x,y) \leq (1+\epsilon)k$.
\end{proof}
The second case is that there exists a separation triple $(L,S,R)$ such
that $|S| \leq k$ and either $|L| < a$ or $|R| < a$. We show that w.h.p.
\Cref{alg:vcframework} outputs a vertex-cut of size at most $(1+\epsilon)k$.
\begin{lemma}\label{lem:imbalanced_v}
If $G$ has a separation triple $(L,S,R)$ such that
$|S| \leq k$ and either $|L| < a$ or $|R| < a,$ then w.h.p. \Cref{alg:vcframework} outputs a vertex-cut of size at most $(1+\epsilon)k$.
\end{lemma}
\begin{proof}
We show that the second loop (LocalVC with the vertex-sampling method) of \Cref{alg:vcframework} finds a vertex-cut of size at most $(1+\epsilon)k$.
We focus on the case $|L| < a$. The case $|R| < a$ is similar,
except that we need to compute local vertex connectivity on the reverse graph instead.
We show that w.h.p. there is an event $x \in L$. Since $|L| < a$, there exists an integer $\ell$ in the range $1 \leq \ell \leq \lceil \log_2a
\rceil$ such that $ 2^{\ell -1} \leq |L| \leq 2^{\ell}$. In other words, for $s = 2^{\ell}$, we have $s/2 \leq |L| \leq s$. Since $x$ is sampled independently and uniformly, the probability that $x \in L$
is $|L|/n$, which is at least $s/(2n)$. Therefore, by sampling $O(n/s)$ rounds, w.h.p. there is at least one event where $x \in L$.
From now on we assume that $|L| \leq s$ and that $x \in L$.
We show that $\operatorname{vol}_G^{\operatorname{out}}(L) \leq s(s+k)$. Since $|L| \leq s$, $\operatorname{vol}_G^{\operatorname{out}}(L) = |E_G(L, L)| + |E_G(L, S)| \leq |L|^2 + |L| |S| \leq s^2 + sk $.
We have verified the following conditions for the parameters $x, \nu,
k$ for LocalVC$(G,x,\nu,k)$:
\begin{itemize}[nolistsep, noitemsep]
\item Local conditions are satisfied by \Cref{ass:correctness}\ref{item:correct_three}.
\item $x \in L$.
\item $|S| \leq k$.
\item $\operatorname{vol}_G^{\operatorname{out}}(L) \leq \nu$ since $\operatorname{vol}_G^{\operatorname{out}}(L) \leq s^2 + sk$, and we use $\nu = s^2+sk$.
\end{itemize}
By \Cref{thm:local-vertex-connectivity}, LocalVC outputs a vertex-cut of size at most $(1+\epsilon)k$.
\begin{comment}
For undirected graph, the same argument applies except that we can use
$\nu = O(sk)$. To see this, we need to show $\operatorname{vol}(L) = O(sk)$. Recall that we
compute the connectivity on $H_{k+1}$
given by \Cref{thm:sparsification}. Therefore, $|E_{H_{k+1}}(L, L)| \leq (k+1)|L|
$. Hence, $$ \operatorname{vol}(L) = 2|E_{H_{k+1}}(L, L)| + |E_{H_{k+1}}(L,S)|
\leq 2(k+1) |L| + |L| |S| \leq 2s (k+1) + sk = O(sk)$$
\end{comment}
\end{proof}
The final case is when every separation triple $(L,S,R)$ in $G$ has $|S| >
k$. In other words, $\kappa_G > k$. If \Cref{alg:vcframework} outputs a vertex-cut, then it is a $(1+\epsilon)$-approximate vertex-cut. Otherwise, \Cref{alg:vcframework} correctly outputs $\perp$.
\begin{comment}
\begin{lemma}\label{lem:null_v}
If $\kappa_G > k$, then \Cref{alg:vcframework} outputs $\perp$.
\end{lemma}
\begin{proof}
In the first loop, we always outputs $\perp$ since $\kappa(x,y) \geq \kappa > k$, and
FoldFulkerson max-flow can verify that $\kappa_G > k$. In the second
loop, recall that \Cref{eq:local_condition} or \Cref{eq:local_condition_dense} is
satisfied by \Cref{pro:nuk_sat_vertex}. Furthermore, every separation triple $(L,S,R)$ in $G$, $|S| >
k$. Therefore, by \Cref{thm:local-vertex-connectivity}, LocalVC
always output $\perp$ for any sample vertex $x$.
\end{proof}
\end{comment}
The proof of \Cref{lem:vertex_sampling_localvc_correct} is complete since
\Cref{lem:vertex_sampling_localvc_correct} follows from
\Cref{lem:balanced_v}, \Cref{lem:imbalanced_v}, and the case $\kappa_G > k$, corresponding to the three cases of the set of separation triples in $G$.
\begin{comment}
\subsubsection{Edge-Sampling without LocalVC}
We do not specify $k$ and LocalVC algorithm.
\begin{lemma} \label{lem:edge_sampling_nolocalvc_correct}
\Cref{alg:vcframework} with edge-sampling, but without $\operatorname{LocalVC }$ w.h.p. outputs a vertex-cut of size $\leq (1+\epsilonilon)\kappa_G$.
\end{lemma}
\begin{proof}
Let $\tilde \kappa$ be the answer of our algorithm. By design, we have $\tilde \kappa \leq \min(d_{\min}^{\operatorname{out}}, d_{\min}^{\operatorname{in}})$. Also, $\tilde \kappa \geq \kappa$ since the answer corresponds to some vertex-cut. It remains to show $\tilde \kappa \leq (1+O(\epsilonilon))\kappa$.
Let $(L,S,R)$ be an optimal separation triple. We assume without loss of generality that $\operatorname{vol}_G^*(L) \leq \operatorname{vol}_G^*(R)$ (recall \Cref{def:lrvol}). The other case is symmetric, where we use $d_{\min}^{\operatorname{in}}$ on the reverse graph instead.
We first show the inequality $|L| \geq d_{\min}^{\operatorname{out}} - \kappa$. Since $(L,S,R)$ is a separation triple where $|S| = \kappa$, the number of out-neighbors of a fixed vertex $x \in L$ that can be included in $S$ is at most $\kappa$. By definition of separation triple, neighbors of $x$ cannot be in $R$, and so the rest of the neighbors must be in $L$.
Next, we show $\operatorname{vol}_G^{\operatorname{out}}(L) \geq (d_{\min}^{\operatorname{out}})^2 - d_{\min}^{\operatorname{out}}\kappa$. By definition of volume, $\operatorname{vol}_G^{\operatorname{out}}(L) \geq |L|(d_{\min}^{\operatorname{out}})$, and by the inequality above, $|L|(d_{\min}^{\operatorname{out}}) \geq (d_{\min}^{\operatorname{out}})^2 - d_{\min}^{\operatorname{out}}\kappa$. Therefore, the claim follows.
If $\operatorname{vol}_G^*(L) \leq \epsilonilon (d_{\min}^{\operatorname{out}})^2$, then $\epsilonilon (d_{\min}^{\operatorname{out}})^2 \geq \operatorname{vol}_G^*(L) \geq \operatorname{vol}_G^{\operatorname{out}}(L) \geq (d_{\min}^{\operatorname{out}})^2 - d_{\min}^{\operatorname{out}}\kappa$. Hence, $\epsilonilon (d_{\min}^{\operatorname{out}})^2 \geq (d_{\min}^{\operatorname{out}})^2 - d_{\min}^{\operatorname{out}}\kappa$, and so $\kappa \geq (d_{\min}^{\operatorname{out}}) - \epsilonilon (d_{\min}^{\operatorname{out}}) = (1-\epsilonilon)(d_{\min}^{\operatorname{out}})$. Therefore, $\tilde{\kappa}$ is indeed an $(1+O(\epsilonilon))$-approximation
of $\kappa$ in this case.
On the other hand, if $\operatorname{vol}_G^*(L) \geq \epsilonilon (d_{\min}^{\operatorname{out}})^2$, then we claim that $\operatorname{vol}_G^*(R)\geq \epsilonilon m/4$. Indeed, if $(d_{\min}^{\operatorname{out}})^2 \geq m/2$, then $\operatorname{vol}_G^*(R) \geq \operatorname{vol}_G^*(L) \geq \epsilonilon (d_{\min}^{\operatorname{out}})^2 \geq m/2$. Otherwise, $(d_{\min}^{\operatorname{out}})^2 \leq m/2.$ In this case, $\kappa^2 \leq (d_{\min}^{\operatorname{out}})^2 \leq m/2$. Therefore, $2\operatorname{vol}_G^*(R) \geq \operatorname{vol}_G^*(L)+\operatorname{vol}_G^*(R) = m - E(S,S) \geq m - |S|^2 = m - \kappa^2 \geq m/2$. In either case, the claim follows.
We show that the probability that two sample edges $e_1 \in E^*(L,S)$ and $e_2 \in E^*(S,R)$ is at least $\epsilonilon^2 d_{\min}^{\operatorname{out}}/(4m)$. First of all, the two events are independent. Recall that $\operatorname{vol}_G^*(L) \geq \epsilonilon (d_{\min}^{\operatorname{out}})^2$ and $\operatorname{vol}_G^*(R)\geq \epsilonilon m/4$. Therefore, $P(e_1 \in E^*(L,S), e_2 \in E^*(S,R)) = P(e_1 \in E^*(L,S)P(e_2 \in E^*(S,R)) = (\operatorname{vol}_G^*(L)/m)(\operatorname{vol}_G^*(R)/m) \geq \epsilonilon^2 (d_{\min}^{\operatorname{out}})^2/(4m)$.
Therefore, we sample for $\tilde{O}(m/(\epsilonilon^2 (d_{\min}^{\operatorname{out}} )^2)$ many times to get the event $e_1 \in E^*(L,S)$ and $e_2 \in E^*(S,R)$ w.h.p. Hence, we compute approximate $\kappa(x,y)$ correctly, and so our answer $\tilde \kappa$ is indeed an $(1+\epsilonilon)$-approximation.
\end{proof}
\end{comment}
\subsubsection{Vertex-Sampling without LocalVC}
Here we specify neither $k$ nor a LocalVC algorithm.
\begin{lemma} \label{lem:vertex_sampling_nolocalvc_correct}
\Cref{alg:vcframework} with vertex-sampling, but without $\operatorname{LocalVC}$, w.h.p. outputs a vertex-cut of size $\leq (1+\epsilon)\kappa_G$.
\begin{proof}
Let $\tilde \kappa$ be the answer of our algorithm. By design, we have $\tilde \kappa \leq \min(d_{\min}^{\operatorname{out}}, d_{\min}^{\operatorname{in}})$. Also, $\tilde \kappa \geq \kappa$ since the answer corresponds to some vertex-cut. It remains to show $\tilde \kappa \leq (1+O(\epsilonilon))\kappa$.
Let $(L,S,R)$ be an optimal separation triple. We assume without loss
of generality that $|L| \leq |R|$. The other case is symmetric, where
we use $d_{\min}^{\operatorname{in}}$ instead.
We first show the inequality $|L| \geq d_{\min}^{\operatorname{out}} - \kappa$. Since $(L,S,R)$ is a separation triple where $|S| = \kappa$, the number of out-neighbors of a fixed vertex $x \in L$ that can be included in $S$ is at most $\kappa$. By definition of separation triple, neighbors of $x$ cannot be in $R$, and so the rest of the neighbors must be in $L$.
If $|L| \leq \epsilon d_{\min}^{\operatorname{out}}$, then $\kappa=|S|\ge d_{\min}^{\operatorname{out}}-\epsilon d_{\min}^{\operatorname{out}} \ge\tilde{\kappa}(1-\epsilon)\ge\kappa(1-\epsilon)$.
That is, $\tilde{\kappa}$ is indeed a $(1+O(\epsilon))$-approximation
of $\kappa$ in this case.
On the other hand, if $|L| \geq \epsilon d_{\min}^{\operatorname{out}}$, then we claim that $|R| \geq \epsilon n/4$. To see this, if $d_{\min}^{\operatorname{out}} \geq n/2$, then $|R| \geq |L| \geq \epsilon d_{\min}^{\operatorname{out}} \geq \epsilon n/2$. Otherwise, $d_{\min}^{\operatorname{out}} \leq n/2$. In this case, $\kappa \leq d_{\min}^{\operatorname{out}} \leq n/2$. Therefore, $2|R| \geq |L|+|R| = n - |S| = n - \kappa \geq n/2$. In either case, the claim follows.
We show that the probability that the two sampled vertices satisfy $x \in L$ and $y \in R$ is at least $\epsilon^2 d_{\min}^{\operatorname{out}}/(4n)$. First of all, the two events are independent. Recall that $|L| \geq \epsilon d_{\min}^{\operatorname{out}}$ and $|R| \geq \epsilon n/4$. Therefore, $P(x \in L, y \in R) = P(x \in L)P(y \in R) = (|L|/n)(|R|/n) \geq \epsilon^2 d_{\min}^{\operatorname{out}}/(4n)$.
Therefore, we sample $\tilde{O}(n/(\epsilon^2d_{\min}^{\operatorname{out}} ))$ times to get the event $x \in L$ and $y \in R$ w.h.p. Hence, we compute an approximate $\kappa(x,y)$ correctly, and so our answer $\tilde \kappa$ is indeed a $(1+\epsilon)$-approximation.
\end{proof}
\subsection{Running Time}
Let $T_1(m,n, k,\epsilon)$ be the time for deciding whether $\kappa(x,y) \leq (1+\epsilon)k$, $T_2(\nu, k, \epsilon)$ be the
running time of approximate LocalVC, and $T_3(m,n,\epsilon)$ be the time for computing an approximate $\kappa(x,y)$. If $G$ is undirected, we can replace $m$ with $nk$ after an additional $O(m)$ preprocessing time. The running time for the exact version is similar except that we do not have to pay the $1/\epsilon$ factor for the first loop of \Cref{alg:vcframework}.
\subsubsection{Edge-Sampling with LocalVC}
\begin{lemma} \label{lem:edge_sampling_localvc_time}
\Cref{alg:vcframework} with edge-sampling and
$\operatorname{LocalVC}$ terminates in time $$\tilde{O}( (m/(\epsilon a))(T_1(m,n,
k,\epsilon) + T_2(a, k, \epsilon) )).$$
\end{lemma}
\begin{proof}
The first term comes from the first loop of \Cref{alg:vcframework}. That is, we repeat $O(m/(a\epsilon))$ iterations of computing an approximate $\kappa(x,y)$, and each iteration takes $T_1(m,n, k,\epsilon)$ time.
The second term comes from computing local vertex connectivity. For each $s \in \mathcal{L}$, we repeat the second loop $O(m/s)$ times, and each LocalVC subroutine takes $T_2(\nu, k, \epsilon)$ time where $\nu = s$. Therefore, the total time for the second loop is $\sum_{s \in \mathcal{L}}(m/s)T_2(s, k, \epsilon) = \tilde{O}( (m/a)T_2(a, k, \epsilon ))$.
\end{proof}
\subsubsection{Vertex-Sampling with LocalVC}
\begin{lemma} \label{lem:vertex_sampling_localvc_time}
\Cref{alg:vcframework} with vertex-sampling, and
$\operatorname{LocalVC }$ terminates in time $$\tilde{O}( (n/(\epsilon a))(T_1(m,n, k,\epsilon) + T_2(a^2+ak, k, \epsilon) )).$$
\end{lemma}
\begin{proof}
The first term comes from the first loop of \Cref{alg:vcframework}. That is, we repeat $O(n/(a\epsilon))$ times the computation of an approximate $\kappa(x,y)$, and each iteration takes $T_1(m,n, k,\epsilon)$ time.
The second term comes from computing local vertex connectivity. For each $s \in \mathcal{L}\xspace$, we repeat the second loop $O(n/s)$ times, and each LocalVC subroutine takes $T_2(\nu, k, \epsilon)$ time where $\nu = O(s(s+k))$. Therefore, the total time for the second loop is $\sum_{s \in \mathcal{L}\xspace}(n/s)T_2(s(s+k), k, \epsilon) = \tilde{O}( (n/a)T_2(a^2 +ak, k, \epsilon ))$.
\end{proof}
\begin{comment}
\subsubsection{Edge-Sampling without LocalVC}
\begin{lemma} \label{lem:edge_sampling_nolocalvc_time}
\Cref{alg:vcframework} with edge-sampling, but without
$\operatorname{LocalVC }$ terminates in time $$\tilde{O}( m/(\epsilonilon k^2) \min(km, n^2/\epsilonilon)).$$
\end{lemma}
\end{comment}
\subsubsection{Vertex-Sampling without LocalVC}
\begin{lemma} \label{lem:vertex_sampling_nolocalvc_time}
\Cref{alg:vcframework} with vertex-sampling, but without
$\operatorname{LocalVC }$ terminates in time $$\tilde{O}( n/(\epsilon^2 k )T_3(m,n,\epsilon)).$$
\end{lemma}
\begin{proof}
The running time follows from the first loop, where we set $a$ such
that the number of samples is $n/(\epsilon^2 k )$, and computing an approximate $\kappa(x,y)$ can be done in $T_3(m,n,\epsilon)$ time.
\end{proof}
\subsection{Proof of \Cref{thm:exact_vertex_connectivity,thm:approx_vertex_connectivity}}
For exact vertex connectivity, LocalVC runs in $\nu^{1.5}k$
time by \Cref{cor:exact-local-vertex-connectivity}. We can decide $\kappa(x,y) \leq k$ in $O(mk)$ time.
For undirected exact vertex connectivity where $k < O(\sqrt{n})$, we first sparsify the graph
in $O(m)$ time. Then, we use the edge-sampling with LocalVC algorithm
where we set $a = m'^{2/3}$, where $m' = O(nk)$ is the number of edges of the sparsified graph.
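As an illustration (a back-of-the-envelope calculation obtained by combining this choice with \Cref{lem:edge_sampling_localvc_time}; the precise bounds are those stated in the theorems), in this exact undirected setting we may take $T_1 = O(m'k)$ and $T_2(\nu, k) = O(\nu^{1.5}k)$, and the choice $a = m'^{2/3}$ balances the two terms:
\[
\tilde{O}\Bigl(\frac{m'}{a}\bigl(m'k + a^{1.5}k\bigr)\Bigr)
= \tilde{O}\bigl(m'^{1/3}(m'k + m'k)\bigr)
= \tilde{O}\bigl(m'^{4/3}k\bigr),
\]
in addition to the $O(m)$ sparsification time.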
For directed exact vertex connectivity where $k < O(\sqrt{n})$, we use edge-sampling with
LocalVC algorithm where we set $a = m^{2/3}$ if $m < n^{3/2}$. If $m >
n^{3/2}$, we use vertex-sampling with LocalVC algorithm where we set
$a = m^{1/3}$.
For approximate vertex connectivity, approximate LocalVC runs in $
\operatorname{poly}(1/\epsilon) \nu^{1.5}/ \sqrt{k}$ time by
\Cref{thm:local-vertex-connectivity}. Also, we can decide $\kappa(x,y)
\leq (1+O(\epsilon))k$ or certify that $\kappa \geq k$ in time
\\$\tilde{O}(\operatorname{poly}(1/\epsilon) \min(mk,n^{2+o(1)}))$. The running time
$\operatorname{poly}(1/\epsilon) n^{2+o(1)} $ is due to \cite{ChuzhoyK19}.
For undirected approximate vertex connectivity, we first sparsify the
graph in $O(m)$ time. Let $m'$ be the number of edges of the
sparsified graph. For $k < n^{0.8}$, we use edge-sampling with approximate LocalVC
algorithm where we set $a = m^{\hat a}$, where $\hat a = \frac{\min(5\hat k +
2, \hat k +4)}{ 3\hat k + 3}$, and $\hat k = \log_nk$. For $k > n^{0.8}$, we use
vertex-sampling without LocalVC.
For directed approximate vertex connectivity, if $k \leq \sqrt{n}$, we run edge-sampling with $a = m^{\hat a}, \hat a =
\min(2/3+\hat k, 1)$ where $\hat k = \log_m k$, or we run
vertex-sampling with $a = m^{1/3} k^{1/2}$. If $ \sqrt{n} < k \leq
n^{0.8}$, we run edge-sampling with $a = m^{\hat a}$ where $\hat a =
4\log_mn/3 + \log_mk/3$ or vertex-sampling with $a = n^{\hat a}$ where
$\hat a = (2/3+ (\log_nk)/6)$. Finally, if $k > n^{4/5}$, we use vertex-sampling without
LocalVC.
\section{$(1+\epsilon)$-Approximate Vertex Connectivity via Convex Embedding}
\begin{theorem}
There exists an algorithm that takes $G$ and $\epsilon > 0$, and in
$O(n^{\omega}/ \epsilon^2 + \min(\kappa_G, \sqrt{n})m )$ time outputs a
vertex-cut $U$ such that $|U| \leq (1+\epsilon)\kappa$.
\end{theorem}
\subsection{Preliminaries}
\begin{definition}[Pointset in $\mathbb{F}^k$]
Let $\mathbb{F}$ be any field. For $k \geq 0$, $\mathbb{F}^k$ is the
$k$-dimensional linear space over $\mathbb{F}$. Denote by $X = \{ x_1,
\ldots, x_n \}$ a finite set of points in $\mathbb{F}^k$. The
\textit{affine hull} of $X$ is aff$(X) = \{ \sum_{i=1}^n c_ix_i
\text{ | } x_i \in X \text{ and } \sum_{i=1}^n c_i = 1\}$. The rank
of $X$, denoted as rank$(X)$, is one plus the dimension of aff$(X)$. In particular, if
$\mathbb{F} = \mathbb{R}$, then we will consider the \textit{convex hull} of $X$, denoted as conv$(X)$.
\end{definition}
For any sets $V, W $, any function $f : V \rightarrow W$, and any subset $U \subseteq V$,
we denote $f(U) = \{ f(u) \text{ | } u \in U \}$.
\begin{definition}[Convex directed $X$-embedding]
For any $X \subset V$, a convex directed $X$-embedding of
a graph $G = (V,E) $ is a function $f : V \rightarrow \mathbb{R}^{|X|-1}$
such that for each $v \in V \setminus X$, $f(v) \in \text{conv}(f(N_G^{\text{out}}(v)))$.
\end{definition}
From an efficiency point of view, we use the same method as \cite{LinialLW88,CheriyanR94},
which is based on convex embeddings over a finite field $\mathbb{F}$. In
particular, they construct the directed $X$-embedding over the field of integers
modulo a prime $p$, $\mathbb{Z}_p$ by fixing a random prime number $p
\in [n^5, n^6]$, and choosing a random nonzero coefficient function
$c: E \rightarrow (\mathbb{Z}_p \setminus \{ 0 \})$ on edges. This
construction yields a function $f : V \rightarrow
(\mathbb{Z}_p^{|X|-1})$ called \textit{random modular directed
$X$-embedding}.
\begin{definition}
For $X,Y \subseteq V$, $p(X,Y)$ is the maximum number of vertex-disjoint paths
from $X$ to $Y$ where different paths have different end points.
\end{definition}
\begin{lemma} \label{lem:embedding}
For any non-empty subset $U \subseteq V \setminus X$, w.h.p. a random modular directed
$X$-embedding $f : V \rightarrow \mathbb{Z}_p^{|X|-1}$ satisfies
$\operatorname{rank}(f(U)) = p(U,X)$.
\end{lemma}
\begin{definition} [Fixed $k$-neighbors]
For $v \in V$, let $N_{G,k}^{\text{out}}(v)$ be a fixed, but arbitrarily selected
subset of $N_{G}^{\text{out}}(v)$ of size
$k$. Similarly, For $v \in V$, let $N_{G,k}^{\text{in}}(v)$ be a fixed, but arbitrarily selected
subset of $N_{G}^{\text{in}}(v) $ of size $k$.
\end{definition}
\begin{lemma} \label{lem:embeddingtime}Let $\omega$ be the exponent of the running time of the
optimal matrix multiplication algorithm; it is known that $\omega \leq 2.372$.
\begin{itemize}
\item For $y \in V$, a random modular directed
$N_{G,k}^{\operatorname{out}}(y)$-embedding $f$ can be constructed in $O(n^{\omega})$
time.
\item Given such $f$, for $U \subseteq V$ with $|U| = k$, $\operatorname{rank} (f(U))$
can be computed in $O(k^{\omega})$ time.
\end{itemize}
\end{lemma}
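To make the second item of the lemma concrete, here is a minimal Python sketch (illustrative only; the helper name \texttt{rank\_mod\_p} is ours, and it uses plain $O(k^3)$ Gaussian elimination rather than the $O(k^{\omega})$ procedure of the lemma) of computing the rank of a set of embedded points over $\mathbb{Z}_p$:
\begin{verbatim}
def rank_mod_p(vectors, p):
    # Rank over Z_p of a list of integer vectors (p prime),
    # computed by Gaussian elimination.
    rows = [[x % p for x in v] for v in vectors]
    if not rows:
        return 0
    rank, ncols = 0, len(rows[0])
    for col in range(ncols):
        pivot = next((r for r in range(rank, len(rows)) if rows[r][col]), None)
        if pivot is None:
            continue                       # no pivot in this column
        rows[rank], rows[pivot] = rows[pivot], rows[rank]
        inv = pow(rows[rank][col], -1, p)  # modular inverse; p is prime
        rows[rank] = [(x * inv) % p for x in rows[rank]]
        for r in range(len(rows)):
            if r != rank and rows[r][col]:
                f = rows[r][col]
                rows[r] = [(a - f * b) % p for a, b in zip(rows[r], rows[rank])]
        rank += 1
        if rank == len(rows):
            break
    return rank
\end{verbatim}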
\begin{lemma} [\cite{Gabow06}] \label{lem:gabow-shore}
If $S$ is an optimal out-vertex shore, i.e., $|N_G^{\operatorname{out}}(S)| =
\kappa_G$, then $\kappa_G \geq d_{\text{min}}^{\operatorname{out}} - |S|$.
\end{lemma}
For any sets $S,S'$, we denote by $\min(S,S')$ the set with the smaller
cardinality.
\subsection{Algorithm}
\begin{algorithm}[H]
\KwIn{$G = (V,E)$, and $\epsilon > 0$}
\KwOut{A vertex-cut $U$ such that w.h.p. $|U| \leq (1+\epsilon)
\kappa_G$.}
\kappa_G$.}
\BlankLine
Let $k \leftarrow \max(d_{\text{min}}^{\operatorname{out}},
d_{\text{min}}^{\operatorname{in}})$. \;
Let $k' \leftarrow \min(d_{\text{min}}^{\operatorname{out}}, d_{\text{min}}^{\operatorname{in}})$.
\Repeat{ $\Theta (1/\epsilon)$ \normalfont{times}}
{
Sample two random vertices $x_2,y_1 \in V$. \;
Let $f$ be a random modular directed
$N_{G,k}^{\operatorname{in}}(y_1)$-embedding. \tcp*{$O(n^{\omega})$ time.}
Let $f^R$ be a random modular directed $N_{G^R,k}^{\operatorname{in}}(x_2)$-embedding. \;
\Repeat{ $\Theta (n/(\epsilon k'))$ \normalfont{times}}
{
Sample two random vertices $y_2,x_1 \in V$. \;
$\operatorname{rank} (x_1, y_1) \leftarrow \operatorname{rank} (f(N_{G,k}^{\operatorname{out}}(x_1)))$ \tcp*{$O(k^{\omega})$ time.}
$\operatorname{rank} (x_2, y_2) \leftarrow \operatorname{rank} (f^R(N_{G^R,k}^{\operatorname{out}}(y_2)))$ \;
}
}
Let $x^*,y^*$ be the pair of vertices with minimum $\operatorname{rank}(x,y)$ for all $x,y$ computed so far.\;
Let $W \leftarrow \min(\kappa_G(x^*,y^*), \kappa_{G^R}(x^*,y^*))$ \;
Let $v_{\text{min}}, u_{\text{min}} $ be the vertex with the minimum out-degree in $G$ and $G^R$ respectively. \;
\Return{ \normalfont{min}$(W,
|N_G^{\operatorname{out}}(v_{\text{min}})|,|N_{G^R}^{\operatorname{out}}(u_{\text{min}})|) $}
\tcp*{Vertex-cut with minimum cardinality.}
\caption{ApproxConvexEmbedding$(G, \epsilon)$}
\label{alg:approxconvexembedding}
\end{algorithm}
\subsection{Analysis}
\global\long\def\dminout{d_{\min}^{\operatorname{out}}}
\global\long\def\dminin{d_{\min}^{\operatorname{in}}}
\begin{lemma}
\Cref{alg:approxconvexembedding} outputs w.h.p. a vertex-cut $U$
such that $|U| \leq (1+\epsilon)\kappa_G$.
\end{lemma}
\begin{proof}
Let $\tilde{\kappa}$ denote the answer of our algorithm. Clearly
$\tilde{\kappa}\le\dminout$ and $\tilde{\kappa}\le\dminin$ by design.
Observe also that $\tilde{\kappa}\ge\kappa$ because the answer corresponds
to some vertex cut. Let $(A,S,B)$ be the optimal separation triple
where $A$ is an out-vertex shore and $|S|=\kappa$. W.l.o.g. we assume
that $|A|\le|B|$; the other case is symmetric.
Suppose that $|A|\le\epsilon\dminout$. Then $\kappa=|S|\ge\dminout-\epsilon\dminout\ge\tilde{\kappa}(1-\epsilon)\ge\kappa(1-\epsilon)$.
That is, $\tilde{\kappa}$ is indeed a $(1+O(\epsilon))$-approximation
of $\kappa$ in this case.
Suppose now that $|A|\ge\epsilon\dminout$. We claim that $|B|\ge\epsilon n/4$.
Indeed, if $\dminout\ge n/2$, then $|B|\ge|A|\ge\epsilon\dminout\ge\epsilon n/2$. Otherwise,
$\dminout\le n/2$, so $|S|=\kappa\le n/2$ and $2|B|\ge|A|+|B|=n-|S|\ge n/2$.
In either case, $|B|\ge\epsilon n/4$.
Now, as $|B|\ge\epsilon n/4$ and we sample $\tilde{O}(1/\epsilon)$
many $y_{1}$, some sample satisfies $y_{1}\in B$ w.h.p.; from now on we
assume that $y_{1}\in B$ and consider the iteration in which this $y_{1}$ is sampled.
As $|A|\ge\epsilon\dminout$ and we sample at least $\tilde{O}(n/(\epsilon\dminout))$
many $x_{1}$, some sample satisfies $x_{1}\in A$ w.h.p.
By \Cref{lem:embedding}, w.h.p., $$\operatorname{rank}(x_1,y_1) = \operatorname{rank}
(f(N_{G,k}^{\operatorname{out}}(x_1))) = p(N_{G,k}^{\operatorname{out}}(x_1), N_{G,k}^{\operatorname{in}}(y_1)
) = \kappa(x_1,y_1) = \kappa.$$
So our answer $\tilde{\kappa} = \kappa$ in this case.
\end{proof}
\begin{lemma}
\Cref{alg:approxconvexembedding} terminates in $O(n^{\omega}/ \epsilon^2
+\min(\kappa_G, \sqrt{n})m )$ time.
\end{lemma}
\begin{proof}
By \Cref{lem:embeddingtime}, the construction time for a random modular directed
$N_{G,k}^{\operatorname{in}}(y_1)$-embedding is $O(n^{\omega})$. Given $y_1$,
we sample $x_1$ for $\Theta (n/(\epsilon k))$ rounds. In each round we can
compute $\kappa(x_1,y_1)$ by computing
$\operatorname{rank}(f(N_{G,k}^{\operatorname{out}}(x_1)))$ in $O( k^{\omega})$ time. Hence,
the total time to find the best pair $(x,y)$ is $O(n^{\omega} + n k^{\omega-1} / \epsilon) =
O(n^{\omega}/\epsilon) $. Finally, we can compute $\kappa_G(x,y)$ to obtain the
vertex-cut for the best pair in $O( \min(\kappa_G, \sqrt{n})m)$
time. It takes linear time to compute $ N_G^{\operatorname{out}}(v_{\text{min}})$ and
$N_{G^R}^{\operatorname{out}}(u_{\text{min}})$. Hence, the result follows.
\end{proof}
\section{Open Problems}
\label{sec:open}
\begin{enumerate}[nolistsep,noitemsep]
\item Is there an $O(\nu k)$-time LocalVC algorithm?
\item Can we break the $O(n^3)$ time bound when $k = \Omega(n)$? This would still be hard to break even if we had an $O(\nu k)$-time LocalVC algorithm.
\item Is there an $o(n^2)$-time algorithm for vertex-weighted graphs when $m=O(n)$? Our LocalVC algorithm does not generalize to the weighted case.
\item Is there an $o(n^2)$-time algorithm for the single-source max-flow problem when $m=O(n)$?
\item Is there a near-linear-time $o(\log n)$-approximation algorithm?
\item How fast can we solve the vertex connectivity problem in the dynamic setting (under edge insertions and deletions) and the distributed setting (e.g. in the CONGEST model)?
\end{enumerate}
\fi
\end{document}
\begin{document}
\title{Resolving Knudsen Layer by High Order Moment Expansion}
\begin{abstract}
We model the Knudsen layer in Kramers' problem by the linearized high
order hyperbolic moment system. Thanks to the hyperbolicity, the
boundary conditions of the moment system are properly reduced from
the kinetic boundary condition. For Kramers' problem, we give
analytical solutions of the moment systems. As the order of
the moment model increases, the solutions approach the solution of
the linearized BGK kinetic equation. The velocity profile in the
Knudsen layer is captured with improved accuracy for a wide range of
accommodation coefficients.
\vspace*{4mm}
\end{abstract}
\section{Introduction}
In the area of kinetic theory, Kramers' problem \cite{Kramers1949} is
generally considered the most basic setting for understanding the
fundamental flow physics near a wall, which defines the Knudsen layer
\cite{Lilley2007}, without some of the additional complications in
other more realistic problems, such as flow in a plane channel
\cite{Garcia2009} or cylindrical tube \cite{Higuera1989,
Grucelski2013}. It is well known \cite{Karniadakis2002, Zhang2012}
that the classical Navier-Stokes-Fourier(NSF) equations with
appropriate boundary conditions can be used to describe the flow with
satisfactory accuracy when the gas is close to a statistical
equilibrium state. However, a more accurate model is needed to describe
the nonequilibrium effects near the wall, where the continuum
assumption essentially breaks down and the NSF equations themselves
become inappropriate \cite{Lilley2007, Dongari2009}. This is exactly
the case in Knudsen layers.
During the past decades, various methods have been developed to
investigate Kramers' problem based on the Boltzmann equation. Highly
accurate results on the slip coefficient for the
unmodeled Boltzmann equation and general boundary conditions have been
reported \cite{Loyalka1967, Loyalka1971, Klinc1972}. Variable
collision frequency models of the Boltzmann equation
\cite{Cercignani1969, Williams2001, Loyalka1967, Loyalka1975,
Loyalka1990, Siewert2001} are extensively discussed. We note that
the direct simulation Monte Carlo (DSMC) method \cite{Bird} is widely
used to solve the Boltzmann equation numerically. Unfortunately, DSMC
calculations impose prohibitive computational demands for many
applications of current interest. The intensive computational
demands of the DSMC method have motivated recent interest in the
application of higher-order hydrodynamic models to simulate rarefied
flows \cite{Reese2003, Guo2006, Gu2010, Mizzi2007}. There are many
competing sets of higher-order constitutive relations, which are
derived from the fundamental Boltzmann equation using differing
approaches. The classical approaches are the Chapman-Enskog technique
and Grad's moment method. Among these alternative macroscopic modeling
and simulation strategies \cite{Grad, Levermore}, the moment method is
quite attractive due to its numerous advantages \cite{Muller,
Struchtrup2002, TorrilhonEditorial}. It is regarded as a useful
tool to extend classical fluid dynamics, and achieves highly accurate
approximations with great efficiency.
The moment method for gas kinetic theory \cite{Grad} has been applied
to wall-bounded geometries, supplemented by slip and jump
boundary conditions \cite{Marques2001}, but its application is
seriously limited by the lack of hyperbolicity \cite{Muller,
  Grad13toR13}. Particularly in the $3$D case, the moment system is
not hyperbolic in any neighborhood of the Maxwellian. Only recently
has this fatal defect been remedied \cite{Fan, Fan_new, ANRxx}, in that
globally hyperbolic models can now be deduced. The global hyperbolicity of
the new models provides us with the directions of information propagation,
and thus proper boundary conditions for the moment model may be
proposed. This motivates us to study Kramers' problem using the
new moment models.
Starting from the globally hyperbolic moment system (HME), we first
derive a linearized hyperbolic moment model to describe Kramers'
problem. We find that the linearized model is even simpler than one
might expect, since the equations for the velocity are decoupled from
the other equations of the system involving high-order moments, and the
number of equations in the decoupled part related to the velocity equals
the moment expansion order. Then we establish the boundary
conditions for the linearized moment model according to physical and
mathematical requirements for the system. Following Grad's approach in
\cite{Grad} for the kinetic accommodation model by Maxwell
\cite{Maxwell}, we propose the general boundary conditions for shear
flows. After that, by linearizing the velocity jump and high order
terms in the expression of the general boundary conditions, it is then
adapted to the boundary condition for the linearized model. This makes us
able to give the expression of velocity by solving the decoupled
system related with velocity together with the corresponding boundary
condition. It is widely believed that the linearized system is
accurate enough for low-speed flows, which encourages us to apply the
velocity solution obtained in this way to study Kramers' problem.
To obtain the full velocity profile and the velocity slip coefficient
in Kramers' problem, one may adopt a direct numerical method to
solve the linearized Boltzmann equation. However, the linearized moment
system describes the velocity profile in the Knudsen layer by
analytical expressions. This can be used to provide a convenient
correction near the wall \cite{Lockerby2008} for lower order
macroscopic systems, such as the NSF equations. In the moment method, the
Knudsen layer appears as a superposition of exponential layers
\cite{Struchtrup2008.1}. In the results we give based on HME, the
number of exponential layers increases with the order of the model.
Compared with the results given by direct numerical simulation, our
solutions show a significant improvement in accuracy over the results
in the references as more and more high-order moments are considered.
In particular, our results capture the velocity profile in the Knudsen
layer accurately for a wide range of accommodation coefficients. We note
that our linearized model has the same computational cost as the lower
order moment system.
This paper is organized as follows. In Section \ref{sec:hme} we
review HME for the Boltzmann equation and derive the linearized HME.
The boundary conditions for HME and the linearized HME are established in
Section \ref{sec:bc}. The linearized equations are solved in
detail for Kramers' problem in Section \ref{sec:kramers}. With the
solution of the velocity profile in the Knudsen layer, some important
quantities, such as the defect velocity, are compared with other
kinetic model solutions in the same section. We then draw some
conclusions to end this paper.
\section{Linearized HME for Boltzmann Equation}
\label{sec:hme}
\subsection{Boltzmann equation}
In gas kinetic theory, the motion of particles of gas can be depicted
by the Boltzmann equation \cite{Boltzmann}
\begin{equation}\label{eq:boltzmann}
\pd{f}{t} + \bxi\cdot\nabla_{\bx}f = Q(f,f),
\end{equation}
where $f(t,\bx,\bxi)$ is the number density distribution function
which depends on the time $t\in\bbR^+$, the spatial position
$\bx\in\bbR^3$ and the microscopic particle velocity $\bxi\in\bbR^3$,
and $Q(f,f)$ is the collision term. In this paper, we limit the
discussion on the BGK collision model \cite{BGK}, which reads:
\begin{equation}\label{eq:collision}
Q(f,f) = \frac{\rho\theta}{\mu}(\mathcal{M} - f) ,
\end{equation}
where $\mu$ is the viscosity and $\mathcal{M}$ is the local
thermodynamic equilibrium, usually called the local Maxwellian,
defined by
\begin{displaymath}
\mathcal{M}=\frac{\rho}{(2\pi\theta)^{3/2}}
\exp\left( -\frac{|\bxi-\bu|^2}{2\theta} \right).
\end{displaymath}
Here the density $\rho$, the macroscopic velocity $\bu$ and the
temperature $\theta$ are related to the distribution function as
\begin{equation}
\rho =\int_{\bbR^3}f\dd\bxi,\qquad
\rho\bu =\int_{\bbR^3}\bxi f\dd\bxi,\qquad
\rho|\bu|^2+3\rho\theta =\int_{\bbR^3}|\bxi|^2f\dd\bxi.
\end{equation}
Multiplying the Boltzmann equation by $(1,\bxi,|\bxi|^2)$ and
integrating both sides over $\bbR^3$ with respect to $\bxi$, we obtain
the conservation laws of mass, momentum and energy as
\begin{equation}\label{eq:conservationlaws}
\begin{aligned}
\odd{\rho}{t}&+\rho\sum_{d=1}^3\pd{u_d}{x_d}=0,\\
\rho\odd{u_i}{t}&+\sum_{d=1}^3\pd{p_{id}}{x_d}=0,\\
\frac{3}{2}\rho\odd{\theta}{t}&+\sum_{k,d=1}^3p_{kd}\pd{u_k}{x_d}+\sum_{d=1}^3\pd{q_d}{x_d}=0,
\end{aligned}
\end{equation}
where
$\odd{\cdot}{t} := \pd{\cdot}{t} + \displaystyle \sum_{d=1}^3
u_d \pd{\cdot}{x_d}$
is the material derivative, and the pressure tensor $p_{ij}$ and the
heat flux $q_i$ are defined by
\begin{equation}
p_{ij}=\int_{\bbR^3}(\xi_i-u_i)(\xi_j-u_j)f\dd\bxi,\quad
q_i=\frac{1}{2}\int_{\bbR^3}|\bxi-\bu|^2(\xi_i-u_i)f\dd\bxi, \quad
i,j=1,2,3.
\end{equation}
For convenience, we define the pressure $p$ and the stress tensor
$\sigma_{ij}$ by
\[
p=\sum_{d=1}^3\frac{p_{dd}}{3}=\rho\theta,\quad
\sigma_{ij}=p_{ij}-p\delta_{ij},\quad i,j=1,2,3.
\]
\subsection{HME and its linearization}
The moment method in kinetic theory was first proposed by Grad in 1949
\cite{Grad}. The primary idea is to expand the distribution function
around the Maxwellian into a Hermite series
\begin{equation}\label{eq:expansion}
f(t,\bx,\bxi) =
\frac{\mathcal{M}}{\rho}\sum_{\alpha\in\bbN^3}f_{\alpha}(t,\bx)\He_{\alpha}^{[\bu,\theta]}(\bxi)=
\sum_{\alpha\in\bbN^3}f_{\alpha}(t,\bx)\mH_{\alpha}^{[\bu,\theta]}(\bxi),
\end{equation}
where $\alpha = (\alpha_1, \alpha_2, \alpha_3) \in \bbN^3$ is a 3D
multi-index, and $\He_{\alpha}^{[\bu,\theta]}(\bxi)$ are generalized
Hermite polynomials defined by
\begin{equation}\label{eq:hermite-poly}
\He_{\alpha}^{[\bu,\theta]}(\bxi) =
\frac{(-1)^{|\alpha|}}{\mathcal{M}}
\dfrac{\partial^{|\alpha|} \mathcal{M}}{\partial
\xi_1^{\alpha_1} \partial \xi_2^{\alpha_2} \partial
\xi_3^{\alpha_3}}, \qquad
|\alpha|=\sum_{d=1}^3\alpha_d,
\end{equation}
and $\mH_{\alpha}^{[\bu,\theta]}(\bxi)$ is the basis function defined
by
\begin{equation}\label{eq:basis-fun}
\mH_{\alpha}^{[\bu,\theta]}(\bxi) = \frac{\mathcal{M}}{\rho}
\He_{\alpha}^{[\bu,\theta]}(\bxi).
\end{equation}
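For example (by direct differentiation of $\mathcal{M}$), the lowest-order polynomials read
\[
\He_{0}^{[\bu,\theta]}(\bxi) = 1,\quad
\He_{e_1}^{[\bu,\theta]}(\bxi) = \frac{\xi_1-u_1}{\theta},\quad
\He_{2e_1}^{[\bu,\theta]}(\bxi) = \frac{(\xi_1-u_1)^2}{\theta^2}-\frac{1}{\theta},\quad
\He_{e_1+e_2}^{[\bu,\theta]}(\bxi) = \frac{(\xi_1-u_1)(\xi_2-u_2)}{\theta^2}.
\]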
Direct calculations yield, for
$i,j=1,2,3$,
\begin{equation}
\begin{aligned}
&f_{0}=\rho,\quad
f_{e_i}=0,\quad
\sum_{d=1}^3f_{2e_d}=0,\\
p_{ij}=p\delta_{ij} &+ (1+\delta_{ij})f_{e_i+e_j},\quad
q_i = 2 f_{3e_i}+\sum_{d=1}^3 f_{e_i+2e_d}.
\end{aligned}
\end{equation}
Substituting Grad's expansion \eqref{eq:expansion} into the
Boltzmann equation, and matching the coefficient of the basis function
$\mH^{[\bu,\theta]}_{\alpha}(\bxi)$, one can obtain the governing equations of
$\bu$, $\theta$ and $f_{\alpha}$, $\alpha\in\bbN^3$. However, the
resulting system contains an infinite number of equations. Choosing a
positive integer $3\leq M\in\bbN$, discarding all the equations
containing $\pd{f_{\alpha}}{t}$, $|\alpha|>M$, and setting
$f_{\alpha}=0$, $|\alpha|>M$, to close the residual system, we obtain
the $M$-th order Grad's moment system. Since
\[
Q(f,f) = - \frac{p}{\mu} \sum_{|\alpha| \geq 2} f_{\alpha}
\mH_{\alpha}^{[\bu,\theta]}(\bxi)
= - \frac{p}{\mu} \sum_{\alpha\in\bbN^3} \mathrm{H}(|\alpha|-2) f_{\alpha}
\mH_{\alpha}^{[\bu,\theta]}(\bxi),
\]
where $\mathrm{H}(n)$ is the Heaviside step function
\[
\mathrm{H}(n) = \left\{ \begin{array}{ll}
0, & n<0,\\
1, & n\geq0,
\end{array} \right.
\]
the $M$-th order Grad's moment system can be written as
\begin{equation}\label{eq:arbit-system}
\begin{aligned}
\odd{f_{\alpha}}{t} &+ \sum_{d=1}^3 \left( \theta \pd{f_{\alpha-e_d}}{x_d} +
(1-\delta_{|\alpha|,M})(\alpha_d + 1)\pd{f_{\alpha+e_d}}{x_d} \right) \\
+ \sum_{k=1}^3 f_{\alpha-e_k} \odd{u_k}{t} &+ \sum_{k,d=1}^3 \pd{u_k}{x_d}
\left(\theta f_{\alpha-e_k-e_d} + (\alpha_d + 1) f_{\alpha-e_k+e_d}
\right) \\
+ \frac{1}{2} \sum_{k=1}^3 f_{\alpha-2e_k} \odd{\theta}{t} &+ \sum_{k,d=1}^3
\frac{1}{2} \pd{\theta}{x_d} \left(
\theta f_{\alpha-2e_k-e_d} + (\alpha_d + 1) f_{\alpha-2e_k+e_d}
\right)\\
&= -\frac{p}{\mu}f_{\alpha}\mathrm{H}(|\alpha|-2) , \quad |\alpha| \leq M,
\end{aligned}
\end{equation}
where $\delta_{i,j}$ is the Kronecker delta. Here and hereafter we agree
that $(\cdot)_{\alpha}$ is taken as zero if any component of $\alpha$
is negative.
However, as is pointed out in \cite{Muller,Grad13toR13}, Grad's moment
system lacks global hyperbolicity and is not hyperbolic even in any
neighborhood of the local Maxwellian. The globally hyperbolic
regularization proposed in \cite{Fan,Fan_new} essentially remedies this
drawback, and results in the globally Hyperbolic Moment Equations (HME)
as
\begin{equation}\label{eq:moment-system}
\begin{aligned}
\odd{f_{\alpha}}{t} &+ \sum_{d=1}^3 \left( \theta \pd{f_{\alpha-e_d}}{x_d} +
(1 - \delta_{|\alpha|,M})(\alpha_d + 1)\pd{f_{\alpha+e_d}}{x_d} \right) \\
+ \sum_{k=1}^3 f_{\alpha-e_k} \odd{u_k}{t} &+ \sum_{k,d=1}^3 \pd{u_k}{x_d}
\left(\theta f_{\alpha-e_k-e_d} + (1 - \delta_{|\alpha|,M})(\alpha_d + 1) f_{\alpha-e_k+e_d}
\right) \\
+ \frac{1}{2} \sum_{k=1}^3 f_{\alpha-2e_k} \odd{\theta}{t} &+ \sum_{k,d=1}^3
\frac{1}{2} \pd{\theta}{x_d} \left(
\theta f_{\alpha-2e_k-e_d} + (1 - \delta_{|\alpha|,M})(\alpha_d + 1) f_{\alpha-2e_k+e_d}
\right)\\
&= -\frac{p}{\mu}f_{\alpha}\mathrm{H}(|\alpha|-2) , \quad |\alpha| \leq M.
\end{aligned}
\end{equation}
Next, we try to derive the linearized system of
\eqref{eq:moment-system}. This requires us to examine the case that
the distribution function is in a small neighborhood of an equilibrium
state
\[
f_0(\bxi) = \frac{\rho_0}{(2\pi\theta_0)^{\frac{3}{2}}} \mathrm{exp}
\left( - \frac{|\bxi|^2}{2\theta_0}\right) ,
\]
given by $\rho_0, \theta_0, \bu = 0$. We introduce the dimensionless
variables $\bar{\rho}$, $\bar{\theta}$, $\bar{\bu}$, $\bar{p}$,
$\bar{p}_{ij}$ and $\bar{f}_{\alpha}$ as
\begin{equation}\label{eq:dimensionless}
\begin{aligned}
&\rho = \rho_0 (1 + \bar{\rho}),\quad \bu = \sqrt{\theta_0} \bar{\bu},
\quad \theta = \theta_0 (1 + \bar{\theta}),
\quad p = p_0 (1 + \bar{p}),\\
&p_{ij}=p_0(\delta_{ij}+\bar{p}_{ij}),
\quad f_{\alpha}=\rho_0\theta_0^{\frac{|\alpha|}{2}} \cdot \bar{f}_{\alpha},
\quad \bx = L\cdot
\bar{\bx},\quad t = \frac{L}{\sqrt{\theta_0}}\bar{t},
\end{aligned}
\end{equation}
where $L$ is a characteristic length, $\bar{\bx}$ and $\bar{t}$ are
the dimensionless coordinates and time, respectively. Assume all the
dimensionless variables $\bar{\rho}$, $\bar{\theta}$, $\bar{\bu}$,
$\bar{p}$, $\bar{p}_{ij}$ and $\bar{f}_{\alpha}$ are small quantities.
Substituting \eqref{eq:dimensionless} into the globally hyperbolic
moment system \eqref{eq:moment-system}, discarding all the
high-order small quantities, and noticing that $u_d \pd{\cdot}{x_d}$
is a high-order small quantity so that $\odd{\cdot}{t}\approx\pd{\cdot}{t}$, we
obtain the linearized HME as
\begin{equation}\label{eq:linear-system}
\begin{aligned}
&\pd{\bar{\rho}}{\bar{t}} + \sum_{d=1}^3\pd{\bar{u}_d}{\bar{x}_d} = 0,\\
&\pd{\bar{u}_k}{\bar{t}} + \pd{\bar{p}}{\bar{x}_k} +
\sum_{d=1}^3\pd{\bar{\sigma}_{kd}}{\bar{x}_d} = 0,\\
&\pd{\bar{p}_{ij}}{\bar{t}} + \sum_{d=1}^3\delta_{ij}\pd{\bar{u}_d}{\bar{x}_d} +
\pd{\bar{u}_j}{\bar{x}_i} +
\pd{\bar{u}_i}{\bar{x}_j} + \sum_{d=1}^3(e_i + e_j + e_d)!
\pd{\bar{f}_{e_i + e_j + e_d}}{\bar{x}_d} =
-\frac{\bar{\sigma}_{ij}}{{\Kn}},\\
&\begin{split}
&\pd{\bar{f}_{\alpha}}{\bar{t}} + \sum_{d=1}^3\pd{\bar{f}_{\alpha - e_d}}{\bar{x}_d}
    + \sum_{d=1}^3(\alpha_d + 1)(1 - \delta_{|\alpha|,M})
\pd{\bar{f}_{\alpha + e_d}}{\bar{x}_d} \\
&\qquad\qquad\qquad\qquad+ \sum_{d=1}^3\frac{1}{2}\delta_{\alpha,e_d+2e_k}
\pd{\bar{\theta}}{\bar{x}_d}
=-\frac{\bar{f}_{\alpha}}{{\Kn}},\quad 3 \leq |\alpha|\leq M,
\end{split}
\end{aligned}
\end{equation}
where $\bar{\sigma}_{ij}=\bar{p}_{ij}-\bar{p}\delta_{ij}$,
$i,j=1,2,3$, and $\delta_{\alpha, e_d+2e_k}$ is $1$ iff
$\alpha=e_d+2e_k$, otherwise is $0$.
The Knudsen number $\Kn$ is defined by
\begin{displaymath}
\Kn = \frac{\lambda}{L},
\end{displaymath}
where $\lambda = \frac{\mu}{p_0}\sqrt{\theta_0}$ is the mean free
path.
\section{Boundary Condition}\label{sec:bc}
In this paper, we adopt Maxwell's accommodation boundary condition
\cite{Maxwell}, which is the most commonly used boundary condition in
gas kinetic theory. It is formulated as a linear combination of the
specular
reflection and the diffuse reflection. The wall boundary condition only prescribes the
incoming half of the distribution function, i.e., the part with $\bxi \cdot \bn > 0$,
where $\bn$ is the unit normal vector pointing into the gas. With the
given velocity $\bu^W(t,\bx)$ and temperature $\theta^W(t,\bx)$ of the
wall, the boundary condition at the wall is
\begin{equation}\label{eq:Maxwell}
f^W(t, \bx, \bxi) =
\left \{
\begin{array}{ll}
\chi f^W_M(t, \bx, \bxi) +(1 - \chi)f(t, \bx, \bxi^{\ast}),
& \bC^W \cdot \bn > 0, \\
f(t, \bx, \bxi), & \bC^W \cdot \bn \leq 0,
\end{array}
\right.
\end{equation}
where
\begin{equation}\label{eq:equilibrium}
\begin{aligned}
\bxi^{\ast} = \bxi - 2(\bC^W \cdot \bn)\bn,
\quad \bC^W = \bxi - \bu^W(t,\bx) ,\\
f^W_M(t, \bx, \bxi) =
\frac{\rho^W(t,\bx)}{(2\pi\theta^W(t,\bx))^{3/2}}
\exp\left(-\frac{|\bxi -
\bu^W(t,\bx)|^2}{2\theta^W(t,\bx)}\right),
\end{aligned}
\end{equation}
and $\chi \in [0,1]$ is the accommodation coefficient.
A boundary condition for general hyperbolic moment systems was proposed
in \cite{Li}, which is derived from the Maxwell boundary condition by
calculating the expressions of the moments at the wall. Since here we
purposely consider only steady shear flow, we adopt an
alternative approach to derive our boundary conditions. Let the unit
normal vector of the wall be $\bn = (0,1,0)^T$, the velocity of the wall
$\bu^W = (u^W, 0, 0)$, and the velocity of the steady shear flow
$\bu = (u_1, 0, 0)$. With $\bxi^{\ast} = (\xi_1,-\xi_2,\xi_3)$,
\eqref{eq:Maxwell} reads
\begin{equation}\label{eq:wall-function}
f^W(\bx,\bxi) = \left \{
\begin{array}{ll}
\chi f^W_M(\bx, \bxi) +(1 - \chi)f(\bx, \bxi^{\ast}),
& \xi_2 > 0, \\
f(\bx, \bxi), & \xi_2 \leq 0.
\end{array}
\right .
\end{equation}
Denote $\Omega = \{\bxi \in \bbR^3\}$,
$\Omega^+ = \{\xi_1 \in \bbR, \xi_2 \in \bbR^+, \xi_3 \in \bbR \}$ and
$\Omega^- = \{\xi_1 \in \bbR, \xi_2 \in \bbR^-, \xi_3 \in \bbR \}$.
The integral of the wall distribution function
\eqref{eq:wall-function} with any function $\psi(\bC)$ gives us an
equation
\begin{equation}\label{eq:integral-equation}
\begin{aligned}
\int_{\Omega}\psi(\bC) f^W(\bx, \bxi) & \dd\bxi =
\int_{\Omega^-} \psi(\bC) f(\bx, \bxi) \dd\bxi \\
&+\int_{\Omega^+}\psi(\bC)\left(\chi f_M^W(\bx, \bxi-\bu^W) + (1 -
\chi) f(\bx, \bxi^{\ast})\right) \dd\bxi,
\end{aligned}
\end{equation}
where $\bC = (\xi_1-u_1, \xi_2, \xi_3)$.
Certainly, for HME one has to restrict the form of the function
$\psi(\bC)$; otherwise \eqref{eq:integral-equation} would produce too
many boundary conditions. It is clear that we should restrict
ourselves to those $\psi$'s for which the moments in the equation can be
retrieved. Thus those $\psi$'s are polynomials $\bC^{\beta}$,
$|\beta|\leq M$, where $\beta = (\beta_1, \beta_2, \beta_3) \in
\bbN^3$ is a 3D multi-index. Moreover, the distribution function of
the shear flow is an
even function in the $\xi_3$ direction, which leads to
$f_{\beta} = 0$ for odd $\beta_3$. Following Grad's theory
\cite{Grad}, in order to limit the number of boundary conditions and to
ensure the continuity of the boundary conditions when $\chi \to 0$, only a
subset of all the moments corresponding to
\begin{equation}\label{set:bbI}
\{\bC^{\beta} \big| \beta \in \bbI \}, \qquad \text{where}\qquad
\bbI = \{|\beta| \leq M~\big|~\beta_2~\text{is odd and}~\beta_3
~\text{is even}\}
\end{equation}
can be used to construct the wall boundary conditions. Then
we reformulate the equation \eqref{eq:integral-equation} as
\begin{equation}\label{eq:bc-xi2}
\int_{\Omega^+}\bC^{\beta}f^W_M(\bx, \bxi-\bu^W) \dd\bxi =
\frac{1}{\chi}\left( \int_{\Omega^+}\bC^{\beta}\left(f(\bx, \bxi) -
(1-\chi)f(\bx, \bxi^{\ast})\right)\dd\bxi\right),
\quad \beta\in\bbI.
\end{equation}
Notice that the basis function defined in \eqref{eq:basis-fun} is
decoupled in the components of $\bxi$. We then substitute
\eqref{eq:expansion} into \eqref{eq:bc-xi2} to calculate the integrals
on the left and right hand sides of \eqref{eq:bc-xi2},
respectively. To present the results, we first make some simplifications
and define the following notation. Let
\begin{equation}
J_0(u,\theta) = 1,\quad
J_1(u,\theta) = u,\quad
  J_{k+1}(u,\theta)= u J_k(u,\theta)+k\theta J_{k-1}(u,\theta),
  \quad k\geq 1,
\end{equation}
then
\[
\frac{1}{\sqrt{2\pi\theta^W}}\int_{-\infty}^{\infty}(\xi_1 - u_1)^k
\exp\left(-\frac{|\xi_1-u_1^W|^2}{2\theta^W}\right)\dd\xi_1
=J_k(u_1^W-u_1,\theta^W).
\]
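For instance, the recursion gives $J_2(u,\theta)=u^2+\theta$ and $J_3(u,\theta)=u^3+3u\theta$; in other words, $J_k(u,\theta)$ is simply the $k$-th raw moment of a Gaussian with mean $u$ and variance $\theta$, which is exactly what the integral above expresses.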
Let
\[
K(k,m) := \int_{-\infty}^{\infty}\frac{1}{\sqrt{2\pi}}\exp\left(
-\frac{|\xi|^2}{2} \right)\xi^k \He_m(\xi)\dd\xi,
\]
where $\He_m(\xi)$ is $m$-th Hermite polynomial, then using the
orthogonal relation of the Hermite polynomials, one can find
$K(0,m) = \delta_{0,m}$.
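Beyond this, expanding $\xi^k$ in Hermite polynomials gives, for example, $K(1,1)=1$, $K(2,0)=1$, $K(2,2)=2$ and $K(3,1)=3$, while $K(k,m)=0$ whenever $m>k$ or $k-m$ is odd.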
Denote the half space integral by
\begin{equation}
S^\star (k,m) := \int_0^{\infty}\xi^k
\He_m(\xi)\exp\left(-\frac{\xi^2}{2}\right)\dd\xi,
\end{equation}
and we have the following properties for $S^\star (k,m)$.
\begin{itemize}
\item Recursion relation:
\begin{equation}\label{eq:rec}
S^{\star}(k, m) = (k - 1) S^{\star}(k-2, m) + m S^{\star}(k-1, m-1).
\end{equation}
\item The value of $S^\star (k,m)$ is:
\begin{enumerate}
\item If $m \leq k$:
\begin{enumerate}
\item If $k - m$ is even, $S^{\star}(k, m) = \sqrt{2 \pi}\cdot A$;
\item If $k - m$ is odd, $S^{\star}(k, m) = B$;
\end{enumerate}
here $A$ and $B$ are two algebraic numbers.
\item If $m > k$ and $k - m$ is even, $S^{\star}(k, m) = 0$.
\end{enumerate}
\end{itemize}
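For example, direct integration gives
\[
S^\star(0,0)=\frac{\sqrt{2\pi}}{2},\qquad
S^\star(1,0)=1,\qquad
S^\star(1,1)=S^\star(2,0)=\frac{\sqrt{2\pi}}{2},\qquad
S^\star(0,2)=0,
\]
consistent with the dichotomy above, and the recursion \eqref{eq:rec} can be checked on, e.g.,
$S^\star(3,1)=2S^\star(1,1)+S^\star(2,0)=\frac{3\sqrt{2\pi}}{2}$.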
Let
\begin{equation}\label{eq:integralS}
\begin{aligned}
S(k,m) &:= \frac{\hat{\chi}}{\sqrt{2\pi}} S^\star(k,m) \\
&\ = \frac{\theta^{(m-k)/2}}{\chi}
\int_0^{\infty}\xi^k\left(\He_m(\xi)-
(1-\chi)\He_m(-\xi)\right)\exp\left( -\frac{|\xi|^2}{2} \right)\dd\xi,
\end{aligned}
\end{equation}
where
\[
\hat{\chi} = \left\{
\begin{array}{ll}
1, & m ~\text{is even},\\
\frac{2-\chi}{\chi}, & m ~\text{is odd},
\end{array} \right.
\]
then for each $\beta \in \bbI$ in \eqref{set:bbI}, the left and right
hand side of \eqref{eq:bc-xi2} can be represented by
\begin{equation}\label{eq:bc}
\begin{aligned}
\text{lhs of \eqref{eq:bc-xi2}}
&=\frac{\rho^W \left(\theta^W\right)^{(\beta_2+\beta_3)/2}}{\sqrt{2\pi}}
J_{\beta_1}\left(u_1^W-u_1,\theta^W\right)
(\beta_2-1)!!(\beta_3-1)!!,\\
\text{rhs of \eqref{eq:bc-xi2}} &=
\sum_{\alpha\in\bbN^3} \left(
K(\beta_1,\alpha_1)
S(\beta_2,\alpha_2)
K(\beta_3,\alpha_3)
\theta^{(\beta_2-\alpha_2)/2}
\right) f_{\alpha}.
\end{aligned}
\end{equation}
Noticing $K(0,m)=\delta_{0,m}$, by setting $\beta=e_2$ in
\eqref{eq:bc}, we have
\begin{equation}\label{eq:e2bc}
\rho^W\sqrt{\frac{\theta^W}{2\pi}} = \sum_{m=0}^{\infty}
S(1,m)\frac{f_{me_2}}{{\theta}^{(m-1)/2}}.
\end{equation}
Let $p_{w} = \rho^W\sqrt{\theta^W \theta}$, then we have
\begin{equation}
p_{w} =
\sqrt{2\pi\theta}\sum_{m=0}^{\infty}S(1,m)\frac{f_{me_2}}{\theta^{(m-1)/2}}
=p + f_{2e_2} - \frac{f_{4e_2}}{\theta} + \frac{3}{\theta^2}
f_{6e_2} - \frac{15}{\theta^3}f_{8e_2} + \cdots.
\end{equation}
The boundary condition for the case $\beta = e_1+\beta_2e_2 \in \bbI$
in \eqref{set:bbI} is
\begin{equation}\label{eq:beta2oddbc}
\frac{\rho^W}{\sqrt{2\pi}}(\theta^W)^{\frac{\beta_2}{2}}(u_1^W - u_1)
(\beta_2 -1)!! = \sum_{\alpha_2} S(\beta_2,
\alpha_2) f_{e_1+ \alpha_2e_2} \theta^{(\beta_2-\alpha_2)/2}.
\end{equation}
Particularly, for the case $\beta = e_1+e_2$, one has
\[
p_{w}\sqrt{\frac{\theta^W}{2\pi\theta}}(u_1^W - u_1) =
S(1,1) \sigma_{12} + \sum_{\alpha_2 > 1}
S(1,\alpha_2) f_{e_1+\alpha_2e_2}\theta^{(1-\alpha_2)/2}.
\]
Here we only consider the boundary condition for the specific case
that $\beta = e_1+\beta_2e_2 \in \bbI$ in \eqref{eq:beta2oddbc},
which is
\begin{equation}\label{eq:bc-palpha}
p_{w}
\frac{\left(\theta^W\right)^{\frac{\beta_2-1}{2}}}{\sqrt{2\pi}}(\beta_2 -1)!!
(u_1^W - u_1) =
\sum_{\alpha_2} \theta^{\frac{1+\beta_2-\alpha_2}{2}}
S(\beta_2, \alpha_2) f_{e_1+\alpha_2e_2}.
\end{equation}
For our purpose, we linearize this condition at $\theta_0$ as in
\eqref{eq:dimensionless}, and assume that
$\theta^W-\theta_0$ is a small quantity. By substituting
\eqref{eq:dimensionless} into \eqref{eq:bc-palpha} and applying the
closure of HME, i.e., $f_{\alpha}=0$ for $|\alpha|>M$, we arrive at the
linearized boundary condition
\begin{equation}\label{bc:linear}
\frac{(\beta_2 -1)!!}{\sqrt{2\pi}}(\bar{u}_1^W - \bar{u}_1) = \sum_{\alpha_2\leq M}
S(\beta_2,\alpha_2) \bar{f}_{e_1+\alpha_2e_2},
\end{equation}
where $\bar{u}_1^W$ is the dimensionless wall velocity defined by $u_1^W =
\sqrt{\theta_0}\bar{u}_1^W$, and $\beta_2$ is odd with $\beta_2\leq
M$.
\section{Kramers' Problem}
\label{sec:kramers}
Our setup for Kramers' problem is standard. The gas flow in a
half-space over a flat wall is considered, and the coordinates are
chosen such that the $x$ direction is parallel to the wall and the $y$
direction is perpendicular to it. The solid wall is fixed at
$\bar{y} = 0$ (with $\bar{u}_1^W = 0$). The temperature and density of the
gas far from the wall are constant. The gas velocity is $\bar{\bu} =
(\bar{u}_1, 0, 0)$, and all derivatives in equations
\eqref{eq:linear-system} in the $x$ and $z$ directions are zero.
\subsection{Formal solution of linearized HME}
We first give the formal solution of the linearized HME. In the setup
of Kramers' problem, the equations of the linearized moment system
\eqref{eq:linear-system} related to the velocity decouple from the whole
linearized moment system, which enables us to investigate the velocity
by studying the small system
\begin{equation}\label{eq:velocity}
\begin{aligned}
&\od{\bar{\sigma}_{12}}{\bar{y}} = 0, \\
&\od{\bar{u}_1}{\bar{y}} + 2\od{\bar{f}_{e_1+2e_2}}{\bar{y}}
= -\frac{1}{\Kn}\bar{\sigma}_{12}, \\
&\od{\bar{\sigma}_{12}}{\bar{y}} +
3\od{\bar{f}_{e_1+3e_2}}{\bar{y}} =
-\frac{1}{\Kn}\bar{f}_{e_1+2e_2}, \\
&\cdots \\
&\od{\bar{f}_{e_1+ (M-2)e_2}}{\bar{y}}
= -\frac{1}{\Kn}\bar{f}_{e_1+(M-1)e_2}.
\end{aligned}
\end{equation}
We collect the variables involved in \eqref{eq:velocity} into a vector
\[
V = \left(\bar{u}_1, \bar{\sigma}_{12}, \bar{f}_{e_1+2e_2},
\bar{f}_{e_1+3e_2},\cdots, \bar{f}_{e_1+(M-1)e_2}\right)^T,
\]
and then \eqref{eq:velocity} is formulated as
\begin{equation}\label{eq:simple-velocity}
\bM \od{V}{\bar{y}} = -\frac{1}{\Kn}\bQ V,
\end{equation}
where
\begin{equation}\label{eq:def_MQ}
\bM = \left(
\begin{array}{cccccc}
0 & 1 & & & & \\
1 & 0 & 2 & & & \\
& 1 & 0 & 3 & & \\
& & \ddots & \ddots & \ddots & \\
& & & 1 & 0 & M - 1\\
& & & & 1 & 0
\end{array}
\right), \quad \bQ = \left(
\begin{array}{cccc}
0 & & & \\
& 1 & & \\
& & \ddots & \\
& & & 1
\end{array}
\right).
\end{equation}
It is easy to check that the matrix $\bM$ is real diagonalizable.
Actually, we have the eigen-decomposition of $\bM$
as $\bM = \bR \bLambda \bR^{-1}$, where $\bR$ is the Hermite transformation matrix
\begin{equation}\label{mat:eigen-vec}
\bR = (r_{ij})_{M\times M},\quad
r_{ij}=\frac{\He_{i-1}(\lambda_j)}{(i-1)!},
\quad i,j=1,\cdots,M,
\end{equation}
and $\bLambda = \diag\{\lambda_i; i = 1, \cdots, M\}$,
where the eigenvalues $\lambda_i$, $i=1,\cdots,M$ are zeros of the
$M$-th order Hermite polynomial $\He_M(x)$. We sort the eigenvalues
$\lambda_i$ in descending order, so that $\lambda_i >
\lambda_{i+1}$. The diagonal matrix $\bLambda$ can then be written as
\begin{equation}\label{mat:Lamb}
\bLambda = \left(
\begin{array}{cc}
\bLambda_+ & \\
& \bLambda_{\leq 0}
\end{array}
\right),
\end{equation}
\begin{equation}\label{mat:pos-neg}
\begin{aligned}
& \bLambda_+ = \diag \left\{\lambda_i;~ i =
1, \cdots, \lfloor \frac{M}{2} \rfloor \right\}, \\
& \bLambda_{\leq 0} = \diag \left\{\lambda_i;~ i =
\lfloor \frac{M}{2} \rfloor + 1, \cdots, M \right\}.
\end{aligned}
\end{equation}
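For instance, for $M=3$ the eigenvalues are the zeros of $\He_3(x)=x^3-3x$, namely $\lambda_1=\sqrt{3}$, $\lambda_2=0$, $\lambda_3=-\sqrt{3}$, so that $\bLambda_+=(\sqrt{3})$ and $\bLambda_{\leq 0}=\diag\{0,-\sqrt{3}\}$.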
The first equation of \eqref{eq:simple-velocity} indicates
$\bar{\sigma}_{12}$ is a constant and the second equation of
\eqref{eq:simple-velocity} gives that
\[
\bar{u}_1(\bar{y}) = -\dfrac{\bar{y}}{\Kn} \bar{\sigma}_{12} -
2\bar{f}_{e_1+2e_2}(\bar{y}) + c_0,
\]
where $c_0$ is a constant to be determined. We denote
\[
\hat{V} = (\bar{f}_{e_1+2e_2}, \bar{f}_{e_1+3e_2}, \cdots,
\bar{f}_{e_1+(M-1)e_2})^T,
\]
which is the remaining part of $V$ excluding the first two variables
$\bar{u}_1$ and $\bar{\sigma}_{12}$. Then the system for the higher order
moments separates from \eqref{eq:simple-velocity} and reads
\begin{equation}\label{eq:matrix-hatM}
\hat{\bM} \od{\hat{V}}{\bar{y}} = -\frac{1}{\Kn} \hat{V},
\end{equation}
where
\[
\hat{\bM} = \left(
\begin{array}{cccccc}
0 & 3 & & & & \\
1 & 0 & 4 & & & \\
& 1 & 0 & 5 & & \\
& & \ddots & \ddots & \ddots & \\
& & & 1 & 0 & M - 1\\
& & & & 1 & 0
\end{array}
\right).
\]
As for the matrix $\bM$, the matrix $\hat{\bM}$ is real
diagonalizable, too. Precisely, let
\[
\hat{\He}_0(x) = 1,~\hat{\He}_1(x)=x,~
\hat{\He}_{k+1}(x)=x\hat{\He}_k(x)-(k+2)\hat{\He}_{k-1}(x),
~k\geq1,
\]
then the characteristic polynomial of $\hat{\bM}$ is
$\hat{\He}_{M-2}(\lambda)$. The recursion relation implies that
$\hat{\He}_k(x)$ has $k$ real and simple zeros, and thus $\hat{\bM}$
is real diagonalizable and the eigenvalues $\hat{\lambda}_i$,
$i = 1, \cdots, M-2$, are the zeros of $\hat{\He}_{M-2}
(\lambda)$.
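For example, the recursion gives $\hat{\He}_2(x)=x^2-3$ and $\hat{\He}_3(x)=x^3-7x$, so for $M=4$ the eigenvalues of $\hat{\bM}$ are $\pm\sqrt{3}$ and for $M=5$ they are $0$ and $\pm\sqrt{7}$, in agreement with the explicit cases worked out below.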
Furthermore, if $\hat{\lambda}_i$ is an eigenvalue of $\hat{\bM}$,
then $-\hat{\lambda}_i$ has to be an eigenvalue of $\hat{\bM}$, since
$\hat{\He}_{M-2}(x)$ is an odd function if $M$ is odd and is an even
function if $M$ is even. As for the matrix $\bM$, we sort the
eigenvalues $\hat{\lambda}_i$ in descending order, too, to form the
diagonal matrix
\[
\hat{\bLambda} = \left(
\begin{array}{cc}
\hat{\bLambda}_+ & \\
& \hat{\bLambda}_{\leq 0}
\end{array}
\right),
\]
\[
\begin{aligned}
& \hat{\bLambda}_+ = \diag \left\{ \hat{\lambda}_i;~ i = 1, \cdots,
\lfloor \frac{M}{2} \rfloor - 1 \right\}, \\
& \hat{\bLambda}_{\leq 0} = \diag \left\{ \hat{\lambda}_i;~ i =
\lfloor \frac{M}{2} \rfloor, \cdots, M - 2 \right\}.
\end{aligned}
\]
Then eigen-decomposition of $\hat{\bM}$ is
$\hat{\bM} = \hat{\bR} \hat{\bLambda} \hat{\bR}^{-1}$, where
\begin{equation}\label{eq:def_hatbR}
\hat{\bR}=(\hat{r}_{ij})_{(M-2)\times(M-2)},\quad
\hat{r}_{ij}=\frac{\hat{\He}_{i-1}(\hat{\lambda}_j)}{(i+1)!},
\quad i,j=1,\cdots,M-2.
\end{equation}
For later use, let us define the matrix $\hat{\bR}_+$ as the left
$\lfloor \dfrac{M}{2} \rfloor - 1$ columns of $\hat{\bR}$, and
$\hat{\bR}_-$ as the right $\lceil \dfrac{M}{2} \rceil - 1$ columns of
$\hat{\bR}$; precisely, we have
\[
\hat{\bR}_+ = \left(
\begin{array}{ccc}
\frac{\hat{\He}_0(\hat{\lambda}_1)}{2!} & \cdots
& \frac{\hat{\He}_0(\hat{\lambda}_{\lfloor \frac{M}{2} \rfloor - 1})}{2!} \\
\vdots & \ddots & \vdots \\
\frac{\hat{\He}_{M-3}(\hat{\lambda}_1)}{(M-1)!} & \cdots
& \frac{\hat{\He}_{M-3}(\hat{\lambda}_{\lfloor \frac{M}{2} \rfloor - 1})}{(M-1)!}
\end{array}\right), \quad
\hat{\bR}_- = \left(
\begin{array}{ccc}
\frac{\hat{\He}_0(\hat{\lambda}_{\lfloor \frac{M}{2} \rfloor})}{2!} & \cdots
& \frac{\hat{\He}_0(\hat{\lambda}_{M-2})}{2!} \\
\vdots & \ddots & \vdots \\
\frac{\hat{\He}_{M-3}(\hat{\lambda}_{\lfloor \frac{M}{2} \rfloor})}{(M-1)!} & \cdots
& \frac{\hat{\He}_{M-3}(\hat{\lambda}_{M-2})}{(M-1)!}
\end{array}\right).
\]
Let $\hat{\bR}_{+,\text{even}}$ be the $(\lfloor \frac{M}{2} \rfloor - 1) \times (\lfloor \frac{M}{2} \rfloor - 1)$
square matrix formed by the even rows of $\hat{\bR}_+$, i.e.,
\[
\begin{aligned}
\hat{\bR}_{+,\text{even}} &\triangleq
(\hat{r}_{ij}), \text{~where~} i \text{~is
even,~} j = 1,\cdots,\lfloor \frac{M}{2} \rfloor - 1, \\
&=\left(
\begin{array}{cccc}
\dfrac{\hat{\He}_1(\hat{\lambda}_1)}{3!}
& \dfrac{\hat{\He}_1(\hat{\lambda}_2)}{3!}
& \cdots
& \dfrac{\hat{\He}_1(\hat{\lambda}_{\lfloor \frac{M}{2} \rfloor - 1})}{3!} \\
\dfrac{\hat{\He}_3(\hat{\lambda}_1)}{5!}
& \dfrac{\hat{\He}_3(\hat{\lambda}_2)}{5!}
& \cdots
& \dfrac{\hat{\He}_3(\hat{\lambda}_{\lfloor \frac{M}{2} \rfloor - 1})}{5!} \\
\vdots & \vdots & \cdots & \vdots \\
\end{array}
\right),
\end{aligned}
\]
Corresponding to
$\hat{\bR}_{+,\text{even}}$, define $\hat{\bR}_{+,\text{odd}},
\hat{\bR}_{-,\text{even}}, \hat{\bR}_{-,\text{odd}}$ as
\[
\begin{aligned}
\hat{\bR}_{+,\text{odd}} &\triangleq
(\hat{r}_{ij}), \text{~where~} i \text{~is
odd,~} j = 1,\cdots,\lfloor \frac{M}{2} \rfloor - 1, \\
\hat{\bR}_{-,\text{even}} &\triangleq
(\hat{r}_{ij}), \text{~where~} i \text{~is
even,~} j = \lfloor \frac{M}{2} \rfloor,\cdots,M-2, \\
\hat{\bR}_{-,\text{odd}} &\triangleq
(\hat{r}_{ij}), \text{~where~} i \text{~is
odd,~} j = \lfloor \frac{M}{2} \rfloor,\cdots,M-2.
\end{aligned}
\]
We have the following lemma.
\begin{lemma}\label{lem:R_peven_invertible}
$\hat{\bR}_{+,\text{even}}$ is invertible.
\end{lemma}
\begin{proof}
Let $\boldsymbol{P}_\sigma$ be the permutation matrix of the
permutation
\begin{equation}\label{mat:P_sigma}
\sigma: \left\{ 1, 2, \cdots , M-2 \right\} \rightarrow \left\{ 1,
2, \cdots , M-2 \right\},
\end{equation}
given by
\[
\sigma(i) = \mod(i,2) \times (\lfloor \frac{M}{2} \rfloor - 1) +
\lfloor i/2 \rfloor.
\]
The permutation maps the list of numbers $1, 2, \cdots, M -2 $ to
\[
2, 4, 6, \cdots, 1, 3, 5, \cdots
\]
so that the even numbers are ahead of the odd numbers.
Then matrix $\hat{\bR}$ is re-organized by the
permutation matrix as
\[
\boldsymbol{P}^{-1}_\sigma \hat{\bR} = \left(
\begin{array}{c|c}
\hat{\bR}_{+,\text{even}} & \hat{\bR}_{-,\text{even}}\\
\hat{\bR}_{+,\text{odd}} & \hat{\bR}_{-,\text{odd}}
\end{array}
\right).
\]
Notice that for each eigenvalue $\hat{\lambda}_i \in \hat{\bLambda}_+$, we have
$-\hat{\lambda}_i \in \hat{\bLambda}_{\leq 0}$. Then for any
eigenvector
\[
\hat{\br}_i = (\hat{\br}_{i,\text{even}} |
\hat{\br}_{i,\text{odd}})^T \in (\hat{\bR}_{+,\text{even}} |
\hat{\bR}_{+,\text{odd}})^T,
\]
there exists a column vector
\[
\hat{\br}_j = (-\hat{\br}_{i,\text{even}} |
\hat{\br}_{i,\text{odd}})^T \in (\hat{\bR}_{-,\text{even}} |
\hat{\bR}_{-,\text{odd}})^T.
\]
Then
\[
\hat{\br}_i - \hat{\br}_j = 2(\hat{\br}_{i,\text{even}} |
\boldsymbol{0})^T.
\]
The vectors $\hat{\br}_i - \hat{\br}_j$ are linearly
independent, since the $\hat{\br}_i, \hat{\br}_j$ are columns of the
invertible matrix $\hat{\bR}$ (eigenvectors of $\hat{\bM}$); hence the
columns of $\hat{\bR}_{+,\text{even}}$ are linearly independent.
Thus $\hat{\bR}_{+,\text{even}}$ is invertible.
\end{proof}
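As a quick numerical sanity check of Lemma \ref{lem:R_peven_invertible} (an illustrative Python sketch assuming NumPy, not part of the analysis; the helper names \texttt{hat\_M}, \texttt{hat\_He} and \texttt{even\_block\_invertible} are ours), one may assemble $\hat{\bM}$, compute its eigenvalues, build $\hat{\bR}$ as in \eqref{eq:def_hatbR}, and verify that $\hat{\bR}_{+,\text{even}}$ has full rank:
\begin{verbatim}
import numpy as np
from math import factorial

def hat_M(M):
    # (M-2)x(M-2) tridiagonal matrix: subdiagonal 1, superdiagonal 3,4,...,M-1
    n = M - 2
    A = np.zeros((n, n))
    for i in range(n - 1):
        A[i, i + 1] = i + 3
        A[i + 1, i] = 1.0
    return A

def hat_He(k, x):
    # \hat{He}_k(x) via \hat{He}_{k+1} = x*\hat{He}_k - (k+2)*\hat{He}_{k-1}
    prev, cur = 1.0, x
    if k == 0:
        return prev
    for j in range(1, k):
        prev, cur = cur, x * cur - (j + 2) * prev
    return cur

def even_block_invertible(M):
    n = M - 2
    lam = np.sort(np.linalg.eigvals(hat_M(M)).real)[::-1]  # descending order
    R = np.array([[hat_He(i, lam[j]) / factorial(i + 2)
                   for j in range(n)] for i in range(n)])
    half = M // 2 - 1
    R_plus_even = R[1::2, :half]  # even rows of the first half of the columns
    return np.linalg.matrix_rank(R_plus_even) == half

# if the lemma holds, this should print an empty list
print([M for M in range(4, 13) if not even_block_invertible(M)])
\end{verbatim}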
\subsubsection{Illustrative examples: $M \leq 5$}
We examine the cases for small $M$ to find out the formal solution for
generic $M$. The simplest system is the case for $M = 3$. The
variables are $V = (\bar{u}_1, \bar{\sigma}_{12},
\bar{f}_{e_1+2e_2})^T$, and matrices $\bM$ and $\bQ$ in
\eqref{eq:simple-velocity} are
\[
\bM =\left(
\begin{array}{ccc}
0 & 1 & 0 \\
1 & 0 & 2\\
0 & 1 & 0
\end{array} \right),
\quad
\bQ = \left(
\begin{array}{ccc}
0 & & \\
& 1 & \\
& & 1
\end{array} \right).
\]
Since $\bar{\sigma}_{12}$ is constant and the velocity is
\[
\bar{u}_1 = -\bar{\sigma}_{12}\frac{\bar{y}}{\Kn} + c_0,
\]
it is clear that the solution of $\bar{u}_1$ is not able to capture
the boundary layer since here $\bar{u}_1$ is a linear function of
$\bar{y}$. To capture the boundary layer of the velocity, we need more
moments, and thus we turn to the case $M = 4$. For $M = 4$, the equations
\eqref{eq:velocity} are
\begin{equation}\label{eq:vel-M4}
\begin{aligned}
&\od{\bar{\sigma}_{12}}{\bar{y}} = 0, \\
&\od{\bar{u}_1}{\bar{y}} + 2\od{\bar{f}_{e_1+2e_2}}{\bar{y}}
= -\frac{1}{\Kn}\bar{\sigma}_{12}, \\
&\od{\bar{\sigma}_{12}}{\bar{y}} +
3\od{\bar{f}_{e_1+3e_2}}{\bar{y}} = -\frac{1}{\Kn}\bar{f}_{e_1+2e_2}, \\
&\od{\bar{f}_{e_1+2e_2}}{\bar{y}} = -\frac{1}{\Kn}\bar{f}_{e_1+3e_2}. \\
\end{aligned}
\end{equation}
The solution gives us the expression of velocity as
\begin{equation} \label{eq:velo-ori}
\bar{u}_1 = - \bar{\sigma}_{12}\frac{\bar{y}}{\Kn} - 2 \bar{f}_{e_1+2e_2} + c_0
\end{equation}
from the second equation in \eqref{eq:vel-M4}. Here we need to solve the equations
\eqref{eq:matrix-hatM} for $\hat{V} = (\bar{f}_{e_1+2e_2}, \bar{f}_{e_1+3e_2})^T$,
where
\begin{equation}\label{mat:tridiagonal}
\hat{\bM} = \left(
\begin{array}{cc}
0 & 3\\
1 & 0
\end{array}
\right ).
\end{equation}
The matrix $\hat{\bM}$ can be decomposed as
$\hat{\bM} = \hat{\bR} \hat{\bLambda} \hat{\bR}^{-1}$,
\[
\hat{\bLambda} = \left(
\begin{array}{cc}
\sqrt{3} & \\
& -\sqrt{3}
\end{array} \right), \qquad
\hat{\bR} = \left(
\begin{array}{cc}
1 & 1 \\
\frac{1}{\sqrt{3}} & -\frac{1}{\sqrt{3}}
\end{array} \right).
\]
Hence, the solution of system \eqref{eq:matrix-hatM} is
\begin{equation*}
\hat{V} = \hat{\bR} \exp \left(-\frac{\bar{y}}{\Kn} \hat{\bLambda}^{-1} \right)
\hat{\bR}^{-1} \hat{V}^{(0)}.
\end{equation*}
By setting
$\hat{\bc}= (\hat{c}_1, \hat{c}_2)^T = \hat{\bR}^{-1}\hat{V}^{(0)}$,
the equations above result in
\begin{equation*}
\hat{V}=\left(\begin{array}{l}
\bar{f}_{e_1+2e_2}\\
\bar{f}_{e_1+3e_2}
\end{array}\right)
=\hat{\bR} \exp \left(-\frac{\bar{y}}{\Kn} \hat{\bLambda}^{-1}
\right)\hat{\bc}
=
\left( \begin{array}{l}
\hat{c}_1 \exp(-\frac{\bar{y}}{\sqrt{3}\Kn}) +
\hat{c}_2\exp(\frac{\bar{y}}{\sqrt{3}\Kn})\\
\frac{\sqrt{3}}{3} \hat{c}_1
\exp(-\frac{\bar{y}}{\sqrt{3}\Kn}) - \frac{\sqrt{3}}{3} \hat{c}_2
\exp(\frac{\bar{y}}{\sqrt{3}\Kn})
\end{array} \right).
\end{equation*}
The exponential terms provide us the boundary layer. Since all the
variables have to remain finite as $\bar{y} \to \infty$, the term
$\exp(\frac{1}{\sqrt{3}\Kn}\bar{y})$ has to be dropped. Therefore,
\[
\left(\begin{array}{l}
\bar{f}_{e_1+2e_2}\\
\bar{f}_{e_1+3e_2}
\end{array}\right)
=\hat{\bR} \left(
\begin{array}{cc}
\exp \left(-\frac{\bar{y}}{\Kn} \hat{\bLambda}_+^{-1} \right)
& \\ & \bzero
\end{array}
\right) \hat{\bc}
= \hat{c}_1 \exp(-\frac{\bar{y}}{\sqrt{3}\Kn}) \left(
\begin{array}{c}
1 \\
\frac{\sqrt{3}}{3}
\end{array}
\right).
\]
Here $\hat{\bLambda}_{+}$ is a $1 \times 1$ matrix with its entry as
$\sqrt{3}$. Applying the linearized boundary condition
\eqref{bc:linear}, i.e.
\begin{equation*}
\begin{aligned}
&- \frac{1}{\sqrt{2\pi}} \bar{u}_1 = S(1,1) \bar{\sigma}_{12} + S(1,2)
\bar{f}_{e_1+2e_2} + S(1,3)\bar{f}_{e_1+3e_2}, \\
&- \frac{2}{\sqrt{2\pi}} \bar{u}_1 = S(3,1) \bar{\sigma}_{12} + S(3,2)
\bar{f}_{e_1+2e_2} + S(3,3)\bar{f}_{e_1+3e_2},
\end{aligned}
\end{equation*}
we can obtain
\begin{equation*}
\hat{c}_1 = -\frac{\sqrt{\pi}(\chi - 2)}{2(\sqrt{3\pi}(2 -
\chi) + 2\sqrt{2}\chi)} \bar{\sigma}_{12}, \quad c_0 =
\sqrt{\frac{\pi}{2}} \frac{\chi - 2}{\chi}\left( 1 +
\frac{\sqrt{2}\chi}{4\sqrt{2}\chi + 2\sqrt{3\pi}(2 - \chi)}\right)
\bar{\sigma}_{12}.
\end{equation*}
Then the solution of velocity is
\begin{equation*}
\bar{u}_1 = - \bar{\sigma}_{12}\frac{\bar{y}}{\Kn}
-2\hat{c}_1\exp(-\frac{\bar{y}}{\sqrt{3}\Kn}) + c_0.
\end{equation*}
A similar procedure can be carried out for larger $M$. For example, if
we set $M = 5$, then
$V = (\bar{u}_1, \bar{\sigma}_{12}, \bar{f}_{e_1+2e_2},
\bar{f}_{e_1+3e_2}, \bar{f}_{e_1+4e_2})^T$
and $\hat{V} = (\bar{f}_{e_1+2e_2}, \bar{f}_{e_1+3e_2}, \bar{f}_{e_1+4e_2})^T$. The
matrix $\hat{\bM} = \hat{\bR} \hat{\bLambda}\hat{\bR}^{-1}$ in
\eqref{eq:matrix-hatM} is
\begin{equation*}
\hat{\bM} = \left(
\begin{array}{ccc}
0 & 3 & 0 \\
1 & 0 & 4 \\
0 & 1 & 0
\end{array}
\right ) \text{ with }
\hat{\bLambda} = \left(
\begin{array}{ccc}
\sqrt{7} & & \\
& 0 & \\
& & -\sqrt{7}
\end{array} \right),
~~
\hat{\bR} = \left(
\begin{array}{ccc}
1 & 1 & 1 \\
\sqrt{7}/3 & 0 & -\sqrt{7}/3 \\
1/3 & -1/4 & 1/3
\end{array} \right).
\end{equation*}
Notice that $M = 5$ is odd, so zero is a simple eigenvalue of
$\hat{\bM}$. This vanishing eigenvalue provides no additional exponential
layer, while the eigenvalue
$\sqrt{7}$ of the matrix $\hat{\bM}$ provides the only stable term which
survives in the solution. The solution of \eqref{eq:matrix-hatM} is
\begin{equation}
\hat{V}=
\left(\begin{array}{l}
\bar{f}_{e_1+2e_2}\\
\bar{f}_{e_1+3e_2}\\
\bar{f}_{e_1+4e_2}\\
\end{array}\right)
=\hat{\bR} \left(
\begin{array}{cc}
\exp \left(-\frac{\bar{y}}{\Kn} \hat{\bLambda}_+^{-1} \right)
& \\ & \bzero
\end{array}
\right) \hat{\bc}
= \hat{c}_1 \exp(-\frac{\bar{y}}{\sqrt{7}\Kn})
\left( \begin{array}{c}
1 \\
\frac{\sqrt{7}}{3} \\
\frac{1}{3}
\end{array} \right),
\end{equation}
where
$\hat{\bc}=(\hat{c}_1, \hat{c}_2, \hat{c}_3)^T =
\hat{\bR}^{-1}\hat{V}^{(0)}$
and the entry of the $1 \times 1$ matrix $\hat{\bLambda}_+$ is
$\sqrt{7}$. Similarly to the case $M = 4$, there are two coefficients
$c_0$ and $\hat{c}_1$ to be determined. To fix the coefficients, we
utilize two boundary conditions by setting $\beta_2 = 1,3$ in
\eqref{bc:linear}
\begin{equation*}
\begin{aligned}
&- \frac{1}{\sqrt{2\pi}} \bar{u}_1 = S(1,1) \bar{\sigma}_{12} + S(1,2)
\bar{f}_{e_1+2e_2} + S(1,3)\bar{f}_{e_1+3e_2} + S(1,4)\bar{f}_{e_1+4e_2} , \\
&- \frac{2}{\sqrt{2\pi}} \bar{u}_1 = S(3,1) \bar{\sigma}_{12} + S(3,2)
\bar{f}_{e_1+2e_2} + S(3,3)\bar{f}_{e_1+3e_2} + S(3,4)\bar{f}_{e_1+4e_2}.
\end{aligned}
\end{equation*}
Direct calculations yield
\begin{equation*}
\hat{c}_1 = -\frac{3\sqrt{\pi}(\chi - 2)}{2(3\sqrt{7\pi}(\chi -
2) - 10\sqrt{2}\chi)} \bar{\sigma}_{12}, \quad c_0 = \sqrt{\frac{\pi}{2}}
\frac{\chi - 2}{\chi}\left(1 -
\frac{2\sqrt{2}\chi}{3\sqrt{7\pi}(\chi - 2) - 10\sqrt{2}\chi}\right)
\bar{\sigma}_{12}.
\end{equation*}
Then the solution of velocity is given by
\begin{equation*}
\bar{u}_1 = -\bar{\sigma}_{12}\frac{\bar{y}}{\Kn} - 2\hat{c}_1
\exp\left(-\frac{\bar{y}}{\sqrt{7}\Kn}\right) + c_0.
\end{equation*}
\subsubsection{General case: arbitrary $M$}
Now we are ready to present the formal solution for arbitrary
$M$. Following the examples above, we have to drop the unbounded
factors to attain a stable solution, so that only the terms contributed
by the positive eigenvalues of $\hat{\bM}$ are kept. Thus the stable
solution of \eqref{eq:matrix-hatM} is
\begin{equation}\label{sol:general}
\hat{V}(\bar{y}) = \hat{\bR} \left(
\begin{array}{cc}
\exp \left(-\frac{\bar{y}}{\Kn} \hat{\bLambda}_{+}^{-1}\right)
& \\ & \bzero
\end{array}
\right)
\hat{\bc},
\end{equation}
where
$\hat{\bc}=(\hat{c}_1, \cdots, \hat{c}_{M-2})^T = \hat{\bR}^{-1}
\hat{V}^{(0)}$.
Clearly, only the first $\lfloor \dfrac{M}{2} \rfloor - 1$
entries of $\hat{\bc}$ appear in $\hat{V}(\bar{y})$. With the expression of
$\bar{f}_{e_1+2e_2}(\bar{y})$ provided as the first entry of
$\hat{V}(\bar{y})$, the velocity is again given by the second equation
in \eqref{eq:velocity} as
\begin{equation}\label{sol:velocity}
\bar{u}_1(\bar{y}) = - \bar{\sigma}_{12}\frac{\bar{y}}{\Kn} - 2
\be_1^T\hat{V}(\bar{y}) + c_0,
\end{equation}
where $\be_1 = (1, 0, \cdots, 0)^T$. Since $c_0$ in the expression of
$\bar{u}_1(\bar{y})$ is also to be determined, there are in total
$\lfloor \dfrac{M}{2} \rfloor$ indeterminate coefficients in $V(\bar{y})$.
Combining \eqref{sol:general} and \eqref{sol:velocity} with linearized
boundary condition \eqref{bc:linear}, we can obtain the boundary
condition for \eqref{eq:matrix-hatM} as
\begin{equation}\label{eq:simple-bc}
-\frac{(\beta_2 -1)!!}{\sqrt{2\pi}} \bar{u}_1 = S(\beta_2,1)\bar{\sigma}_{12} +
\sum_{\alpha_2=2}^{M-1} S(\beta_2,\alpha_2) \bar{f}_{e_1+\alpha_2e_2},
\quad \beta_2=1,3,\cdots,2\lfloor\frac{M}{2}\rfloor-1.
\end{equation}
The total number of boundary conditions is $\lfloor\frac{M}{2}\rfloor$,
which may fix all coefficients in the solution of $V(\bar{y})$. Once these
coefficients are fixed by the boundary conditions, we eventually
obtain $\bar{u}_1$ in the form
\begin{equation}\label{eq:sol_u1_formal}
\begin{aligned}
\bar{u}_1(\bar{y}) &= - \bar{\sigma}_{12}\frac{\bar{y}}{\Kn} - 2
\be_1^T\hat{V}(\bar{y}) + c_0 \\
&= - \bar{\sigma}_{12}\frac{\bar{y}}{\Kn} - 2 \be_1^T \hat{\bR} \left(
\begin{array}{cc}
\exp \left(-\frac{\bar{y}}{\Kn} \hat{\bLambda}_{+}^{-1}\right)
& \\ & \bzero
\end{array}
\right)
\hat{\bc} + c_0\\
&= - \bar{\sigma}_{12}\frac{\bar{y}}{\Kn} - 2\sum_{i = 1}^{\lfloor
\frac{M-2}{2} \rfloor}\hat{c_i}
\exp\left(-\frac{\bar{y}}{\Kn\hat{\lambda}_i}\right) +c_0.
\end{aligned}
\end{equation}
We let $\bar{y} = 0$ in \eqref{eq:sol_u1_formal} to have
$\bar{u}_1 = -2 \sum_{i = 1}^{\lfloor \frac{M}{2} \rfloor -1}\hat{c_i} +
c_0$
and substitute it into \eqref{eq:simple-bc} to obtain the following
linear system
\begin{equation}\label{sol:linear}
\begin{aligned}
& \quad \qquad - \frac{-2 \displaystyle \sum_{i = 1}^{\lfloor
\frac{M}{2} \rfloor-1}\hat{c_i} + c_0}{\sqrt{2\pi}}\left(
\begin{array}{c}
1 \\
(3 - 1)!!\\
\vdots\\
(2\lfloor \frac{M}{2} \rfloor - 2)!!
\end{array}\right) = \bar{\sigma}_{12} \left(
\begin{array}{c}
S(1,1) \\ S(3,1) \\ \vdots \\
S(2\lfloor \frac{M}{2} \rfloor -1,1)
\end{array}
\right) \\
& + \left( \begin{array}{cccc}
S(1,2) & S(1,3) &\cdots & S(1,M-1) \\
S(3,2) & S(3,3) &\cdots & S(3,M-1) \\
\vdots & \vdots &\ddots & \vdots \\
S(2\lfloor \frac{M}{2} \rfloor -1,2)
& S(2\lfloor \frac{M}{2} \rfloor -1,3)
& \cdots & S(2\lfloor \frac{M}{2} \rfloor -1,M-1)
\end{array} \right)
\hat{\bR} \left(
\begin{array}{c}
\hat{c}_1 \\ \vdots \\ \hat{c}_{\lfloor \frac{M}{2} \rfloor - 1}
\\ 0 \\ \vdots \\ 0
\end{array}
\right)
\end{aligned}
\end{equation}
Clearly, this is a linear system for
$\bc = (c_0, \hat{c}_1, \cdots, \hat{c}_{\lfloor \frac{M}{2}
\rfloor-1})^T$. Precisely, we let
\[
\bh = \frac{1}{\sqrt{2\pi}} \left(
\begin{array}{c}
1 \\
(3 - 1)!!\\
\vdots\\
(2\lfloor \frac{M}{2} \rfloor - 2)!!
\end{array}\right),
\qquad \bb = \left(
\begin{array}{c}
S(1,1)\\
S(3,1)\\
\vdots\\
S(2\lfloor \frac{M}{2} \rfloor-1,1)
\end{array} \right),
\]
\[
\bS = \left(
\begin{array}{cccc}
S(1,2) & S(1,3) & \cdots & S(1,M-1) \\
S(3,2) & S(3,3) & \cdots & S(3,M-1) \\
\vdots & \vdots & \ddots & \vdots \\
S(2\lfloor \frac{M}{2} \rfloor-1 ,2)
& S(2\lfloor \frac{M}{2} \rfloor-1, 3)
& \cdots & S(2\lfloor \frac{M}{2} \rfloor-1, M-1)
\end{array} \right),
\]
then the system \eqref{sol:linear} is formulated as
\begin{equation}\label{sol:linear1}
\bA \bc = -\sigma_{12} \bb,
\end{equation}
where $\bA = \left( \bh, (\bS - 2 \bh \be_1^T)\hat{\bR}_+ \right)$.
To fix the parameters in $\bc$, the unique solvability of
\eqref{sol:linear1} is required. At present, we can only prove that the
system \eqref{sol:linear1} is uniquely solvable when $\chi$ is an
algebraic number. Precisely, we have the following theorem:
\begin{theorem}
$\left| \bA \right| \neq 0$ if $\chi$ is an algebraic number.
\end{theorem}
\begin{proof}
We multiply $\bA$ on the right by
$\left(
\begin{array}{cc}
1 & 2 \be_1^T \hat{\bR}_+ \\
\bzero & \bI \end{array}
\right)$ to obtain $(\bh, \bS \hat{\bR}_+)$. Thus $|\bA| = |(\bh,\bS
\hat{\bR}_+)|$.
Extracting the coefficient $h_m(\chi)$ from $S(k,m)$, we have
\[
\bS = \bS^\star_0 \boldsymbol{H}
\]
where
$\boldsymbol{H} = \dfrac{1}{\sqrt{2\pi}} \diag \{ h_2(\chi),
h_3(\chi), \cdots, h_{M-1}(\chi) \}$ and
\[
\bS^\star_0 = \left(
\begin{array}{cccc}
S^\star(1,2) & S^\star(1,3) & \cdots & S^\star(1,M-1) \\
S^\star(3,2) & S^\star(3,3) & \cdots & S^\star(3,M-1) \\
\vdots & \vdots & \ddots & \vdots \\
S^\star(2\lfloor \frac{M}{2} \rfloor-1 ,2)
& S^\star(2\lfloor \frac{M}{2} \rfloor-1, 3)
& \cdots & S^\star(2\lfloor
\frac{M}{2} \rfloor-1, M-1)
\end{array} \right).
\]
By the recursion relation \eqref{eq:rec} of $S^\star(k,m)$, we have
that
\[
\boldsymbol{L} \bS^\star_0 = \left(
\begin{array}{c}
S^\star(1,2), ~ S^\star(1,3), ~ \cdots, ~ S^\star(1,M-1) \\
\bS^\star_1
\end{array}
\right),
\]
where
\[
\boldsymbol{L} = \left(
\begin{array}{ccccc}
1 &&&& \\
-2 & 1 &&& \\
& -4 & 1 && \\
& & \ddots & \ddots & \\
& & & -(2\lfloor \frac{M}{2} \rfloor-2) & 1
\end{array}
\right),
\]
and
\[
\bS^\star_1 = \left(
\begin{array}{cccc}
2S^\star(2,1) & 3S^\star(2,2) & \cdots & (M-1)S^\star(2,M-2) \\
2S^\star(4,1) & 3S^\star(4,2) & \cdots & (M-1)S^\star(4,M-2) \\
\vdots & \vdots & \ddots & \vdots \\
2S^\star(2\lfloor \frac{M}{2} \rfloor-2,1)
& 3S^\star(2\lfloor \frac{M}{2} \rfloor-2, 2)
& \cdots & (M-1)S^\star(2\lfloor
\frac{M}{2} \rfloor-2, M-2)
\end{array}
\right).
\]
Noticing that $\boldsymbol{L} \bh = (1/\sqrt{2\pi}, 0, \cdots, 0)^T$, we have
that
\[
(\bh, \bS \hat{\bR}_+) = \boldsymbol{L}^{-1} \left(
\begin{array}{cc}
1/\sqrt{2\pi} & (S^\star (1, 2), \cdots, S^\star (1, M-1)) \\
\bzero & \bS^\star_1
\end{array}
\right) \left(
\begin{array}{cc} 1 & \\ & \boldsymbol{H} \hat{\bR}_+ \end{array}
\right).
\]
Thus we only need to verify that the determinant of
$\bS^\star_1 \boldsymbol{H} \hat{\bR}_+$ does not vanish.
Considering the permutation matrix in \eqref{mat:P_sigma},
we see that
\[
\bS^\star_1 \boldsymbol{P}_\sigma = (\bS^\star_{\text{even}},
\bS^\star_{\text{odd}}),
\]
where $\bS^\star_{\text{even}}$ consists of the even columns of
$\bS^\star_1$, i.e.,
\[
\bS^\star_{\text{even}} = \left(
\begin{array}{cccc}
3S^\star(2,2) & 5S^\star(2,4) & 7S^\star(2,6) & \hdots \\
3S^\star(4,2) & 5S^\star(4,4) & 7S^\star(4,6) & \hdots \\
\vdots & \vdots & \vdots & \vdots \\
3S^\star(2\lfloor \frac{M}{2} \rfloor-2,2) & 5S^\star(2\lfloor
\frac{M}{2} \rfloor-2,4) & 7S^\star(2\lfloor \frac{M}{2}
\rfloor-2,6)& \hdots
\end{array}
\right),
\]
and $\bS^\star_{\text{odd}}$ consists of the odd columns of
$\bS^\star_1$, i.e.,
\[
\bS^\star_{\text{odd}} = \left(
\begin{array}{cccc}
2S^\star(2,1) & 4S^\star(2,3) & 6S^\star(2,5) &\hdots \\
2S^\star(4,1) & 4S^\star(4,3) & 6S^\star(4,5) &\hdots \\
\vdots & \vdots & \vdots & \vdots \\
2S^\star(2\lfloor \frac{M}{2} \rfloor-2,1) & 4S^\star(2\lfloor
\frac{M}{2} \rfloor-2,3) & 6S^\star(2\lfloor \frac{M}{2}
\rfloor-2,5) & \hdots
\end{array}
\right).
\]
By the integral properties of $S^{\star}(k,m)$ in~\ref{sec:bc},
$\bS^{\star}_{\text{even}}$ is a lower triangular matrix, and each
entry in its lower triangular part is
$\sqrt{2\pi}$ times an algebraic number.
The diagonal matrix $\boldsymbol{H}$ is turned into
\[
\boldsymbol{P}_\sigma^{-1} \boldsymbol{H} \boldsymbol{P}_\sigma =
\dfrac{1}{\sqrt{2\pi}}\left(
\begin{array}{cc}
\bI & \\
& \dfrac{2-\chi}{\chi} \bI
\end{array}
\right).
\]
We then have that
\[
\begin{aligned}
\bS^\star_1 \boldsymbol{H} \hat{\bR}_+ = &
\bS^\star_1 \boldsymbol{P}_\sigma ~ \boldsymbol{P}_\sigma^{-1}
\boldsymbol{H} \boldsymbol{P}_\sigma ~ \boldsymbol{P}_\sigma^{-1}
\hat{\bR}_+ \\
= & \dfrac{1}{\sqrt{2\pi}}
(\bS^\star_{\text{even}}, \bS^\star_{\text{odd}})
\left(
\begin{array}{cc}
\bI & \\
& \dfrac{2-\chi}{\chi} \bI
\end{array}
\right) \left(
\begin{array}{c}
\hat{\bR}_{+,\text{even}} \\
\hat{\bR}_{+,\text{odd}}
\end{array}
\right) \\
= & \dfrac{1}{\sqrt{2\pi}} \left( \bS^\star_{\text{even}}
\hat{\bR}_{+,\text{even}} + \dfrac{2-\chi}{\chi}
\bS^\star_{\text{odd}} \hat{\bR}_{+,\text{odd}} \right).
\end{aligned}
\]
Since $\hat{\bR}_{+,\text{even}}$ is invertible by Lemma
\ref{lem:R_peven_invertible},
\begin{equation}\label{eq:SHR}
\bS^\star_1 \boldsymbol{H} \hat{\bR}_+ = \dfrac{1}{\sqrt{2\pi}}
\left( \bS^\star_{\text{even}} + \dfrac{2-\chi}{\chi}
\bS^\star_{\text{odd}} \hat{\bR}_{+,\text{odd}}
\hat{\bR}_{+,\text{even}}^{-1}\right)\hat{\bR}_{+,\text{even}},
\end{equation}
and we only need to verify that the matrix
$\bS^\star_{\text{even}} + \dfrac{2-\chi}{\chi}
\bS^\star_{\text{odd}} \hat{\bR}_{+,\text{odd}}
\hat{\bR}_{+,\text{even}}^{-1}$
in \eqref{eq:SHR} is not singular. Considering the polynomial in
$\lambda$ defined by
\[
p(\lambda) \triangleq
\left|\dfrac{\lambda}{\sqrt{2\pi}}\bS^\star_{\text{even}} +
\dfrac{2-\chi}{\chi} \bS^\star_{\text{odd}}
\hat{\bR}_{+,\text{odd}} \hat{\bR}_{+,\text{even}}^{-1}\right|,
\]
we point out that $p(\lambda)$ is a polynomial whose coefficients
are all algebraic numbers, since $\chi$ is assumed to be algebraic
and the entries of the matrices
$\dfrac{1}{\sqrt{2\pi}}\bS^\star_{\text{even}}$,
$\hat{\bR}_{+,\text{even}}^{-1}$, $\bS^\star_{\text{odd}}$, and
$\hat{\bR}_{+,\text{odd}}$ are all algebraic
numbers. In particular, the leading coefficient of
$p(\lambda)$ is the product of all diagonal entries of the matrix
$\dfrac{1}{\sqrt{2\pi}}\bS^{\star}_{\text{even}}$ and thus does not
vanish. Therefore, any root of $p(\lambda) = 0$ must itself be an
algebraic number. Since $\sqrt{2\pi}$ is transcendental,
$p(\sqrt{2\pi}) \neq 0$, and consequently $\left| \bA \right| \neq 0$.
We conclude that the linear system \eqref{sol:linear1} is uniquely solvable.
\end{proof}
\begin{remark}
We expect that $\left| \bA \right| \neq 0$ for all
$\chi$, although at present we are unable to prove it. If we
regard $\left| \bA \right|$ as a function of $\chi$, it is clearly
continuous. The theorem above shows that this function is nonzero
at every algebraic number, so by continuity the roots of
$\left| \bA \right| = 0$ at least cannot be dense in $\mathbb{R}$.
\end{remark}
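To make the procedure concrete, the following is a minimal numerical
sketch (in Python) of the last two steps: assembling and solving the
linear system \eqref{sol:linear1}, and evaluating the velocity
\eqref{eq:sol_u1_formal}. The arrays \texttt{h}, \texttt{b},
\texttt{S}, \texttt{R\_plus} and the decay rates \texttt{lam\_hat} are
placeholders standing for $\bh$, $\bb$, $\bS$, $\hat{\bR}_+$ and
$\hat{\lambda}_i$ defined above; they have to be assembled separately
and are not part of the original formulation.
\begin{verbatim}
import numpy as np

def solve_kramers_coefficients(h, b, S, R_plus, sigma12):
    """Solve A c = -sigma12 * b with A = (h, (S - 2 h e1^T) R_plus).
    h, b: length-K vectors (K = floor(M/2)); S: K x (M-2);
    R_plus: (M-2) x (K-1) retained columns of R_hat."""
    e1 = np.zeros(R_plus.shape[0]); e1[0] = 1.0
    A = np.column_stack([h, (S - 2.0 * np.outer(h, e1)) @ R_plus])
    c = np.linalg.solve(A, -sigma12 * b)
    return c[0], c[1:]              # c0 and (c_hat_1, ..., c_hat_{K-1})

def u1_profile(ybar, sigma12, Kn, c0, c_hat, lam_hat):
    """Velocity u1(ybar) from the formal solution above."""
    decay = np.exp(-np.outer(ybar, 1.0 / (Kn * lam_hat))) @ c_hat
    return -sigma12 * ybar / Kn - 2.0 * decay + c0
\end{verbatim}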
\subsection{Convergence in moment order}\label{sec:convergence}
Let us reveal below the connection between the linearized HME and the
linearized Boltzmann equation in \cite{Williams2001}. For Kramers'
problem, the boundary condition we propose is related to that in
\cite{Williams2001} as well. Roughly speaking, our system turns out to
be a particular discretization of the equation in
\cite{Williams2001}. This allows us to examine the convergence of the
solution of our system to the numerical results for the equation in
\cite{Williams2001}. It is demonstrated that the solution converges to
that of the linearized Boltzmann equation in \cite{Williams2001} as
the moment order increases. Let us start with a brief review of
the main result on Kramers' problem in \cite{Williams2001}.
For the time independent Boltzmann equation
\begin{equation}\label{eq:space-Boltz}
\bxi\cdot\nabla_{\bx} f = Q(f,f),
\end{equation}
since only Kramers' problem is studied here, we linearize the
distribution function $f$ as
\begin{equation}\label{eq:linear-func}
f(\bx,\bxi) = \mathcal{M}(\bx,\bxi) [ 1 + h(\bx,\bxi) ],
\end{equation}
where $h(\bx,\bxi)$ is a disturbance term caused by the small
perturbation near the local equilibrium Maxwellian
$\mathcal{M}(\bx,\bxi)$, which has the form
\[
\mathcal{M}(\bx,\bxi) =
\frac{\rho_{0}(x,y)}{(2\pi\theta_{0}(x,y))^{3/2}}
\exp\left( -\frac{(\xi_x - u(y))^2 + \xi_y^2 + \xi_z^2}
{2\theta_{0}(x,y)} \right).
\]
Here $\rho_0$ and $\theta_0$ are the same as those in
\eqref{eq:dimensionless}, and
\[
u(y) = K y.
\]
Inserting \eqref{eq:linear-func} into the time independent Boltzmann
equation \eqref{eq:space-Boltz}, and discarding the high-order small
quantities, we can obtain
\begin{equation}\label{eq:insert-Boltz}
\bxi \cdot \nabla_{\bx} \mathcal{M} + \mathcal{M} \bxi \cdot \nabla_{\bx} h =
J(\mathcal{M},h),
\end{equation}
where $J(\mathcal{M},h)$ is the linearized BGK collision operator
\[
J(\mathcal{M},h) = - \mathcal{M}\left\{ \nu h - \frac{\nu}{\sqrt{2\pi\theta_0}^3}
\int h(y, \bxi') \left[1 + \frac{1}{\theta_0} \bxi \cdot \bxi' +
\frac{2}{3}\left(\frac{|\bxi|^2}{2\theta_0}- \frac{3}{2}\right)
\left(\frac{|\bxi'|^2}{2\theta_0}-\frac{3}{2}\right)\right]
\exp\left(-\frac{|\bxi|^2}{2\theta_0}\right) \dd \bxi' \right\},
\]
where $\nu$ is the collision frequency of the BGK model.
For convenience, we introduce the dimensionless variables
\[
\xi_i = \sqrt{\theta_0}\bar{\xi}_i, \qquad K = \sqrt{\theta_0}K_0,
\qquad \bx=L\bar{\bx},\qquad
\Kn = \frac{\sqrt{\theta_0}}{L\nu}.
\]
Then direct calculations and some simplification yield
\[
\begin{aligned}
&\bar{\xi}_x
\bar{\xi}_y K_0 + \bar{\xi}_y \pd{h(\bar{y},\bar{\bxi})}{\bar{y}} \\
&= - \frac{1}{\Kn} \left\{ h(\bar{y}, \bar{\bxi}) - \frac{1}{\sqrt{2\pi}^3} \int
h(\bar{y}, \bar{\bxi'}) \left[1 + \bar{\bxi} \cdot \bar{\bxi'} +
\frac{2}{3}\left(\frac{|\bar{\bxi}|^2-3}{2}\right)
\left(\frac{|\bar{\bxi'}|^2-3}{2}\right)\right]
\exp(-\frac{|\bar{\bxi}|^2}{2}) \dd \bar{\bxi}'\right\},
\end{aligned}
\]
and
\begin{equation}\label{eq:integralu}
\bar{u}_1(\bar{y}) = K_0 \bar{y} + \frac{1}{\sqrt{2\pi}}
\int_{-\infty}^{\infty} Z(\bar{y},\bar{\xi}) \exp\left(
-\frac{\bar{\xi}^2}{2} \right) \dd \bar{\xi},
\end{equation}
where
\[
Z(\bar{y}, \bar{\xi}_y) = \frac{1}{2\pi} \int_{-\infty}^{\infty}
\int_{-\infty}^{\infty} \bar{\xi}_x h(\bar{y}, \bar{\bxi})
\exp\left(-\frac{\bar{\xi}_x^2+\bar{\xi}_z^2}{2}\right) \dd
\bar{\xi}_x \dd \bar{\xi}_z .
\]
Then we have
\begin{equation}\label{eq:William}
K_0 \bar{\xi} + \bar{\xi} \pd{Z(\bar{y},\bar{\xi})}{\bar{y}}
=\frac{1}{\Kn} \left(
- Z(\bar{y},\bar{\xi}) + \frac{1}{\sqrt{2\pi}} \int_{\bbR}
Z(\bar{y}, \bar{\xi'}) \exp\left (-\frac{\bar{\xi'}^2}{2}\right)
\dd \bar{\xi'} \right).
\end{equation}
From \eqref{eq:sol_u1_formal} and \eqref{eq:integralu} we notice that
\[
K_0 = - \frac{\bar{\sigma}_{12}}{\Kn},
\]
and
\begin{equation}\label{eq:integralZ}
\begin{aligned}
\bar{f}_{e_1+ie_2} &= \frac{1}{i!}\frac{1}{\sqrt{2\pi}^3}
\int_{\bbR^3}\He_1(\bar{\xi}_x) \He_i(\bar{\xi}_y)
h(\bar{y},\bar{\bxi}) \exp\left(-\frac{\bar{\bxi}^2}{2}\right)
\dd\bar{\bxi}, \quad i=1, \dots,M-1,\\
&= \frac{1}{i!}\frac{1}{\sqrt{2\pi}}
\int_{\bbR} \He_i(\bar{\xi}) Z(\bar{y},\bar{\xi})
\exp\left(-\frac{\bar{\xi}^2}{2}\right)
\dd\bar{\xi}, \quad i= 1,\dots,M-1.
\end{aligned}
\end{equation}
Following \cite{Williams2001}, we use the model which is
also based on the diffuse-specular process; the boundary condition can
be written as
\[
f(0, \bxi) = \chi N f^W_M(\bx, \bxi) + (1-\chi) f(0, \bxi^{\ast}),
\]
where $f^W_M$ and $\bxi^{\ast}$ are defined in \eqref{eq:equilibrium}, and $N$
is a normalizing factor to be determined
\cite{Williams2001}. The factor $N$ is calculated from the zero mass flux condition
\[
\int_{\xi_y<0} \xi_y f(0,\bxi) \dd\bxi + \int_{\xi_y>0} \xi_y
f(0,\bxi) \dd\bxi = 0
\]
at $y=0$.
After linearization and nondimensionalization, the boundary
condition in Cartesian velocity coordinates reads as follows
\[
\begin{aligned}
h(0, \bar{\xi}_x, \bar{\xi}_y, \bar{\xi}_z) &= \chi[ \bar{\xi}_x
\bar{u}_1^W + \delta(\frac{\bar{\xi}^2}{2} - 2)] + (1- \chi) h(0, \bar{\xi}_x,
-\bar{\xi}_y, \bar{\xi}_z) \\
&- \frac{\chi}{2\pi} \int^0_{-\infty} \bar{\xi}'_y \dd \bar{\xi}'_y
\int_{\bbR^2} h(0, \bar{\xi}'_x, \bar{\xi}'_y, \bar{\xi}'_z)
\exp(-\frac{\bar{\bxi}'^2}{2}) \dd \bar{\xi}'_x \dd \bar{\xi}'_z; \quad
\bar{\xi}_y > 0.
\end{aligned}
\]
Considering Kramers' problem with boundary condition $\bar{u}_1^W =
0$ and $\delta = (\theta_W - \theta_0)/ \theta_W = 0$, we have
\begin{equation}\label{eq:William-bc}
Z(0, \bar{\xi}) = (1 - \chi) Z(0, -\bar{\xi}) ; \quad \bar{\xi} > 0.
\end{equation}
The equation \eqref{eq:William} is an integral equation in $\bar{\xi}$
and a differential equation in $\bar{y}$. Here we discretize it in
$\bar{\xi}$. Consider the Gauss-Hermite quadrature with $M\in\bbN$
points, and denote the weights and quadrature points by $\omega_i$ and
$\bar{\xi}_i$, $i=1,\cdots,M$. If we sort the $\bar{\xi}_i$ in
descending order, then $\bar{\xi}_i = \lambda_i$ in
\eqref{mat:pos-neg}. Let $Z(\bar{y})^k =
Z(\bar{y},\bar{\xi}_k)$ and
$\bZ(\bar{y})=(Z(\bar{y})^1,\cdots,Z(\bar{y})^M)^T$ and
$\bomega=(\omega_1,\cdots,\omega_M)^T$, then we have
\begin{equation}
K_0 \bLambda \boldsymbol{1} +
\bLambda\od{\bZ(\bar{y})}{\bar{y}}=\frac{1}{\Kn}\left(
\boldsymbol{1}\bomega^T-\bI \right)\bZ(\bar{y}),
\end{equation}
where $\bLambda$ is same as the \eqref{mat:Lamb}
and $\bI$ is
the $M\times M$ identity matrix, and
$\boldsymbol{1}=(1,\cdots,1)^T\in\bbR^M$.
Let $\bW = \diag\{\omega_i;~ i=1, \dots, M\}$. Since $\bW$ is
independent of $\bar{y}$, and $\bW$ and $\bLambda$ are both diagonal
matrices, the above formulation can be rewritten as
\begin{equation}\label{eq:discrete-matrix}
K_0 \bLambda \bW \boldsymbol{1} + \bLambda
\od{\bW\bZ(\bar{y})}{\bar{y}} = \frac{1}{\Kn} \left(
\bW\boldsymbol{1}\bomega^T\bW^{-1} - \bI \right) \bW\bZ(\bar{y}).
\end{equation}
Noticing $\bar{\xi}_i$, $i=1,\cdots,M$ are Gauss-Hermite integral
points, we have $\He_M(\bar{\xi}_i)=0$, which indicates
$\bLambda = \bR^{-1} \bM \bR$, where $\bM$ and $\bR$ are defined in
\eqref{eq:def_MQ} and \eqref{mat:eigen-vec}, respectively.
The orthogonality of the Hermite polynomials indicates
$\sum_{i=1}^M\omega_i\He_j(\bar{\xi}_i)=\delta_{j,0}$, thus we have
\begin{equation*}
\bR\bW\boldsymbol{1} = \be_1, \quad \bomega^T = \be_1^T
\bR \bW.
\end{equation*}
Now \eqref{eq:discrete-matrix} can be rewritten as
\begin{equation}\label{eq:discrete}
\begin{aligned}
K_0 \be_2 + \bM \od{[\bR\bW \bZ(\bar{y})]}{\bar{y}} &=
\bM \od{V}{\bar{y}}\\
&= \frac{1}{\Kn} \bR \left(\bW\boldsymbol{1}\bomega^T\bW^{-1}
- \bI \right) \bW\bZ(\bar{y})\\
&= \frac{1}{\Kn}
\left(\bR\bW\boldsymbol{1}\bomega^T\bW^{-1}\bR^{-1}
- \bI \right) [\bR\bW\bZ(\bar{y})]\\
&=\frac{1}{\Kn}\left( \be_1\be_1^T-\bI \right)
[\bR\bW\bZ(\bar{y})]\\
&=-\frac{1}{\Kn}\bQ [\bR\bW\bZ(\bar{y})]
= -\frac{1}{\Kn} \bQ V,
\end{aligned}
\end{equation}
where $\bQ$ is defined in \eqref{eq:def_MQ} and
it is readily shown that $V$ can be written as
\[
V = \left\{
\begin{array}{l}
\bar{u}_1(\bar{y}) = K_0\bar{y} + \displaystyle\sum_{j=1}^M \omega_j
Z^j(\bar{y}), \\
\bar{f}_{e_1+ie_2} = (\bR\bW \bZ(\bar{y}))_{i+1} = \dfrac{1}{i!}
\displaystyle\sum_{j=1}^M
\omega_j\He_i(\bar{\xi}^j)Z^j(\bar{y}), \quad i=1,\dots,M-1,
\end{array} \right.
\]
which is the discrete form of \eqref{eq:integralu} and
\eqref{eq:integralZ}.
Similar discretization can be carried out for boundary condition
\eqref{eq:William-bc}. It can be written as
\[
Z(0, \bar{\xi}_i) = (1 - \chi) Z(0, -\bar{\xi}_i),
\quad (i=1, \dots, \lfloor \frac{M}{2} \rfloor).
\]
Since the zeros of Hermite polynomials are symmetric, the equation
\[
\omega_i Z(0, \bar{\xi}_i) = (1- \chi) \omega_j Z(0, \bar{\xi}_j),
\quad (i = 1, \dots, \lfloor \frac{M}{2} \rfloor)
\]
has to be satisfied for $j = M+1-i$. Thus we have
\begin{equation}\label{bc:discrete}
\bH_{\chi} \bW \bZ(0) = 0,
\end{equation}
where $\bZ(0) = (Z(0, \bar{\xi}_1), \dots, Z(0, \bar{\xi}_M))^T$ and
\[
\begin{aligned}
&\text{when $M$ is even:}~
\bH_{\chi} = \left(
\begin{array}{cccccc}
1 & & & & & \chi-1 \\
& \ddots & & & \iddots & \\
& & 1 & \chi-1 & &
\end{array} \right )_{\frac{M}{2} \times M} ,\\
&\text{when $M$ is odd:}~
\bH_{\chi} = \left(
\begin{array}{ccccccc}
1 & & & 0 & & & \chi-1 \\
& \ddots & & \vdots & & \iddots & \\
& & 1 & 0 &\chi-1 & &
\end{array} \right )_{\lfloor \frac{M}{2} \rfloor \times M}.
\end{aligned}
\]
Let
\[
\bK_v = \frac{1}{\chi} \left(
\begin{array}{cccc}
\bar{\xi}_1 & \bar{\xi}_2 & \dots & \bar{\xi}_{\lfloor \frac{M}{2} \rfloor}\\
\bar{\xi}^3_1 & \bar{\xi}^3_2 & \dots & \bar{\xi}^3_{\lfloor \frac{M}{2} \rfloor}\\
\vdots & \vdots & \ddots & \vdots\\
\bar{\xi}^{2\lfloor \frac{M}{2} \rfloor-1}_1 & \bar{\xi}^{2\lfloor
\frac{M}{2} \rfloor-1}_2 & \dots & \bar{\xi}^{2\lfloor \frac{M}{2}
\rfloor-1}_{\lfloor \frac{M}{2} \rfloor}
\end{array} \right)_{\lfloor \frac{M}{2} \rfloor \times \lfloor
\frac{M}{2} \rfloor}.
\]
Since $\bar{\xi}_1,\cdots,\bar{\xi}_{\lfloor\frac{M}{2}\rfloor}$ are
distinct, $\bK_v$ is invertible due to the invertibility of
Vandermonde matrix.
Let $\tilde{\bR} = (\tilde{r}_{ij})_{M\times M}$ with $\tilde{r}_{ij}
= \He_{i-1}(\lambda_j)$, $i, j = 1,\dots,M,$ then from
\eqref{mat:eigen-vec} we have
$\bR = \diag \{1,1,\frac{1}{2!},\dots,\frac{1}{(M-1)!}\} \cdot
\tilde{\bR}$.
Using the orthogonality of Hermite polynomials
\[
\frac{1}{\sqrt{2\pi}}\int_{\bbR} \He_j(x) \He_k(x)
\exp\left(-\frac{x^2}{2}\right) \dd x = j!\delta_{jk},
\]
we have
\[
\bW \tilde{\bR}^T\bR = \bI.
\]
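These identities can also be checked numerically. The following short
sketch uses the probabilists' Hermite polynomials and the
corresponding Gauss--Hermite quadrature as provided by \texttt{numpy};
it only verifies the two relations $\bW\tilde{\bR}^T\bR=\bI$ and
$\bR\bW\boldsymbol{1}=\be_1$ and is not part of the derivation.
\begin{verbatim}
import numpy as np
from numpy.polynomial.hermite_e import hermegauss, hermeval
from math import factorial, sqrt, pi

M = 8
x, w = hermegauss(M)             # nodes/weights for the weight exp(-x^2/2)
idx = np.argsort(-x)             # descending order, matching lambda_i
xi, omega = x[idx], w[idx] / sqrt(2.0 * pi)    # normalized weights

# R_tilde[i, j] = He_i(xi_j),  R = diag(1, 1, 1/2!, ..., 1/(M-1)!) R_tilde
R_tilde = np.array([hermeval(xi, [0] * i + [1]) for i in range(M)])
R = np.diag([1.0 / factorial(i) for i in range(M)]) @ R_tilde
W = np.diag(omega)

print(np.allclose(W @ R_tilde.T @ R, np.eye(M)))          # W R~^T R = I
print(np.allclose(R @ W @ np.ones(M), np.eye(M)[:, 0]))   # R W 1 = e_1
\end{verbatim}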
Then we multiply matrix \eqref{bc:discrete} by $\bK_v$, and the matrix
form of boundary condition becomes
\begin{equation}\label{bc:mat-form}
\bK_v \bH_{\chi} \bW \bZ(0) = [\bK_v \bH_{\chi} \bW \tilde{\bR}^T]
\cdot [\bR\bW \bZ(0)] = 0.
\end{equation}
The discretization of $S(l,m)$ in \eqref{eq:integralS} is
\[
\begin{aligned}
S(l,m) &= \frac{1}{\chi} \sum_{j=1}^{\lfloor \frac{M}{2} \rfloor}
\{\bar{\xi}_j^l \omega_j
[\He_m(\bar{\xi}_j) - (1 - \chi)\He_m(-\bar{\xi}_j)]\} \\
&=\frac{1}{\chi} (\bar{\xi}_1^l, \bar{\xi}_2^l, \dots,
\bar{\xi}_{\lfloor \frac{M}{2} \rfloor}^l)
\cdot \bH_{\chi} \bW \cdot (\He_m(\bar{\xi}_1),
\He_m(\bar{\xi}_2),\dots, \He_m(\bar{\xi}_M))^T,
\end{aligned}
\]
then
\[
\bK_v \bH_{\chi} \bW \tilde{\bR}^T = \left(
\begin{array}{cccc}
S(1,0) & S(1,1) & \cdots & S(1,M-1) \\
S(3,0) & S(3,1) & \cdots & S(3,M-1) \\
\vdots & \vdots & \ddots & \vdots \\
S(2\lfloor \frac{M}{2} \rfloor-1,0) & S(2\lfloor \frac{M}{2} \rfloor-1,1)
& \cdots & S(2\lfloor \frac{M}{2} \rfloor-1,M-1)
\end{array} \right).
\]
Then \eqref{bc:mat-form} is turned into
\[
\left(
\begin{array}{cccc}
S(1,0) & S(1,1) & \cdots & S(1,M-1)\\
S(3,0) & S(3,1) & \cdots & S(3,M-1)\\
\vdots & \vdots & \ddots & \vdots\\
S(2\lfloor \frac{M}{2} \rfloor-1,0) & S(2\lfloor \frac{M}{2}
\rfloor-1,1) & \cdots & S(2\lfloor \frac{M}{2} \rfloor-1,M-1)
\end{array} \right) \cdot V^{(0)} = 0,
\]
which is the same as the boundary condition in \eqref{bc:linear}.
\section{Quantitative Validation}
In this section, we numerically study the convergence of the solutions
of the linearized HME to that of the linearized Boltzmann equation,
as well as the Knudsen layer effect on the velocity and the effective
viscosity, and compare them with existing results. In all the tests,
high-precision computation in Maple\footnote{Maple is a trademark of
Waterloo Maple Inc.} is used to reduce the numerical error.
\subsection{Convergence in moment order}\label{sec:convergenceNum}
In order to compare the results with the linearized Boltzmann equation
\cite{Williams2001}, we normalize the velocity in
\eqref{eq:sol_u1_formal} as
\begin{equation}\label{eq:refer_velo}
\tilde{u}(\bar{y}) = -\Kn\dfrac{\bar{u}}{\bar{\sigma}_{12}}
= \bar{y} + \frac{\Kn}{\bar{\sigma}_{12}} \left(2\sum_{i = 1}^{\lfloor
\frac{M-2}{2} \rfloor} \hat{c_i}
\exp\left(-\frac{\bar{y}}{\Kn\hat{\lambda}_i}\right) - c_0\right).
\end{equation}
The normalized velocity can be split into three parts
\cite{Williams2001,Siewert2001}
\begin{equation}\label{eq:defe_velo}
\tilde{u}(\bar{y})
= \bar{y} + \zeta - \tilde{u}_d(\bar{y}),
\end{equation}
where $\tilde{u}_d(\bar{y})$ is the velocity defect, satisfying
$\lim\limits_{\bar{y} \to +\infty} \tilde{u}_d(\bar{y}) = 0$, and
$\zeta$ is the slip coefficient, which is
\begin{equation}\label{eq:slip_coefficient_approx}
\zeta =
\lim_{\bar{y}\to+\infty}(\tilde{u}(\bar{y})+\tilde{u}_d(\bar{y})-\bar{y})
= -\Kn \cdot \frac{c_0}{\bar{\sigma}_{12}}.
\end{equation}
Then the velocity defect is
\begin{equation}
\tilde{u}_d(\bar{y}) =
2\Kn \sum_{i = 1}^{\lfloor
\frac{M-2}{2} \rfloor} \frac{\hat{c_i}}{\bar{\sigma}_{12}}
\exp\left(-\frac{\bar{y}}{\Kn\hat{\lambda}_i}\right).
\end{equation}
Here we notice that there is always a factor $\bar{\sigma}_{12}$ in
the expression of $\hat{c}_i$ and $c_0$ in \eqref{eq:sol_u1_formal}.
In this subsection, we fix $\Kn = 1/\sqrt{2}$ as a constant
for convenience. Next we study the convergence of the velocity defect
and slip coefficient, respectively.
\begin{figure}
\caption{Profile of the defect velocity $\tilde{u}_d(\bar{y})$ for different $M$ with $\chi=0.1$.}
\label{fig:diff_M_1}
\end{figure}
\begin{figure}
\caption{Profile of the defect velocity $\tilde{u}_d(\bar{y})$ for different $M$ with $\chi=0.9$.}
\label{fig:diff_M_9}
\end{figure}
\begin{figure}
\caption{Values of $\hat{\lambda}_i$ for $M$ ranging from $3$ to $40$.}
\label{fig:hatlambdai}
\end{figure}
For the velocity defect $\tilde{u}_d(\bar{y})$, the analytical results
with $M$ ranging from $5$ to $80$ are presented in Fig.
\ref{fig:diff_M_1} for $\chi = 0.1$ and Fig. \ref{fig:diff_M_9} for
$\chi = 0.9$, which are compared with the Siewert's numerical results
in \cite{Siewert2001} for the linearized BGK model. It is clear that
the results of the linearized HME converge to Siewert's result as $M$
increasing, which is consistent with the theoretical analysis in Sec.
\ref{sec:convergence}.
Meanwhile, one can find that the defect velocity of even order
converges faster to the reference solution than that of odd order.
This can be understood in terms of the smallest width of the boundary
layer, which is represented by $w_M:=\min\{\hat{\lambda}_i:
i=1,\cdots,\lfloor \frac{M}{2}-1\rfloor\}$. The smaller $w_M$ is, the
closer the defect velocity of the linearized HME is to the reference
solution. Fig. \ref{fig:hatlambdai} gives all the
$\hat{\lambda}_i$ for $M$ ranging from $3$ to $40$. One can observe
that $w_M$ for even $M$ is considerably smaller than that for the
adjacent odd $M$.
Moreover, comparing Fig. \ref{fig:diff_M_1} with Fig.
\ref{fig:diff_M_9}, one can find that for a given $M$, the relative
error in Fig. \ref{fig:diff_M_1} is a little larger than that in Fig.
\ref{fig:diff_M_9}. Indeed, for smaller $\chi$ the diffuse
interaction between the gas and the wall becomes weaker, so the
distribution function is expected to be farther from equilibrium,
which indicates that more moments are needed.
For the slip coefficient $\zeta$, the analytical results for different
$M$ are plotted in Fig. \ref{fig:zeta}, where a similar convergence can
be readily observed.
All the phenomena observed in Figs. \ref{fig:diff_M_1} and
\ref{fig:diff_M_9} are also valid in Fig. \ref{fig:zeta}.
\begin{figure}
\caption{Values of the slip coefficient $\zeta$ for different $M$
and $\chi$. The reference solution is Siewert's result in
\cite{Siewert2001}.}
\label{fig:zeta}
\end{figure}
\subsection{Knudsen layer}
In this subsection, we study the Knudsen layer of Kramers' problem in
three aspects. The first one is the profile of the normalized velocity
$\tilde{u}(\bar{y})$ in \eqref{eq:refer_velo}. For convenience, here we
also fix $\Kn$ as the constant $1/\sqrt{2}$. Fig.
\ref{fig:diff_chi} gives the profile of $\tilde{u}(\bar{y})$ in
\eqref{eq:refer_velo} for the linearized HME with $M=8$ and $M=9$.
Compared with numerical results of the linearized Boltzmann equation in
\cite{Loyalka1975}, the good agreement of the solutions of the
linearized HME in Fig. \ref{fig:diff_chi} indicates that the moment
system with a small $M$ is already good enough to describe the velocity
profile in the Knudsen layer.
Moreover, the value of $\tilde{u}(\bar{y})$ increases as $\chi$
decreases. This is because the coefficients $\hat{c}_i$ and $c_0$
depend on $\frac{2-\chi}{\chi}$.
As discussed in Section \ref{sec:convergenceNum}, the diffuse
interaction between the gas and the wall is weaker for smaller $\chi$.
\begin{figure}
\caption{Profile of $\tilde{u}(\bar{y})$ for different $\chi$, with $M=8$ and $M=9$.}
\label{fig:diff_chi}
\end{figure}
\begin{figure}
\caption{Profile of the velocity defect $\tilde{u}_d(\bar{y})$ with $M=20$ for different Knudsen numbers.}
\label{fig:diff_Kn}
\end{figure}
The second one is the profile of the velocity defect
$\tilde{u}_d(\bar{y})$ in \eqref{eq:defe_velo}.
Fig. \ref{fig:diff_Kn} shows the profile of $\tilde{u}_d(\bar{y})$
with $M=20$ for different Knudsen numbers.
The Knudsen layer becomes thicker as $\Kn$ increases, and the
strength of the Knudsen layer is enhanced. In practical applications,
more moments are needed for large $\Kn$.
The third one is the effective viscosity. The Navier-Stokes law
indicates $\sigma_{12}=-\mu\pd{u}{y}$. However, in the Knudsen layer,
the Navier-Stokes law no longer holds. To describe
the non-Newtonian behavior inherent in the Knudsen layer, we formally
write the Navier-Stokes law for the shear stress $\sigma_{12}$ as
\begin{equation}\label{eq:effective}
\sigma_{12} = -\mu_{\mathrm{eff}}\pd{u}{y},
\end{equation}
where $\mu_{\mathrm{eff}}$ is called the ``effective viscosity''.
Since the shear stress $\sigma_{12}$ is constant in the Kramers'
problem, we have
\begin{equation}
\frac{\mu_{\mathrm{eff}}}{\mu}
= -\left(\frac{\sigma_{12}}{\partial u / \partial y}\right)
\Big{/} \left(\frac{\lambda p_0}{\sqrt{\theta_0}}\right)
= - \frac{1}{\Kn} \frac{\bar{\sigma}_{12}}{\partial \bar{u} / \partial\bar{y}}
= \frac{1}{\partial \tilde{u} / \partial\bar{y}}.
\end{equation}
Noticing the definition of the normalized velocity
\eqref{eq:refer_velo}, one can directly calculate
\begin{equation}\label{eq:eff_HME}
\mu_{\mathrm{eff}} = \frac{\mu}{1 + \sum_{i = 1}^{\lfloor
\frac{M-2}{2} \rfloor} c_i
\exp\left(-\frac{\bar{y}}{\hat{\lambda}_i\Kn}\right)},
\quad c_i =
- \frac{2\hat{c}_i}{\hat{\lambda}_i \bar{\sigma}_{12}}.
\end{equation}
\begin{figure}
\caption{Effective viscosity $\mu_{\mathrm{eff}}$ given by different models, with $\chi=1$ and $\Kn=1/\sqrt{2}$.}
\label{fig:eff_vis}
\end{figure}
\begin{figure}
\caption{Comparison between the effective viscosity $\mu_{\mathrm{eff}}$ of different models and the reference solution.}
\label{fig:eff_vis_err}
\end{figure}
The effective viscosity has been well studied in the past. For example, in
\cite{Gu_R26}, Gu investigated the R26 moment equations and predicted
the effective viscosity as
\begin{equation}\label{eq:eff_Gu}
\mu_{\mathrm{eff}} = \left[1
-\left(1.3042C_1 \exp\left(-\frac{1.265\bar{y}}{\Kn}\right)
+ 1.6751C_2 \exp\left(-\frac{0.5102\bar{y}}{\Kn}\right) \right)
\right]^{-1} \mu,
\end{equation}
where
\[
\begin{aligned}
C_1 = \frac{\chi -2}{\chi} \frac{0.81265 \times 10^{-1}\chi^2 +
1.2824\chi}{0.48517 \times 10^{-2} \chi^2 + 0.64884 \chi +
8.0995},\\
C_2 = \frac{\chi -2}{\chi} \frac{0.8565 \times 10^{-3}\chi^2 +
0.362 \chi}{0.48517 \times 10^{-2} \chi^2 + 0.64884 \chi +
8.0995}.
\end{aligned}
\]
This model is similar to the linearized HME. Actually, since the R26
moment system can be derived from the HME, Gu's result can be treated
as a special case of the linearized HME.
In \cite{Lockerby2008}, Lockerby et al. studied the effective
viscosity based on two low-$\Kn$ BGK results, and proposed the
empirical expression
\begin{equation}\label{eq:vis_Lockerby}
\mu_{\mathrm{eff}} = \left(1
+ 0.1859\bar{y}^{-0.464} \exp\left(-0.7902\bar{y}\right)
\right)^{-1} \mu.
\end{equation}
For Lockerby's model, we have $\mu_{\mathrm{eff}}\to0$ as
$\bar{y}\to 0^+$, which indicates that the velocity gradient
approaches infinity at the wall.
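For reference, the HME expression \eqref{eq:eff_HME} and the two
closed-form models \eqref{eq:eff_Gu} and \eqref{eq:vis_Lockerby} can be
evaluated by a short script such as the sketch below; the coefficients
\texttt{c\_i} and widths \texttt{lam\_hat} in the HME expression are
placeholders that must be taken from the solution of \eqref{sol:linear1}.
\begin{verbatim}
import numpy as np

def mu_eff_hme(ybar, Kn, c_i, lam_hat, mu=1.0):
    """Linearized HME effective viscosity; c_i, lam_hat come from
    the solved boundary coefficients (placeholders here)."""
    s = sum(c * np.exp(-ybar / (l * Kn)) for c, l in zip(c_i, lam_hat))
    return mu / (1.0 + s)

def mu_eff_gu(ybar, Kn, chi, mu=1.0):
    """Gu's R26 prediction."""
    denom = 0.48517e-2 * chi**2 + 0.64884 * chi + 8.0995
    C1 = (chi - 2.0) / chi * (0.81265e-1 * chi**2 + 1.2824 * chi) / denom
    C2 = (chi - 2.0) / chi * (0.8565e-3 * chi**2 + 0.362 * chi) / denom
    bracket = 1.0 - (1.3042 * C1 * np.exp(-1.265 * ybar / Kn)
                     + 1.6751 * C2 * np.exp(-0.5102 * ybar / Kn))
    return mu / bracket

def mu_eff_lockerby(ybar, mu=1.0):
    """Lockerby's empirical expression."""
    return mu / (1.0 + 0.1859 * ybar**(-0.464) * np.exp(-0.7902 * ybar))
\end{verbatim}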
For convenience, here we let $\chi=1$ and $\Kn=1/\sqrt{2}$. Fig.
\ref{fig:eff_vis} shows the profiles of the effective viscosity of
these models. Due to the convergence of the linearized HME, we take
the solution of the linearized HME with $M=200$ as the reference
solution. One can observe that Gu's model gives a relatively larger
effective viscosity $\mu_{\mathrm{eff}}$, while Lockerby's gives a
relatively smaller one. If one wants to obtain a good approximation of
the effective viscosity close to the wall, many moments are needed.
We also take the solution of the linearized HME with $M=200$ as the
reference solution, and define the error as
\[
\mathrm{err} = \mu_{\mathrm{eff}}^{\mathrm{reference}} - \mu_{\mathrm{eff}}^{\mathrm{model}}.
\]
Fig. \ref{fig:eff_vis_err} shows the errors of Gu's and Lockerby's
models and of the linearized HME with $M=30$. Gu's model agrees with
the reference very well away from the Knudsen layer but gives too
large an effective viscosity inside it, while Lockerby's model gives
too small an effective viscosity. For the linearized HME, by choosing
a proper $M$, the effective viscosity can be captured well.
\section{Conclusion}
In this paper, the globally hyperbolic moment equations (HME) are
employed to study Kramers'
problem. First, the linearized globally hyperbolic moment
equations and their boundary conditions are built. The analytical
solutions for the defect velocity and the slip coefficient have been
obtained for moment equations of arbitrary order. In comparison with
data from kinetic theory, it has been shown that they can accurately
capture the Knudsen layer velocity profile over a wide range of
accommodation coefficients, especially in the case of small
accommodation coefficients. The results indicate that the physics of
non-equilibrium gas flow can be captured by the high-order HME system.
\end{document}
|
\begin{document}
\title{The size of oscillations in the Goldbach conjecture}
\author[M.~J. Mossinghoff]{Michael J. Mossinghoff}
\address{Center for Communications Research\\
Princeton, NJ, USA}
\email{[email protected]}
\author[T.~S. Trudgian]{Timothy S. Trudgian}\thanks{This work was supported by a Future Fellowship (FT160100094 to T.~S. Trudgian) from the Australian Research Council.}
\address{School of Science\\
UNSW Canberra at ADFA\\
ACT 2610, Australia}
\email{[email protected]}
\keywords{Goldbach conjecture, Hardy--Littlewood conjectures, oscillations, Riemann hypothesis, simultaneous approximation}
\subjclass[2010]{Primary: 11P32; Secondary: 11J20, 11M26, 11Y35}
\date{\today}
\begin{abstract}
\noindent
Let $R(n) = \sum_{a+b=n} \Lambda(a)\Lambda(b)$, where $\Lambda(\cdot)$ is the von Mangoldt function.
The function $R(n)$ is often studied in connection with Goldbach's conjecture.
On the Riemann hypothesis (RH) it is known that $\sum_{n\leq x} R(n) = x^2/2 - 4x^{3/2} G(x) + O(x^{1+\epsilon})$, where $G(x)=\Re \sum_{\gamma>0} \frac{x^{i\gamma}}{(\frac{1}{2} + i\gamma)(\frac{3}{2} + i\gamma)}$ and the sum is over the ordinates of the nontrivial zeros of the Riemann zeta function in the upper half-plane.
We prove (on RH) that each of the inequalities $G(x) < -0.02093$ and $G(x)> 0.02092$ holds infinitely often, and establish improved bounds under an assumption of linear independence for zeros of the zeta function.
We also show that the bounds we obtain are very close to optimal.
\end{abstract}
\maketitle
\section{Introduction}\label{Intro}
Let $\Lambda(n)$ denote the von Mangoldt function, and define $R(n)$ by
\begin{equation}\label{cumin}
R(n) = \sum_{a+b=n} \Lambda(a) \Lambda(b),
\end{equation}
where the sum is over positive integers $a$ and $b$ that sum to $n$.
This function arises naturally in the study of Goldbach's problem: clearly $R(n)>0$ precisely when $n$ is the sum of two positive prime powers.
The use of the von Mangoldt function makes the problem more amenable to analysis, and Goldbach's conjecture would follow if it could be shown that $R(n)$ were sufficiently large at even integers $n>2$.
It is natural then to study the average value of $R(n)$.
It is known that
\begin{equation}\label{coriander}
\sum_{n\leq x} R(n) = \frac{1}{2} x^{2} + O\left(x^{2}(\log x)^{-A}\right),
\end{equation}
unconditionally, for any positive constant $A$.
In a series of articles in 1991, Fujii obtained improvements on the error term in \eqref{coriander} that are conditional on the Riemann hypothesis.
In the first of this series, he established \cite{Fujii1991P1} that
\[
\sum_{n\leq x} R(n) = \frac{1}{2}x^{2} + O(x^{3/2}),
\]
and in the second paper \cite{Fujii1991P2} he refined the error term, proving that\footnote{We note that the sum over zeros, written in the form $2 \sum_{\rho}\frac{x^{\rho+1}}{\rho(\rho+1)}$ appears here even without assuming the Riemann hypothesis.}
\begin{equation}\label{ginger}
\sum_{n\leq x} R(n) = \frac{1}{2}x^{2} -4x^{3/2}\Re\sum_{\gamma>0} \frac{x^{i\gamma}}{(\frac{1}{2} + i\gamma)(\frac{3}{2} + i\gamma)} + O\left((x \log x)^{4/3}\right).
\end{equation}
Similar statements, with a slightly larger power on the $\log x$ term, were proved by Goldston \cite{Goldston} and by Granville \cite{G2}.
Reductions in the error term in \eqref{ginger} were made by Bhowmik and Schlage-Puchta \cite{BSP} and then by Languasco and Zaccagnini \cite{LZ}, who established $O(x \log^{3} x)$.
This is fairly close to optimal, since Bhowmik and Schlage-Puchta also proved that the error term here is $\Omega(x \log\log x)$. Analogous results for forms of \eqref{cumin}, where $n$ is written as the sum of $k$ prime powers, have been proved by Languasco and Zaccagnini \cite{LZ} and by Bhowmik, Ramar\'{e}, and Schlage-Puchta \cite{BRS}.
In this article, we study the oscillations in the sum on the right side of \eqref{ginger}.
To this end, define $G(x)$ by
\begin{equation}\label{turmeric}
G(x) = \Re \sum_{\gamma>0} \frac{x^{i\gamma}}{(\frac{1}{2} + i\gamma)(\frac{3}{2} + i\gamma)},
\end{equation}
where the sum is over the ordinates of the zeros of the Riemann zeta function in the upper half-plane.
We assume the Riemann hypothesis, so each such zero has real part $1/2$.
In his third paper of 1991 on this topic \cite{Fujii1991P3}, Fujii proved that if the ordinates of the first $70$ zeros of the Riemann zeta function on the critical line are linearly independent over the rationals, then each of the inequalities
\begin{equation}\label{eqnFujiiBounds}
G(x) < -0.012,
\quad
G(x) > 0.012
\end{equation}
would hold for an unbounded sequence of positive real numbers $x$.
He noted that this conclusion could also be established without the linear independence hypothesis, if one instead employed a method of Odlyzko and te Riele to solve certain simultaneous approximation problems involving these $70$ real numbers.
In 1985 Odlyzko and te Riele \cite{OTR} famously employed this method to disprove the Mertens conjecture regarding the size of oscillations in the function $M(x) =\sum_{n\leq x} \mu(n)$, where $\mu(\cdot)$ represents the M\"obius function.
Recently, Hurst \cite{Hurst} used the same method, along with additional techniques, to obtain the presently best known result in this problem.
Odlyzko and te Riele established large oscillations in the positive direction by determining a real number $y$ and integers $m_1$, \ldots, $m_{70}$ with the property that
\[
\abs{\gamma_{k_j} y - \psi_{k_j} - 2m_j\pi} < \epsilon_1,
\]
for $1\leq j\leq70$, for a small positive number $\epsilon_1$.
Here $\psi_{k_j}$ represents the argument of the residue of $1/\zeta(s)$ at $s=1/2+i\gamma_{k_j}$, and $1\leq k_1<k_2<\cdots<k_{70}\leq400$ denotes a particular sequence of positive integers corresponding to the zeros which produced the most beneficial contributions in the method employed there.
Likewise, to establish large oscillations in the negative direction, they determined $z$, $n_1$, \ldots, $n_{70}$
so that
\[
\abs{\gamma_{k_j} z - \psi_{k_j} - (2n_j+1)\pi} < \epsilon_2,
\]
for $1\leq j\leq 70$, for a small positive number $\epsilon_2$.
In \cite{Fujii1991P3}, Fujii required analogous results for the same problems, but with each $\psi_{k_j}$ eliminated, $k_j=j$ for each $j$, and $\epsilon_1 = \epsilon_2 = 0.1$.
(The first case is then a homogeneous approximation problem, and one naturally also requires $y\neq0$ there.)
It is not clear however if the required computations were in fact performed in \cite{Fujii1991P3}: it is stated that the argument there implies the bounds \eqref{eqnFujiiBounds} ``in principle.''
In this article, we analyze the oscillations in $G(x)$, and prove two main results.
First, we use the method of Odlyzko and te Riele to establish a lower bound on the oscillations exhibited by this function in each direction, improving \eqref{eqnFujiiBounds}.
We also establish improved bounds under an assumption of linear independence for the zeros of the zeta function.
Second, we establish an upper bound on these oscillations, which shows that our results are close to optimal.
We prove the following theorem.
\begin{theorem}\label{thmMain}
With $G(x)$ as in \eqref{turmeric}, on the Riemann hypothesis each of the following inequalities holds for an unbounded sequence of positive real numbers $x$:
\begin{equation}\label{paprika}
G(x) < -0.020932, \quad G(x) > 0.020927.
\end{equation}
Moreover, for all $x>0$,
\begin{equation}\label{saffron}
\abs{G(x)} < 0.023059.
\end{equation}
In addition, if the ordinates of the first $10^6$ zeros of the Riemann zeta function in the upper half-plane are linearly independent over $\mathbb{Q}$, then each of the following inequalities holds for an unbounded sequence of positive real numbers $x$:
\begin{equation}\label{fenugreek}
G(x) < -0.022978, \quad G(x) > 0.022978.
\end{equation}
\end{theorem}
This paper is organized in the following way.
Section~\ref{sage} establishes the upper bound \eqref{saffron} of Theorem~\ref{thmMain}.
Section~\ref{secCondLB} obtains lower bounds for oscillations in $G(x)$, conditioned on the existence of solutions to particular simultaneous approximation problems involving a number of zeros of the Riemann zeta function, and establishes \eqref{fenugreek}.
Last, Section~\ref{secComputations} describes the calculations required to establish the bounds \eqref{paprika} on the oscillations in this function without assuming any linear independence conditions to complete the proof of Theorem~\ref{thmMain}.
We remark that Hardy and Littlewood \cite{HL23} conjectured that $R(n) \sim nS(n)$ for even integers $n$, where
\begin{equation*}\label{cloves}
S(n) = \prod_{p|n} \left(1 + \frac{1}{p-1}\right) \prod_{p\nmid n} \left( 1 - \frac{1}{(p-1)^{2}}\right),
\end{equation*}
and that several authors encounter $G(x)$ when estimating the average value of $R(n) - n S(n)$.
For example, Fujii \cite{Fujii1991P2} in fact established \eqref{ginger} in the form
\[
\sum_{n\leq x} \bigl(R(n)-n S(n)\bigr) = -4 x^{3/2} G(x) + O\left((x\log x)^{4/3}\right).
\]
It is readily seen that the two forms are equivalent, since from Montgomery and Vaughan \cite[Lem.~1]{MV} we have that
\[
\sum_{n\leq x} n S(n) = \frac{1}{2} x^2 + O(x\log x).
\]
Additional estimates involving $S(n)$ and related functions and their application in problems in additive number theory can be found in \cite{MV}.
\section{An upper bound for $\abs{G(x)}$}\label{sage}
Taking the real part of the sum in \eqref{turmeric} produces
\begin{align}
\label{rosemary}
G(x) &= -\sum_{\gamma>0} \frac{\cos(\gamma \log x)}{\gamma^{2} +\frac{1}{4}}
+ \sum_{\gamma>0} \frac{3\cos(\gamma\log x) + 2\gamma \sin(\gamma \log x)}{(\gamma^{2} + \frac{1}{4})(\gamma^{2} + \frac{9}{4})}\\
\label{R1}
&= \sum_{\gamma>0} \frac{ (\frac{3}{4} - \gamma^{2}) \cos(\gamma \log x) + 2\gamma \sin(\gamma \log x)}{(\gamma^{2} + \frac{1}{4})(\gamma^{2} + \frac{9}{4})}.
\end{align}
With a little calculus one can show that the maximal value of the numerator in \eqref{R1} is $\sqrt{ \gamma^{4} + \frac{5}{2} \gamma^{2} + \frac{9}{16}}$, occurring when
\[
\tan(\gamma\log x) = \frac{ 2\gamma}{\frac{3}{4} - \gamma^{2}},
\]
and that the minimal value is $-\sqrt{ \gamma^{4} + \frac{5}{2} \gamma^{2} + \frac{9}{16}}$, so
\begin{equation}\label{R4}
|G(x)| \leq \sum_{\gamma>0} h(\gamma), \quad h(\gamma) = \frac{\sqrt{ \gamma^{4} + \frac{5}{2} \gamma^{2} + \frac{9}{16}}}{(\gamma^{2}+\frac{1}{4})(\gamma^{2} + \frac{9}{4})}.
\end{equation}
A simple expansion shows that
\[
h(\gamma) = \frac{1}{\gamma^{2}+ \frac{1}{4}} - \frac{1}{\gamma^{4}} + \frac{2}{\gamma^{6}} - \frac{61}{16 \gamma^{8}} + O\left(\gamma^{-10}\right),
\]
and from Davenport \cite[ch.\ 12]{Davenport} we have that
\begin{equation}\label{porto}
\sum_{\gamma>0} \frac{1}{\gamma^{2} + \frac{1}{4}} = \sum_{\rho} \Re \left(\rho^{-1}\right) = 1 + \frac{\xi}{2} - \frac{\log 4\pi}{2} = 0.02309\ldots,
\end{equation}
where $\rho=1/2+i\gamma$ and $\xi=0.577\ldots$ represents Euler's constant.
We write
\begin{equation}\label{Q2}
h(\gamma) = \frac{1}{\gamma^{2} + \frac{1}{4}} - \frac{1}{\gamma^{4}} + U(\gamma).
\end{equation}
A simple calculation reveals that $U(\gamma)\gamma^{6} \leq 2$.
Therefore, to obtain an upper bound on $\abs{G(x)}$, we require an upper bound on $\sum_{\gamma>0} \gamma^{-6}$.
(We also need a lower bound on the sum over $\gamma^{-4}$ from \eqref{Q2}, but clearly any finite sum will work.)
For this, we employ the result of Lehman \cite[Lem.\ 3]{Lehman} stating that
\begin{equation}\label{Q2a}
\sum_{\gamma>T} \gamma^{-n} < \frac{\log T}{T^{n-1}}
\end{equation}
provided $T\geq 2\pi e=17.079\ldots$ and $n\geq 2$.
Using \eqref{R4}, \eqref{porto}, \eqref{Q2}, and \eqref{Q2a}, we therefore conclude that
\[
|G(x)| < 1 + \frac{\xi}{2} - \frac{\log 4\pi}{2} - \sum_{0< \gamma \leq T_1} \frac{1}{\gamma^{4}} + 2 \sum_{0<\gamma\leq T_2} \frac{1}{\gamma^{6}} + \frac{\log T_2}{T_2^{5}},
\]
where we may choose any values for $T_1>0$ and $T_2\geq2\pi e$.
Choosing the first $1000$ zeros for each sum, that is, taking $T_1 = T_2 = 1420.41$, we find that $|G(x)| < 0.023058681$, which establishes \eqref{saffron}.
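This numerical bound is easily reproduced; the following sketch uses
the \texttt{zetazero} routine of the \texttt{mpmath} library to obtain
the ordinates of the first $1000$ zeros (any sufficiently accurate
table of zeros would serve equally well).
\begin{verbatim}
from mpmath import mp, zetazero, euler, log, pi, mpf

mp.dps = 30
gammas = [zetazero(n).imag for n in range(1, 1001)]  # first 1000 ordinates
T = mpf('1420.41')                                   # T_1 = T_2 as above

bound = (1 + euler / 2 - log(4 * pi) / 2
         - sum(g**-4 for g in gammas)
         + 2 * sum(g**-6 for g in gammas)
         + log(T) / T**5)
print(bound)                                         # 0.0230586...
\end{verbatim}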
\section{Conditional lower bounds}\label{secCondLB}
We may determine lower bounds on the oscillations of $G(x)$, conditioned on the existence of solutions to certain simultaneous approximation problems involving a number of nontrivial zeros of the Riemann zeta function.
We treat large oscillations in the positive direction here; large displacements in the negative direction follow analogously.
Given a positive integer $N$ and a positive real number $\epsilon$, suppose there exists a real number $y$ and integers $m_1$, \ldots, $m_N$ so that
\begin{equation}\label{oregano}
\abs{\gamma_k y - (2m_k+1)\pi} \leq \epsilon
\end{equation}
for $1\leq k\leq N$.
Then certainly
\begin{equation}\label{saltpepper}
\cos(\gamma_k y) < -1 + \frac{\epsilon^2}{2}
\end{equation}
for each $k$.
Let $T>2\pi e$ be a real number selected so that the number of nontrivial zeros of the Riemann zeta function with ordinate $\gamma<T$ is exactly $N$.
From \eqref{rosemary}, we have
\begin{equation}\label{cocoa}
\begin{split}
G(x) &= -\sum_{\gamma\leq T} \frac{\cos(\gamma \log x)}{\gamma^2+\frac{1}{4}}
+ \sum_{\gamma\leq T} \frac{3\cos(\gamma \log x)+2\gamma\sin(\gamma \log x)}{(\gamma^2+\frac{1}{4})(\gamma^2+\frac{9}{4})}\\
&\qquad + \sum_{\gamma>T} \frac{(\frac{3}{4}-\gamma^2)\cos(\gamma \log x) + 2\gamma\sin(\gamma \log x)}{(\gamma^2+\frac{1}{4})(\gamma^2+\frac{9}{4})}\\
&=: G_1(x,T) + G_2(x,T) + G_3(x,T).
\end{split}
\end{equation}
From \eqref{R4}, we have
\[
\abs{G_3(x,T)}
\leq\sum_{\gamma> T} \frac{\sqrt{\gamma^4+\frac{5}{2}\gamma^2+\frac{9}{16}}}{(\gamma^2+\frac{1}{4})(\gamma^2+\frac{9}{4})}
< \sum_{\gamma>T} \frac{1}{\gamma^2},
\]
and from \cite[Lem.\ 1]{Lehman} we obtain
\begin{equation}\label{mace}
\sum_{\gamma>T} \frac{1}{\gamma^2} = \frac{1}{2\pi}\int_T^\infty \frac{\log(t/2\pi)}{t^2}\,dt + \vartheta\left(\frac{4}{T^2}\log T + 2\int_T^\infty \frac{dt}{t^3}\right),
\end{equation}
where $\vartheta$ is a complex number satisfying $\abs{\vartheta}\leq1$.
This gives a better estimate than that in \eqref{Q2a}, which we shall need in what follows.
While the constants in the error in \eqref{mace} could be improved by the results in \cite{PT,TST}, the range of $T$ that we are considering here makes any potential gain negligible.
Consequently,
\begin{equation}\label{nutmeg}
\abs{G_3(x,T)} < B_3(T) := \frac{1}{2\pi T}\left(\log T + 1-\log 2\pi + \frac{2\pi}{T}(1+4\log T)\right)
\end{equation}
for all $x>0$.
For $G_1$, we use \eqref{saltpepper} to find
\[
G_1(e^y,T) > \left(1-\frac{\epsilon^2}{2}\right)\sum_{\gamma\leq T} \frac{1}{\gamma^2+\frac{1}{4}} .
\]
For $G_2$, we observe that $3\cos t + 2\gamma\sin t$ is decreasing near $t=\pi$, so
\[
G_2(e^y,T) \geq -\sum_{\gamma\leq T} \frac{3\cos\epsilon+2\gamma\sin\epsilon}{(\gamma^2+\frac{1}{4})(\gamma^2+\frac{9}{4})}.
\]
Therefore,
\begin{equation}\label{eqnCondUpper}
G(e^y) > \left(1-\frac{\epsilon^2}{2}\right)\sum_{\gamma\leq T} \frac{1}{\gamma^2+\frac{1}{4}} - \sum_{\gamma\leq T} \frac{3\cos\epsilon+2\gamma\sin\epsilon}{(\gamma^2+\frac{1}{4})(\gamma^2+\frac{9}{4})} - B_3(T).
\end{equation}
Similarly, given $N$ and $\epsilon$, if we suppose there exists a real number $z$ and integers $m_1$, \ldots, $m_N$ so that
\begin{equation}\label{cardomom}
\abs{\gamma_k z - 2m_k\pi} \leq \epsilon
\end{equation}
for $1\leq k\leq N$, then we obtain the negation of the expression in \eqref{eqnCondUpper} as a lower bound on the oscillations of $G(x)$:
\[
G(e^z) < \left(\frac{\epsilon^2}{2}-1\right)\sum_{\gamma\leq T} \frac{1}{\gamma^2+\frac{1}{4}} + \sum_{\gamma\leq T} \frac{3\cos\epsilon+2\gamma\sin\epsilon}{(\gamma^2+\frac{1}{4})(\gamma^2+\frac{9}{4})} + B_3(T).
\]
In Table~\ref{tableCondEst} we list a few values for the bound \eqref{eqnCondUpper} for a number of choices of $N$.
In each case we assume $\epsilon=0.01$, and take $T = T^*(N)$, where
\begin{equation}\label{allspice}
T^*(N) = \gamma_{N+1} - \frac{\gamma_{N+1}-\gamma_N}{100}.
\end{equation}
\begin{table}[tbh]
\caption{Conditional lower bounds for large positive values of $G(x)$ from \eqref{eqnCondUpper}, assuming the simultaneous approximation problem \eqref{oregano} has a solution with $\epsilon=0.01$.}\label{tableCondEst}
\begin{tabular}{|cc|cc|}\hline
$N$ & Bound\ & $N$ & Bound\\\hline
70 & $0.014756$ & 500 & $0.020630$\\
100 & $0.016352$ & 600 & $0.020902$\\
150 & $0.017837$ & 700 & $0.021109$\\
200 & $0.018692$ & 800 & $0.021272$\\
250 & $0.019269$ & 900 & $0.021404$\\
300 & $0.019684$ & 1000 & $0.021515$\\
350 & $0.020001$ & 2000 & $0.022079$\\
400 & $0.020254$ & $10^4$ & $0.022699$\\
450 & $0.020459$ & $10^5$ & $0.022925$\\\hline
\end{tabular}
\end{table}
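For completeness, the entries of Table~\ref{tableCondEst} can be
reproduced by a short computation of the following form, again relying
on \texttt{mpmath} for the zero ordinates.
\begin{verbatim}
from mpmath import mp, zetazero, cos, sin, log, pi

mp.dps = 30

def conditional_bound(N, eps=0.01):
    g = [zetazero(n).imag for n in range(1, N + 2)]
    T = g[N] - (g[N] - g[N - 1]) / 100      # T*(N)
    B3 = (log(T) + 1 - log(2 * pi)
          + 2 * pi / T * (1 + 4 * log(T))) / (2 * pi * T)
    s1 = sum(1 / (x**2 + 0.25) for x in g[:N])
    s2 = sum((3 * cos(eps) + 2 * x * sin(eps))
             / ((x**2 + 0.25) * (x**2 + 2.25)) for x in g[:N])
    return (1 - eps**2 / 2) * s1 - s2 - B3

print(conditional_bound(70))                # about 0.014756
\end{verbatim}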
If the ordinates of the first $N$ nontrivial zeros of the zeta function are linearly independent, then by Kronecker's theorem the corresponding bound in Table~\ref{tableCondEst} would necessarily follow, as would any value computed with an arbitrary choice of $\epsilon>0$.
Selecting $\epsilon=10^{-6}$ with $N=10^6$ produces the value $0.02297864\ldots$\,, which verifies \eqref{fenugreek} in Theorem~\ref{thmMain}.
To obtain bounds without linear independence, in the next section we turn to the method of Odlyzko and te Riele for constructing solutions to some of these simultaneous approximation problems.
\section{Computations}\label{secComputations}
We complete the proof of Theorem~\ref{thmMain} by solving the simultaneous approximation problems \eqref{oregano} and \eqref{cardomom} for particular $N$ and $\epsilon$.
For this we employ the method of Odlyzko and te Riele \cite{OTR}, which we briefly describe here.
Let $\rd{x}$ denote the integer nearest the real number $x$, and let $\mathbf{e}_k$ denote the $k$th elementary unit column vector in the appropriate real vector space.
The construction requires values for four integer parameters: $N$, $b$, $c$, and $d$.
Here, $b$ represents the number of bits of precision used in the computation; $c$ and $d$ are small positive integers whose meanings will be described shortly.
\subsection{Large positive values for $G(x)$}\label{subsecHigh}
Consider first the inhomogeneous problem \eqref{oregano}, where we require a real number $y$ with the property that $\gamma_k y$ is near $\pi$, modulo integer multiples of $2\pi$, for $1\leq k\leq N$.
We construct the $(N+2)\times(N+2)$ integer matrix $M$ whose column vectors are
\begin{gather*}
\rd{2^{b+1}\pi}\mathbf{e}_k,\; 1\leq k\leq N,\\
\mathbf{e}_{N+1}-\sum_{k=1}^N \rd{2^{b-c} \gamma_k}\mathbf{e}_k,\\
2^b N^d \mathbf{e}_{N+2}+\rd{2^b\pi}\sum_{k=1}^N \mathbf{e}_k.
\end{gather*}
That is, $M$ consists of an $(N+2)\times N$ diagonal matrix with entries $\rd{2^{b+1}\pi}$ on the diagonal, augmented with one column carrying rounded multiples of the $\gamma_k$, and another largely filled with a rounded multiple of the inhomogeneous part, $\pi$.
The penultimate vector carries the lone nonzero value in vector position $N+1$, set to $1$ so that we can recover a coefficient later in the computation.
The last vector has the only nonzero value in the last position, chosen to be much larger than the other entries of the matrix.
We apply the LLL algorithm \cite{LLL} to $M$ to compute a reduced basis for the lattice spanned by its column vectors.
This reduced basis consists of vectors that are relatively short, in fact within a factor (whose value is bounded by an expression that is exponential in the dimension) of the shortest independent vectors in the lattice.
Since the last coordinate of every vector in the lattice is an integer multiple of the large integer $2^b N^d$, it is likely that there is only one vector in the reduced lattice with a nonzero value in this position, which is very likely to be $\pm2^b N^d$.
If this value is negative we can negate the vector, so suppose it is $(r_1,\ldots,r_N,s,2^b N^d)^T$.
We then have that there exist integers $m_1$, \ldots, $m_N$ such that
\[
r_k = m_k \rd{2^{b+1}\pi} + \rd{2^b\pi} - s\rd{2^{b-c}\gamma_k}
\]
for $1\leq k\leq N$, and that the $r_k$ are relatively small.
If $s<0$ then we can negate this vector so that our inhomogeneous part is $-\pi$, which serves us just as well, so we assume $s\geq0$ here.
We might then expect
\[
\gamma_k s 2^{-c} \approx 2\pi m_k+ \pi
\]
so we take $y=s/2^c$, and use this in \eqref{cocoa} and \eqref{nutmeg} to compute the resulting lower bound on positive values reached by $G(x)$:
\begin{equation}\label{basil}
G_1(e^y,T^*(N)) + G_2(e^y,T^*(N)) - B_3(T^*(N)),
\end{equation}
with $T^*(N)$ as in \eqref{allspice}.
For each $k$ we also compute $m_k = \rd{(\gamma_k y - \pi)/2\pi}$, and then
\begin{equation}\label{eqnEps1}
\epsilon_1 = \max_{1\leq k\leq N}\{\abs{\gamma_k y - (2m_k+1)\pi}\}.
\end{equation}
A large value of $\epsilon_1$ (and consequently a small value in \eqref{basil}) likely indicates that insufficient precision was employed.
In that case we repeat this process with a larger value of $b$.
Odlyzko and te Riele used $c=10$ and $d=4$.
Both values worked sufficiently well in our application, too, so we did not alter these in our principal runs.
Those authors also reported selecting $b$ between $6.6N$ and $13.3N$ (that is, using between $2N$ and $4N$ decimal digits of precision).
The larger end of this range sufficed in our application only for $N$ up to about $250$, where we produced $\epsilon_1=0.035$.
For larger dimensions we needed to select $b$ as large as $25N$.
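Schematically, the construction of $M$ and the basis reduction can be
carried out in SageMath (which we used for all computations; see
Section~\ref{subsecResults}) as in the sketch below. The list
\texttt{gamma} of the first $N$ zero ordinates, computed to well over
$b$ bits (for instance with \texttt{mpmath.zetazero}), is assumed to be
available; note that Sage's \texttt{LLL} routine reduces the
\emph{rows} of a matrix, so the column vectors described above are
entered as rows.
\begin{verbatim}
# parameters from the first row of the results table (N = 70, b = 930)
b, c, d, N = 930, 10, 4, 70
R = RealField(b + 64)
piR = R(pi)

rows = []
for k in range(N):                                 # round(2^{b+1} pi) e_k
    v = [0] * (N + 2)
    v[k] = (2**(b + 1) * piR).round()
    rows.append(v)
rows.append([-(2**(b - c) * R(g)).round() for g in gamma] + [1, 0])
rows.append([(2**b * piR).round()] * N + [0, 2**b * N**d])

B = matrix(ZZ, rows).LLL()        # reduced basis, one vector per row
r = next(v for v in B.rows() if v[N + 1] != 0)     # carries +-2^b N^d
r = r if r[N + 1] > 0 else -r
y = r[N] / 2**c                   # candidate for the real number y
\end{verbatim}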
\subsection{Large negative values for $G(x)$}\label{subsecLow}
For this case, we need to solve the homogeneous simultaneous approximation problem \eqref{cardomom}, as we need to find a value $z$ so that $\gamma_k z$ is very near an integer multiple of $2\pi$, for each $k$.
No additional computations are required here, as our reduced basis from the prior computation already contains many vectors of the form $(r_1,\ldots,r_N,t,0)$; in fact, there are likely to be $N+1$ of these.
Each one represents a viable solution to the homogeneous problem, since here for each $k$ we have
\[
r_k = m_k \rd{2^{b+1}\pi} - t\rd{2^{b-c}\gamma_k}
\]
for some integer $m_k$, and again the $r_k$ value is relatively small, so we might expect
\[
\gamma_k t 2^{-c} \approx 2\pi m_k
\]
for each $k$.
We may assume $t\geq0$.
For each such vector, we set $z=t/2^c$, and compute the resulting bound on negative values achieved by $G(x)$:
\[
G_1(e^z,T^*(N)) + G_2(e^z,T^*(N)) + B_3(T^*(N)).
\]
Among all such vectors we may select the $z$ that produces the best value.
With this set, then for each $k$ we may compute $m_k = \rd{\gamma_k z/2\pi}$, and then
\begin{equation}\label{eqnEps2}
\epsilon_2 = \max_{1\leq k\leq N}\{\abs{\gamma_k z - 2m_k\pi}\}.
\end{equation}
Again, if $\epsilon_2$ is too large, then we can repeat the process with a larger value of $b$.
In practice, if $\epsilon_1$ was sufficiently small then $\epsilon_2$ was as well.
\subsection{Results}\label{subsecResults}
All computations were performed in SageMath \cite{Sage}, using resources at NCI Australia and at the Center for Communications Research.
High-precision values for zeros of the Riemann zeta function were computed using the \texttt{mpmath} Python library \cite{mpmath}, available within SageMath.
Table~\ref{tableResults} records the bounds we obtained on $G(x)$ in this way, using different values for $N$ and $b$.
The last line in this table records the parameters and results of the computation that establishes \eqref{paprika} in Theorem~\ref{thmMain}.
This calculation required almost two weeks of core time on an Intel Xeon Platinum 8175M processor running at $2.5$ GHz.
Figure~\ref{figYZ} exhibits the values of $2^{10}y$ and $2^{10}z$ obtained for this case, using base $36$ for economy of space.
\begin{table}[tbh]
\caption{Guaranteed oscillations in $G(x)$, along with the errors $\epsilon_1$ and $\epsilon_2$ from \eqref{eqnEps1} and \eqref{eqnEps2}, obtained by solving the simultaneous approximation problems using the first $N$ zeros of the Riemann zeta function, and using $b$ bits of precision. The displayed values for the bounds are truncated at the last displayed digit; those for $\epsilon_1$ and $\epsilon_2$ are rounded up at the last displayed digit.}\label{tableResults}
\begin{tabular}{|cccccc|}\hline
$N$ & $b$ & Lower & Upper & $\epsilon_2$ & $\epsilon_1$\\\hline
70 & 930 & $-0.0147727$ & $0.0147720$ & $0.00092$ & $0.00089$\\
100 & 1330 & $-0.0163668$ & $0.0163683$ & $0.00168$ & $0.00125$\\
150 & 2000 & $-0.0178557$ & $0.0178520$ & $0.00444$ & $0.00394$\\
200 & 2660 & $-0.0186992$ & $0.0187115$ & $0.01340$ & $0.01160$\\
250 & 3325 & $-0.0192583$ & $0.0192902$ & $0.03166$ & $0.03525$\\
300 & 4500 & $-0.0197172$ & $0.0196887$ & $0.02851$ & $0.03691$\\
350 & 6000 & $-0.0200320$ & $0.0200230$ & $0.02075$ & $0.01719$\\
400 & 7600 & $-0.0202570$ & $0.0202690$ & $0.01772$ & $0.00990$\\
450 & 8600 & $-0.0204387$ & $0.0204629$ & $0.04154$ & $0.03556$\\
500 & 11000 & $-0.0206646$ & $0.0206304$ & $0.02424$ & $0.02210$\\
600 & 15000 & $-0.0209324$ & $0.0209272$ & $0.02479$ & $0.02106$\\\hline
\end{tabular}
\end{table}
\begin{figure}
\caption{The values of $2^{10}y$ and $2^{10}z$ for the computation with $N=600$, written in base $36$.}
\label{figYZ}
\end{figure}
\section*{Acknowledgments}
We thank NCI Australia, UNSW Canberra, and the Center for Communications Research for computational resources.
This research was undertaken with the assistance of resources and services from the National Computational Infrastructure (NCI), which is supported by the Australian Government.
\end{document}
|
\begin{document}
\setlength{\unitlength}{1mm}
\title{Minimal obstructions for 1-immersions and hardness of 1-planarity testing}
\author{ \makebox[10cm]{}
\\ Vladimir P. Korzhik\thanks{This paper was done while the author
visited Simon Fraser University.}\\
National University of Chernivtsi, Chernivtsi\\
and\\
Institute of Applied Problems of Mechanics and Mathematics\\
of National Academy of Science of Ukraine, Lviv\\
Ukraine
\and \makebox[10cm]{}
\\ Bojan Mohar\thanks{Supported in part by the
Research Grant P1--0297 of ARRS (Slovenia), by an NSERC Discovery Grant (Canada)
and by the Canada Research Chair program.}~\thanks{On leave from:
IMFM \& FMF, Department of Mathematics, University of Ljubljana, Ljubljana,
Slovenia.} \\
Department of Mathematics \\
Simon Fraser University \\
Burnaby, B.C. V5A 1S6 \\ Canada}
\date{}
\maketitle
\begin{abstract}
A graph is \emph{$1$-planar} if it can be drawn on the
plane so that each edge is crossed by no more than
one other edge (and any pair of crossing edges cross only once). A non-1-planar graph $G$ is
\emph{minimal} if the graph $G-e$ is 1-planar for
every edge $e$ of $G$. We construct two infinite
families of minimal non-1-planar graphs and show that
for every integer $n\geq 63$, there are at least
$2^{(n-54)/4}$ nonisomorphic minimal
non-1-planar graphs of order $n$.
It is also proved that testing 1-planarity is NP-complete.
\end{abstract}
\noindent\textbf{Running head:}\\
Obstructions for 1-immersions\\
\noindent\textbf{Corresponding author:} Bojan Mohar\\
\noindent\textbf{AMS classifications:}\\
05C10, 05B07.
\noindent\textbf{Keywords:}\\
Topological graph, crossing edges, 1-planar graph, 1-immersion.
\section{Introduction}\label{Intr}
A graph drawn in the plane is \emph{$1$-immersed\/} in the plane
if any edge is crossed by at most one other edge (and any pair of crossing edges cross only once). A graph is
\emph{$1$-planar} if it can be 1-immersed into the plane.
It is easy to see that if a graph has a 1-immersion in which
two edges $e,f$ with a common endvertex cross, then the
drawing of $e$ and $f$ can be changed so that these two
edges no longer cross. Consequently, we may assume that
adjacent edges never cross each other and that
no edge crosses itself. We take this assumption as
a part of the definition of 1-immersions since this limits
the number of possible cases when discussing 1-immersions.
The notion of 1-immersion
of a graph was introduced by Ringel \cite{R} when
trying to color the vertices and faces of a plane
graph so that adjacent or incident elements receive
distinct colors. In the last two decades this class of
graphs received additional attention because of its
relationship to the family of \emph{map graphs}, see
\cite{CGP,CGP2} for further details.
Little is known about 1-planar graphs. Borodin
\cite{B1,B2} proved that every 1-planar graph is
6-colorable. Some properties of maximal 1-planar
graphs are considered in \cite{S}. It was shown in
\cite{BKRS} that every 1-planar graph is acyclically
20-colorable. The existence of subgraphs of bounded
vertex degrees in 1-planar graphs is investigated in
\cite{FM}. It is known (see \cite{BSW,Ch1,Ch2}) that a
1-planar graph with $n$ vertices has at most $4n-8$
edges and that this upper bound is tight. In the
paper \cite{CK} it was observed that the class of
1-planar graphs is not closed under the operation of
edge-contraction.
Much less is known about non-1-planar graphs.
The basic question is how to recognize 1-planar graphs.
This problem is clearly in NP, but it is not clear at all
if there is a polynomial time recognition algorithm.
We shall answer this question by proving that the 1-planarity
testing problem is NP-complete.
The recognition problem is closely related to the study of
minimal obstructions for 1-planarity.
A graph $G$ is said to be a \emph{minimal} non-1-planar graph
(\emph{{\rm MN}-graph}, for short) if $G$ is not 1-planar, but
$G-e$ is 1-planar for every edge $e$ of $G$.
An obvious question is:
\begin{quote}
How many MN-graphs are there? Is their number finite?
If not, can they be characterized?
\end{quote}
\noindent
The answer to the first question is not hard: there are
infinitely many.
This was first proved in \cite{K}.
Here we present two additional simple arguments implying the
same conclusion.
{\bf Example 1.}
Let $G$ be a graph such that
$t = \lceil{\rm cr}(G)/|E(G)|\rceil - 1\geq 1$,
where ${\rm cr}(G)$ denotes the crossing
number of $G$. Let $G_t$ be the graph obtained from
$G$ by replacing each edge of $G$ by a path of length
$t$. Then $|E(G_t)| = t|E(G)| < {\rm cr}(G) = {\rm
cr}(G_t)$. This implies that $G_t$ is not 1-planar.
However, $G_t$ contains an MN-subgraph $H$. Clearly,
$H$ contains at least one subdivided edge of $G$ in
its entirety, so $|V(H)| > t$. Since $t$ can be
arbitrarily large (see, for example, the well-known lower bound on ${\rm cr}(K_n)$), this shows that there are
infinitely many MN-graphs.
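For a concrete instance, take $G=K_n$ and use the standard counting bound ${\rm cr}(K_n)\geq \tbinom{n}{4}/5$ (every $K_5$-subgraph forces a crossing, and each crossing is shared by $n-4$ such subgraphs); then
\[
t \;\geq\; \frac{{\rm cr}(K_n)}{|E(K_n)|}-1 \;\geq\; \frac{\binom{n}{4}/5}{\binom{n}{2}}-1 \;=\; \frac{(n-2)(n-3)}{60}-1,
\]
which tends to infinity with $n$.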
Before giving the next example, it is worth noticing that 3-cycles must be embedded in a planar way in every 1-immersion of a graph in the plane.
{\bf Example 2.}
Let $K\in\{ K_5, K_{3,3} \}$ be one of the Kuratowski graphs.
For each edge $xy\in E(K)$, let $L_{xy}$ be a 5-connected triangulation
of the plane and $u,v$ be adjacent vertices of $L_{xy}$ whose degree
is at least 6. Let $L'_{xy} = L_{xy}-uv$. Now replace each edge $xy$
of $K$ with $L'_{xy}$ by identifying $x$ with $u$ and $y$ with $v$.
It is not hard to see that the resulting graph $G$ is not 1-planar
(since two of the graphs $L'_{xy}$ must ``cross each other'', but that is
not possible since they come from 5-connected triangulations).
Again, one can argue that such graphs contain large MN-subgraphs.
The paper \cite{K} and the above examples prove the existence of infinitely
many MN-graphs but do not give any concrete examples.
They provide no information on properties of MN-graphs.
Even the most basic question of whether there are infinitely many MN-graphs
whose minimum degree is at least three cannot be answered by considering
these constructions. In \cite{K},
two specific MN-graphs of order 7 and 8, respectively, are given.
One of them, the graph $K_7-E(K_3)$, is the unique
7-vertex MN-graph and since all 6-vertex graphs are
1-planar, the graph $K_7-E(K_3)$ is the MN-graph with
the minimum number of vertices. Surprisingly enough, the
two MN-graphs in \cite{K} are the only explicit MN-graphs known
in the literature.
The main problem when trying to construct 1-planar
graphs is that we have no characterization of
1-planar graphs. The set of 1-planar graphs is not
closed under taking minors, so 1-planarity cannot be
characterized by forbidding some minors.
In the present paper we construct two explicit infinite
families of MN-graphs whose minimum degree is at least three
and, correspondingly, we give
two different approaches to proving that a graph
has no plane 1-immersion.
In Sect.~\ref{Sec:2} we construct MN-graphs based
on the Kuratowski graph $K_{3,3}$. To obtain them,
we replace six edges of $K_{3,3}$ by some
special subgraphs. The minimality of these examples is easy
to verify, but their non-1-planarity needs long and delicate
arguments. Using these MN-graphs, we show that for
every integer $n\geq 63$, there are at least $2^{(n-54)/4}$
nonisomorphic minimal non-1-planar graphs of order $n$.
In Sect.~\ref{Sec:3} we describe a class of
3-connected planar graphs that have no plane
1-immersions with at least one crossing point
(\emph{{\rm PN}-graphs}, for short). Every
PN-graph has a unique plane 1-immersion, namely, its
unique plane embedding. Hence, if a
1-planar graph $G$ contains a PN-graph $H$ as a subgraph,
then in every plane 1-immersion of $G$ the subgraph
$H$ is 1-immersed in the plane in the same way.
Having constructions of PN-graphs, we can construct
1-planar and non-1-planar graphs with some desired
properties: 1-planar graphs that have exactly $k>0$
different plane 1-immersions; MN-graphs, etc.
In Sect.~\ref{Sec:4} we construct MN-graphs based
on PN-graphs. Each of these MN-graphs $G$ has as a
subgraph a PN-graph $H$ and the unique plane
1-immersion of $H$ prevents 1-immersion of the remaining part
of $G$ in the plane.
Despite the fact that minimal obstructions for 1-planarity
(i.e., the MN-graphs) have diverse structure, and despite
the fact that discovering 1-immer\-sions of specific graphs
can be very tricky, it turned out to be a hard problem
to establish hardness of 1-planarity testing.
A solution is given in Sect.~\ref{sect:NPC}, where
we show that 1-planarity testing is NP-complete,
see Theorem~\ref{thm:NPC1}. The proof is geometric in the sense
that the reduction is from 3-colorability of planar graphs
(or similarly, from planar 3-satisfiability).
In Sect.~\ref{Sec:6} we show how the proof of Theorem~\ref{thm:NPC1} can be modified to obtain a proof that $k$-planarity testing for multigraphs is NP-complete.
An extended abstract of this paper was published in Graph Drawing 2008 \cite{GD08}.
\section{Chain graphs based on $K_{3,3}$}
\label{Sec:2}
Two cycles of a graph are \emph{adjacent} if they share
a common edge. If a graph $G$ is drawn in the plane, then we say that
a vertex $x$ lies \emph{inside} (resp.~\emph{outside}) an
embedded (that is, non-self-intersecting) cycle $C$, if $x$
lies in the interior (resp.\ exterior) of $C$, and
does not lie on $C$. Having two embedded adjacent
cycles $C$ and $C'$, we say that $C$ lies inside
(resp. outside) $C'$ if every point of $C$ either
lies inside (resp. outside) $C'$ or lies on $C'$.
From this point on, by a 1-immersion of a graph we mean a plane
1-immersion. We assume that in 1-immersions,
adjacent edges do not cross each other and no edge
crosses itself. Thus, every 3-cycle of a 1-immersed graph is
embedded in the plane. Hence, given a 3-cycle of a
1-immersed graph, we can speak about its interior and
exterior. We say that an
embedded cycle \emph{separates} two vertices $x$ and
$y$ on the plane, if one of the vertices lies inside
and the other one lies outside the cycle. Two
edges $e$ and $e'$ of a graph $G$ \emph{separate}
vertices $x$ and $y$ of the graph if $x$ and $y$
belong to different connected components of the graph
$G-e-e'$.
Throughout the paper we will deal with 1-immersed graphs.
When an immersion of a graph $G$ is clear from the context,
we shall identify vertices, edges, cycles and subgraphs of
$G$ with their image in $\RR^2$ under the 1-immersion.
Then by a face of a 1-immersion of $G$ we mean any connected component
of $\RR^2\setminus G$.
By using M\"obius transformations combined with homeomorphisms
of the plane it is always possible to exchange the interior and
exterior of any embedded cycle and it is possible to change any
face of a given 1-immersion into the outer face of a 1-immersion.
Formally, we have the following
observation (which we will use without referring to it every time):
(A) Let $C$ be a cycle of a graph $G$. If $G$ has a 1-immersion $\varphi$
in which $C$ is embedded, then $G$ has a 1-immersion $\varphi'$ with the same
number of crossings as $\varphi$, in which
$C$ is embedded and all vertices of $G$ that lie inside $C$ in $\varphi$
lie outside $C$ in $\varphi'$, and vice versa.
Now we begin describing a family of MN-graphs based on the
graph $K_{3,3}$.
By a \emph{link} $L(x,y)$ connecting two vertices $x$
and $y$ we mean any of the graphs shown in
Fig.~\ref{Fig:2.1} where
$\{z,\overline{z}\}=\{x,y\}$. We say that the
vertices $x$ and $y$ are incident with the link.
The links in Figs.~\ref{Fig:2.1}(A) and (B) are called
\emph{A-link} and \emph{B-link}, respectively, and the one
in Fig.~\ref{Fig:2.1}(C) is called a \emph{base link}.
Every link has a \emph{free cycle}: both 3-cycles in
an A-link are its free cycles, while every B-link or base
link has exactly one free cycle (the cycle indicated by thick
lines in Fig.~\ref{Fig:2.1}).
\begin{figure}
\caption{Links.}
\label{Fig:2.1}
\end{figure}
By an A-\emph{chain} of length $n\geq 2$ we mean the
graph shown in Fig.~\ref{Fig:2.2}(a). By a
B-\emph{chain} of length $n\geq 2$ we mean the graph
shown in Fig.~\ref{Fig:2.2}(c) and, for $n\geq 3$, every graph
obtained from that graph in the following way: for
some integers $h_1,h_2,\ldots,h_t$, where $t\geq 1$
and $1\leq h_1<h_2<\cdots<h_t\leq n-2$, we replace the link
at the left of Fig.~\ref{Fig:2.2}(e) by the link shown
at the right, for $i=1,2,\ldots,t$. Note that, by definition,
A- and B-chains have length at least 2. We say that the
chains in Figs.~\ref{Fig:2.2}(a) and (c) connect the
vertices $v(0)$ and $v(n)$ which are called the
\emph{end vertices} of the chain. Two chains are
\emph{adjacent} if they share a common end vertex.
A-chains and B-chains will be represented as shown
in Figs.~\ref{Fig:2.2}(b) and (d), respectively,
where the arrow points to the end vertex incident
with the base link. The vertices
$v(0),v(1),v(2),\ldots,v(n)$ are the \emph{core
vertices} of the chains. Every free cycle of a link
contains exactly one core vertex. The two edges of a
free cycle $C$ incident to the core vertex
are the \emph{core-adjacent} edges of $C$.
It is easy to see that two edges $e$ and $e'$ of a
chain separate the end vertices of the chain if and
only if the edges are the core-adjacent edges of a
free cycle of a link of the chain.
\begin{figure}
\caption{A- and B-chains.}
\label{Fig:2.2}
\end{figure}
By a \emph{subchain} of a chain shown in
Figs.~\ref{Fig:2.2}(a) and (c) we mean a subgraph of
the chain consisting of links incident with $v(i)$
and $v(i+1)$ for all $i=m,m+1,\ldots,m'-1$ for some
$0\leq m<m'\leq n$. We say that the subchain \emph{connects}
the vertices $v(m)$ and $v(m')$.
A \emph{chain graph} is any graph obtained from
$K_{3,3}$ by replacing three of its edges incident with
the same vertex by A-chains and three edges incident
with another vertex in the same chromatic class by B-chains, where the chains can have
arbitrary lengths $\geq 2$. These changes are to be
made as shown in Fig.~\ref{Fig:2.3}(a). The vertices
$\Omega(1)$, $\Omega (2)$, and $\Omega (3)$ are the
\emph{base vertices} of the chain graph. The edges
joining the vertex $\Omega$ to the base vertices are
called the $\Omega$-\emph{edges}.
\begin{figure}
\caption{A chain graph $G$ and 1-planarity of the graph $G-e$.}
\label{Fig:2.3}
\end{figure}
We will show that every chain graph is an MN-graph.
\begin{lem}
\label{lem:1.1}
Let\/ $G$ be a chain graph and $e\in E(G)$.
Then $G-e$ is\/ $1$-planar.
\end{lem}
\begin{proof}
If $e$ is an $\Omega$-edge, then $G-e$ is planar and hence 1-planar.
Suppose now that $e$ is not an $\Omega$-edge.
By symmetry, we may assume that $e$ belongs
to an A- or B-chain incident to $\Omega(2)$. If $e$ is the ``middle''
edge of a B-link, then Fig.~\ref{Fig:2.3}(b) shows that the corresponding
B-chain can be crossed by an A-chain, and it is easy to see that this can be
made into a 1-immersion of $G-e$.
In all other cases, 1-immersions are
made by crossing the link $L$ whose edge $e$ is deleted with the edges incident
with the vertex $\Omega$. The upper row in Fig.~\ref{Fig:2.3}(c)
shows the cases when $L$ is a base link. The lower row covers the cases
when $L$ is an A-link or a B-link.
The edge $e$ is shown in all cases as the dotted edge.
\end{proof}
Our next goal is to show that chain graphs are not 1-planar.
In what follows we let $G$ be a chain graph and $\varphi$
a (hypothetical) 1-immersion of $G$.
\begin{lem}
\label{lem:2.1}
Let $\varphi$ be a $1$-immersion of a chain graph\/ $G$ such that
the number of crossings in $\varphi$ is minimal among all\/ $1$-immersions of $G$.
If\/ $L$ is a link in an A- or B-chain of\/ $G$, then no two edges of\/ $L$
cross each other in $\varphi$.
\end{lem}
\begin{proof}
The first thing to observe is that whenever edges $ab$ and $cd$ cross,
there is a disk $D$ having $a,c,b,d$ on its boundary, and $D$ contains these
two edges but no other points of $G$. In 1-immersions with minimum number of
crossings this implies that no other edges between the vertices $a,c,b,d$
are crossed. Similarly, if $L=L(z,\bar z)$ is a link in a chain, and an edge
incident with $z$ crosses an edge incident with $\bar z$, the whole link $L$
can be drawn in $D$ without making any crossings. This shows that the only
possible cases for a crossing of two edges $e,f$ in $L$ are the following ones,
where we take the notation from Figure~\ref{Fig:2.1} and we let $u$ be
the vertex of $L$ that is not labeled in the figure:
(a) $L$ is a B-link and $e=zv$, $f=uw$.
(b) $L$ is a B-link and $e=\bar zv$, $f=uw$.
(c) $L$ is a base link and $e=zv$, $f=uw$.
(d) $L$ is a base link and $e=vw$, $f=\bar zu$.
(e) $L$ is a base link and $e=\bar zw$, $f=vu$.
\noindent
Let $D$ be a disk as discussed above corresponding to the crossing of $e$ and $f$.
In cases (b), (d) and (e), the 3-valent vertex $u$ has all its neighbors on the boundary of $D$,
so the crossing between $e$ and $f$ can be eliminated by moving $u$ inside $D$
onto the other side of the edge $e$ (see Fig.~\ref{Fig:2.3E}(a)).
\begin{figure}
\caption{Eliminating the crossing between the edges $e$ and $f$.}
\label{Fig:2.3E}
\end{figure}
It remains to consider cases (a) and (c). Observe that the boundary of $D$
contains vertices $z,u,v,w$ in this order and that $u$ and $v$ both have
precisely one additional neighbor $\bar z$ outside of $D$. Therefore, we
can turn $\varphi$ into another 1-immersion of $G$ by swapping $u$ and $v$
and only redraw the edges inside $D$ (see Fig.~\ref{Fig:2.3E}(b)). However, this eliminates the crossing in $D$ and yields a 1-immersion with fewer crossings, a contradiction.
\end{proof}
\begin{lem}
\label{lem:2.2}
Let $\varphi$ be a $1$-immersion of a chain graph\/ $G$ such that
the number of crossings in $\varphi$ is minimal among all\/ $1$-immersions of $G$.
If\/ $\Pi$ and\/ $\Pi'$ are nonadjacent A- and B-chains, respectively, then in
$\varphi$ the following holds for every 3-cycle $C$ of\/ $\Pi$:
\begin{itemize}
\item [\rm (i)] The core vertices of\/ $\Pi'$ either
all lie inside or all lie outside $C$.
\item [\rm (ii)] If all core vertices of\/ $\Pi'$ lie
inside $($resp. outside$)$ $C$, then at most one
vertex of\/ $\Pi'$ lies outside $($resp.\ inside$)$ $C$.
\end{itemize}
\end{lem}
\begin{proof}
First we show (i). If $C$ does not contain the vertex $A$, then every
two core vertices of $\Pi'$ are connected by four
edge-disjoint paths not passing through the vertices of $C$,
hence (i) holds for $C$.
Suppose now that $C$ contains the vertex $A$ and that
core vertices of $\Pi'$ lie inside and outside $C$.
Then there is a link $L(z,\overline{z})$ of $\Pi'$
such that the vertex $z$ lies inside and the vertex
$\overline{z}$ lies outside $C$. We may assume
without loss of generality that $\Pi$ and $\Pi'$ are
incident to the base vertices $\Omega(1)$ and
$\Omega(2)$, respectively, and (taking (A) into account) that the vertex $z$
(if $z\neq B$) separates the vertices $B$ and
$\overline{z}$ in $\Pi'$ (see Fig.~\ref{Fig:2.4}(a), where in
$L(z,\overline{z})$ the dotted line indicates that
the link has either edge $\varepsilon z$ or
$\varepsilon \overline{z}$; also if $\bar z = \Omega(2)$,
then the link indicated in Fig.~\ref{Fig:2.4}(a) is a base link).
The 3-cycle $C$ crosses at least two edges of
$L(z,\overline{z})$. The vertex $z$ (resp.
$\overline{z}$) is connected to each of the vertices
$\Omega(1)$ and $\Omega(3)$ (resp. to the vertex
$\Omega(2)$) by two edge-disjoint paths not passing through
$V(C)$ or through the noncore vertices of
$L(z,\overline{z})$. Hence, $\Omega(1)$
and $\Omega(3)$ lie inside $C$ (resp. $\Omega(2)$
lies outside $C$). It follows that the vertex
$\Omega$ lies inside $C$ and the edge $(\Omega,\Omega(2))$
is the third edge that crosses $C$. We conclude that $C$
crosses exactly two edges of $L(z,\overline{z})$ and
the two edges separate $z$ from $\overline{z}$ in
$L(z,\overline{z})$. Thus, the two edges are the
core-adjacent edges of the free cycle of
$L(z,\overline{z})$.
Hence, in $\varphi$, the link
$L(z,\overline{z})$ is 1-immersed as shown
in Fig.~\ref{Fig:2.4}(b), where the dotted edges indicate
alternative possibilities for the position of $z$ (at top) or $\overline{z}$ (at bottom).
\begin{figure}
\caption{Cases in the proof of Lemma \ref{lem:2.2}.}
\label{Fig:2.4}
\end{figure}
Let $v,\bar v$ be the vertices of $C$ different from $A$
and let $x$ be the fourth vertex of the link containing $C$.
The vertex $x$ is connected to $\Omega(1)$ by two
edge-disjoint paths not passing through the vertices of $C$,
hence $x$ lies inside $C$. At most two vertices of
$C$ lie inside the free cycle of $L(z,\overline{z})$. Suppose exactly one of $v$ and $\overline{v}$ is inside the free cycle. If we are in the case of the bottom of Fig.~\ref{Fig:2.4}(b), then the path $vx\overline{v}$ cannot lie inside $C$, a contradiction. In the case of the top of Fig.~\ref{Fig:2.4}(b), if the path $vx\overline{v}$ lies inside $C$, then $x$ must lie
inside a 3-cycle $Q$ of $L(z,\overline{z})$ incident
to $z$, whereas $A$ lies outside $Q$, a
contradiction, since $Q$ is not incident to $B$ and in
$G$ there are two edge-disjoint paths connecting $x$
to $A$ and not passing through the vertices $v$,
$\overline{v}$, and the vertices of $Q$.
If either both $v$ and $\overline{v}$ or none of them
lie inside the free 4-cycle, then in the case of
Fig.~\ref{Fig:2.4}(c) (resp. (d)), where we depict
the two possible placements of the nonbase link
$L(z,\overline{z})$, there are two edge-disjoint
paths of $G$ connecting $A$ and $\Omega(3)$ (resp. $A$ and
$\Omega(2)$) and not passing through $z$ (resp.
$\overline{z}$), a contradiction. Reasoning exactly
in the same way, we also obtain a contradiction
when $L(z,\overline{z})$ is a base link.
Now we prove (ii). By (A), we may assume that all core
vertices of $\Pi'$ lie inside $C$. By inspecting Fig.~\ref{Fig:2.1},
it is easy to check
that for every link $L$, for every set $W$ of noncore
vertices of $L$ such that $|W|\geq 2$, there are at least four edges
joining $W$ with $V(L)\setminus W$. Hence, if at least two noncore
vertices belonging to the same link of $\Pi'$ lie outside $C$,
then at least four edges join them with the vertices of $\Pi'$
lying inside $C$, a contradiction. Every noncore
vertex of $\Pi'$ has valence at least 3. Hence if exactly $n$ ($n\geq 2$)
noncore vertices of $\Pi'$ lie outside $C$ and if they all belong to
different links, then at least $3n\geq 6$ edges join them with
the vertices of $\Pi'$ lying inside $C$, a contradiction.
\end{proof}
Two chains \emph{cross} if an edge of one crosses an edge of the other.
\begin{lem}
\label{lem:2.3}
Let $\varphi$ be a $1$-immersion of a chain graph\/ $G$ such that
the number of crossings in $\varphi$ is minimal among all\/ $1$-immersions of $G$.
If\/ $\Pi$ and\/ $\Pi'$ are nonadjacent A- and B-chains,
respectively, then $\Pi$ does not cross $\Pi'$ in
$\varphi$.
\end{lem}
\begin{proof}
Suppose, for a contradiction, that $\Pi$ crosses $\Pi'$.
Then an edge of a link $L(z,\overline{z})$ of $\Pi'$ crosses
a 3-cycle $C=xv\bar v$ of a link $L$ of $\Pi$.
Let $\overline{C}=\bar x v \bar v$ be a 3-cycle that is adjacent
to $C$ in $L$. (If $L$ is not a base link, then $L=C\cup\overline{C}$
and $x,\bar x$ are the core vertices of $L$.)
By Lemma~\ref{lem:2.2}, we may assume that all core vertices
of $\Pi'$ lie outside $C$ and that exactly one vertex
$u$ of $\Pi'$ lies inside $C$. The vertex $u$ is
3-valent and is not a core vertex. The three edges incident with $u$ cross all three edges of $C$, hence $\overline{C}$ does not cross $C$. If $\overline{C}$ lies inside $C$, then one of the three edges incident with $u$ crosses $C$ and $\overline{C}$, a contradiction. If $C$ lies inside $\overline{C}$, then we consider the plane as the complex plane and
apply the M\"obius transformation $f(z)=1/(z-a)$ with the point $a$ taken inside $\overline{C}$ but outside $C$. This yields a 1-immersion of $G$ such that
(a) $C$ lies outside $\overline{C}$, $\overline{C}$ lies outside $C$, and exactly one vertex $u$ of $\Pi'$ lies inside $C$.
\noindent Therefore, we may assume that in $\varphi$ we have (a). Since the three edges incident with $u$ cross all three edges of $C$, at least two vertices of $\Pi'$ lie outside $\overline{C}$, hence, by Lemma~\ref{lem:2.2}, all core vertices of $\Pi'$ lie outside $C$ and $\overline{C}$. Since the edge $v\bar v$ in $C\cap\overline{C}$ is crossed by
an edge of $L(z,\bar z)$, also $\overline C$ contains precisely one
vertex $u'$ of $L(z,\bar z)$ and $u'$ has degree 3 and is not a core
vertex.
\begin{figure}
\caption{Cases in the proof of Lemma \ref{lem:2.3}.}
\label{Fig:2.5}
\end{figure}
Adjacent trivalent vertices $u,u'$ cannot be contained in a base
link. Therefore, $L(z,\overline{z})$ is not a base link.
Let $L(z,\overline{z})$ be depicted as shown in Fig.~\ref{Fig:2.5}(a).
Because of symmetry, we may assume that $u=w$ and
$u'=\varepsilon$, and that the crossings are as shown in
Fig.~\ref{Fig:2.5}(b) and (c).
In the case of Fig.~\ref{Fig:2.5}(b) the adjacent vertices $z$ and $\overline{w}$
of $L(z,\overline{z})$ are separated by the 3-cycle $\overline{z} w \varepsilon$,
whose edges are crossed by three edges different from the edge
$z\overline{w}$, a contradiction.
Consider the case in Fig.~\ref{Fig:2.5}(c). If $x$ and $\overline{x}$
are core vertices of $\Pi$, then they are separated by the 3-cycle
$\overline{z} \overline{w} \varepsilon$ of $\Pi'$, a contradiction, since there are 4 edge-disjoint paths between $x$ and $\overline{x}$ that avoid this 3-cycle (the 3-cycle does not contain the vertex $B$).
Suppose that $x$ and $\overline{x}$ are not two core vertices. This is possible only when $L$ is a base link. The 3-cycle $\overline{z} w \varepsilon$ of $L(z,\overline{z})$ crosses the three edges joining the vertex $\overline{v}$ of $L$ with three vertices $x$, $v$, and $\overline{x}$ of $L$. The fifth vertex of $L$ is adjacent to at least one vertex from $x$, $v$, and $\overline{x}$, hence it lies outside $\overline{z} w \varepsilon$ and is not adjacent to $\overline{v}$. Thus, $\overline{v}$ has valence 3 in $L$. If $\overline{v}$ is a core vertex, then, since the 3-cycle $\overline{z} w \varepsilon$ does not contain the vertex $B$, $\overline{v}$ is connected to one of the vertices $x$, $v$, and $\overline{x}$ by a path passing through $B$ and not passing through the vertices of $\overline{z} w \varepsilon$, a contradiction. Hence $\overline{v}$ is not a core vertex. The link $L$ has
exactly one noncore vertex $\overline{v}$ of valence 3 and the vertices $x$
and $\overline{x}$ are adjacent. Hence the 3-cycle $x \overline{v} \overline{x}$
separates two core vertices $z$ and $\overline{z}$ of $\Pi'$, a contradiction, since $z$ and $\overline{z}$ are connected by a path not passing through the vertices of the 3-cycle $x \overline{v} \overline{x}$.
\end{proof}
\begin{thm}
\label{thm:2.1}
Every chain graph is an MN-graph.
\end{thm}
\noindent
\emph{Proof}. Let $G$ be a chain graph. By Lemma \ref{lem:1.1}, it suffices
to prove that $G$ is not 1-planar. Consider, for a contradiction, a
1-immersion $\varphi$ of $G$ and suppose that $\varphi$ has minimum
number of crossings among all 1-immersions of $G$.
We know by Lemma~\ref{lem:2.3} that non-adjacent chains
do not cross each other. In the sequel we will consider
possible ways in which the $\Omega$-edges cross one of the
chains. Let us first show that such a crossing is inevitable.
\begin{clm}
\label{clm:2.0}
At least one of the chains contains a link $L=L(x,y)$ such that
every $(x,y)$-path in $L$ is crossed by an $\Omega$-edge.
\end{clm}
\begin{proof}
Suppose that for every link $L=L(x,y)$, some $(x,y)$-path in $L$ is
not crossed by any $\Omega$-edge. Then every chain contains a path
joining its end vertices that is not crossed by the $\Omega$-edges.
All six such paths plus the $\Omega$-edges form a subgraph of $G$
that is homeomorphic to $K_{3,3}$. By Lemma~\ref{lem:2.3},
the only crossings between subdivided edges of this $K_{3,3}$-subgraph
are among adjacent paths. However, it is easy to eliminate crossings
between adjacent paths and obtain an embedding of $K_{3,3}$ in
the plane. This contradiction completes the proof of the claim.
\end{proof}
Let $L=L(x,y)$ be a link in an A- or B-chain $\Pi$ whose $(x,y)$-paths are
all crossed by the $\Omega$-edges. We may assume that $L$ is contained in
a chain connecting the vertex $\Omega(1)$ with $A$ or $B$ and that $x$ separates $y$ and $\Omega(1)$ in $\Pi$.
By Lemma \ref{lem:2.1}, the induced 1-immersion of $L$ is an embedding. The vertex $\Omega$ lies inside a face of $L$ and all $\Omega$-edges that cross $L$ cross the edges of the boundary of the face. Considering the possible embeddings of $L$, it is easy to see that all $(x,y)$-paths are crossed by $\Omega$-edges only in the case when $\Omega$ lies inside a face of $L$ whose boundary contains two core-adjacent edges of a free cycle $C$ of $L$, and two $\Omega$-edges cross the two core-adjacent edges. By (A), we may assume that $\Omega$ lies inside $C$.
If $C$ is a $k$-cycle, $k\in\{3,4\}$, then $L$ has another cycle $C'$ that shares with $C$ exactly $k-2$ edges and contains a core vertex not belonging to $C$. If $C$ lies inside $C'$, then we consider the plane as the complex plane and
apply the M\"obius transformation $f(z)=1/(z-a)$ with the point $a$ taken inside $C'$ but outside $C$. This yields a 1-immersion of $G$ such that $C$ does not lie inside $C'$ and $\Omega$ lies inside $C$. Hence, we may assume that $C$ does not lie inside $C'$, that $\Omega$ lies inside $C$ and two $\Omega$-edges $h$ and $h'$ cross two core-adjacent edges of $C$. Note that any two among the vertices $A,B,\Omega(2),\Omega(3)$ are joined
by four edge-disjoint paths not using any edges in the chain $\Pi$ containing $L$. Therefore, these four vertices of $G$ are all immersed in the same face of $L$.
Let the $\Omega$-edges $h$ and $h'$ join the vertex $\Omega$ with base vertices $\Omega(i)$ and $\Omega(j)$, respectively. If the third base vertex $\Omega(\ell)$ is $\Omega(2)$ or $\Omega(3)$, then $\Omega(2)$ and $\Omega(3)$ lie inside different faces of $L$, a contradiction, hence $\Omega(\ell)=\Omega(1)$.
The vertex $\Omega(1)$ is connected to one of the vertices $A$ and $B$ by two edge-disjoint paths, not passing the vertices of $C$. Hence, if $C$ is a 3-cycle, then $\Omega(1)$ is not inside $C$.
Now the embeddings of possible links $L$ (so that we can join the vertices $\Omega$ and $\Omega(1)$ by an edge not violating the 1-planarity) are shown in Figure \ref{Fig:2.6}.
\begin{figure}
\caption{The $\Omega$-edges crossing a link}
\label{Fig:2.6}
\end{figure}
Let us now consider particular cases (a)--(f) of Figure \ref{Fig:2.6}.
(a): In this case, $L$ is a base link and $x=\Omega(1)$. Consider two edge-disjoint paths in a chain $\overline{\Pi}$ joining $\Omega(1)$ with the vertex $A$ or $B$ which is not incident with $\Pi$. These paths must cross the edges $e$ and $f$ indicated in the figure. Let $a$ be the edge crossing $e$ and $b$ be the edge crossing $f$. It is easy to see that $a$ and $b$ cannot be both incident with $\Omega(1)$ since $\Omega(1)$ is incident with three edges of the base link in the chain $\overline{\Pi}$. The 1-planarity implies that the edges $a,b$ and the vertex $\Omega(1)$
separate the graph $G$. Therefore, $a$ and $b$ are core-adjacent edges of a link in the chain $\overline{\Pi}$. If the edge $g$ (shown in the figure) is crossed by an edge $c$ of $\overline{\Pi}$, then also $a,c$ and $\Omega(1)$ separate the graph. Thus $a,c$ would be core-adjacent edges in a link in $\overline{\Pi}$ as well, a contradiction. But if $g$ is not crossed and $a$ is not incident with $\Omega(1)$, then the edge $a$ and the vertex $\Omega(1)$ separate $G$, a contradiction. If $a$ is incident with $\Omega(1)$, then $b$ is not (as proved above). Now we get a contradiction by considering the separation of $G$ by the edge
$b$ and the vertex $\Omega(1)$.
(b): In this case, $L$ is a B-link, the cycle $C'$ lies inside $C$ and $\Omega(1)$ is inside $C$ but outside $C'$ (see Fig.~\ref{Fig:2.6} (b)). In the figure, the vertex labeled $x$ is actually $x$ and not $y$ because the vertex is not $B$ and is connected to $\Omega(1)$ by two edge-disjoint paths not passing through the vertices of $C$, and $y$ is connected to the vertex $B$ (lying outside $C$) by two edge-disjoint paths not passing through the vertices of $C$. Again, consider two edge-disjoint paths in the A-chain $\overline{\Pi}$ joining $\Omega(1)$ with the vertex $A$. Their edges $a$ and $b$ (say) must cross the edges $e$ and $f$, respectively. As in the proof of case (a), we see that the edges $a$, $b$ and the vertex $\Omega(1)$ separate $G$, thus $a$ and $b$ are core-adjacent edges of a free cycle of a link of $\overline{\Pi}$. This free cycle has length 3 and separates $x$ from $\Omega(1)$, a contradiction, since there is a B-subchain from $x$ to $\Omega(1)$ disjoint from the free 3-cycle and not containing the edges of $L$ incident with $x$.
(c): In this case, $L$ is a B-link, the cycle $C'$ lies outside $C$ and $\Omega(1)$ is inside $C$. The vertex $\Omega(1)$ is connected by 4 edge-disjoint paths with vertices $x$ and $A$ such that the paths do not pass through noncore vertices of $L$, a contradiction.
(d) and (e): In these cases, $L$ is a B-link and $\Omega(1)$ lies inside a 3-cycle of $L$. Two edge-disjoint paths from $\Omega(1)$ to $A$ cross the edges $e$ and $f$ of $L$. One of the paths also crosses the edge $g$. Then the edges crossing $e$ and $g$ separate $G$, a contradiction, since $G$ is 3-edge-connected.
(f): In this case, $L$ is an A-link and the chain $\Pi$ is joining $\Omega(1)$
with the vertex $A$. Again, consider two edge-disjoint paths in the B-chain $\overline{\Pi}$ joining $\Omega(1)$ with $B$. Their edges $a$ and $b$ (say) must cross
the edges $e$ and $f$, respectively.
As in the proof of case (a), we see that the edges $a,b$ and the vertex
$\Omega(1)$ separate $G$, thus $a,b$ are core-adjacent edges
in a link $L'=L(z,z')$ in $\overline{\Pi}$. Let $z$ be the core vertex of $L'$ incident
with the edges $a=zp$ and $b=zq$. Note that in $L'$, vertices $p,q$ have
two common neighbors, a vertex $r$ of degree 3 in $G$ and the core vertex $z'$,
and that $z'$ is adjacent to $r$. Inside $L$ we have the subchain $\Pi'$ of the A-chain $\Pi$ connecting $x$ with $\Omega(1)$. Hence, the free cycle of $L'$ containing the edges $a$ and $b$ must have length at least 4 (that is, $L'$ is not a base link) and $z$ is immersed outside $L$,
while $p,q,r,z'$ are inside. The subchain $\Pi'$ has two
edge-disjoint paths that are crossed by the paths $prq$ and $pz'q$.
Each of the paths $prq$ and $pz'q$ crosses core-adjacent edges in $\Pi$ since the crossed edges
and $\Omega(1)$ separate $G$. Thus, they enter faces of these links
that are bounded by free cycles of the links. However, the edge $rz'$
would need to cross one edge of each of these two free cycles, hence the two free cycles are adjacent, that is, they are the two free cycles of an A-link of $\Pi$. Then the vertex $z'$ lies inside one of the two free cycles and the free cycle separates $z'$ from $\Omega(1)$, a contradiction, since $z'$ is connected with $\Omega(1)$ by a B-subchain.
$\square$
The following theorem shows how chain graphs can be used to
construct exponentially many nonisomorphic MN-graphs of order $n$.
\begin{thm}\label{thm:2}
For every integer $n\geq 63$, there are at least
$2^{(n-54)/4}$ nonisomorphic MN-graphs of order $n$.
\end{thm}
\begin{proof}
The A-chain of length $t$ has $3t+2$
vertices and a B-chain of length $t$ has $4t+1$
vertices. Consider a chain graph whose three A-chains
have length 2, 2, and $\ell\geq 2$, respectively, and
whose B-chains have length 2, 3, and $t\geq 4$,
respectively. The graph has $35+3\ell+4t$ vertices.
Applying the modification shown in
Fig.~\ref{Fig:2.2}(e) to links of the two B-chains of the
graph which have length at least 3, we obtain $2^{t-1}$ nonisomorphic chain graphs
of order $35+3\ell+4t$, where $\ell\geq 2$ and $t\geq
4$. We claim that for every integer $n\geq 63$, there
are integers $2\leq \ell\leq 5$ and $t\geq 4$ such
that $n=35+3\ell+4t$. Indeed, if $n\equiv
0,1,2,3\pmod{4}$, put $\ell=3,2,5,4$, respectively.
If $n=35+3\ell+4t$, where $2\leq \ell\leq 5$, then
$t\geq n/4-50/4$. Hence, there are at
least $2^{\frac{n}{4}-\frac{54}{4}}$ nonisomorphic
chain graphs of order $n\geq 63$. Since every chain
graph is an MN-graph, the theorem follows.
\end{proof}
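The counting in this proof is easy to check mechanically. The following small Python sketch (an illustration only, not part of the paper) verifies, for a range of $n\geq 63$, that the choice of $\ell$ by residue class yields $n=35+3\ell+4t$ with $2\leq\ell\leq 5$ and $t\geq 4$, and that $t-1\geq(n-54)/4$, so that $2^{t-1}\geq 2^{(n-54)/4}$.
\begin{verbatim}
# Sketch: verify the counting argument in the proof of Theorem 2.
def decomposition(n):
    l = {0: 3, 1: 2, 2: 5, 3: 4}[n % 4]   # choice of l by residue class
    t = (n - 35 - 3 * l) // 4
    assert n == 35 + 3 * l + 4 * t and 2 <= l <= 5 and t >= 4
    return l, t

for n in range(63, 10001):
    l, t = decomposition(n)
    assert t - 1 >= (n - 54) / 4          # hence 2**(t-1) >= 2**((n-54)/4)
print("counting argument verified for 63 <= n <= 10000")
\end{verbatim}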
\section{PN-graphs} \label{Sec:3}
By a \emph{proper} 1-immersion of a graph we mean a
1-immersion with at least one crossing point.
Let us recall that a PN-\emph{graph} is a planar graph
that does not have proper 1-immersions.
In this section we describe a class of PN-graphs and
construct some graphs of the class. They will
be used in Section~\ref{Sec:4} to construct MN-graphs.
For
every cycle $C$ of a graph $G$, denote by $N(C)$ the set
of all vertices of $G$ not belonging to $C$ but
adjacent to $C$. Two disjoint edges $vw$ and $v'w'$ of a graph $G$
are \emph{paired} if the four vertices $v,w,v',w'$
are all four vertices of two adjacent 3-cycles (two cycles are \emph{adjacent} if they share an edge).
Following Tutte, we call a cycle $C$ of a graph $G$ \emph{peripheral}
if it is an induced cycle in $G$ and $G-V(C)$ is connected.
If $G$ is 3-connected and planar, then the face boundaries in its
(combinatorially unique) embedding in the plane are
precisely its peripheral cycles.
\begin{thm}\label{thm:3.1}
Suppose that a 3-connected planar graph $G$ satisfies
the following conditions:
\begin{itemize}
\myitemsep
\item [{\rm (C1)}] Every vertex has degree at least\/ $4$
and at most\/ $6$.
\item [{\rm (C2)}] Every edge belongs to at least one
$3$-cycle.
\item [{\rm (C3)}] Every $3$-cycle is peripheral $($in other words, there are no separating 3-cycles$)$.
\item [{\rm (C4)}] Every $3$-cycle is adjacent to at most
one other $3$-cycle.
\item [{\rm (C5)}] No vertex belongs to three mutually
edge-disjoint $3$-cycles.
\item [{\rm (C6)}] Every $4$-cycle is either peripheral or is
the boundary of two adjacent triangular faces $($this means that there are no separating 4-cycles$)$.
\item [{\rm (C7)}] For every $3$-cycle $C$, any two
vertices of $V(G)\setminus (V(C)\cup N(C))$ are
connected by four edge-disjoint paths not passing through
the vertices of $C$.
\item [{\rm (C8)}] If an edge $vw$ of a
nontriangular peripheral cycle $C$ is paired with an
edge $v'w'$ of a nontriangular peripheral cycle
$C'$, then:
\begin{enumerate}\myitemsep
\item [{\rm (i)\,}]
$C$ and $C'$ have no vertices in common;
\item [{\rm (ii)}]
any two vertices $a$ and $a'$ of
$C$ and $C'$, respectively, such that
$\{a,a'\}\not\subseteq\{v,w,v',w'\}$ are
non-adjacent and are not connected by a path
$aba'$ of length\/ $2$, where $b$ does not
belong to $C$ and $C'$.
\end{enumerate}
\item [{\rm (C9)}] $G$ does not contain the subgraphs
shown in Fig.~\ref{Fig:3.1} $($in this figure, $4$-valent
$($resp.\ $5$-valent$)$ vertices of $G$ are encircled
$($resp.\ encircled twice$)$ and the two starred vertices can be the same vertex$)$.
\end{itemize}
Then $G$ has no proper $1$-immersion.
\end{thm}
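Conditions (C1)--(C3) are straightforward to test on a given graph; a small sketch using \texttt{networkx} (an assumed helper, not code from the paper) is shown below. It uses the fact that a $3$-cycle of a simple graph is automatically induced, so by (C3) it is peripheral exactly when deleting its three vertices leaves a connected graph. Conditions (C4)--(C9) additionally involve the peripheral cycles and the plane embedding and are not covered by this sketch.
\begin{verbatim}
# Sketch: test conditions (C1)-(C3) of Theorem 3.1 for a networkx.Graph G.
import itertools
import networkx as nx

def check_C1_C2_C3(G):
    # (C1): every vertex has degree at least 4 and at most 6
    c1 = all(4 <= d <= 6 for _, d in G.degree())
    # (C2): every edge lies on a 3-cycle (its endpoints share a neighbour)
    c2 = all(set(G[u]) & set(G[v]) for u, v in G.edges())
    # (C3): no separating 3-cycles: removing the vertices of any triangle
    #       leaves a connected graph (a 3-cycle is automatically induced)
    triangles = [t for t in itertools.combinations(G.nodes(), 3)
                 if all(G.has_edge(a, b)
                        for a, b in itertools.combinations(t, 2))]
    c3 = all(nx.is_connected(G.subgraph(set(G.nodes()) - set(t)))
             for t in triangles)
    return c1, c2, c3
\end{verbatim}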
\begin{figure}
\caption{Forbidden subgraphs.}
\label{Fig:3.1}
\end{figure}
\begin{proof}
Denote by $f$ the unique plane
embedding of $G$. Suppose, for a contradiction, that there is a
proper 1-immersion $\varphi$ of $G$. Below we consider the 1-immersion
and show that then $G$ has a subgraph which is excluded by (C8) and (C9),
thereby obtaining a contradiction. In the figures below, the encircled
letter $f$ (resp. $\varphi$) at the top left of a figure means that the
figure shows a fragment of the plane embedding $f$
(resp.\ 1-immersion $\varphi$).
\begin{lem}\label{lem:3.1}
In $\varphi$, there is a $3$-cycle such that there is a vertex inside
and a vertex outside the cycle.
\end{lem}
\begin{proof} The 1-immersion $\varphi$ has crossing
edges $e$ and $e'$. By (C2), the crossing edges
belong to different 3-cycles. If the 3-cycles are nonadjacent, then we
apply the following obvious observation:
(a) If two nonadjacent 3-cycles $D$ and $D'$ cross each other,
then $D$ has a vertex inside $D'$ and a vertex outside $D'$.
If $e=xy$ and $e'=x'y'$ belong to adjacent 3-cycles $xyy'$
and $x'yy'$, respectively (see Fig.~\ref{Fig:3.2}), then, by (C4),
there are nontriangular peripheral cycles $C$ and $C'$ containing $e$
and $e'$, respectively. The cycles $C$ and $C'$ intersect at some point
$\delta$ different
from the intersection point of edges $e$ and $e'$. By
(C8)(i), the two cycles do not have a common vertex,
hence $\delta$ is the intersection point of two edges. By (C2),
these two edges belong to some 3-cycles, $D$ and $D'$.
Property (C8)(ii) implies that $D$ and $D'$ are nonadjacent 3-cycles.
By (a), the proof is complete.
\end{proof}
\begin{figure}
\caption{Crossing two adjacent 3-cycles.}
\label{Fig:3.2}
\end{figure}
\begin{lem}\label{lem:3.3}
If\/ $C=u_1u_2u_3$ is a $3$-cycle such that there is a vertex
inside and a vertex outside $C$, then there is
only one vertex inside $C$ or only one vertex outside
$C$, and this vertex belongs to $N(C)$.
\end{lem}
\begin{proof} By (C7), we may assume that all vertices of
$V(G)\setminus (V(C)\cup N(C))$ lie outside $C$.
Then there can be only vertices of $N(C)$ inside $C$.
To prove the lemma, it suffices to show the following:
(a) For every $Q\subseteq N(C)$, $|Q|\geq 2$,
at least four edges join vertices of
$Q$ to vertices in $V(G)\setminus(V(C)\cup Q)$.
By (C1), every vertex of $Q$ has valence at least 4. By (C4),
every vertex of $N(C)$ is adjacent to at most two vertices of $C$.
We claim that if a vertex $v\in N(C)$ is adjacent to two vertices
$u_1$ and $u_2$ of $C$, then $v$ is not adjacent to other vertices
of $N(C)$. Suppose, for a contradiction, that $v$ is adjacent to
a vertex $w\in N(C)$. Then, by (C4), the vertex $w$ can be adjacent
only to $u_3$ and the 4-cycle $vwu_3u_2$ is not the boundary of two adjacent 3-cycles, hence, by (C6), the 4-cycle is peripheral.
Then, by (C3), any two of the three edges $u_2v$, $u_2u_1$,
and $u_2u_3$ are two edges of a peripheral cycle, hence
$u_2$ has valence 3, contrary to (C1).
Now to prove (a) it suffices to prove the following claim:
(b) For every $Q\subseteq N(C)$, $|Q|=1$ (resp. $|Q|\geq 2$), such that
every vertex of $Q$ is adjacent to exactly one vertex of $C$, at least
2 (resp. 4) edges join vertices of
$Q$ to vertices of $V(G)\setminus(V(C)\cup Q)$.
The claim is obvious for $|Q|\in\{1,2\}$.
For $|Q|=3$, it suffices to show that the three vertices of $Q$ are
not all pairwise adjacent. Suppose, for a contradiction, that the vertices
$v_1$, $v_2$, and $v_3$ of $Q$ are pairwise adjacent. Then, by (C4),
the three vertices of $Q$ are not adjacent to the same vertex of $C$.
Let $v_1$ and $v_2$ be adjacent to the vertices $u_1$ and $u_2$ of $C$,
respectively. Then any two of the edges $v_1u_1$, $v_1v_2$, and
$v_1v_3$ are two edges of a 3-cycle (peripheral cycle) or a 4-cycle
which is not the boundary of two adjacent 3-cycles, so by (C6),
that 4-cycle is also peripheral. Hence, $v_1$ has valence 3,
contrary to (C1).
For $|Q|\geq 4$, it suffices to show that no vertex of $Q$ is
adjacent to three other vertices in $Q$. Suppose, for a contradiction,
that $v\in Q$ is adjacent to $w_1, w_2, w_3 \in Q$. If $v$ is adjacent to
$u_1$, then the edge $vu_1$ belongs to three cycles $D_1$, $D_2$, and $D_3$
such that for $i=1,2,3$,
the cycle $D_i$ contains edges $vu_1$ and $vw_i$, has length 3 or 4,
and if $D_i$ has length 4, then $D_i$ is not the boundary of two adjacent
3-cycles. By (C3) and (C6), these three cycles are peripheral.
This contradiction completes the proof of (b).
\end{proof}
Suppose that a vertex $h$ belongs to two adjacent 3-cycles
$hvw$ and $hvw'$. Since $\deg(h)\ge4$, $h$ is adjacent to a vertex
$u\not\in\{v,w,w'\}$. By (C2), the edge $hu$
belongs to a 3-cycle $huu'$. By (C4),
$u'\not\in\{v,w,w',u\}$. Hence, we have the
following:
(B) If an edge $e$ is contained in two 3-cycles of $G$, then both
endvertices of $e$ have valence at least 5.
In the remainder of the proof of Theorem \ref{thm:3.1},
we will show that any two crossing
edges of the proper 1-immersion $\varphi$ belong to a subgraph
that is excluded by (C8) and (C9).
By Lemma~\ref{lem:3.1}, there is a 3-cycle
$C=xyz$ such that there is a vertex inside and a
vertex outside $C$. By Lemma~\ref{lem:3.3}, there is only one
vertex $v$ inside $C$ and $v$ is adjacent to $x$.
Now we show that there is a 3-cycle $B=vuw$
disjoint from $C$. Let $x,a_1,a_2,\ldots,a_t$
($t\geq 3$) be all vertices adjacent to $v$. Suppose
there is a 3-cycle $D=va_ib$, where
$b\in\{x,y,z\}$. If $b\in\{y,z\}$, then the 3-cycle
$xvb$ is adjacent to two 3-cycles $C$ and $D$,
contrary to (C4). Hence $D=va_ix$. By (C4), at
most two vertices of $\{a_1,a_2,\ldots,a_t\}$ are
adjacent to $x$. Hence, there is a vertex $a_j$ such
that a 3-cycle $B$ containing the edge $va_j$ is
disjoint from $C$.
Since there is only one vertex $v$ inside $C$,
exactly two edges of $B$ cross edges of $C$. First,
suppose that $B$ separates $x$ from $y$ and $z$
(Fig.~\ref{Fig:3.3}(a)). By (C2), the edge $xv$
belongs to a 3-cycle $R=xva$. If
$a\not\in\{y,z,u,w\}$, then two of the vertices
$y,z,u,w$ lie inside $R$ and the other two vertices
lie outside $R$ (see Fig.~\ref{Fig:3.3}(b)), contrary
to Lemma~\ref{lem:3.3}. So, we may assume, without
loss of generality, that $a=z$
(Fig.~\ref{Fig:3.3}(c)). Then the vertex $x$ belongs
to two adjacent 3-cycles, $C$ and $vxz$, hence, by (B),
$\deg(x)\ge5$. By (C4), $x$ is not adjacent to $u$ or $w$.
Since $x$ is the only vertex inside $B$, $x$ has valence at most 4,
a contradiction. Hence, $B$ cannot separate $x$ from
$y$ and $z$.
\begin{figure}
\caption{The 3-cycle $B$ separates $x$ from $y$ and $z$.}
\label{Fig:3.3}
\end{figure}
Now suppose that $B$ separates the vertices $y$ and
$z$, and, without loss of generality, let $z$ lie
inside $B$ (Fig.~\ref{Fig:3.4}(a)). If $z$ is
adjacent to $v$, then, by (B), $z$ has valence at
least 5, hence $z$ is adjacent to a vertex
$b\in\{u,w\}$. But then the 3-cycle $xzv$ is
adjacent to two 3-cycles, $C$ and $vbz$, contrary
to (C4). If $z$ is adjacent to $u$ and $w$, then the
edge $xz$ belongs to three peripheral cycles, $C$,
$xvuz$, and $xvwz$, a contradiction, since
every edge belongs to at most two peripheral cycles.
Hence, since $z$ is the only vertex inside $B$, $z$
has valence 4, $z$ is not adjacent to $v$ and is
adjacent to exactly one of the vertices $u$ and $w$. The vertices $u$ and $w$ are not symmetric in Fig.~\ref{Fig:3.4}(a), so we have to consider two cases.
\emph{Case 1. The vertex $z$ is adjacent to $u$ $($Fig.~\ref{Fig:3.4}$($b$)$$)$.}
\begin{figure}
\caption{The 3-cycle $B$ separates $y$ and $z$.}
\label{Fig:3.4}
\end{figure}
Consider the vertex $v$. If $v$ is adjacent to $y$,
then (see Fig.~\ref{Fig:3.4}(b)) $x$ is incident with
exactly three peripheral cycles and has valence 3, a
contradiction. Hence, $v$ is not adjacent to $y$ and
since $v$ is the only vertex inside $C$, $v$ has
valence 4 (see Fig.~\ref{Fig:3.4}(c)). By (C2),
we obtain a subgraph shown in
Fig.~\ref{Fig:3.4}(d). By (C9), at least one of the
vertices $u$ and $x$, say $u$, is not 4-valent.
Then, by (C5), at least one of the edges $au$ and $uw$ belongs
to two 3-cycles. Here we have two subcases to consider.
\emph{Subcase 1.1. The edge $uw$ belongs to two
3-cycles $uwv$ and $uwb$
$($Fig.~\ref{Fig:3.5}$($a$)$$)$.}
\begin{figure}
\caption{The edge $uw$ belongs to two 3-cycles.}
\label{Fig:3.5}
\end{figure}
Now, $a$ is the only vertex inside the
3-cycle $uwb$ (Fig. \ref{Fig:3.5}(b)). By (C4),
$a$ is non-adjacent to $w$ and $b$, hence $a$ has
valence 4. The edges $yz$ and $za$
(see Fig. \ref{Fig:3.5}(a)) belong to a
nontriangular peripheral cycle $yzac\ldots$. The
edge $ac$ belongs to a 3-cycle $acd$ and
$b$ is the only vertex inside the 3-cycle
$acd$. The vertex $b$ is not adjacent to $c$,
since the 4-cycle $bcau$ cannot be peripheral
(see Fig.~\ref{Fig:3.5}(a)). Since
$b$ has valence at least 4, $b$ is adjacent to $d$
and has valence 4. The 4-cycle $dbua$ is peripheral,
so we obtain a subgraph of $G$ shown in Fig.~\ref{Fig:3.5}(c). Note that, by (C3)--(C6), the vertex at the top of Figure~\ref{Fig:3.5}(c)
is different from all other vertices shown in the figure.
This contradicts (C9).
\emph{Subcase 1.2. The edge $au$ belongs to two
3-cycles $auz$ and $aub$ $($Fig.~\ref{Fig:3.6}$($a$)$$)$.}
Since $b$ has valence at least 4, $b$ lies outside
the 3-cycle $auz$ (Fig.~\ref{Fig:3.6}(b)). The
edges $yz$ and $za$ (resp.\ $wu$ and $ub$)
belong to a nontriangular peripheral cycle $C_1$
(resp.~$C_2$). The cycles $C_1$ and $C_2$ are paired. In
$\varphi$, the crossing point of edges $uw$ and
$az$ is an intersection point of $C_1$ and $C_2$.
The cycles $C_1$ and $C_2$ have at least one other
crossing point, denote this intersection point by
$\delta$. By (C8)(i), $C_1$ and $C_2$ are
vertex-disjoint, hence, $\delta$ is the crossing
point of $C_1$ and an edge $h_1h_2$ of $C_2$
(Fig.~\ref{Fig:3.6}(c)). The edge $h_1h_2$ is not
the edge $uw$ and belongs to a 3-cycle
$h_1h_2h_3$. If $h_3$ belongs to $C_2$, then in the embedding
$f$ the edge $h_1h_3$ is a chord of the embedded peripheral cycle $C_2$
and thus $\{h_1,h_3\}$ is a separating vertex set of $G$. But $G$
is 3-connected, a contradiction. Hence $h_3$ does not belong to $C_2$.
\begin{figure}
\caption{The edge $au$ belongs to two 3-cycles.}
\label{Fig:3.6}
\end{figure}
Now suppose that $h_1h_2\neq bu$. We have
$h_2\not\in\{a,z,b,u\}$. By (C8)(ii), $h_3\neq a$,
$a$ is not adjacent to $h_2$ and $h_3$, and $C_1$
does not pass through $h_3$ (that is, $h_3$ does
not belong to $C_1$ and $C_2$). Hence, a vertex $s$ of $C_1$ lies
inside the 3-cycle $h_1h_2h_3$ (see Fig.~\ref{Fig:3.6}(c)). By
(B), $\deg(a)\ge5$. If $s=a$, then,
since $s$ is the only vertex inside the 3-cycle
$h_1h_2h_3$, $a$ is adjacent to at least one
of $h_2$ and $h_3$, a contradiction.
Hence, $s\neq a$. Since $z$ is the only vertex inside
the 3-cycle $B$, and the 3-cycle $h_1h_2h_3$ is not $B$
(since $h_1h_2 \neq uw$), we have $s\neq z$. Now, since
$s$ has valence at least 4, $s$ is adjacent to at
least one of the vertices $h_1$, $h_2$, and $h_3$,
contrary to (C8)(ii). Hence $h_1h_2=bu$ and $h_3=a$.
We have $C_1=yzah\ldots$ and the edge $ah$
belongs to a 3-cycle $ahd$
(see Figs.~\ref{Fig:3.6}(d) and (e)). Considering
Fig.~\ref{Fig:3.6}(e), if there is a vertex inside
the 3-cycle $auz$, the vertex has valence at most
3, a contradiction. Hence no edge crosses the edge
$au$. If $h$ lies inside the 3-cycle $aub$,
then, by (C4), $h$ is not adjacent to $b$ and
$u$, $h$ has valence at most 3, a contradiction.
Hence, $h$ lies outside the 3-cycle $aub$ and
$b$ is the only vertex inside the 3-cycle
$ahd$ (see Fig.~\ref{Fig:3.6}(e)). Note that the edge $ah$ belongs to $C_1$, hence $C_1$ does not cross both $bu$ and $ab$.
By (C4), $b$ is not adjacent to $d$ and $h$, hence
$b$ has valence 4 and belongs to a 3-cycle $btt'$
disjoint from $aub$ (Fig.~\ref{Fig:3.6}(f)),
where $C_2=wubt\ldots$. Now $d$ is the only
vertex inside the 3-cycle $btt'$
(Fig.~\ref{Fig:3.6}(e)). By (C8)(ii), $d$ is not
adjacent to $t$. Since $d$ has valence at least 4,
$d$ is adjacent to $t'$ and has valence 4. The
4-cycle $dt'ba$ is peripheral. We obtain a subgraph
of $G$ shown in Fig.~\ref{Fig:3.6}(g), contrary to
(C9). The obtained contradiction completes the proof in Case 1.
\emph{Case 2. The vertex $z$ is adjacent to $w$.}
This case is dealt with in much the same way as Case 1. Here we describe only which figures take the place, in Case 2, of Figs.~\ref{Fig:3.4}--\ref{Fig:3.6} of Case 1. We hope that the reader is familiar enough with the proof of Case 1 to supply the missing details.
In Figs.~\ref{Fig:3.4}(a), \ref{Fig:3.4}(d), \ref{Fig:3.5}(a), and \ref{Fig:3.5}(c) interchange the letters $u$ and $w$. In Figs.~\ref{Fig:3.4}(c) and \ref{Fig:3.5}(b) replace the edge $uz$ by the edge $zw$. In Figs.~\ref{Fig:3.6}(a), (d), (f), and (g) interchange the letters $u$ and $w$. Figs.~\ref{Fig:3.6}(b), (c), and (e) are replaced by Figs.~\ref{Fig:3.6E}(a), (b), and (c), respectively.
\begin{figure}
\caption{The edge $aw$ belongs to two 3-cycles.}
\label{Fig:3.6E}
\end{figure}
\end{proof}
Denote by $\mathcal{A}$ the class of all 3-connected
plane graphs $G$ satisfying the conditions (C1)--(C9)
of Theorem~\ref{thm:3.1}. In what follows we show how
to construct some graphs in $\mathcal{A}$ and,
as an example, we shall give two infinite families of graphs in $\mathcal{A}$,
one of which will be used in Section~\ref{Sec:4} to construct MN-graphs.
First we describe a large family of 3-connected plane
graphs satisfying the conditions (C1)--(C6) and
(C8) of Theorem~\ref{thm:3.1}.
Given a 4-valent vertex $v$ of a 3-connected plane
graph, two peripheral cycles $C$ and $C'$ containing $v$ are
\emph{opposite} peripheral cycles at $v$ if $C$ and $C'$ have
no edges incident with $v$ in common.
Denote by $\mathcal{H}$ the class of all 3-connected (simple) planar graphs $H$ satisfying
the following conditions (H1)--(H4):
\begin{itemize}\myitemsep
\item [(H1)] Every vertex has valence 3 or 4.
\item [(H2)] $H$ has no 3-cycles.
\item [(H3)] Every 4-cycle is peripheral.
\item [(H4)] For every 4-valent vertex $v$ and for
any two opposite peripheral cycles $C$ and $C'$ at
$v$, no edge joins a vertex of $C-v$ to a
vertex of $C'-v$.
\end{itemize}
A plane graph $G$ is a \emph{medial extension} of a graph $H\in\mathcal{H}$\/ if $G$ is obtained from $H$ in the
following way. The vertex set of $G$ is the set
$\{v(e):e\in E(H)\}$. The edge set of $G$ is defined
as follows. For every 3-valent vertex $v$ of $H$, if
$e_1,e_2,e_3$ are the edges incident with $v$, then
in $G$ the vertices $v(e_1)$, $v(e_2)$, and $v(e_3)$
are pairwise adjacent (the three edges of $G$ are said to
be \emph{associated with\/} $v$).
For every 4-valent vertex $w$ of $H$, if
$(e_1,e_2,e_3,e_4)$ is the cyclic order of edges
incident to $w$ around $w$ in the plane, then $G$
contains the edges of the 4-cycle $v(e_1)v(e_2)v(e_3)v(e_4)$,
and contains either the edge
$v(e_1)v(e_3)$ or the edge $v(e_2)v(e_4)$; these
five edges of $G$ are said to be \emph{associated with\/} the
4-valent vertex of $H$. Note that $G$ can be obtained from
the medial graph of $H$ by adding a diagonal to every 4-cycle
associated with a 4-valent vertex of~$H$.
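For illustration, here is a minimal sketch of this construction (not code from the paper). The input format is an assumed one: the plane graph $H$ is given by its rotation system, i.e.\ a dictionary mapping each vertex to the cyclic list of its incident edges, each edge of $H$ being a frozenset of its two endpoints; the vertices of $G$ are then the edges of $H$. At each $4$-valent vertex the diagonal $v(e_1)v(e_3)$ is chosen arbitrarily; choosing $v(e_2)v(e_4)$ instead gives another medial extension of the same $H$.
\begin{verbatim}
# Sketch: medial extension of a plane graph H with vertex valences 3 and 4.
# rot[v] = list of edges incident with v, in cyclic order around v;
# each edge of H is a frozenset of its two endpoints.
def medial_extension(rot):
    G_edges = set()
    for v, edges in rot.items():
        if len(edges) == 3:
            e1, e2, e3 = edges
            G_edges |= {frozenset({e1, e2}), frozenset({e2, e3}),
                        frozenset({e1, e3})}
        elif len(edges) == 4:
            e1, e2, e3, e4 = edges                 # cyclic order around v
            G_edges |= {frozenset({e1, e2}), frozenset({e2, e3}),
                        frozenset({e3, e4}), frozenset({e4, e1}),
                        frozenset({e1, e3})}       # one of the two diagonals
        else:
            raise ValueError("H must satisfy (H1): valences 3 and 4 only")
    return G_edges   # vertices of G are the edges of H; these are G's edges
\end{verbatim}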
\begin{lem}\label{lem:3.4}
Every medial extension $G$ of any graph $H\in \mathcal{H}$ is a
3-connected planar graph satisfying the conditions
{\rm (C1)--(C6)} and\/ {\rm (C8)} of Theorem~\ref{thm:3.1}.
\end{lem}
\begin{proof}
By the construction of $G$, if
$\{v(e),v(e')\}$ is a separating vertex set of $G$,
then the graph $H-e-e'$ is disconnected, a
contradiction, since $H$ is 3-connected. Hence $G$ is
3-connected. Every peripheral cycle
of $H$ \emph{induces} a peripheral cycle of $G$. It is
easy to see that all peripheral cycles of $G$ that are not
induced by the peripheral cycles of $H$ are the 3-cycles formed by
the edges associated with the vertices of $H$.
It is easy to see that $G$ satisfies (C1)--(C5). To show that $G$
satisfies (C6), let
$J=v(e_1)v(e_2)v(e_3)v(e_4)$ be a 4-cycle of
$G$. By the construction of $G$, if vertices $v(e)$
and $v(e')$ of $G$ are adjacent, then the edges $e$
and $e'$ of $H$ are adjacent, too. Since in $H$ no three edges among $e_1$, $e_2$, $e_3$, $e_4$
form a cycle (by (H2)), these four edges either
form a 4-cycle (in this case $J$ is peripheral) or are the
edges incident to a 4-valent vertex of $H$ (in this
case, by the construction of $G$, $J$ is the boundary
of two adjacent faces of $G$). Hence, $G$ satisfies (C6).
It remains to show that $G$ satisfies (C8). Let $C$ and $C'$
be nontriangular peripheral cycles of $G$ such that an
edge $a$ of $C$ is paired with an edge $a'$ of $C'$.
Then, by the construction of $G$, the peripheral cycles
$C$ and $C'$ are induced by peripheral cycles
$\overline{C}$ and $\overline{C}'$ of $H$,
respectively, that are opposite at some 4-valent
vertex $u$. If $C$ and $C'$ have a common vertex
$v(e)$, then $\overline{C}$ and $\overline{C}'$ have
a common edge $e$, hence $H$ has a separating vertex
set $\{u,w\}$, where $w$ is a vertex incident to $e$,
a contradiction, since $H$ is 3-connected. Now
suppose that $G$ has an edge joining a vertex $v(e)$
of $C$ to a vertex $v(e')$ of $C'$ such that at least
one of the vertices $v(e)$ and $v(e')$ is not
incident to $a$ and $a'$. Then the edges $e$ and $e'$
are incident to the same vertex $w$ of $H$ and the
cycles $\overline{C}$ and $\overline{C}'$ pass through $w$. It follows that $\{u,w\}$ is a separating
vertex set of $H$, a contradiction. Next suppose that
$G$ has a path $v(e)v(b)v(e')$ connecting a vertex
$v(e)$ of $C$ to a vertex $v(e')$ of $C'$ such that
$v(b)$ does not belong to $C$ and $C'$, and at least
one of the vertices $v(e)$ and $v(e')$ is not
incident to $a$ or $a'$. If in $H$ the edges $e$,
$b$, and $e'$ are incident to the same vertex $w$,
then $\{u,w\}$ is a separating vertex set of $H$, a
contradiction. If in $H$ the edges $e$ and $b$ (resp.
$b$ and $e'$) are incident to a vertex $w$ (resp.
$w'$) such that $w\neq w'$, then the edge $b$ joins
the vertex $w$ of $\overline{C}$ with the vertex $w'$
of $\overline{C}'$, contrary to (H4). Hence $G$
satisfies (C8). The proof is complete.
\end{proof}
There are medial extensions of graphs in $\mathcal{H}$ that do
not satisfy conditions (C7) and (C9). In the sequel we shall
describe a way to verify condition (C7), and then
give examples of graphs satisfying (C1)--(C9).
To show that a medial extension $G$ of $H\in \mathcal{H}$
satisfies (C7) it is convenient to proceed in the
following way. Subdivide every edge $e$ of $H$ by a
two-valent vertex $v(e)$ of $G$. We obtain a graph
$\overline{H}$ whose vertex set is $V(H)\cup V(G)$
where the vertices of $V(G)$ are all 2-valent
vertices of $\overline{H}$. We will consider paths of
$G$ associated with paths of $\overline{H}$
connecting 2-valent vertices.
Two paths $P$ and $P'$ of $\overline{H}$ are \emph{H-disjoint} if
$P \cap P'\cap V(H) = \emptyset$, i.e.,\ $P \cap P'\subseteq V(G)$.
\begin{figure}
\caption{Two paths of $G$ associated with a path of $\overline{H}$.}
\label{Fig:3.7}
\end{figure}
Consider a path $P=v(e_1)w_1 v(e_2)w_2 v(e_3)\ldots w_{n-1}v(e_n)$
in $\overline{H}$ where $w_1,\allowbreak w_2,\ldots,w_{n-1}$ are
the $H$-vertices on $P$. It is easy to see that the edges of $G$
associated with the vertices $w_1,w_2,\ldots,w_{n-1}$
of $H$ contain two edge-disjoint paths connecting in $G$
the vertices $v(e_1)$ and $v(e_n)$ (see
Fig.~\ref{Fig:3.7}); any two such paths in $G$ are said to be
\emph{associated} with the path $P$ of $\overline{H}$.
Since $H$ has no multiple edges, every edge of $G$ is
associated with exactly one vertex of $H$. Hence, if
$P$ and $P'$ are $H$-disjoint paths in
$\overline{H}$, each of which is connecting 2-valent
vertices, then every path in $G$ associated with $P$
is edge-disjoint from every path in $G$ associated
with $P'$. As a consequence, we have the following conclusion:
(C) If $\overline{H}$ has a cycle
containing 2-valent vertices $v(e)$ and
$v(e')$, then $G$ has four edge-disjoint paths
connecting $v(e)$ and $v(e')$.
The fact that a path in $H$ gives rise to two edge-disjoint paths
in $G$ (paths associated with the path of $H$) can be used to
check the property (C7) of~$G$.
For a 3-cycle $C$ of $G$, a path of $\overline{H}$ is
\emph{$C$-independent} if the path does not
contain vertices of $C$. When checking (C7) for a medial
extension $G$ of $H\in \mathcal{H}$, given a 3-cycle $C$ of $G$
and two 2-valent vertices $x,y\in V(G)\setminus (V(C)\cup
N(C))\subset V(\overline{H})$, four $C$-independent
edge-disjoint paths $P_1$, $P_2$, $P_3$, and $P_4$ of $G$ connecting $x$ and $y$ will be represented in some
subsequent figures (see, e.g., Fig.~\ref{Fig:3.9})
in the following way. The edges of the paths incident to
vertices of $N(C)$ are depicted as dashed edges joining 2-valent vertices; these dashed edges are not
edges of $\overline{H}$ (see, for example, Fig.~\ref{Fig:3.9}(b),
where the edges of $H$ are given as solid lines). All other edges
of the paths are represented by paths in $\overline{H}$. If $X$
is a subpath of $P_i$ such that $X$ is associated with a path
$\overline{X}$ in $\overline{H}$, then $X$ is
represented by a dashed line passing near the edges
of $\overline{X}$ in $\overline{H}$. If $X_i$ and $X_j$ are
subpaths of $P_i$ and $P_j$ (where possibly $i=j$), respectively,
such that $X_i$ and $X_j$ are edge-disjoint paths associated with a path
$\overline{X}$ of $\overline{H}$, then $X_i$ and
$X_j$ are represented by two (parallel) dashed lines passing
near the edges of $\overline{X}$ in $\overline{H}$.
Using these conventions, the reader will be able to check that the
depicted dashed paths and edges in the figure of
$\overline{H}$ represent four $C$-independent
edge-disjoint paths of $G$ connecting $x$ and $y$.
Now we describe some graphs in $\mathcal{A}$. Let us recall that
graphs in $\mathcal{A}$ are precisely those 3-connected planar graphs
that satisfy conditions (C1)--(C9). To simplify the arguments, we construct
graphs with lots of symmetries so that, for example, to
check the condition (C7) we will have to consider only
two 3-cycles of a graph.
\begin{figure}
\caption{The graph $H_n\in \mathcal{H}$.}
\label{Fig:3.8}
\end{figure}
For $n\ge6$, let $H_n\in \mathcal{H}$ be the Cartesian product
of the path $P_3$ of length 2 and the cycle $C_n$ of length $n$
(see Fig.~\ref{Fig:3.8}(a)). Let $G_n$ be the medial extension of $H_n$ shown in Fig.~\ref{Fig:3.8}(b).
\begin{lem}
\label{lem:Extra1}
Each graph $G_n$, $n\geq 6$, is a PN-graph.
\end{lem}
\begin{proof}
We show that $G_n$ satisfies (C1)--(C9) for every $n\geq 6$.
By Lemma~\ref{lem:3.4}, $G_n$
satisfies (C1)--(C6) and (C8). Every 4-gonal face of
$G_n$ is incident with a 6-valent vertex and $G_n$ has
no 5-valent vertices, hence $G_n$ satisfies (C9).
Now we show that $G_n$ satisfies (C7). Consider a
fragment of $\overline{H_n}$ shown in
Fig.~\ref{Fig:3.9}(a) (in the figure we introduce
notation for some vertices and also depict in dashed
lines some edges of $G_n$). Because of the symmetries of
$G_n$, it suffices to consider the following two
cases for a 3-cycle $C$, when checking (C7):
\begin{figure}
\caption{Verifying (C7) for the graph $G_n$.}
\label{Fig:3.9}
\end{figure}
\noindent
\emph{Case 1. $V(C)=\{6,9,10\}$.}
Then $N(C)=\{2,3,5,7,8,11,12,13,14\}$. If we delete from
$\overline{H_n}$ the vertices
$1,2,\ldots,14$, then the obtained graph has only one
connected component $U$ with a vertex in $H_n$, and
$U$ is 2-connected. Hence, any two 2-valent
vertices $x$ and $y$ of $U$ belong to a cycle in $U$
and then, by (C), $G_n$ has four $C$-independent
edge-disjoint paths connecting $x$ and $y$.
Fig.~\ref{Fig:3.9}(b) shows four $C$-independent
edge-disjoint paths in $G_n$ connecting vertices $1$ and $4$.
If we delete from $\overline{H_n}$ the vertices
$\{1,2,\ldots,18\}\setminus \{1,4\}$, then in the
resulting non-trivial connected component,
for every vertex $x\in V(G_n)\setminus \{1,2,\ldots,18\}$, there is a path
$P$ connecting the vertices $1$ and $4$, and passing through $x$;
combining two edge-disjoint paths of $G_n$ associated
with $P$ and the two edge-disjoint paths connecting
$1$ and $4$, shown in Fig.~\ref{Fig:3.9}(b),
we obtain four $C$-independent
edge-disjoint paths connecting the vertices $4$ and
$x$ (and, analogously, for the vertices $1$ and $x$).
Now, because of the symmetries of $\overline{H_n}$, it
remains to show that there are four $C$-independent
edge-disjoint paths connecting the vertex $4$ with each of the
vertices $15,16,17,18$; Fig.~\ref{Fig:3.9}(c) shows
the paths (since $n\ge 6$).
\noindent
\emph{Case 2: $V(C) = \{2,3,6\}$.}
We have $N(C)=\{1,4,5,7,9,10\}$. If we delete from
$\overline{H_n}$ the vertices $1,2,\ldots,7$ and $9,10,13$, then
the obtained graph has only one connected component
$U$ with at least two vertices and $U$ is 2-connected. Hence any two vertices $x$ and $y$ in $\overline{H_n}$ that
are both in $U$ belong to a cycle of $U$ and then, by (C),
$G_n$ has four $C$-independent edge-disjoint paths
connecting $x$ and $y$. It remains to show that
for every vertex $x$ of $\overline{H_n}$ belonging to $U$, there are four $C$-independent edge-disjoint paths connecting $x$
and the vertex $13$. These four paths are shown in
Figs.~\ref{Fig:3.9}(d)--(f), depending on the choice of $x$.
We conclude that $G_n$ satisfies (C1)--(C9), hence $G_n$
is a PN-graph for every $n\ge6$.
\end{proof}
\begin{figure}
\caption{The graph $H_n'\in \mathcal{H}$.}
\label{Fig:3.10}
\end{figure}
Fig.~\ref{Fig:3.10} gives another example of an
extension $G_n'$ of a graph $H_n'\in \mathcal{H}$. By
Lemma~\ref{lem:3.4}, $G_n'$ satisfies (C1)--(C6) and
(C8). Since $G_n'$ has no 4-cycles, it
satisfies (C9). Using the symmetry of $\overline{H_n'}$, one can easily check that for every 3-cycle $C$ of
$G_n'$, if we delete from $\overline{H_n'}$ the vertices
$V(C)\cup N(C)$ of $G_n'$, then the obtained graph has
only one connected component $U$ with at least two
vertices and $U$ is 2-connected. Then, by (C), any
two vertices of $G_n'$ in $U$ are connected by four
$C$-independent edge-disjoint paths. Hence, $G_n'$
satisfies (C7) and is a PN-graph.
\section{MN-graphs based on PN-graphs}\label{Sec:4}
In this section we construct MN-graphs based on the
PN-graphs $G_n$ described in Section~\ref{Sec:3}.
For $m\geq 2$, denote by $S_m$ the graph shown in
Fig.~\ref{Fig:4.1}. The graph has $m+1$ disjoint cycles of
length $12m-2$ labelled by $B_0,B_1,\ldots,B_m$ as
shown in the figure. The vertices of $B_0$ are called
the \emph{central vertices} of $S_m$ and are labelled
by $1,2,\ldots,12m-2$ (see Fig.~\ref{Fig:4.1}). For
every central vertex $x\in\{1,2,\ldots,12m-2\}$,
denote by $x^*$ its ``opposite'' vertex $x+(6m-1)$ if
$x\in\{1,2,\ldots,6m-1\}$ and the vertex $x-(6m-1)$
if $x\in\{6m,6m+1,\ldots,12m-2\}$. In $S_m$, any pair
$\{x,x^*\}$ of central vertices is connected by a
\emph{central path} $P(x,x^*)$ of length $6m-3$ with
$6m-4$ two-valent vertices. There are exactly $6m-1$ central paths.
\begin{figure}
\caption{The graph $S_m$.}
\label{Fig:4.1}
\end{figure}
For any integers $m\geq 4$ and $n\geq 0$, denote by
$\Phi_m(n)$ the set of all $(12m-2)$-tuples
$(n_1,n_2,\ldots,n_{12m-2})$ of nonnegative integers
such that $n_1+n_2+\cdots+n_{12m-2}=n$. For every
$\lambda\in\Phi_m(n)$, denote by $S_m(\lambda)$ the
graph obtained from $S_m$ by replacing, for every central vertex
$x\in\{1,2,\ldots,12m-2\}$, the eight edges
marked by short crossings in Fig.~\ref{Fig:4.2}(a)
by $8(1+n_x)$ new edges marked by crossings
in Fig.~\ref{Fig:4.2}(b) (the value $x+1$ in that figure
is to be considered modulo $12m-2$). The graph $S_m(\lambda)$ has $m-2$
cycles of length $12m-2$, namely $B_0,B_1,\ldots,B_{m-3}$, and three
cycles of length $12m-2+n$, namely $B_{m-2},B_{m-1},B_m$.
\begin{figure}
\caption{Obtaining the graph $S_m(\lambda)$.}
\label{Fig:4.2}
\end{figure}
We want to show that for every $m\geq 4$ and for
every $\lambda\in \Phi_m(n)$, $n\geq 0$, the graph
$S_m(\lambda)$ is an MN-graph.
\begin{lem}\label{lem:4.1}
For every $m\geq 4$, $n\geq 0$ and $\lambda\in\Phi_m(n)$,
the graph $S_m(\lambda)-e$ is\/ $1$-planar for every edge $e$.
\end{lem}
\begin{proof}
If we delete an edge of a central path,
then the remaining $6m-2$ central paths, each with
$6m-3$ edges, can be 1-immersed inside $B_0$ in
Fig.~\ref{Fig:4.1}. If we delete one of the edges
shown in Fig.~\ref{Fig:4.3}(a) by a thick line, then
the central path $P(x,x^*)$ can be drawn outside
$B_0$ with $6m-3$ crossing points as shown in the figure
and then the remaining $6m-2$ central paths can be
1-immersed inside $B_0$. If we delete one of the two
edges depicted in Fig.~\ref{Fig:4.3}(a) by a dotted
line, then Fig.~\ref{Fig:4.3}(b) shows how to place
the central vertex $x$ so that the path $P(x,x^*)$
can be drawn outside $B_0$ with $6m-3$ crossing
points (analogously to Fig.~\ref{Fig:4.3}(a)) and
then the remaining $6m-2$ central paths can be
1-immersed inside $B_0$. This covers all possibilities
for the edge $e$ (up to symmetries of $S_m$) and thus
completes the proof.
\end{proof}
\begin{figure}
\caption{The central path $P(x,x^*)$ immersed outside $B_0$.}
\label{Fig:4.3}
\end{figure}
Given a 1-immersion of a graph $G$ and an embedded cycle $C$,
we say that $G$ lies \emph{inside} (resp.\ \emph{outside}) $C$,
if the exterior (resp.\ interior) of $C$ does not contain vertices
and edges of $G$.
Denote by $J_{m-2}$ the graph obtained from the graph
$S_m$ in Fig.~\ref{Fig:4.1} by deleting the
2-valent vertices of all central paths and by deleting
all vertices lying outside the cycle $B_{m-2}$.
\begin{lem}\label{lem:4.2}
For every $m\geq 4$, $J_{m-2}$ is a PN-graph.
\end{lem}
\begin{proof} The graph $J_{m-2}$ contains $m-3$
subgraphs $L_1,L_2,\ldots,L_{m-3}$ isomorphic to the
PN-graph $G_{12m-2}$ such that for
$i=1,2,\ldots,m-3$, the graph $L_i$ contains the
cycles $B_{i-1}$, $B_i$, and $B_{i+1}$. Consider an
arbitrary 1-immersion $\varphi$ of $J_{m-2}$. Suppose
that in the plane embedding of the PN-graph $L_1$ in
$\varphi$, the cycle $B_2$ is the boundary cycle of
the outer $(12m-2)$-gonal face of the embedding. Then
the embedding of $L_1$ determines an embedding of the
subgraph of $L_2$ bounded by the cycles $B_1$ and
$B_2$. Since $L_2$ is a PN-graph, the subgraph of
$L_2$ bounded by $B_2$ and $B_3$ lies outside the
cycle $B_2$. Reasoning similarly, we obtain that for
$i=3,4,\ldots,m-3$, the subgraph of the PN-graph
$L_i$ bounded by $B_i$ and $B_{i+1}$ lies outside
$B_i$. As a result, $\varphi$ is a plane embedding of
$J_{m-2}$, hence $J_{m-2}$ is a PN-graph.
\end{proof}
Denote by $\overline{S}_m(\lambda)$ the graph
obtained from $S_m(\lambda)$, where $m\geq
4$ and $\lambda\in \Phi_m(n)$, by deleting the
2-valent vertices of all central paths.
\begin{lem}\label{lem:4.3}
For every $m\geq 4$, $n\geq 0$ and\/ $\lambda\in \Phi_m(n)$,
$\overline{S}_m(\lambda)$ is a PN-graph.
\end{lem}
\begin{proof}
The graph $\overline{S}_m(\lambda)$
contains a subgraph $G$ isomorphic to the PN-graph
$G_{12m-2+n}$ and contains a subgraph $G'$
homeomorphic to the PN-graph $J_{m-2}$. The graph $G$
contains the cycles $B_{m-2}$, $B_{m-1}$, and $B_m$
of $\overline{S}_m(\lambda)$, and the graph $G'$
contains the cycles $B_0,B_1,\ldots,B_{m-2}$ of
$\overline{S}_m(\lambda)$ and is obtained from $J_{m-2}$
by subdividing the edges of the cycle $B_{m-2}$
(by using $n$ 2-valent vertices in total).
Consider, for a contradiction, a proper 1-immersion
$\varphi$ of $\overline{S}_m(\lambda)$. In $\varphi$,
the graph $G$ has a plane embedding and we shall investigate in which faces of the embedding of $G$ the
vertices of $G'$ lie. We shall show that they all lie in the
face of $G$ bounded by the (subdivided) cycle $B_{m-2}$.
In the graph $\overline{S}_m(\lambda)$ the cycles $B_{m-2}$ and $B_i$, $i\in\{0,1,\ldots,m-3\}$ are connected by $24m-4$ edge-disjoint paths. This implies that
no 3- or 4-gonal face of $G$ contains all vertices of $B_i$
in its interior.
Any two vertices of $B_i$ are connected by
six edge-disjoint paths in $G'-B_{m-2}$. Therefore:
(a) No 3- or 4-gonal face of $G$ contains any vertex of the cycles $B_i$, $i=0,1,\ldots,m-3$, in its interior.
Suppose that a vertex $v$ of $G'$ does not belong to the cycles $B_i$, $i=1,2,\ldots,m-2$, and lies inside a 3- or 4-gonal face $F$ of $G$. By construction of $G'$, the vertex $v$ is adjacent to two vertices $w$ and $w'$ of some $B_j$, $j\in\{0,1,\ldots,m-3\}$. By (a), $w$ and $w'$ do not lie inside $F$, hence they lie, respectively, in faces $F_1$ and $F_2$ of $G$ adjacent to $F$. However, at least one of $F_1$ and $F_2$ is 3- or 4-gonal, contrary to (a). Therefore, no 3- or 4-gonal face of $G$ contains any vertex of $G'-B_{m-2}$. If there is a vertex of $G'-B_{m-2}$ outside $B_m$, then $G'$ has two adjacent vertices such that one of them is either a vertex of $B_{m-2}$ or lies inside $B_{m-2}$, and the other vertex lies outside $B_m$, a contradiction, since the edge joining the vertices crosses at least 5 edges of $G$. This implies that all vertices of $G'-B_{m-2}$ lie inside the face of $G$ bounded by $B_{m-2}$.
Hence $G'$ lies inside $B_{m-2}$ and has a proper 1-immersion in $\varphi$. If in this 1-immersion of $G'$ we ignore the 2-valent vertices
on the cycle $B_{m-2}$ of $\overline{S}_m(\lambda)$,
then we obtain a proper 1-immersion of the PN-graph
$J_{m-2}$, a contradiction.
\end{proof}
By the paths of
$\overline{S}_m(\lambda)$ associated with any central
vertex $x$ we mean the two paths shown in
Fig.~\ref{Fig:4.4}; one of them is depicted in thick
line and the other in dashed line. Every edge of $\overline{S}_m(\lambda)$ not belonging
to the cycles $B_0,B_1,\ldots,B_m$ is assigned a
\emph{type} $t\in\{1,2,\ldots,2m\}$ as shown in
Fig.~\ref{Fig:4.4} such that for $i=1,2,\ldots,m$, the
edges of type $2i-1$ and $2i$ are all edges lying
between the cycles $B_{i-1}$ and $B_i$, and the edges
of type $2i-1$ (resp.\ $2i$) are incident to vertices
of $B_{i-1}$ (resp.\ $B_i$).
\begin{figure}
\caption{The paths associated with a central vertex and the types of edges.}
\label{Fig:4.4}
\end{figure}
Suppose that there is a 1-immersion $\varphi$ of $S_m(\lambda)$. By Lemma \ref{lem:4.3}, $\overline{S}_m(\lambda)$ is a PN-graph.
Thus, $\varphi$ induces an embedding of this graph. We shall assume that
the outer face $F_0$ of this embedding is bounded by the cycle $B_m$.
We shall first show that $F_0$ is also a face of $\varphi$. To prove this,
it suffices to see that no central path can enter $F_0$.
Any central vertex $x$ is separated from $F_0$ by $3m-1$
edge-disjoint cycles: $m$ cycles $B_1,B_2,\ldots,B_m$ and $2m-1$ cycles $C_2,C_3,\ldots,C_{2m}$, where the cycle $C_i$ consists of all edges of type $i$
($i=2,3,\ldots,2m$). The central path $P=P(x,x^*)$ can have at most $6m-3$ crossing points, hence $P$ cannot enter $F_0$: since both end vertices of $P$ lie on $B_0$, entering $F_0$ and returning would force $P$ to cross each of these $3m-1$ cycles at least twice, giving at least $6m-2$ crossing points. If $P$ lies between $B_0$ and $B_m$ in $\varphi$, then it must cross $2(6m-2)$ paths associated either with the $6m-2$ central vertices $x+1,x+2,\ldots,x^*-1$ or with the $6m-2$ central vertices $x^*+1,x^*+2,\ldots,x-1$ (here we
interpret all additions modulo $12m-2$), a contradiction. Hence, in $\varphi$ any central path either lies inside $B_0$ or crosses some edges of $\overline{S}_m(\lambda)$ but does not lie entirely between $B_0$ and $B_m$.
The main goal of this section is to show that $S_m(\lambda)$ has no
1-immersions (see Theorem~\ref{thm:4.1} in the sequel). Roughly speaking, the main idea of the proof is as
follows. Suppose, for a contradiction, that
$S_m(\lambda)$ has a 1-immersion. Every central path
can have at most $6m-3$ crossing points, hence the $6m-1$
central paths cannot all be 1-immersed inside $B_0$ (any two central paths drawn inside $B_0$ must cross, since their end vertices alternate along $B_0$, which would give every central path $6m-2$ crossing points).
Then there is a central path which crosses some edges of
$\overline{S}_m(\lambda)$. Let $P$ be a central path with maximum
number of such crossings. Since $P$ can have at most
$6m-3$ crossing points, some of the other $6m-2$
central paths do not cross $P$ and have to ``go
around'' $P$ and, in doing so, one of the
paths has to cross more edges of
$\overline{S}_m(\lambda)$ than $P$ does, a contradiction.
Before proving Theorem~\ref{thm:4.1}, we need some
definitions and preliminary Lemmas~\ref{lem:4.4} and
\ref{lem:4.5}.
Consider a 1-immersion of $S_m(\lambda)$ (if it
exists). If a central path $P=P(x,x^*)$ does not lie
inside $B_0$, consider the sequence
$\delta_1,\delta_2,\ldots,\delta_r$ ($r\geq 2$),
where $\delta_1=x$ and $\delta_r=x^*$, obtained by
listing the intersection points of the path and $B_0$
when traversing the path from the vertex $x$ to the
vertex $x^*$ (here
$\delta_2,\delta_3,\ldots,\delta_{r-1}$ are crossing
points). By a \emph{piece} of $P$ we mean the segment
of $P$ from $\delta_i$ to
$\delta_{i+1}$ for some $i\in\{1,2,\ldots,r-1\}$;
denote the piece by $P(\delta_i,\delta_{i+1})$. A
piece of $P$ with an end point $x$ or $x^*$ is called
an \emph{end piece} of $P$ at the vertex $x$ or
$x^*$, respectively. An \emph{outer piece} of $P$ is
a piece of $P$ that is immersed outside $B_0$. Clearly,
either $P(\delta_1,\delta_2),P(\delta_3,\delta_4),
P(\delta_5,\delta_6),\ldots$ or
$P(\delta_2,\delta_3),P(\delta_4,\delta_5),
P(\delta_6,\delta_7),\ldots$ are all outer pieces of
$P$. The end points $\delta$ and $\delta'$ of an
outer piece $\Pi$ of $P$ partition $B_0$ into two
curves $A$ and $A'$ such that the curve $A$ lies inside
the closed curve consisting of $\Pi$ and $A'$ (see
Fig.~\ref{Fig:4.5}). The central vertices belonging
to $A$ and different from $\delta$ and $\delta'$ are
said to be \emph{bypassed} by $\Pi$ and $P$
(cf.\ Fig.~\ref{Fig:4.5}).
\begin{figure}
\caption{The central vertices bypassed by an outer piece $\Pi$.}
\label{Fig:4.5}
\end{figure}
\begin{lem}\label{lem:4.4}
If\/ $P(x,x^*)$ bypasses neither a central vertex $y$
nor its opposite vertex $y^*$, and\/ $P(y,y^*)$ bypasses
neither $x$ nor $x^*$, then $P(x,x^*)$ crosses
$P(y,y^*)$.
\end{lem}
\begin{proof}
Suppose, for a contradiction, that
$P(x,x^*)$ does not cross $P(y,y^*)$. For every outer
piece of the two paths we can replace a curve of a
path containing the piece by a new curve lying inside
$B_0$ so that the path $P(x,x^*)$ (resp. $P(y,y^*)$)
becomes a new path $P'(x,x^*)$ (resp. $P'(y,y^*)$)
connecting the vertices $x$ and $x^*$ (resp. $y$ and
$y^*$) such that the two new paths lie inside $B_0$
and do not cross each other, a contradiction. How the
replacements can be done is shown in
Fig.~\ref{Fig:4.6}, where the new curves are depicted
in thick line. (Note that in Fig.~\ref{Fig:4.6}(b),
since $P(y,y^*)$ does not bypass $x$, the depicted
pieces bypassing $x$ belong to $P(x,x^*)$.)
\end{proof}
\begin{figure}
\caption{Transforming paths $P(x,x^*)$ into paths $P'(x,x^*)$.}
\label{Fig:4.6}
\end{figure}
By the \emph{type} of an outer piece of a central path we mean the maximal
type of an edge of $\overline{S}_m(\lambda)$ crossed by the piece.
For an outer piece $\Pi$ of a central path $P(x,x^*)$,
denote by $b(\Pi)$ the number of central vertices
bypassed by $\Pi$, and by $\Delta(\Pi)$ the number of
intersection points of
$\Pi$ and $\overline{S}_m(\lambda)$,
including the crossings at the end points of $\Pi$
(except if an end point is $x$ or $x^*$).
\begin{lem}\label{lem:4.5}
If\/ $\Pi$ is an outer piece of type $t$ of a central path $P$, then
\[
\Delta(\Pi)-b(\Pi)\geq 2t-\tau,
\]
where $\tau=1$ if\/ $\Pi$ is an end piece and $\tau=0$ otherwise.
\end{lem}
\begin{proof}
The piece $\Pi$ crossing an edge of
type $t$ has a point separated from the interior of
$B_0$ by $\lfloor\frac{t+1}{2}\rfloor+t$
edge-disjoint cycles:
$B_0,B_1,\ldots,B_{\lfloor\frac{t+1}{2}\rfloor-1}$,
and the cycles $C_1,C_2,\ldots,C_t$, where the cycle
$C_i$ consists of all edges of type $i$ ($i=1,2,\ldots,t$).
Thus, $\Pi$ crosses each of these cycles twice,
except that for an end piece, we may miss one crossing with $C_1$.
The piece $\Pi$ bypasses $b(\Pi)$ central
vertices, hence $\Pi$ crosses $2b(\Pi)$ paths
associated with those $b(\Pi)$ vertices. Hence we obtain two inequalities
\begin{equation}\label{eq:1}
\Delta(\Pi)\geq 2\left\lfloor\tfrac{t+1}{2}\right\rfloor+2t-\tau,
\end{equation}
and
\begin{equation}\label{eq:2}
\Delta(\Pi)\geq
2\left\lfloor\tfrac{t+1}{2}\right\rfloor+2b(\Pi)-\tau.
\end{equation}
\noindent Now add (\ref{eq:1}) and (\ref{eq:2}), apply $2\lfloor\tfrac{t+1}{2}\rfloor\geq t$, then divide by $2$ and rearrange to get the result.
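Written out, this computation reads
\[
2\Delta(\Pi)\geq 4\left\lfloor\tfrac{t+1}{2}\right\rfloor+2t+2b(\Pi)-2\tau\geq 2t+2t+2b(\Pi)-2\tau,
\]
whence $\Delta(\Pi)\geq 2t+b(\Pi)-\tau$, that is, $\Delta(\Pi)-b(\Pi)\geq 2t-\tau$.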
\end{proof}
By the \emph{type} of a central path not lying (entirely) inside
$B_0$ we mean the maximal type of the outer pieces of
the path. If $t$ different central paths bypass a
central vertex $x$, then all the paths cross edges of
the same path $T$ associated with $x$ and since the
edges of $T$ have pairwise different types, we obtain
that one of the central paths crosses an edge of type
at least $t$. Hence we have:
(D) If $t$ different central paths bypass
the same central vertex, then one of the paths
has type at least $t$.
\begin{thm}\label{thm:4.1}
For every $m\geq4$ and $\lambda\in \Phi_m(n)$,
the graph $S_m(\lambda)$ is not\/ $1$-planar.
\end{thm}
\begin{proof}
Consider, for a contradiction, a
1-immersion $\varphi$ of $S_m(\lambda)$ and a path
$P=P(x,x^*)$ of maximal type $t>0$.
As above, let $\Delta(P)$ be the number of crossing points
of $P$ and $\overline{S}_m(\lambda)$, and let $b(P)$ be the number of distinct central vertices bypassed by $P$ and different from $x$ and $x^*$.
There are at least $6m-2-b(P)$ different pairs $\{y,y^*\}$ ($\{y,y^*\}\neq\{x,x^*\}$) of central vertices such
that $P$ bypasses neither $y$ nor $y^*$; denote by
$\mathcal{P}$ the set of the corresponding (at least $6m-2-b(P)$) paths
$P(y,y^*)$. If $P(x,x^*)$ bypasses neither $y$ nor
$y^*$, then $P(y,y^*)$ either bypasses neither $x$ nor
$x^*$ (in this case $P(y,y^*)$ crosses $P(x,x^*)$
by Lemma~\ref{lem:4.4}) or bypasses at least one
of the vertices $x$ and $x^*$. Hence, we have
\begin{equation}\label{eq:3}
6m-2-b(P)\leq\beta+\gamma+\varepsilon,
\end{equation}
where: $\beta$ is the number of
paths of $\mathcal{P}$ that cross $P$ and do not
bypass $x$ or $x^*$; $\gamma$ is the number of
paths of $\mathcal{P}$ that bypass $x$ or $x^*$
and do not cross $P$; $\varepsilon$ is the number of
paths of $\mathcal{P}$ that cross $P$ and bypass $x$
or $x^*$. We are interested in the number
$\gamma+\varepsilon$ of paths of $\mathcal{P}$ that
bypass $x$ or $x^*$.
The path $P$ has at most $6m-3$ crossing points,
hence
\[
6m-3-\Delta(P)\geq\beta+\varepsilon
\]
and, by (\ref{eq:3}), we obtain
\[
\gamma\geq 6m-2-b(P)-(\beta+\varepsilon)\geq
\Delta(P)-b(P)+1,
\]
whence
\begin{equation}\label{eq:4}
\gamma+\varepsilon\geq\Delta(P)-b(P)+1+\varepsilon.
\end{equation}
Let $\Pi_1,\Pi_2,\ldots,\Pi_\ell$ ($\ell\geq 1$) be
all outer pieces of $P$, and let $\Pi_1$ be of the
maximal type $t$. We have
$\Delta(P)=\sum^\ell_{i=1}\Delta(\Pi_i)$ and
$b(P)\leq\sum^\ell_{i=1}b(\Pi_i)$ (the vertices $x$
and $x^*$ can be bypassed by $P$, and some central vertices can be bypassed by $P$ more than once). By Lemma~\ref{lem:4.5},
$\Delta(\Pi_i)-b(\Pi_i)\geq 0$ for every
$i=1,2,\ldots,\ell$. Hence, by
(\ref{eq:4}) and Lemma~\ref{lem:4.5}, we obtain
\begin{eqnarray}
\label{eq:5}
\gamma+\varepsilon &\geq& \sum^\ell_{i=1}\Delta(\Pi_i)
-\sum^\ell_{i=1}b(\Pi_i)+1+\varepsilon \nonumber \\
&\geq&
\Delta(\Pi_1)-b(\Pi_1)+1+\varepsilon \geq (2t+1)
-\tau+\varepsilon,
\end{eqnarray}
where $\tau=1$ if $\Pi_1$ is an end piece and
$\tau=0$ otherwise. If $\Pi_1$ is not an end
piece or $\varepsilon\geq 1$, then, by
(\ref{eq:5}), $\gamma+\varepsilon\geq 2t+1$, hence
one of the vertices $x$ and $x^*$ is bypassed by at
least $t+1$ paths of $\mathcal{P}$. Now, by (D),
one of the $t+1$ paths has type at least $t+1$, a contradiction. Now suppose that $\Pi_1$ is an end piece at the vertex
$x$ and $\varepsilon=0$. Then every path of
$\mathcal{P}$ either crosses $P$ or bypasses $x$ or
$x^*$, and at least $2t$ paths of $\mathcal{P}$
bypass $x$ or $x^*$. If none of the $2t$ paths
bypasses $x$, then all the $2t\geq t+1$ paths bypass
$x^*$ and, by (D), one of the paths has type at
least $t+1$. If one of the $2t$ paths, say, $P'$,
bypasses $x$, then $P'$ has an outer piece $\Pi'$
that bypasses $x$ and does not cross $\Pi_1$ (since
$\varepsilon=0$, $P'$ does not cross $P$). The
piece $\Pi_1$ has type $t$ and is an end piece at
$x$, hence $\Pi'$ has type at least $t+1$, a
contradiction.
\end{proof}
We have shown that every graph $S_m(\lambda)$, where
$m\geq 4$ and $\lambda\in \Phi_m(n)$, is an MN-graph.
These graphs have order $(5m-1)(12m-2)+5n$. Clearly,
graphs $S_{m_1}(\lambda_1)$ and $S_{m_2}(\lambda_2)$,
where $\lambda_1\in\Phi_{m_1}(n_1)$ and
$\lambda_2\in\Phi_{m_2}(n_2)$, are nonisomorphic whenever
$m_1\neq m_2$, or $m_1=m_2$ and $n_1\neq n_2$.
\begin{clm}\label{clm:2}
For any integers\/ $m\geq 4$ and\/ $n\geq 0$, there are
at least $\frac{1}{(24m-4)}{n+12m-3 \choose 12m-3}$
nonisomorphic MN-graphs $S_m(\lambda)$, where
$\lambda\in \Phi_m(n)$.
\end{clm}
\begin{proof}
The automorphism group of $S_m$ is the dihedral group $D_{12m-2}$
of order $24m-4$. Now the claim follows by recalling the well-known
fact that $|\Phi_m(n)|={n+12m-3 \choose 12m-3}$.
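In more detail, every isomorphism class of graphs $S_m(\lambda)$, $\lambda\in\Phi_m(n)$, arises from at most $|D_{12m-2}|=24m-4$ tuples $\lambda$, so the number of pairwise nonisomorphic graphs $S_m(\lambda)$ is at least
\[
\frac{|\Phi_m(n)|}{24m-4}=\frac{1}{24m-4}{n+12m-3 \choose 12m-3}.
\]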
\end{proof}
\section{Testing 1-immersibility is hard}
\label{sect:NPC}
In this section we prove that testing 1-immersibility is NP-hard.
This shows that it is extremely unlikely that there exists a nice
classification of MN-graphs.
\begin{thm}
\label{thm:NPC1}
It is NP-complete to decide if a given input graph is\/ $1$-immersible.
\end{thm}
Since 1-immersions can be represented combinatorially, it is clear that 1-immersibility is in NP. To prove NP-completeness, we give a reduction from a known NP-complete problem, that of 3-colorability of planar graphs of maximum degree at most four \cite{GJS}.
The rest of this section is devoted to the proof of Theorem \ref{thm:NPC1}.
Let $G$ be a given plane graph of maximum degree 4
whose 3-colorability is to be tested. We shall show how to construct, in polynomial time, a related graph $\overline{G}$ such that $\overline{G}$ is 1-immersible if and only if $G$ is 3-colorable. We may assume that $G$ has no vertices of degree less than three (since degree 1 and 2 vertices may be deleted without affecting 3-colorability).
To construct $\overline{G}$, we will use as building blocks graphs which have a unique 1-immersion. These building blocks are connected with each other by edges to form a graph which also has a unique 1-immersion. Then we add some additional paths to obtain $\overline{G}$.
We say that a 1-planar graph $G$ has a \emph{unique\/ $1$-immersion\/} if, whenever two edges $e$ and $f$ cross each other in some 1-immersion, they cross each other in every 1-immersion of $G$, and, secondly, if $G^\bullet$ is the planar graph obtained from $G$ by replacing each pair of crossing edges $e=ab$ and $f=cd$ by a new vertex of degree four joined to $a,b,c,d$, then $G^\bullet$ is 3-connected (and thus has a combinatorially unique embedding in the plane, namely the one obtained from 1-immersions of $G$).
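The planarization $G^\bullet$ can be computed mechanically from a 1-immersion once the crossing pairs are recorded. The following minimal sketch is our illustration only; the helper name \texttt{planarize} and the use of the \texttt{networkx} library are ours and not part of the original construction.
\begin{verbatim}
import itertools
import networkx as nx

def planarize(G, crossings):
    """Return the planarization of a 1-immersed graph G.

    G          -- an undirected networkx Graph
    crossings  -- iterable of pairs ((a, b), (c, d)) of crossing edges;
                  in a 1-immersion every edge occurs in at most one pair.
    """
    H = G.copy()
    fresh = itertools.count()
    for (a, b), (c, d) in crossings:
        v = ("crossing", next(fresh))   # new degree-4 vertex for this crossing
        H.remove_edge(a, b)
        H.remove_edge(c, d)
        H.add_edges_from([(v, a), (v, b), (v, c), (v, d)])
    return H
\end{verbatim}
Checking that the resulting graph is 3-connected (for instance, by verifying \texttt{nx.node\_connectivity(H) >= 3}) then certifies the uniqueness of the embedding in the sense above.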
It was proved in \cite{K} that for every $n\geq 6$, the graph with $4n$ vertices and $13n$ edges shown in Fig.~\ref{Fig.5.1}(a) has a unique 1-immersion. (To be precise, the paper \cite{K} considers the graph for even values of $n\geq 6$ only, but one can check that the proof does not depend on whether $n\geq 6$ is even or odd.) We call the graph a \emph{U-graph}. Fig.~\ref{Fig.5.1}(b) shows a designation of the U-graph used in what follows. In the 1-immersion of the U-graph shown in Fig.~\ref{Fig.5.1}, the vertices $1,2,3,\ldots,n-1,n$ which lie on the boundary of the outer face of the spanning embedding (the boundary is called the \emph{outer boundary cycle} of the 1-immersed U-graph) are called the \emph{boundary vertices} of the U-graph in the 1-immersion. If a graph has a U-graph as a subgraph, then the U-graph is called the \emph{U-subgraph} of the graph.
\begin{figure}
\caption{The U-graph.}
\label{Fig.5.1}
\end{figure}
Take two 1-immersed U-graphs $U_1$ and $U_2$ such that each of them has the outer boundary cycle of length at least 7, and construct the 1-immersed graphs shown in Figs.~\ref{Fig.5.2}(a) and (b), respectively, where by $1,2,\ldots,7$ we denote seven consecutive vertices on the outer boundary cycle of each of the 1-immersed graphs. We say that in Fig.~\ref{Fig.5.2}(a) (resp. (b)) the U-graphs $U_1$ and $U_2$ are connected by a $(1)$-grid (resp. $(2)$-grid). The vertices labeled $1,2,\ldots,7$ are the \emph{basic vertices} of the grid and for $i=1,2,\ldots,7$, the $h$-path connecting the vertices labeled $i$ of the $(h)$-grid, $h\in\{1,2\}$, is called the \emph{basic path} of the grid connecting these vertices. Let us denote the $i$th basic path by $P_i$. The paths $P_{i-1}$ and $P_i$, $i=2,3,\ldots,7$, are \emph{neighboring basic paths} of the grid. For two basic paths $P=P_i$ and $P'=P_j$, $1\leq i<j\leq 7$, denote by $C(P,P')$ the cycle of the graph in Fig.~\ref{Fig.5.2} consisting of the two paths and of the edges $(i,i+1),(i+1,i+2),\ldots,(j-1,j)$ of the two graphs $U_1$ and $U_2$.
\begin{figure}
\caption{Two U-graphs connected by a grid.}
\label{Fig.5.2}
\end{figure}
By a \emph{U-supergraph} we mean every graph obtained in the following way. Consider a plane connected graph $H$. Now, for every vertex $v\in V(H)$, take a 1-immersed U-graph $U(v)$ of order at least $28\cdot\deg(v)$ and for any two adjacent vertices $u$ and $w$ of the graph, connect $U(u)$ and $U(w)$ by a $(1)$- or $(2)$-grid as shown in Fig.~\ref{Fig.5.3} such that any two distinct grids have no basic vertices in common. We obtain a 1-immersed U-supergraph.
\begin{figure}
\caption{Constructing a U-supergraph.}
\label{Fig.5.3}
\end{figure}
\begin{thm}
\label{thm:1}
Every U-supergraph $M$ has a unique 1-immersion.
\end{thm}
\begin{proof}
It suffices to show the following:
\begin{itemize}
\item [(a)] The graph consisting of two U-graphs connected by an $(h)$-grid, $h=1,2$, has a unique 1-immersion.
\item [(b)] In every 1-immersion $\varphi$ of $M$, the edges of distinct grids do not intersect.
\end{itemize}
Note that $M$ contains no subgraph which can be 1-immersed inside the boundary cycle of a 1-immersed U-subgraph of $M$ in a 1-immersion of $M$ as shown in Fig.~\ref{Fig.5.4} in dashed line. Hence, in every 1-immersion of $M$, the boundary edges of the U-subgraphs of $M$ are not crossed.
\begin{figure}
\caption{A 1-immersion of a subgraph.}
\label{Fig.5.4}
\end{figure}
We prove (a) and (b) in the following way. We consider a 1-immersed subgraph $W$ of $M$ (cf.\ Fig.~\ref{Fig.5.2}) consisting of two U-graphs $U_1$ and $U_2$ connected by an $(h)$-grid $\Gamma$, $h\in\{1,2\}$, and we show that in every 1-immersion $\varphi$ of $M$, the graph $W$ has the same 1-immersion and the edges of $\Gamma$ are not crossed by edges of other grids.
Suppose, for a contradiction, that $U_1$ and $U_2$ are 1-immersed under $\varphi$ as shown in Fig.~\ref{Fig.5.5}(a) (the situation described in the figure arises when $U_1$ is drawn clockwise and $U_2$ counter-clockwise (or vice versa)). Clearly, there are two basic paths $P_i$ and $P_j$ of $\Gamma$, $1\leq i<j\leq 7$, which do not intersect. Then the cycle shown in Fig.~\ref{Fig.5.5}(a) in thick line is embedded in the plane, a contradiction, since the cycle is crossed by 5 other basic paths of $\Gamma$, but the cycle has only $2h\leq 4$ edges that can be crossed by other edges. Hence, $U_1$ and $U_2$ are 1-immersed as shown in Fig.~\ref{Fig.5.2}.
\begin{figure}
\caption{Cycles of two adjacent 1-immersed U-subgraphs.}
\label{Fig.5.5}
\end{figure}
Suppose that in $\varphi$, a basic path $P_i$ of $\Gamma$ crosses a basic path $Q$ of some grid of $M$ exactly once. If $Q=P_j$ is a basic path of $\Gamma$, $j\not=i$ (see Fig.~\ref{Fig.5.5}(b)), then the closed curves $C_1$ and $C_2$ shown in Fig.~\ref{Fig.5.5}(b) by dashed cycles, are embedded in the plane and each of the other five basic paths of $\Gamma$ crosses an edge of $C_1$ or $C_2$, a contradiction, since $C_1$ and $C_2$ have $2h-2\leq 2$ edges in total which can be crossed by other edges. If $Q$ is a basic path of a grid $\Gamma'$ different from $\Gamma$, then there is a basic path $P_j$, $j\not= i$, of $\Gamma$ such that $P_j$ is not crossed by $P_i$ and $Q$. Hence, the cycle $C(P_i,P_j)$ is embedded and $Q$ crosses the edges of the cycle exactly once. Then for every other basic path $Q'$ of $\Gamma'$, the cycle $C(Q,Q')$ crosses $C(P_i,P_j)$ at least twice and $Q'$ crosses $P_i$ or $P_j$ (the edges of different U-subgraphs do not intersect). We have that the 7 basic paths of $\Gamma'$ cross $P_i$ and $P_j$, a contradiction. Hence, in $\varphi$, if two basic paths intersect, then they intersect twice. In particular, only basic paths of (2)-grids can intersect.
Now we claim the following:
(E) If in $\varphi$ two neighboring basic paths $P_{i-1}$ and $P_i$ of the $(2)$-grid $\Gamma$ do not intersect, then the edge $e$ joining the middle vertices of $P_{i-1}$ and $P_i$ lies inside the embedded cycle $C(P_{i-1},P_i)$.
Indeed, if $e$ lies outside $C(P_{i-1},P_i)$, then the 4-cycle shown in Fig.~\ref{Fig.5.5}(c) in thick line is crossed by 5 basic paths $P_r$, $r\not=i-1,i$, a contradiction.
Suppose that in $\varphi$, a basic path of $\Gamma$ crosses a basic path of a grid $\Gamma'$ twice (that is, $\Gamma$ and $\Gamma'$ are $(2)$-grids). Since the number of basic paths of $\Gamma$ is odd (namely, 7), it can not be that every basic path of $\Gamma$ crosses some other basic path of $\Gamma$ twice. Then there is a basic path of $\Gamma$ which is not crossed by other basic paths of $\Gamma$. Hence, if $\Gamma=\Gamma'$ (resp. $\Gamma\not=\Gamma'$), then there are two neighboring basic paths $P_{i-1}$ and $P_i$ of $\Gamma$ such that one of them, say, $P_{i-1}$, is crossed twice by some basic path $Q$ of $\Gamma$ (resp. $\Gamma'$), and the other basic path $P_i\not=Q$ is not crossed by $Q$. Then the cycle $C(P_{i-1},P_i)$ is embedded. By (E), the edge $e$ joining the middle vertices of $P_{i-1}$ and $P_i$ lies inside $C(P_{i-1},P_i)$. Denote by $C_1$ and $C_2$ the two embedded adjacent 4-cycles each of which consists of $e$ and edges of the 6-cycle $C(P_{i-1},P_i)$. The middle vertex of $Q$ lies outside $C(P_{i-1},P_i)$ and the two end vertices of $Q$ lie inside $C_1$ and $C_2$, respectively. The end vertices of $Q$ belong to two U-subgraphs connected by $\Gamma'$. Since the edges of the two U-subgraphs do not cross the edges of $C_1$ and $C_2$, we obtain that one of the U-subgraphs lies inside $C_1$ and the other lies inside $C_2$, a contradiction, since the two U-subgraphs are connected by at least four basic paths different from $Q$, $P_{i-1}$, and $P_i$. Hence, no basic path of $\Gamma$ crosses some other basic path twice.
We conclude that the basic paths of the grids connecting U-subgraphs do not intersect.
Now it remains to show that if $\Gamma$ is a $(2)$-grid, then the edges joining the middle vertices of the basic paths of $\Gamma$ are not crossed. Consider any two neighboring basic paths $P_{i-1}$ and $P_i$ of $\Gamma$. The cycle $C(P_{i-1},P_i)$ is embedded and, by (E), the edge $e$ joining the middle vertices of $P_{i-1}$ and $P_i$ lies inside $C(P_{i-1},P_i)$. It is easy to see that for every edge $e'$ of $M$ not belonging to U-subgraphs and different from $e$ and the edges of $C(P_{i-1},P_i)$, in the graph $M-e'$ the end vertices of $e'$ are connected by a path which consists of edges of U-subgraphs and basic paths of grids and which does not pass through the vertices of $C(P_{i-1},P_i)$. Now, if the edge $e$ is crossed by some other edge $e'$, then $e'$ is not an edge of a U-subgraph, and the end vertices of $e'$ lie inside the cycles $C_1$ and $C_2$, respectively (where $C_1$ and $C_2$ are defined as in the preceding paragraph), whose edges are not crossed by edges of U-subgraphs and basic paths, a contradiction.
Therefore, the edges of distinct grids of $M$ do not intersect and the graph $W$ has a unique 1-immersion. This completes the proof of the theorem.
\end{proof}
Now, given a plane graph $G$ every vertex of which has degree 3 or 4, we construct a graph $\overline{G}$ such that $G$ is 3-colorable if and only if $\overline{G}$ is 1-immersible. To obtain $\overline{G}$, we proceed as follows. First we construct a subgraph $G^{(1)}$ of $\overline{G}$ such that $G^{(1)}$ has a unique 1-immersion. The graph $G^{(1)}$ is obtained from a U-supergraph $W$ by adding some additional vertices and edges. By inspection of the subsequent figures which illustrate the construction of $G^{(1)}$ and its 1-immersion, the reader will easily identify the additional vertices and edges:
they do not belong to U-subgraphs and grids. Then one can easily check that, given the 1-immersion of $W$, the additional vertices and edges can be placed in the plane in a unique way to obtain a 1-immersion of $G^{(1)}$, hence $G^{(1)}$ has a unique 1-immersion also. Now, given the unique 1-immersion of $G^{(1)}$, to construct $\overline{G}$ we place some new additional paths ``between'' 1-immersed U-subgraphs of $G^{(1)}$. Notice that due to circle inversion one may assume that in a 1-planar drawing of $\overline{G}$ each U-subgraph is drawn in such a way that the outer boundary cycle contains all other vertices of the U-subgraph inside the region it bounds.
\begin{figure}
\caption{Constructing the graph $G^{(1)}$.}
\label{Fig.5.6}
\end{figure}
The graph $G^{(1)}$ is obtained from the plane graph $G$ if we replace every face $F$ of the embedding of $G$ by a U-graph $U(F)$ and replace every vertex $v$ by a \emph{vertex-block} $B(v)$ as shown at the top of Fig.~\ref{Fig.5.6}. At the bottom of Fig.~\ref{Fig.5.6} we show the designation of a (1)-grid used at the top of the figure and at what follows. The vertex-block $B(v)$ has a unique 1-immersion and is obtained from a U-supergraph by adding some additional vertices and edges. Fig.~\ref{Fig.5.6} shows schematically the boundary of $B(v)$ and Fig.~\ref{Fig.5.9} shows $B(v)$ in more detail. For a $k$-valent vertex $v$ of $G$, $3\leq k\leq 4$, the vertex-block $B(v)$ has $3k$ \emph{boundary vertices} labeled clockwise as $a,b,c,a,b,c,\ldots,a,b,c$; these vertices do not belong to U-subgraphs of $B(v)$. In Figs.~\ref{Fig.5.6} and \ref{Fig.5.9} we only show the case of a 3-valent vertex $v$; for a 4-valent vertex the construction is analogous -- there are three more boundary vertices labeled $a,b,c$, respectively.
We say that vertex-blocks $B(v)$ and $B(w)$ are adjacent if $v$ and $w$ are adjacent vertices of $G$.
\begin{figure}
\caption{The pending paths connecting adjacent vertex-blocks.}
\label{Fig.5.7}
\end{figure}
The graph $\overline{G}$ is obtained from $G^{(1)}$ if we take a collection of additional disjoint paths of length $\geq 1$ (they are called the \emph{pending paths}) and identify the end vertices of every path with two vertices, respectively, of $G^{(1)}$. The graph $G^{(1)}$ has a unique 1-immersion and the edges of the U-subgraphs of $G^{(1)}$ can not be crossed by the pending paths, hence the 1-immersed $G^{(1)}$ restricts the ways in which the pending paths can be placed in the plane to obtain a 1-immersion of $\overline{G}$.
Every pending path connects either boundary vertices of adjacent vertex-blocks or vertices of the same vertex-block.
The graph $\overline{G}$ is such that for any two adjacent vertex-blocks, there are exactly three pending paths connecting the vertices of the vertex-blocks. The paths have length 3 and are shown in Fig.~\ref{Fig.5.7}; we say that these pending paths are incident with the two vertex-blocks. Each of the three pending paths connects the boundary vertices labeled by the same letter: $a$, $b$, or $c$. For $h\in\{a,b,c\}$, the pending path connecting vertices labeled $h$ is called the \emph{$(h)$-path\/} connecting the two vertex-blocks. In Fig.~\ref{Fig.5.7} the $(h)$-path, $h\in\{a,b,c\}$, is labeled by the letter $h$.
Denote by $G^{(2)}$ the graph obtained from $G^{(1)}$ if we add all triples of pending paths connecting vertex-blocks $B(v), B(w)$, for all edges $vw\in E(G)$.
\begin{figure}
\caption{Pending paths of an $h$-family.}
\label{Fig.5.8}
\end{figure}
The graph $\overline{G}$ also satisfies the properties stated below. The pending paths connecting vertices of the same vertex-block $B(v)$ are divided into three families called, respectively, the $a\,$-, $b\,$-, and \emph{$c$-families} of $B(v)$. Given the 1-immersion of $G^{(1)}$, for every $h\in\{a,b,c\}$, the $h$-family of $B(v)$ has the following properties:
\begin{itemize}
\item [(i)] Every path $P$ of the $h$-family admits exactly two embeddings in the plane such that we obtain a 1-immersion of $G^{(2)}\cup P$.
\item [(ii)] The $h$-family consists of paths $P_1,P_2,\ldots,P_n$ such that the graph $G^{(2)}\cup P_1\cup P_2\cup\cdots\cup P_n$ has exactly two 1-immersions. In the two 1-immersions, every path $P_i$ uses its two embeddings. In one of the 1-immersions, paths of the $h$-family cross all $(h)$-paths incident with $B(v)$. In the other 1-immersion, the paths of the $h$-family do not cross any $(h)$-path incident with $B(v)$.
\end{itemize}
Fig.~\ref{Fig.5.8} shows fragments of the two 1-immersions of the union of $G^{(2)}$ and the pending paths of an $h$-family. In the figure, each of the six depicted edges of the family (drawn in thick line and labeled by $1,2,\ldots,6$, respectively) uses its two embeddings, one in each of the two 1-immersions.
If in a 1-immersion of $\overline{G}$, paths of an $h$-family of $B(v)$, $h\in\{a,b,c\}$, cross $(h)$-paths incident with $B(v)$, then we say that the $h$-family of $B(v)$ is \emph{activated} in the 1-immersion of $\overline{G}$.
\begin{figure}
\caption{A vertex-block and the activated $h$-families.}
\label{Fig.5.9}
\end{figure}
Figs.~\ref{Fig.5.9} and \ref{Fig.5.10} show a vertex-block $B(v)$ and the $h$-families of the vertex-block ($h=a,b,c$) in the case where $v$ is 3-valent (the generalization for a 4-valent vertex $v$ is straightforward). The pending paths of the three $h$-families are shown by thick lines and the three families are activated. To avoid cluttering a figure, Fig.~\ref{Fig.5.9} contains a fragment denoted by $R$ which is given in more detail in Fig.~\ref{Fig.5.10}. Recall that the grey areas in Figs.~\ref{Fig.5.9} and \ref{Fig.5.10} represent U-graphs.
\begin{figure}
\caption{The fragment $R$ of the vertex-block in Fig.~\ref{Fig.5.9}.}
\label{Fig.5.10}
\end{figure}
In Figs.~\ref{Fig.5.9} and \ref{Fig.5.10} we use designations of some fragments of $B(v)$; the designations are given at the left of Fig.~\ref{Fig.5.11} and the corresponding fragments are given at the right of Fig.~\ref{Fig.5.11} (that is, the connections between the grey areas in Figs.~\ref{Fig.5.9} and \ref{Fig.5.10} consist of seven basic paths). The reader can easily check that for every pending path $P$ of the three families, there are exactly two ways to embed the path so that we obtain a 1-immersion of $G^{(2)}\cup P$. The vertex-block $B(v)$ contains a 2-path connecting the vertices labeled 0 in Fig.~\ref{Fig.5.10} and a 1-path connecting the vertices labeled 1 in Fig.~\ref{Fig.5.10}; we call the paths the (0)- and (1)-blocking paths, respectively). For every $h\in\{a,b,c\}$, exactly one pending path of the $h$-family of $B(v)$ crosses a blocking path: the pending path has length 33, crosses the (1)-blocking (resp.\ (0)-blocking) path when the $h$-family is activated (resp.\ not activated), and the pending path in each of its two embeddings crosses exactly one pending path of each of the other two families. Fig.~\ref{Fig.5.10} shows the two embeddings of the pending 33-path of the $b$-family (one of them is in thick line, the other, when the family is not activated, is in dashed line). Note that Fig.~\ref{Fig.5.10} shows something that is not a 1-immersion, since all three families of paths are activated, and the $(1)$-blocking path is crossed three times.
\begin{figure}
\caption{The designations of fragments of the vertex-block $B(v)$.}
\label{Fig.5.11}
\end{figure}
Denote by $G^{(2)}_v$ the union of $G^{(2)}$ and the paths of all three $h$-families of $B(v)$. Now the reader can check that $B(v)$ and the $h$-families of $B(v)$ are constructed in such a way that the following holds:
(F) In every 1-immersion of $G^{(2)}_v$ (and, hence, of $\overline{G}$) exactly one $h$-family of $B(v)$ is activated, and for each $h\in\{a,b,c\}$, there is a 1-immersion of $G^{(2)}_v$ in which the $h$-family of $B(v)$ is activated.
By construction of $\overline{G}$, if $\overline{G}$ has a 1-immersion, then in this 1-immersion, for every vertex $v$ of $G$, exactly one $h$-family ($h\in\{a,b,c\}$) of $B(v)$ is activated; moreover, taking Fig.~\ref{Fig.5.7} into account, we obtain that the $h$-family with the same letter $h$ is not activated in any vertex-block adjacent to $B(v)$.
Now take a 1-immersion of $\overline{G}$ (if it exists) and assign every vertex $v$ of $G$ a color $h\in\{a,b,c\}$ such that the $h$-family of $B(v)$ is activated in the 1-immersion of $\overline{G}$. We obtain a proper 3-coloring of $G$ with colors $\{a,b,c\}$.
Conversely, take a proper 3-coloring of $G$ (if it exists) with colors $\{a,b,c\}$ and, for every vertex $v$ of $G$, if $h(v)$ is the color of $v$, take the $h(v)$-family of $B(v)$ to be activated and the other two families not to be activated. By the construction of $\overline{G}$, and by the mentioned properties of 1-immersions of its subgraphs $G^{(2)}_v$, it follows that we obtain a 1-immersion of $\overline{G}$.
When constructing $\overline{G}$, we choose the order of every U-subgraph such that every boundary vertex of the U-subgraph is incident with an edge not belonging to the U-subgraph. This implies that for every face $F$ of size $k$ of the plane embedding of $G$, the number of edges in the U-graph $U(F)$ is bounded by a constant multiple of $k$. Similarly, for each $v\in V(G)$, the union of $B(v)$ and its three $h$-families has constant size. Therefore, the whole construction of $\overline{G}$ can be carried out in linear time. This completes the proof of Theorem~\ref{thm:NPC1}.
\section{$k$-planarity testing for multigraphs}
\label{Sec:6}
A graph drawn in the plane is $k$-\emph{immersed} in the plane ($k\geq 1$) if any edge is crossed by at most $k$ other edges (and any pair of crossing edges cross only once). A graph is $k$-\emph{planar} if it can be $k$-immersed into the plane.
It appears that we can slightly modify the proof of Theorem~\ref{thm:NPC1} so as to obtain a proof that $k$-planarity testing ($k\geq 2$) for multigraphs is NP-complete. Below we give only a sketch of the proof; the reader can easily fill in the missing details.
Denote by $G(k)$, $\overline{G}(k)$, and $G^{(1)}(k)$, respectively, the multigraphs obtained from the graphs $G$, $\overline{G}$, and $G^{(1)}$ if we replace every edge by $k$ parallel edges. For an edge $e$ of the multigraphs denote by $H(e)$ the set consisting of $e$ and all other $k-1$ edges parallel to $e$. Denote by $\varphi$ the unique plane 1-immersion of $G^{(1)}$, and by $\varphi_k$ the plane $k$-immersion of $G^{(1)}(k)$ obtained from $\varphi$ if we replace every edge of $G^{(1)}$ by $k$ parallel edges.
\begin{lem}
\label{lem:6.1}
The multigraph $G^{(1)}(k)$, $k\geq 2$, has a unique plane $k$-immersion.
\end{lem}
\begin{proof}
We consider an arbitrary plane $k$-immersion $\psi$ of $G^{(1)}(k)$ and show that $\psi$ is $\varphi_k$.
\begin{figure}
\caption{Different plane 1-immersions of $G^{(1)}$.}
\label{Fig.6.1}
\end{figure}
First we show that if edges $e_1$ and $e_2$ of $G^{(1)}(k)$ cross in $\psi$, then each edge of $H(e_1)$ intersects every edge of $H(e_2)$. Suppose, for a contradiction, that an edge $e'_2$ of $H(e_2)$ does not intersect $e_1$ (see Fig.~\ref{Fig.6.1}(a)). Consider the 2-cell $D$ whose boundary consists of the edges $e_2$ and $e'_2$. Since $e_2$ and $e'_2$ can have at most $2k$ crossings in total, there are at most two vertices lying outside $D$ that are adjacent to vertices inside $D$. This means (see Fig.~\ref{Fig.6.1}(b)) that $G^{(1)}$ has two different plane 1-immersions (the edge of $G^{(1)}$ joining $u$ and $w$ has different positions in the two different plane 1-immersions), a contradiction. Hence, each edge of $H(e_1)$ intersects every edge of $H(e_2)$. Delete $k-1$ edges from every $k$ parallel edges. We obtain a plane 1-immersion of $G^{(1)}$, that is, $\varphi$. Hence $\psi$ is $\varphi_k$.
\end{proof}
The graph $\overline{G}(k)$ is obtained from $G^{(1)}(k)$ if we add the pending paths of $\overline{G}$ where every edge is replaced by $k$ parallel edges. Now, considering a pending path of an $h$-family, we have (see Fig.~\ref{Fig.6.2}, where each thick edge represents $k$ parallel edges) that if $e'\in H(e)$, then each of the edges $e$ and $e'$ is already crossed by $k$ edges of $H(e_1)$ and $H(e_2)$, respectively, thus the edges of the pending path incident with the vertex $v$ can not cross edges $e$ and $e'$, a contradiction. Hence all edges of $H(e)$ cross either $H(e_1)$ or $H(e_2)$. As a result, the $h$-families and the other pending paths of $\overline{G}(k)$ ``behave" in the same way as in $\overline{G}$. We conclude that $\overline{G}$ has a plane 1-immersion if and only if $\overline{G}(k)$ has a plane $k$-immersion. Since $\overline{G}$ has a plane 1-immersion if and only if $G$ has a proper 3-coloring, we get that $k$-planarity testing for multigraphs is NP-complete.
\begin{figure}
\caption{Edges of an $h$-family in $\overline{G}$.}
\label{Fig.6.2}
\end{figure}
If we restrict ourselves to simple graphs only, then to have a proof analogous to the proof of Theorem~\ref{thm:NPC1} we need simple graphs that have a unique plane $k$-immersion ($k\geq 2$), but the construction of such graphs seems to be nontrivial and does not readily follow from the construction of U-graphs in Sect.~\ref{sect:NPC}.
\subsection*{Acknowledgement.} The authors are grateful to an anonymous referee for pointing out that our proof in Section~\ref{sect:NPC} might be used to derive a corresponding result for $k$-immersions.
\end{document}
\begin{document}
\begin{center}
{\LARGE{\bf{ Data Driven Stability Analysis of\\ Black-box Switched Linear Systems}}}
{\large
\begin{tabular}{ccc}
Joris Kenanian$^\star$, Ayca Balkan$^\star$, Raphael M. Jungers$^\dagger$, Paulo Tabuada$^\star$
\end{tabular}
}
\texttt{[email protected],[email protected], \\[email protected], [email protected]} \\
{\large $^\star$Department of Electrical and Computer Engineering at \\ University of California, Los Angeles (UCLA)} \\
{\large $^\dagger$UCLouvain - ICTEAM Institute, Belgium} \\
\today
\end{center}
\vspace*{.2cm}
\begin{abstract}
\noindent
Can we conclude the stability of an unknown dynamical system from the knowledge of a finite number of snapshots of trajectories? We tackle this black-box problem for switched linear systems. We show that, for any given random set of observations, one can give probabilistic stability guarantees. The probabilistic nature of these guarantees implies a trade-off between their quality and the desired level of confidence. We provide an explicit way of computing the best stability-like guarantee, as a function of both the number of observations and the required level of confidence. Our proof techniques rely on geometrical analysis, chance-constrained optimization, and stability analysis tools for switched systems, including the joint spectral radius.
\end{abstract}
\section{Introduction}
Most of the existing work on stability of dynamical systems is model-based, i.e., it requires the knowledge of a model for the considered system. Although natural in many contexts, a model may not always be available. Cyber-physical systems are an illustration of such difficulty: they consist of a large number of components of different nature (modeled by differential equations, difference equations, hybrid automata, lookup tables, custom switching logic, low-level legacy code, etc.) engaged in complex interactions with each other. Closed-form models for these complex and heterogeneous systems are equally complex or even not available, and therefore one cannot use model-based techniques in these situations. The emphasis that industry places on simulation of such systems is then not surprising, since it is always possible to simulate them despite their complexity. This raises the question of whether one can provide formal guarantees about certain properties of these complex systems, based solely on information obtained via their simulations. We focus here on one of the most important of such properties in the context of control theory: stability.
\noindent
More formally, we consider a time-varying discrete-time dynamical system of the form
\begin{equation}\label{eq:dynamicalsystemGeneral}
x_{k+1} = f(k, x_k),
\end{equation}
where $x_k \in X$ is the state of the system and $k \in \mathbb{N}$ is the time index. For the rest of the paper, we use the term \emph{black-box} to refer to systems where we do not have access to the model, i.e., to $f$, yet we can indirectly learn information about $f$ by observing traces (finite trajectories) of length $l$ (in the particular case of $l=1$, these traces (trajectories) become pairs of points $(x_k, x_{k+1})$ as defined in \eqref{eq:dynamicalsystemGeneral}). We start with the following question to serve as a stepping stone: For some $l \in \mathbb{N}_{>0}$, given $N$ traces of length $l$, $(x_{i,0},x_{i,1},\dots, x_{i,l})$, $1 \leq i \leq N$, belonging to the behavior of the system \eqref{eq:dynamicalsystemGeneral} (i.e., $x_{i,k+1} = f(k, x_{i,k})$ for any $0 \leq k \leq l-1$ and any $1 \leq i \leq N$), what can we say about the stability of System \eqref{eq:dynamicalsystemGeneral}?
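For concreteness, observations of this form could be gathered as follows; this is a minimal sketch for illustration only, where \texttt{f} stands for the black-box simulator of \eqref{eq:dynamicalsystemGeneral} and \texttt{sample\_x0} for any routine producing initial conditions (both are placeholders, not objects defined in this paper).
\begin{verbatim}
def collect_traces(f, sample_x0, N, l):
    """Collect N traces of length l from the black-box map x_{k+1} = f(k, x_k)."""
    traces = []
    for _ in range(N):
        x = sample_x0()
        trace = [x]
        for k in range(l):
            x = f(k, x)          # one black-box simulation step
            trace.append(x)
        traces.append(trace)
    return traces
\end{verbatim}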
\noindent
A potential approach to this problem is to first identify the dynamics, i.e., the function $f$, and then apply existing techniques from the model-based stability analysis literature. If System \eqref{eq:dynamicalsystemGeneral} is linear, its identification and stability analysis have been extensively studied. If $f$ is not a linear function and in particular if the system is a switched system, there are two main reasons behind our quest to directly work on system behaviors and bypass the identification phase:
\begin{itemize}
\item Identification can potentially introduce approximation errors, and can have a high computational complexity. In particular, this is the case for switched systems, for which the identification problem is NP-hard \cite{lauer};
\item Even when the function $f$ is known, in general, stability analysis is a very difficult problem \cite{stabilityHard1}.
\end{itemize}
\noindent
A fortiori, the combination of these two steps in an efficient and robust way seems far from obvious. In this work, we take a first step into more complex systems than the linear case by considering the class of switched linear systems. Although we restrict ourselves to such systems, we believe that the presented results can be extended to more general models.
\noindent
In recent years, an increasing number of researchers started addressing various verification and design problems in control of black-box systems \cite{bianchini, balkan, mitra, mitra2, kozarev2016case}. In particular, the initial idea behind this paper was influenced by the recent efforts in \cite{balkan, topcu, kapinski}, and \cite{lazar} on using simulation traces to find Lyapunov functions for systems with known dynamics. In these works, the main idea is that if one can construct a Lyapunov function candidate decreasing along several finite trajectories starting from different initial conditions, it should hopefully decrease along every other trajectory. Then, once a Lyapunov function candidate is constructed, this intuition is put to the test by verifying the candidate function either via off-the-shelf tools as in \cite{topcu} and \cite{kapinski}, or via sampling-based techniques as in \cite{lazar}. This also relates to almost-Lyapunov functions introduced in \cite{liberzon}, which presents a relaxed notion of stability proved via Lyapunov functions decreasing everywhere except on a small set. These approaches cannot be directly applied to black-box systems, where, as in our framework, we do not have access to the dynamics. However, they are based on the following idea that we address in this paper: By observing that a candidate Lyapunov function decreases on a large number of observations, we empirically build a certain confidence that this function is a bona fide Lyapunov function. \emph{Can we translate this empirical observation on a finite set of points into a confidence that this Lyapunov function decreases in the whole state space?}
\noindent
Note that, even in the case of a linear system, the connection between these two beliefs is nontrivial. In fact, one can easily construct an example where a candidate Lyapunov function decreases everywhere on its level sets, except for an arbitrarily small subset, yet almost all trajectories diverge to infinity. For example, the system
\[
x_{k+1} = \begin{bmatrix}
0.14 & 0\\
0 & 1.35
\end{bmatrix}x_k,
\]
admits a Lyapunov function candidate on the unit circle except on the two red areas shown in Fig.~\ref{fig:levelset}. Moreover, the size of this ``violating set'' can be made arbitrarily small by changing the magnitude of the unstable eigenvalue. Nevertheless, the only trajectories that do not diverge to infinity are those starting on the stable eigenspace, which has zero measure.
\begin{figure}
\caption{A simple dynamics and the level set of an ``almost Lyapunov function''. Even though this function decreases at almost all points in its level set, almost all trajectories diverge to infinity.}
\label{fig:levelset}
\end{figure}
\noindent
In this work, we take a first step toward this stability inference problem, in the case of switched linear systems. In addition to the phenomenon exhibited in the above example, switched linear systems seem a priori challenging for black-box stability analysis, as both the identification and the ``white-box'' stability analysis are hard for these systems. Deciding stability of a switched linear system amounts to deciding whether its \emph{Joint Spectral Radius} is smaller than $1$, which is extremely hard even in the white-box setting (see, e.g., \cite{jungers_lncis}, Chapter 2, for various complexity results).
\noindent
We present an algorithm to bound the JSR of an unknown switched linear system from a finite number $N$ of observations of traces (trajectories). This algorithm partially relies on tools from the random convex optimization literature (also known as chance-constrained optimization, see \cite{campi,nemirovski,campi-garatti}), and provides an upper bound on the JSR with a user-defined confidence level. As $N$ increases, this bound gets tighter. Moreover, we give a closed-form expression for the exact trade-off between the tightness of this bound and the number of samples. In order to assess the quality of our upper bound, the algorithm also provides a deterministic lower bound. Finally, we provide a guarantee of asymptotic convergence between the upper and the lower bound, for large $N$.
\noindent
The organization of the paper is as follows: In Section~\ref{sec:preliminaries}, we introduce the problem studied and provide the necessary background in stability of switched linear systems. Then, based on finite observations for a given switched linear system, we give in Section~\ref{sec:lowerBound} a deterministic lower bound for the JSR, before presenting in Section~\ref{sec:upperbound} the main contribution of this paper, which consists in a probabilistic upper bound. We illustrate the performance of the presented techniques with some experiments in Section~\ref{sec:experiments}, and we propose future extensions of this work in Section~\ref{sec:conclusions}.
\section{Preliminaries}\label{sec:preliminaries}
\subsection{Notations}
We consider the finite-dimensional Hilbert space $(\mathbb{R}^n,\ell_2)$, $n \in \mathbb{N}_{> 0}$, where $\ell_2$ is the classical Euclidean norm. We denote by $\lVert x \rVert$ the $\ell_2$-norm of $x \in \mathbb{R}^n$. For a distance $d$ on $\mathbb{R}^n$, the distance between a set $X \subset \mathbb{R}^n$ and a point $p \in \mathbb{R}^n$ is given by $d(X,p) := \inf_{x \in X} d(x,p)$. Note that the map $p \mapsto d(X,p)$ is continuous on $\mathbb{R}^n$. Given a set $X \subset \mathbb{R}^n$, $\partial X$ denotes the boundary of the set $X$.
\noindent
We also denote the set of linear functions from $\mathbb{R}^n$ to $\mathbb{R}^n$ by $\mathcal{L}(\mathbb{R}^n)$, and the set of real symmetric matrices of size $n$ by $\mathcal{S}^n$. In particular, the set of positive definite matrices is denoted by $\mathcal{S}^n_{++}$. We write $P \succ 0$ to state that $P$ is positive definite, and $P \succeq 0$ to state that $P$ is positive semi-definite. Given a set $X \subset \mathbb{R}^n$, we denote by $\wp(X)$ its powerset (i.e., the set of all its subsets), and by $X^{\mathbb{N}}$ the set of all possible sequences $(x_n)_{n \in \mathbb{N}}$, $x_n \in X$. For any $r \in \mathbb{R}_{> 0}$, we write \mbox{$rX$}$:= \{rx : x \in X\}$ to denote the scaling of ratio $r$ of $X$. We denote by $\mathbb{B}$ (respectively $\mathbb{S}$) the ball (respectively sphere) of unit radius centered at the origin. We denote the ellipsoid described by the matrix $P \in \mathcal{S}^n_{++}$ as $E_P$, i.e., $E_P:= \{x \in \mathbb{R}^n: x^T P x = 1\}$. Finally, we denote the spherical projector on $\mathbb{S}$ by $\Pi_{\mathbb{S}}(x) := x/\Vert x\Vert$.
\noindent
In this paper, we only consider simple uniform probability distributions, and we believe that all the concepts can be easily understood intuitively. However, for the sake of rigor, we now develop the proper measure-theoretic setting on which our results build. We consider in this work the uniform spherical measure on $\mathbb{S}$, denoted by $\sigma^{n-1}$ ($n$ is the dimension of the space in which $\mathbb{S}$ is embedded), and derived from the Lebesgue measure $\lambda$ as follows. For an ellipsoid centered at the origin and for any of its subsets $\mathcal{A}$, the \emph{sector} defined by $\mathcal{A}$ is the subset $$\{t a : a \in \mathcal{A}, \ t \in [0,1]\} \subset \mathbb{R}^n.$$ We denote by $E_P^{\mathcal{A}}$ the sector induced by $\mathcal{A} \subset E_P$. In the particular case of the unit sphere, we instead write $\mathbb{S}^{\mathcal{A}}$. We can notice that $E_P^{E_P}$ is the volume in $\mathbb{R}^n$ enclosed by $E_P$: $E_P^{E_P} = \{x \in \mathbb{R}^n: x^TPx \leq 1\}$. The spherical Borel $\sigma$-algebra, denoted by $\mathcal{B}_{\mathbb{S}}$, is defined by $$\mathcal{A} \in \mathcal{B}_{\mathbb{S}} \iff \mathbb{S}^{\mathcal{A}} \in \mathcal{B}_{\mathbb{R}^n}.$$ We provide $(\mathbb{S},\mathcal{B}_{\mathbb{S}})$ with the classical, unsigned and finite uniform spherical measure $\sigma^{n-1}$ defined by
$$\forall\ \mathcal{A} \in \mathcal{B}_{\mathbb{S}},\, \sigma^{n-1}(\mathcal{A}) = \frac{\lambda(\mathbb{S}^{\mathcal{A}})}{\lambda(\mathbb{B})}. $$
In other words, the spherical measure of a subset of the sphere is related to the Lebesgue measure of the sector of the unit ball it induces. Notice that $\sigma^{n-1}(\mathbb{S}) = 1$.
\noindent
Since $P \in \mathcal{S}_{++}^n$, we recall that it can be written in its Cholesky form \begin{equation}\label{choleski}P = L^TL,\end{equation} where $L$ is an upper triangular matrix. Note that $L^{-1}$ maps the elements of $\mathbb{S}$ to $E_P$. Then, we define the measure $\sigma_P$ on the ellipsoid, on the $\sigma$-algebra $\mathcal{B}_{E_P}:=L^{-1}\mathcal{B}_\mathbb{S}$, by $\forall\, \mathcal{A} \in \mathcal{B}_{E_P},\, \sigma_P({\mathcal{A}}) = \sigma^{n-1}(L\mathcal{A})$.
\noindent
For $m \in \mathbb{N}_{>0}$, we denote by $M$ the set $M = \{1,2,\dots,m\}$. The set $M$ is provided with the classical $\sigma$-algebra associated to finite sets: $\Sigma_M = \wp(M)$. We provide $(M, \Sigma_M)$ with the uniform measure $\mu_M$. For any $l \in \mathbb{N}_{>0}$, we denote by $M^l$ the $l$-fold Cartesian product of $M$, i.e., $M^l = \{(i_1,\dots,i_l): i_j \in M,\ 1 \leq j \leq l\}$. We define $\Sigma_{M^l}$ as the product $\bigotimes^l \Sigma_M$ (which is here equal to $\wp(M^l)$), and we provide $(M^l, \Sigma_{M^l})$ with the uniform product measure $\mu_{M^l} = \otimes^l \mu_M$.
\noindent
We can now define $Z_l = \mathbb{S} \times M^l$ as the Cartesian product of $\mathbb{S}$ and $M^l$. We provide the set $Z_l$ with the product $\sigma$-algebra $\mathcal{B}_{\mathbb{S}} \bigotimes (\Sigma_{M^l})$ generated by $\mathcal{B}_{\mathbb{S}}$ and $\Sigma_{M^l}$: $\Sigma = \sigma( \pi_{\mathbb{S}}^{-1}(\mathcal{B}_{\mathbb{S}}), \pi_{M^l}^{-1}(\Sigma_{M^l}))$, where $\pi_{\mathbb{S}}: Z_l \to \mathbb{S}$ and $\pi_{M^l}: Z_l \to M^l$ are the standard projections. On $(Z_l,\mathcal{B}_{\mathbb{S}} \bigotimes (\Sigma_{M^l}) )$, we define the product measure $\mu_l = \sigma^{n-1} \otimes \mu_{M^l}$. Note that, $\mu_l$ is the uniform probability measure on $Z_l$.
\noindent
We will also need two classical functions to compute our probabilistic upper bound, known as the \emph{incomplete beta function} and the \emph{regularized incomplete beta function}.
\begin{de}[\cite{handbook}, 6.6.1]
The incomplete beta function, denoted by $B$, is given by
\begin{equation*}
B: \left\{
\begin{split}
&\mathbb{R}_{> 0} \times \mathbb{R}_{> 0} \times \mathbb{R}_{> 0} \to \mathbb{R}_{\geq 0}\\
&(x,a,b) \mapsto B(x;a,b) = \int_0^x t^{a-1} (1-t)^{b-1} dt.
\end{split}
\right.
\end{equation*}
\end{de}
\begin{de}[\cite{handbook}, 6.6.2]
The regularized incomplete beta function, denoted by $I$, is given by
\begin{equation*}
I: \left\{
\begin{split}
&\mathbb{R}_{> 0} \times \mathbb{R}_{> 0} \times \mathbb{R}_{> 0} \to \mathbb{R}_{\geq 0}\\
&(x,a,b) \mapsto I(x;a,b) = \frac{B(x;a,b)}{B(1;a,b)} .
\end{split}
\right.
\end{equation*}
\end{de}
\noindent
For given values of parameters $a >0$ and $b>0$, the inverse of the regularized incomplete beta function with parameters $a,b$, denoted by $I^{-1}(y;a,b)$, is the function whose output is $x>0$ such that $I(x;a,b) = y$ \cite{betafct}.
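\noindent
These functions are implemented in standard numerical libraries. The following minimal sketch (Python with SciPy, a tooling choice of ours rather than anything prescribed in this paper) evaluates $B$, $I$ and $I^{-1}$; note that SciPy's \texttt{betainc} is already the regularized version and takes its arguments in the order $(a,b,x)$.
\begin{verbatim}
from scipy.special import beta, betainc, betaincinv

def B(x, a, b):
    """Incomplete beta function B(x; a, b) (unregularized)."""
    return betainc(a, b, x) * beta(a, b)

def I(x, a, b):
    """Regularized incomplete beta function I(x; a, b) = B(x;a,b)/B(1;a,b)."""
    return betainc(a, b, x)

def I_inv(y, a, b):
    """Inverse of I in its first argument: the x such that I(x; a, b) = y."""
    return betaincinv(a, b, y)

# Quick consistency check.
x = I_inv(0.3, 2.0, 5.0)
assert abs(I(x, 2.0, 5.0) - 0.3) < 1e-9
\end{verbatim}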
\subsection{Stability of Switched Linear Systems}\label{sec:stab}
A \emph{switched linear system}, defined by a set of modes (matrices) \mbox{$\mathcal{M}= \{A_i, i \in M \}$}, is a time-varying discrete-time dynamical system of the form \eqref{eq:dynamicalsystemGeneral}, with $f(k,x_k) = A_{\tau(k)}x_k$, that is:
\begin{equation}\label{eq:switchedSystem}
x_{k+1} = A_{\tau(k)}x_k,
\end{equation}
for any $k \in \mathbb{N}$. Here, the signal $\tau \in M^{\mathbb{N}}$ is called the \emph{switching sequence}, and can take arbitrary values in $M$. Note that such systems are homogeneous, i.e., for any $\gamma > 0$, $f(k,\gamma x_k) = \gamma f(k,x_k)$. In this paper, we assume that we have access neither to $\mathcal{M}$ nor to the switching sequence. The only information available is (an upper bound on) $m$, the cardinality of $\mathcal{M}$.
\noindent
We are interested in the \emph{uniform asymptotic stability} of this system, that is, we want to guarantee the following property: $$\forall \tau \in M^{\mathbb{N}}, \, \forall x_0 \in \mathbb{R}^n, \, \lVert x_k \rVert \xrightarrow[k \to \infty]{} 0.$$
The joint spectral radius of a set of matrices $\mathcal{M}$ characterizes the stability of the underlying switched linear system \eqref{eq:switchedSystem} defined by $\mathcal{M}$ \cite{jungers_lncis}. This quantity is an extension to switched linear systems of the classical spectral radius for linear systems. It is the maximum asymptotic growth rate of the norm of the state under the dynamics \eqref{eq:switchedSystem}, over all possible initial conditions and sequences of matrices of $\mathcal{M}$.
\begin{de}[from \cite{jungers_lncis}]
Given a finite set of matrices \mbox{$\mathcal{M} \subset \mathbb{R}^{n\times n}$}, its \emph{joint spectral radius} (JSR) is given by $$\rho(\mathcal{M}) =\lim_{k \rightarrow \infty} \max_{i_1,\dots, i_k} \left\{ ||A_{i_1} \dots A_{i_k}||^{1/k}: A_{i_j} \in \mathcal{M}\ \right\}. $$
\end{de}
\begin{property}[\cite{jungers_lncis}, Corollary 1.1]
Given a finite set of matrices $\mathcal{M}$, the corresponding switched dynamical system is stable if and only if $\rho(\mathcal{M})<1$.
\end{property}
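\noindent
To build some intuition on this quantity, recall that for every fixed $k$ the value $\max \lVert A_{i_1} \cdots A_{i_k}\rVert^{1/k}$ is an upper bound on $\rho(\mathcal{M})$, while the spectral radius of any single product of length $k$, raised to the power $1/k$, is a lower bound. The brute-force sketch below (plain NumPy; the toy matrices are our own illustration, and the enumeration scales exponentially in $k$) computes both bounds.
\begin{verbatim}
import itertools
import numpy as np

def jsr_brute_force_bounds(matrices, k):
    """Enumerate all m^k products of length k and return (lower, upper):
       lower = max over products of (spectral radius)^(1/k),
       upper = max over products of (spectral norm)^(1/k)."""
    n = matrices[0].shape[0]
    lower, upper = 0.0, 0.0
    for idx in itertools.product(range(len(matrices)), repeat=k):
        prod = np.eye(n)
        for i in idx:
            prod = matrices[i] @ prod
        lower = max(lower, np.abs(np.linalg.eigvals(prod)).max() ** (1.0 / k))
        upper = max(upper, np.linalg.norm(prod, 2) ** (1.0 / k))
    return lower, upper

# Toy example (ours, for illustration only): a diagonal mode and a scaled rotation.
A1 = np.array([[0.14, 0.0], [0.0, 1.35]])
A2 = np.array([[0.0, 0.9], [-0.9, 0.0]])
for k in (1, 2, 4, 6):
    print(k, jsr_brute_force_bounds([A1, A2], k))
\end{verbatim}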
\begin{theos}[\cite{jungers_lncis}, Prop. 1.3]\label{prop:scaling}
Given a finite set of matrices $\mathcal{M}$, and any invertible matrix $T$, $\rho(\mathcal{M})=\rho(T \mathcal{M} T^{-1})$, i.e., the JSR is invariant under similarity transformations (and is a fortiori a homogeneous function: $\forall \gamma > 0$, $\rho \left( \mathcal{M}/\gamma \right) = \rho(\mathcal{M})/\gamma$).
\end{theos}
\noindent
The JSR also relates to a tool classically used in control theory to study stability of systems: Lyapunov functions. We will consider here a family of such functions that is particularly adapted to the case of switched linear systems.
\begin{de}
Consider a finite set of matrices $\mathcal{M} \subset \mathbb{R}^{n \times n}$. A \emph{common quadratic form (CQF)} for a system \eqref{eq:switchedSystem} with set of matrices $\mathcal{M}$, is a positive definite matrix $P \in \mathcal{S}_{++}^n$ such that for some $\gamma \geq 0$,
\begin{equation}\label{eq:lyap}
\forall A \in \mathcal{M}, A^T P A \preceq \gamma^2P.
\end{equation}
\end{de}
\noindent
CQFs are useful because they can be computed, when they exist, with semidefinite programming (see \cite{boyd}), and they constitute a stability guarantee (when $\gamma < 1$, they are Lyapunov functions) for switched systems as we formalize next.
\begin{theos}[\cite{jungers_lncis}, Prop. 2.8 and Thm. 2.11]\label{thm:cqlf}
Consider a finite set of matrices $\mathcal{M}$.
\begin{itemize}
\item If there exist $\gamma \geq 0$ and $P \succ 0$ such that Equation \eqref{eq:lyap} holds, then $\rho(\mathcal{M}) \leq \gamma$.
\item If $\rho(\mathcal{M}) < \frac{\gamma}{\sqrt{n}},$ there exists a CQF, $P$, such that $\forall A \in \mathcal{M},\, A^T P A \preceq \gamma^2 P.$
\end{itemize}
\end{theos}
\noindent
For any $\gamma < 1$, this theorem provides both a Lyapunov and a \emph{converse Lyapunov} result: if such a CQF exists, then our system is stable; if, on the contrary, no such CQF exists, one may conclude the lower bound $\rho(\mathcal{M}) \geq \gamma/\sqrt{n}$. We thus obtain an approximation algorithm for the JSR. It turns out that one can further refine this technique, in order to improve the error factor $1/\sqrt{n}$, and asymptotically get rid of it. This is a well-known technique for the ``white-box'' computation of the JSR, which we summarize in the following corollary.
\begin{cor}\label{cor:approx-products}
Fix $\gamma \geq 0$. For any finite set of matrices $\mathcal{M}$ such that $\rho(\mathcal{M}) < \frac{\gamma}{\sqrt[2l]{n}}$, there exists a CQF for $\mathcal{M}^l:=\{\Pi_{j=1}^l A_{i_j}: A_{i_j} \in \mathcal{M}\}$, that is, a $P\succ 0$ such that:
\begin{equation} \label{eq:LMI}
\forall\ \mathbf{A} \in \mathcal{M}^l,\, \mathbf{A}^T P \mathbf{A} \preceq \gamma^{2l} P.
\end{equation}
\end{cor}
\begin{proof}
It is easy to see from the definition of the JSR that $\rho(\mathcal{M}^l)=\rho(\mathcal{M})^l$. Thus, applying Theorem \ref{thm:cqlf} to the finite set $\mathcal{M}^l,$ one directly obtains the corollary.
\end{proof}
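\noindent
When the matrices are known (the ``white-box'' setting), checking the condition of Corollary~\ref{cor:approx-products} for a fixed $\gamma$ is a semidefinite feasibility problem. The sketch below uses Python with CVXPY and the SCS solver; these are modeling choices of ours, the paper only relies on the fact that such LMIs can be handled by semidefinite programming \cite{boyd}.
\begin{verbatim}
import itertools
import numpy as np
import cvxpy as cp

def has_cqf(matrices, gamma, l=1, solver=cp.SCS):
    """Look for P >= I with A^T P A <= gamma^(2l) P for every product A of
    length l of the given matrices.  Returns P (as a NumPy array) if the
    LMIs are feasible up to solver tolerance, and None otherwise."""
    n = matrices[0].shape[0]
    P = cp.Variable((n, n), symmetric=True)
    constraints = [P >> np.eye(n)]
    for idx in itertools.product(range(len(matrices)), repeat=l):
        prod = np.eye(n)
        for i in idx:
            prod = matrices[i] @ prod
        constraints.append(prod.T @ P @ prod << gamma ** (2 * l) * P)
    problem = cp.Problem(cp.Minimize(0), constraints)
    problem.solve(solver=solver)
    ok = problem.status in (cp.OPTIMAL, cp.OPTIMAL_INACCURATE)
    return P.value if ok else None
\end{verbatim}
\noindent
Combining this check with a bisection on $\gamma$ yields the classical white-box approximation of the JSR mentioned above.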
\noindent
Note that the smaller $\gamma$ is in Theorem~\ref{thm:cqlf}, the tighter the upper bound we get on $\rho(\mathcal{M})$. In order to properly analyze our setting, where the matrices are unknown, let us reformulate \eqref{eq:LMI} in another form. For any $l \in \mathbb{N}_{>0}$, we can consider the optimal solution $\gamma^*$ of the following optimization problem:
\begin{equation}\label{eqn:campiOpt1}
\begin{aligned}
\hspace{-0.3cm}\text{min}_{\gamma, P} &\qquad \gamma \\
\hspace{-0.3cm}\text{s.t.} \quad &(\mathbf{A} x)^T P \mathbf{A} x \leq \gamma^{2l} x^T P x, \mathbf{A} \in \mathcal{M}^l, \,\forall x \in \mathbb{R}^n\\
&P \succ 0.
\end{aligned}
\end{equation}
\noindent
Notice that we can restrict the set of constraints by restricting $x$ to $\mathbb{S}$, due to the homogeneity of the system. Homogeneity indeed implies that it is sufficient to show the decrease of a CQF on an arbitrary set enclosing the origin. Hence the optimization problem \eqref{eqn:campiOpt1} is equivalent to the optimization problem:
\begin{equation}\label{eqn:campiOpt2}
\begin{aligned}
\hspace{-0.3cm}\text{min}_{\gamma, P} &\qquad \gamma \\
\hspace{-0.3cm}\text{s.t.} \quad &(\mathbf{A} x)^T P \mathbf{A} x \leq \gamma^{2l} x^T P x, \mathbf{A} \in \mathcal{M}^l, \,\forall x \in \mathbb{S}\\
&P \succ 0.
\end{aligned}
\end{equation}
\noindent
This formulation provides a clear algebraic formalization of our black-box problem: our goal amounts to finding a solution to a convex problem with infinitely many constraints, while only sampling a finite number of them.
\subsection{Problem Formulation}
Let us now formally present the problem addressed in this paper. We recall that we only observe $N$ finite traces of length $l \in \mathbb{N}_{>0}$, i.e., $N$ sequences of states $(x_k,x_{k+1},\dots,x_{k+l})$ where $x_{k+i+1}$ and $x_{k+i}$ are related by \eqref{eq:switchedSystem}. Note that such sequences of states depend both on the initial state $x_k$ and the switching sequence $\tau(k)$ which is assumed to be unknown. In other words, we do not observe the mode or the matrices used to produce the trajectories. We do not have access to the process through which the system picks the modes. The user's knowledge is limited to the number of modes (or an upper bound on this number) and the dimension of the system. We assume that these trajectories are generated from a finite number of initial conditions $x_{i,0} \in \mathbb{S}$, $1 \leq i \leq N$ enumerating the observations, and that a random sequence of $l$ matrices is applied to each of these points. We randomly draw the initial conditions from $\mathbb{S}$, observe them and the $l$ subsequent state values produced by the system. Sampling the initial conditions from $\mathbb{S}$ is without loss of generality, since any trajectory in $\mathbb{R}^n$ can be rescaled so that $x_{i,0} \in \mathbb{S}$, by homogeneity of the system. To a given observed trajectory $(x_k,x_{k+1},\dots,x_{k+l})$, we can associate the corresponding probability event $(x_k,j_1,\dots,j_l)$ which is another $(l+1)$-tuple. Formally, with a fixed probability space $(\Omega, \mathcal{F}, \mathbb{P})$, we consider the random variables $X_0: \Omega \mapsto \mathbb{S}$ and $\theta_i: \Omega \mapsto M$, for $1 \leq i \leq l$, such that $X_0$ has uniform distribution, and the $\theta_i$ are independent and also have uniform distribution. Thus, to a given random finite set of trajectories of length $l$, we can associate an underlying uniform sample of $N$ such $(l+1)$-tuples in $Z_l = \mathbb{S} \times M^l$, denoted by
\begin{equation}\label{eq:omega}
\omega_N := \{(x_{i,0}, j_{i,1}, \dots, j_{i,l}), 1 \leq i \leq N \} \subset Z_l
\end{equation}
In other words, a set of $N$ available observations $\{(x_{i,0},x_{i,1}, \dots, x_{i,l}), 1\leq i\leq N \}$ can be rewritten, for all $1 \leq i \leq N$ and $1 \leq k \leq l$, as $x_{i,k}= A_{j_{i,k}} \dots A_{j_{i,1}} x_{i,0}$, with the $j_{i,k}$ being unobserved variables that take their values in $M$.
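\noindent
For concreteness, the sketch below (Python/NumPy, our own illustration) generates such a sample: each initial condition is drawn uniformly on $\mathbb{S}$ by normalizing a standard Gaussian vector, and $l$ mode indices are drawn independently and uniformly; an observer would only see the resulting states, not the indices.
\begin{verbatim}
import numpy as np

def sample_observations(matrices, N, l, seed=None):
    """Simulate N traces of length l of the switched system.  The matrices
    and the drawn mode indices are only used to generate the data; a
    black-box observer would see the states x_{i,0}, ..., x_{i,l} only."""
    rng = np.random.default_rng(seed)
    n = matrices[0].shape[0]
    traces, hidden_modes = [], []
    for _ in range(N):
        x = rng.standard_normal(n)
        x /= np.linalg.norm(x)                       # uniform point on the sphere
        modes = rng.integers(len(matrices), size=l)  # i.i.d. uniform mode indices
        states = [x]
        for j in modes:
            states.append(matrices[j] @ states[-1])
        traces.append(np.array(states))
        hidden_modes.append(modes)                   # kept only for bookkeeping
    return traces, hidden_modes
\end{verbatim}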
\begin{rem}\label{rem:probGeneralize}
Let us motivate our assumption on the uniform drawing of the matrices. We assumed that we only have access to random observations of the state of the system, and ignore the process that generates them. In particular, we ignore the process that selects the modes at each time step, and model it as a random process. We suppose that each mode is active with nonzero probability: the problem would indeed not be solvable otherwise, since the system would be unidentifiable with probability $1$, and we could never observe some of its behaviors. We take this distribution to be uniform since we cannot say that some modes are more likely a priori. Our results extend to the case where the distribution is not uniform, as long as we have a nonzero lower bound on the probability of each mode.
\end{rem}
\noindent
In this work, we aim at understanding what type of guarantees one can obtain on the stability of System \eqref{eq:switchedSystem} (that is, on the JSR of $\mathcal{M}$) from the sample \eqref{eq:omega}. More precisely, we answer the following problem:
\begin{problem}\label{problem}
Consider a finite set of matrices, $\mathcal{M},$ describing a switched system \eqref{eq:switchedSystem}, and suppose that one has a set of $N$ observations $(x_{i,0},x_{i,1},...,x_{i,l}), i=1,...,N$ corresponding to an event $\omega_N,$ sampled from $Z_l$ with the uniform measure $\mu_l$.
\begin{itemize}
\item For a given confidence level $\beta \in [0,1)$, provide an upper bound on $\rho(\mathcal{M})$, that is, a number $\overline{\rho(\omega_N)}$ such that $$\mu_l^N \left( \{\omega_N: \ \rho(\mathcal{M}) \leq \overline{\rho(\omega_N)} \} \right) \geq \beta.$$
\item For the same given level of confidence $\beta$, provide a lower bound $\underline{\rho(\omega_N)}$ on $\rho(\mathcal{M})$.
\end{itemize}
\end{problem}
\begin{rem}
We will see in Section~\ref{sec:lowerBound} that we can even derive a deterministic lower bound for a given (sufficiently high) number of observations.
\end{rem}
\noindent
We will see in Theorem~\ref{thm:mainTheorem} that for any level of confidence $\beta$, it is always possible to provide an upper bound for Problem~\ref{problem} which tends to the JSR when the number of sampled points increases. In particular, for any (large enough) number of samples, it is always possible to provide such an upper bound that is finite. Thus, we obtain a hypothesis test for the question ``Is the system stable?'' with an a priori fixed probability of false positive (equal to $1 - \beta$), and a probability of false negative that tends to zero when the number of samples increases.
\noindent
The key insight is to leverage the fact that conditions for the existence of a CQF for \eqref{eq:switchedSystem} can be obtained by considering a finite number of traces in $\mathbb{R}^n$ of the form $(x_k,x_{k+1}, \dots, x_{k+l})$. Developing that insight leads us to the following algorithm, that is the main result of our work and that solves Problem~\ref{problem}:
\begin{alg}[Probabilistic upper bound]
\
\newline
\textbf{Input:} observations produced by a uniform random sample $\omega_N \subset Z_l$ of size $N \geq \frac{n(n+1)}{2}+1$;\\
\textbf{Input:} $\beta$, the desired level of certainty;\\
\textbf{Compute:} a candidate for the upper bound, $\gamma^{*}(\omega_N)$, solution of the convex optimization problem \eqref{eq:lowerbound}\\
(observe that \eqref{eq:lowerbound} does not require the explicit knowledge of the matrices ${\bf A_j}$);\\
\textbf{Compute:} $\varepsilon(\beta,\omega_N)$, the proportion of points where our inference on the upper bound may be invalid;\\
\textbf{Compute:} $\delta(\varepsilon) \leq 1$, a correcting factor ($\delta \xrightarrow[N \to \infty]{} 1$);\\
\textbf{Output:} $\frac{\gamma^{*}(\omega_N)}{\sqrt[2l]{n}} \leq \rho \leq \frac{\gamma^{*}(\omega_N)}{\sqrt[l]{\delta(\varepsilon)}}$\\
(the right-hand side inequality is valid with probability at least $\beta$).
\end{alg}
\section{A Deterministic Lower Bound}\label{sec:lowerBound}
In Section~\ref{sec:stab}, we presented an optimization problem, \eqref{eqn:campiOpt2}, that provides a stability guarantee. Nevertheless, this problem has infinitely many constraints and observing a finite number of traces only gives us access to a restriction of it (with finitely many constraints). We consider then the following optimization problem:
\begin{equation}\label{eq:lowerbound}
\begin{aligned}
\hspace{-0.4cm}\text{min}_{\gamma, P} \quad \gamma \\
\text{s.t.} &\qquad (\mathbf{A_j} x)^T P \mathbf{A_j} x \leq \gamma^{2l} x^T P x, \forall (x, \mathbf{j}) \in \omega_N\\
& \qquad P \succ 0,\ \gamma \geq 0. \\
\end{aligned}
\end{equation}
with optimal solution $\gamma^{*}(\omega_N)$, and where $\mathbf{A_j} := A_{j_l} A_{j_{l-1}} \dots A_{j_1}$ and $\mathbf{j}:=\{j_1,\dots, j_l\}$. Note that \eqref{eq:lowerbound} can be efficiently solved by semidefinite programming and bisection on the variable $\gamma$ (see \cite{boyd}). Note also that this program can be solved in practice using only the observations: even though the $A_{j_i}$ are not known, the program only requires the knowledge of $\mathbf{A_j} x$, which is available through the observations. In this section, we provide a theorem for a deterministic lower bound based on the observations given by $\omega_N$, whose accuracy depends on the ``horizon'' $l$.
\begin{theos} \label{thm:lowerbound}
For an arbitrary $l \in \mathbb{N}_{>0}$ and a given uniform sample $\omega_N \subset Z_l$, by considering $\gamma^*(\omega_N)$ the optimal solution of the optimization problem \eqref{eq:lowerbound}, we have $$\rho(\mathcal{M}) \geq \frac{\gamma^*(\omega_N)}{\sqrt[2l]{n}}.$$
\end{theos}
\begin{proof}
Let $\nu >0$. By definition of $\gamma^*(\omega_N)$, there exists no matrix $P \in \mathcal{S}^n_{++}$ such that, $\forall x \in \mathbb{S}, \, \forall \mathbf{A_j} \in \mathcal{M}^{l}$,
\begin{equation*}
(\mathbf{A_j} x)^T P \mathbf{A_j} x \leq (\gamma^*(\omega_N) -\nu)^{2l} x^T P x.
\end{equation*}
Taking the contrapositive of Corollary \ref{cor:approx-products}, this implies that $\rho(\mathcal{M}) \geq \frac{\gamma^*(\omega_N) -\nu}{\sqrt[2l]{n}}$. Since this is valid for any $\nu>0,$ we finally obtain the claim.
\end{proof}
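\noindent
As a sketch of how \eqref{eq:lowerbound} can be solved in practice, the code below (Python with CVXPY and the SCS solver; the helper names, the bisection interval and the tolerance are our own choices) performs a bisection on $\gamma$ and, for each candidate value, solves a semidefinite feasibility problem whose constraints only involve the observed pairs $(x_{i,0}, x_{i,l})$.
\begin{verbatim}
import numpy as np
import cvxpy as cp

def feasible(pairs, gamma, l):
    """Is there P >= I with y^T P y <= gamma^(2l) x^T P x for every observed
    pair (x, y) = (x_{i,0}, x_{i,l})?  (Sampled version of the CQF condition.)"""
    n = pairs[0][0].shape[0]
    P = cp.Variable((n, n), symmetric=True)
    cons = [P >> np.eye(n)]
    for x, y in pairs:
        cons.append(cp.quad_form(y, P) <= gamma ** (2 * l) * cp.quad_form(x, P))
    prob = cp.Problem(cp.Minimize(0), cons)
    prob.solve(solver=cp.SCS)
    return prob.status in (cp.OPTIMAL, cp.OPTIMAL_INACCURATE)

def gamma_star(pairs, l, upper=10.0, tol=1e-3):
    """Bisection on gamma for the sampled program; returns an approximation
    of gamma*(omega_N) up to the prescribed tolerance."""
    lo, hi = 0.0, upper
    while hi - lo > tol:
        mid = 0.5 * (lo + hi)
        lo, hi = (lo, mid) if feasible(pairs, mid, l) else (mid, hi)
    return hi
\end{verbatim}
\noindent
The bisection is valid because feasibility is monotone in $\gamma$: any $P$ feasible for some value of $\gamma$ remains feasible for every larger value.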
\section{A Probabilistic Stability-like Guarantee}\label{sec:upperbound}
\begin{subsection}{A Partial Upper Bound}\label{sec:IntroMainThm}
In this section, we show how to compute an upper bound on $\rho$, with a user-defined confidence $\beta \in [0, 1)$. We do this by constructing an $l$-step CQF which is valid with probability at least $\beta$. The existence of an $l$-step CQF implies $\rho \leq \gamma^*$ due to Theorem \ref{thm:cqlf}. As we will see below, the quality of our bound will depend on geometrical properties of the CQF found; more precisely, the smaller the condition number of the corresponding matrix $P$, the better our bound will be. In practice, one can minimize the condition number of the solution $P$ in a second step, after computing $\gamma^*$ from \eqref{eq:lowerbound}. However, for the sake of rigor and clarity of our proofs, we introduce a slightly different optimization problem. We consider for the rest of the discussion the following optimization problem, which we denote by $\text{Opt} (\omega_N)$:
\begin{equation}\label{eqn:campiOpt03}
\begin{aligned}
\hspace{-0.4cm}&\min_{P} \quad \lambda_{\max}(P) \\
\hspace{-0.3cm}&\text{s.t.} \quad (\mathbf{A_j}x)^T P \mathbf{A_j} x \leq { \left( (1 +\eta)\gamma^*(\omega_N) \right)}^{2l} x^T P x, \ \forall\ (x, \mathbf{j}) \in \omega_N \\
& P \succeq I, \\
\end{aligned}
\end{equation}
with $\eta > 0$, and where $\gamma^*(\omega_N)$ is the optimal solution to the optimization problem \eqref{eq:lowerbound}. Let us analyze the relationship between $\text{Opt} (\omega_N)$ and the optimization problem \eqref{eq:lowerbound}. Firstly, thanks to the homogeneity of system \eqref{eq:switchedSystem}, we can replace the constraint $P \succ 0$ in the initial problem with the constraint $P \succeq I$. Secondly, as discussed above, the objective function $\lambda_{\max}(P)$ (which is convex) can be added once $\gamma^*$ is computed, in order to minimize the condition number. Lastly, we introduced a ``regularization parameter'', $\eta > 0$, which ensures strict feasibility of $\text{Opt} (\omega_N)$. As the reader will see, we will derive results valid for arbitrarily small values of $\eta$. This will then not hamper the practical accuracy of our technique, while allowing us to derive a theoretical asymptotic guarantee (i.e., for a large number of observations). We denote the optimal solution of $\text{Opt} (\omega_N)$ by $P(\omega_N)$, and drop the explicit dependence of $P$ on $\omega_N$ when it is clear from the context.
\noindent
The intriguing question of whether the optimal solution of this sampled problem is a feasible solution to \eqref{eqn:campiOpt2} has been widely studied in the literature \cite{campi}. Under certain technical assumptions, one can bound the proportion of the constraints of the original problem \eqref{eqn:campiOpt2} that are violated by the optimal solution of $\text{Opt} (\omega_N)$, with some probability which is a function of the sample size $N$. In the following theorem, we adapt a classical result from the random convex optimization literature to our problem.
\begin{theos}[adapted from Theorem 3.3\footnotemark, \cite{campi}]\label{mainTheorem0}
Consider the optimization problem $\text{Opt} (\omega_N)$ given in \eqref{eqn:campiOpt03}, where $\omega_N$ is a uniform random sample drawn from the set $Z_l$. Let $d = \frac{n(n+1)}{2}$ be the dimension of the decision variable $P$ of $\text{Opt} (\omega_N)$ and $N \geq d+1$. Then, for all $\varepsilon \in (0,1]$ the following holds:
\begin{equation}\label{eqn:violation}
\mu_l^N\hspace{-1mm}\left\{ \omega_N \in Z_l^N: \mu_l \left( V(\omega_N) \right) \leq \varepsilon \right\}\hspace{-1mm} \geq \beta(\varepsilon, N),
\end{equation}
where $\mu_l^N$ denotes the product probability measure on $Z_l^N$, $\beta(\varepsilon, N) = 1- \sum_{j=0}^{d} \binom{N}{j}\varepsilon^j (1-\varepsilon)^{N-j}$, and $V(\omega_N)$ is the set $\{(x,\mathbf{j}) \in Z_l: (\mathbf{A_j} x)^T P(\omega_N) \mathbf{A_j} x > (\gamma^{*}(\omega_N))^{2l} x^T P(\omega_N) x\}$, i.e., it is the subset of $Z_l$ for which the considered $\gamma^*$-contractivity is violated by the optimal solution of $\text{Opt} (\omega_N)$.
\noindent
The quantity $\varepsilon$ can also be seen as a function of $\beta$ and $N$: $\varepsilon(\beta, N) = 1 - I^{-1}(1-\beta; N-d,d+1)$ (see the proof of Theorem 3.3 in \cite{campi}).
\end{theos}
\footnotetext{Theorem 3.3 in \cite{campi} requires $\text{Opt} (\omega_N)$ to satisfy the following technical assumptions:\begin{enumerate}
\item When the problem $\text{Opt} (\omega_N)$ admits an optimal solution, this solution is unique.
\item Problem $\text{Opt} (\omega_N)$ is nondegenerate with probability $1$.
\end{enumerate}
Here, the first assumption can be enforced if required by adding a tie-breaking rule to $\text{Opt} (\omega_N)$ as explained in Appendix A in \cite{tiebreak}, while the second assumption can be lifted, as explained in PART 2b in \cite{campi-garatti}, thanks to the introduction of ``constraint heating''.
}
\begin{cor}\label{cor:gettingRidOfm}
Consider a set of matrices $\mathcal{M}$, a sample $\omega_N \subset Z_l$, $\gamma^{*}$ the optimal solution of \eqref{eq:lowerbound} and $P \succ 0$ the optimal solution of $\text{Opt} (\omega_N)$. Then, with the notation of Theorem~\ref{mainTheorem0}, for any $\varepsilon>0$, with confidence $\beta(\varepsilon,N),$ one has:
\begin{equation}\label{eqn:P0}
(\mathbf{A_j} x)^T P \mathbf{A_j} x \leq (\gamma^{*})^{2l} x^T P x, \forall x \in \mathbb{S} \setminus \tilde{\mathbb{S}}, \forall \mathbf{j} \in M^l
\end{equation}
where $\tilde{\mathbb{S}} \subset \mathbb{S}$, the projection of $V(\omega_N)$ on $\mathbb{S}$, has measure $\sigma^{n-1}(\tilde{\mathbb{S}}) \leq \varepsilon m^l $.
\end{cor}
\noindent
This result allows us to make abstraction of the probabilistic setting: by accepting a confidence level $\beta$ smaller than one, we may assume that all the points except a small set satisfy the Lyapunov inequality \eqref{eqn:P0}. The case where we have the equality $\sigma^{n-1}(\tilde{\mathbb{S}}) = \varepsilon m^l$ corresponds to the case where every point $x \in \tilde{\mathbb{S}}$ violates \eqref{eqn:P0} for exactly one value of $\mathbf{j}$.
\begin{proof}
We know that $\Sigma_{M^l}$ has $2^{m^l}$ elements $\{\mathcal{M}^l_i, i \in \{1,2, \dots, 2^{m^l} \} \}$. Then $V(\omega_N)$ can be written as the disjoint union $V(\omega_N) = \sqcup_{1 \leq i \leq 2^{m^l}} (\mathcal{S}_i, \mathcal{M}^l_i)$ where $\mathcal{S}_i \in \mathcal{B}_{\mathbb{S}}$. We notice that $\tilde{\mathbb{S}} = \sqcup_{1 \leq i \leq 2^{m^l}} \mathcal{S}_i$, and
\begin{equation*}
\sigma^{n-1} (\tilde{\mathbb{S}}) = \sum_{1 \leq i \leq 2^{m^l}} \sigma^{n-1} (\mathcal{S}_i).
\end{equation*}
We have
\begin{eqnarray*}
\mu_l(V(\omega_N)) &=& \mu_l \left( \sqcup_{1 \leq i \leq 2^{m^l}} (\mathcal{S}_i, \mathcal{M}^l_i) \right) = \sum_{1 \leq i \leq 2^{m^l}} \mu_l \left( \mathcal{S}_i, \mathcal{M}^l_i \right) \\
&=& \sum_{1 \leq i \leq 2^{m^l}} \sigma^{n-1} \otimes \mu_{M^l} \left( \mathcal{S}_i, \mathcal{M}^l_i \right) \\
&=& \sum_{1 \leq i \leq 2^{m^l}} \sigma^{n-1}(\mathcal{S}_i) \mu_{M^l} (\mathcal{M}^l_i).
\end{eqnarray*}
\noindent
Then, since for every $i$, $\mu_{M^l}(\mathcal{M}^l_i) \geq \min_{(j_1,\dots,j_l) \in M^l} \mu_{M^l}(\{(j_1,\dots,j_l)\}) = \frac{1}{m^l}$, we get:
\begin{equation}
\sigma^{n-1}(\tilde{\mathbb{S}}) \leq \frac{\mu_l(V(\omega_N))}{\frac{1}{m^l}} \leq m^l \varepsilon.
\end{equation}
This means that
\begin{equation}\label{eqn:P2}
\begin{aligned}
& (A_{j_l} A_{j_{l-1}} \dots A_{j_1} x)^T P (A_{j_l} A_{j_{l-1}} \dots A_{j_1} x) \leq (\gamma^{*})^{2l} x^T P x, \forall\, x \in \mathbb{S} \setminus \tilde{\mathbb{S}}, \,\forall\, (j_1,\dots,j_l) \in M^l,
\end{aligned}
\end{equation}
where $\sigma^{n-1}(\tilde{\mathbb{S}}) \leq m^l \varepsilon.$
\end{proof}
\noindent
The above results allow us to conclude, from a finite number of observations, that with probability $\beta$ (where $\beta$ goes to $1$ as $N$ goes to infinity), the required property is actually satisfied on the complete sphere $\mathbb{S}$, except on a small set of measure at most $\tilde{\varepsilon} = \varepsilon m^l$. This means that the ellipsoid $E_P$ computed by $\text{Opt} (\omega_N)$ is ``almost invariant'', except on a set of measure bounded by $\tilde{\varepsilon}$. This is represented in Fig.~\ref{fig:ellipsoid} for the case $n=2$, where the red points of $E_P$ are points that might violate the invariance constraint. Here, the set of red points has measure at most $\tilde{\varepsilon}$.
\begin{figure}
\caption{Representation of the ``partial invariance property'' obtained by application of the results in Theorem~\ref{mainTheorem0}.}
\label{fig:ellipsoid}
\end{figure}
\noindent
Thus, we are left with the following question:
\begin{problem}\label{prob:question}
What can we conclude on the JSR if the invariance property is satisfied by all points, except a set of measure $\tilde{\varepsilon}$?
\end{problem}
\noindent
In the proof of Theorem~\ref{thm:mainTheorem01}, we will derive an upper bound by solving the geometric problem of computing the largest scaling of $E_P$ included in the convex hull of the subset of points of $E_P$ that satisfy the invariance property \eqref{eqn:P0}. Indeed, this smaller ellipsoid will satisfy a (relaxed) invariance property \emph{for all its points}, thanks to the following key property of switched linear systems.
\begin{property}\label{property:convpres}
The dynamics given in \eqref{eq:switchedSystem} is convexity-preserving, meaning that for any set of points $X \subset \mathbb{R}^n$,
\begin{equation*}
\begin{aligned}
&\{Ax: A \in \mathcal{M}, x \in \ensuremath{\operatorname{conv}}{X}\} \subset \ensuremath{\operatorname{conv}}{ \{Ax: A\in \mathcal{M}, x\in X \} }.
\end{aligned}
\end{equation*}
\end{property}
\noindent
Of course, for a fixed measure $\tilde{\varepsilon}$, this largest ellipsoid will depend on the distribution of the points of $E_P$ that violate the constraint. In order to obtain a guarantee on our upper bound, we will look for the smallest such ellipsoid obtained over all possible sets $V(\omega_N)$ of measure $\tilde{\varepsilon}$.
\noindent
We start by solving this problem in the particular case where $E_P = \mathbb{S}$. In this case, we benefit from the following tool, which allows us to explicitly analyse the worst-case distribution.
\begin{de}
We define the \emph{spherical cap} on $\mathbb{S}$ for a given hyperplane $c^Tx = k$, as $\mathcal{C}_{c,k} := \{x \in \mathbb{S} : c^Tx >k\}$.
\end{de}
\noindent
We now define the following function which quantifies the largest-inscribed-sphere problem, for a given subset $X \subset \mathbb{S}$:
\begin{equation}\label{shrinkage}
\Delta: \left\{
\begin{split}
&\wp(\mathbb{S}) \to [0,1]\\
&X \mapsto \sup \{r: r\mathbb{B} \subset \ensuremath{\operatorname{conv}}(\mathbb{S} \setminus X)\}.
\end{split}
\right.
\end{equation}
The following proposition tells us that, when the measure of the set $X$ is fixed, $\Delta$ is minimized when $X$ is a spherical cap, i.e., the radius of the largest ball centered at the origin and included in $\ensuremath{\operatorname{conv}}(\mathbb{S} \setminus X)$ is smallest when $X$ is a spherical cap.
\begin{prop}\label{prop:mainSphericalCap}
Let $\tilde{\varepsilon} \in [0,1]$ and $\mathcal{X}_{\tilde{\varepsilon}} = \{X \subset \mathbb{S}: \sigma^{n-1}(X) \leq \tilde{\varepsilon}\}$. Then, the function $\Delta(X)$ attains its minimum over $\mathcal{X}_{\tilde{\varepsilon}}$ for some $X$ which is a spherical cap. We denote by $\delta(\tilde{\varepsilon})$ this minimal value, which takes the following expression: $$\delta(\tilde{\varepsilon}) = \sqrt{1 - I^{-1}(2 \tilde{\varepsilon};\frac{n-1}{2},\frac{1}{2})}.$$
\end{prop}
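\noindent
As a numerical sketch of this closed form (our own helper, written with SciPy), the function below returns the radius of the largest ball centered at the origin contained in the convex hull of the sphere with a cap of measure $\tilde{\varepsilon}$ removed; in dimension $n=2$ this radius is simply $\cos(\pi\tilde{\varepsilon})$, which we use as a sanity check.
\begin{verbatim}
import numpy as np
from scipy.special import betaincinv

def delta_cap(eps_tilde, n):
    """delta(eps_tilde) = sqrt(1 - I^{-1}(2*eps_tilde; (n-1)/2, 1/2)): radius
    of the largest origin-centered ball contained in the convex hull of the
    unit sphere minus a spherical cap of measure eps_tilde."""
    if eps_tilde >= 0.5:
        return 0.0  # a cap of measure >= 1/2 leaves no ball around the origin
    return float(np.sqrt(1.0 - betaincinv((n - 1) / 2.0, 0.5, 2.0 * eps_tilde)))

# Sanity check in dimension n = 2: a cap of measure t is an arc of angle
# 2*pi*t, whose chord lies at distance cos(pi*t) from the origin.
for t in (0.05, 0.1, 0.2, 0.4):
    assert abs(delta_cap(t, 2) - np.cos(np.pi * t)) < 1e-7
\end{verbatim}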
\noindent
A proof of Proposition~\ref{prop:mainSphericalCap} is given in the Appendix. Since the constraints defining $V(\omega_N)$ are invariant under $x \mapsto -x$, we have $x \in \tilde{\mathbb{S}} \iff -x \in \tilde{\mathbb{S}}$, which implies that the minimal $\delta$ will in fact occur when the set of violating points is the union of two symmetric spherical caps, each of measure $\frac{\tilde{\varepsilon}}{2}$.
\begin{figure}
\caption{On the left, a general case of the situation where the ellipse in Fig.~\ref{fig:ellipsoid} is a sphere. On the right, the case giving the minimal $\delta$. The set of points violating the invariance constraint (in red) is the union of two spherical caps.}
\end{figure}
\begin{rem}
When $\varepsilon \geq \frac{1}{m^l}$, we have $\tilde{\varepsilon} \geq 1$ and $\delta(\tilde{\varepsilon}) = 0$: the only upper bound we can give for the JSR is then $+ \infty$.
\end{rem}
\end{subsection}
\begin{subsection}{A global upper bound}
We now introduce our main theorem, Theorem~\ref{thm:mainTheorem01}, which provides a solution to Problem~\ref{prob:question}. In order to use the solution of the previous subsection, developed for the case $E_P = \mathbb{S}$, we have to relate $E_{P(\omega_N)}$ to $\mathbb{S}$. We thus apply a change of coordinates bringing $E_P$ to $\mathbb{S}$. Since $P \in \mathcal{S}_{++}^n$, it can be written in its Cholesky form
\begin{equation}\label{cholesky}
P = L^TL,
\end{equation}
where $L$ is an upper triangular matrix. Remark that $L$ maps the elements of $E_P$ to $\mathbb{S}$. Since the JSR is not changed by similarity transformations, we can pursue our calculations with the matrices obtained after the change of coordinates.
\begin{figure}
\caption{Change of coordinates to bring our problem back to the case of the unit sphere.}
\end{figure}
\begin{theos}\label{thm:mainTheorem01}
Let $\gamma^* \in \mathbb{R}_{> 0}$. Consider a set of matrices $\mathcal{M}$, and a matrix $P \succ 0$, optimal solution of $\text{Opt} (\omega_N)$, satisfying Equation \eqref{eqn:P0} for some $\tilde{\mathbb{S}} \subset \mathbb{S}$ where $\sigma^{n-1}(\tilde{\mathbb{S}}) \leq \tilde{\varepsilon}$. Then, we have
\begin{equation}\label{eq:ub1}
\rho(\mathcal{M}) \leq \frac{\gamma^{*}}{\sqrt[l]{\delta \left( \frac{\tilde{\varepsilon} \kappa(P)}{2} \right)}}
\end{equation}
with $\kappa(P) = \sqrt{\frac{\det(P)}{\lambda_{\min}(P)^n}}$ and where $\delta(\cdot{})$ is given by Proposition~\ref{prop:mainSphericalCap}.
\end{theos}
\begin{proof}
\ \newline
\textit{i)} Since we have seen in the previous section a technique to solve the spherical case, we first bring our problem to the spherical case. To do so, we perform the change of coordinates defined as in \eqref{cholesky} by $L \in \mathcal{L}(\mathbb{R}^n)$ which maps the ellipsoid $E_P$ to the sphere $\mathbb{S}$. By defining \mbox{$\bar{A}_{j_i}= L A_{j_i} L^{-1}$,} and $\mathbf{\bar{A}_j} = \bar{A}_{j_l} \bar{A}_{j_{l-1}} \dots \bar{A}_{j_1} $, Equation \eqref{eqn:P0} becomes:
\begin{equation}\label{eq:assumption3}
\hspace{-0.3cm}(\mathbf{\bar{A}_j} x)^T \mathbf{\bar{A}_j} x \leq ({\gamma^*})^{2l} x^T x, \, \forall x \in L(\mathbb{S} \setminus \tilde{\mathbb{S}}), \forall \mathbf{j} \in M^l.
\end{equation}
By using the homogeneity of the dynamics, we have:
\begin{equation*}
\begin{aligned}
&(\mathbf{\bar{A}_j} x)^T \mathbf{\bar{A}_j} x \leq ({\gamma^*})^{2l} x^T x,\, \forall x \in L(\mathbb{S} \setminus \tilde{\mathbb{S}}) \implies (\mathbf{\bar{A}_j} x)^T \mathbf{\bar{A}_j} x \leq ({\gamma^*})^{2l} x^T x,\, \forall x \in \Pi_\mathbb{S} \left( L(\mathbb{S} \setminus \tilde{\mathbb{S}}) \right),
\end{aligned}
\end{equation*}
and therefore we can rewrite \eqref{eq:assumption3} as:
\begin{equation}\label{eq:assumption4}
(\mathbf{\bar{A}_j} x)^T \mathbf{\bar{A}_j} x \leq ({\gamma^*})^{2l} x^T x, \forall x \in \mathbb{S} \setminus \Pi_\mathbb{S}(L(\tilde{\mathbb{S}})), \forall \mathbf{j} \in M^l.
\end{equation}
\noindent
\textit{ii)} We now show how to relate $\sigma^{n-1}(\Pi_\mathbb{S}(L(\tilde{\mathbb{S}})))$ to $\sigma^{n-1}(\tilde{\mathbb{S}})$, the measure of the violating set in the initial coordinates. Consider $\mathbb{S}^{\tilde{\mathbb{S}}}$, the sector of $\mathbb{B}$ defined by $\tilde{\mathbb{S}}$. We denote $C:= L(\tilde{\mathbb{S}})$ and $C':=\Pi_\mathbb{S}(L(\tilde{\mathbb{S}}))$. Thus, we have $\mathbb{S}^{C'} \subset \frac{1}{ \lambda_{\min}(L)} \mathbb{S}^C$. This leads to\footnote{Recall that $\lambda(S)$ is the Lebesgue measure of $S$, and that the spherical measure of any set $C \subset \mathbb{S}$ is given by $\sigma^{n-1}(C)=\lambda \left( \mathbb{S}^C \right)/\lambda(\mathbb{B})$; we drop the normalization constant $\lambda(\mathbb{B})$ in the computation below, since it cancels in the final ratio.}:
$$\sigma^{n-1}(C') = \lambda \left( \mathbb{S}^{C'} \right) \leq \lambda \left( \frac{1}{ \lambda_{\min}(L)} L(\mathbb{S}^{\tilde{\mathbb{S}}}) \right).$$ Then, the following holds:
\begin{eqnarray}
\nonumber\sigma^{n-1}(C') &\leq& \frac{\lambda \left( L(\mathbb{S}^{\tilde{\mathbb{S}}}) \right)}{\lambda_{\min}(L)^n} \\
\label{eqn:lt} &=&\frac{|\det(L)|}{\lambda_{\min}(L)^n}\lambda \left( \mathbb{S}^{\tilde{\mathbb{S}}} \right) \\
\label{eqn:map1} &=&\sqrt{\frac{\det(P)}{\lambda_{\min}(P)^n}}\sigma^{n-1}(\tilde{\mathbb{S}})
\end{eqnarray}
where \eqref{eqn:lt} follows from the fact that $\lambda(Q(X)) = |\det(Q)| \lambda(X)$, for any set $X \subset \mathbb{R}^n$ and $Q \in \mathcal{L}(\mathbb{R}^n)$ (see e.g. \cite{rudin}). Hence, we have
\begin{equation}\label{eqn:contraction}
(\mathbf{\bar{A}_j} x)^T \mathbf{\bar{A}_j} x \leq (\gamma^{*})^{2l} x^Tx, \forall x \in \mathbb{S} \setminus \mathbb{S}', \forall \mathbf{j} \in M^l,
\end{equation}
with $\mathbb{S}' = \Pi_\mathbb{S}(L(\tilde{\mathbb{S}}))$ and $\sigma^{n-1}(\mathbb{S}') \leq \sqrt{\frac{\det(P)}{\lambda_{\min}(P)^n}}\sigma^{n-1}(\tilde{\mathbb{S}}) \leq \kappa(P) \tilde{\varepsilon}$.
\noindent
\textit{iii)} For any such given set $\mathbb{S}'$, we look for the largest sphere included in $\mbox{convhull } (\mathbb{S} \setminus \mathbb{S}')$. By homogeneity of the system, this sphere is centered at the origin, and we denote by $\alpha$ its radius. By \eqref{eqn:contraction}, $l$-traces initialized on $\mathbb{S} \setminus \mathbb{S}'$ will be in $(\gamma^*)^l \mathbb{B}$: $$\mathbf{\bar{A}_j} \left( \mathbb{S} \setminus \mathbb{S}' \right) \subset (\gamma^*)^l \mathbb{B}, \, \forall \mathbf{j} \in M^l.$$
Now, combining with Property~\ref{property:convpres}, we have: $$\mathbf{\bar{A}_j} \left( \mbox{convhull } (\mathbb{S} \setminus \mathbb{S}') \right) \subset \mbox{convhull } ( \mathbf{\bar{A}_j} (\mathbb{S} \setminus \mathbb{S}')) \subset (\gamma^*)^l \mathbb{B}, \forall \mathbf{j} \in M^l.$$ Since $\alpha \mathbb{S} \subset \mbox{convhull } (\mathbb{S} \setminus \mathbb{S}')$, then $\forall \mathbf{j} \in M^l$, $\mathbf{\bar{A}_j} \left( \alpha \mathbb{S} \right) = \alpha \mathbf{\bar{A}_j} \left( \mathbb{S} \right) \subset \mbox{convhull } \left( \mathbf{\bar{A}_j} (\mathbb{S} \setminus \mathbb{S}') \right) \subset (\gamma^*)^l \mathbb{B}$, which implies that
\begin{equation}\label{eq:part3}
\mathbf{\bar{A}_j} (\mathbb{S}) \subset \frac{(\gamma^*)^l}{\alpha} \mathbb{B}.
\end{equation}
\noindent
\textit{iv)} Summarizing, since we know that the set $\tilde{\mathbb{S}}$ is symmetric w.r.t. the origin, by Proposition~\ref{prop:mainSphericalCap}, we have that $\alpha \geq \delta(\frac{\tilde{\varepsilon} \kappa(P)}{2})$. Finally, by homogeneity of our system and the fact that the JSR is invariant under similarity transformations, Equation \eqref{eq:part3} implies $\rho(\mathcal{M}^l) \leq \frac{(\gamma^*)^l}{\delta(\frac{\tilde{\varepsilon} \kappa(P)}{2})}$, hence $\rho(\mathcal{M}) \leq \frac{\gamma^{*}}{\sqrt[l]{\delta \left( \frac{\tilde{\varepsilon} \kappa(P)}{2} \right)}}$.
\end{proof}
\begin{rem}\label{otherUb}
There is no conservatism in multiplying $\varepsilon$ by $m^l$, as in the worst case this can happen: if $\varepsilon = 1/m^l$, Theorem 4 does not rule out the pathological case where not a single point satisfies Equation (7) for all $\mathbf{A} \in \mathcal{M}^l$, and thus $\delta$ must be equal to $0$ since all points might be violating the constraint. However, the multiplication by $\kappa(P)$ is conservative if $P$ has different eigenvalues (this bound is then exactly reached only at a single point on the ellipsoid). We can then, instead of deriving an upper bound on the size of the set of points that violate the constraint, look at a lower bound on the size of the set of points that satisfy the constraint. Taking the complement of this latter set gives another upper bound on the size of the set of violating points. By a similar reasoning as the one conducted above, this second upper bound will be equal to $1 - (1-\varepsilon m^l) \sqrt{\frac{\det(P)}{\lambda_{\max}(P)^n}}$. \\
This provides an alternative upper bound, which can be used if the initial upper bound \eqref{eq:ub1} is infinite, or weaker.
\end{rem}
\end{subsection}
\begin{subsection}{Main Theorem}
We are now ready to prove our main theorem by putting together all the above pieces.
\begin{theos}\label{thm:mainTheorem}
Consider an $n$-dimensional switched linear system as in \eqref{eq:switchedSystem} and a uniform random sampling $\omega_N \subset Z_l$, where $N \geq \frac{n(n+1)}{2}+1$. For any $\eta > 0$, let $\gamma^{*}(\omega_N) $ be the optimal solution to \eqref{eq:lowerbound}. Then, for any given $\beta \in [0,1)$, with probability at least $\beta$ we have:
$$\rho \leq \frac{\gamma^{*}(\omega_N) (1+ \eta)}{\sqrt[l]{\delta(\beta, \omega_N)}},$$
where $\delta(\beta, \omega_N) = \sqrt{1 - I^{-1}(\varepsilon(\beta,N) m^l \kappa(P); \frac{n-1}{2}, \frac{1}{2})}$ satisfies $\lim_{N \to \infty}\delta(\beta, \omega_N) = 1$ with probability $1$.
\end{theos}
\begin{proof}
Let us consider $\gamma^*(\omega_N)$ and $P$ as in Equation \eqref{eqn:campiOpt03}. Then, by taking $\varepsilon:=\varepsilon(\beta,N)$ such that $\beta(\varepsilon,N)=\beta$ in Corollary~\ref{cor:gettingRidOfm}, we have
\begin{equation*}
(\mathbf{A_j} x)^T P \mathbf{A_j} x \leq \left( \gamma^{*}(1+\eta) \right)^{2l} x^T P x, \forall x \in \mathbb{S} \setminus \tilde{\mathbb{S}}, \forall \mathbf{j} \in M^l
\end{equation*}
with $\tilde{\mathbb{S}}$ the projection of $V$ on $\mathbb{S}$, and $\sigma^{n-1}(\tilde{\mathbb{S}}) \leq \varepsilon m^l$. Then by Theorem \ref{thm:mainTheorem01}, we can compute $\delta(\beta, \omega_N) =\delta(\varepsilon'(\beta,N))$, where
\begin{equation}\label{eqn:eps2}
\varepsilon'(\beta, N) = \frac{1}{2} \varepsilon(\beta,N) m^l \kappa(P)
\end{equation}
such that with probability at least $\beta$ we have: $$\rho \leq \frac{\gamma^{*}(\omega_N) (1 + \eta)}{\sqrt[l]{\delta(\beta, \omega_N)}},$$ which completes the proof of the first part of the theorem.
\noindent
Let us prove now that $\lim_{N \to \infty} \delta(\beta, \omega_N) = 1$ with probability $1$. We recall that $$\delta(\beta, \omega_N) = \delta \left( \tfrac{1}{2}\, \varepsilon(\beta, N)\, m^l\, \kappa(P(\omega_N)) \right).$$
\noindent
We start by showing that $\kappa \left( P(\omega_N) \right)$ is uniformly bounded in $N$. The optimization problem $\text{Opt} (\omega_N)$ given in \eqref{eqn:campiOpt03}, with $(\omega_N)$ replaced by $(Z_l)$ and $(1 + \eta)$ replaced by $(1+\frac{\eta}{2})$ is strictly feasible for any positive parameter $\eta$. It then admits a finite optimal value $K$ for some solution $P_{\eta/2}$. Note that, $\lim_{N \to \infty} \gamma^{*}(\omega_N)= \gamma^{*}(Z_l)$ with probability $1$. Thus, for large enough $N$, \mbox{$\gamma^{*}(\omega_N)(1+\eta) > \gamma^{*}(Z_l)(1+\frac{\eta}{2})$.} This also means that, for large enough $N$, $\text{Opt} (\omega_N)$ admits $P_{\eta/2}$ as a feasible solution and thus the optimal value of $\text{Opt} (\omega_N)$ is upper-bounded by $K.$ In other words, for $N$ large enough, \mbox{$\lambda_{\max}(P({\omega_N})) \leq K$.} Moreover, since $\lambda_{\min}(P(\omega_N))\geq 1$ (by $P \succeq I $), we also have \mbox{$\det(P(\omega_N)) \geq 1$,} which means that
\begin{equation}\label{kappa}
\kappa \left( P(\omega_N) \right) = \sqrt{\frac{\det(P(\omega_N))}{\lambda_{\min}(P(\omega_N))^n}} \leq \sqrt{K^n}.
\end{equation}
We next show that $\lim_{N \to \infty} \varepsilon(\beta, N) = 0$ for any fixed $\beta \in [0,1)$. Recall that $\varepsilon(\beta, N)$ is implicitly defined by
\begin{eqnarray}
\nonumber 1-\beta &=& \sum_{j=0}^d {{N}\choose{j}} \varepsilon^j (1-\varepsilon)^{N-j}\\
&\leq& (d+1)N^d (1-\varepsilon)^{N-d}.\label{eqn:beta}
\end{eqnarray}
\noindent
We prove $\lim_{N \to \infty} \varepsilon(\beta, N) = 0$ by contradiction. Assume that $\varepsilon(\beta, N)$ does not tend to $0$. This means that there exists some $c > 0$ such that $\varepsilon(\beta, N) > c$ infinitely often. Then, consider a subsequence $N_k$ such that $\forall k$, $\varepsilon(\beta, N_k) > c$. Then, by \eqref{eqn:beta} we have for any $k \in \mathbb{N}$:
\begin{equation*}
1-\beta \leq (d+1)N_k^d (1-\varepsilon)^{N_k-d}\hspace{-0.4mm} \leq \hspace{-0.7mm}(d+1)N_k^d (1-c)^{N_k-d}.
\end{equation*}
Note that $\lim_{k \to +\infty}(d+1)N_k^d (1-c)^{N_k-d} = 0$, which implies that there exists a $k'$ such that:
$$(d+1)N_{k'}^d (1-c)^{N_{k'}-d} < 1 - \beta,$$ which is a contradiction. Therefore, we must have $\lim_{N \to \infty} \varepsilon (\beta, N) = 0$. Putting this together with \eqref{kappa}, we get: $\lim_{N \to \infty} m^l \kappa(P(\omega_N)) \varepsilon(\beta, N) = 0$. By the continuity of the function $\delta$, this also implies: $\lim_{N \to \infty} \delta \left( \tfrac{1}{2}\, \varepsilon(\beta, N)\, m^l\, \kappa(P(\omega_N)) \right) = 1.$
\end{proof}
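\noindent
Under the assumptions above, the quantities appearing in Theorem~\ref{thm:mainTheorem} are straightforward to evaluate numerically. The helpers below (Python/SciPy, our own sketch; \texttt{delta\_cap} repeats the closed form of Proposition~\ref{prop:mainSphericalCap}) compute $\varepsilon(\beta,N)$, $\kappa(P)$ and the resulting upper bound $\gamma^{*}(1+\eta)/\sqrt[l]{\delta}$.
\begin{verbatim}
import numpy as np
from scipy.special import betaincinv

def epsilon(beta, N, n):
    """epsilon(beta, N) = 1 - I^{-1}(1 - beta; N - d, d + 1), d = n(n+1)/2.
    Requires N >= d + 1."""
    d = n * (n + 1) // 2
    return 1.0 - betaincinv(N - d, d + 1, 1.0 - beta)

def kappa(P):
    """kappa(P) = sqrt(det(P) / lambda_min(P)^n) for symmetric P > 0."""
    eigs = np.linalg.eigvalsh(P)
    return float(np.sqrt(np.prod(eigs) / eigs.min() ** len(eigs)))

def delta_cap(eps_tilde, n):
    """sqrt(1 - I^{-1}(2*eps_tilde; (n-1)/2, 1/2)); zero once eps_tilde >= 1/2."""
    if eps_tilde >= 0.5:
        return 0.0
    return float(np.sqrt(1.0 - betaincinv((n - 1) / 2.0, 0.5, 2.0 * eps_tilde)))

def upper_bound(gamma_star, P, beta, N, m, l, eta=0.0):
    """Probabilistic upper bound gamma*(1+eta) / delta^(1/l) of the theorem,
    valid with confidence at least beta."""
    n = P.shape[0]
    shrink = delta_cap(0.5 * epsilon(beta, N, n) * m ** l * kappa(P), n)
    return np.inf if shrink == 0.0 else gamma_star * (1.0 + eta) / shrink ** (1.0 / l)
\end{verbatim}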
\end{subsection}
\section{Experimental Results}\label{sec:experiments}
\begin{subsection}{Algorithm and experimental results}
Theorem~\ref{thm:lowerbound} and Theorem~\ref{thm:mainTheorem} give us a straightforward algorithm, which is summarized at the end of Section~\ref{sec:preliminaries}. In its first part, we look for $\gamma^*$ by bisection on an interval $[0,U]$ (for the value of $U$, take, e.g., the maximum value of $||x_{k+l}||$ among the observations made). For a fixed desired accuracy $\alpha$ on that bisection, we solve a feasibility problem (of polynomial complexity in the number of constraints) at most $\lceil \log_2(U/\alpha) \rceil$ times. In our experiments we took $\alpha=10^{-3}$. Once the result of the bisection is obtained, we solve $\text{Opt} (\omega_N)$. In practice, the parameter $\eta$ in $\text{Opt} (\omega_N)$ can be set to zero, as it is included in $\alpha$. Finally, we get $\delta$ by using the expression given in Theorem~\ref{thm:mainTheorem01}. All these computations are also of polynomial complexity.
\noindent
We illustrate our technique on a $4$-dimensional switched system with $6$ modes. We fix the confidence level \mbox{$\beta = 0.95$}, and compute the lower and upper bounds on the JSR for $N:=20+200k,\, k \in\{0, \ldots, 29\}$, according to Theorem~\ref{thm:lowerbound} and Theorem~\ref{thm:mainTheorem}, respectively. We report the average performance of our algorithm over $10$ different runs. Fig.~\ref{fig:exp1} shows the evolution, as $N$ increases, of the upper and lower bounds for various values of the trace length $l$. To further demonstrate the practical performance of our technique, we also provide the true value of the JSR approximated by the JSR Toolbox \cite{jsrtoolbox} for this system, which turns out to be $0.918 \pm 0.001$. We observed that the performance of the upper bound is much better for traces of length $1$, while for the lower bound we benefit from considering longer traces. While it is expected that longer traces improve the accuracy, the decreasing performance of the upper bound comes from the fact that many more points are needed for longer traces, because the probability space to be sampled is larger. In our example, our first upper bound smaller than $1$ (that is, a stability guarantee) was obtained for $N=5820$.
\begin{figure}
\caption{Evolution of the upper and lower bounds, for various lengths of traces, with the number of samples.}
\label{fig:exp1}
\end{figure}
\noindent
In Fig.~\ref{fig:exp2}, we compare the upper bound we obtained with the upper bound given by the (white box) JSR Toolbox, for different values of $n$ and $m$.
\begin{figure}
\caption{Convergence of our upper bound when the number of samples increases, for several values of $n$ and $m$ and $l=1$. The values plotted are the ratios between our upper bound and the true value computed by the JSR Toolbox.}
\label{fig:exp2}
\end{figure}
\noindent
Note that the speed of convergence of all the quantities considered decreases when the dimension of the system increases. We nevertheless observe convergence of the upper bound to $\rho(\mathcal{M})$, and convergence of the lower bound to $\frac{\rho(\mathcal{M})}{\sqrt[l]{n}}$. The gap between these two limits is $\frac{\rho}{\sqrt[l]{n}}$ as predicted by Theorem~\ref{thm:mainTheorem}. This gap could be improved by considering a more general class of common Lyapunov functions, such as those that can be described by sum-of-squares polynomials \cite{sosLyap}. We leave this for future work.
\noindent
To illustrate the accuracy of our confidence level $\beta$, we randomly generate $10,000$ test cases with systems of dimension between $2$ and $7$, number of modes between $2$ and $6$, and size of samples $N$ between $30$ and $1000$. We take $\beta = 0.95$ and we check if the upper bound computed by our technique is greater than the true value computed by the JSR Toolbox for the system. We get $9921$ positive tests, out of $10,000$, which gives us a correctness of $0.9921$ for the upper bound computed. Note that, this probability is significantly above the provided $\beta$. This is expected, since our techniques are based on worst-case analysis and thus fairly conservative.
\noindent
Finally, Fig. \ref{fig:exp3} shows the evolution of the function $\delta$ with the number of samples, for different values of $n$, at $m$ and $l$ fixed.
\begin{figure}
\caption{Average behavior of $\delta$ as a function of $N$ for different values of $n$, with fixed $m=4$ and $l=1$.}
\label{delta}
\label{fig:exp3}
\end{figure}
\end{subsection}
\begin{subsection}{Application to Networked Control System}\label{networkedEx}
We consider here a linear time-invariant control system given as \mbox{$x_{k+1}=Ax_k+Bu_k$}, with control law of the form $u_k = Kx_k$. Matrices $A$, $B$ and $K$ are unknown. The open-loop system is unstable with eigenvalues at $\{0.45, 1.1\}$. The controller stabilizes the system by bringing its eigenvalues to $\{0.8, -0.7\}$.
\noindent
The control input is transmitted over a wireless communication channel that is shared by $\ell$ users, including the controller. The channel has two modes: a contention access mode, where users may only send their messages when the channel is ``idle'', using carrier-sense multiple access with collision avoidance (CSMA/CA); and a contention free mode, where each user has guaranteed time slots, during which there is no packet loss.
\noindent
More precisely, the communication between the users and the recipients is performed based on the IEEE 802.15.4 MAC layer protocol \cite{macLayer}, which is used in some of the proposed standards for control over wireless, e.g., WirelessHART \cite{wirelessHart}. This protocol integrates both contention based slots and guaranteed slots. In this example, we consider a beacon-enabled version of the MAC protocol. A centralized control user periodically synchronizes and configures all the users. This control period is called the Beacon Interval, and is divided into two subintervals: an active and an inactive period. The active period is itself divided into 6 slots. The first 2 slots constitute the contention access period (CAP), and the next 4 slots constitute the contention free period (CFP). In our example, the third and fourth slots are designated for the controller, while the fifth and sixth slots are allocated to the other users. Finally, during the inactive period, all users enter a low-power mode to save energy. We illustrate the overall structure of this communication protocol in Fig.~\ref{comExample}. We now want to decide whether the resulting closed-loop networked control system is stable by simulating it starting from different initial conditions.
\begin{figure}
\caption{The time allocation structure of the modified IEEE 802.15.4 MAC layer.}
\label{comExample}
\end{figure}
\noindent
Note that the closed-loop dynamics of the underlying system when the controller is active is $A_c = A+BK$. We can then model the overall networked control system by the switched linear system $x_{k+1} = \bar{A}x_k$, where $\bar{A} \in \mathcal{M}$ and $\mathcal{M} = \{A^2A_c^2A^4, A_cAA_c^2A^4,AA_c^3A^4, A_c^4A^4\}$. Each element of $\mathcal{M}$ corresponds to a different utilization of the CFP by the users. For example, the mode defined by $A_cAA_c^2A^4$ is active when the first slot in the CFP is assigned to the controller and the second slot is assigned to the other users. We assume that all of the users using the channel have an equal probability of being assigned to a time slot during the CFP. Therefore, the probability of each mode in $\mathcal{M}$ being active is $\left\{\frac{1}{(\ell-1)^2}, \frac{1}{\ell(\ell-1)}, \frac{1}{(\ell-1)\ell}, \frac{1}{\ell^2}\right\}$. Hence, we make use of Remark \ref{rem:probGeneralize} and update our bounds accordingly. Fig.~\ref{fig:networks} shows the computed bounds. As can be seen, after approximately 500 samples the upper bound on the JSR drops below $1$, which lets us conclude that the given closed-loop networked control system is stable with probability $0.95$.
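For illustration, one hypothetical realization of this setup is sketched below (the matrices $A$, $B$, $K$ are our own choice, consistent with the stated open- and closed-loop eigenvalues; in the black-box setting they are of course not available), together with the construction of the mode set $\mathcal{M}$.
\begin{verbatim}
import numpy as np

# Hypothetical plant and controller in companion form: A has eigenvalues
# {0.45, 1.1} and A + B K has eigenvalues {0.8, -0.7}.
A  = np.array([[0.0, 1.0], [-0.495, 1.55]])
B  = np.array([[0.0], [1.0]])
K  = np.array([[1.055, -1.45]])
Ac = A + B @ K                      # closed-loop dynamics when the controller transmits

A4 = np.linalg.matrix_power(A, 4)   # the common factor A^4 appearing in every mode of M
modes = [                           # the four elements of the mode set M above
    A @ A @ Ac @ Ac @ A4,
    Ac @ A @ Ac @ Ac @ A4,
    A @ Ac @ Ac @ Ac @ A4,
    Ac @ Ac @ Ac @ Ac @ A4,
]
print([round(float(max(abs(np.linalg.eigvals(M)))), 3) for M in modes])
\end{verbatim}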
\begin{figure}
\caption{The evolution of the computed upper and lower bounds on the JSR with respect to the number of simulations collected from the networked control system.}
\label{fig:networks}
\end{figure}
\end{subsection}
\section{Conclusions}\label{sec:conclusions}
In this paper, we investigated the question of how one can conclude stability of a dynamical system when a model is not available and, instead, we can only observe the evolution of the state of the system. Our goal was to understand how the observation of well-behaved trajectories \emph{intrinsically} implies stability of a system.
\noindent
Unsurprisingly, we need some standing assumptions on the system in order to allow for any sort of nontrivial stability certificate from a finite number of observations.
\noindent
The novelty of our contribution is twofold: First, we used as standing assumption that the unknown system can be modeled by a switched linear system. This assumption covers a wide range of systems of interest, and to our knowledge no such ``black-box'' result has been available so far on switched systems. Second, we applied powerful techniques from chance-constrained optimization. Their application was far from obvious, though, and relied on geometric properties of linear switched systems.
\noindent
We leveraged the concept of `$l$-step CQF', and showed that it allows our black-box technique to reach arbitrary precision. In the switched systems literature, there are other well-known techniques for refining this precision for the white-box problem. For instance, one can replace the LMIs in Theorem \ref{thm:cqlf} by Sum-Of-Squares (SOS) constraints; see \cite{parrilo-jadbabaie} or \cite[Theorem 2.16]{jungers_lncis}. Although $l$-step CQFs seem better suited for our purpose, we leave a more systematic analysis of the behaviour of the different refining techniques for further work.
\noindent
Notice that our algorithm can also be used in the white-box framework, where it becomes a randomized algorithm for evaluating the JSR of a known system.
\noindent
In our view, the stability-like guarantees obtained are powerful, in view of the hardness of the general problem. In the future, we plan to investigate how to generalize our results to more complex models of realistic systems.
\section*{Appendix: a Few Results on Spherical Caps}\label{appendix}
Before proceeding to the proof of Proposition~\ref{prop:mainSphericalCap}, we first introduce some necessary definitions and related background on spherical caps. We recall that a \emph{spherical cap} on $\mathbb{S}$ for a given hyperplane $c^Tx = k$ is defined by $\mathcal{C}_{c,k} := \{x \in \mathbb{S} : c^Tx >k\}$.
\begin{rem}\label{lemma:muMonotone}
Consider the spherical caps $\mathcal{C}_{c, k_1}$ and $\mathcal{C}_{c, k_2}$ such that $k_1 > k_2$, then we have:
$$\sigma^{n-1}(\mathcal{C}_{c,k_1}) < \sigma^{n-1}(\mathcal{C}_{c,k_2}).$$
\end{rem}
\begin{rem}\label{prop:distance}
The distance between the point $x=0$ and the hyperplane $c^Tx = k$ is $\frac{|k|}{\|c\|}$.
\end{rem}
\noindent
We recall the definition given in Section 4 of the function $\Delta$, which quantifies the largest-inscribed-sphere problem, for a given subset $X \subset \mathbb{S}.$
\begin{equation*}
\Delta: \left\{
\begin{split}
&\wp(\mathbb{S}) \to [0,1]\\
&X \mapsto \sup \{r: r\mathbb{B} \subset \ensuremath{\operatorname{conv}}(\mathbb{S} \setminus X)\}.
\end{split}
\right.
\end{equation*}
\begin{lem} \label{lemma:delta2}
Consider the spherical cap $\mathcal{C}_{c,k}$. We have:
$$\Delta(\mathcal{C}_{c,k}) = \min\left(1, \frac{|k|}{\|c\|}\right).$$
\end{lem}
\begin{proof}
Write $X := \mathcal{C}_{c,k}$ and note that: $$\ensuremath{\operatorname{conv}}(\mathbb{S} \setminus X)= \left\{x \in \mathbb{B} : c^Tx \leq k \right\}.$$
Then the following equalities hold:
\begin{eqnarray}
\nonumber \Delta(X) &=& d(\partial \ensuremath{\operatorname{conv}}(\mathbb{S} \setminus X), 0) \\
\nonumber &=& \min(d(\partial \mathbb{B}, 0), d(\partial\{x : c^Tx \leq k\}, 0)) \\
\nonumber &=& \min(d(\mathbb{S}, 0), d(\{x : c^Tx = k\}, 0)) \\
\nonumber &=& \min\left(1, \frac{|k|}{\|c\|}\right).
\end{eqnarray}
\end{proof}
\begin{cor}\label{lemma:deltaMonotone}
Consider the spherical caps $\mathcal{C}_{c, k_1}$ and $\mathcal{C}_{c, k_2}$ such that $k_1 \leq k_2$. Then we have: $$\Delta(\mathcal{C}_{c,k_1}) \leq \Delta(\mathcal{C}_{c,k_2}).$$
\end{cor}
\begin{lem}\label{lemma:constructSC}
For any set $X \subset \mathbb{S}$, there exist $c$ and $k$ such that $\mathcal{C}_{c,k}$ satisfies: $\mathcal{C}_{c,k} \subset X,$ and $\Delta(\mathcal{C}_{c,k}) = \Delta(X).$
\end{lem}
\begin{proof}
Let $X' := \ensuremath{\operatorname{conv}}(\mathbb{S} \setminus X)$.
Since $d$ is continuous and the set $\partial X'$ is compact, there exists a point $x^* \in \partial X'$, such that:
\begin{eqnarray}\nonumber \Delta(X) = d(\partial X', 0) =
\label{deltaSupporting} \min_{x \in \partial X'}d(x, 0) = d(x^*, 0).\end{eqnarray}
Next, consider the hyperplane which is tangent to the ball $\Delta(X)\mathbb{B}$ at $x^*$, which we denote by $\left\{x : c^Tx = k\right\}$. Then we have:
\begin{equation*}\Delta(X) = d(x^*, 0) = d(\{x : c^Tx = k\}, 0) = \min\left(1, \frac{|k|}{\|c\|}\right).
\end{equation*}
Now, consider the spherical cap defined by this tangent plane i.e., $\mathcal{C}_{c, k}$. Then, by Lemma \ref{lemma:delta2} we have
$\Delta(\mathcal{C}_{c,k}) = \min\left(1, \frac{|k|}{\|c\|}\right)$. Therefore, $\Delta(X) = \Delta(\mathcal{C}_{c,k})$.
\noindent
We next show $\mathcal{C}_{c, k} \subset X$. We prove this by contradiction. Assume $x \in \mathcal{C}_{c,k}$ and $x \notin X$. Note that, if $x \notin X$, then $x \in \mathbb{S} \setminus X \subset \ensuremath{\operatorname{conv}}(\mathbb{S} \setminus X).$ Since $x \in \mathcal{C}_{c,k}$, we have $c^Tx>k$. But due to the fact that $x \in \ensuremath{\operatorname{conv}}(\mathbb{S} \setminus X)$, we also have $c^Tx \leq k$, which leads to a contradiction. Therefore, $\mathcal{C}_{c, k} \subset X$.
\end{proof}
\noindent
We are now able to prove Proposition 1 given in Section 4 of our paper, which states that, for any $\varepsilon \in (0,1)$, the function $\Delta(X)$ attains its minimum over $\mathcal{X}_{\varepsilon}$ at some $X$ which is a spherical cap, i.e., the radius of the largest ball contained in $\ensuremath{\operatorname{conv}}(\mathbb{S} \setminus X)$ is minimized when $X$ is a spherical cap.
\begin{prop}\label{thm:mainSphericalCap}
Let $\tilde{\varepsilon} \in [0,1]$ and $\mathcal{X}_{\tilde{\varepsilon}} = \{X \subset \mathbb{S}: \sigma^{n-1}(X) \leq \tilde{\varepsilon}\}$. Then, the function $\Delta(X)$ attains its minimum over $\mathcal{X}_{\tilde{\varepsilon}}$ for some $X$ which is a spherical cap. We denote by $\delta(\tilde{\varepsilon})$ this minimal value, which takes the following expression: $$\delta(\tilde{\varepsilon}) = \sqrt{1 - I^{-1}(2 \tilde{\varepsilon};\frac{n-1}{2},\frac{1}{2})}.$$
\end{prop}
\begin{proof}
We prove the first part of the proposition by contradiction. Assume that $\Delta(X)$ does not attain its minimum over $\mathcal{X}_{\varepsilon}$ at any spherical cap. This means that there exists an $X^* \in \mathcal{X}_{\varepsilon}$ which is not a spherical cap and satisfies $\argmin_{X \in \mathcal{X}_{\varepsilon}}(\Delta(X))=X^*$. By Lemma \ref{lemma:constructSC}, we can construct a spherical cap $\mathcal{C}_{c,k}$ such that $\mathcal{C}_{c,k} \subset X^*$ and $\Delta(\mathcal{C}_{c,k}) = \Delta(X^*)$. Note that we further have $\mathcal{C}_{c,k} \subsetneq X^*$, since $X^*$ is assumed not to be a spherical cap. This means that $\sigma^{n-1}(\mathcal{C}_{c,k}) < \varepsilon$.
\noindent
Then the spherical cap $\mathcal{C}_{c, \tilde{k}}$ with $\sigma^{n-1}(\mathcal{C}_{c, \tilde{k}}) = \varepsilon$ satisfies $\tilde{k} < k$ by Remark \ref{lemma:muMonotone}. By Corollary \ref{lemma:deltaMonotone}, this implies $$\Delta(\mathcal{C}_{c, \tilde{k}}) < \Delta(\mathcal{C}_{c, k}) = \Delta(X^*).$$ This is a contradiction, since we initially assumed that $\Delta(X)$ attains its minimum over $\mathcal{X}_{\varepsilon}$ at $X^*$.
\noindent
We can now give an expression for $\delta(\varepsilon)$. We know that:
\begin{equation}\label{eqn:sc}
\delta(\varepsilon) = \Delta(\mathcal{C}_{c, k}),
\end{equation}
for some spherical cap $\mathcal{C}_{c,k} \subset \mathbb{S}$, where $\sigma^{n-1}(\mathcal{C}_{c, k}) = \varepsilon$. It is known (see e.g. \cite{sphericalCapRef}) that the area of such $\mathcal{C}_{c, k}$ is given by the equation:
\begin{equation}\sigma^{n-1}(\mathcal{C}_{c, k}) = \frac{I\left(1-\Delta(\mathcal{C}_{c,k})^2; \frac{n-1}{2}, \frac{1}{2}\right)}{2}
\end{equation}
where $I$ is the regularized incomplete beta function. Since \mbox{$\sigma^{n-1}(\mathcal{C}_{c, k})= \varepsilon$,} we get the following set of equations:
\begin{eqnarray}\nonumber \varepsilon &=& \frac{I\left(1- \Delta(\mathcal{C}_{c,k})^2;\frac{n-1}{2}, \frac{1}{2}\right)}{2} \\
\nonumber 1- \Delta(\mathcal{C}_{c, k})^2 &=& I^{-1}\left(2\varepsilon; \frac{n-1}{2}, \frac{1}{2}\right) \\
\label{eqn:last}\Delta(\mathcal{C}_{c, k})^2 &=& 1- I^{-1}\left(2\varepsilon; \frac{n-1}{2}, \frac{1}{2}\right).
\end{eqnarray}
\noindent
This gives us
\begin{equation}
\delta(\varepsilon) = \sqrt{1- I^{-1} \left( 2\varepsilon; \frac{n-1}{2}, \frac{1}{2} \right)}.
\end{equation}
\end{proof}
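For completeness, the expression for $\delta$ is easy to evaluate numerically; the following small helper (our own sketch, assuming SciPy, whose \texttt{betaincinv} inverts the regularized incomplete beta function $I$) computes $\delta(\varepsilon)$ for a few dimensions.
\begin{verbatim}
import numpy as np
from scipy.special import betaincinv

def delta(eps, n):
    """delta(eps) = sqrt(1 - I^{-1}(2*eps; (n-1)/2, 1/2)); valid for 0 <= 2*eps <= 1."""
    return np.sqrt(1.0 - betaincinv((n - 1) / 2.0, 0.5, 2.0 * eps))

for n in (2, 4, 6):
    print(n, [round(float(delta(e, n)), 4) for e in (0.01, 0.05, 0.1)])
\end{verbatim}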
\end{document}
\begin{document}
\title{The axiom system of classical harmony}
\author{Andr\'{a}s T\'{o}bi\'{a}s\footnote{tobias\_AT\_ math.tu-berlin.de} \\ Institut f\"{u}r Mathematik, Technische Universit\"{a}t Berlin, Germany.\footnote{The work on which this paper is based was done between 2012--2014 for a national research competition and as my Bachelor's thesis \cite{szakdoga} at the Institute of Mathematics, Budapest University of Technology and Economics, Hungary, under the supervision of Dr.~\'{A}kos G.~Horv\'{a}th.}}
\maketitle
\begin{abstract}
This paper provides a new mathematical axiom system for classical harmony, which is a prescriptive rule system for composing music, introduced in the second half of the 18th century. The clearest model of classical harmony is given by the homophonic four-part pieces of music. The form of these pieces is based on the earlier four-part chorale adaptations of J. S. Bach. Our paper logically structures the musical phenomena belonging to the research area of classical harmony. Its main result, the fundamental theorem of tonality, provides a way to construct a complete axiom system which incorporates the well-known classical compositional principles about chord changing and voice leading. In this axiom system, a piece complies with classical harmony if it satisfies the formal requirements of four-part homophony and it does not violate any classical chord-changing or modulational rules. \\
\textbf{Keywords:} axiom system, classical compositional principles, trichotomy of keys, homophonic four-part piece, fundamental theorem of tonality, chord-changing rules
\end{abstract}
\section{Introduction}
The main goal of this paper is to provide a mathematical axiomatization for the strictly homophonic four-part model of classical harmony. This model is mainly based on J.~S.~Bach's four-part chorales, collected in \cite{bach}, which had been written before the start of Viennese classicism. Later, in the second half of the 18th century, almost unequivocal, prescriptive compositional principles were determined for this four-part model. By their nature, these compositional principles form a mathematical axiom system, as soon as all the basic notions of music corresponding to classical harmony are mathematically well-defined. In this paper, we present a possible logical ordering of these musical notions, and with these definitions at hand we present a \emph{consistent and complete} axiom system which tells how to write homophonic four-part pieces.
Homophonic four-part pieces are interpreted as special right-continuous functions $M: \mathbb R^+ \to K^4$, where $K$ is an \emph{equal-tempered piano}. Here the real half-line refers to time. The special properties of $M$ are that for any $t \in \mathrm{Dom}~M$, $M(t)$ is a special kind of \emph{chord} in a special four-part version, and that the four voices always change their tones at the same time, yielding a \emph{chord change}. The chords in the ranges of homophonic four-part pieces have to be certain preliminarily given kinds of \emph{triads} and \emph{seventh chords} associated with a musical \emph{key}.
The formulation of the axiom system is the following. The axioms of classical harmony, which are called \emph{compositional principles}, determine whether a homophonic four-part piece complies with classical harmony or not. One of the first axioms, the so-called \emph{correctability condition}, describes when a homophonic four-part piece complies with classical harmony \emph{apart from the chord-changing points}. Our \emph{fundamental theorem of tonality} (Theorem \ref{vécsey}) gives an equivalent condition for correctability of feasible pieces on a finite time interval. It claims that among these pieces, the correctable ones are exactly the ones which are \emph{locally tonal}. Its proof uses a sequential process for showing correctability of tonal pieces. Consequently, the chord-changing rules of classical harmony can be embedded in the axiom system as constraints. A correctable piece complies with classical harmony in a chord-changing point if none of these constraints is violated. Hence, as long as it does not cause inconsistency, one can add new chord-changing rules to the axiom system according to new results of music theory, or remove old ones. This way, exercises of writing correct four-part pieces consisting of given chords can virtually be interpreted as constraint programming problems.
Some chord-changing compositional principles are mathematically described in \cite{pachetroy}. That paper investigates harmonizing four-part chorales algorithmically using constraints, and it turns out that it is not necessary to construct a full mathematical axiom system of classical harmony for this purpose. An axiom system is rather important for educational purposes: in order to teach musician students how to write \emph{perfect} examples of classical chord progression, one has to know all compositional principles. The students' task includes not only harmonizing given soprano melodies but also, e.g., finding a correct voice configuration for a piece with given chords. Also in the case of more complex Classicist genres, generalized versions of the four-part rules apply. It is nevertheless true that although in Viennese classicism many compositional rules were indeed prescriptive for real music, composers' practice was substantially more irregular than what classical harmony would indicate, see e.g. \cite[p.~167]{ebcioglu}.
The axiomatization is completed by the \emph{modulation} (musical key change) rules, which give constraints on several consecutive chords which a modulation consists of. These rules are musically quite complex but mathematically less interesting, therefore we omit them; see Section \ref{modulations} of the Appendix for an overview.
As for the compositional principles not detailed in \cite{benson}, we follow the traditional Hungarian music theory coursebook \cite{kesztler}. We use the German notation of classical harmony, according to the Hungarian convention, but in the meaning of the axioms this makes no difference from e.g. the British notation system.
The contribution of this paper is the construction of a mathematical axiom system for classical harmony. In particular, we classify musical keys. We prove that there are \emph{exactly three different keys} with the common base on the equal-tempered piano, up to enharmony. Our main result is the fundamental theorem of tonality, which makes it possible to embed the Classicist chord-changing compositional principles as constraints to a consistent axiom system. Our work gives a logical basis for writing a new music theory coursebook for high schools. This seems to be necessary in Hungary, and it can also be helpful in other European countries.
The paper is organized as follows. Section \ref{alja} enumerates and logically orders basic musical notions. It also explains compliance of triads and seventh chords with classical harmony. In Section \ref{hangnem}, we define and classify musical keys. In Section \ref{top}, we present the model of homophonic four-part pieces. Musical functions and functional tonality are defined in Section \ref{tonikus}, while the fundamental theorem of tonality and the structure of our axiom system is presented in Section \ref{fő}.
\section{Basic notions of music theory used in classical harmony} \label{alja}
We provide an axiom system for composing homophonic four-part pieces of music, in a first-order language. We use the language of set theory, assuming the Zermelo--Fraenkel--Choice (ZFC) axiom system. We use simple physical properties of the overtone system, but formally these only have arithmetic meaning. As usual in music theory, a \emph{tone} $Y$ is a longitudinal wave moving in an elastic medium with frequency $f(Y)>0$. For a tone $X$ with frequency $f(X)>0$, $X$ is \emph{audible} if $20~\mathrm{Hz}<f(X)<20~000~\mathrm{Hz}$. When speaking about tones, we always mean that the tone is uniquely determined by its frequency and consists of all of its \emph{overtones}. The set of overtones of the tone $X$ is $\lbrace Y \vert~ Y$ is a tone, $\exists n \in \mathbb{N}^+:~f(Y)=n~f(X) \rbrace$. The overtone of $X$ with frequency $n~f(X)$ is called the $n$th overtone of $X$. Thus, when we consider \emph{a tone $X$ with frequency $f(X)$}, $X$ can be mathematically described, e.g., as $X=(0,f(X)) \in (\mathbb{R}^+)^2$, thus the definitions of this paper can be derived consistently from ZFC\label{jujjujjuj}. In this sense, our \emph{compositional principles} which describe compliance of certain musical entities (e.g. triads, four-part pieces or modulations) with classical harmony are just special definitions in the mathematical framework of ZFC. But if one considers classical harmony in itself, they are indeed axioms, in the sense that these properties are postulated about all entities that comply with classical harmony. This is true for all Axioms in this paper.
In the whole article, $B_r(x)$ denotes the open ball with radius $r$ around the point $x$ in any metric space, further $\overline{A}$ the closure of $A$ and $\partial A$ the boundary of $A$ in any topological space. $\mathrm{Int}$ denotes interior, $\mathrm{Dom}$ domain and $\mathrm{Ran}$ range.
Musical \emph{intervals} are equal distances in the (base 2) logarithmic frequency scale. The most important intervals can be derived from the overtone system, cf. \cite[Section 4.1]{benson}. The interval of a tone and its 2nd overtone is called \emph{perfect octave}, the one of the 2nd and 3rd overtones of a tone \emph{perfect fifth}, and the one of the 3rd and 4th overtones of a tone \emph{perfect fourth}.
We say that a tone $X$ is higher than a tone $Y$ (and $Y$ is lower than $X$) if $f(X)>f(Y)$. Intervals can be summed, hence one can speak about \emph{octave-equivalent} tones $X$ and $Y$, the interval of which is $n$ octaves with $n \in \mathbb{Z}$. If $X$ is $n$ octaves higher than $Y$, this means $f(X)=2^n f(Y)$. Hence, octave equivalence is an equivalence relation on the set of tones. \emph{The octave-equivalence class} of the tone $X$ will be denoted by $[X]$.
The following definitions will be used in Section \ref{tonikus}, where we define musical functions and tonality.
Let $X$ be a tone and $Y$ its 3rd overtone. The \emph{leading tone} of $[Y]$ is the octave equivalence class of $X$'s 11th overtone; the \emph{seventh tone belonging to} $[X]$, \label{seventhtone}also called the \emph{upper leading tone of $X$'s fifth overtone's equivalence class} is the octave equivalence class of the 7th overtone of $Y$. We also define these relations for the tones themselves: e.g. for $U \in [U]$ and $V \in [V]$, if $[U]$ is the leading tone of $[V]$, then we say that $U$ is the leading tone of $V$.
We define the \emph{perfect $X_1$ major scale} for a tone $X_1$. Generally, a \emph{seven-degree scale with base $X_1$} is a set of tones $\lbrace X_1, X_2,\ldots,X_7 \rbrace$ where $f(X_i) > f(X_j) \Leftrightarrow i>j$ and $f(X_7)<2f(X_1)$ (that is, every member of the scale is strictly less than one octave higher than the base). $X_i$ is called the $i$th degree scale tone of the scale. According to the Hungarian notation, we denote the degrees and the operations among them with the elements of the prime field $\mathbb{Z}_{7}$, but we use the capital Roman numeral for the integer $(n \pmod 7) +1$ instead of $n \in \mathbb{Z}_7$. The \emph{perfect $X$ major scale} is a seven-degree scale with base $X$, where the frequency ratios of the neighbouring degree tones are respectively:
\[\tfrac{9}{8},\ \tfrac{10}{9},\ \tfrac{16}{15},\ \tfrac{9}{8},\ \tfrac{10}{9},\ \tfrac{9}{8},\ \tfrac{16}{15},\]
where the last ratio is the ratio of the second overtone of $X$ to the VIIth degree scale tone.
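As a quick numerical illustration (the base frequency of 264 Hz below is an arbitrary choice of ours), one can compute the perfect major scale from these ratios and express each degree in cents, i.e., hundredths of an equal-tempered semitone, to see how close the perfect scale is to the equal-tempered degrees introduced later.
\begin{verbatim}
import math

ratios = [9/8, 10/9, 16/15, 9/8, 10/9, 9/8, 16/15]   # steps I-II, ..., VII-octave
base = 264.0                                          # arbitrary base frequency in Hz
freqs = [base]
for r in ratios[:-1]:                                 # the last step only closes the octave
    freqs.append(freqs[-1] * r)

for degree, f in enumerate(freqs, start=1):
    cents = 1200 * math.log2(f / base)                # distance from the base in cents
    print(f"degree {degree}: {f:8.2f} Hz  {cents:7.1f} cents"
          f"  (nearest equal-tempered degree: {round(cents / 100) * 100})")
\end{verbatim}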
For a perfect major scale, the following are approximately true, in the sense that the human ear cannot observe that they are false:
\begin{enumerate}[(i)]
\item the degree VII tone is the leading tone of the degree I, III is the one of IV,
\item IV is the upper leading tone of III, I is the one of VII,
\item the interval between I and IV is a perfect fourth, the one between I and V is a perfect fifth,
\item IV is the seventh tone belonging to I, and I the one belonging to V.
\end{enumerate}
The sum of \emph{twelve perfect fifths} starting from a tone $X$ results in a tone with frequency $\frac{531441}{4096} f(X)$, while the sum of \emph{seven perfect octaves} gives a tone with frequency $128~f(X)$. The difference between these two tones is noticeable by an average person. However, if this deviation is spread equally along the whole interval, it cannot be perceived locally. Therefore one aims to realize the concept of \emph{the circle of fifths}, i.e., 12 quasi-fifths equal to 7 octaves, on a musical instrument each tone of which is the base of a seven-degree scale perceptually equivalent to a perfect major scale. The leading tone and upper leading tone/seventh tone connections between the quasi-perfect major scales can then be used to move from each major scale to the two scales whose bases are one quasi-perfect fifth higher and lower, respectively.
This is the idea of the \emph{equal-tempered piano}.
\begin{definition}
A countable set $K$ of tones is an \emph{equal-tempered piano} if
\begin{enumerate}[(i)]
\item $A \in K$, where $A$ is the normal $A4$ tone with frequency $440~\mathrm{Hz}$,
\item $K$ has two tones $X$ and $Y$ the interval of which is at least 7 octaves,
\item If a tone $X$ is an element of $K$, then $f(X)=(\sqrt[12]{2})^n~f(A)$ for some $n \in \mathbb Z$. Further, if $f(X)=(\sqrt[12]{2})^n~f(A)$ for some $n \in \mathbb Z$ and $\exists Y, Z \in K$ such that $f(Y) < f(X)<f(Z)$, then $X \in K$.
\end{enumerate}
\end{definition}
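For concreteness, here is a small sketch (the 88-key range is our own illustrative choice; the definition only requires a span of at least seven octaves) of an equal-tempered piano as a finite set of frequencies, together with the arithmetic of the circle of fifths discussed above.
\begin{verbatim}
A4 = 440.0
# n = 0 is the normal A4; the assumed 88-key range spans more than 7 octaves.
piano = {n: A4 * 2 ** (n / 12) for n in range(-48, 40)}

# Twelve tempered fifths (7 semitones each) close exactly on seven octaves,
# whereas twelve perfect fifths overshoot by the ratio (3/2)**12 / 2**7.
print(2 ** (12 * 7 / 12))         # 128.0, i.e. exactly seven octaves
print((3 / 2) ** 12 / 2 ** 7)     # ~1.0136, the deviation equal temperament spreads out
\end{verbatim}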
According to this definition, $A$ is an element of every -- finite or infinite -- equal-tempered piano. Thus, the octave equivalence classes of the piano's white keys ($A,B,\dotsc,G$) can be defined. Using a \emph{well-tempered}, i.e., approximately equal-tempered piano that already realized the circle of fifths, J.S. Bach showed that every tone of the equal-tempered piano can serve as a base of a quasi-perfect major scale, by composing his \emph{Das wohltemperierte Klavier}, which contains one piece written in each major key of his well-tempered piano.
\emph{Enharmonic equivalence} in the context of the 12-tone equal tempered scale means that two tones $Y,Z$ originate from two different perfect major scales, but there is a tone $X$ on the equal tempered piano from which neither $Y$ nor $Z$ is significantly different for the human ear. Enharmonic equivalence depends on the listener's own hearing and cultural background; here we follow the classicist European convention. Non-audible tones are called enharmonic if they have audible octave-equivalents that are enharmonic. The corresponding octave equivalence classes are also called enharmonic. If the tones $A$ and $B$ are enharmonic, we write $A \sim B$. It is easy to see that $\sim$ is an equivalence relation. Further,
\begin{enumerate}
\item The interval of two neighbouring tones of the equal-tempered piano is called \emph{semitone}, the sum of two semitones (the distance of second neighbours) a \emph{wholetone}. The sequence of tones 0, 2, 4, 5, 7, 9 and 11 semitones higher than an \emph{arbitrary} piano tone is enharmonic to a perfect major scale.
\item If $X$ and $Y$ are two piano tones where $Y$ is a wholetone higher than $X$, then the (only) piano tone $Z$ such that $f(X)<f(Z)<f(Y)$ is the leading tone of $Y$ and the upper leading tone of $X$, up to enharmonic equivalence.
\end{enumerate}
The C major scale on the equal-tempered piano consists of the seven white keys. Moving \emph{stepwise upwards} in the circle of fifths, one reaches the G, D, A, E, B major scales consecutively. At each step, one new tone appears in the scale; this is the VIIth degree tone of the new scale. We denote this new tone by X$\sharp$, where X is the element of the C major scale which has been replaced by the new tone one semitone higher. This way the following tones appear consecutively: F$\sharp$, C$\sharp$, G$\sharp$, D$\sharp$, A$\sharp$, the leading tones to G, D, A, E, B respectively. After B, the next fifth step upwards leads to F$\sharp$. Now, starting from the C major scale again and moving \emph{stepwise downwards} fifth by fifth, we reach the F major scale first, which has exactly one scale tone that is outside the C major scale: instead of B, a tone one semitone lower occurs: the seventh tone with respect to F. Let X$\flat$ denote the piano tone one semitone lower than the white key X; then moving downwards in the circle of fifths, we reach F, B$\flat$, E$\flat$, A$\flat$, D$\flat$ and G$\flat$ consecutively. $\sharp$ and $\flat$ marks can be multiplied. By construction, $\flat\sharp=\sharp\flat=\natural$ is the identity on the C major scale, and ``multiplication" of $\flat$'s and $\sharp$'s is commutative.
Note that G$\flat$ and F$\sharp$ refer to the same (black) piano keys, these two tones are enharmonic, also D$\flat$ is enharmonic to C$\sharp$ etc. However, they are the same only under equal temperament: if we build a \emph{perfect} A major scale, F$\sharp$ is the VI degree scale tone there, while G$\flat$ is reached if we move downwards in the D$\flat$ major scale by 8 \emph{perfect} fifth steps, and take the IV degree scale tone. It is a well-known experimental result that these actual G$\flat$ and F$\sharp$ differ significantly.
From this point, all major scales will be situated on an equal-tempered piano, with all degrees derivable from the C major scale with finitely many ---in practice, usually 0, 1 or 2 --- $\flat$s or $\sharp$s. The fifth-by-fifth sequence of sharpened scale tones of a major scale on the equal-tempered piano (F$\sharp$, C$\sharp$, $\ldots$) or the sequence of flattened scale tones of the major scale (B$\flat$, E$\flat$, $\ldots$) is called the major scale's \emph{key signature}. Having established these scales on the piano, the traditional notation of musical intervals among their degrees can be established, see e.g. \cite[Appendix E]{benson}. Also one can define \emph{consonance} and \emph{dissonance} of these intervals, cf. \cite[Chapter 4]{benson}.
By definition, an equal-tempered piano has to be \emph{at least as wide as a real piano}, in order to make it possible that the piano covers 7 octaves ($\approx$ 12 fifths). Let $d_2(X,Y)$ denote the interval of the notes $X$ and $Y$ of \emph{any} equal-tempered piano $K$, measured in semitones. The construction of equal temperament implies the following, also if the equal-tempered piano is infinite.
\begin{proposition}
$(K,d_2)$ is a metric space and $d_2$ generates the discrete topology.
\end{proposition}
Thus, if $K$ is an equal-tempered piano, the Cartesian product $K^n$ is also equipped with the discrete topology. The elements of $K^n$ are called \emph{chords}.
Hence, we can speak about \emph{Borel-measurable functions} $M:~\mathbb{R}_0^+ \to K^n$, which we call \emph{$n$-part pieces}. The \emph{$k$th voice} of $M$ is $\mathrm{pr}_k \circ M$, where $\mathrm{pr}_k$ is the projection to the $k$th instance of the equal-tempered piano. We are interested in the \emph{four-part case}; we define the special, homophonic four-part pieces in Section \ref{top}. There the voices (in increasing order of their numbers) are called, as conventionally, \emph{bass, tenor, alto} and \emph{soprano}. In the simplest models of classical harmony, each chord appearing in a homophonic four-part piece has to be a \emph{triad} or a \emph{seventh chord} in a correct four-part form. In Bach's four-part chorales, this is not true any more, but most of the chorales can be obtained from a piece consisting of such chords via applying a finite set of local modifications, the so-called \emph{figurations}, see Definition~\ref{koralka} below.
\emph{Triad names} are special elements of the factor space $K^3 / \equiv$ on an arbitrary equal-tempered piano $K$, where $\equiv$ is the octave equivalence relation. These contain scale tones or once altered tones from a certain seven-degree scale on $K$, and their main characteristic is that they consist of a $k$th, a $k+2$nd and a $k+4$th degree tone$\!\pmod 7$ of the given major scale based on one of the twelve enharmonic equivalence classes of the equal-tempered piano. With this notation, we say that the triad is of degree $k$. There are four kinds of triad names for which we say that they \emph{comply with classical harmony}, according to Table \ref{egykém}.
A \emph{four-part version of a triad} -- later in this article, simply: a \emph{triad} -- is an element of the piano power $K^4$, which consists of the tones of a triad name, exactly one of them in two voices.
If the triad consists of the $k$th, $k+2$nd and $k+4$th degree scale tone of a seven-degree scale -- these are called the \emph{base}, the \emph{third} and the \emph{fifth} of the triad, respectively -- on the equal-tempered piano, its \emph{position} is determined by which tone it has in the bass. If in the bass there is the $k$ degree tone, where $k$ refers to the corresponding Roman numeral as before, then the triad is in \emph{root position} (German--Hungarian notation of the triad: $k$), if the $k+2$ degree tone is in the bass, then the triad is in \emph{first inversion} (notation: $k^6$), and if the $k+4$ degree tone, then in \emph{second inversion} (notation: $k_4^6$). (We note that in some models of classical harmony, triads without fifths are allowed, in such cases the condition that exactly one tone appears in two voices is not satisfied; either the base appears in three voices or both the base and the third are doubled. We will not take this into account in the rest of the present paper.)
\begin{table} \caption{Triads (above) and seventh chords (below)}\label{egykém} \begin{footnotesize} \begin{tabular}{|ll|ccc|}
\hline
Name & Notation & $k\leftrightarrow k+2$ interval & $k+2\leftrightarrow k+4$ i. & $k+4\leftrightarrow k$ i. \\ \hline
Major triad & M &\emph{major third} & minor third & perfect fifth \\ \hline
Minor triad & m & \emph{minor third} & major third & perfect fifth \\ \hline
Diminished triad & d & minor third & minor third & \emph{diminished fifth} \\ \hline
Augmented triad & A & major third & major third & \emph{augmented fifth} \\ \hline
\end{tabular}
\begin{tabular}{|l|l|l|l|l|l|l|}
\hline
Name: & Third & Fifth & Seventh & Partial triads & Examples & Example \\
($\ldots$) \emph{seventh} & & & & & in major & in minor \\ \hline
augmented major &major & augmented & major & major, augm. & none & III \\ \hline
major minor &major & perfect & major & major, minor & I, IV & VI \\ \hline
major/dominant & major & perfect & minor & major, dimin. & V & V \\ \hline
harmonic minor & minor & perfect & major & minor, augm. & none & I \\ \hline
minor major & minor & perfect & minor & minor, major & II, III, VI & IV \\ \hline
semi-diminished & minor & diminished & minor & dimin., minor & VII & II \\ \hline
diminished & minor & diminished & diminished & dimin., dimin. & none & VII \\ \hline
\end{tabular}
\end{footnotesize}
\end{table}
Consider the union of a degree $k$ and a degree $k+2$ triad name on an arbitrary seven-degree scale. This is indeed an element of $K^4 / \equiv$, and it is called a \emph{seventh chord name}. If $H \in K^4$ consists of the tones of a seventh chord name in any permutation of the voices, then $H$ is called a \emph{seventh chord}. This name comes from the fact that there is a seventh interval between the $k$ and the $k+6$ degree scale tones. The degree $k$ and degree $k+2$ triads are the \emph{partial triads} of the seventh chord. The position of a degree $k$ seventh chord can be: \emph{(root position) seventh chord} (German--Hungarian notation: $k^7$), \emph{first inversion} ($k_5^6$), \emph{second inversion} ($k_3^4$) and \emph{third inversion} ($k^2$), if in the bass there is the $k$th, $k+2$nd, $k+4$th and $k+6$th degree tone of the seventh chord name, respectively. For the origin of these notations, we refer to \cite[Book I., Section II.11]{kesztler}.
The $k$th, $k+2$nd, $k+4$th and $k+6$th degree tones of a seventh chord are called \emph{base, third, fifth} and \emph{seventh} respectively. If both partial triads of a seventh chord name $H$ comply with classical harmony, and there is no triad which is voicewise enharmonic to $H$, then we say that $H$ \emph{complies with classical harmony}. This second assumption is made to exclude the \emph{augmented triad} (see Table \ref{egykém}), which can be represented as a seventh chord but is indeed just a triad, from the set of seventh chord names.
As a remark, we note that a root position \emph{dominant seventh} (see Table 1) that complies with classical harmony may be \emph{fifth deficient}, which means that it need not contain its fifth in any voice but instead the base in two voices (one of these voices is necessarily the bass).
Table \ref{egykém} also shows the seventh chord types, with examples consisting of scale tones of the major and the minor key (see Section \ref{hangnem}). For four-part versions of triads and seventh chords, we say that they \emph{comply with classical harmony} if their name complies with classical harmony, in each of their voices the pitches (frequencies) accord to the conventional pitch interval associated with the instrument or singer that presents the voice, and the duplication of tones is correct. Questions about appropriateness of pitches in the four voices are quite delicate and they require music-historical research. From a model point of view, they can be ignored by just considering pieces on an equal-tempered piano that is infinite towards both directions. Classical tone duplication rules are e.g. that if a triad is in root position, then the only tone of the triad that may appear in two voices is the base etc. It is also required that all tones of a triad or seventh chord that are not scale tones of the scale corresponding to the chord may only appear in one voice. For detailed duplication rules, we refer to \cite[p.~30--183.]{kesztler}.
\section{Trichotomy of musical keys} \label{hangnem}
After introducing the basic musical notions, we define keys based on the idea of key stability and functional tonality in classical harmony. We first make sure that our definition accepts the major and the minor keys, which have been used in Europe for five centuries, as keys. Furthermore, our key definition makes it possible to find all possible key types. Our Proposition \ref{hangnemtyű} shows that apart from major and minor there is exactly one more type.
\begin{definition} \label{hangnemke}
Let $H$ be a seven-degree scale on the equal-tempered piano (consisting of scale tones and altered tones from the C major scale), with seven pairwise non-enharmonic scale tones. We say that $H$ is a \emph{key} if:
\begin{enumerate}[(i)]
\item the Vth degree seventh chord of $H$ is dominant,
\item all triad and seventh chord names that consist of the scale tones of $H$ comply with classical harmony,
\item if the $k$th degree seventh is dominant, then the degree $k+3 \pmod 7$ triad is major or minor, with the $(k+3)$th degree scale tone one perfect fourth higher than the $k$th degree one.
\end{enumerate}
\end{definition}
The condition $(iii)$ means that all dominant sevenths can \emph{resolve to their tonic}, see Section \ref{tonikus}. Definition \ref{hangnemke} implies the next two properties:
\begin{proposition}
In a key the first degree triad is major or minor, and the VIIth degree scale tone is the leading tone of the Ist degree scale tone.
\end{proposition}
The resolution of the Vth degree seventh (or triad) to the Ist degree triad is the key ingredient of the stability of the key, as detailed in Section \ref{tonikus}.
\begin{proof}
Parts (i) and (iii) of Definition \ref{hangnemke} imply that the Ist degree triad is major or minor, moreover that the Vth degree seventh chord must resolve to the Ist degree triad. Hence, the Vth degree scale tone is 7 semitones higher than the Ist degree one. Therefore, the VIIth degree scale tone, which is the third of the Vth degree triad, is one semitone lower than the Ist degree scale tone. This implies the second part of the claim.
\end{proof}
The next lemma is a key observation of this section.
\begin{lemma}[The Minor Lemma] \label{molllemma}
In any key $H$ the following are equivalent:
\begin{enumerate}[(i)]
\item the VIth degree scale tone is 8 semitones higher than the Ist degree one,
\item all types of seventh chords from Table \ref{egykém} can be built from scale tones of $H$,
\item the VIIth degree seventh chord (built from scale tones) is diminished.
\end{enumerate}
\end{lemma}
\begin{proof}
The definition of key implies that the sequence of intervals of the first degree scale tone and the other scale tones is: $(0,2,?,5,7,?,11)$ semitones. The ?'s refer to unknown intervals. It is easy to see that the conditions of the lemma are equivalent to the condition that the sequence of intervals is $(0,2,?,5,7,\mathbf{8},11)$. The remaining ? stands for either 3 or 4, in order to satisfy the definition of key.
\end{proof}
\begin{proposition}[The trichotomy of keys] \label{hangnemtyű}
Let $X$ be an enharmonic equivalence class on an equal-tempered piano $K$. Then there are exactly three keys with first degree $X$, up to enharmonic equivalence. These are the major, the minor and the harmonic major (named by Rimsky-Korsakov in \cite{rk}) keys, with interval sequences $(0,2,4,5,7,9,11)$, $(0,2,3,5,7,8,11)$ and $(0,2,4,5,7,8,11)$, respectively. The latter two ones are the ones that satisfy the Minor Lemma.
\end{proposition}
\begin{proof}
As in the proof of the Minor Lemma, the definition of a key implies that the interval sequence of an arbitrary key's scale is $(0,2,?,5,7,?,11)$, so the VIth degree scale tone is $8$, $9$ or $10$ semitones above the base. If it is $10$ semitones above the base, then the IInd degree triad built from scale tones spans a minor third followed by a perfect fourth, so it does not comply with classical harmony and we do not obtain a key. If the VIth degree scale tone is $9$ semitones above the base, then the requirement that every triad and seventh chord built up from scale tones complies with classical harmony implies that the IIIrd degree tone is either $3$ or $4$ semitones above the base. If it is $4$, then the scale is the $X$ major scale. If it is $3$, then the IVth degree seventh chord built up from scale tones is a dominant seventh, but the interval between the IVth and the VIIth degree scale tone is not a perfect fourth but a tritone (enharmonic with 6 semitones/3 wholetones), so again we do not obtain a key. Finally, if the VIth degree scale tone is $8$ semitones above the base, the Minor Lemma leaves the IIIrd degree tone at $3$ or $4$ semitones above the base, which yields the minor and the harmonic major scale, respectively.
\end{proof}
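The case analysis above can also be checked mechanically. The following brute-force sketch (our own illustration, not part of the formal development) encodes chord types by their semitone intervals above the base following Table~\ref{egykém}, tests the three conditions of Definition~\ref{hangnemke} on the skeleton $(0,2,?,5,7,?,11)$, and recovers exactly the three scales of the proposition.
\begin{verbatim}
ALLOWED_TRIADS = {(4, 7), (3, 7), (3, 6), (4, 8)}        # M, m, d, A (Table 1)
ALLOWED_SEVENTHS = {(4, 8, 11), (4, 7, 11), (4, 7, 10),  # the seven types of Table 1
                    (3, 7, 11), (3, 7, 10), (3, 6, 10), (3, 6, 9)}
DOMINANT = (4, 7, 10)

def chord(scale, k, size):
    """Stacked-third chord of the given size on degree k (0-based), returned as
    semitone intervals above its base; wrapping past degree VII adds an octave."""
    tones = [scale[(k + 2*j) % 7] + 12 * ((k + 2*j) // 7) for j in range(size)]
    return tuple(t - tones[0] for t in tones[1:])

def is_key(scale):
    triads = [chord(scale, k, 3) for k in range(7)]
    sevenths = [chord(scale, k, 4) for k in range(7)]
    if any(t not in ALLOWED_TRIADS for t in triads):         # condition (ii)
        return False
    if any(s not in ALLOWED_SEVENTHS for s in sevenths):     # condition (ii)
        return False
    if sevenths[4] != DOMINANT:                              # condition (i)
        return False
    for k in range(7):                                       # condition (iii)
        if sevenths[k] == DOMINANT:
            j = (k + 3) % 7
            if (scale[j] - scale[k]) % 12 != 5:              # a perfect fourth upwards
                return False
            if triads[j] not in {(4, 7), (3, 7)}:            # resolution is major or minor
                return False
    return True

candidates = [(0, 2, x, 5, 7, y, 11) for x in (3, 4) for y in (8, 9, 10)]
print([s for s in candidates if is_key(s)])
# prints the minor, harmonic major and major interval sequences only
\end{verbatim}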
From the 1500s, European music is determined by the major--minor duality. The harmonic major key differs by only one scale tone (degree VI) from the major scale and also by only one scale tone (degree III) from the minor one, and therefore the listener automatically tries to perceive it as minor or major. This causes an instability of the harmonic major key in the context of European music history, which is however not an intrinsic instability of this key, since it satisfies the same stability conditions as the two other types of key, according to Definition \ref{hangnemke}.
The \emph{key signature \label{kettke} of a harmonic minor or harmonic major key} is the key signature of the major scale which has the degree I scale tone of the minor key as its degree VI scale tone. These major and minor scales are called \emph{relative}.
\section{Topology of the homophonic four-part setting} \label{top}
In this section, we present a continuous time model of classical harmony, given by \emph{homophonic four-part pieces}. Our notions allow for some non-feasible musical phenomena, such as an infinite piece and chords accumulating at one point in time (referred to as a \emph{packing point}). This way, one can also handle \emph{periodic pieces} that do not end in finite time, which is often intended in both classical (e.g. William Billings: \emph{The Continental Harmony}, 1794) and popular music. The continuous approach allows us to define the genre of \emph{Bach's chorale harmonizations} mathematically precisely, which is not possible if one uses only \emph{chord sequences} to describe homophonic four-part pieces.
\begin{definition} \label{osszhangpelda}
Let $K$ be an equal-tempered piano and $M: \mathbb{R}_0^+ \to K^4$ a four-part piece (see Section \ref{alja}). $M$ is called a \emph{homophonic four-part piece} if:
\begin{enumerate}
\item[(i)] Each element of $\mathrm{Ran}~M$ is a four-part version of a triad or a seventh chord (in some inversion) that complies with classical harmony,
\item[(ii)] each voice of each element of $\mathrm{Ran}~M$ only contains tones that can be derived from the C major scale on $K$ using the system of $\flat$'s and $\sharp$'s,
\item[(iii)] for all $H \in \mathrm{Ran}~M$, we have that $M^{-1}(H)=\lbrace x \in \mathrm{Dom}~M \vert M(x)=H \rbrace$ is a disjoint union of intervals closed on the left and open on the right.
\end{enumerate}
\end{definition}
\begin{definition}
If $M$ is a homophonic four-part piece, $B(M)$, the smallest (left-closed, right-open) interval that contains $\mathrm{Dom}~M$ is called the \emph{cover} of $M$.
\end{definition}
As we mentioned in the introduction, homophony means that if at some point in time one voice starts to play a new tone, then all other voices do so as well. It follows that if $M$ is a homophonic four-part piece and there is a pause at time $t \in B(M)$ in at least one voice of $M$ (i.e., $t \notin \mathrm{Dom}~\mathrm{pr}_i \circ M$ for some $i$), then this is actually a \emph{general pause}, i.e. a pause in all voices. Also, one can prove that the connected components of pauses are intervals closed on the left and open on the right.
Using the point $(iii)$ of Definition \ref{osszhangpelda}, it is easy to see that any homophonic four-part piece $M$ is \emph{right-continuous}.
Since $M$ takes values in a discrete space, this implies
\[
\forall t_0 \in \mathrm{Dom}~M~\exists \delta>0: ~ \forall t \in \left[ t_0, t_0+\delta \right[ ~ M(t)=M(t_0).
\]
Now we define some special points of homophonic four-part pieces.
\begin{definition}
Let $M$ be a homophonic four-part piece.
\begin{enumerate}
\item[(i)] $t=\inf~\mathrm{Dom}~M$ is the \emph{starting point} of $M$,
\item[(ii)] $t=\sup~\mathrm{Dom}~M$ is the \emph{endpoint} of $M$,
\item[(iii)] $t \in \mathrm{Dom}~M$ is a \emph{chord-changing point} of $M$ if $\exists \varepsilon>0,~\exists H_1 \neq H_2 \in K^4$ such that $\forall x \in \left[ t-\varepsilon, t \right[~ M(x)=H_1$ and $\forall x \in \left[ t, t+\varepsilon \right[,~M(x)=H_2$. From now on, let $A(M)$ denote the set of the chord-changing points of $M$.
\end{enumerate}
\end{definition}
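As a toy illustration (the chords and times below are arbitrary choices of ours, and tones are simply encoded as semitone distances from $A4$), a feasible homophonic four-part piece with finitely many chords can be represented by its breakpoints, with right-continuous evaluation and the chord-changing points read off directly.
\begin{verbatim}
import bisect

breakpoints = [0.0, 1.0, 2.0, 3.0]      # left endpoints of the chord areas
chords = [(-12, -5, 0, 4), (-10, -5, 2, 5), (-17, -5, 0, 4), (-12, -8, 0, 3)]
end = 4.0                               # Dom M = [0, 4)

def M(t):
    """Right-continuous evaluation: M(t) is the chord whose left-closed,
    right-open area contains t."""
    if not (breakpoints[0] <= t < end):
        raise ValueError("t is outside Dom M")
    return chords[bisect.bisect_right(breakpoints, t) - 1]

# A(M): breakpoints where the chord actually changes.
changes = [t for i, t in enumerate(breakpoints[1:], start=1) if chords[i] != chords[i-1]]
print(M(0.5), M(1.0), changes)
\end{verbatim}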
In the following, $\vee$ means logical ``or'' and $\wedge$ means logical ``and''.
\begin{definition}
The \emph{infimum of chord lengths} of a homophonic four-part piece $M$ is defined as \[ \inf\limits_{t \in \mathrm{Dom}~M} \sup \left\lbrace r_1+r_2 ~\vert~ r_1, r_2 \geq 0 ~ \wedge ~ \forall x \in \left[ t-r_1, t+r_2 \right[: M(x)=M(t) \right\rbrace . \]
\end{definition}
The proof of the next proposition is left for the reader.
\begin{proposition}
Let $M$ be a homophonic four-part piece. Then $ A(M)$ is countable. If the infimum of the chord lengths of $M$ is positive, then $A(M)$ has no accumulation point (i.e., $\overline{A(M)}$ consists only of isolated points), which implies that $A(M)$ is finite if $\mathrm{Dom}~M$ is bounded.
\end{proposition}
The following definition accounts for a non-feasible musical effect coming from our topological model, which is playing infinite music in finite time.
\begin{definition}
Let $M$ be a homophonic four-part piece. $t \in \overline{\mathrm{Dom}~M}$ is a \emph{packing point} of $M$ if $\forall \varepsilon>0$ $\left[ t-\varepsilon, t \right[$ contains infinitely many chord-changing points or \emph{isolated} boundary points of $\mathrm{Dom}~M$.
\end{definition}
Packing points can have interesting applications in the spirit of \cite[part~I.;~Decision]{ligeti}. But in the usual model in classical harmony, packing points do not occur, and neither do pieces without packing points but with infinitely many chords.
\begin{definition}
A homophonic four-part piece $M$ is \emph{feasible} if
\begin{enumerate}
\item[(i)] the infimum of chord lengths of $M$ is positive, and if $\mathrm{Dom}~M \neq B(M)$, then the infimum of general pause interval lengths is also positive,
\item[(ii)] and $\overline{\mathrm{Dom}~M}$ is compact.
\end{enumerate}
\end{definition}
\section{Definition of Bach's chorales}
In order to mathematically define chorales, first we introduce \emph{playing functions}, which describe the performance of these pieces with non-constant velocity.
\begin{definition}
Let $M$ be an $n$-part piece for a certain $n \in \mathbb{N}^+$ and $\theta:~ \left[ 0, \infty \right[ \to \left[ 0, \infty \right[$ a continuous, strictly increasing function, for which $\left[ 0, \infty \right[$ can be divided into countably many disjoint intervals $(I_i)_{i \in \mathbb N}$ joining each other and altogether covering $\left[ 0, \infty \right[$, such that restricted to the interior of each interval $I_i$, $\theta$ is twice continuously differentiable, $\theta'$ nowhere vanishes and $\inf\limits_{i \in \mathbb N} \lambda(I_i)>0$. \\ Then $\theta$ is called a \emph{playing function}. The function $M \circ \theta \vert_{B(M)}$ is called the \emph{playing} of $M$ belonging to $\theta$. For $t \in B(M)$, $\theta'(t)$ is called the \emph{playing velocity} and $\theta''(t)$ the \emph{playing acceleration} at the point $t$, if they exist. $\theta = \mathrm{id}$ gives the \emph{naturally parameterized} $n$-part piece. The set of playing functions is denoted by $PL(\mathbb{R}^+)$.
\end{definition}
It is easy to verify that on bounded intervals playing functions are absolutely continuous. The following definition is based on this.
\begin{definition}
If $\theta \in PL(\mathbb{R}^+)$, $M$ is a homophonic four-part piece and $A$ is a Lebesgue-measurable subset of $B(M)$, then the \emph{length} of the part $A$ of the piece $M$ under the playing function $\theta$ is $\mu_{\theta}(A)=\int\limits_{A} \mathrm{d}\theta(x) = \int\limits_{A} \theta'(x)\, \mathrm{d}x$.
\end{definition}
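As a small numerical illustration (the playing function below is our own choice, not taken from the paper), for a concrete $\theta$ the length of an interval reduces to $\theta(b)-\theta(a)$:
\begin{verbatim}
import math
from scipy.integrate import quad

# theta(x) = x + 0.5*sin(x) is continuous, strictly increasing and twice
# continuously differentiable with theta'(x) = 1 + 0.5*cos(x) >= 0.5 > 0,
# so it qualifies as a playing function with the single interval I_0 = [0, inf).
theta = lambda x: x + 0.5 * math.sin(x)
dtheta = lambda x: 1 + 0.5 * math.cos(x)

# Length of the part A = [1, 3) of a piece under this playing:
length, _ = quad(dtheta, 1.0, 3.0)
print(length, theta(3.0) - theta(1.0))   # both ~1.6498, as expected
\end{verbatim}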
The proof of the next proposition is left for the reader.
\begin{proposition}
$PL(\mathbb{R}^+)$ is a group under the composition of playing functions.
\end{proposition}
Now we turn our attention to the mathematical definition of the genre of Bach's chorales. We emphasize that chorale is an actual musical genre from Baroque, and hence its characteristics are originally non-prescriptive. Therefore however precisely we define a chorale, our definition may only be correct for the majority of the pieces, with certain exceptions.
\begin{definition}
Let $M$ be a homophonic four-part piece with $x \in \mathrm{Dom}~M$ and $M(x)=H$. Then the \emph{area} of $M(x)$ is the connected component of $M^{-1}(H)$ containing $x$. \\
The \emph{halving} of $I=\left[ a, b \right[ \subseteq B(M)$ in the playing belonging to $\theta \in PL(\mathbb{R}^+)$ is the division of $I$ into two disjoint intervals $I_1, I_2$, closed on the left and open on the right, which together cover $I$ and satisfy $\mu_{\theta}(I_1)=\mu_{\theta}(I_2)$.
\end{definition}
Using this, our definition for four-part chorale is the following.
\begin{definition} \label{koralka}
A four-part piece $\mathfrak{K}$ is a \emph{four-part chorale} if there is a feasible, naturally parameterized, pauseless homophonic four-part piece $M$ such that $\exists c>0$: $\forall x \in \mathrm{Dom}(M)$ the length of the area of $x$ by the identic playing function is $c$, and \\
$(1)$ $\mathfrak{K}$ can be derived from $M$ with using the following steps, the so-called \emph{figurations}. They are used for a finite number of $x \in \mathrm{Dom}~M$ and the figurations excluding each other are not done at the same time. \\ Types of the figurations are: \begin{description}
\item[Chord duplication] Halve the area of $M(x)$ by the identic playing function (natural parametrization), and in the first half of the area keep $M(x)$ for $\mathfrak{K}(x)$, in the other half $\mathfrak{K}(x)$ is one constant triad or seventh chord different from $M(x)$.
\item[Suspension] Halve the area of $M(x)$ by natural parametrization, in the second half of the area keep $M(x)$, in the first half, in one or two voices change the appropriate tone of $M(x)$ one step higher and keep the remaining voices.
\item[Advancement] Halve the area of $M(x)$ by natural parametrization; in the first half of the area keep $M(x)$, and in the second half, in exactly one voice, write a tone one step higher or lower which is equal to the tone in the same voice of the next chord after $M(x)$.
\item[Accented passing tone] Suppose that there is a third skip in some voice(s) of $M$ at arriving at or departing from $M(x)$. Then halve the area of $M(x)$, and on the half which is closer to the interval of the neighbouring chord that is involved in the third skip, instead of $M(x)$, write a tone the degree of which is between these two tones' degree. Note that only one accented passing tone per one chord area of $M$ is accepted.
\end{description}
$(2)$ The given four-part piece $\mathfrak{K}$ is meant to be associated with a canonic playing function $\theta$ that differs from the identic playing in the following: $\exists m \in \mathbb{N}$ such that $\theta$ changes the length of every $m$th chord interval of $M$ to $k$ times its original length, where $k \in \left] 1, 2 \right[$ is a conventionally accepted factor. Then we say that there is a \emph{pause} on every $m$th metric unit.
\end{definition}
\section{Convergence area of a key. Functions and tonality} \label{tonikus}
Let $T$ be a key with degree I scale tone $X$ in an enharmonic equivalence class on the equal-tempered piano $K$. The \emph{convergence area} of $T$ is defined $CA(T)=\lbrace (G_i, L_i) \vert i=1,\ldots,N \rbrace$, where $N\in\mathbb{N}$ and $\forall i$, $G_i$ is a fixed triad or seventh chord name of $K$ in a certain inversion and $L_i$ is the list of the accepted four-part versions of $G_i$, according to the definitions of compliance with classical harmony from Section \ref{alja}. In simplified notation, we also call $G_i$ an element of the convergence area, and view $CA(T)$ as the set of chords belonging to $T$.
We explain the meaning of the convergence area of $T$ as follows. Roughly speaking, a chord $X$ that complies with classical harmony and is derivable from the C major scale on $K$ by $\sharp$s and $\flat$s is considered to be an element of $CA(T)$ if:
\begin{enumerate}
\item it is built from the scale tones of $T$ and regularly used in homophonic four-part pieces associated with this key. Some inversions of some chords are excluded because of their too strong dissonances, e.g. diminished triads may only stand in first inversion; moreover, in the case of degree VII triads, the duplicated tone of the triad must be the third. Apart from $\mathrm{I}^6_4$ and $\mathrm{IV}^6_4$, triads in second inversion are not used. All such seventh chords, which are called \emph{diatonic seventh chords}, are used in all inversions, though some of them very rarely.
\item if $X \in CA(T)$ has a tone outside the scale of $T$, then $X$ is called an \emph{altered chord} of $T$. Such $X$ is convergent if and only if it can lead to chords in $CA(T)$ built from scale tones, without violating any chord-changing compositional principles, in a way that was usual in the practice of Viennese classical composers. A full list of convergent chords in minor and major keys can be found in Section \ref{durmoll} of the Appendix.
\end{enumerate}
After introducing convergence areas, we define weak tonality.
\begin{definition}
Let $M$ be a homophonic four-part piece and $t$ an accumulation point of $\mathrm{Dom}~M$. We say that $M$ is weakly tonal at the point $t$ with key $T$ if there is a connected open neighbourhood $U$ of $t$ such that $\forall x \in (U \setminus \lbrace t \rbrace) \cap \mathrm{Dom}~M$, $M(x)$ is an element of $CA(T)$.
\end{definition}
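To make the definition concrete, the following minimal Python sketch (our own illustration, not part of the formal model) checks weak tonality for a piecewise-constant piece; representing a piece as a list of chord intervals and $CA(T)$ as a plain set of chord labels are assumptions made only for this example.
\begin{verbatim}
# Illustrative sketch only. A piece is a list of (start, end, chord) triples with
# a constant chord on [start, end[; CA_T is a set of accepted chord labels.
def is_weakly_tonal(piece, CA_T, t, radius):
    """True if every chord sounding in ]t-radius, t+radius[ belongs to CA_T."""
    for start, end, chord in piece:
        if end <= t - radius or start >= t + radius:
            continue          # this chord interval misses the neighbourhood of t
        if chord not in CA_T:
            return False
    return True

piece = [(0.0, 1.0, "I"), (1.0, 2.0, "IV"), (2.0, 3.0, "V7"), (3.0, 4.0, "I")]
CA_C_major = {"I", "II", "IV", "V", "V7", "VI", "VII6"}   # toy convergence area
print(is_weakly_tonal(piece, CA_C_major, 1.5, 1.0))       # True
\end{verbatim}
Since chords occupy intervals of positive length, removing the single point $t$ from the neighbourhood does not change the outcome, so the sketch simply tests every chord interval that meets the neighbourhood.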
Weak tonality may be sufficient in the case when there are no modulations among different keys, but classical harmony has stronger measures on key stability, especially for establishing new keys after modulations. This involves the notion of musical \emph{functions}: the \emph{tonic, dominant and subdominant}.
In the following, the $k$th degree triad or seventh chord of a key $T$ will mean the one built from the scale tones of $T$. The \emph{leading tone/seventh tone of a key $T$} will refer to the leading tone/seventh tone of the key's Ist degree scale tone. \emph{The leading tone of a diminished triad or diminished seventh} is, by definition, its base.
\begin{definition}
Let $T$ be a key, $H \in CA(T)$ be a major or diminished chord, i.e., a major triad, diminished triad, major (dominant) seventh or diminished seventh, and $G \in CA(T)$ a major or minor triad. We say that $H$ \emph{resolves to} $G$ if \begin{enumerate} \item[(i)] $H$ contains the leading tone of (the base of) $G$, and \item[(ii)] if there is a tone $x$ belonging to $H$ that is not a scale tone in the major key built on the base of $G$, then $x$ is the upper leading tone of the fifth of $G$. \end{enumerate}
\end{definition}
\begin{definition}[Dominant function (D) and secondary dominant property.]
$X \in CA(T)$ has the \emph{dominant function} in the key $T$ if it resolves to the first degree triad of $T$. $Y \in CA(T)$ is a \emph{secondary dominant chord} if it resolves to any other major or minor chord built from the scale tones of $T$.
\end{definition}
\begin{definition}[Tonic function (T)]
$X \in CA(T)$ has the \emph{tonic function} in the key $T$ if
\begin{enumerate}[(i)]
\item $X$ contains a Ist and IIIrd degree tone of $T$, the first one from the scale $T$,
\item if $X$ contains the leading tone of $T$, then it is the seventh tone of $X$,
\item if $X$ is secondary dominant, then $X$ is a Ist degree major triad,
\item $X$ has no augmented and no diminished partial triad.
\end{enumerate}
\end{definition}
\begin{definition}[Subdominant function (S)]
$X \in CA(T)$ has the \emph{subdominant function} in the key $T$, if
\begin{enumerate}[(i)]
\item $X$ contains the IVth and VIth degree scale tone of $T$, possibly both altered,
\item if $X$ is secondary dominant (itself, not just up to enharmonic equivalence), then it resolves to the Vth degree triad. Moreover, the VIth degree tone of $X$ has neither more $\sharp$s nor more sharpening $\natural$s than the key signature of $T$.
\item The set of tones of $X$ and the one of the Ist degree seventh chord of $T$ have no other common tone than the Ist degree scale tone. Moreover, $X$ contains no altered Ist degree tone.
\end{enumerate}
\end{definition}
In Table \ref{lúfej}, we present the most typical tonic, dominant and subdominant chords built from scale tones. Here, if a root position triad participates in the table, then its first inversion has the same function. A seventh chord participating in the list has the same function as any of its inversions. Note that we require the \emph{seventh degree diminished seventh chord} to be an element of $CA(T)$ also when $T$ is a major key, where it is an altered chord. The Minor Lemma (Lemma \ref{molllemma}) guarantees that this chord is built from scale tones in a minor or harmonic major key but not in a major key. The non-altered convergent chords which are not listed in the table have no certain function. E.g., this applies to the IIIrd degree triad, as it is considered to be pending between the tonic and dominant functions, and many diatonic seventh chords also do not have a certain function.
\begin{table}
\centering
\begin{small}
\caption{Chords belonging to the three functions in the three different kinds of keys.}\label{lúfej}
\begin{tabular}{|c|c|c|c|}
\hline
Type of $T$ & Tonic chords & Dominant chords & Subdominant chords \\ \hline
major & $\mathrm{I}$, $\mathrm{VI}$, $\mathrm{VI}^7,$ $\mathrm{I}^7$ & $\mathrm{V}$, $\mathrm{VII}^6$, $\mathrm{V}^7$ & $\mathrm{II}$, $\mathrm{IV}$, $\mathrm{II}^7$ \\ \hline
minor & $\mathrm{I}$, $\mathrm{VI}$, $\mathrm{VI}^7$ & $\mathrm{V}^{\sharp}$, $\mathrm{VII}^{6\sharp}$, $\mathrm{V}_{\sharp}^7$, \small{$\underset{\sharp}{\mathrm{VII}}^7$} & $\mathrm{II}$, $\mathrm{IV}$, $\mathrm{II}^7$ \\ \hline
harmonic major & $\mathrm{I}^{\sharp}$, $\mathrm{VI}^{5\sharp}$, $\mathrm{VI}_{5\sharp}^7$ & $\mathrm{V}^{\sharp}$, $\mathrm{VII}^{6\sharp}$, $\mathrm{V}_{\sharp}^7$, \small{$\underset{\sharp}{\mathrm{VII}}^7$} & $\mathrm{II}$, $\mathrm{IV}$, $\mathrm{II}^7$ \\ \hline
\end{tabular}
\end{small}
\end{table}
\label{funky}
The degree I triad is called the \emph{tonic main triad} of the key $T$, the degree IV one is the \emph{subdominant main triad} and the degree V one is the \emph{dominant main triad}. \emph{Authentic step} means two different things in classical harmony. On the one hand, it means modulation (key change) to the key a fifth higher (the \emph{dominant} key), without changing the type of key; among triads this means a V$\to$I or I$\to$IV type chord progression. On the other hand, it means the function change $D \to T$ in a certain key in general. Similarly, \emph{plagal step} means two things: on the one hand, modulation to the key a fifth lower ---the \emph{subdominant} key--- and, among triads, making a I$\to$V or IV$\to$I step; on the other hand, the function change $T \to D$ in a certain key.
A \emph{cadence} is a chord progression consisting of at least two chords that is considered to be appropriate for finishing a piece. In view of this, in certain cases, we will call the dominant$\to$tonic and tonic$\to$subdominant steps \emph{authentic cadences} and the tonic$\to$dominant and subdominant$\to$tonic steps \emph{plagal cadences}. In a given key, a \emph{complete authentic cadence} is a chord progression with $T \to S \to D \to T$ function sequence, while a \emph{complete plagal cadence} is a chord progression with $T \to D \to S \to T$. It is a well-known fact that complete authentic cadences are the most applicable for finishing a piece, for which there are many arguments, but it is hard to get a full explanation, cf. \cite[Section 5.11]{benson}. Most of the classical, romantic and also recent popular music is based on $D \to T$ resolutions, supported by complete authentic cadences using the function $S$.
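As a small, hedged illustration of the cadence terminology (the encoding of functions as the letters T, S, D is our own), a complete authentic or plagal cadence at the end of a piece can be recognised from the function labels of the last four chords:
\begin{verbatim}
# Sketch: functions is a list of function labels ("T", "S", "D"), one per chord.
def ends_with_complete_authentic_cadence(functions):
    return len(functions) >= 4 and functions[-4:] == ["T", "S", "D", "T"]

def ends_with_complete_plagal_cadence(functions):
    return len(functions) >= 4 and functions[-4:] == ["T", "D", "S", "T"]

print(ends_with_complete_authentic_cadence(["T", "D", "T", "S", "D", "T"]))  # True
print(ends_with_complete_plagal_cadence(["T", "D", "T", "S", "D", "T"]))     # False
\end{verbatim}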
In the following, we present local and global notions of strong, \emph{functional tonality}. We already have all notions that we need in order to define tonality in a given point. The idea of this definition is to assign a key $T$ to the point as a limit, requiring that all three functions of $T$ occur in the vicinity of the point.
\begin{definition}[Local tonality with a given key.] \label{tonal}
Let $M$ be a (not necessarily homophonic) four-part piece and $t$ an accumulation point of $\mathrm{Dom}~M$. $M$ is \emph{tonal} in the point $t$ \emph{with key} $T$ if there is a connected open neighbourhood $U$ of $t$ such that, with $V=(U \setminus \lbrace t \rbrace) \cap \mathrm{Dom}~M$, the following conditions are satisfied:
\begin{enumerate}
\item[(i)] $M$ is weakly tonal with key $T$ in every point of $V$,
\item[(ii)] $M[V]=\lbrace M(x) \vert~x \in V \rbrace$ contains at least one chord from \emph{all functions} of $T$,
\item[(iii)] if $t \notin \mathrm{Int~Dom}~M$, then only triad-valued points of $\mathrm{Dom}~M$ accumulate to $t$.
\end{enumerate}
\end{definition}
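Conditions (i) and (ii) can be tested mechanically for a piecewise-constant piece; the sketch below (an illustration under the same assumed interval representation as before, with a given mapping from chord labels to their functions) ignores the boundary condition (iii).
\begin{verbatim}
# Sketch: piece is a list of (start, end, chord) triples; CA_T is a set of chord
# labels; functions_of maps a chord label to a subset of {"T", "S", "D"}.
# Condition (iii) on boundary points is not modelled in this illustration.
def is_locally_tonal(piece, CA_T, functions_of, t, radius):
    near_t = {chord for start, end, chord in piece
              if not (end <= t - radius or start >= t + radius)}
    weakly_tonal = near_t <= CA_T                                # condition (i)
    present = set().union(*[functions_of.get(c, set()) for c in near_t])
    return weakly_tonal and {"T", "S", "D"} <= present           # condition (ii)

functions_of = {"I": {"T"}, "IV": {"S"}, "V7": {"D"}, "VI": {"T"}}
piece = [(0.0, 1.0, "I"), (1.0, 2.0, "IV"), (2.0, 3.0, "V7"), (3.0, 4.0, "I")]
print(is_locally_tonal(piece, {"I", "IV", "V7", "VI"}, functions_of, 2.0, 2.5))  # True
\end{verbatim}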
In order to define tonality of an entire piece, we need to provide our first axiom of classical harmony, in particular about \emph{modulations}.
\begin{definition}[Modulation] \label{modka}
If there are keys $T_1$ and $T_2$ for the homophonic four-part piece $M$ such that $\mathrm{Dom}~M$ has a subset $Z=\left[ a, b \right[$, for which $M |_Z$ is feasible, and $\exists r_1>0, r_2>0$ such that on the whole set $(B_{r_1}(a) \cap \mathrm{Dom}~M) \setminus Z$, $M$ is weakly tonal with key $T_1$ and on the whole set $(B_{r_2}(b) \cap \mathrm{Dom}~M) \setminus Z$, $M$ is tonal with key $T_2$, then $\forall W \subseteq Z$ we say that $W$ belongs to a $T_1 \to T_2$ \emph{modulation}. We also say that $M$ \emph{modulates} on $Z$ from $T_1$ to $T_2$.
\end{definition}
Thus, we demand that modulations themselves be \emph{feasible} and \emph{pauseless}: they need to last until a finite time, without general pauses and packing points.
\begin{definition}[First modulational axiom] \label{modi}
Let $M$ be a homophonic four-part piece. If $M$ complies with classical harmony and contains a $T_1 \to T_2$ modulation, then $\exists~\left[ a, b \right[ \subseteq \mathrm{Dom}~M$ such that $M(a)$ is the degree I triad of $T_1$ (built from scale tones), $M(b-)=\lim_{x \uparrow b} M(x)$ is the degree I triad of $T_2$ (also consisting of scale tones), $M$ is weakly tonal in $a$ with key $T_1$, $M$ is tonal (in the sense of Definition~\ref{tonal}) in $b$ with key $T_2$, and $\left[ a, b \right[$ is the largest interval which belongs to this $T_1 \to T_2$ modulation.
\end{definition}
Note that while the modulation can only be finished in a correct way if tonality in the new key is provided, at the beginning of the modulation only weak tonality in the old key is required. Indeed, there are well-known examples of homophonic four-part pieces consisting of one schematic modulation that do not satisfy Definition~\ref{tonal} in their starting point with the starting key.
\begin{definition}[Local tonality via modulation.] \label{tonalmod}
Let $M$ be a homophonic four-part piece, $t$ an accumulation point of $\mathrm{Dom}~M$ and $T_1 \neq T_2$ two keys. $M$ is \emph{tonal} in $t$ and \emph{modulates from} $T_1$ to $T_2$ if there is a connected open neighbourhood $U$ of $t$ such that $\exists \left[a, b \right[=V \supseteq U$, where $V$ belongs to a modulation (see Definition \ref{modka}), which complies with classical harmony apart from the chord-changing points.
\end{definition}
According to this, we define tonality as a global property of a piece as follows.
\begin{definition}[Tonal piece]
Let $M$ be a homophonic four-part piece, $A \subseteq \overline{\mathrm{Dom}~M}$. $M$ is tonal on $A$ if $\forall x \in \overline{A}$, $M$ is tonal in $x$ by Definition \ref{tonal} or \ref{tonalmod}.
\end{definition}
\section{Axioms and the fundamental theorem of tonality} \label{fő}
The most well-known classical compositional principles are the \emph{chord-changing or voice-leading rules}. The goal of the homophonic four-part model is to describe the kind of chord progression and voice-leading between chords that classical harmony accepts. Often the formal rules of classical harmony do not tell how to write pieces but what to avoid: they forbid some kinds of chord progressions (e.g. V$\to$IV steps in some cases) and some kinds of voice-leading (e.g. parallel octaves or augmented second steps). In effect, this property of the axiom system gives the freedom to actually write \emph{pieces of art} and not just ``correct examples'' complying with classical harmony. What one \emph{should} write follows from the practice of Viennese classicist authors, cf. \cite[Section 5.11]{benson}.
Now, we present our axiom system, which consists of:
\begin{itemize}
\item the rules of compliance of triads and seventh chords with classical harmony,
\item the definition of correctable piece (global level of the pieces),
\item the compositional principles for modulations (semi-global level, describing global properties of a modulational segment of a piece). For these, see Section \ref{modulations} of the Appendix or \cite[p.~36]{statusquo},
\item the compositional principles for chord-changes (local level).
\end{itemize}
Our main result, the \emph{fundamental theorem of tonality} helps us embed the chord-changing rules in a mathematical axiom system for classical harmony. Its equivalent condition for tonality gives a general framework according to which pieces can comply with classical harmony \emph{apart from the chord changes}.
\begin{definition}[Correctable piece] \label{korrekt}
Let $M$ be a homophonic four-part piece, $a \in \mathrm{Dom}~M,~ b \in \overline{\mathrm{Dom}~M} \cup \lbrace \infty \rbrace, ~N =\left[ a, b \right[ \subseteq B(M)$. $M \vert_N$ is called a \emph{correctable piece} if all the following conditions are satisfied
\begin{enumerate}
\item[(i)] $M \vert_N$ has a positive infimum of chord lengths,
\item[(ii)] $N$ is the disjoint union of a finite even number $2n+2~(n \geq 0)$ of left-closed, right-open intervals $(I_0, I_1, \ldots, I_{2n+1})$ such that $\forall i \in \lbrace 1, 2, \ldots, 2n+1 \rbrace,~ I_i \cap \mathrm{Dom}~M \neq \emptyset$, and either $I_0=\emptyset$ or also $I_0 \cap \mathrm{Dom}~M \neq \emptyset$. Further, $\forall 0 \leq k \leq n$ such that $I_{2k} \neq \emptyset$, in the whole interval $I_{2k}$, $M$ modulates complying with classical harmony apart from the chord-changing points, and $\forall 0 \leq k \leq n$ for $I_{2k+1} \cap \mathrm{Dom}~M$ there is a unique key $T_k$ such that $\forall G \in M[I_{2k+1}]= \lbrace H \in K^4 \vert \exists x \in I_{2k+1}: M(x)=H \rbrace$, $G \in CA(T_k)$, and $M[I_{2k+1}]$ contains at least one tonic, one dominant and one subdominant chord of $T_k$,
\item[(iii)] If $x \in N \cap \partial M$, then only triad-valued points accumulate to $x$.
\end{enumerate}
Moreover, if $t \in \mathrm{Int~Dom}~M \cap N$ and there is no chord change in $t$ forbidden by the axioms regarding chord changes (see below), then we say that $M$ \emph{complies with classical harmony} in $t$ and $N$ is a \emph{classical neighbourhood} of $t$. \\
Finally, if $t_0 \in N$ is such that $t_0 \in \mathrm{Dom}~M$ but $\exists \varepsilon>0$ such that $t_0$ is the starting point of $M|_{N \cap [t_0-\varepsilon,\infty)}$, then according to the definition of strict four-part setting, $\lim\limits_{t \to t_0+0} M(t)=M(t_0)$. Then, if $\exists r>0$ such that $\forall x \in \left] t_0, t_0+r \right[: x \in N$ and $M$ complies with classical harmony in $x$, then we say that $M$ \emph{complies with classical harmony} in $t_0$. If $t_0 \neq a$, then we call $N$ a \emph{classical neighbourhood} of $t_0$.
\end{definition}
\begin{definition}
If a homophonic four-part piece $M$ complies with classical harmony in $x$, $\forall x \in \mathrm{Dom}~M$, then we say that $M$ \emph{complies with classical harmony}.
\end{definition}
The intuition behind these two definitions is the following. A piece that complies with classical harmony shall consist of segments that exhibit functional tonality with a given key, and sufficiently regular modulations leading from one such segment to the next one. The piece shall end with tonality in some key: the \emph{final key} of the piece, which often gives the name of the piece in Classicist music (e.g. String quartet in D major etc.)
\begin{theorem}[The fundamental theorem of tonality] \label{vécsey}
Let $M$ be a homophonic four-part piece that is pauseless ($\mathrm{Dom}~M=B(M)$) and feasible. Then $M$ is tonal (on $\overline{\mathrm{Dom}~M}$) if and only if it is correctable, i.e. if it complies with classical harmony (on $\mathrm{Dom}~M$) apart from its chord-changing points.
\end{theorem}
\begin{proof}
The fact that the condition of the theorem is sufficient for tonality is almost clear from Definitions \ref{tonal} and \ref{korrekt}; therefore, we omit this part of the proof.
We show that the condition is necessary for the tonality. Let $M$ be tonal, feasible and pauseless. For all $t \in \overline{\mathrm{Dom}~M}$, let $U_t$ be an open neighbourhood of $t$ that shows its tonality. If possible, let us choose $U_t$ such that it shows a key and not a modulation. Since $\mathrm{Dom}~M$ is a bounded subset of $\mathbb{R}$, it can be assumed that $\forall t \in \overline{\mathrm{Dom}~M}$, $U_t$ is a bounded open interval. Then, $\bigcup_{t \in \overline{\mathrm{Dom}~M}} U_t$ is an open cover of $\overline{\mathrm{Dom}~M}$. Since $\overline{\mathrm{Dom}~M}$ is compact, it has a finite subcover, which we denote by $\bigcup_{i=1}^n U_i$. Without loss of generality, we can assume that $U_i= \left] a_i, b_i \right[$, where $a_i<a_j \Leftrightarrow i<j$ and $b_i < b_j \Leftrightarrow i<j$, moreover that $\inf~U_1$ is the starting point of $M$ and $\sup~U_n$ is the endpoint of $M$. The tonality of $M$ guarantees that there are two cases. The first one is that in $\inf~U_1$, $M$ is tonal with some key $T_1$. In this case, let us start a sequential process with $V=U_1$, $\mathfrak I=\emptyset$, $j=1$ and $T=T_1$ in order to divide $B(M)$ into an interval system that shows that $M$ is correctable.
1. If $\forall k>j$ we have that in $U_k$, $M$ is tonal with the same key as in $V$ (or $\sup V$ is the endpoint of $M$), then let us append $(V \cup \bigcup_{k>j} U_k)\cap \mathrm{Dom}~M$ to $\mathfrak I$, as the next interval for showing correctability. In this interval $M$ is tonal with key $T$. Also, only triad-valued points of $\mathrm{Dom}~M$ accumulate to the only two boundary points that $\mathrm{Dom}~M$ has, which are the starting point and the endpoint.
2. Else, $\exists k>j$ such that in $U_k$ there is a key $T'$, since Definition \ref{tonal} implies that in $\sup\mathrm{Dom}~M$ there has to be tonality with a key. Then let us define $s= \sup \lbrace x \in \left] \inf V, \sup U_k \right[ \vert~M\text{ is tonal in } x \text{ with key }T\rbrace$ and $i=\inf \lbrace x \in \left] \inf V, \sup U_k \right[ \vert~M\text{ is tonal in } x \text{ with key }T'\rbrace$. Note that $s$ and $i$ are finite, and the tonality of $M$ implies weak tonality with key $T$ in $s$ and weak tonality with key $T'$ in $i$, therefore $s \leq i$. By Definition \ref{modi}, $s<i$ follows. Then, on the entire interval $\left[ s, i \right[$, $M$ modulates from $T$ to $T'$ complying with classical harmony apart from the chord-changing points. Let us append $\left[ \inf V, s \right[$ (as an interval with key $T$) and $\left[ s, \sup U_k \right[$ (as an interval of a $T \to T'$ modulation) to the set $\mathfrak I$ of the intervals showing the correctability of $M$. Let us put $T=T'$, $j=k$ and $V=\left[ i, \sup U_k \right[$ and return to the starting alternative of the sequential process.
Each time the process restarts, the endpoint of the current $V$ is the endpoint of $U_k$ for a $k$ larger by at least 1 than the one in the previous turn. This ensures that the process is finite, further the number of turns is not more than $n$: when $\sup V=\sup \mathrm{Dom}~M$ holds, the process is finished. The intervals given by the process show that $M$ is correctable: the intervals with an odd index are intervals where $M$ has a key and the ones with an even index contain modulation from the previous interval's key to the following one's. Thus, each point of $\mathrm{Dom}~M \setminus A(M)$ has a classical neighbourhood containing $B(M)$.
The second case is that there is no tonality with any key in $\inf~U_1$. In this case, by Definition \ref{tonalmod}, the whole interval $U_1$ belongs to a modulation from some key $T_1$ to another one $T_2$. Then let $s:=\inf \{ x \in U_1 | ~M\text{ is tonal on $\left[x,\sup U_1\right[$ with key $T_2$} \}$ and $I_0:=[\inf~U_1,s[$. Now, $M$ restricted to $\mathrm{Dom}~M \setminus I_0$ is such that it is tonal in its starting point with a key, and thus the intervals $I_1,\ldots,I_{2n+1}$ can be constructed similarly to the case when $M$ is tonal in its starting point. This finishes the proof. \end{proof}
We note that each condition of the theorem is necessary, i.e. for each one of them, one can find a tonal piece $M$ that violates it and therefore is not correctable.
\begin{itemize}
\item A tonal piece may have a packing point.
\item A tonal piece $M$ with $B(M)=\mathbb{R}_0^+$ may have no packing point but the infimum of the lengths of chord intervals can be still zero (in this case the sum of the chord lengths must be infinite).
\item A tonal piece $M$ with $B(M)=\mathbb{R}_0^+$ and positive infimum of chord lengths can contain infinitely many modulation intervals. In this case it can occur that $\forall t \in \left] \inf \mathrm{Dom}~M,\infty \right[$, we have that $M \vert_{\left[ 0, t \right[}$ is correctable but $M$ itself is not correctable. In this case, a \emph{final key} of $M$ cannot be defined. The final key is a main characteristic of finite feasible tonal pieces, in particular, in classicist music the names of the pieces of music often contain the final key of the piece (e.g. symphony in G major etc.).
\item A tonal, feasible piece is not necessarily correctable if it is not pauseless. Indeed, take a piece that complies with classical harmony and contains no modulations, write some positive amount of it, leave a general pause, and then continue with the same piece in a different key. The two connected components of the resulting piece comply with classical harmony by themselves, but the entire piece is not correctable because the modulation between the two keys is missing. For an example of such a piece, see Figure \ref{rosszpelda0}.
\end{itemize}
\begin{figure}
\caption{A non-pauseless tonal piece that is not correctable due to lack of modulation.}
\label{rosszpelda0}
\end{figure}
Knowing the fundamental theorem of tonality, we can embed the basic chord-changing compositional principles of classical harmony (see e.g. \cite[p.~30--183.]{kesztler} or \cite[Section 5.11]{benson}) in our mathematical axiom system. We present a scheme and an example of how these compositional principles can be stated, knowing Theorem \ref{vécsey}.
\begin{definition}[Scheme of chord-changing rules] \label{séma}
Let $M$ be a tonal, feasible, pauseless homophonic four-part piece and $t$ a chord-changing point of $M$. If $M$ complies with classical harmony in $t$, then [\emph{conditions on the chord change in $t$}].
\end{definition}
\begin{definition}[Prohibition of parallel octaves.]
Let $M$ be a feasible tonal four-part piece and $t$ a chord changing point of $M$ with $M(t_-)=A$, $M(t)=B$. If the interval of two voices of $A$ is an integer number of octaves, then the interval of the corresponding voices of $B$ is not the same interval.
\end{definition}
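To illustrate how such a chord-changing rule can be checked mechanically, here is a rough Python sketch (our own; encoding pitches as MIDI-style integers is an assumption of the example) that takes the prohibition literally as stated above.
\begin{verbatim}
from itertools import combinations

# chord_a, chord_b: 4-tuples of pitch numbers (bass, tenor, alto, soprano);
# 12 semitones = one octave.  The check follows the wording of the definition:
# no pair of voices may keep the very same unison/octave interval across the
# chord change (cases with both voices static are not treated specially here).
def violates_parallel_octaves(chord_a, chord_b):
    for i, j in combinations(range(4), 2):
        int_a = abs(chord_a[i] - chord_a[j])
        int_b = abs(chord_b[i] - chord_b[j])
        if int_a % 12 == 0 and int_b == int_a:
            return True
    return False

# C major (C3 E3 G3 C4) -> D minor (D3 F3 A3 D4): bass and soprano move in octaves
print(violates_parallel_octaves((48, 52, 55, 60), (50, 53, 57, 62)))  # True
\end{verbatim}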
This scheme guarantees that new chord-changing compositional principles can be added to the axiom system of classical harmony as long as the compositional principles do not contradict each other. The general historical experience is that the set of four-part rules of classical harmony is \emph{consistent}, equivalently that the axiom system actually has a model. In the full Hungarian version of the paper, we precisely described the classical chord-changing principles and the modulational rules. As for a model, we provided examples that satisfy various combinations of our axioms, including direct modulations consisting of 7 chords between any two major keys. By the structure of our axiom system, in the case of a correctable piece with finitely many chords, compliance with classical harmony can be unequivocally decided knowing merely the \emph{chord sequence} of the piece, and thus for these pieces our axiom system is also \emph{complete}. We note that one of the chord-changing rules, the \emph{principle of least motion}, is hard but possible to formalize with full mathematical precision in complete generality. There exist interesting mathematical results about this principle in the literature, see e.g. \cite[p.~4--6.]{tymoczko}.
The logical ordering of musical notions and the mathematically simpler results in this paper can now be used for writing a new classical harmony coursebook. We also plan to do experiments on the possibilities and barriers of composing four-part chorales by Markov models, revising the results of \cite{koralocska}.
\appendix
\section{Modulations} \label{modulations}
In this last extra section, we sketch the classical compositional principles for modulations with seven chords. Here we do not enumerate all the exact details of the technically rather complicated compositional principles themselves, nor the \emph{altered chords}, i.e., the elements of the convergence areas of the keys not detailed so far. Our full model for modulations, which can be found in the full Hungarian version (not cited here for anonymity) and which contains all the altered chords of the keys together with precise formulations of the modulational axioms, is mathematically complete ---but still far from universal, as it only describes modulations consisting of seven chords.
The seven chords of the modulation do not have to ensure that there is tonality with key $T_1$ in the starting point of the modulation, but the new key $T_2$ has to be established by a complete authentic cadence, according to the compositional principles.
Definition \ref{modka} for modulations guarantees that in the context of modulations it is enough to consider feasible and pauseless homophonic four-part pieces. Pauselessness ensures that chords that cannot follow each other in an actual chord change will also not occur directly after each other merely separated by a pause. In general, pauses can weaken the impact of an irregular chord progression, and there are some examples in music history where composers use this. But in the case of modulations, in Viennese classicism, the basic aim is to make the key change as smooth as possible and to find some connection between the beginning key and the target key, therefore such trickery is not advised.
In the following, we establish the notions that are necessary to state the remaining compositional principles for modulations. First, we define pauseless extensions of general homophonic four-part pieces, in order to obtain a completely pauseless paradigm for the modulations that incorporates non-pauseless pieces as well. Using these, we define chord sequences, which provide a simpler interpretation of feasible pieces than the one described in Section 4 of our paper. At the same time, note that the chorales cannot be defined mathematically precisely using only chord sequences, therefore the continuous-time construction from our Section 4 is also useful.
Let $M$
be a homophonic four-part piece that has no packing point apart from its endpoint, then we have a (possibly finite) sequence of disjoint consecutive intervals $(I_i)$ of which $B(M)$ consists, for all of which either $I_i \subseteq \mathrm{Dom}~M$ and $M$ has the constant value of a chord with $\inf I_i$ and $\sup I_i$ being either chord-changing points or boundary points of $\mathrm{Dom}~M$, or $I_i$ is a maximal general pause interval in the sense that $\inf I_i$ and $\sup I_i$ are boundary points of $\mathrm{Dom}~M$. In this case, there is a simple way to construct a pauseless extension $\overline{M}$ of $M$, given by an extension from $\mathrm{Dom}~M$ to $B(M)$, this is called the \emph{right-invariant pauseless extension} of $M$: \[ \overline{M}(t)= \begin{cases} M(t), ~ \text{if}~ t \in \mathrm{Dom}~M, \\ M[I_i]=M(\sup \lbrace u \in \mathrm{Dom}~M \vert u<t \rbrace), ~\text{if} ~t \in I_{i+1},I_{i+1} \cap \mathrm{Dom}~M=\emptyset. \end{cases} \] \\ In this case, $(\overline{M}(t_i), t_i \in A(\overline{M}))$ is called the \emph{chord sequence} of $M$, here $t_i$'s follow each other in their order in $\mathbb{R}_0^+$. We omit the proof of the following proposition, which shows the role of the playing function group $PL(\mathbb{R})$ in the topology of four-part pieces.
\begin{proposition}
Let $M_1$ and $M_2$ be homophonic four-part pieces such that the values of the chord sequences of $M_1$ and $M_2$ are the same. Then $\exists \theta \in PL(\mathbb{R})$: $M_2=M_1 \circ \theta$.
\end{proposition}
When describing modulations, we will not make any difference between feasible homophonic four-part pieces which have the same chord sequence. The expressions ‘‘a chord sequence is tonal/is correctable/complies with classical harmony'' will be used in the sense that every homophonic four-part piece with the given chord sequence has this property.
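A hedged computational reading of the right-invariant pauseless extension and the chord sequence (an illustration only; the interval representation and the assumption that the piece does not start with a general pause are ours):
\begin{verbatim}
# intervals: consecutive (start, end, chord_or_None) triples; None marks a
# maximal general pause interval.  We assume the piece does not start with a pause.
def right_invariant_extension(intervals):
    extended, last_chord = [], None
    for start, end, chord in intervals:
        if chord is None:
            chord = last_chord            # extend the previous chord over the pause
        extended.append((start, end, chord))
        last_chord = chord
    return extended

def chord_sequence(intervals):
    seq = []
    for _, _, chord in right_invariant_extension(intervals):
        if not seq or seq[-1] != chord:   # keep only the chord-changing values
            seq.append(chord)
    return seq

intervals = [(0, 1, "I"), (1, 2, "IV"), (2, 2.5, None), (2.5, 3, "V"), (3, 4, "I")]
print(chord_sequence(intervals))          # ['I', 'IV', 'V', 'I']
\end{verbatim}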
Firstly, we have to establish a connection between the modulational axioms and the definition of a correctable piece. We call modulations which satisfy not only Definition \ref{modka} but also the first modulational axiom (Definition \ref{modi}) \emph{basic modulations}. For a $T_1 \to T_2$ modulation that complies with classical harmony apart from chord changes, the next necessary condition that we require is tonality with key $T_1$ in the starting point of the first degree triad of $T_1$ that opens the modulation, and also tonality with key $T_2$ in the endpoint of the first degree triad of $T_2$ that closes the modulation (the existence of these chords is guaranteed by the first modulational axiom).
Now we can turn to the basic idea of modulations complying with classical harmony: the chord sequence of the ---pauseless, feasible--- $T_1 \to T_2$ modulation section has to be able to be divided into three disjoint segments (left-closed, right-open intervals) that cover the whole chord sequence \cite[p.~36]{statusquo}:
\begin{description}
\item[Neutral phase \emph{(N)}] In this segment, which is opened by the Ist degree triad, the key of the piece is still $T_1$ (i.e., each element of $N$ is a member of $CA(T_1)$), but there are no secondary dominant chords. After the first chord, no root position Ist degree triad of either $T_1$ or $T_2$ occurs anywhere in the modulation until the tonic main triad of $T_2$ arrives and closes the modulation.
\item[Fundamental step \emph{(F)}] If $T_1$ and $T_2$ are of the same type and they are neighbours in the circle of fifths, this whole segment may be empty. Otherwise, dominant chords of different keys follow each other here. Only the last one can be a triad (major, diminished, or, in the case of minor $T_1$ and minor $T_2$, augmented); the ones before have to be seventh chord inversions. These seventh chords have to follow each other by \emph{elision}\footnote{ We generally use the word ``elision'' for a chord progression of inversions of different seventh chords, without the first seventh chord resolving to its tonic. The lack of resolution is expressed by ``elision'', a word of Latin origin for ``omission''. A chord progression using elision always has to use \emph{chromatics} in order to make it possible to comply with classical harmony. Chromatics means a sequence of at least two \emph{semitone steps} after each other in one voice. This semitone sequence has to be such that for each segment of it where the consecutive semitone steps are taken in the same direction, there exists a key in the scale of which exactly every other step changes degree (i.e., every other step is a minor second and the remaining steps are augmented primes).}. The last chord of $F$ may be a triad and the previous chord may resolve to it.
\item[Cadence \emph{(C)}] The modulation has to be finished by a complete authentic cadence in the new key $T_2$; this shows and stabilizes the tonality in the new key. It may occur that we do not write a cadence in each key, but a modulation process is only finished when we reach a cadence in some key. In the segment $C$, the last chord before the closing degree I triad is certainly the degree V triad or the degree V dominant seventh chord of $T_2$. The tonic and subdominant chords of $T_2$ that precede this chord already belong to $C$ (and not to $F$) if and only if they are built up from scale tones of $T_2$; otherwise they belong to $F$.
\end{description}
If a chord sequence of a basic modulation has all the properties that we have introduced in this section, and it belongs to one of the following three modulation types, then we say that it complies with classical harmony apart from its chord-changing points. If its chord changes are also correct, we say that the modulation complies with classical harmony. The three possible modulation types are:
\begin{description}
\item[Diatonic] For the last chord $H$ of $N$ we have $H \in CA(T_2)$, and every chord after this is convergent in $T_2$. This time $F$ usually consists of at most one chord. This is the smoothest possible key change type, but it is often not possible between keys further away from each other.
\item[Enharmonic] The last chord of $N$ or the first chord of $F$ is an element of $CA(T_1)$ that is enharmonically equivalent to some element of $CA(T_2)$. The most common enharmonic modulation types use the enharmonic equivalence of diminished seventh chords or augmented triads in different keys. After this chord occurs, we consider it as an element of $CA(T_2)$, and make a chord progression in $T_2$ ending with an authentic cadence.
\item[Chromatic] There is elision in the modulation chord sequence. Modulations leading very far away, such as C major $\to$ F$\sharp$ major, can be accomplished most conveniently this way. In most chromatic modulations, $\sharp F \geq 2$ holds.
\end{description}
These three categories do not exclude each other pairwise, though it is difficult to accomplish a modulation that is both diatonic and enharmonic at the same time. In the music score collection of the thesis we show examples of modulations that are both enharmonic and chromatic, and of ones that are both diatonic and chromatic.
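As an illustration of the diatonic condition above (a sketch under our own representation, with chord labels as opaque strings and convergence areas as plain sets):
\begin{verbatim}
# N, F, C: lists of chord labels for the three phases; CA_T2: set of chord
# labels accepted in the target key T2.  Sketch of the diatonic condition only.
def is_diatonic_modulation(N, F, C, CA_T2):
    if not N:
        return False
    pivot = N[-1]                         # last chord of the neutral phase
    return pivot in CA_T2 and all(chord in CA_T2 for chord in F + C)

# toy example with opaque chord labels
print(is_diatonic_modulation(["n1", "pivot"], [], ["c1", "c2", "c3"],
                             {"pivot", "c1", "c2", "c3"}))        # True
\end{verbatim}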
Modulational compositional principles finish our axiomatization work.
\section{Convergent chords in major and minor keys} \label{durmoll}
In a major key $T$:
\hspace{-14pt}
\begin{tiny}
\begin{tabular}{|ll|c|c|c|}
\hline
Notation & Type & Convergent inversions & Function & Typical following chords \\ \hline
1. Diatonic & triads: & & & \\ \hline
$\mathrm{I}$ & major triad & all & T & almost all elements of $CA(T)$ \\ \hline
$\mathrm{II}$ & minor triad & root, first & S & $\mathrm{V}^{(7)}$, $\mathrm{II}_{\sharp}^7$ \\ \hline
$\mathrm{III}$ & minor triad & root, first & - & $\mathrm{IV},~\mathrm{VI},~\mathrm{III}_{\sharp}^7$ \\ \hline
$\mathrm{IV}$ & major triad & all & S & $\mathrm{V}^{(7)},~ \mathrm{II},~\underset{\sharp}{\mathrm{IV}^{7\flat}},~\mathrm{I}$ \\ \hline
$\mathrm{V}$ & major triad & root, first & D & $\mathrm{I},~\mathrm{VI},~\underset{\sharp}{\mathrm{III}_5^6},~\underset{\sharp}{\mathrm{V}^7}$ \\ \hline
$\mathrm{VI}$ & minor triad & root, first & T & $\mathrm{II}$, $\mathrm{IV}$, $\mathrm{VI}_{\sharp}^7$ \\ \hline
$\mathrm{VII}$ & diminished triad & first (with the third dupl.) & D & $\mathrm{I}^{(6)}$, $\mathrm{VII}^{7\flat}$ \\ \hline
2. Diatonic & sevenths: & & & \\ \hline
$\mathrm{I}^7$ & major minor seventh & all & T & $\mathrm{IV}$ \\ \hline
$\mathrm{II}^7$ & minor major seventh & all & S & $\mathrm{V}$ \\ \hline
$\mathrm{III}^7$ & minor major seventh & all & - & $\mathrm{VI}$ \\ \hline
$\mathrm{IV}^7$ & major minor seventh & all & - & $\mathrm{VII}^6$, $\mathrm{V}$ \\ \hline
$\mathrm{V}^7$ & dominant seventh & all & D & $\mathrm{I}$, $\underset{\flat}{\mathrm{I}^2}$ \\ \hline
$\mathrm{VI}^7$ & minor major seventh & all & T & $\mathrm{II}$, $\mathrm{VI}_{\sharp}^7$ \\ \hline
$\mathrm{VII}^7$ & semi-dim. seventh & all & -& $\mathrm{I}$, $\mathrm{III}$ \\ \hline
3. Diminished & sevenths (altered): & & & \\ \hline
$\underset{\sharp}{\mathrm{I}^{7\flat}}$ & dim. seventh & all & - & $\mathrm{II}$, $\mathrm{II}_2^{4(\sharp)}$ \\ \hline
$\underset{\sharp}{\mathrm{II}_{\sharp}^7}$ & dim. seventh & all & - & $\mathrm{III}$, $\mathrm{III}_2^{4(\sharp)}$ \\ \hline
$\underset{\sharp}{\mathrm{IV}^{7\flat}}$ & dim. seventh & all & S & $\mathrm{V}$, $\mathrm{V}^2$ \\ \hline
$\underset{\sharp}{\mathrm{V}^7}$ & dim. seventh & all & - & $\mathrm{VI}$, $\mathrm{II}_2^{4(\sharp)}$ \\ \hline
$\mathrm{VII}^{7\flat}$ & dim. seventh & all & D & $\mathrm{I}$, $\mathrm{I}^{2(\flat)}$\\ \hline
4. Secondary & dominant sevenths and triads: & & & \\ \hline
$\mathrm{I}^{7\flat}$& dominant seventh & all & T & $\mathrm{IV}$, $\underset{\flat}{\mathrm{IV}^2}$ \\ \hline
$\mathrm{II}^\sharp$& major triad & all & S & $\mathrm{V}$, $\mathrm{V}^2$ \\ \hline
$\mathrm{II}_{\sharp}^7$& dominant seventh & all & S & $\mathrm{V}$, $\mathrm{V}^2$ \\ \hline
$\mathrm{III}_{\sharp}^7$ & dominant seventh & all & - & $\mathrm{VI}$, $\mathrm{VI}_2^{4(\sharp)}$ \\ \hline
$\mathrm{IV}^{7\flat}$ & dominant seventh & third & - & $\mathrm{VII}^6$ \\ \hline
$\mathrm{VI}_{\sharp}^7$ & dominant seventh & all & - & $\mathrm{II}$, $\mathrm{II}_{2}^{4(\sharp)}$ \\ \hline
$\mathrm{VII} \tiny{\begin{smallmatrix} 7 \\ 5\sharp \\ \sharp \end{smallmatrix}}$ & dominant seventh & all & - & $\mathrm{III}$, $\mathrm{III}_{2}^{4(\sharp)}$ \\ \hline
5. Augmented & sixth chords: & & & \\ \hline
$\underset{\flat}{\mathrm{VI}}^{6\sharp}$ & ($\sim$dominant seventh) & first (with the third dupl.) & S & $\mathrm{V}$ \\ \hline
$\underset{\flat}{\mathrm{VI}_{5\flat}^{6\sharp}}$ & ($\sim$dominant seventh) & first & S & $\mathrm{V}$ \\ \hline
6. Minor & subdominants \& tonics: & & & \\ \hline
$\underset{\flat}{\mathrm{II} \tiny{\begin{smallmatrix} 6\sharp \\ 4 \\ 3 \end{smallmatrix}}}$ & irregular & second & S & $\mathrm{V}$ \\ \hline
$\mathrm{II}^{5\flat}$ & diminished triad & first & S & $\mathrm{V}^{(7)}$ \\ \hline
$\mathrm{II}_{5\flat}^7$ & semi-dim. seventh & all & S & $\mathrm{V}^{(7)}$ \\ \hline
$\underset{\flat}{\mathrm{II}}^{6\flat}$ & major triad$\ast$ & first (with the third dupl.) & S & $\mathrm{V}$ \\ \hline
$\mathrm{VI}^{\flat}$ & minor triad & root, first & S & $\mathrm{V}^{(7)}$ \\ \hline
$\underset{\flat}{\mathrm{VI}^{5\flat}}$ & major triad$\ast \ast$ & root & - & $\mathrm{V},~\mathrm{II},~\mathrm{IV}$ \\ \hline
\end{tabular}
\end{tiny}
In total, we have 97 convergent chord inversions. \\
$\ast$ This is the Neapolitan sixth of the minor key with the same first degree as $T$, see below. \\
$\ast \ast$ This is not a subdominant chord, but it comes also from the convergence area of the minor key with the same first degree as $T$. \\
Note that the augmented sixth chords are formally neither triads nor seventh chords complying with classical harmony according to our definitions, but they are enharmonic to (possibly fifth-deficient) dominant seventh chords. The same applies in minor. Further, the chord $\underset{\flat}{\mathrm{II} \tiny{\begin{smallmatrix} 6\sharp \\ 4 \\ 3 \end{smallmatrix}}}$ does not comply with classical harmony; the same holds for its analogue $\mathrm{II} \tiny{\begin{smallmatrix} 6\sharp \\ 4 \\ 3 \end{smallmatrix}}$ in minor as well as for the dominant ninth chord. However, they are widely used in practice, and the axiom system presented in this paper can easily be extended in such a way that these chords also comply with it.
In a minor key $T$:
\hspace{-14pt}
\begin{tiny}
\begin{tabular}{|ll|c|c|c|}
\hline
Notation & Type & Convergent inversions & Function & Typical following chords \\ \hline
1. Diatonic & triads: & & & \\ \hline
$\mathrm{I}$ & minor triad & all & T & almost all elements of $CA(T)$ \\ \hline
$\mathrm{II}$ & diminished triad & first & S & $\mathrm{V}_{\sharp}^{(7)}$, $\mathrm{II} \tiny{\begin{smallmatrix} 7 \\ 5\sharp \\ \sharp \end{smallmatrix}}$ \\ \hline
$\mathrm{III}^{5\sharp}$ & augmented triad & root, first & - & $\mathrm{IV},~\mathrm{VI}, \mathrm{I}$ \\ \hline
$\mathrm{IV}$ & minor triad & all & S & $\mathrm{V}_{\sharp}^{(7)},~ \mathrm{II},~\underset{\sharp}{\mathrm{IV}_{\sharp}^7},~\mathrm{I}$ \\ \hline
$\mathrm{V}^{\sharp}$ & major triad & root, first & D & $\mathrm{I},~\mathrm{VI}$ \\ \hline
$\mathrm{VI}$ & major triad & root, first & - & $\mathrm{II}$, $\mathrm{IV}$, $\mathrm{V}^{\sharp}$ \\ \hline
$\underset{\sharp}{\mathrm{VII}}$ & diminished triad & first (with the third dupl.) & D & $\mathrm{I}^{(6)}$, $\underset{\sharp}{\mathrm{VII}^{7}}$ \\ \hline
2. Diatonic & sevenths: & & & \\ \hline
$\mathrm{I}^{7\sharp}$ & minor augmented seventh B & all & - & $\mathrm{IV}$ \\ \hline
$\mathrm{II}^7$ & semi-diminished seventh & all & S & $\mathrm{V}_{\sharp}^{(7)}$ \\ \hline
$\mathrm{III}^7$ & augmented major seventh & all &-& $\mathrm{VI},~\mathrm{V}_{\sharp}^7,~\mathrm{I}$ \\ \hline
$\mathrm{IV}^7$ & minor major seventh & all & - & $\mathrm{VII}^6$, $\mathrm{V}_{\sharp}$ \\ \hline
$\mathrm{V}_{\sharp}^7$ & dominant seventh & all & D & $\mathrm{I}$, $\underset{\natural}{\mathrm{I}_2^{4\sharp}}$ \\ \hline
$\mathrm{VI}^7$ & major minor seventh & all & T & $\mathrm{II}$ \\ \hline
3. Diminished & sevenths (altered): & & & \\ \hline
$\underset{\sharp}{\mathrm{VII}^7}$ & diminished seventh & all & D & $\mathrm{I}$, $\mathrm{I}_2^{4\sharp}$ \\ \hline
$\underset{\sharp}{\mathrm{III}^{7\flat}}$ & diminished seventh & all & - & $\mathrm{IV}$, $\mathrm{IV}_2^{4(\sharp)}$ \\ \hline
$\underset{\sharp}{\mathrm{IV}_{\sharp}^{7}}$ & diminished seventh & all & S & $\mathrm{V}^{\sharp}$, $\mathrm{V}_2^{4\sharp}$ \\ \hline
4. Secondary & dominant triads and sevenths: & & & \\ \hline
$\mathrm{I}_{\sharp}^{7}$& dominant seventh & all & T & $\mathrm{IV}$, $\mathrm{IV}^2$ \\ \hline
$\mathrm{II}_{\sharp}^7$& dominant seventh & all & S & $\mathrm{V}^{\sharp}$, $\mathrm{V}_2^{4\sharp}$ \\ \hline
$\mathrm{III}^{\sharp}$ & major triad & root & - & $\mathrm VI$ \\ \hline
$\underset{\natural}{\mathrm{VII}}$ & major triad & root & - & $\mathrm{III}^7$ \\ \hline
$\underset{\natural}{\mathrm{VII}^7}$ & dominant seventh & all & - & $\mathrm{III}^7$ \\ \hline
5. Augmented & sixth chords: & & & \\ \hline
$\mathrm{VI}^{6\sharp}$ & ($\sim$dominant seventh) & first (with the third dupl.) & S & $\mathrm{V}^{\sharp}$ \\ \hline
$\mathrm{VI}_{5}^{6\sharp}$ & ($\sim$dominant seventh) & first & S & $\mathrm{V}^{\sharp}$ \\ \hline
$\mathrm{II} \tiny{\begin{smallmatrix} 6\sharp \\ 4 \\ 3 \end{smallmatrix}}$ & irregular & second & S & $\mathrm{V}^{\sharp}$ \\ \hline
6. Picardian & first degree triad: & & & \\ \hline
$\mathrm{I}^{\sharp}$ & major triad & root & T & none (final chord) \\ \hline
7. Neapolitan & sixth chord: & & & \\ \hline
$\mathrm{II}^{6\flat}$ & major triad & first (with the third dupl.) & S & $\mathrm{V}^{\sharp}$ \\ \hline
8. Dominant & ninth chord: & & & \\ \hline
$\mathrm{V} \tiny{\begin{smallmatrix} 9 \\ 7 \\ \sharp \end{smallmatrix}}$ & dominant ninth chord & root (fifth-deficient) & D & $\mathrm{I}$ \\ \hline
\end{tabular}
\end{tiny}
In total, there are only 73 convergent chord inversions.
\end{document}
\begin{document}
\title{Uncertainty relations with the variance and the quantum Fisher information based on convex decompositions of density matrices}
\author{G\'eza T\'oth\,\orcidlink{0000-0002-9602-751X}}
\email{[email protected]}
\homepage{http://www.gtoth.eu}
\affiliation{Department of Theoretical Physics, University of the Basque Country
UPV/EHU, P.O. Box 644, E-48080 Bilbao, Spain}
\affiliation{Donostia International Physics Center (DIPC), P. O. Box 1072, E-20080 San Sebasti\'an, Spain}
\affiliation{IKERBASQUE, Basque Foundation for Science, E-48013 Bilbao, Spain}
\affiliation{Institute for Solid State Physics and Optics, Wigner Research Centre for Physics,
P. O. Box 49, H-1525 Budapest, Hungary}
\author{Florian Fr\"owis\,\orcidlink{0000-0002-2743-3119}}
\affiliation{Group of Applied Physics, University of Geneva, CH-1211 Geneva, Switzerland}
\begin{abstract}
We present several inequalities related to the Robertson-Schr\"odinger uncertainty relation. In all these inequalities, we consider a decomposition of the density matrix into a mixture of states, and use the fact that the Robertson-Schr\"odinger uncertainty relation is valid for all these components. By considering a convex roof of the bound, we obtain an alternative derivation of the relation in Fr\"owis {\it et al.} \href{https://doi.org/10.1103/PhysRevA.92.012102}{[Phys. Rev. A {\bf 92}, 012102 (2015)]}, and we can also list a number of conditions that are needed to saturate the relation. We present a formulation of the Cram\'er-Rao bound involving the convex roof of the variance. By considering a concave roof of the bound in the Robertson-Schr\"odinger uncertainty relation over decompositions to mixed states, we obtain an improvement of the Robertson-Schr\"odinger uncertainty relation. We consider similar techniques for uncertainty relations with three variances. Finally, we present further uncertainty relations that provide lower bounds on the metrological usefulness of bipartite quantum states based on the variances of the canonical position and momentum operators for two-mode continuous variable systems. We show that the violation of well-known entanglement conditions in these systems discussed in Duan {\it et al.}, \href{https://doi.org/10.1103/PhysRevLett.84.2722}{[Phys. Rev. Lett. {\bf 84}, 2722 (2000)]} and Simon \href{https://doi.org/10.1103/PhysRevLett.84.2726}{[Phys. Rev. Lett. {\bf 84}, 2726 (2000)]} implies that the state is more useful metrologically than certain relevant subsets of separable states. We present similar results concerning entanglement conditions with angular momentum operators for spin systems.
\noindent DOI: \href{https://doi.org/10.1103/PhysRevResearch.4.013075}{10.1103/PhysRevResearch.4.013075}
\end{abstract}
\date{\today}
\maketitle
\section{Introduction}
Quantum Fisher information (QFI) is a central quantity of quantum metrology, a field that is concerned with metrological tasks in which the quantumness of the system plays an essential role \cite{Giovannetti2004Quantum-Enhanced,Paris2009QUANTUM,Demkowicz-Dobrzanski2014Quantum,Pezze2014Quantum}. One of the most fundamental scenarios in quantum metrology is estimating the small parameter $\theta$ in the unitary dynamics
\begin{equation}
\varrho_\theta=e^{-iB\theta}\varrho e^{iB\theta},
\label{eq:rho_theta}
\end{equation}
where $B$ is the Hamiltonian of the dynamics, $\varrho$ is the initial state, $\varrho_\theta$ is the final state of the evolution, and we set $\hbar=1$ for simplicity. By carrying out measurements on $\varrho_{\theta},$ we aim to estimate $\theta$ from the distribution of the outcomes of the measurement. The quantum Cram\'er-Rao inequality gives a lower bound on the precision of the estimation for any measurement
\begin{equation} \label{eq:cra}
\va{\theta}\ge \frac{1}{m F_Q[\varrho, B]},
\end{equation}
where $F_Q[\varrho, B]$ is the QFI and $m$ is the number of independent repetitions \cite{Helstrom1976Quantum,Holevo1982Probabilistic,Braunstein1994Statistical, Braunstein1996Generalized}.
At the center of attention lies the question of how noise can affect the precision of the estimation \cite{Huelga1997Improvement} and what the ultimate limit of the precision is in realistic scenarios \cite{Escher2011General,Demkowicz-Dobrzanski2012The}. We add that recent experiments in quantum optical systems, such as cold gases and cold trapped ions, made possible by the rapid technological advancement in the field \cite{Leibfried2004Toward,Napolitano2011Interaction-based,Riedel2010Atom-chip-based,Gross2010Nonlinear}, have been a driving force behind the development of quantum metrology. The experiments with the squeezed-light-enhanced gravitational wave detector GEO 600 \cite{Grote2013First,Aasi2013Enhanced,Demkowicz-Dobrzanski2013Fundamental} are highlights of the application of quantum-enhanced sensitivity.
Recently, the QFI was discovered to play an important role in quantum information theory, in particular, in the theory of quantum entanglement \cite{Toth2014Quantum}. It turns out that, in linear interferometers, entanglement is needed to surpass the shot-noise limit in precision corresponding to product states \cite{Giovannetti2004Quantum-Enhanced,Pezze2009Entanglement,Paris2009QUANTUM,Demkowicz-Dobrzanski2014Quantum,Pezze2014Quantum}. It has been shown that the larger the QFI, the larger the depth of entanglement the state must possess \cite{Hyllus2012Fisher,Toth2012Multipartite}. Besides the entanglement depth, there are further quantities that can give more detailed information about the structure of the multipartite entanglement \cite{Szalay2019Stretchability,Toth2020Stretching}, which turn out to be strongly connected to the QFI \cite{Ren2021Metrological}. In general, the QFI can be used to detect multipartite entanglement, which has been done in several experiments \cite{Lucke2011Twin,Krischek2011Useful,Strobel2014Fisher}. Apart from entanglement theory, the QFI has also been used to define what it means for a superposition to be macroscopically quantum \cite{Frowis2011Stable,Frowis2012Are}, and to bound the speed of a quantum evolution \cite{Campo2013Quantum,Taddei2013Quantum}, and it plays a role even in the quantum Zeno effect \cite{Smerzi2012Zeno,Schafer2014Experimental}. Finally, the QFI offers a powerful characterization of the prepared quantum state, and it can even be calculated from tomographic data \cite{Schwemmer2014Experimental}. It has been shown that this type of characterization is superior to computing the fidelity with respect to the ideal state for the usual state reconstruction schemes \cite{Schwemmer2015Systematic}.
Recent findings show that the QFI is the convex roof of the variance, apart from a constant factor \cite{Toth2013Extremal,Yu2013Quantum}. This again connects quantum metrology to quantum information science, where convex roofs often appear in the theory of entanglement measures \cite{Horodecki2009Quantum,Guhne2009Entanglement}. Density matrices have an infinite number of convex decompositions. This is a feature of quantum mechanics, not present in classical physics. So far, this fact has been appreciated mostly in quantum information science; however, it can also be used as a powerful tool in other areas of quantum physics.
Finally, the QFI appears in various quantum uncertainty relations. In these relations, the error propagation formula defined as
\begin{equation}\label{eq:errprop}
(\Delta \theta)^2_A = \frac{(\Delta A)^2}{\left| \partial_{\theta} \exs{A} \right|^2}
\end{equation}
plays a central role \cite{Frowis2015Tighter}. The uncertainty of the estimate is given by \EQ{eq:errprop} divided by $m,$ the number of independent repetitions, if the distribution of the measurement results fulfills certain reasonable requirements and $m$ is sufficiently large \cite{Kholevo_Generalization_1974,Braunstein1994Statistical,Zhong_Optimal_2014}. Then, from the Cram\'er-Rao bound (\ref{eq:cra}), one can derive \cite{Frowis2015Tighter}, for example, the Heisenberg-Robertson uncertainty relation \cite{Heisenberg1927Uber,Robertson1929The}, time-energy uncertainty relations \cite{Mandelstam1945TheB,Fleming_unitarity_1973,Uhlmann_energy_1992,Frowis2012Kind} and squeezing inequalities \cite{Sorensen2001Many-particle,Pezze2009Entanglement}. The optimization of \EQ{eq:errprop} over a given set of operators has also been considered \cite{Gessner2019Metrological}.
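As a rough numerical illustration of \EQ{eq:errprop} (our own single-qubit example, not taken from the cited works), one can compare the error-propagation formula for a fixed measured observable with the Cram\'er-Rao limit $1/F_Q$ of \EQ{eq:cra}; for this particular state and observable the measured operator happens to be proportional to the symmetric logarithmic derivative discussed in the next section, so the two values coincide.
\begin{verbatim}
import numpy as np

sz = np.diag([0.5, -0.5])                        # B: generator of the phase
sy = np.array([[0, -0.5j], [0.5j, 0]])           # A: measured observable
rho = 0.95 * np.full((2, 2), 0.5) + 0.05 * np.eye(2) / 2   # slightly mixed |+>

def rho_theta(theta):
    U = np.diag(np.exp(-1j * np.diag(sz) * theta))   # valid because sz is diagonal
    return U @ rho @ U.conj().T

def error_propagation(theta, eps=1e-6):
    mean = lambda th: np.trace(rho_theta(th) @ sy).real
    var_A = np.trace(rho_theta(theta) @ sy @ sy).real - mean(theta)**2
    dmean = (mean(theta + eps) - mean(theta - eps)) / (2 * eps)
    return var_A / dmean**2

print(error_propagation(0.0))   # ~1.108, equal to 1/F_Q[rho, sz] for this example
\end{verbatim}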
In this paper, we use the knowledge that the QFI is, apart from a constant factor, the convex roof of the variance to obtain inequalities valid for all quantum states, and to obtain entanglement criteria. First, we give a simple proof of a tighter version of the Heisenberg-Robertson uncertainty relation \cite{Schrodinger1930Zum, Frowis2015Tighter}, also giving conditions for saturation. We show ways to strengthen the Heisenberg-Robertson uncertainty relation. We derive the Cram\'er-Rao bound such that the bound is given by a convex roof. We derive a relation with two variances and a QFI. We also present entanglement conditions with the QFI.
Our paper is organized as follows.
In \SEC{sec:Important_properties}, we summarize important properties of the QFI and the variance.
In \SEC{sec:conn-betw-recent}, we discuss recent findings connecting the QFI to convex roofs.
In \SEC{sec:convroof}, we present inequalities derived from the Robertson-Schr\"odinger uncertainty relation based on convex roofs.
In \SEC{sec:RS}, we present an improvement on the same inequality based on concave roofs.
In \SEC{sec:several_var}, we present uncertainty relations with variances and the QFI.
In \SEC{sec:alt}, we present a simple relation and use it to rederive some of our results. We also derive further inequalities with the variance and the QFI.
In \SEC{sec:var_metrlogy}, we show how to relate the violation of some entanglement conditions to the metrological usefulness of the quantum state.
\section{Important properties of the QFI}
\label{sec:Important_properties}
In this section we briefly summarize the basic literature about the QFI. The properties we list will be used later in our calculations.
Most importantly, the QFI is convex, i.e.,
\begin{equation}\label{eq:convexity}
F_Q[\varrho_{\rm m},B]\le pF_Q[\varrho_1,B]+(1-p)F_Q[\varrho_2,B],
\end{equation}
where the mixture is defined as
\begin{equation}
\varrho_{\rm m}=p\varrho_1+(1-p)\varrho_2.
\end{equation}
Here lies an important similarity between the QFI and entanglement measures: neither of the two can increase under mixing.
The QFI appearing in the Cram\'er-Rao bound \EQ{eq:cra} is defined as \cite{Helstrom1976Quantum,Holevo1982Probabilistic,Braunstein1994Statistical,
Petz2008Quantum,Braunstein1996Generalized}
\begin{equation}
F_{Q}[\varrho,A]=2\sum_{k,l}\frac{(\lambda_{k}-\lambda_{l})^{2}}{\lambda_{k}+\lambda_{l}}\vert \bra{k}A\ket{l}\vert^{2},\label{eq:qF}
\end{equation}
where the density matrix has the eigendecomposition
\begin{equation}\label{eq:rho_eigdecomp}
\varrho=\sum_{k}\lambda_k \ketbra{k}.
\end{equation}
From \EQ{eq:qF}, it follows that the QFI can be bounded from above by the variance
\begin{equation}
F_Q[\varrho,B] \leq 4(\Delta B)^2_{\varrho}\label{eq:FQvar},
\end{equation}
where equality holds if $\varrho$ is pure \cite{Braunstein1994Statistical}.
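For concreteness, a small numerical sketch (ours, not taken from the references) evaluates \EQ{eq:qF} from the eigendecomposition and checks the bound \eqref{eq:FQvar}, including the equality for a pure state.
\begin{verbatim}
import numpy as np

def qfi(rho, B, tol=1e-12):
    lam, vecs = np.linalg.eigh(rho)
    Bm = vecs.conj().T @ B @ vecs                  # matrix elements <k|B|l>
    return sum(2 * (lam[k] - lam[l])**2 / (lam[k] + lam[l]) * abs(Bm[k, l])**2
               for k in range(len(lam)) for l in range(len(lam))
               if lam[k] + lam[l] > tol)

def variance(rho, B):
    return np.trace(rho @ B @ B).real - np.trace(rho @ B).real**2

sz = np.diag([0.5, -0.5])
rho = 0.9 * np.full((2, 2), 0.5) + 0.1 * np.eye(2) / 2      # mixed qubit state
print(qfi(rho, sz), 4 * variance(rho, sz))                   # 0.81 <= 1.0
rho_pure = np.full((2, 2), 0.5)                              # pure state |+><+|
print(qfi(rho_pure, sz), 4 * variance(rho_pure, sz))         # equal for pure states
\end{verbatim}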
The Cram\'er-Rao bound \eqref{eq:cra} defines the largest achievable precision of parameter estimation; however, it is not clear what has to be measured to reach this precision bound. An optimal measurement can be carried out if we measure in the eigenbasis of the symmetric logarithmic derivative $\mathcal{L}$ \cite{Braunstein1994Statistical,Braunstein1996Generalized}. This operator is defined such that it can be used to describe the quantum dynamics of the system with the equation
\begin{equation}\label{eq:LrrL}
\frac{d\varrho_\theta}{d\theta}=\tfrac{1}{2}(\mathcal{L}\varrho_\theta+\varrho_\theta \mathcal{L}).
\end{equation}
Unitary dynamics are generally given by the von Neumann equation with the Hamiltonian $B$
\begin{equation}\label{eq:LrrL2}
\frac{d\varrho_\theta}{d\theta}=i(\varrho_\theta B-B\varrho_\theta).
\end{equation}
The operator $\mathcal{L}$ can be found based on knowing that the right-hand side of \EQ{eq:LrrL} must be equal to the right-hand side of \EQ{eq:LrrL2}:
\begin{equation}
i[\varrho,B] = \tfrac{1}{2} \left\{ \varrho,\mathcal{L}\right\}.\label{eq:comm_anticomm2}
\end{equation}
Hence, the symmetric logarithmic derivative can be expressed with a simple formula as
\begin{equation}
\label{eq:L}
\mathcal{L}=2i\sum_{k,l}\frac{\lambda_{k}-\lambda_{l}}{\lambda_{k}+\lambda_{l}} \vert k \rangle \langle l \vert \langle k \vert B \vert l \rangle,
\end{equation}
where $\lambda_k$ and $\vert k\rangle$ are the eigenvalues and eigenvectors, respectively, of the density matrix $\varrho.$ Based on \EQS{eq:qF} and \eqref{eq:L}, the symmetric logarithmic derivative can be used to obtain the QFI as
\begin{equation}
F_Q[\varrho,B]={\rm Tr}(\varrho \mathcal{L}^2)=(\Delta \mathcal{L})^2.\label{eq:LLL}
\end{equation}
In the second equality in \EQ{eq:LLL}, we used that
\begin{equation}
\exs{\mathcal{L}}_{\varrho}=0,\label{eq:L0}
\end{equation}
which can be seen based on \EQS{eq:rho_eigdecomp} and \eqref{eq:L}.
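Continuing the numerical sketch above (again only an illustration with our own naming), one can construct $\mathcal{L}$ from \EQ{eq:L} and verify \EQS{eq:LLL} and \eqref{eq:L0}.
\begin{verbatim}
import numpy as np

def sld(rho, B, tol=1e-12):
    lam, vecs = np.linalg.eigh(rho)
    Bm = vecs.conj().T @ B @ vecs
    Lm = np.zeros_like(Bm, dtype=complex)
    for k in range(len(lam)):
        for l in range(len(lam)):
            if lam[k] + lam[l] > tol:
                Lm[k, l] = 2j * (lam[k] - lam[l]) / (lam[k] + lam[l]) * Bm[k, l]
    return vecs @ Lm @ vecs.conj().T              # back to the computational basis

sz = np.diag([0.5, -0.5])
rho = 0.9 * np.full((2, 2), 0.5) + 0.1 * np.eye(2) / 2
L = sld(rho, sz)
print(np.trace(rho @ L @ L).real)                 # 0.81 = F_Q[rho, sz], Eq. (LLL)
print(abs(np.trace(rho @ L)))                     # ~0, Eq. (L0)
\end{verbatim}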
\section{Defining the quantum Fisher information with convex roofs}
\label{sec:conn-betw-recent}
The quantum Fisher information has been connected to convex roofs that are based on an optimization over convex decompositions of the density matrix \cite{Toth2013Extremal,Yu2013Quantum}. Let us consider a density matrix of the form
\begin{equation}
\varrho=\sum_k p_k \ketbra{\psi_k}, \label{decomp}
\end{equation}
where $p_k>0$ and $\sum_k p_k=1.$
Note that the pure states $\ket{\psi_{k}}$ are not required to be pairwise orthogonal, and \EQ{decomp} is not an eigendecomposition of the density matrix. Then, it can be shown that the QFI is the convex roof of the variance times four \cite{Toth2013Extremal,Yu2013Quantum}
\begin{equation}
F_{Q}[\varrho,B] =4 \min_{\{ p_k,\ket{\psi_k}\}} \sum_k p_k \va {B
}_{\psi_k},\label{e2b}
\end{equation}
where $\{p_{k},\ket{\psi_{k}}\}$ refers to a decomposition of $\varrho$ of the type Eq.~(\ref{decomp}). In other words, we already knew that the QFI is convex, but \EQ{e2b} implies that it is the largest convex function that equals four times the variance for pure states. For further analysis on the convexity of the QFI, see \REF{Rezakhani2019Continuity}.
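To see \EQ{e2b} at work numerically, one can sample pure-state decompositions of a mixed state with the standard Schr\"odinger--HJW construction and compare the averaged variance with $F_Q/4$; the sketch below (our own, repeating the small QFI routine for completeness) only illustrates that every decomposition obeys the bound, whose infimum over all decompositions is the convex roof.
\begin{verbatim}
import numpy as np

def qfi(rho, B, tol=1e-12):
    lam, v = np.linalg.eigh(rho)
    Bm = v.conj().T @ B @ v
    return sum(2 * (lam[k] - lam[l])**2 / (lam[k] + lam[l]) * abs(Bm[k, l])**2
               for k in range(len(lam)) for l in range(len(lam))
               if lam[k] + lam[l] > tol)

def random_decomposition(rho, n, rng):
    """Random n-element pure-state decomposition of rho via the Schrodinger-HJW
    construction: sqrt(p_k)|psi_k> = sum_j U_kj sqrt(lam_j)|j>, with U an n x r
    matrix with orthonormal columns."""
    lam, vecs = np.linalg.eigh(rho)
    keep = lam > 1e-12
    lam, vecs = lam[keep], vecs[:, keep]
    G = rng.normal(size=(n, lam.size)) + 1j * rng.normal(size=(n, lam.size))
    U = np.linalg.qr(G)[0]                         # orthonormal columns
    tilde = vecs @ np.diag(np.sqrt(lam)) @ U.T     # columns are sqrt(p_k)|psi_k>
    p = np.linalg.norm(tilde, axis=0)**2
    return p, tilde / np.sqrt(p)

def var_pure(psi, A):
    return (psi.conj() @ A @ A @ psi).real - (psi.conj() @ A @ psi).real**2

sz = np.diag([0.5, -0.5])
rho = np.array([[0.7, 0.3], [0.3, 0.3]])
rng = np.random.default_rng(1)
avg = [sum(p[k] * var_pure(psis[:, k], sz) for k in range(len(p)))
       for p, psis in (random_decomposition(rho, 3, rng) for _ in range(1000))]
print(qfi(rho, sz) / 4, min(avg))   # every sampled decomposition obeys F_Q/4 <= avg
\end{verbatim}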
\EQL{e2b} has also been used in derivations concerning the continuity of the QFI \cite{Augusiak2016Asymptotic}, or finding efficient ways to bound it from below based on few measurements \cite{Apellaniz2017Optimal}. It has been used in constructing entanglement conditions in \REF{Akbari-Kourbolagh2019Entanglement}. Finally, it has also been used in finding a bound on
\begin{equation}
V(\varrho,A)=\va{A}-F_Q[\varrho,A]/4\label{eq:vaminusFQ}
\end{equation}
based on the purity of $\varrho$ \cite{Toth2017Lower}.
A related result is that the variance is the concave roof of itself
\begin{equation}\label{eq:Fisherroof}
\va A_{\varrho}=\max_{\{ p_k,\ket{\psi_k}\}} \sum_k p_k \va A_{\psi_k}.
\end{equation}
This property of the variance is relatively easy to show \cite{Toth2013Extremal,Yu2013Quantum}. For the proof, one has to demonstrate that there is always a decomposition of the type \EQ{decomp} such that
\begin{equation}
\exs{A}_{\psi_k}=\exs{A}_{\varrho}\label{eq:exApsi}
\end{equation}
for all $k.$ Similar decompositions for correlation matrices have been considered in Refs.~\cite{Leka2013Some,Petz2014}.
The statements of \EQS{e2b} and \eqref{eq:Fisherroof} can be concisely reformulated as follows. For any decomposition $\{p_{k},\ket{\psi_{k}}\}$ of the density matrix $\varrho$ we have
\begin{equation}
\frac{1}{4}F_{Q}[\varrho,A] \le \sum_k p_k \va {A}_{\psi_k} \le \va A_{\varrho},\label{eq:FsumV}
\end{equation}
where the upper and the lower bounds are both tight.
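The sandwich inequality in \EQ{eq:FsumV} can be illustrated numerically. The sketch below (assuming NumPy and SciPy; all function names are ours) generates a random pure-state decomposition of $\varrho$ using the standard fact that all decompositions are obtained from the eigendecomposition via isometries, and compares the average variance with $F_Q/4$ and with the variance; the QFI is evaluated from the closed-form expression equivalent to \EQ{eq:LLL}.
\begin{verbatim}
import numpy as np
from scipy.stats import unitary_group

def variance(rho, X):
    return np.real(np.trace(rho @ X @ X) - np.trace(rho @ X)**2)

def qfi(rho, B, tol=1e-12):
    lam, vecs = np.linalg.eigh(rho)
    Bt = vecs.conj().T @ B @ vecs
    return sum(2*(lam[k]-lam[l])**2/(lam[k]+lam[l])*abs(Bt[k, l])**2
               for k in range(len(lam)) for l in range(len(lam))
               if lam[k] + lam[l] > tol)

def random_decomposition(rho, n):
    # |psi_i> ~ sum_k V_ik sqrt(lam_k)|k>, with V an isometry (V^dag V = 1)
    lam, vecs = np.linalg.eigh(rho)
    keep = lam > 1e-12
    lam, vecs = lam[keep], vecs[:, keep]
    V = unitary_group.rvs(n)[:, :len(lam)]
    tilde = (vecs*np.sqrt(lam)) @ V.T        # columns: unnormalized states
    p = np.sum(np.abs(tilde)**2, axis=0)
    return p, tilde/np.sqrt(p)

# a random full-rank two-qubit state and an arbitrary Hermitian operator
W = np.random.randn(4, 4) + 1j*np.random.randn(4, 4)
rho = W @ W.conj().T
rho = rho/np.trace(rho)
B = np.diag([1.0, 0.5, -0.5, -1.0])

p, psis = random_decomposition(rho, 6)
avg = sum(p[i]*variance(np.outer(psis[:, i], psis[:, i].conj()), B)
          for i in range(len(p)))
print(qfi(rho, B)/4, avg, variance(rho, B))  # printed in non-decreasing order
\end{verbatim}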
Note that the QFI has been connected to convex roofs in another context, via purifications \cite{Fujiwara2008A,Escher2011General,Demkowicz-Dobrzanski2012The,Marvian2020Coherence}. The basic idea is that the QFI can easily be computed for pure states and a unitary dynamics. For the more general case of mixed states and noisy dynamics we can still deal with pure states, if we add an ancillary system and consider the purification of the noisy dynamics.
Finally, let us discuss that relations given in \EQS{e2b} and \eqref{eq:Fisherroof} remain the same if we optimize over decompositions to a mixture of density matrices instead of decompositions to a mixture of pure states. Let us consider a decomposition of $\varrho$ to a mixture of density matrices \cite{*[{Decomposition to mixed states also appears in entanglement theory, e.g., see }] [{.}] Hofmann2014Analytical}
\begin{equation}
\varrho=\sum_k p_k \varrho_k.\label{eq:rhodecomp}
\end{equation}
Due to the fact that the QFI and the variance are convex and concave, respectively, in density matrices, the inequalities
\begin{equation}
\frac{1}{4}F_{Q}[\varrho,A] \le \sum_k p_k \va {A}_{\varrho_k} \le \va A_{\varrho}\label{eq:FsumV2}
\end{equation}
hold. However, we already know that decompositions to a mixture of pure states can saturate both inequalities in \EQ{eq:FsumV}. Thus, obtaining the convex and concave roofs over decompositions to mixed states will lead to the same values that we obtain in \EQS{e2b} and \eqref{eq:Fisherroof}.
Concerning the relation for the QFI given in \EQ{e2b}, we can add the following. If we calculate the convex roof of a quantity that is concave in density matrices, then the result of a minimization over all pure-state decompositions will coincide with the result of a minimization over all mixed-state decompositions. The reason is that if a concave function is minimized over a convex set, then it takes its minima on the extreme points of the set. Similarly, if we calculate the concave roof of a quantity that is convex in density matrices, then the result of a maximization over all pure-state decompositions will coincide with the result of a maximization over all mixed-state decompositions.
\section{Uncertainty relations based on a convex roof over decompositions in the Robertson-Schr\"odinger inequality}
\label{sec:convroof}
The Robertson-Schr\"odinger uncertainty is a fundamentally important uncertainty relation in quantum physics \cite{Robertson1929The}. Hence, there is a strong interest in deriving further relations from it and in looking for possible improvements \cite{Luo2005Heisenberg,Furuichi2010Schrodinger,Yu2013Robertson,Maccone2014Stronger}. In this section, we present a simple method to obtain further uncertainty relations based on the optimization over the decompositions of density matrices. We rederive the improved Heisenberg-Robertson inequality presented in \REF{Frowis2015Tighter}. We discuss some implications of the Cram\'er-Rao bound, and determine which states saturate the inequality.
\subsection{Simple proof for the improved Heisenberg-Robertson inequality presented in \REF{Frowis2015Tighter}}
\label{subsec:Simple_proof}
The Robertson-Schr\"odinger inequality is defined as
\begin{equation}
\label{eq:RS1}
\va{A}_\varrho \va{B}_\varrho
\ge \tfrac{1}{4} \vert L_\varrho \vert^2,
\end{equation}
where the lower bound is given by
\begin{eqnarray}\label{eq:defL}
L_\varrho=\sqrt{{\vert \exs{\{A,B\}}_\varrho-2\exs{A}_\varrho\exs{B}_\varrho\vert^2+\vert \exs{C}_\varrho\vert^2}},
\end{eqnarray}
$\{A,B\}=AB+BA$ is the anticommutator, and we used the definition
\begin{equation}
C=i[A,B].\label{eq:CAB}
\end{equation}
First let us examine the convexity properties of the bound on the right-hand side of \EQ{eq:RS1}, which we need later. One can show that $L_\varrho$ is neither convex nor concave in $\varrho.$ Let us consider a concrete example, the mixed two-qubit state with a decomposition
\begin{align}
p_1&=1/2, &\ket{\psi_1}&=\ket{00}, \nonumber\\
p_2&=1/2, &\ket{\psi_2}&=\ket{11},
\end{align}
and the operators
\begin{eqnarray}
A&=\sigma_z \otimes \openone, \nonumber\\
B&= \openone \otimes \sigma_z. \label{eq:AB2}
\end{eqnarray}
For these, we have $C=0$, $L_{\psi_1}=L_{\psi_2}=0,$ while $L_{\varrho}=2.$ Simple algebra shows that $L_{\varrho}>p_1L_{\psi_1}+p_2L_{\psi_2}$ holds. Let us consider another concrete example, the mixed state with a decomposition
\begin{align}
p_1&=1/2, &\ket{\psi_1}&=(\ket{00}+\ket{11})/\sqrt{2}, \nonumber\\
p_2&=1/2, &\ket{\psi_2}&=(\ket{01}+\ket{10})/\sqrt{2},
\end{align}
and the same operators given in \EQ{eq:AB2}. For these, $C=0$, $L_{\psi_1}=L_{\psi_2}=2,$ while $L_{\varrho}=0.$ Hence, $L_{\varrho}<p_1L_{\psi_1}+p_2L_{\psi_2}$ holds.
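These two examples can be checked with a few lines of code (a sketch assuming NumPy; the helper names are ours), evaluating $L$ of \EQ{eq:defL} directly.
\begin{verbatim}
import numpy as np

sz = np.diag([1.0, -1.0])
A = np.kron(sz, np.eye(2))                   # sigma_z x identity
B = np.kron(np.eye(2), sz)                   # identity x sigma_z

def mean(rho, X):
    return np.trace(rho @ X)

def L(rho):
    cov = mean(rho, A @ B + B @ A) - 2*mean(rho, A)*mean(rho, B)
    comm = mean(rho, 1j*(A @ B - B @ A))     # <C> with C = i[A,B]
    return np.sqrt(abs(cov)**2 + abs(comm)**2)

def proj(v):
    v = np.array(v, dtype=complex)
    v = v/np.linalg.norm(v)
    return np.outer(v, v.conj())

psi1, psi2 = proj([1, 0, 0, 0]), proj([0, 0, 0, 1])       # |00>, |11>
print(L(0.5*psi1 + 0.5*psi2), 0.5*L(psi1) + 0.5*L(psi2))  # 2.0 > 0.0
phi1, phi2 = proj([1, 0, 0, 1]), proj([0, 1, 1, 0])       # Bell states
print(L(0.5*phi1 + 0.5*phi2), 0.5*L(phi1) + 0.5*L(phi2))  # 0.0 < 2.0
\end{verbatim}
The two printed lines confirm that $L_\varrho$ is neither convex nor concave in $\varrho.$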
Since $L_\varrho$ is neither convex nor concave in $\varrho,$ we will now consider a decomposition of the density matrix to mixed states $\varrho_k$ as given in \EQ{eq:rhodecomp}, instead of a decomposition to pure states. For such a decomposition, for all $\varrho_k$ the Robertson-Schr\"odinger inequality given in \EQ {eq:RS1} holds. From this fact and with the simple inequality presented in \APP{App:A}, we arrive at
\begin{eqnarray}
&&\left[\sum_k p_k \va{A}_{\varrho_k}\right]\left [\sum_k p_k \va{B}_{\varrho_k}\right]\nonumber\\
&&\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\ge\tfrac{1}{4}\left[\sum_k p_k L_{\varrho_k}\right]^2.
\label{eq:Heisenberg2}
\end{eqnarray}
At this point it is important to know that the inequality in \EQ{eq:Heisenberg2} is valid for any decomposition of the density matrix of the type given in \EQ{eq:rhodecomp}. Moreover, we should remember that the three sums are over the {\it same} decomposition of the density matrix.
Let us try to obtain inequalities with the variance and the QFI. For that, we can choose the decomposition such that
\begin{equation}
\sum_k p_k \va{B}_{\varrho_k}\label{eq:pkvarB}
\end{equation}
is minimal and equals $F_Q[\varrho,B]/4$ given in \EQ{e2b}. Due to the concavity of the variance we also know that
\begin{equation}
\sum_k p_k \va{A}_{\varrho_k}\le \va{A}.
\end{equation}
Hence, for the product of the variance of $A$ and the QFI $F_Q[\varrho,B]$ it follows that
\begin{eqnarray}
\va{A}_{\varrho}F_Q[\varrho,B]\ge \left(\sum_k p_k L_{\varrho_k} \right)^2.\label{eq:RS_improved_with_FQ222}
\end{eqnarray}
In order to use \EQ{eq:RS_improved_with_FQ222}, we need to know the decomposition that minimizes \EQ{eq:pkvarB}. We can also obtain an inequality in which we do not need to know that decomposition
\begin{eqnarray}
\va{A}_{\varrho}F_Q[\varrho,B]\ge \left(\min_{\{p_k,\varrho_k\}} \sum_k p_k L_{\varrho_k} \right)^2.\label{eq:RS_improved_with_FQB}
\end{eqnarray}
On the right-hand side of \EQ{eq:RS_improved_with_FQB}, the bound is defined based on a convex roof. The right-hand side of \EQ{eq:RS_improved_with_FQB} is not larger than the right-hand side of \EQ{eq:RS_improved_with_FQ222}. We can also see that on the right-hand side of \EQ{eq:RS_improved_with_FQB} there is a minimization over mixed-state decompositions. Based on \SEC{sec:conn-betw-recent}, there is always an optimal pure-state decomposition such that \EQ{eq:pkvarB} is minimal and equals $F_Q[\varrho,B]/4.$ Thus, we can also have a valid inequality with an optimization over pure-state decompositions of the type given in \EQ{decomp}
\begin{eqnarray}
\va{A}_{\varrho}F_Q[\varrho,B]\ge \left(\min_{\{p_k,\ket{\psi_k}\}} \sum_k p_k L_{\psi_k} \right)^2.\label{eq:RS_improved_with_FQ}
\end{eqnarray}
The right-hand side of \EQ{eq:RS_improved_with_FQ} is not smaller than the right-hand side of \EQ{eq:RS_improved_with_FQB}. Hence, in the remaining part of the section we will work with pure-state decompositions rather than mixed-state decompositions.
Let us now try to find a lower bound that is easier to compute, while possibly being smaller than the bound in \EQ{eq:RS_improved_with_FQ}. One could first think of using $\vert L_\varrho \vert^2$ as a lower bound; however, it is not convex in $\varrho,$ as we have discussed. Based on \EQ{eq:defL}, the relation
\begin{equation}
L_{\psi_k} \ge \vert \ex{C}_{\psi_k} \vert \label{eq:ineqLC}
\end{equation}
holds. Based on \EQ{eq:RS_improved_with_FQ} and \EQ{eq:ineqLC}, we can obtain the inequality
\begin{eqnarray}
&&\va{A}_{\varrho}F_Q[\varrho,B]\ge \left(\min_{\{p_k,\ket{\psi_k}\}} \sum_k p_k\vert \exs{C}_{\psi_k}\vert\right)^2.
\label{eq:RS_improved_with_FQ2}
\end{eqnarray}
Using well-known properties of the absolute value we get
\begin{equation}
\sum_k p_k\vert \exs{C}_{\psi_k}\vert \ge \left\vert \sum_k p_k \exs{C}_{\psi_k} \right\vert \equiv \vert \exs{C}_\varrho\vert,
\end{equation}
and with that we arrive at the improved Heisenberg-Robertson uncertainty proved by Fr\"owis \textit{et al.}~\cite{Frowis2015Tighter}
\begin{equation}
\va{A}_{\varrho}F_Q[\varrho,B] \ge \vert \exs{C}_\varrho\vert^2.
\label{eq:varFQproductUncRel}
\end{equation}
Due to the relation between the variance and the QFI given in \EQ{eq:FQvar}, the left-hand side of \EQ{eq:varFQproductUncRel} is never larger than the left-hand side of the Heisenberg-Robertson uncertainty.
Based on these, we find the following.
\begin{observation}
\label{obs:conditions}
The improved Heisenberg-Robertson inequality \eqref{eq:varFQproductUncRel} can be saturated only if all of the following conditions are fulfilled.
(i) There is a decomposition $\{p_k,\ket{\psi_k}\}$ that minimizes the weighted sum of the subensemble variances for the operator $B,$ hence
\begin{equation}\label{eq:Fisherroof2}
\frac1 4 F_Q[\varrho,B]= \sum_k p_k \va B_{\psi_k}.
\end{equation}
We also need that it maximizes the weighted sum of the subensemble variances for the operator $A,$ and hence
\begin{equation}
\va A_{\varrho}= \sum_k p_k \va A_{\psi_k}.
\end{equation}
(ii) If the decomposition maximizes the weighted sum of the subensemble variances for the operator $A$ then \EQ{eq:exApsi} holds. [See explanation after Eq.~\eqref{eq:Fisherroof}.]
(iii) Moreover, \EQ{eq:ineqLC} must be saturated for every $k.$ Hence, the equality
\begin{equation}
\tfrac{1}{2}\exs{\{A,B\}}_{\psi_k}-\exs{A}_{\psi_k}\exs{B}_{\psi_k}=0
\end{equation}
must hold. In this case, we
also have
\begin{equation}
\va{(A+B)}_{\psi_k}=\va{A}_{\psi_k}+\va{B}_{\psi_k}.
\end{equation}
(iv) \EQL{eq:Heisenberg2} is saturated for pure-state decompositions only if for the subensemble variances the equations
\begin{eqnarray}
\va{A}_{\psi_k}&=&\va{A}_{\psi_l},\nonumber\\
\va{B}_{\psi_k}&=&\va{B}_{\psi_l}
\end{eqnarray}
hold for all $k,l.$ (See \APP{App:A}.)
(v) For such an optimal decomposition, for every $k,$
\begin{equation}
|\exs{C}_{\psi_k}|=|\exs{C}_{\varrho}|,
\end{equation}
which is trivially fulfilled if $C$ is a constant. (See \APP{App:A}.) This is the case, for example, if $A$ and $B$ are the position and momentum operators, $x$ and $p,$ of a bosonic mode.
\end{observation}
\subsection{Implications for the Cram\'er-Rao bound}
In this section, we will show that the precision of parameter estimation is bounded from below by an expression with the convex roof of the variance.
Let us first recall a relevant notion. The error propagation formula is given in \EQ{eq:errprop}. Using the fact that the dynamics is unitary, we have
\begin{equation}
\vert \partial_{\theta} \exs{A}\vert=\vert \exs{C}\vert,
\end{equation}
where $C$ is defined in \EQ{eq:CAB} (see, e.g., \REF{Frowis2015Tighter}). Hence
\begin{equation}
\label{eq:errprop2}
\va{\theta}_A=\frac{\va{A}}{\vert \exs{C}\vert^2}.
\end{equation}
Then, the precision of the estimation is bounded as
\begin{equation}
\va{\theta} \ge \frac{1}{m} \min_{A} \va{\theta}_A,\label{eq:varFQ22}
\end{equation}
where $m$ is the number of independent repetitions. In the large $m$ limit, if certain further conditions are fulfilled, \EQ{eq:varFQ22} can be saturated \cite{Pezze2018Quantum}.
Based on these and on \SEC{subsec:Simple_proof}, we arrive at
\begin{equation}
\va{\theta}_A\ge \frac{1}{4 \min_{\{p_{k},\ket{\psi_{k}}\}}\bigg[
\sum_{k}p_{k}
\va{B}_{\psi_k}\bigg]}.
\end{equation}
Using \EQS{eq:errprop2} and \eqref{eq:varFQ22}, we get a lower bound on the precision of parameter estimation
\begin{equation}
\label{eq:cr}
\va{\theta} \ge \frac1 m \times \frac{1}{4 \min_{\{p_{k},\ket{\psi_{k}}\}}\bigg[
\sum_{k}p_{k}
\va{B}_{\psi_k}\bigg]}.
\end{equation}
We have just derived a form of the Cram\'er-Rao bound that contains the convex roof of the variance. On the right-hand side of \EQ{eq:cr} we intentionally write the expression with the convex roof, rather than the QFI, to stress that our derivation did not use the formula given in \EQ{eq:qF} for the QFI. Hence, we can see that the Cram\'er-Rao bound in \EQ{eq:cr} can be saturated for von Neumann measurements only if the conditions of \OBS{obs:conditions} are fulfilled for some $A$.
Note that we did not prove that there is an $A$ for every $B$ and $\varrho$ such that the bound in \EQ{eq:cr} can be saturated, which would be necessary to prove that the Cram\'er-Rao bound can be reached.
Note also that we did not consider POVM measurements, which would be the more general case \cite{Braunstein1994Statistical}. However, it is known that it is always possible to saturate the Cram\'er-Rao bound by von Neumann measurements \cite{Paris2009QUANTUM}.
\subsection{Sufficient condition for saturating the bound}
In this section, for completeness, we present a concise sufficient condition for \EQ{eq:varFQproductUncRel} to be saturated. Similar statements have been discussed in \REFS{Hotta2004Quantum,Escher2012Quantum_arxiv,Frowis2015Tighter,Toth2020Activating}. This is relevant for us, since it is connected to the conditions for saturation given in \OBS{obs:conditions}. We use the theory of the symmetric logarithmic derivative described in \SEC{sec:Important_properties}.
\begin{observation}
\label{obs:saturation}
If the equality
\begin{equation}
i[\varrho,B] = \tfrac{1}{2} \left\{ \varrho, c A\right\}\label{eq:comm_anticomm}
\end{equation}
holds, then \EQ{eq:varFQproductUncRel} is saturated. Here, $c\ne 0$ is a real constant.
\end{observation}
{\it Proof.} \EQL{eq:comm_anticomm} implies that $cA$ equals the symmetric logarithmic derivative $\mathcal{L};$ see \EQ{eq:comm_anticomm2}. Let us then substitute $cA$ by $\mathcal{L}$ in \EQ{eq:varFQproductUncRel}.
Then it follows that \cite{Braunstein1994Statistical,Braunstein1996Generalized}
\begin{equation}
F_Q[\varrho,B]= \langle \mathcal{L}^2 \rangle_{\varrho} = c^2 (\Delta A)^2_{\varrho},
\end{equation}
and moreover simple algebra yields
\begin{equation}
\langle i[A,B] \rangle_{\varrho} = \frac1 c {\rm Tr}(i[\mathcal{L},B]\varrho)= \frac1 c \langle \mathcal{L}^2 \rangle_{\varrho}= c(\Delta A)^2_{\varrho}.\label{eq:Lcomm}
\end{equation}
In the last equality in \EQ{eq:Lcomm} we used \EQ{eq:L0}. Consequently, the left-hand side and the right-hand side of \EQ{eq:varFQproductUncRel} are equal, and the state saturates the inequality. Moreover, the two terms of the product on the left-hand side of \EQ{eq:varFQproductUncRel} are proportional to each other, since $F_Q[\varrho,B]=c^2\va{A}_\varrho.$ $\ensuremath{
\blacksquare}$
\OBS{obs:saturation} is related to the known relation
\begin{equation}
\va{\mathcal L}F_Q[\varrho,B]=\vert\langle i[\mathcal L,B] \rangle_{\varrho} \vert^2,\label{eq:factors}
\end{equation}
where $\mathcal L$ is defined in \EQ{eq:L}, and on the left-hand side of \EQ{eq:factors}, the two terms in the product are equal to each other.
Note that as a consequence, the equality in \EQ{eq:comm_anticomm} implies that the conditions in \OBS{obs:conditions} are fulfilled. Moreover, based on \OBS{obs:saturation}, we can find additional constraints on the subensemble variances given in \OBS{obs:conditions}. If we compute the trace of both sides of \EQ{eq:comm_anticomm} we arrive at
\begin{equation}
\exs{A}_\varrho=0.
\end{equation}
From \OBS{obs:conditions} (ii) it follows that a similar statement holds for all subensembles
\begin{equation}
\exs{A}_{\psi_k}=0.
\end{equation}
Moreover, based on \OBS{obs:conditions} (iii), we obtain a relation for the anticommutator of $A$ and $B$ as
\begin{equation}
\exs{\{A,B\}}_{\psi_k}=0,
\end{equation}
which is again valid for all subensembles.
\section{Improvement on the Robertson-Schr\"odinger inequality based on a concave roof over decompositions}
\label{sec:RS}
In this section, we show an improvement of the Robertson-Schr\"odinger inequality. We start from the fact that \EQ{eq:Heisenberg2} is valid for any decomposition of the density matrix to mixed components $\varrho_k.$ Hence, due to the concavity of the variance, it follows that
\begin{equation}
\va{A} \va{B} \ge\frac{1}{4}\left(\sum_k p_k L_{\varrho_k}\right)^2.
\label{eq:Heisenberg2BB}
\end{equation}
Based on these, we can find the following.
\begin{observation} For quantum states, the following inequality holds
\label{obs:vavaL_ineq}
\begin{eqnarray}
\va{A}_{\varrho}\va{B}_{\varrho}\ge \frac 1 4 \left( \max_{\{p_k,\varrho_k\}} \sum_k p_k L_{\varrho_k} \right)^2,\label{eq:RS2B}
\end{eqnarray}
where $L_{\varrho}$ is defined in \EQ{eq:defL}. On the right-hand side of \EQ{eq:RS2B}, we have a concave roof. The relation in \EQ{eq:RS2B} is saturated by all single-qubit mixed states, and it is stronger than the Robertson-Schr\"odinger inequality given in \EQ{eq:RS1}.
\end{observation}
{\it Proof.} By taking the maximum of the bound on the right-hand side of \EQ{eq:Heisenberg2BB} over mixed-state decompositions, we arrive at the inequality \EQ{eq:RS2B}.
Let us examine the single-qubit case in detail. Let us take the operators
\begin{eqnarray}
A &=& \sigma_x,\nonumber\\
B &=& \cos \alpha \sigma_x + \sin \alpha \sigma_y,\label{eq:AB}
\end{eqnarray}
which is the most general case, apart from trivial rotations of the coordinate system. We characterize the state by the Bloch vector elements $\exs{\sigma_l}$ for $l=x,y,z.$ Substituting these in the bound in the Robertson-Schr\"odinger inequality given in \EQ{eq:RS1} we obtain for pure states
\begin{eqnarray}
\frac1 4 \vert L_{\psi}\vert^2&=&
\left[ \cos \alpha (\Delta \sigma_x)^2_{\psi} - \sin\alpha \langle \sigma_x \rangle_{\psi}\langle \sigma_y \rangle_{\psi}\right]^2 \nonumber\\ &+& \sin^2\alpha \langle \sigma_z \rangle_{\psi}^2.
\label{eq:Lpure}
\end{eqnarray}
Substituting $\langle \sigma_z \rangle_{\psi}^2 = 1- \langle \sigma_x \rangle_{\psi}^2 - \langle \sigma_y \rangle_{\psi}^2$
into \EQ{eq:Lpure} we arrive at
\begin{eqnarray}
\label{eq:15}
\frac1 4 \vert L_{\psi}\vert^2&=&(\Delta \sigma_x)^2_{\psi} \bigg[ \cos^2 \alpha (\Delta \sigma_x)^2 \nonumber\\ &-& \sin 2\alpha \langle \sigma_x \rangle_{\psi}\langle \sigma_y \rangle_{\psi} + \sin ^2 \alpha (\Delta \sigma_y)^2\bigg].
\end{eqnarray}
Simple algebra leads to
\begin{eqnarray}
\frac1 4 \vert L_{\psi}\vert^2&=&(\Delta A)^2_{\psi} (\Delta B)^2_{\psi}.
\end{eqnarray}
Hence, all pure states saturate \EQ{eq:RS2B}.
We will now show that for every single-qubit mixed state and every pair of single-qubit operators the inequality in \EQ{eq:RS2B} is saturated. If we can find a pure-state decomposition $\left\{ p_k, \ket{\psi_k} \right\}$ such that
\begin{equation}
( \Delta X )_{\varrho}^2 = ( \Delta X )_{\psi_k}^2
\end{equation}
for $X = A,B$ and all $k$, then this is sufficient to have equality in \EQ{eq:RS2B}, since all pure states saturate \EQ{eq:RS2B} and hence every term $L_{\psi_k}$ in the sum equals $2\sqrt{\va{A}_{\varrho}\va{B}_{\varrho}}.$ The following decomposition has this property. We imagine the Bloch sphere with a vector representing an arbitrary $\varrho$ and a straight line that goes through $\varrho$ and that is parallel to the $z$ axis. All states along this line within the Bloch sphere have the same expectation values for $\sigma_x$ and $\sigma_y$ as $\varrho,$ hence also the same variances. Therefore, we choose a decomposition of $\varrho$ with the pure states at the points where the line intersects with the Bloch sphere. With this, we have equality in Eq.~(\ref{eq:RS2B}).
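A minimal sketch of this construction (assuming NumPy; the Bloch vector chosen and the function names are ours, for illustration) builds the two pure states explicitly and verifies that the decomposition reproduces $\varrho$ while leaving the variances of $\sigma_x$ and $\sigma_y$ unchanged.
\begin{verbatim}
import numpy as np

sx = np.array([[0, 1], [1, 0]], dtype=complex)
sy = np.array([[0, -1j], [1j, 0]])
sz = np.diag([1.0, -1.0]).astype(complex)

def bloch_state(r):
    return 0.5*(np.eye(2) + r[0]*sx + r[1]*sy + r[2]*sz)

def variance(rho, X):
    return np.real(np.trace(rho @ X @ X) - np.trace(rho @ X)**2)

r = [0.3, 0.2, 0.4]                  # Bloch vector of a mixed state, |r| < 1
rho = bloch_state(r)

s = np.sqrt(1 - r[0]**2 - r[1]**2)   # vertical line meets the sphere at z = +/- s
psi_p = bloch_state([r[0], r[1], s])
psi_m = bloch_state([r[0], r[1], -s])
p_p = 0.5*(1 + r[2]/s)
p_m = 1 - p_p

print(np.allclose(rho, p_p*psi_p + p_m*psi_m))   # True: valid decomposition
for X in (sx, sy):
    print(variance(rho, X), variance(psi_p, X), variance(psi_m, X))  # equal
\end{verbatim}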
Let us now prove that the inequality in Eq.~(\ref{eq:RS2B}) is stronger than the Robertson-Schr\"odinger inequality given in \EQ{eq:RS1}. First, the right-hand side of \EQ{eq:RS2B} is never smaller than the right-hand side of the Robertson-Schr\"odinger inequality given in \EQ{eq:RS1}. This is evident since one of the possible decompositions is
\begin{equation}
p_1=1,\quad\quad\quad\varrho_1=\varrho.
\end{equation}
Second, let us now consider a concrete example when the bound in \EQ{eq:RS2B} is higher than the bound in the Robertson-Schr\"odinger inequality given in \EQ{eq:RS1}. For instance, let us consider the completely mixed state
\begin{equation}
\varrho=\openone/2 \label{eq:denmat}
\end{equation}
and the operators given in \EQ{eq:AB}. Since we found that the inequality in Eq.~(\ref{eq:RS2B}) is saturated by all single-qubit mixed states,
it is also saturated for the state given in \EQ{eq:denmat} and the right-hand side of Eq.~(\ref{eq:RS2B}) equals $1.$ The right-hand side of
the Robertson-Schr\"odinger inequality given in Eq.~(\ref{eq:RS1}) is zero. $\ensuremath{
\blacksquare}$
Let us now consider higher-dimensional systems. Here, not all pure states saturate the inequality given in \EQ{eq:RS1}. As an example, let us consider qutrit states, and the operators
\begin{eqnarray}
A&=&J_x,\nonumber\\
B&=&J_y.
\end{eqnarray}
First, we present a simple method that, as we will see, never gives a bound lower than the one in \EQ{eq:RS1}, and often gives a higher bound. As a starting point, let us consider the bound based on \EQ{eq:Heisenberg2BB} using the eigendecomposition of $\varrho$ given in \EQ{eq:rho_eigdecomp}:
\begin{eqnarray}
\va{A}_{\varrho}\va{B}_{\varrho}\ge \frac 1 4 \left(\sum_k \lambda_k L_{\ket k}\right)^2 .\label{eq:eig1}
\end{eqnarray}
Since $L_\varrho$ is not convex in $\varrho,$ the bound in the inequality given in \EQ{eq:eig1} might be smaller than the bound in the Robertson-Schr\"odinger relation given in \EQ{eq:RS1}.
We will now present a relation for which the bound is never smaller than in \EQ{eq:RS1}. Let us consider the unnormalized states
\begin{equation}
\sigma_{k}=\varrho-\lambda_k\ketbra{k}
\end{equation}
for $k=1,2,3.$ Here, $\sigma_{k}$ is an unnormalized mixture of the two eigenvectors of $\varrho$ orthogonal to $\ket{k}.$ Then we define the probabilities and normalized states
\begin{equation}
p_k={\rm Tr}(\sigma_k),\quad\quad\quad\varrho_k=\sigma_k/p_k.
\end{equation}
Using $\varrho_k,$ we decompose the density matrix as
\begin{equation}
\varrho=\lambda_k \ketbra{k}+p_k \varrho_k,
\end{equation}
where $k\in\{1,2,3\}.$
With these we define the quantity
\begin{equation}
\tilde L_k=\lambda_k L_{\ket{k}}+p_k L_{\varrho_{k}}.
\end{equation}
Then, we obtain an inequality
\begin{eqnarray}
\va{A}_{\varrho}\va{B}_{\varrho}\ge \frac 1 4 K^2,\label{eq:eig12}
\end{eqnarray}
where the variable in the bound is defined as
\begin{equation}
K=\max\left(\sum_k \lambda_k L_{\ket k}, \tilde L_1, \tilde L_2, \tilde L_3, L_{\varrho}\right),\label{eq:K}
\end{equation}
and $\max(a_1,a_2,a_3,\dots)$ denotes the maximum of the $a_k.$ Since $K\ge L_{\varrho},$ the bound in \EQ{eq:eig12} is never smaller than the bound in the Robertson-Schr\"odinger inequality given in \eqref{eq:RS1}. We will see that it is often larger. Here, we considered all possible ways of grouping the eigenvectors and forming mixed states from the groups. Such ideas can straightforwardly be generalized to larger dimensions, where more partitions of the eigenvectors need to be considered.
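A minimal sketch of evaluating the bound $K$ of \EQ{eq:K} for a qutrit (assuming NumPy; the example state and the helper names are ours, for illustration only):
\begin{verbatim}
import numpy as np

def mean(rho, X):
    return np.trace(rho @ X)

def var(rho, X):
    return np.real(mean(rho, X @ X) - mean(rho, X)**2)

def L_bound(rho, A, B):                    # L of the Robertson-Schrodinger bound
    cov = mean(rho, A @ B + B @ A) - 2*mean(rho, A)*mean(rho, B)
    comm = mean(rho, 1j*(A @ B - B @ A))
    return np.sqrt(abs(cov)**2 + abs(comm)**2)

# spin-1 operators J_x and J_y as A and B
A = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]], dtype=complex)/np.sqrt(2)
B = np.array([[0, -1j, 0], [1j, 0, -1j], [0, 1j, 0]])/np.sqrt(2)

rho = np.diag([0.5, 0.3, 0.2]).astype(complex)   # an example qutrit state
lam, vecs = np.linalg.eigh(rho)
pure = [np.outer(vecs[:, k], vecs[:, k].conj()) for k in range(3)]

eig_sum = sum(lam[k]*L_bound(pure[k], A, B) for k in range(3))
L_tilde = []
for k in range(3):
    sigma = rho - lam[k]*pure[k]
    p = np.real(np.trace(sigma))
    L_tilde.append(lam[k]*L_bound(pure[k], A, B) + p*L_bound(sigma/p, A, B))
K = max([eig_sum] + L_tilde + [L_bound(rho, A, B)])
print(var(rho, A)*var(rho, B), 0.25*K**2)        # LHS >= RHS of (eq:eig12)
\end{verbatim}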
Next, we test the uncertainty relation given in \EQ{eq:RS2B} numerically. We generate random single-qutrit states \cite{Zyczkowski2001Induced} and calculate the usual bound in the Robertson-Schr\"odinger inequality given in \EQ{eq:RS1}. We then test the simple method given in \EQ{eq:eig12} that gives improved bounds. The results can be seen in \FIG{fig:rs}(a). Note that typically we do not find the best possible bound, but this simple technique often leads to an improvement, and the new bound is significantly larger than the old one. There are also numerical methods to compute the concave roof in \EQ{eq:RS2B}, described in \APP{sec:numerical}. Based on these, we carry out a numerical optimization over mixed-state decompositions. The results can be seen in \FIG{fig:rs}(b). Even if the numerical search does not find the global maximum but only a smaller value, we still obtain a valid lower bound on the left-hand side of \EQ{eq:RS2B}.
\begin{figure}[t!]
\centerline{ \epsfxsize1.65in \epsffile{crconvex_fig_rsimproved_d3_eigendecomp_V2_Nit200}\hskip0.3cm
\epsfxsize1.65in \epsffile{crconvex_fig_rsimproved_d3_concroof_V2_Nit200}}
\hskip0.5cm (a) \hskip4cm (b)
\caption{(a) The left-hand side (LHS) minus the right-hand side (RHS) for the Robertson-Schr\"odinger inequality \eqref{eq:RS1} vs. LHS-RHS for \EQ{eq:RS2B}, taking the eigendecomposition of the density matrix and using the inequality given in \EQ{eq:eig12}. Points that are below the dashed line correspond to quantum states for which the bound improved. Even though our method is simple, the improvement is significant. We generated $200$ random states. (b) The same for the concave roof obtained numerically.}
\label{fig:rs}
\end{figure}
\section{Uncertainty relations with several variances and the QFI}
\label{sec:several_var}
In this section, we derive uncertainty relations with the QFI, and one or more variances. This provides a lower bound on the QFI based on variances of angular momentum operators.
\subsection{Sum of two variances}
\label{sec:Sum of two variances}
Ideas similar to the ones in \SEC{sec:convroof} work even if we have uncertainty relations with the sum of two variances. For example, for a continuous variable system
\begin{equation}
\va{x}+\va{p}\ge 1
\end{equation}
holds, where $x$ and $p$ are the position and momentum operators. This must be valid for any state, including pure states. Hence, for any decomposition of the density matrix it follows that
\begin{equation}
\sum_k p_k \va{x}_{\psi_k}+\sum_k p_k \va{p}_{\psi_k}\ge 1.
\end{equation}
For one of the two operators, say, for $p,$ we can choose the decomposition that leads to the minimal value for the average variance, i.e., the QFI over four. Then, since $\sum_k p_k \va{x}_{\psi_k}\le \va{x}$ holds, it follows that
\begin{equation}
\va{x}+\tfrac{1}{4}F_Q[\varrho,p]\ge 1.
\end{equation}
Note that this could be obtained more directly from the uncertainty relation in \EQ{eq:varFQproductUncRel} using
\begin{equation}
X+Y \ge 2 \sqrt{XY}, \label{eq:XYsqrt}
\end{equation}
for $X,Y\ge 0,$ but we intended to demonstrate
the key idea of the next sections.
\subsection{Lower bound on the QFI}
\label{sec:3varfisher}
Similar reasoning works for the uncertainty relations for the sum of three variances. Let us start from the relation for pure states
\begin{equation}\label{eq:JxyzN2b}
\va{J_x}+\va{J_y}+\va{J_z} \ge j,
\end{equation}
where $J_l$ are the spin components fulfilling
\begin{equation}
J_x^2+J_y^2+J_z^2=j(j+1)\openone.
\end{equation}
Due to the concavity of the variance, the inequality in \EQ{eq:JxyzN2b} holds also for mixed states. We can improve this relation. From the inequality given in \EQ{eq:JxyzN2b}, following the ideas of \SEC{sec:Sum of two variances}, we arrive at
\begin{equation}
\label{eq:Jxyzvar2}
\va{J_x}+\va{J_y}+\tfrac{1}{4}F_Q[\varrho,J_z]\ge j.
\end{equation}
\EQL{eq:Jxyzvar2} is a stronger relation than \EQ{eq:JxyzN2b}. The left-hand side of \EQ{eq:Jxyzvar2} is never larger than the left-hand side of \EQ{eq:JxyzN2b}. The difference between the two is given in \EQ{eq:vaminusFQ}. This quantity has been studied in \REF{Toth2017Lower}. It is zero for pure states and largest for the state
\begin{equation}
\frac1{ 2}\left(\ketbra{-j}_z+\ketbra{+j}_z\right).\label{eq:pmz}
\end{equation}
For this case, for the $z$-component of the spin we have $\va{J_z}=j^2$ and $F_Q[\varrho,J_z]=0,$ while for the variance of the $x$-component we have $\va{J_x}=j/2.$
Thus, the state given in \EQ{eq:pmz} saturates the inequality in \EQ{eq:Jxyzvar2}.
Based on these, we arrive at the following observation.
\begin{observation}
For a spin-$j$ particle, the following inequality bounds from below the metrological usefulness of the state
\begin{equation}\label{eq:FQbound}
F_Q[\varrho,J_z]\ge 4j-4\va{J_x}-4\va{J_y}=:B_{FQ}.
\end{equation}
\end{observation}
Let us now examine whether the bound in \EQ{eq:FQbound} can be improved. It is known that \EQ{eq:JxyzN2b} is saturated by all pure
SU(2) coherent states or spin-coherent states, which are defined as
\begin{equation}
\ket{s}=U\ket{+j}_z, \label{eq:spincoherent}
\end{equation}
where the unitary is given as
\begin{equation}
U=e^{-i\vec{c}\vec{J}},
\end{equation}
with $\vec{c}$ a three-vector of real numbers and $\vec{J}=(J_x,J_y,J_z).$ Hence, the inequality given in \EQ{eq:FQbound} is also saturated by all such states, and the bound is optimal.
The inequality in \EQ{eq:FQbound} bounds the QFI from below based on variances. Such a bound can be very useful in some situations, since we do not need to carry out a metrological task to get information about $F_Q[\varrho,J_z].$ Let us now consider some quantum states, and compare the bound given by \EQ{eq:FQbound} to the QFI of those states. Our first example will be planar squeezed states \cite{He2011Planar}. Such states saturate the uncertainty relation
\begin{equation}\label{eq:planar}
\va{J_x}+\va{J_y}\ge C_j,
\end{equation}
where for the bound
\begin{equation}
C_{\frac{1}{2}}=\tfrac{1}{4},\quad C_{1}=\tfrac{7}{16}
\end{equation}
holds, while for higher $j$'s the bound is obtained numerically in \REF{He2011Planar}. Note that planar squeezed states minimize the left-hand side of \EQ{eq:planar} such that their mean spin is not zero. Thus, they are different from the states that minimize $\exs{J_x^2}+\exs{J_y^2}.$
\begin{figure}[t!]
\centerline{
\epsfxsize2.5in \epsffile{crconvex_fig_planarsq.pdf}
}
\caption{Planar squeezed states. (solid) The QFI and (dashed) our lower bound $B_{FQ}$ given in \EQ{eq:FQbound} for planar squeezed states for a range of $j.$ (dotted) As a reference, we plot the QFI corresponding to the state fully polarized in the $x$-direction.}
\label{fig:planar}
\end{figure}
\begin{figure}[t!]
\centerline{
\epsfxsize2.5in \epsffile{crconvex_fig_spinsq.pdf}}
\caption{ Spin-squeezed states. (solid) The QFI and (dashed) our lower bound $B_{FQ}$ defined in \EQ{eq:FQbound} for spin-squeezed states with $j=50.$ The spin-squeezed states are obtained as the ground states of \EQ{eq:Hsq} for a range of $\lambda.$ (dotted) As a reference, we plot the QFI corresponding to the state fully polarized in the $x$ direction.}
\label{fig:spinsq}
\end{figure}
Based on the inequalities given in \EQS{eq:FQbound} and \eqref{eq:planar}, for planar squeezed states we have
\begin{equation}\label{eq:FQbound2}
F_Q[\varrho,J_z]\ge B_{FQ} = 4(j-C_j).
\end{equation}
In \EQ{eq:FQbound2}, the value of $B_{FQ}$ approaches $4j$ since for large $j$ we have \cite{He2011Planar}
\begin{equation}
C_j\ll j.
\end{equation}
In \FIG{fig:planar}, we plotted the QFI vs. our lower bound for planar squeezed states for various $j$'s.
We will present another class of states for which our bound on the QFI can be useful. We will consider the state $\ket{j}_x$ squeezed in the $y$-direction. Spin-squeezed states can be obtained as the ground states of the Hamiltonian \cite{Sorensen2001Entanglement}
\begin{equation}\label{eq:Hsq}
H_{{\rm sq}}(\lambda)=J_y^2-\lambda J_x.
\end{equation}
For $\lambda=\infty,$ the ground state is $\ket{j}_x$, the state fully polarized in the $x$-direction. For $0<\lambda<\infty,$ it is a state spin squeezed along the $y$-direction. When the state becomes squeezed along the $y$-direction, the sum of the two variances in \EQ{eq:FQbound} starts to decrease. Then, due to \EQ{eq:FQbound}, the QFI has to increase and the state becomes more useful for metrology. In \FIG{fig:spinsq}, we plotted the right-hand side and the left-hand side of \EQ{eq:FQbound} for a range of $\lambda$ for $j=50.$ Our lower bound is quite close to the QFI for states with an almost maximal spin.
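The curves in \FIG{fig:spinsq} can be reproduced schematically with the following sketch (assuming NumPy; the routine names and the sampled $\lambda$ values are our choices): for each $\lambda$ we take the ground state of \EQ{eq:Hsq}, use that for a pure state $F_Q[\varrho,J_z]=4\va{J_z},$ and compare it with the bound $B_{FQ}$ of \EQ{eq:FQbound}.
\begin{verbatim}
import numpy as np

def spin_ops(j):
    d = int(2*j + 1)
    m = np.arange(j, -j - 1, -1)
    Jp = np.zeros((d, d))
    for k in range(d - 1):                   # <m_k|J_plus|m_{k+1}>
        Jp[k, k + 1] = np.sqrt(j*(j + 1) - m[k + 1]*(m[k + 1] + 1))
    Jx = (Jp + Jp.T)/2
    Jy = (Jp - Jp.T)/(2*1j)
    Jz = np.diag(m)
    return Jx, Jy, Jz

def var(psi, X):
    return np.real(psi.conj() @ X @ X @ psi - (psi.conj() @ X @ psi)**2)

j = 50
Jx, Jy, Jz = spin_ops(j)
for lam in [0.1, 1.0, 10.0, 100.0]:
    w, v = np.linalg.eigh(Jy @ Jy - lam*Jx)
    psi = v[:, 0]                            # spin-squeezed ground state
    FQ = 4*var(psi, Jz)
    B_FQ = 4*j - 4*var(psi, Jx) - 4*var(psi, Jy)
    print(lam, FQ, B_FQ)                     # B_FQ is a lower bound on FQ
\end{verbatim}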
Next, we will determine what the largest precision is for SU(2) coherent states or spin-coherent states defined in \EQ{eq:spincoherent}. It is easy to show that $F_Q[\varrho,J_z]$ is maximal for $\ket{j}_x,$ the SU(2) coherent state pointing in the $x$-direction. For that state, we have
\begin{equation}
F_Q[\ket{j}_x,J_z]=2j,\quad\va{J_x}=0,\quad \va{J_y}=j/2.
\end{equation}
Due to the convexity of the QFI, for the mixtures of SU(2) coherent states
\begin{equation}
\label{eq:SU2m}
F_Q[\varrho_{{\rm SU}(2){-\rm mixture}},J_z]\le 2j
\end{equation}
holds. Any state that violates \EQ{eq:SU2m} is more useful metrologically than a mixture of SU(2) coherent states. Both in \FIG{fig:planar} and in \FIG{fig:spinsq}, we plot a line corresponding to the bound in the inequality given in \EQ{eq:SU2m}.
Finally, let us generalize these ideas to more than three operators. Let us consider the following relation for pure states
\begin{equation}\label{eq:Gkequality}
\sum_{n=1}^{d^2-1} \va{G_n} = 4j,
\end{equation}
where $G_n$ are the SU($d$) generators fulfilling
\begin{equation}
{\rm Tr}(G_kG_l)=2\delta_{kl},
\end{equation}
and $d=2j+1$ is the dimension of the qudit (see, e.g., \REF{Vitagliano2011Spin}). Due to the concavity of the variance it follows that for mixed states \cite{Vitagliano2011Spin}
\begin{equation}\label{eq:varGk}
\sum_{n=1}^{d^2-1} \va{G_n} \ge 4j.
\end{equation}
Based on the discussion above, we can obtain an even stronger relation.
\begin{observation}
For a spin-$j$ particle, the following inequality bounds from below
the metrological usefulness of the state
\begin{equation}\label{eq:varGk_imrpoved}
\tfrac{1}{4} F_Q[\varrho,G_1]+\sum_{n=2}^{d^2-1} \va{G_n} \ge 4j.
\end{equation}
\end{observation}
\section{Alternative derivation based on convexity arguments}
\label{sec:alt}
In this section, we present a simple idea that can be used to rederive some of the previous results. The derivation becomes much shorter, while the conditions for saturating the inequalities are not so easy to obtain. We also derive further inequalities with the variance and the QFI. Part of the section is a summary of already existing results, which we are connecting to the methods of the paper.
\begin{observation}
\label{obs:convroof}
Let us consider a relation
\begin{equation}
\va{A}_\varrho \ge g(\varrho),\label{eq:vag}
\end{equation}
which is true for pure states. If $g(\varrho)$ is convex in density matrices, then
\begin{equation}
\frac 1 4 F_Q[\varrho,A] \ge g(\varrho) \label{eq:vag22}
\end{equation}
holds for mixed states. If $g(\varrho)$ is not convex in $\varrho,$ the inequality
\begin{equation}
\frac 1 4 F_Q[\varrho,A] \ge \min_{\{p_k,\ket{\psi_k}\}} \sum_k p_k g(\ket{\psi_k}) \label{eq:vag22bfq}
\end{equation}
still holds.
\end{observation}
{\it Proof.} On the left-hand side of the inequality in \EQ{eq:vag22} there is the QFI of $\varrho$ over four. Based on \EQ{e2b}, we know that it is a convex roof, that is, the largest convex function that equals $\va{A}_\varrho$ for all pure states. If $g(\varrho)$ is convex in $\varrho,$ then, on the right-hand side of Eq.~(\ref{eq:vag22}), there is an expression that is never larger than the left-hand side for pure states. If $g(\varrho)$ is not convex in $\varrho,$ the right-hand side of Eq.~(\ref{eq:vag22bfq}) is still convex in $\varrho$ and equals $g$ for pure states. $\ensuremath{
\blacksquare}$
Using \OBS{obs:convroof}, the inequality with the product of the variance and the QFI given in \EQ{eq:RS_improved_with_FQ} can be proved as follows.
We will use the ideas of \REF{Giovanetti2003Characterizing} to convert relations with the product of uncertainties to relations with the sum of uncertainties.
Based on \EQ{eq:XYsqrt}, we know that
\begin{equation}
\alpha \va{A} + \beta \va{B} \ge 2\sqrt{\alpha\beta} \sqrt{ \va{A} \va{B} }\label{eq:XYsqrt2}
\end{equation}
holds for all $\alpha,\beta\ge0.$ From the product uncertainty relation given in \EQ{eq:RS1} and from \EQ{eq:XYsqrt2} it follows that for the weighted sum of the variances
\begin{equation}
\alpha \va{A} + \beta \va{B} \ge \sqrt{\alpha\beta} L_{\varrho} \label{eq:ABsqrt}
\end{equation}
holds for all $\alpha,\beta\ge0.$ From \EQ{eq:ABsqrt}, we can get a relation with a quantity on the right-hand side that is convex in the density matrix
\begin{equation}
\alpha \va{A} + \beta \va{B} \ge \sqrt{\alpha\beta} \min_{\{p_k,\ket{\psi_k}\}} \sum_k p_k L_{\psi_k}. \label{eq:ABsqrt2}
\end{equation}
Let us rewrite \EQ{eq:ABsqrt2} as
\begin{equation}
\beta \va{B} \ge \sqrt{\alpha\beta} \min_{\{p_k,\ket{\psi_k}\}} \left(\sum_k p_k L_{\psi_k} \right)-\alpha \va{A}. \label{eq:ABsqrt3}
\end{equation}
Now on the left-hand side we have a variance, while the right-hand side is convex in the state. Using \OBS{obs:convroof}, we arrive at
\begin{equation}
\beta \frac 1 4 F_Q[\varrho,B] \ge \sqrt{\alpha\beta} \min_{\{p_k,\ket{\psi_k}\}} \left(\sum_k p_k L_{\psi_k} \right)-\alpha \va{A}. \label{eq:ABsqrt4}
\end{equation}
Hence, we arrive at a relation with the weighted sum of the variance and the QFI
\begin{equation}
\alpha \va{A}+\beta \frac 1 4 F_Q[\varrho,B] \ge \sqrt{\alpha\beta} \min_{\{p_k,\ket{\psi_k}\}} \left(\sum_k p_k L_{\psi_k} \right). \label{eq:ABsqrt4B}
\end{equation}
From the fact that the inequality in \EQ{eq:ABsqrt4} holds for all $\alpha,\beta\ge0,$ the inequality with the product of the variance and the QFI given in \EQ{eq:RS_improved_with_FQ} follows.
Using \OBS{obs:convroof}, the uncertainty relation with two variances and the QFI given in \EQ{eq:FQbound} can be proved as follows. The uncertainty relation with three variances in \EQ{eq:JxyzN2b} can be rewritten as
\begin{equation}
\va{J_x}\ge j-\va{J_y}-\va{J_z},
\end{equation}
which is of the form given in \EQ{eq:vag}, since its right-hand side is convex in $\varrho$ and the left-hand side is a variance.
Hence, the inequality in \EQ{eq:FQbound} can be rederived.
Next, we prove a general bound on the metrological usefulness of a quantum state based on its spin length using \OBS{obs:convroof}.
\begin{observation}
The metrological usefulness of a state is bounded from below via the spin length as
\begin{equation}
F_Q[\varrho,J_x]\ge 4jF_j(\ex{J_z}/j),\label{eq:FQFjbound}
\end{equation}
where $F_j(X)$ is a convex function defined as
\begin{equation}
F_j(X)=\min_{\varrho:\ex{J_z}=Xj} \frac{\va{J_x}}{j}.\label{eq:Fjdef}
\end{equation}
In particular, if $\ex{J_z}\ne 0$ then $F_Q[\varrho,J_x]>0.$
\end{observation}
{\it Proof.} For the components of the angular momentum for a particle with spin-$j$ \cite{Sorensen2001Entanglement}
\begin{equation}
\va{J_x}\ge jF_j(\ex{J_z}/j)
\end{equation}
holds. Using \OBS{obs:convroof}, we can obtain an inequality for the QFI
\begin{equation}
\frac 1 4 F_Q[\varrho,J_x]\ge jF_j(\ex{J_z}/j).
\end{equation}
Based on the definition in \EQ{eq:Fjdef}, it is clear that if $\ex{J_z}\ne 0$ then $F_j(\ex{J_z}/j)>0.$ Hence, based on the relation in \EQ{eq:FQFjbound}, it follows that $F_Q[\varrho,J_x]>0.$ Thus, if the $z$-component of the angular momentum has a nonzero expectation value then the state can be used for metrology with the Hamiltonian $J_x.$ $\ensuremath{
\blacksquare}$
Usually, the function $F_j(X)$ is computed by looking for the ground state $\ket{\Psi_{\lambda,\lambda_2}}$ of the Hamiltonian \cite{Sorensen2001Entanglement}
\begin{equation}
H_{\lambda,\lambda_2}=J_x^2-\lambda J_z - \lambda_2 J_x,\label{eq:Hlambdalabda2}
\end{equation}
where $\lambda$ and $\lambda_2$ play the role of Lagrange multipliers. In particular, we need the ground state of the Hamiltonian given in \EQ{eq:Hlambdalabda2} for which $\ex{J_z}$ equals a given value and $\va{J_x}$ is minimal, that is,
\begin{equation}
F_j(X)=\min_{\lambda,\lambda_2:\ex{J_z}_{\ket{\Psi_{\lambda,\lambda_2}}}=Xj} \va{J_x}_{\ket{\Psi_{\lambda,\lambda_2}}}/j.\label{eq:Fjbound}
\end{equation}
In this way the minimization is over two real parameters, rather than over a quantum state. Such a calculation has been used to obtain a lower bound on the variance $\va{J_x},$ if the expectation value $\ex{J_z}$ is constrained to be a given constant \cite{Sorensen2001Entanglement}. For an integer $j,$ the state minimizing $\va{J_x}$ has $\ex{J_x}=0,$ thus $\lambda_2$ can be omitted from the Hamiltonian in \EQ{eq:Hlambdalabda2}. Later it has also been shown that with such a procedure we get a lower bound on $\frac 1 4 F_Q[\varrho,J_x]$ \cite{Apellaniz2017Optimal}.
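A minimal sketch of this procedure for an integer $j$ (assuming NumPy, and reusing spin_ops and var from the previous sketch; the scanned range of $\lambda$ is an arbitrary choice of ours):
\begin{verbatim}
import numpy as np
# spin_ops(j) and var(psi, X) as defined in the previous sketch

j = 4                                   # integer spin: lambda_2 can be omitted
Jx, Jy, Jz = spin_ops(j)
points = []
for lam in np.linspace(0.0, 20.0, 201):
    w, v = np.linalg.eigh(Jx @ Jx - lam*Jz)
    psi = v[:, 0]
    X = np.real(psi.conj() @ Jz @ psi)/j
    points.append((X, var(psi, Jx)/j))  # candidate points (X, F_j(X))
# 4*j*F_j(<Jz>/j) then lower bounds F_Q[rho, J_x], cf. the observation above
\end{verbatim}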
\begin{observation}
\label{obs:convevroof_multiterm}
Let us consider a relation
\begin{equation}
\sum_{n=1}^{N_A} \va{A_n}_\varrho \ge g(\varrho),
\end{equation}
which is true for pure states with some $A_n$ operators. Here $N_A$ is the number of $A_n$ operators we consider. If $g(\varrho)$ is convex in density matrices, then
\begin{equation}
I(\{A_n\}_{n=1}^{N_A},\varrho) \ge g(\varrho)
\end{equation}
holds for mixed states, where we define
\begin{equation}
I(\{A_n\}_{n=1}^{N_A},\varrho)= \min_{\{p_k,\ket{\psi_k}\}}\sum_k p_k \sum_{n=1}^{N_A} \va{A_n}_{\psi_k}.\label{eq:infsum}
\end{equation}
If $g(\varrho)$ is not convex in $\varrho$ then the inequality with a convex roof
\begin{equation}
I(\{A_n\}_{n=1}^{N_A},\varrho) \ge \min_{\{p_k,\ket{\psi_k}\}} \sum_k p_k g(\ket{\psi_k}) \label{eq:vag22bbb}
\end{equation}
still holds.
\end{observation}
{\it Proof.} The proof is analogous to that of \OBS{obs:convroof}. $\ensuremath{
\blacksquare}$
For a single operator
\begin{equation}
I(\{A\},\varrho)= \frac1 4 F_Q[\varrho,A]
\end{equation}
holds. For two or more operators, it is clear that $I(\{A_n\}_{n=1}^{N_A},\varrho) $ can be larger than the sum of the corresponding QFI terms
\begin{equation}
I(\{A_n\}_{n=1}^{N_A},\varrho) \ge \frac1 4 \sum_{n=1}^{N_A} F_Q[\varrho,A_n].
\end{equation}
There are efficient methods to calculate the convex roof in \EQ{eq:infsum} with semidefinite programming \cite{Toth2015Evaluating}. Calculating the minimum of \EQ{eq:infsum} for a set of constraints on the expectation values of operators $B_n$ is possible with the Hamiltonian
\begin{equation}
H_{\{\lambda_n\}_{n=1}^{N_A},\{\mu_n\}_{n=1}^{N_c}}=\sum_{n=1}^{N_A} (A_n^2 - \lambda_n A_n) - \sum_{n=1}^{N_c} \mu_n B_n,\label{eq:HAB}
\end{equation}
where $N_c$ is the number of constraints. In many cases, the lower bound on $I(\{A_n\}_{n=1}^{N_A},\varrho)$ can be obtained, analogously to \EQ{eq:Fjbound}, as
\begin{equation}
\min_{\{\lambda_n\}_{n=1}^{N_A},\{\mu_n\}_{n=1}^{N_c}:\{\ex{B_n}=b_n\}_{n=1}^{N_c}} \sum_{k=1}^{N_A} \va{A_k}_{\ket{\Psi_{\{\lambda_n\}_{n=1}^{N_A},\{\mu_n\}_{n=1}^{N_c}}}}.\label{eq:minvar}
\end{equation}
In principle, some complications might arise if the ground state of the Hamiltonian given in \EQ{eq:HAB} is degenerate or due to the fact that the minimization was restricted to pure states \footnote{If the ground state of \EQ{eq:HAB} is degenerate then we can break the degeneracy with additional operators. Note also that \EQ{eq:minvar} works based on optimizing over pure states only. However, for some $\{\ex{B_n}=b_n\}$ constraints there might not be a corresponding pure state. Thus, when we plot $\sum_n \ex{A_n^2}$ as a function of $\ex{A_n}$ and $\ex{B_n}$ from results of the optimization over pure states, we have to construct the convex hull from below. Then, we can obtain the function $f$ giving the minimum as $(\sum_n \ex{A_n^2})_{\min}=f(\{\ex{A_n}\}_{n=1}^{N_A},\{\ex{B_n}\}_{n=1}^{N_c})$ that is valid even for mixed states. We can then compute the minimal variance based on this function rather than using \EQ{eq:minvar}.}.
\REFL{Apellaniz2017Optimal} considers a similar problem, but uses the Legendre transform instead of Lagrange multipliers for the case of a single $A_n$ operator. The method can straightforwardly be generalized to the case of several $A_n$ operators.
Finally, we can obtain a similar relation with a maximization rather than a minimization over the decomposition.
\begin{observation}
\label{obs:convevroof_multiterm2}
Let us consider a relation
\begin{equation}
\sum_{n=1}^{N_A} \va{A_n}_\varrho \le h(\varrho),
\end{equation}
which is true for pure states with some $A_n$ operators. If $h(\varrho)$ is concave in density matrices, then
\begin{equation}
R(\{A_n\}_{n=1}^{N_A},\varrho) \le h(\varrho)
\end{equation}
holds for mixed states, where we define via a concave roof the quantity
\begin{equation}
R(\{A_n\}_{n=1}^{N_A},\varrho)= \max_{\{p_k,\ket{\psi_k}\}}\sum_k p_k \sum_{n=1}^{N_A} \va{A_n}_{\psi_k}.\label{eq:infsum2}
\end{equation}
If $h(\varrho)$ is not concave in $\varrho,$ the inequality with a concave roof
\begin{equation}
R(\{A_n\}_{n=1}^{N_A},\varrho) \le \max_{\{p_k,\ket{\psi_k}\}} \sum_k p_k h(\ket{\psi_k}) \label{eq:vag22b}
\end{equation}
still holds.
\end{observation}
{\it Proof.} The proof is analogous to that of \OBS{obs:convevroof_multiterm}. $\ensuremath{
\blacksquare}$
Clearly, for a single operator
\begin{equation}
R(\{A\},\varrho)=\va{A}_{\varrho}
\end{equation}
holds. It can be shown that if we have only two operators then \cite{Leka2013Some,Petz2014,Toth2015Evaluating}
\begin{equation}
R(\{A_1,A_2\},\varrho)=\va{A_1}_{\varrho}+\va{A_2}_{\varrho}.
\end{equation}
For three observables, $R(\{A_1,A_2,A_3\},\varrho)$ can be smaller than the sum of the variances
\begin{equation}
R(\{A_1,A_2,A_3\},\varrho)\le\va{A_1}_{\varrho}+\va{A_2}_{\varrho}+\va{A_3}_{\varrho}.
\end{equation}
Let us see a simple application for entanglement detection.
\begin{observation}
For separable states of $N$ spin-$j$ particles
\begin{equation}
V(\{J_x,J_y,J_z\},\varrho)\ge Nj\label{eq:Vxyz}
\end{equation}
holds, which has been presented in \REF{Toth2015Evaluating}. Any state violating the inequality \EQ{eq:Vxyz} is entangled.
\end{observation}
{\it Proof.} We know that for pure product states of $N$ spin-$j$ particles we have \cite{Toth2004Entanglement,Toth2007Optimal,Toth2010Generation,Vitagliano2011Spin}
\begin{equation}
\va{J_x}+\va{J_y}+\va{J_z}\ge Nj.\label{eq:varJxyz}
\end{equation}
Thus, \EQ{eq:Vxyz} is true for pure product states. Since $V(\{J_x,J_y,J_z\},\varrho)$ is concave in $\varrho,$ it is also true for separable states, which are just mixtures of product states. $\ensuremath{
\blacksquare}$
The left-hand side of the relation with three variances given in \EQ{eq:varJxyz} is not smaller than the left-hand side of the criterion with $V(\{J_x,J_y,J_z\},\varrho)$ given in \EQ{eq:Vxyz}, and in some cases it is larger. Hence, the condition given in \EQ{eq:Vxyz} detects all states that are detected as entangled by \EQ{eq:varJxyz}, and it detects some further states.
\section{Metrological usefulness and entanglement conditions}
\label{sec:var_metrlogy}
In this section, we will connect the violation of uncertainty-based entanglement criteria to the metrological usefulness of the quantum state. With these findings, we address an important problem of entanglement theory: even if entanglement is detected, it is not yet sure that the entanglement is useful for some quantum information processing task or quantum metrology \cite{Pezze2009Entanglement}. We will discuss first entanglement conditions for two bosonic modes, then entanglement criteria for two spins.
\subsection{Two-mode quantum states}
In this section, we will consider continuous variable systems. A bosonic mode can be described by the canonical $x$ and $p$ operators. For coherent states $\ket{\alpha},$
\begin{equation}
\va{x}=\va{p} = \frac{1}{2}\label{eq:coh12}
\end{equation}
holds. For mixtures of coherent states
\begin{equation}
\varrho_{\rm mc}=\sum_k p_k \ketbra{\alpha_k}
\label{eq:cohmixed}
\end{equation}
we have, due to the concavity of the variance and the convexity of the QFI
\begin{equation}
\va{x}, \va{p}\ge \frac{1}{2},\quad F_Q[\varrho,x], F_Q[\varrho,p]\le 2.
\end{equation}
Let us now consider a two-mode system with the position and momentum operators $x_1, p_1, x_2, p_2.$
\begin{observation}
For a mixture of products of coherent states $\alpha_k^{(l)}$ of the form
\begin{equation}
\varrho_{\rm sepc}=\sum_k p_k \ketbra{\alpha_k^{(1)}} \otimes \ketbra{\alpha_k^{(2)}}
\label{eq:cohsep}
\end{equation}
the collective variances of the position and momentum are bounded from below as
\begin{equation}
\vasq{(x_1\pm x_2)}\ge 1 ;\;\;\;\; \vasq{(p_1\pm p_2)}\ge 1.
\label{eq:vaxp}
\end{equation}
Moreover, the QFI for the same operators is bounded from above as
\begin{equation}
F_Q[\varrho,p_1\pm p_2] \le4;\;\;\;\; F_Q[\varrho,x_1\pm x_2]\le4.
\label{eq:FQxp}
\end{equation}
Note that for such states the multi-variable Glauber-Sudarshan $P$ function is non-negative \cite{Agudelo2013Quasiprobabilities}.
\end{observation}
{\it Proof.} For a coherent state, for the variances of $x$ and $p$ the relation in \EQ{eq:coh12} holds.
Then, for a tensor product of two coherent states we have
\begin{equation}
\vasq{(x_1\pm x_2)}=\vasq{(p_1\pm p_2)} = 1.
\end{equation}
Since for pure states the QFI is four times the variance, for a tensor product of two coherent states we have
\begin{equation}
F_Q[\varrho,x_1\pm x_2]=F_Q[\varrho,p_1\pm p_2]= 4.
\end{equation}
Then, the statement follows from the concavity of the variance and the
convexity of the QFI. $\ensuremath{
\blacksquare}$
Let us now consider entanglement detection in such systems with uncertainty relations. A well-known entanglement criterion is \cite{Duan2000Inseparability,Simon2000Peres-Horodecki}
\begin{equation}
\vasq{(x_1+x_2)} + \vasq{(p_1-p_2)} \ge 2.\label{eq:Duan}
\end{equation}
If a quantum state violates \EQ{eq:Duan}, then it is entangled.
Next, let us connect the violation of \EQ{eq:Duan} to the metrological properties of the quantum state.
\begin{observation}
For a two-mode state, the following uncertainty relation holds
\begin{eqnarray}
&&\vasq{(x_1+x_2)} +\vasq{(p_1-p_2)} \ge \nonumber\\
&&\;\;\;\;\;\;\;\;\;\;4/F_Q[\varrho,p_1+p_2] + 4/F_Q[\varrho,x_1-x_2] .
\label{eq:vaxpFxp_product}
\end{eqnarray}
\end{observation}
As a consequence of \EQ{eq:vaxpFxp_product}, states violating the entanglement condition given in \EQ{eq:Duan} are metrologically more useful than states of the form given in \EQ{eq:cohsep}, i.e., bipartite states with a non-negative multi-variable Glauber-Sudarshan $P$ function.
{\it Proof.} We start from the relations
\begin{subequations}
\begin{eqnarray}
\vasq{(x_1+x_2)} F_Q[\varrho,p_1+p_2] &\ge& 4,\label{eq:varFQ_simple1}\\
\vasq{(p_1-p_2)} F_Q[\varrho,x_1-x_2] &\ge& 4,\label{eq:varFQ_simple2}
\end{eqnarray}
\label{eq:varFQ_simple}
\end{subequations}
which are the applications of \EQ{eq:varFQproductUncRel}. Then, in both inequalities of \EQ{eq:varFQ_simple} we divide by the term containing the QFI. Finally, we sum the two resulting inequalities.
Next, we will show that violating the condition given in \EQ{eq:Duan} implies metrological usefulness compared to a special class of separable states. Due to \EQ{eq:vaxpFxp_product}, the violation of the entanglement criterion given in \EQ{eq:Duan} implies the violation of one of the inequalities of \EQ{eq:FQxp}. Thus, violation of the uncertainty relation-based entanglement condition also means that the state has larger metrological usefulness than states of the type given in \EQ{eq:cohsep}. $\ensuremath{
\blacksquare}$
Note however that we did not prove that violating the entanglement condition given in \EQ{eq:Duan} leads to larger metrological usefulness than that of separable states in general, since even for pure product states $F_Q[\varrho,x_1\pm x_2]$ or $F_Q[\varrho,p_1\pm p_2]$ can be arbitrarily large for two bosonic modes.
\subsection{Spin systems}
Next, we will consider a system of two spins. For this case, we can show that if entanglement is detected by a well-known entanglement condition, then the state is more useful for metrology than a certain subset of separable states.
Let us first recall a well-known entanglement condition for two spins \cite{Toth2004Entanglement}. For separable states
\begin{eqnarray}
\label{eq:varjxjyjz_bipartite}
&&\vasq{(J_x^{(1)}+J_x^{(2)})}+\vasq{(J_y^{(1)}+J_y^{(2)})}+\vasq{(J_z^{(1)}+J_z^{(2)})}\nonumber\\
&&\quad\quad\quad\ge j_1+j_2
\end{eqnarray}
holds. Any state violating \EQ{eq:varjxjyjz_bipartite} is entangled. Note that this is the same condition as \EQ{eq:varJxyz} for the special case of two qudits.
Next, we need a similar relation for the QFI. For that, let us consider a special class of mixed states, a mixture of spin coherent states of a spin-$j$ particle given as
\begin{equation}
\varrho_{\rm msc}=\sum_k p_k \ketbra{s_k}.
\label{eq:spincohmix}
\end{equation}
Here, the spin-coherent states are defined similarly as in \EQ{eq:spincoherent}. It is easy to see that for such states
\begin{equation}
\sum_{l=x,y,z}F_Q[\varrho,J_l]\le 4j\label{eq:FQQ}
\end{equation}
holds, where the inequality is saturated for all pure spin coherent states. The maximum of the left-hand side of \EQ{eq:FQQ} for general quantum states is $4j(j+1).$ We add that for spin-coherent states
\begin{equation}
F_Q[\varrho,J_l]\le 2j\label{eq:FQQ2}
\end{equation}
also holds for $l=x,y,z,$ where the inequality is saturated for $\ket{+j}_k$ for $k\ne l.$ The maximum of the left-hand side of \EQ{eq:FQQ2} for general quantum states is $4j^2.$
Let us now move to bipartite systems.
\begin{observation}
For a mixture of products of spin-coherent states $\ket{s_k^{(l)}}$ of the form
\begin{equation}
\varrho_{\rm sepsc}=\sum_k p_k \ketbra{s_k^{(1)}} \otimes \ketbra{s_k^{(2)}},
\label{eq:spincohsep}
\end{equation}
the relation with the sum of three QFI terms
\begin{eqnarray}
\label{eq:sep3F}
&& F_Q[\varrho,J_x^{(1)}\pm J_x^{(2)}]+F_Q[\varrho,J_y^{(1)}\pm J_y^{(2)}]
+F_Q[\varrho,J_z^{(1)}\pm J_z^{(2)}]\nonumber\\
&&\quad\quad\quad \le 4(j_1+j_2)
\end{eqnarray}
holds.
\end{observation}
{\it Proof.} This is just a generalization of the statements presented in \REFS{Hyllus2012Fisher,Toth2012Multipartite}. For a pure product of spin-coherent states, for the left-hand side of \EQ{eq:sep3F} we have
\begin{eqnarray}
4 \left[\sum_{l=x,y,z} \va{J_l^{(1)}}+\sum_{l=x,y,z} \va{J_l^{(2)}}\right]= 4(j_1 +j_2).\nonumber\\\label{eq:varvar}
\end{eqnarray}
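To spell out this step: the QFI of a pure state is four times the variance of the generator, and for a product state the variance of $J_l^{(1)}\pm J_l^{(2)}$ is the sum of the single-party variances. Hence, for each $l$
\begin{equation}
F_Q[\varrho,J_l^{(1)}\pm J_l^{(2)}]=4\va{J_l^{(1)}}+4\va{J_l^{(2)}},
\end{equation}
and summing over $l=x,y,z$ and using that spin-coherent states fulfill $\sum_{l=x,y,z}\va{J_l^{(n)}}=j_n$ gives \EQ{eq:varvar}.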
Due to the convexity of the QFI, the left-hand side of \EQ{eq:sep3F} cannot be larger than the right-hand side even for mixed states. $\ensuremath{
\blacksquare}$
A state violating the inequality given in \EQ{eq:sep3F} is more useful metrologically than a mixture of products of spin-coherent states, if we consider not a single metrological task, but the three tasks corresponding to the three QFI terms in \EQ{eq:sep3F}.
Next, we will show how the violation of \EQ{eq:varjxjyjz_bipartite} implies metrological usefulness.
\begin{observation} For a bipartite quantum state
\begin{eqnarray}
&&8 \sum_{l=x,y,z} \vasq{(J_l^{(1)}+J_l^{(2)})}+\sum_{l=x,y,z} F_Q[\varrho,J_l^{(1)}-J_l^{(2)}]\nonumber\\
&&\quad\quad\quad \ge 12 (j_1+j_2)
\label{eq:varJxJyJzFQxyz}
\end{eqnarray}
holds. Here, $J_l^{(n)}$ for $l=x,y,z$ are spin operators acting on the two subsystems, and $j_n$ are the spins of the two parties.
As a consequence of \EQ{eq:varJxJyJzFQxyz}, states violating the entanglement condition in \EQ{eq:varjxjyjz_bipartite} are metrologically more useful than mixtures of products of spin-coherent states given in \EQ{eq:spincohsep}, which are a subset of separable states, for the combination of the three metrological tasks corresponding to the three QFI terms in \EQ{eq:varJxJyJzFQxyz}. For the case $j_1=j_2=1/2,$ this also means that they are more useful than separable states.
\end{observation}
{\it Proof. } We start from the uncertainty relations for the two parties
\begin{equation}
\va{J_x^{(n)}}+\va{J_y^{(n)}}+\va{J_z^{(n)}} \ge j_n,
\end{equation}
where $n=1,2.$ For pure states of spin-1/2 particles, the equality holds. Then, we need the fact that
\begin{eqnarray}
&&F_Q[\varrho,J_x^{(1)}-J_x^{(2)}]/4+\vasq{(J_y^{(1)}+J_y^{(2)})}+\vasq{(J_z^{(1)}+J_z^{(2)})}\nonumber\\
&&\quad\quad\quad \ge j_1+j_2\label{eq:FQxminusVaryplusVarzplus}
\end{eqnarray}
is valid for any quantum state. This can be seen knowing that it is true for pure states, i.e.,
\begin{eqnarray}
\label{eq:varjxjyjz_bipartite2}
&&\vasq{(J_x^{(1)}-J_x^{(2)})}+\vasq{(J_y^{(1)}+J_y^{(2)})}+\vasq{(J_z^{(1)}+J_z^{(2)})}\nonumber\\
&&\quad\quad\quad \ge j_1+j_2,
\end{eqnarray}
which can be proved similarly to \EQ{eq:varjxjyjz_bipartite}. Then, the mixed state condition follows from ideas of \SEC{sec:3varfisher}. Using \EQ{eq:FQxminusVaryplusVarzplus}, and all the inequalities obtained from it after permuting $x,$ $y$ and $z,$ and adding these inequalities, we arrive at \EQ{eq:varJxJyJzFQxyz}.
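Explicitly, summing \EQ{eq:FQxminusVaryplusVarzplus} and its two cyclic permutations, each variance term appears twice, which yields
\begin{equation}
\frac{1}{4}\sum_{l=x,y,z}F_Q[\varrho,J_l^{(1)}-J_l^{(2)}]+2\sum_{l=x,y,z}\vasq{(J_l^{(1)}+J_l^{(2)})}\ge 3(j_1+j_2).
\end{equation}
Multiplying both sides by four gives \EQ{eq:varJxJyJzFQxyz}.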
Let us see the second part of the Observation. If \EQ{eq:varjxjyjz_bipartite} is violated, then based on \EQ{eq:varJxJyJzFQxyz}
\begin{equation}
\sum_{l=x,y,z}F_Q[\varrho,J_l^{(1)}-J_l^{(2)}] > 4(j_1+j_2),
\end{equation}
must hold, since the violation of \EQ{eq:varjxjyjz_bipartite} means that $\sum_{l=x,y,z}\vasq{(J_l^{(1)}+J_l^{(2)})}<j_1+j_2.$ We know that for a mixture of products of spin-coherent states the inequality given in \EQ{eq:sep3F} holds. Thus, the quantum states violating the entanglement condition given in \EQ{eq:varjxjyjz_bipartite} are more useful for metrology than states of the form
\EQ{eq:spincohsep}.
$\ensuremath{
\blacksquare}$
Let us examine \EQ{eq:varJxJyJzFQxyz} for SU(2) singlet states.
For such states,
\begin{equation}
\exs{(J_l^{(1)}+J_l^{(2)})^2}=0
\end{equation}
for $l=x,y,z.$ Hence, for such states the first sum in \EQ{eq:varJxJyJzFQxyz} is zero, and
\begin{equation}
\sum_{l=x,y,z}F_Q[\varrho,J_l^{(1)}-J_l^{(2)}]\ge 12(j_1+j_2).
\end{equation}
Hence, singlet states violate \EQ{eq:sep3F} with the choice of ``$-$'' for all three terms.
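As a concrete check, for the two-qubit singlet $(\ket{01}-\ket{10})/\sqrt{2}$ we have $\exs{J_l^{(n)}}=0$ and $\exs{J_l^{(1)}J_l^{(2)}}=-1/4,$ hence
\begin{equation}
F_Q[\varrho,J_l^{(1)}-J_l^{(2)}]=4\vasq{(J_l^{(1)}-J_l^{(2)})}=4\left(\tfrac14+\tfrac14+\tfrac12\right)=4
\end{equation}
for $l=x,y,z.$ Thus, the three QFI terms sum to $12=12(j_1+j_2),$ while the bound in \EQ{eq:sep3F} is $4(j_1+j_2)=4.$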
Singlets are invariant under Hamiltonians of the type
\begin{equation}
H_{0}=B_0(J_l^{(1)}+J_l^{(2)}),
\end{equation}
which describes the effect of homogeneous magnetic fields, where $B_0$ is a constant proportional to the strength of the homogeneous magnetic field. However, singlet states are sensitive to field gradients \cite{Cable2010Parameter,Urizar-Lanz2013Macroscopic,Behbood2014Generation}.
\section{Conclusions}
We studied various relations obtained from the Schr\"odinger-Robertson uncertainty relation after an optimization over all possible decompositions of the density matrix. Using convex roofs over decompositions, we rederived the inequality presented in \REF{Frowis2015Tighter}, and gained insights concerning the Cram\'er-Rao bound. We also used concave roofs to obtain improvements on the Robertson-Schr\"odinger uncertainty relation. Finally, using similar techniques, we introduced inequalities with variances and the QFI. These techniques might also make it possible to obtain inequalities for variances and the QFI from further inequalities for variances \cite{Dammeier2015UncertaintyNJP}.
Independently from our work, the convex-roof property of the QFI has been used to derive uncertainty relations by Chiew and Gessner \cite{Chiew2021ImprovingB}.
\acknowledgments
We thank I. Apellaniz, R. Demkowicz-Dobrza\'nski, P. Hyllus, O. G\"uhne, M. Kleinmann, J. Ko\l ody\'nski, J. Siewert, A. Smerzi, Sz. Szalay, R. Tr\'enyi, and G. Vitagliano for stimulating discussions. We acknowledge the support of the EU (COST Action CA15220, QuantERA CEBBEC, QuantERA MENTA), the Spanish MCIU (Grant No. PCI2018-092896), the Spanish Ministry of Science, Innovation and Universities and the European Regional Development Fund FEDER through Grant No. PGC2018-101355-B-I00 (MCIU/AEI/FEDER, EU), the Basque Government (Grant No. IT986-16), and the National Research, Development and Innovation Office NKFIH (Grant No. K124351, No. KH129601). We thank the "Frontline" Research Excellence Programme of the NKFIH (Grant No. KKP133827). G.T. is thankful for a Bessel Research Award from the Humboldt Foundation.
\appendix
\section{Derivation of \EQ{eq:Heisenberg2}}
\label{App:A}
We derive \EQ{eq:Heisenberg2} using the fact that the Robertson-Schr\"odinger inequality given in \EQ{eq:RS1} holds for all the $\varrho_k$ components.
Let us consider the inequality
\begin{equation} \label{eq:ineq}
\left(\sum_k p_k a_k \right)\left(\sum_k p_k b_k \right)\ge\left(\sum_k p_k \sqrt{a_k b_k}\right)^2,
\end{equation}
where $a_k,b_k\ge0.$ It can be proved as follows. First, rewrite it as
\begin{equation}
\sum_{k,l} p_k p_l (a_k b_l + a_l b_k) \ge \sum_{k,l} p_k p_l 2\sqrt{a_k a_l b_l b_k}.
\end{equation}
Term by term, the left-hand side is larger than or equal to the right-hand side, since $(\sqrt{a_k b_l}-\sqrt{a_l b_k})^2\ge0.$ If additionally
\begin{equation}
a_k b_k\ge c_k^2 \label{eq:abc}
\end{equation}
holds for all $k$ then we arrive at
\begin{equation} \label{eq:ineq2}
\left(\sum_k p_k a_k \right)\left(\sum_k p_k b_k \right)\ge\left(\sum_k p_k \vert c_k \vert \right)^2.
\end{equation}
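We also note that \EQ{eq:ineq} is simply the Cauchy-Schwarz inequality applied to the vectors with components $\sqrt{p_k a_k}$ and $\sqrt{p_k b_k},$
\begin{equation}
\left(\sum_k \big(\sqrt{p_k a_k}\big)^2 \right)\left(\sum_k \big(\sqrt{p_k b_k}\big)^2 \right)\ge\left(\sum_k \sqrt{p_k a_k}\sqrt{p_k b_k} \right)^2.
\end{equation}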
Note that \EQ{eq:ineq}, and hence \EQ{eq:ineq2} can be saturated only if
\begin{eqnarray}
a_k&=&a_l,\nonumber\\
b_k&=&b_l,
\end{eqnarray}
hold for all $k,l.$ Finally, in order to have equality in \EQ{eq:ineq2}, we also need that \EQ{eq:abc} is saturated for all $k.$ In this case, all $c_k$ must be equal to each other.
The inequality in \EQ{eq:Heisenberg2} can be derived from the relation in \EQ{eq:ineq2} knowing that the uncertainty relation given in \EQ {eq:RS1} holds for the $\varrho_k$ components in a decomposition given in \EQ{eq:rhodecomp}. We need to introduce $a_k=\va{A}_{\varrho_k},$ $b_k=\va{B}_{\varrho_k},$ and $c_k=\frac{1}{2}L_{\varrho_k}.$ If we use an inequality analogous to \EQ{eq:Heisenberg2} for pure-state decompositions given in \EQ{decomp} then we need $a_k=\va{A}_{\psi_k},$ $b_k=\va{B}_{\psi_k},$ and $c_k=\frac{1}{2}L_{\psi_k}.$
\section{Numerical calculation of concave roofs}
\label{sec:numerical}
In this appendix, we will discuss how to compute concave roofs numerically by brute-force optimization, and we describe a simple numerical method to find such bounds. Our method is similar to the one in \REF{Rothlisberger2009Numerical}, as it is also based on the purification of the mixed state; the statements presented there also hold for concave roofs, after trivial changes.
In order to obtain concave roofs, we have to carry out a numerical optimization over all decompositions of the density matrix. First, let us consider decompositions into pure states given in \EQ{decomp}. Let us define the purification of $\varrho$ \cite{Hughston1993AComplete}
\begin{equation}
\ket{\Psi_p}=\sum_k \sqrt{p_k} \ket{\psi_k}_S \otimes \ket{k}_A,
\end{equation}
where $S$ denotes the system and $A$ denotes the ancilla. For this state,
\begin{equation}
{\rm Tr}_A(\ketbra{\Psi_p})=\varrho
\end{equation}
holds. One of the purifications that is easy to write is the one based on the eigendecomposition of the density matrix, and for that we need an ancilla that has the same size as the system. For other purifications, we might need an ancilla larger than the system. The dimension of the ancilla equals the number of pure subensembles we consider.
Since all purifications can be obtained from each other by a unitary acting on the ancilla, we arrive at the following. For any quantity $Q(\sigma),$ which is a function of a mixed state $\sigma,$ we can write the concave roof as an optimum over the decompositions as
\begin{eqnarray}
&&\max_{\{p_k,\ket{\psi_k}\}} \sum_k p_k Q(\ketbra{\psi_k})\nonumber\\
&&\quad\quad=\max_{U_A} \sum_k \langle v_k \vert v_k \rangle Q(\ketbra{v_k}/\langle v_k \vert v_k \rangle),\quad\label{eq:supremum}
\end{eqnarray}
where the maximization is over unitaries acting on the ancilla and we defined the unnormalized vectors as
\begin{equation}
\ket{v_k}={\bra{k}}_A U_A \ket{\Psi_p}.
\end{equation}
Note that one can show that
\begin{equation}
\varrho=\sum_k \ketbra{v_k}.
\end{equation}
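Indeed,
\begin{equation}
\sum_k \ketbra{v_k}=\sum_k {\bra{k}}_A U_A \ketbra{\Psi_p} U_A^\dagger\ket{k}_A={\rm Tr}_A\big(U_A\ketbra{\Psi_p}U_A^\dagger\big)={\rm Tr}_A\big(\ketbra{\Psi_p}\big)=\varrho,
\end{equation}
where the second-to-last equality holds since $U_A$ acts only on the ancilla, and the partial trace over the ancilla is invariant under unitaries acting on the ancilla.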
These ideas can be extended to mixed-state decompositions given in \EQ{eq:rhodecomp} as follows. Similarly to \SEC{sec:RS}, we consider not only pure-state decompositions, but also mixed-state decompositions in which the mixed components are mixtures of some of the $\vert v_k\rangle.$
We can extend this method to optimize over all mixed-state decompositions as follows
\begin{eqnarray}
&&\max_{\{K_l\}} \max_{\{p_l,\varrho_l\}} \sum_l p_l Q(\varrho_l)\nonumber\\
&&\quad\quad=\max_{\{K_l\}} \max_{U_A} \sum_l {\rm Tr}(\sigma_l) Q[\sigma_l/{\rm Tr}(\sigma_l)],\quad\label{eq:supremum2}
\end{eqnarray}
where unnormalized states are
\begin{equation}
\sigma_l=\sum_{k\in K_l} {\bra{k}}_A U_A \ket{\Psi_p}\bra{\Psi_p} U_A^\dagger\ket{k}_A.
\end{equation}
The probabilities and the normalized states of the decomposition are given as
\begin{equation}
p_l={\rm Tr}(\sigma_l),\quad\quad\quad\varrho_l=\sigma_l/p_l.
\end{equation}
Here the basis states are distributed into sets $K_l.$ For instance, $K_1=\{1\},$ $K_2=\{2\},$ and $K_3=\{3\}$ corresponds to looking for a pure-state decomposition.
$K_1=\{1,2\}$ and $K_2=\{3\}$ corresponds to looking for a mixture of a rank-2 mixed state and a pure state. In \FIG{fig:rs}(b), the results obtained using the method above are shown for the case when both the system and the ancilla have dimension $d=3.$
Looking for the unitary that leads to the maximum can be done with a multivariable search. We developed a simple algorithm based on a random search, and improving the best random guess by small local changes. The local changes are also random and they are accepted if they increase the quantity to be maximized. A computer program based on such an algorithm is incorporated in the newest version of the QUBIT4MATLAB package \cite{*[{The name of the functions relevant for this publication are {\tt concroof.m} and {\tt example\_concroof.m}, respectively. The program package is available at MATLAB CENTRAL at {\tt http://www.mathworks.com/matlabcentral/}. The 3.0 version of the package is described in }] [{.}] Toth2008QUBIT4MATLAB}. Such random optimization has already been used to look for the maximum of an operator expectation value for separable states in the same program package.
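For illustration, a minimal Python sketch of the pure-state-decomposition search of \EQ{eq:supremum} is given below. It is not the QUBIT4MATLAB routine itself; the function names, the random-search parameters, and the choice of $Q$ as the variance of $\sigma_z/2$ for a single qubit are ours, and the value returned is only a lower bound on the concave roof, coming from the best decomposition found.
\begin{verbatim}
import numpy as np
from scipy.linalg import expm

def purification(rho, d_anc):
    # |Psi> = sum_k sqrt(p_k) |psi_k>_S |k>_A from the eigendecomposition of rho
    vals, vecs = np.linalg.eigh(rho)
    psi = np.zeros(rho.shape[0] * d_anc, dtype=complex)
    for k in range(rho.shape[0]):
        psi += np.sqrt(max(vals[k], 0.0)) * np.kron(vecs[:, k], np.eye(d_anc)[k])
    return psi

def decomposition_average(psi, U_A, Q, d, d_anc):
    # sum_k <v_k|v_k> Q(|v_k><v_k| / <v_k|v_k>)  with  |v_k> = <k|_A U_A |Psi>
    m = (np.kron(np.eye(d), U_A) @ psi).reshape(d, d_anc)
    total = 0.0
    for k in range(d_anc):
        v = m[:, k]
        w = np.vdot(v, v).real
        if w > 1e-12:
            total += w * Q(np.outer(v, v.conj()) / w)
    return total

def concave_roof_lower_bound(rho, Q, d_anc, trials=200, steps=2000, eps=0.1):
    # random search over unitaries on the ancilla, improved by small local changes
    rng = np.random.default_rng()
    d = rho.shape[0]
    psi = purification(rho, d_anc)
    def rand_u(scale):
        h = rng.normal(size=(d_anc, d_anc)) + 1j * rng.normal(size=(d_anc, d_anc))
        return expm(1j * scale * (h + h.conj().T) / 2)
    best_U = np.eye(d_anc)
    best = decomposition_average(psi, best_U, Q, d, d_anc)
    for _ in range(trials):            # random restarts
        U = rand_u(np.pi)
        val = decomposition_average(psi, U, Q, d, d_anc)
        if val > best:
            best, best_U = val, U
    for _ in range(steps):             # accept local changes that increase the value
        U = rand_u(eps) @ best_U
        val = decomposition_average(psi, U, Q, d, d_anc)
        if val > best:
            best, best_U = val, U
    return best

# Example: concave roof of the variance of sigma_z/2 for a mixed qubit state.
sz = np.diag([0.5, -0.5])
Q = lambda s: (np.trace(s @ sz @ sz) - np.trace(s @ sz) ** 2).real
rho = 0.7 * np.diag([1.0, 0.0]) + 0.3 * np.full((2, 2), 0.5)
print(concave_roof_lower_bound(rho, Q, d_anc=2))
\end{verbatim}
Extending the sketch to mixed-state decompositions amounts to grouping the columns of the reshaped state into the sets $K_l$ before evaluating $Q,$ as in \EQ{eq:supremum2}.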
\REFL{Toth2015Evaluating} presents a method based on semidefinite programming that provides good upper bounds on concave roofs, but it works only for small systems of a couple of qubits. The result of this procedure is larger than or equal to the true bound and thus can be used to evaluate whether the bound found with the brute-force search is optimal.
Calculating the convex roof is similar, only the maximization has to be replaced by minimization in \EQ{eq:supremum}.
\end{document}
\begin{document}
\title[The homological slice spectral sequence]{The homological slice spectral sequence in motivic and Real bordism}
\author{Christian Carrick}
\address{Mathematical Institute, Utrecht University, Utrecht, 3584 CD, the Netherlands}
\email{[email protected]}
\author{Michael A. Hill}
\address{University of California Los Angeles, Los Angeles, CA 90095}
\email{[email protected]}
\thanks{This material is based upon work supported by the National Science Foundation under Grant No. 2105019}
\author{Douglas C. Ravenel}
\address{Department of Mathematics University of Rochester, Rochester, NY 14627}
\email{[email protected]}
\begin{abstract}
For a motivic spectrum $E\in \mathcal{SH}(k)$, let $\Gamma(E)$ denote the global sections spectrum, where $E$ is viewed as a sheaf of spectra on $\mathrm{Sm}_k$. Voevodsky's slice filtration determines a spectral sequence converging to the homotopy groups of $\Gamma(E)$. In this paper, we introduce a spectral sequence converging instead to the mod 2 homology of $\Gamma(E)$ and study the case $E=BPGL\langle m\rangle$ for $k={\mathbb R}$ in detail. We show that this spectral sequence contains the $\mathcal{A}_*$-comodule algebra $\mathcal{A}_*\square_{\mathcal{A}(m)_*}{\mathbb F}_2$ as permanent cycles, and we determine a family of differentials interpolating between $\mathcal{A}_*\square_{\mathcal{A}(0)_*}{\mathbb F}_2$ and $\mathcal{A}_*\square_{\mathcal{A}(m)_*}{\mathbb F}_2$. Using this, we compute the spectral sequence completely for $m\le 3$.
In the height 2 case, the Betti realization of $BPGL\langle 2\rangle$ is the $C_2$-spectrum $BP_{\mathbb R}\langle 2\rangle$, a form of which was shown by Hill and Meier to be an equivariant model for $\mathrm{tmf}_1(3)$. Our spectral sequence therefore gives a computation of the comodule algebra $H_*\mathrm{tmf}_0(3)$. As a consequence, we deduce a new ($2$-local) Wood-type splitting
\[\mathrm{tmf}\wedge X\simeq \mathrm{tmf}_0(3)\]
of $\mathrm{tmf}$-modules predicted by Davis and Mahowald, for $X$ a certain 10-cell complex.
\end{abstract}
\maketitle
\tableofcontents
\section{Introduction}
The classical truncated Brown--Peterson spectrum $BP\langle m\rangle$ admits an action by the cyclic group of order 2, via the complex conjugation action on complex bordism, $MU$. The fixed points admit familiar geometric models for small heights $m$ and give higher height analogues of connective real $K$-theory $\mathrm{ko}$. We give a spectral sequence converging to the homology of these fixed point spectra by lifting the calculation to a simpler one in the ${\mathbb R}$-motivic stable homotopy category.
\subsection{Motivation}
The chromatic approach to stable homotopy gives a method to compute the stable homotopy groups of spheres via a step-by-step procedure. This procedure comes from the height filtration on the moduli stack of formal groups, mirrored in stable homotopy by Bousfield localization. The chromatic convergence theorem of Hopkins--Ravenel \cite{ravorange} states that the $p$-local sphere spectrum may be recovered as the limit of its chromatic tower
\[S^0_{(p)}\simeq\varprojlim \big(\cdots\to L_nS^0\to L_{n-1}S^0\to\cdots\to L_0S^0\big)\]
where $L_n$ denotes Bousfield localization at a height $n$ Morava $E$-theory $E(k,\Gamma)$, for $\Gamma$ a height $n$ formal group over a perfect field $k$ of characteristic $p$. The spectrum $L_nS^0$ is built in finitely many steps from the $K(i)$-local spheres $L_{K(i)}S^0$ for $i\le n$, via chromatic fracture squares. The spectrum $L_{K(n)}S^0$, in turn, admits a description as a homotopy fixed-point spectrum
\[L_{K(n)}S^0\simeq E(k,\Gamma)^{h\mathbb G_n}\]
by a theorem of Devinatz--Hopkins \cite{devhop}, where $\mathbb G_n=\mathrm{Aut}(\Gamma)$ is the Morava stabilizer group. This, in principle, reduces many questions in stable homotopy theory to understanding the $\mathbb G_n$-equivariant spectrum $E(k,\Gamma)$.
However, this is not easy to access in practice, even at small heights. It was an observation of Ravenel \cite{ravarf} and Hopkins--Miller \cite{hopmiller} that $L_{K(n)}S^0$ is well approximated by the fixed points of $E(k,\Gamma)$ at \textit{finite} subgroups of $\mathbb G_n$, and that these theories are more computable. These theories
\[EO_n(G):=E(k,\Gamma)^{hG}\]
are known as the Hopkins--Miller higher real $K$-theories.
The $EO_n(G)$'s have proven very effective at carrying rich information in an accessible way. For instance, studying these theories led to the solution of the Kervaire invariant problem, at primes $p\ge5$ by Ravenel \cite{ravarf} and at $p=2$ by Hill--Hopkins--Ravenel \cite{HHR}. At the prime $p=3$, they were used by Goerss--Henn--Mahowald--Rezk \cite{ghmr} to produce an explicit finite resolution of the $K(2)$-local sphere, giving a conceptual framework to the calculation of $\pi_*L_{K(2)}S^0$ by Shimomura--Wang \cite{SW}. We refer the reader also to the introduction of \cite{BHLSZ} for a nice discussion of these theories and their history.
\subsubsection*{Connective models} Working with the $EO_n(G)$'s in practice is limited by the size of these theories. The $EO_n(G)$'s are non-connective, and their mod $p$ homology vanishes, making it impossible to understand these theories directly from the point of view of the Adams spectral sequence. Moreover, the homotopy groups of the $EO_n(G)$'s are not degreewise finitely generated, so passing to the connective cover does not give a substantial improvement. A key feature of the study of the $EO_n(G)$'s is thus a search for good connective models $eo_n(G)$ with strong finiteness properties.
At heights $h=2^{n-1}m$ with $p=2$, the group $\mathbb G_h$ contains a subgroup isomorphic to $G=C_{2^n}$, and Beaudry--Hill--Shi--Zeng defined and studied candidates for good connective models of $EO_h(G)$ in this case via Real bordism \cite{BHSZ}. These theories---known as the $BP^{((G))}\langle m\rangle$'s---are particularly accessible as they arise as the fixed points of a $G$-spectrum whose action comes from geometry, as opposed to the action on $E(k,\Gamma)$, which is defined via obstruction theory. An important manifestation of this is that the $BP^{((G))}\langle m\rangle$'s have a well understood slice filtration, in the sense of Hill--Hopkins--Ravenel \cite{HHR}.
In upcoming work \cite{CH}, the first and second named authors use the slice filtration to establish the desired finiteness properties of the $BP^{((G))}\langle m\rangle$'s, showing that their fixed points are fp spectra of type $m|G|/2$, in the sense of Mahowald--Rezk \cite{MR}. For explicit calculations, however, we would like to know the comodules $H_*BP^{((G))}\langle m\rangle^G$, and the main aim of the present paper is to initiate such homology computations by exploring the case $G=C_2$.
\subsubsection*{The $G=C_2$ case} The cyclic group of order 2 is often used as a test case for computations in equivariant homotopy in general and for the study of the $EO_n(G)$'s and $BP^{((G))}\langle m\rangle$'s in particular. Quite a lot is known in this case; for instance, the homotopy fixed point spectral sequence computing $\pi_*EO_n(C_2)$ was completely computed at all heights by Hahn--Shi in \cite{hahnshi}. Li--Shi--Wang--Xu showed that a large class of elements in the homotopy groups of spheres is detected by the $EO_n(C_2)$'s \cite{LSWX}. The $EO_n(C_2)$'s were studied extensively by Kitchloo--Wilson; they used these theories to prove new nonimmersion results for real projective spaces \cite{KW1} \cite{KW2}, demonstrating that these theories are useful far beyond their role as a test case.
For $G=C_2$, the connective models $BP^{((G))}\langle m\rangle$ were first defined by Hu--Kriz \cite{HK} and are also known as the $BP_{\mathbb R}\langle m\rangle$'s, as they model the classical truncated Brown--Peterson spectra $BP\langle m\rangle$ with their $C_2$-action via complex conjugation. At small heights $m$, the $BP_{\mathbb R}\langle m\rangle$'s and their fixed points admit familiar geometric models:
\begin{table}[!htbp]
\begin{tabular}{l | c | c }
$m$& $BP_{\mathbb R}\langle m\rangle$ & $BP_{\mathbb R}\langle m\rangle^{C_2}$\\
\hline\hline
-1&$H\underline{{\mathbb F}_2}$&$H{\mathbb F}_2$\\
0&$H\underline{{\mathbb Z}}_{(2)}$&$H{\mathbb Z}_{(2)}$\\
1&$\mathrm{k}_{\mathbb R}$&$\mathrm{ko}$\\
2&$\mathrm{tmf}_1(3)$&$\mathrm{tmf}_0(3)$
\end{tabular}
\end{table}
Here $\mathrm{k}_{\mathbb R}$ denotes connective Real $K$-theory in the sense of Atiyah \cite{atiyah}, and $\mathrm{tmf}_1(3)$ and $\mathrm{tmf}_0(3)$ are spectra of topological modular forms with level structure (see \cite{hilllawson} and \cite{hillmeier}).
\subsubsection*{${\mathbb R}$-motivic homotopy} For a real variety $X$, its set of complex points $X(\mathbb C)$ comes equipped with the complex analytic topology, with respect to which the action of $C_2=\mathrm{Gal}(\mathbb C/\mathbb R)$ is continuous. This extends to a symmetric monoidal left adjoint
\[\beta: \mathcal{SH}({\mathbb R})\to \mathcal Sp^{C_2}\]
from the ${\mathbb R}$-motivic stable homotopy category to the category of genuine $C_2$-spectra, called Betti realization. Real bordism and the $BP_{\mathbb R}\langle m\rangle$'s lift along $\beta$ to ${\mathbb R}$-motivic analogues, known as motivic bordism and the $BPGL\langle m\rangle$'s.
Consequently, we lift the calculation of $H_*BP_{\mathbb R}\langle m\rangle^{C_2}$ along $\beta$ to a simpler and more fundamental calculation in the ${\mathbb R}$-motivic stable homotopy category, and it is in this setting in which we do our computations. In the motivic setting, the role of fixed points $(-)^{C_2}$ is played by the global sections functor $\Gamma:\mathcal{SH}(k)\to \mathcal Sp$, the right adjoint to the unique colimit-preserving symmetric monoidal functor
\[i_*:\mathcal Sp\to \mathcal{SH}(k)\]
For $E\in \mathcal{SH}(k)$, $\Gamma(E)$ may be computed as the mapping spectrum $F(\mathrm{Spec}(k)_+,E)$ in $\mathcal{SH}(k)$ and thus is the global sections of $E$, viewed as a sheaf of spectra on $\mathrm{Sm}_k$, the category of smooth schemes over $\mathrm{Spec}(k)$.
The advantage of working motivically is that we may discard the so-called negative cone in our calculations. As we show in Section \ref{secequivariant}, the presence of the negative cone in the $C_2$-equivariant homology of a point results in serious complications. In Section \ref{sec2}, we give a method of comparing $C_2$-equivariant calculations of the kind studied here to the corresponding ${\mathbb R}$-motivic calculations, and we use this to show that the results must agree in non-positive weights. In practice, this means we may discard the negative cone and still recover the correct result in degrees $a+b\sigma$ for $b\le0$ and, in particular, in integer degrees.
\subsection{Main results}
\subsubsection*{Section 2} We begin by defining the homological slice spectral sequence (HSSS) in Section \ref{sec2}. This spectral sequence arises by smashing the slice tower of $E\in\mathcal{SH}(k)$ with the motivic spectrum $i_*H{\mathbb F}_2$, giving a spectral sequence of $\mathcal{A}_*$-comodules. We show that when $E$ is slice connective, this spectral sequence has strong vanishing lines and converges to the comodule $H_*\Gamma(E)$ (Proposition \ref{propvanishinglines}).
For our applications to the $BP_{\mathbb R}\langle m\rangle$'s, we compare the global sections functor $\Gamma$ and the genuine fixed points functor $(-)^{C_2}$, producing a natural transformation
\[\Gamma(E)\to\beta(E)^{C_2}\]
which we show to be an equivalence in many cases. In particular,
\begin{proposition}[Corollary \ref{cor:betaE}]
For all $m$, there is an equivalence of spectra
\[\Gamma(BPGL\langle m\rangle)\to BP_{\mathbb R}\langle m\rangle^{C_2}\]
\end{proposition}
\subsubsection*{Section 3} In \cite{BehrensShah}, Behrens--Shah lift the Tate square in $C_2$-equivariant homotopy along $\beta$ to an arithmetic square
\[
\begin{tikzcd}
E\arrow[r]\arrow[d]& E[\rho^{-1}]\arrow[d]\\
E^{\;\widehat{}}_\rho[\tau^{-1}]\arrow[r]&E^{\;\widehat{}}_\rho[\tau^{-1}][\rho^{-1}]
\end{tikzcd}
\]
for $E\in\mathcal{SH}({\mathbb R})$. In contrast to the Tate square, this is not usually a pullback, but we show that it becomes a pullback after applying $\Gamma(-)$ in many cases. For $E=i_*H{\mathbb F}_2\otimes BPGL$, we completely determine the effect of the arithmetic square on bigraded homotopy groups; the result in weight zero is as follows.
\begin{proposition}[Proposition \ref{proparithmeticsquarecomputation}]
On homotopy groups, the arithmetic square of $i_*H{\mathbb F}_2\otimes BPGL$ is given by
\[
\begin{tikzcd}
H_*\Gamma(BPGL)\arrow[r]\arrow[d]&\mathcal A_*\arrow[d,"\varphi"]&\xi_i\arrow[d,mapsto]\\
{\mathbb F}_2[z,\chi_i|i\ge1]^{\;\widehat{}}_{z}\arrow[r,hookrightarrow]&{\mathbb F}_2[z,\chi_i|i\ge1]^{\;\widehat{}}_{z}[z^{-1}]&\chi_iz^{2^i-1}+\chi_{i-1}z^{-1}
\end{tikzcd}
\]
with $|z|=-1$ and $|\chi_i|=2(2^i-1)$.
\end{proposition}
This leads to a somewhat explicit description of the $\mathcal{A}_*$-comodule $H_*\Gamma(BPGL)$ via the associated Mayer--Vietoris sequence (Theorem \ref{thmhomologybpr}). This corrects an error of Hu--Kriz, who gave similar formulas in \cite{BPO} (see Remark \ref{hukrizerror}). The $\rho$-local part of this arithmetic square is complicated by the following unexpected fact about the reduction map $BPGL\to M{\mathbb F}_2$ from the motivic Brown--Peterson spectrum to the mod 2 motivic Eilenberg--Maclane spectrum.
\begin{theorem}[Theorem \ref{thmtatefrob}]\label{mainthmtate}
The composite
\[H{\mathbb F}_2\simeq\Gamma(BPGL[\rho^{-1}])\to \Gamma(M{\mathbb F}_2[\rho^{-1}])\simeq\prod\limits_{i\ge0}\Sigma^i H{\mathbb F}_2\]
has components $Sq^i:H{\mathbb F}_2\to \Sigma^i H{\mathbb F}_2$.
Equivariantly, identifying $\Phi^{C_2}(H\underline{{\mathbb F}_2})$ with the connective cover of $H{\mathbb F}_2^{tC_2}$, the composite
\[H{\mathbb F}_2\simeq\Phi^{C_2}BP_{\mathbb R}\to \Phi^{C_2}H\underline{{\mathbb F}_2}\simeq\prod\limits_{i\ge0}\Sigma^i H{\mathbb F}_2\]
has components $Sq^i:H{\mathbb F}_2\to \Sigma^i H{\mathbb F}_2$.
\end{theorem}
We prove Theorem \ref{mainthmtate} as an application of Mahowald's theorem on $H{\mathbb F}_2$. The description of the map appearing here comes via comparison with the Tate-valued Frobenius of Nikolaus--Scholze \cite{NS}. We expect this fact to be of independent interest.
\subsubsection*{Section 4}
The HSSS for $BPGL\langle m\rangle$ has a straightforward $E_2$-page.
\begin{proposition}[Corollary \ref{corE_2description}]
The $E_2$-page of the HSSS for $BPGL\langle m\rangle$ is isomorphic, as an $\mathcal{A}_*$-comodule algebra, to
\[(\mathcal{A}_*\square_{\mathcal{A}(0)_*}{\mathbb F}_2)[\rho,x_1,\overline{v}_1,\ldots,\overline{v}_m]\]
where
\[\mathcal{A}_*\square_{\mathcal{A}(0)_*}{\mathbb F}_2\cong H_*H{\mathbb Z}\cong{\mathbb F}_2[\zeta_1^2,\zeta_2,\ldots]\]
The tridegrees are given as follows
\aln{
|\zeta_i|&=(2^i-1,0,1-2^i)\\
|\rho|&=(-1,-1,1)\\
|x_1|&=(0,-1,0)\\
|\overline{v}_i|&=(2(2^i-1),2^i-1,0)
}
where we use Adams trigrading $(t-s,w,s)$, representing (stem, weight, filtration). The classes $\rho$ and $\oldwidehat_i$ are comodule primitives, and
\[\psi(x_1)=1\otimes x_1+\xi_1\otimes \rho\]
\end{proposition}
Our main application of the results of Section \ref{secarithmeticsquare} on the arithmetic square is to determine a large class of permanent cycles in the HSSS for $BPGL\langle m\rangle$. This comes via the edge homomorphism
\[H_*\Gamma(BPGL\langle m\rangle)\to H_*\Gamma(P^0BPGL\langle m\rangle)=H_*H{\mathbb Z}\]
\begin{theorem}[Theorem \ref{edgethm}]\label{mainthmedge} In weight zero, the image of the edge homomorphism in the HSSS for $BPGL\langle m\rangle$ is precisely
\[\mathcal{A}_*\square_{\mathcal{A}(m)_*}{\mathbb F}_2\subset \mathcal{A}_*\square_{\mathcal{A}(0)_*}{\mathbb F}_2\]
\end{theorem}
The classes $x_1,\rho$, and $\overline{v}_i$ are all permanent cycles, so the subalgebra
\[(\mathcal{A}_*\square_{\mathcal{A}(m)_*}{\mathbb F}_2)[\rho,x_1,\overline{v}_1,\ldots,\overline{v}_m]\]
of $E_2$ consists of permanent cycles. We use the arithmetic square to identify also a class of permanent cycles in negative weights.
\begin{proposition}[Corollary \ref{corx_mpermanent}]
The classes defined inductively $x_0=\rho$, $x_1$ the class above, and
\[x_n=\sum\limits_{i=0}^{n-1}x_i\zeta_{n-i}^{2^i}\]
are permanent cycles in the HSSS for $BPGL\langle m\rangle$, for all $n$ and $m$. The tridegree of $x_n$ is $(2^n-2,-1,2-2^n)$, and the coaction is as follows
\[\psi(x_n)=\sum\limits_{i=0}^n\xi_i^{2^{n-i}}\otimes x_{n-i}\]
\end{proposition}
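For instance, unwinding the recursion gives
\[x_2=\rho\zeta_2+x_1\zeta_1^2,\qquad x_3=\rho\zeta_3+x_1\zeta_2^2+x_2\zeta_1^4,\]
both in the stated tridegrees.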
Heuristically, the $x_n$ classes may be thought of as indecomposable elements of the form $[\xi_n\rho]$. We determine a family of differentials in the HSSS for $BPGL\langle m\rangle$, which may be expressed in terms of the $x_n$'s. The classes $x_n$ appear as Massey products when taking homology with respect to these differentials.
\begin{theorem}[Theorem \ref{differentialsthm}]\label{mainthmdiff}
In the HSSS for $BPGL\langle m\rangle$, we have the differentials
\[d_{2^{i+1}-1}(\zeta_j^{2^{i+1-j}})=\overline{v}_{i}\rho^{2^i-1}\bigg(p_{j-1}\bigg(\frac{x_1}{\rho},\ldots,\frac{x_{j-1}}{\rho}\bigg)\bigg)^{2^{i+1-j}}\]
for all $1\le i\le m$ and $1\le j\le i+1$, where $p_j$ is the polynomial
\[\zeta_{j}=p_j(\xi_1,\ldots,\xi_j)\]
given by the inversion formulas in the Hopf algebra $\mathcal A_*$.
\end{theorem}
The classes $x_n$ are not divisible by $\rho$, but all of the $\rho$ exponents appearing are nonnegative when this expression is expanded. Using the heuristic $x_n=[\xi_n\rho]$, this expression may then be read more compactly as
\[d_{2^{i+1}-1}(\zeta_j^{2^{i+1-j}})=\overline{v}_{i}[\rho^{2^i-1}\zeta_{j-1}^{2^{i+1-j}}]\]
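Concretely, for $i=1$ (reading $p_0=1$) the formula gives
\[d_3(\zeta_1^2)=\overline{v}_1\rho,\qquad d_3(\zeta_2)=\overline{v}_1x_1,\]
which are the only differentials of Theorem \ref{mainthmdiff} occurring in the HSSS for $BPGL\langle 1\rangle$.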
As each $\zeta_j^{2^{i+1-j}}$ supports a nonzero $d_{2^{i+1}-1}$, its square is a nonzero cycle on the next page. This stops when $i=m+1$, thus interpolating between
\[\mathcal{A}_*\square_{\mathcal{A}(0)_*}{\mathbb F}_2={\mathbb F}_2[\zeta_1^2,\zeta_2,\ldots]\]
and
\[\mathcal{A}_*\square_{\mathcal{A}(m)_*}{\mathbb F}_2={\mathbb F}_2[\zeta_1^{2^{m+1}},\zeta_2^{2^m},\ldots,\zeta_{m+1}^2,\zeta_{m+2},\ldots]\]
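For example, for $m=1$ the spectral sequence interpolates between ${\mathbb F}_2[\zeta_1^2,\zeta_2,\zeta_3,\ldots]$ and
\[\mathcal{A}_*\square_{\mathcal{A}(1)_*}{\mathbb F}_2={\mathbb F}_2[\zeta_1^4,\zeta_2^2,\zeta_3,\zeta_4,\ldots],\]
the familiar description of $H_*\mathrm{ko}$.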
\subsubsection*{Section 5} These differentials are explicit but complicated due to the inversion formulas in $\mathcal{A}_*$, which makes computing homology with respect to these differentials quite difficult, though purely algebraic. This limits our understanding of the behavior of this spectral sequence in general beyond Theorems \ref{mainthmdiff} and \ref{mainthmedge}. However, we make the following conjecture.
\begin{conjecture}\label{conj}
All differentials in the HSSS for $BPGL\langle m\rangle$ are generated under the Leibniz rule by those in Theorem \ref{mainthmdiff}. In particular, the spectral sequence collapses on $E_{2^{m+1}}$.
\end{conjecture}
Given Conjecture \ref{conj}, computing $H_*\Gamma(BPGL\langle m\rangle)$ becomes the purely algebraic problem of computing homology with respect to these differentials, modulo comodule algebra extension problems. In Section \ref{seccomputations}, we verify this conjecture for $m\le 3$, and compute the corresponding spectral sequences completely.
At height 1, this recovers the classical computation of $H_*\mathrm{ko}$, without the use of the Wood cofiber sequence or knowledge of $\pi_*\mathrm{ko}$. At height 2, it is a theorem of Hill--Meier that the $C_2$-spectrum $\mathrm{tmf}_1(3)$ is a form of $BP_{\mathbb R}\langle 2\rangle$ \cite{hillmeier}, so our computation determines the $\mathcal{A}_*$-comodule algebra $H_*\mathrm{tmf}_0(3)$.
\begin{theorem}[Theorem \ref{thmt03}]\label{mainthmhtmf}
There is an isomorphism of $\mathcal{A}_*$-comodule algebras
\[H_*\mathrm{tmf}_0(3)\cong \mathcal{A}_*\square_{\mathcal{A}(2)_*}M_2\]
where $M_2$ is the $\mathcal{A}(2)_*$-comodule algebra given by the square zero extension ${\mathbb F}_2\{1\}\oplus \overline{M}_{2}$, and $\overline{M}_{2}$ is the $\mathcal{A}(2)_*$-comodule displayed below in Figure \ref{figM2}.
\end{theorem}
\begin{figure}
\caption{The $\mathcal{A}(2)_*$-comodule $\overline{M}_{2}$.}
\label{figM2}
\end{figure}
Prior to the work of Hill--Lawson \cite{hilllawson}, it was not known how to produce a spectrum $\mathrm{Tmf}_0(3)$ (and thereby a connective model $\mathrm{tmf}_0(3)$ of the periodic spectrum $\mathrm{TMF}_0(3)$ of topological modular forms with level structure) as the global sections of a derived stack. In \cite{davismahowald}, Davis and Mahowald proposed several hands-on definitions that were suitable for computation. In particular, they construct a certain 10-cell complex $X$ \cite[Theorem 2.1]{davismahowald} and propose $\mathrm{tmf}\wedge X$ as such a connective model.
Our computation in Theorem \ref{mainthmhtmf} allows us to join this definition with the derived algebro-geometric one of Hill--Lawson. We use the Adams spectral sequence to construct a map $X\to \mathrm{tmf}_0(3)$, resulting in the following Wood-type splitting.
\begin{theorem}[Corollary \ref{cortmfsplitting}]
There is a 2-local equivalence of $\mathrm{tmf}$-modules
\[\mathrm{tmf}_0(3)\simeq\mathrm{tmf}\wedge X \]
\end{theorem}
Rationally, $X$ has rank 4, corresponding to the fact that the map of stacks $\mathcal{M}_0(3)\to\mathcal{M}_{ell}$ is a 4-fold cover. Base changing, this also gives the corresponding splittings for $\mathrm{Tmf}_0(3)$ and $\mathrm{TMF}_0(3)$.
At height 3, there is no known familiar geometric model of $BP_{\mathbb R}\langle 3\rangle$, but our computation extends the program of computing $H_*eo_n(G)$ at $p=2$ to height 3 for the first time, while also demonstrating the staggering complexity of these computations at heights $>2$. We refer the reader to Section \ref{subsecn=3computation} for a description of the comodule algebra $M_3$.
\begin{theorem}[Theorem \ref{einftym=3}]
The $E_\infty$ page of the HSSS for $BPGL\langle 3\rangle$ is isomorphic to the square zero extension
\[(\mathcal{A}_*\square_{\mathcal{A}(3)_*}M_3)\oplus (\mathcal{A}_*\square_{\mathcal{A}(2)_*}\overline{M}_{2})\]
for $M_3$ an explicit $\mathcal{A}(3)_*$-comodule algebra of dimension 165, where $\overline{M}_{3}\cdot\overline{M}_{2}=0$, and $M_2$ is the comodule of Theorem \ref{mainthmhtmf}.
\end{theorem}
We could, in principle, go further to height 4 and beyond; we see no reason our methods should not extend. However, we are unable to give a general description of a comodule $M_m$ at height $m$, and already at height 3, it is a significant task to give a proper account of the computation.
\subsubsection*{Section 6} We finish in Section \ref{secequivariant} by defining and exploring the HSSS in equivariant stable homotopy. Whereas the slice spectral sequence of Hill--Hopkins--Ravenel \cite{HHR} has proven an effective tool to compute the homotopy groups of the fixed points $E^G$ of a $G$-spectrum $E$, this gives a way to compute the \textit{homology} of $E^G$.
When $G=C_2$, the HSSS for $BP_{\mathbb R}\langle m\rangle$ is closely related to the motivic spectral sequences studied in the body of the paper. However, the presence of the negative cone in the equivariant homology of a point results in some unexpected differences. We show in particular that the $RO(C_2)$-graded HSSS of $\mathrm{k}_{\mathbb R}$ has an exotic differential of the form
\[d_5(2u_{2\sigma}^{-1}\cdot \zeta_1^2\zeta_2)=\overline{v}_1^2\]
This differential leaves a class in the negative cone and kills a class in the positive cone.
\subsection{Acknowledgments}
The authors would like to thank Lennart Meier and Danny Shi for helpful conversations. The first and second-named authors would like to thank the Hausdorff Research Institute for Mathematics for its hospitality and support while some of this work was done. Our slice spectral sequence charts were created using Hood Chatham's {\tt spectralsequences} package, and our Adams spectral sequence charts were created using Bruner's {\tt ext} software.
\subsection{Notation and conventions}
\begin{enumerate}
\item We work at the prime $p=2$ in this paper, so homology $H_*(-)$ will always denote mod 2 homology $H_*(-;{\mathbb F}_2)$ and our Steenrod algebras are the 2-primary versions.
\item $\mathrm{Sm}_k$ denotes the category of smooth schemes over $\mathrm{Spec}(k)$, and $\mathcal{SH}(k)$ denotes the category of $k$-motivic spectra. $\mathcal{SH}(k)_{\mathrm{cell}}$ denotes the cellular category, the localizing subcategory generated by the bigraded spheres $S^{s,w}$.
\item $\pi_{s,w}^{\mathbb R}(-)$ denotes bigraded homotopy groups for an ${\mathbb R}$-motivic spectrum, where $s$ denotes the stem or topological degree, and $w$ denotes the weight.
\item $M{\mathbb Z}$ and $M{\mathbb F}_2$ are the integral and mod 2 ${\mathbb R}$-motivic homology spectra, so that $\pi_{*,*}^{\mathbb R} M{\mathbb Z}={\mathbb Z}[\rho,\tau^2]/(2\rho)$ and $\pi_{*,*}^{\mathbb R} M{\mathbb F}_2={\mathbb F}_2[\rho,\tau]$ where $|\rho|=(-1,-1)$ and $|\tau|=(0,-1)$.
\item $\mathcal{A}_*$ denotes the classical mod 2 dual Steenrod algebra, so that
\[\mathcal{A}_*={\mathbb F}_2[\xi_1,\xi_2,\ldots]\]
where $|\xi_i|=2^i-1$ are the usual Milnor generators, and $\zeta_i$ denotes the conjugate of $\xi_i$. $\mathcal{A}(m)_*$ denotes the quotient Hopf algebra
\[\mathcal{A}(m)_*={\mathbb F}_2[\xi_1,\ldots,\xi_{m+1}]/(\xi_1^{2^{m+1}},\ldots,\xi_{m+1}^2)\]
and
\[\mathcal{A}_*\square_{\mathcal{A}(m)_*}{\mathbb F}_2\cong{\mathbb F}_2[\zeta_1^{2^{m+1}},\zeta_2^{2^m},\ldots,\zeta_{m+1}^2,\zeta_{m+2},\ldots]\]
Dually, $\mathcal{A}$ denotes the mod 2 Steenrod algebra, $\mathcal{A}(m)$ the subalgebra generated by $Sq^{2^i}$ for $i\le m$, and
\[\mathcal{A}/\!/\mathcal{A}(m)=\mathcal{A}\otimes_{\mathcal{A}(m)}{\mathbb F}_2\]
\item $\mathcal{A}^{{\mathbb R}}$ denotes the ${\mathbb R}$-motivic mod 2 Steenrod algebra, and we use the following notation for generators
\[\mathcal{A}^{{\mathbb R}}_{*,*}=\pi_{*,*}^{\mathbb R} M{\mathbb F}_2[\overline{t}_1,\overline{t}_2,\ldots,\tau_0,\tau_1,\ldots]/(\tau_i^2=\tau \overline{t}_{i+1}+\rho\tau_{i+1}+\rho\tau_0\overline{t}_{i+1})\]
where $|\overline{t}_i|=(2(2^i-1),2^i-1)$ and $|\tau_i|=(2(2^i-1)+1,2^i-1)$. We use the following notation for the quotient Hopf algebra
\[\mathcal{E}_{*,*}^{{\mathbb R}}(m)=\pi_{*,*}^{\mathbb R} M{\mathbb F}_2[\tau_0,\tau_1,\ldots,\tau_m]/(\tau_i^2=\rho\tau_{i+1})\]
Moreover, $c(-)$ denotes Hopf conjugation in $\mathcal{A}^{{\mathbb R}}_{*,*}$.
\item $\mathcal Sp$ denotes the category of spectra, and $\mathcal Sp^G$ denotes the category of genuine $G$-spectra. For a $G$-spectrum $X$, $\pi_\star(X)$ denotes the $RO(G)$-graded homotopy groups of $X$.
\item We let $i_*:\mathcal Sp\to \mathcal{SH}(k)$ denote the unique symmetric monoidal colimit-preserving functor and $\Gamma(-)$ its right adjoint. $\beta:\mathcal{SH}({\mathbb R})\to \mathcal Sp^{C_2}$ denotes the $C_2$-equivariant Betti realization functor.
\item Our spectral sequences are Adams trigraded, so that a class in tridegree $(t-s,w,s)$ is in stem $t-s$, weight $w$, and filtration $s$. All of the charts drawn in Section \ref{seccomputations} are in weight $w=0$.
\end{enumerate}
\section{The slice spectral sequence for generalized homology}\label{sec2}
In this section we define the HSSS in motivic stable homotopy and establish its basic properties. In Section \ref{subsecmotivicslice}, we work at the level of generality of $\mathcal{SH}(k)$, where $k$ is a perfect field, and we use the very effective slice filtration of Spitzweck--Ostvaer \cite{Spitz}. This allows us to have strong convergence in wide generality, due to Morel's connectivity theorem. However, for the spectra we consider (namely, standard quotients of the algebraic cobordism spectrum $MGL$), the corresponding slice towers for the effective, cellular effective, very effective, and cellular very effective filtrations all coincide, as shown by Heard \cite{Heard}.
This filtration determines a tower of motivic spectra, and our spectral sequences arise via smashing these towers pointwise with an ordinary spectrum, using that $\mathcal{SH}(k)$ is tensored over $\mathcal Sp$. This construction is analogous to one construction of the classical Atiyah--Hirzebruch spectral sequence, where one does the same with the Postnikov tower of a spectrum.
In Section \ref{subsecbetti}, we specialize to $\mathcal{SH}_{\mathrm{cell}}({\mathbb R})$, the cellular subcategory of ${\mathbb R}$-motivic spectra, where we use the results of Behrens and Shah \cite{BehrensShah} on Betti realization to compare with $C_2$-equivariant stable homotopy.
\subsection{General slice towers}\label{subsecmotivicslice}
We begin with the very effective slice filtration, a modification of Voevodsky's slice filtration \cite{Voevodsky}, developed by Spitzweck--Ostvaer \cite{Spitz} and further studied by Bachmann \cite{Bachmann}. The reader will notice we double the usual grading conventions, so that our slice towers are run at half speed; we explain this below in Remark \ref{remarkdoublegradings}.
Following the discussion in \cite[Section 3]{Heard}, we let
\[\mathcal K_{2t}=\{\Sigma^{2a,a}\Sigma^{\infty}_+X;a\ge t,X\in\mathrm{Sm}_k\}\subset \mathcal{SH}(k)\]
and we set $\mathcal K_{2t+1}=\mathcal K_{2t}$. Letting $\mathcal{SH}(k)^{t,\mathrm{veff}}$ denote the full subcategory of $\mathcal{SH}(k)$ generated under colimits and extensions by $\mathcal K_t$, we have a filtration
\[\cdots\subset \mathcal{SH}(k)^{t+1,\mathrm{veff}}\subset \mathcal{SH}(k)^{t,\mathrm{veff}}\subset \mathcal{SH}(k)^{t-1,\mathrm{veff}}\subset\cdots\]
of $\mathcal{SH}(k)$, and we say $E$ is slice $\ge t$ if $E\in \mathcal{SH}(k)^{t,\mathrm{veff}}$. Associated to any $E\in \mathcal{SH}(k)$, we have a natural cofiber sequence
\[P_{t+1}E\to E\to P^tE\]
such that $P_{t+1}E\in \mathcal{SH}(k)^{t+1,\mathrm{veff}}$, and
\[\mathrm{Map}_{\mathcal{SH}(k)}(K,P^tE)\simeq*\]
for all $K\in \mathcal{SH}(k)^{t+1,\mathrm{veff}}$. We define the fiber $P^{t}_tE$ of the map
\[
P^t E\to P^{t-1}E
\]
to be the \(t\)-slice of \(E\). The slice filtration gives a natural tower of spectra under \(E\):
\[
\dots\to P^t E\to P^{t-1}E\to\dots.
\]
The colimit is always contractible and the limit is \(E\).
Now, given any \(K\in \mathcal{SH}(k)\), we can smash the slice tower for \(E\) with \(K\) to produce a new filtered motivic spectrum \(K\otimes P^\bullet E\). Applying (bigraded) homotopy groups, this gives a spectral sequence.
\begin{definition}\label{definition:generalizedslicess}
The \(K\)-homology slice spectral sequence for \(E\) is the spectral sequence associated to the filtered motivic spectrum \(K\otimes P^\bullet E\). We will generically denote this by
\[
E_{\ast}^{s,w,t}(E;K).
\]
In particular, the \(E_2\)-term is given by
\[
E_2^{s,w,t}(E;K)=K_{t-s,w}\big(P^t_t E\big).
\]
We use Adams grading $(t-s,w,s)$ so that the \(d_r\) differentials change tridegree by \((-1,0,r)\).
\end{definition}
\begin{remark}\label{remarkdoublegradings}
We have chosen to double the gradings to cohere with two closely related spectral sequences when $k={\mathbb R}$: slice spectral sequences in equivariant homotopy and the classical Atiyah--Hirzebruch spectral sequence. Applying Betti realization to the spectral sequences we consider recovers the analogous slice spectral sequences in $C_2$-equivariant homotopy (see Section \ref{secequivariant}), and if one base changes to $\mathbb C$ and inverts $\tau$, one recovers a classical Atiyah--Hirzebruch spectral sequence. Our choice of gradings recovers the standard grading conventions on these spectral sequences, in both cases.
\end{remark}
\begin{example}
When \(K=S^{0,0}\) is the sphere spectrum, this is the ordinary slice spectral sequence of $E$. When \(K=S^{0,0}[\rho^{-1}]\), this is the $\rho$-localized slice spectral sequence of $E$.
\end{example}
The ordinary slice spectral sequence converges strongly for any $E$, due to the Morel connectivity theorem \cite[Section 5.3]{Morel}. When we smash with an arbitrary motivic spectrum $K$, this is no longer guaranteed. For the following result, cf. \cite[Proposition 5.11]{Spitz}.
\begin{proposition}\label{propsliceconvergence}
Suppose $K$ is slice bounded below. Then, the \(K\)-homology slice spectral sequence of $E$ converges conditionally in the sense that the natural map
\[
K\otimes E\to \lim_{\leftarrow} (K\otimes P^\bullet E)
\]
induces an isomorphism on bigraded homotopy groups, and $\mathrm{colim}(K\otimes P^\bullet E)\simeq*$.
If, in addition, $E$ is slice bounded below, the spectral sequence converges strongly in the sense that
there are at most finitely many filtrations in which \(E_2\) is non-zero in any given stem.
\end{proposition}
\begin{proof}
The spectrum $\mathrm{colim }P^\bullet E$ is contractible as the categories $\mathcal{K}_t$ form a set of compact generators of $\mathcal{SH}(k)$. Smashing with $K$, we use that the smash product commutes with colimits.
One has a fiber sequence
\[\lim_{\leftarrow}(K\otimes P_{t+1}E)\to K\otimes E\to \lim_{\leftarrow}(K\otimes P^tE)\]
By assumption $K$ is slice $\ge n$ for some fixed $n$, so $K\otimes P_{t+1}E$ is slice $\ge n+t+1$ for all $t$. It follows from Morel's connectivity theorem that if $E\in \mathcal{SH}(k)^{t,\mathrm{veff}}$, then $\pi_{s,w}E=0$ for $s<t/2+w$. In a fixed weight $w$, therefore, $\pi_{s,w}(K\otimes P_{t+1}E)$ vanishes for $s<w+(n+t+1)/2$. This vanishing range for $s$ is strictly increasing in $t$, so the bigraded homotopy groups of the limit vanish. If $E$ is slice bounded below, then the same connectivity constraints imply that, in a fixed weight, only finitely many slices contribute to a given stem.
\end{proof}
\begin{remark}
Without the bound on \(K\), the result is much more delicate, even when \(E\) is \(0\)-connective, and the comparison map may fail to be an isomorphism. For example, when \(K=EGL(n)=\overline{v}_n^{-1}BPGL\langle n\rangle\) and \(E=MGL\), smashing the slice associated graded for \(E\) with \(K\) gives the zero spectrum.
\end{remark}
For our applications, both $K$ and $E$ will be slice connective, and this sharpens our convergence results to give strong vanishing lines.
\begin{proposition}\label{propvanishinglines}
If $E$ and $K$ are slice $\ge0$, the $K$-homology slice spectral sequence of $E$ in weight zero is a right half-plane spectral sequence concentrated between the lines $y=x$ and $y=-x$. That is, the groups
\[E_2^{s,0,t}(E;K)\]
vanish if $s>t-s$ or $t<0$.
\end{proposition}
\begin{proof}
If $E$ is slice $\ge0$, then $P^t_tE\simeq*$ for $t<0$, which gives the lower vanishing line. The upper vanishing line follows as above from Morel's connectivity theorem, which implies in particular that since $K\otimes P^t_tE$ is slice $\ge t$, $\pi_{t-s,0}(K\otimes P^t_tE)$ vanishes for $(t-s)<t/2$. This vanishing line is equivalent to \((t-s)<s\).
\end{proof}
The slice filtration is multiplicative, so we have the following.
\begin{proposition}
If \(K\) and \(E\) are both ring spectra, then this is a spectral sequence of algebras.
\end{proposition}
In fact, we have more structure that we can consider from naturality in \(K\). Since the spectral sequence arises from a filtration of \(E\) that is completely independent of \(K\), all of the structure commutes with \(K\)-cooperations.
\begin{proposition}
If \(K\) is a flat homology theory, then the \(K\)-homology slice spectral sequence for any \(E\) is a spectral sequence of \(K_{*,*} K\)-comodules. If \(E\) is a ring, this is a spectral sequence of \(K_{*,*} K\)-comodule algebras.
\end{proposition}
\begin{example}\label{example:bredonSS}
Let $k={\mathbb R}$ and $K=M{\mathbb F}_2$, the motivic mod 2 Eilenberg--Maclane spectrum. This gives a spectral sequence of $\mathcal A^{\mathbb R}_{*,*}$-comodules converging to the bigraded motivic homology of $E$ with $E_2$-page the bigraded motivic homology of the slice associated graded of $E$. If $E$ is a ring spectrum, this is a spectral sequence of $\mathcal A^{\mathbb R}_{*,*}$-comodule algebras.
\end{example}
\subsection{The homological slice spectral sequence (HSSS)}\label{subsechomologicalslice}
\subsubsection{Global sections homology}\label{subsubsecglobalsec} There is an essentially unique colimit-preserving symmetric monoidal functor
\[
i_\ast\colon \mathcal Sp\to\mathcal{\mathcal{SH}}(k).
\]
This functor admits a right adjoint
\[\Gamma(-):\mathcal{\mathcal{SH}}(k)\to\mathcal Sp\]
which sends a motivic spectrum $E$ to the mapping spectrum
\[F(S^{0,0},E)\]
Viewing $E$ as a sheaf of spectra on $\mathrm{Sm}_k$, since $S^{0,0}=\mathrm{Spec}(k)_+$, we may think of this as taking the global sections of $E$. We now define our main spectral sequence of interest.
\begin{definition}
For $E\in\mathcal{\mathcal{SH}}(k)$, the \textit{homological slice spectral sequence} (HSSS) of $E$ is the spectral sequence $E_*^{s,w,t}(E;i_*H{\mathbb F}_2)$ of Definition \ref{definition:generalizedslicess}.
\end{definition}
That is, we tensor the slice tower of $E$ with $i_*H{\mathbb F}_2$ and take the spectral sequence associated to the resulting tower. We begin with the following lemma, which follows immediately from the adjunction $i_*\dashv \Gamma$.
\begin{lemma}\label{lemma:homotopyofglobalsections}
For $E\in\mathcal{\mathcal{SH}}(k)$, we have an isomorphism of abelian groups
\[\pi_{s,w}E\cong \pi_s\Gamma(\Sigma^{0,-w}E)\]
When $E$ is a ring spectrum, this is an isomorphism of bigraded rings.
\end{lemma}
Since the unit is compact in $\mathcal{\mathcal{SH}}(k)$, $i_*$ preserves compact objects and hence $\Gamma(-)$ admits a right adjoint. In particular $\Gamma(-)$ preserves colimits, and we deduce the following projection formula.
\begin{proposition}
For any spectrum \(K\) and $E\in\mathcal{\mathcal{SH}}(k)$, we have
\[
\Gamma(i_\ast K\otimes E)\simeq K\otimes \Gamma(E)
\]
\end{proposition}
Applying Lemma \ref{lemma:homotopyofglobalsections} in the case that \(K=H{\mathbb F}_2\), we have the following.
\begin{corollary}
For any \(E\in\mathcal{\mathcal{SH}}(k)\), the homotopy groups of the spectrum \(\Gamma(i_\ast H{\mathbb F}_2\otimes E)\) are the mod \(2\) homology groups of \(\Gamma(E)\):
\[
(i_\ast H{\mathbb F}_2)_{*,0} E\cong H_\ast\Gamma(E)
\]
\end{corollary}
Since \(H{\mathbb F}_2\) is an \(E_\infty\)-ring spectrum, so is \(i_\ast H{\mathbb F}_2\). Since \(H{\mathbb F}_2\otimes H{\mathbb F}_2\) is a tensor product of free associative algebras generated by spheres, the same is true upon taking \(i_\ast\).
\begin{proposition}
We have an equivalence of associative \(i_\ast H{\mathbb F}_2\)-algebras
\[
i_\ast H{\mathbb F}_2^{\otimes 2}\simeq i_\ast H{\mathbb F}_2[\zeta_1,\dots],
\]
where just as classically, the topological degree of \(\zeta_i\) is \((2^i-1)\), and it is in weight 0.
\end{proposition}
In homotopy, the induced coaction on the \(\zeta_i\)'s is also the usual one, by functoriality. Put another way, the motivic spectrum \(i_\ast H{\mathbb F}_2\otimes E\) records all of the information of the mod \(2\) homology of \(\Gamma(E)\), even with the dual Steenrod coaction.
\begin{corollary}
The HSSS of $E$ is a spectral sequence of bigraded $\mathcal A_*$-comodules that converges to $H_*\Gamma(\Sigma^{0,-*}E)$. If $E$ is a ring spectrum, this is a spectral sequence of bigraded $\mathcal{A}_*$-comodule algebras.
\end{corollary}
\subsubsection{Global sections homology and motivic homology}\label{subsubseccanonicalmap}
The global sections functor is also lax monoidal, so given an \(E_\infty\)-monoid \(R\) in $\mathcal{\mathcal{SH}}(k)$, $\Gamma(R)$ is an \(E_\infty\)-ring spectrum. The counit of the adjunction $i_*\dashv \Gamma$ gives an \(E_\infty\)-map
\[
i_\ast \Gamma(R)\to R.
\]
It follows from Lemma \ref{lemma:homotopyofglobalsections} that when \(R=M{\mathbb Z}\) or \(R=M{\mathbb F}_2\), the motivic Eilenberg--Maclane spectra, the global sections spectra are \(H{\mathbb Z}\) and \(H{\mathbb F}_2\), respectively. We therefore have canonical \(E_\infty\)-maps
\[
i_\ast H{\mathbb Z}\to M{\mathbb Z}\text{ and }i_\ast H{\mathbb F}_2\to M{\mathbb F}_2
\]
We will use these to identify the slice associated graded in many cases.
Applying the functor $i_*$ to ordinary homotopy groups, we have a canonical map of algebras
\[
\mathcal A_*=\pi_{*}(H{\mathbb F}_2\otimes H{\mathbb F}_2)\xrightarrow{i_*}\pi_{*,*}
(i_*H{\mathbb F}_2\otimes i_*H{\mathbb F}_2)\to \pi_{*,*}(M{\mathbb F}_2\otimes M{\mathbb F}_2)=:\mathcal A^k_{*,*}\]
If the latter dual Steenrod algebra is flat over $\pi_{*,*}^k(M{\mathbb F}_2)$, we have a Hopf algebroid structure, and our map of algebras is compatible with the coactions in the following sense.
\begin{proposition}
If \(E\in\mathcal{\mathcal{SH}}(k)\), then we have a natural map
\[
H_*\Gamma(E)\to H_{*,*}(E;M{\mathbb F}_2)
\]
When $\mathcal A^k_{*,*}$ is flat, this is a map of $\mathcal A^k_{*,*}$-comodules via the map
\[
\mathcal{A}_\ast\to \mathcal A^k_{*,*}
\]
\end{proposition}
\subsection{Betti realization and the arithmetic square}\label{subsecbetti} In this section, we restrict our attention now to $k={\mathbb R}$ and work in the cellular context. That is, we work in the category $\mathcal{SH}_{\mathrm{cell}}({\mathbb R})$, the localizing subcategory of $\mathcal{SH}({\mathbb R})$ generated by the bigraded spheres $S^{s,w}$. Moreover, we implicitly work in the $2$-complete context everywhere, so that our $\mathcal{SH}_{\mathrm{cell}}({\mathbb R})$ stands for $\mathcal{SH}_{\mathrm{cell}}({\mathbb R})^{\;\widehat{}}_2$, and our $\mathcal Sp^{C_2}$ stands for $(\mathcal Sp^{C_2})^{\;\widehat{}}_2$.
There is a strong relationship between ${\mathbb R}$-motivic stable homotopy and $C_2$-equivariant stable homotopy, which comes via the Betti realization functor
\[\beta:\mathcal{SH}({\mathbb R})\to\mathcal Sp^{C_2}\]
If $X$ is a smooth scheme over $\mathrm{Spec}({\mathbb R})$, its set of complex points $X(\mathbb C)$ can be equipped with the complex analytic topology, and the action of $C_2=\mathrm{Gal}(\mathbb C/{\mathbb R})$ on $X(\mathbb C)$ is continuous. The functor $\beta$ is obtained by left Kan extension of $X\mapsto \Sigma^\infty_+X(\mathbb C)$ along the canonical functor $\mathrm{Sm}_{{\mathbb R}}\to \mathcal{SH}({\mathbb R})$.
Pushing and pulling computations along the functor $\beta$ has been incredibly fruitful (see \cite{DI}, \cite{BGI}, and \cite{GHIR} for example). There has therefore been an effort to make the connection provided by $\beta$ conceptually precise (see \cite{DI}, \cite{HellerOrmsby}, \cite{bachmann2}, and \cite{BehrensShah}). Behrens--Shah combined and extended these results to characterize $\beta$ as a localization functor on $\mathcal{SH}_{\mathrm{cell}}({\mathbb R})$. They showed, in particular, that after completing at a prime, the functor
\[\mathrm{Sing}:\mathcal Sp^{C_2}\to \mathcal{SH}_{\mathrm{cell}}({\mathbb R})\]
is a fully faithful right adjoint to $\beta$, and we may therefore regard $\mathcal Sp^{C_2}$ as a reflective subcategory of $\mathcal{SH}_{\mathrm{cell}}({\mathbb R})$ with reflection functor given by $\beta$ \cite[Theorem 1.12]{BehrensShah}.
On the other hand, $\mathcal Sp^{C_2}$ may be described in terms of pullback squares. For $E\in \mathcal Sp^{C_2}$, there is a commutative square
\[
\begin{tikzcd}
E\arrow[r]\arrow[d]&\tilde{E}C_2\otimes E\arrow[d]\\
F(E{C_2}_+,E)\arrow[r]&\tilde{E}C_2\otimes F(E{C_2}_+,E)
\end{tikzcd}
\]
known as the \textit{Tate square} of $E$, which is a pullback. Behrens--Shah \cite[Theorem 1.10]{BehrensShah} provided an extension of this construction to all $E\in \mathcal{SH}_{\mathrm{cell}}({\mathbb R})$, forming a commutative square
\[
\begin{tikzcd}
E\arrow[r]\arrow[d]& E[\rho^{-1}]\arrow[d]\\
E^{\;\widehat{}}_\rho[\tau^{-1}]\arrow[r]&E^{\;\widehat{}}_\rho[\tau^{-1}][\rho^{-1}]
\end{tikzcd}
\]
which we call the \textit{arithmetic square} of $E$. This square is \textit{not} in general a pullback, but the results of Behrens--Shah imply the following.
\begin{proposition}\label{proparithmeticsquare}
$E\in \mathcal{SH}_{\mathrm{cell}}({\mathbb R})$ is in the essential image of the fully faithful right adjoint
\[\mathrm{Sing}:\mathcal Sp^{C_2}\to \mathcal{SH}_{\mathrm{cell}}({\mathbb R})\]
if and only if the arithmetic square of $E$ is a pullback. In particular, the functor $\beta(-)$ may be regarded as the endofunctor of $\mathcal{SH}_{\mathrm{cell}}({\mathbb R})$ given by the pullback
\[
\begin{tikzcd}
\beta(E)\arrow[r]\arrow[d]& E[\rho^{-1}]\arrow[d]\\
E^{\;\widehat{}}_\rho[\tau^{-1}]\arrow[r]&E^{\;\widehat{}}_\rho[\tau^{-1},\rho^{-1}]
\end{tikzcd}
\]
\end{proposition}
This, in turn, describes the genuine fixed points functor $(-)^{C_2}:\mathcal Sp^{C_2}\to \mathcal Sp$ as a special case of the global sections functor of Section \ref{subsubsecglobalsec}.
\begin{proposition}\label{propfixedpointsglobalsec}
The genuine fixed points functor $(-)^{C_2}$ is equivalent to the composite $\Gamma\circ \mathrm{Sing}$.
\end{proposition}
The unit map for the reflection functor $\beta$ gives a natural transformation
\[\eta_E:\Gamma(E)\to \Gamma(\beta(E))\]
We will show that $\eta_E$ is an equivalence in many cases of interest; that is, the arithmetic square of $E$ becomes a pullback after applying $\Gamma(-)$ in these cases. This follows by induction up the slice tower of $E$, given the following base case.
\begin{proposition}\label{propbetaMZ}
The map
\[\Gamma(\Sigma^{s,w}M{\mathbb Z})\to\Gamma(\Sigma^{s,w}\beta(M{\mathbb Z}))\]
is an equivalence if $w>-2$. Equivalently, the map
\[\pi_{*,*}^{{\mathbb R}}M{\mathbb Z}\to\pi_{*,*}^{{\mathbb R}}\beta(M{\mathbb Z})\]
is an isomorphism in weights $<2$.
\end{proposition}
\begin{proof}
On the one hand, this is immediate from Lemma \ref{lemma:homotopyofglobalsections} and the fact that $\beta$ induces an isomorphism
\[\pi_{*,*}^{\mathbb R} M{\mathbb Z}\to\pi_{*,*}^{C_2}H\underline{{\mathbb Z}}\]
in weights $<2$ (see \cite{DI}).
On the other hand, we may argue directly. From Voevodsky's computation \cite{voe}
\[\pi_{*,*}^{\mathbb R} M{\mathbb Z}={\mathbb Z}[\tau^2,\rho]/(2\rho)\]
where $|\tau|=(0,-2)$ and $|\rho|=(-1,-1)$, one sees that applying bigraded homotopy groups to the arithmetic square of $M{\mathbb Z}$ gives the square
\[
\begin{tikzcd}
{\mathbb Z}[\tau^2,\rho]/(2\rho)\arrow[d]\arrow[r]&{\mathbb F}_2[\tau^2,\rho^{\pm}]\arrow[d]\\
{\mathbb Z}[\tau^{\pm 2},\rho]/(2\rho)\arrow[r]&{\mathbb F}_2[\tau^{\pm 2},\rho^{\pm}]
\end{tikzcd}
\]
The resulting Mayer--Vietoris sequence is exact in weights $w<2$, and the result now follows from Lemma \ref{lemma:homotopyofglobalsections} and Proposition \ref{proparithmeticsquare}.
\end{proof}
In particular, we also have that $\eta_E$ is an equivalence when $E$ is of the form $\Sigma^{2t,t}M{\mathbb Z}$ for $t\ge0$. If $E$ is slice connective with slices of this form, a similar argument to that of Proposition \ref{propsliceconvergence} gives the following.
\begin{corollary}\label{cor:betaE}
Suppose $E\in \mathcal{SH}_{\mathrm{cell}}({\mathbb R})$ has the property that, for all $t\ge0$,
\[P^{2t}_{2t}E=\bigoplus\limits_{I_t}\Sigma^{2t,t}M{\mathbb Z}\]
for some index set $I_t$, and that $P_{2t}^{2t}E$ vanishes for $t<0$. Then
\[\Gamma(\Sigma^{s,w}E)\to \Gamma(\Sigma^{s,w}\beta(E))\]
is an equivalence for $w>-2$. Equivalently, the induced map
\[\pi_{*,*}^{\mathbb R} E\to\pi_{*,*}^{\mathbb R}\beta(E)\]
is an isomorphism in weights $w<2$.
\end{corollary}
\begin{corollary}\label{corhomologygammaE}
Suppose $E\in \mathcal{SH}_{\mathrm{cell}}({\mathbb R})$ is as in Corollary \ref{cor:betaE}. Then
\[H_*\Gamma(\Sigma^{s,w}E)\to H_*\Gamma(\Sigma^{s,w}\beta(E))\]
is an isomorphism for $w>-2$. Equivalently, the map
\[\pi_{*,*}^{\mathbb R} (i_*H{\mathbb F}_2\otimes E)\to\pi_{*,*}^{\mathbb R} (i_*H{\mathbb F}_2\otimes\beta(E))=\pi_{*,*}^{{\mathbb R}}(\beta(i_*H{\mathbb F}_2\otimes E))\]
is an isomorphism in weights $w<2$.
\end{corollary}
In particular, combining Corollary \ref{corhomologygammaE} with Proposition \ref{proparithmeticsquare}, one may compute the bigraded homology of $\Gamma(E)$ in weights $w<2$ in terms of that of the motivic spectra
\begin{align*}
&\Gamma(E[\rho^{-1}])
&&\Gamma(E^{\;\widehat{}}_\rho[\tau^{-1}]) &&\Gamma(E^{\;\widehat{}}_\rho[\tau^{-1},\rho^{-1}])
\end{align*}
by applying the Mayer--Vietoris sequence. We will apply this in Section \ref{secarithmeticsquare} for $E=BPGL$, the $2$-local summand of the algebraic cobordism spectrum.
Recall there is a classifying map $L\to MGL_{*,*}^{{\mathbb R}}$ that is an isomorphism in bidegrees of the form $(2k,k)$, where $L\cong {\mathbb Z}[a_1,a_2,\ldots]$ is the Lazard ring, and $|a_i|=(2i,i)$ (see \cite{Heard} for this isomorphism and \cite{ravgreen} for the description of the Lazard ring). We say an $MGL$-module is a \textit{standard quotient of }$MGL$ if it is equivalent to the quotient of $MGL$ by some collection of polynomials that are $a_i$ modulo decomposables. Standard quotients of $MGL$ satisfy the hypotheses of Corollary \ref{cor:betaE}, by the following theorem of Hopkins--Morel and Levine--Tripathi \cite{levine}.
\begin{proposition}\label{propslicebp}
Let $I\subset L$ be the ideal generated by some collection of the $a_i$'s. Then the slice associated graded of $MGL/I$ is
\[M{\mathbb Z}[a_1,a_2,\ldots]/I:=M{\mathbb Z}[a_i\mid a_i\notin I]\]
\end{proposition}
Working $2$-locally, $MGL$ splits as a sum of shifts of the spectrum
\[BPGL:=MGL/(a_i|i\neq 2^k-1)\]
As is standard, we let $\overline{v}_i:=a_{2^i-1}$, and our computations with the HSSS will focus on the truncated versions
\[BPGL\langle m\rangle=BPGL/(\overline{v}_{m+1},\overline{v}_{m+2},\ldots)\]
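For example, $BPGL\langle 0\rangle=BPGL/(\overline{v}_{1},\overline{v}_{2},\ldots)$ is equivalent to $M{\mathbb Z}$ in our $2$-complete context, since killing the remaining $\overline{v}_i$ recovers the Hopkins--Morel equivalence $MGL/(a_1,a_2,\ldots)\simeq M{\mathbb Z}$ (see \cite{levine}), while $\mathrm{kgl}$ is a form of $BPGL\langle 1\rangle$ (see Section \ref{seccomputations}).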
\subsection{The edge homomorphism}\label{subsecedgehom}
For any \(E\in \mathcal{SH}_{\mathrm{cell}}({\mathbb R})\), we have a natural map of motivic spectra
\[
E\to P^k E,
\]
and so we can consider the induced map of slice towers. Since we have a map of filtered spectra, we have an induced map of spectral sequences. The slice spectral sequence for \(P^kE\) is exactly the truncation of the slice spectral sequence for \(E\) where we throw away all classes in degrees \((t-s,s)\) with \(t>k\).
There is a very important case of the truncated slice tower: the case \(k=0\). For standard quotients $E$ of $MGL$, Proposition \ref{propslicebp} implies that $P^0 E\simeq M{\mathbb Z}$. We therefore have a map of HSSS's
\[
E_\ast^{s,w,t}\big(E;i_*H{\mathbb F}_2\big)\to E_\ast^{s,w,t}\big(M{\mathbb Z};i_*H{\mathbb F}_2\big).
\]
The latter is concentrated on the line \(t=0\), and since this map arises from a map of filtered spectra, it gives rise to the edge homomorphism
\[
\pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes E)\to H_{*,*}^{{\mathbb R}}\big(i_*H{\mathbb F}_2;{\mathbb Z}\big).
\]
Again, this map is a map of \(\mathcal A^{{\mathbb R}}_{*,*}\)-comodule algebras. By definition, every class in the image of the edge homomorphism admits a lift along this map, so we have the following.
\begin{proposition}\label{propedgeispermanent}
The image of the edge homomorphism in the HSSS consists of permanent cycles.
\end{proposition}
\section{Homology of $\Gamma(BPGL\langle m\rangle)$ via the arithmetic square}\label{secarithmeticsquare}
We have seen in the previous section that computing the homology groups $H_*\Gamma(E)$ amounts to computing the motivic homotopy groups
\[\pi_{*,0}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes E)\]
and that, when $E$ is as in Proposition \ref{propslicebp}, this $\mathcal{A}_*$-comodule may be recovered from the arithmetic square of $i_*H{\mathbb F}_2\otimes E$. In this section, we explore this approach in the case $E=BPGL\langle m\rangle$.
\subsection{The arithmetic square of $i_*H{\mathbb F}_2\otimes BPGL\langle m\rangle$}
\subsubsection{The $\tau$-local part}\label{subsecBredon}
In this section, we determine the bottom row of the arithmetic square of $i_*H{\mathbb F}_2\otimes BPGL\langle m\rangle$, namely the map
\[\pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes BPGL\langle m\rangle)^{\;\widehat{}}_\rho[\tau^{-1}]\to \pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes BPGL\langle m\rangle)^{\;\widehat{}}_\rho[\tau^{-1},\rho^{-1}]\]
The main advantage of applying the functor $(-)^{\;\widehat{}}_\rho[\tau^{-1}]$ is that it converts $i_*H{\mathbb F}_2$ homology to $M{\mathbb F}_2$-homology.
\begin{proposition}
\label{borelhomology}
The canonical map $i_*H{\mathbb F}_2\to M{\mathbb F}_2$ induces an equivalence
\[(i_*H{\mathbb F}_2\otimes E)^{\;\widehat{}}_\rho[\tau^{-1}]\to (M{\mathbb F}_2\otimes E)^{\;\widehat{}}_\rho[\tau^{-1}]\]
for any $E\in\mathcal{SH}_{\mathrm{cell}}({\mathbb R})$.
\end{proposition}
\begin{proof}
The map in question is obtained by taking the limit of the maps
\[i_*H{\mathbb F}_2\otimes E\otimes C(\rho^i)[\tau^{-1}]\to M{\mathbb F}_2\otimes E\otimes C(\rho^i)[\tau^{-1}]\]
(see \cite[Theorem 1.10]{BehrensShah}). By filtering $C(\rho^i)$, it suffices to show this map is an equivalence when $i=1$. By \cite[Proposition 8.3]{BehrensShah}, the map
\[C(\rho)\to \mathcal Spec(\mathbb C_+)\]
becomes an equivalence after applying $2$-completion and cellularization. It therefore suffices to show that the map
\[i_*H{\mathbb F}_2\otimes \mathcal Spec(\mathbb C_+)[\tau^{-1}]\to M{\mathbb F}_2\otimes \mathcal Spec(\mathbb C_+)[\tau^{-1}]\]
is an equivalence, for which it suffices to base change to $\mathbb C$ and show that
\[i_*H{\mathbb F}_2[\tau^{-1}]\to M{\mathbb F}_2[\tau^{-1}]\]
is an equivalence in $\mathcal{SH}(\mathbb C)$, by \cite[Theorem 1.7]{BehrensShah}. Finally, by \cite[Theorem 1.1]{BehrensShah}, it suffices to check this after applying $\mathbb C$-Betti realization, where the map in question becomes the identity map of $H{\mathbb F}_2$.
\end{proof}
The motivic homology of $BPGL\langle m\rangle$ for $-1\le m\le \infty$ is described similarly to the classical case. We have the following computation of Ormsby \cite[Theorem 3.8]{ormsby}.
\begin{proposition}\label{prop:motivichomologybprm}
The motivic homology of $BPGL\langle m\rangle$ is
\[H_{*,*}^{{\mathbb R}}BPGL\langle m\rangle\cong \mathcal{A}_{*,*}^{{\mathbb R}}\square_{\mathcal{E}_{*,*}^{{\mathbb R}}(m)}{M{\mathbb F}_2}_{*,*}=\frac{{\mathbb F}_2[\rho,\tau,\overline{t}_i,c(\tau_j)|i\ge1,j\ge m+1]}{(c(\tau_j)^2=c(\tau_{j+1})\rho+c(\overline{t}_{j+1})\tau)}\]
\end{proposition}
\begin{corollary}\label{cor:bottomrow}
The map
\[
\pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes BPGL\langle m\rangle)^{\;\widehat{}}_\rho[\tau^{-1}]\to\pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes BPGL\langle m\rangle)^{\;\widehat{}}_\rho[\tau^{-1},\rho^{-1}]
\]
is given by the inclusion
\[
\frac{{\mathbb F}_2[\rho,\tau^{\pm},\overline{t}_i,c(\tau_j)|i\ge1,j\ge m+1]}{(c(\tau_j)^2=c(\tau_{j+1})\rho+c(\overline{t}_{j+1})\tau)}^{\;\widehat{}}_{\rho} \hookrightarrow \frac{{\mathbb F}_2[\rho,\tau^{\pm},\overline{t}_i,c(\tau_j)|i\ge1,j\ge m+1]}{(c(\tau_j)^2=c(\tau_{j+1})\rho+c(\overline{t}_{j+1})\tau)}^{\;\widehat{}}_{\rho}[\rho^{-1}]
\]
\end{corollary}
\begin{proof}
By Proposition \ref{borelhomology}, we may prove this claim instead for the map
\[
\pi_{*,*}^{{\mathbb R}}(M{\mathbb F}_2\otimes BPGL\langle m\rangle)^{\;\widehat{}}_\rho[\tau^{-1}]\to\pi_{*,*}^{{\mathbb R}}(M{\mathbb F}_2\otimes BPGL\langle m\rangle)^{\;\widehat{}}_\rho[\tau^{-1},\rho^{-1}]
\]
which follows from Proposition \ref{prop:motivichomologybprm}.
\end{proof}
\begin{remark}
Ormsby's computation may be recovered using the spectral sequence of Section \ref{subsecmotivicslice} with $K=M{\mathbb F}_2$ and $E=BPGL\langle m\rangle$. This computation mirrors exactly the classical computation of $H_*BP\langle m\rangle$ using the Atiyah--Hirzebruch spectral sequence.
\end{remark}
\subsubsection{The $\rho$-local part}\label{subsecTate}
Corollary \ref{cor:bottomrow} determines the bottom row of the arithmetic square for $i_*H{\mathbb F}_2\otimes BPGL\langle m\rangle$. The key input for this calculation was the fact that, working $\tau$-locally, $i_*H{\mathbb F}_2$ homology agrees with $M{\mathbb F}_2$-homology. On the other hand, working $\rho$-locally, we have the following \cite[Theorem 1.2]{BehrensShah}.
\begin{proposition}
The functor
\[\Gamma:\mathcal{SH}_{\mathrm{cell}}({\mathbb R})[\rho^{-1}]\to \mathcal Sp\]
is an equivalence of categories with inverse given by $i_*(-)[\rho^{-1}]$.
\end{proposition}
The following is easy to obtain by appealing to $C_2$-equivariant homotopy and using \cite[Theorem 1.5]{BehrensShah} and \cite[Proposition 4.9]{GreenleesMeier}, or directly by simply adapting the arguments of \cite[Proposition 4.9]{GreenleesMeier} to the motivic setting.
\begin{lemma}\label{lemmarholocalbpgl}
Under this equivalence, $\Gamma$ sends $BPGL[\rho^{-1}]$ to $H{\mathbb F}_2$. Coning off $(\overline{v}_{m+1},\overline{v}_{m+2},\ldots)$, we have $\Gamma(BPGL\langle m\rangle[\rho^{-1}])\simeq H{\mathbb F}_2[y^{2^{m+1}}]$, as an $H{\mathbb F}_2$-module, where $|y|=1$.
\end{lemma}
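For instance, when $m=0$ the lemma gives $\Gamma(BPGL\langle 0\rangle[\rho^{-1}])\simeq H{\mathbb F}_2[y^{2}]$ as an $H{\mathbb F}_2$-module, with the polynomial generator now in degree $2$.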
This tells us that the gluing map in the arithmetic square for $i_*H{\mathbb F}_2\otimes BPGL$
\[i_*H{\mathbb F}_2\otimes BPGL[\rho^{-1}]\to (i_*H{\mathbb F}_2\otimes BPGL)^{\;\widehat{}}_\rho[\tau^{-1},\rho^{-1}]\]
is given on bigraded homotopy by some ring map of the form
\[\mathcal A_*[\rho^{\pm}]\to{\mathbb F}_2[\rho,\tau^{\pm},\overline{t}_i]^{\;\widehat{}}_{\rho}[\rho^{-1}]\]
To determine this gluing map, we can embed both groups into
\[\pi_{*,*}^{\mathbb R}(M{\mathbb F}_2\otimes M{\mathbb F}_2)^{\;\widehat{}}_\rho[\tau^{-1},\rho^{-1}]\]
via the Thom reduction map $BPGL\to M{\mathbb F}_2$ and the canonical map $i_*H{\mathbb F}_2\to M{\mathbb F}_2$.
Understanding these embeddings reduces to understanding the composite
\[i_*H{\mathbb F}_2[\rho^{-1}]\simeq BPGL[\rho^{-1}]\to M{\mathbb F}_2[\rho^{-1}]\]
Much of the subtlety in our computations comes from the observation that this map is not homotopic to the canonical map of Section \ref{subsubseccanonicalmap}
\[i_*H{\mathbb F}_2[\rho^{-1}]\to M{\mathbb F}_2[\rho^{-1}]\]
The two maps of course have the same effect on homotopy; we need to pass to homology to tell them apart. The following allows us to describe the effect of the canonical map on homology.
\begin{proposition}\label{propcounitimage}
Regarding the classical Milnor generators $\xi_i$ as elements of $\mathcal{A}_{*,*}^{{\mathbb R}}$ via the composite
\[\mathcal{A}_*=\pi_*(H{\mathbb F}_2\otimes H{\mathbb F}_2)\xrightarrow{i_*} \pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes i_*H{\mathbb F}_2)\to \pi_{*,*}^{{\mathbb R}}(M{\mathbb F}_2\otimes M{\mathbb F}_2)=\mathcal{A}_{*,*}^{{\mathbb R}}\]
one has the recursion formulas
\aln{
\overline{t}_{0}&:=1\\
\rho^{2^i}\overline{t}_i&=\xi_{i-1}^2(\tau_0\rho+\tau)+\xi_i\rho+\overline{t}_{i-1}\tau^{2^{i-1}}
}
\end{proposition}
\begin{proof}
We appeal to $C_2$-equivariant homotopy, where the corresponding formulas were computed by Hu--Kriz \cite[Theorem 6.18]{HK}. The result follows ${\mathbb R}$-motivically by the factorization
\[
\begin{tikzcd}
\mathcal A_*\arrow[r]\arrow[dr]&\mathcal{A}_{*,*}^{{\mathbb R}}\arrow[d,hookrightarrow]\\
&\mathcal{A}_{*,*}^{C_2}
\end{tikzcd}
\]
\end{proof}
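For instance, taking $i=1$ in the recursion (and using $\xi_0=\overline{t}_0=1$ together with $2=0$) gives
\[\rho^{2}\overline{t}_1=(\tau_0\rho+\tau)+\xi_1\rho+\tau=\rho(\tau_0+\xi_1),\]
recovering the identity $\xi_1=\rho\overline{t}_1+\tau_0$ that is used in the proof of Proposition \ref{propnotunit} below.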
Let $\mathrm{can}$ denote the equivalence
\[\mathrm{can}:H{\mathbb F}_2[y]\xrightarrow{\simeq} \Gamma(M{\mathbb F}_2[\rho^{-1}])\]
where $H{\mathbb F}_2[y]$ is the free $E_1$-$H{\mathbb F}_2$-algebra on a class $y$ in degree 1, and $\mathrm{can}$ is defined by giving $\Gamma(M{\mathbb F}_2[\rho^{-1}])$ an $H{\mathbb F}_2$-algebra structure via the canonical map of Section \ref{subsubseccanonicalmap} and by sending $y$ to $\tau/\rho\in\pi_1\Gamma(M{\mathbb F}_2[\rho^{-1}])$.
\begin{proposition}\label{propnotunit}
The map
\[H{\mathbb F}_2\simeq\Gamma(BPGL[\rho^{-1}])\to\Gamma(M{\mathbb F}_2[\rho^{-1}])\xrightarrow{\mathrm{can}^{-1}}H{\mathbb F}_2[y]\]
is not homotopic to the unit map $\eta:H{\mathbb F}_2\to H{\mathbb F}_2[y]$.
\end{proposition}
\begin{proof}
If it were, the composite
\[i_*H{\mathbb F}_2\otimes i_*H{\mathbb F}_2[\rho^{-1}]\simeq i_*H{\mathbb F}_2\otimes BPGL[\rho^{-1}]\to M{\mathbb F}_2\otimes M{\mathbb F}_2[\rho^{-1}]\]
would be homotopic to the canonical map
\[i_*H{\mathbb F}_2\otimes i_*H{\mathbb F}_2[\rho^{-1}]\to M{\mathbb F}_2\otimes M{\mathbb F}_2[\rho^{-1}]\]
By Proposition \ref{propcounitimage}, this map sends the class $\xi_1$ to $\rho\overline{t}_1+\tau_0$. This would imply that $\tau_0$ is in the image of the (injective) map
\[\pi_{*,*}^{{\mathbb R}}(M{\mathbb F}_2\otimes M{\mathbb Z})^{\;\widehat{}}_\rho[\tau^{-1},\rho^{-1}]\to\pi_{*,*}^{{\mathbb R}}(M{\mathbb F}_2\otimes M{\mathbb F}_2)^{\;\widehat{}}_\rho[\tau^{-1},\rho^{-1}]\]
since the reduction map $BPGL\to M{\mathbb F}_2$ factors through $M{\mathbb Z}$.
Voevodsky's relations
\[\tau_i^2=\tau \overline{t}_{i+1}+\rho\tau_{i+1}+\rho\tau_0\overline{t}_{i+1}\]
would then imply that all $\tau_i$'s are in the image of this map, so that it is an isomorphism, a contradiction.
\end{proof}
\begin{remark}\label{hukrizerror}
In \cite{BPO}, Hu--Kriz compute the homology groups $H_*BP_{\mathbb R}^{C_2}$ using the isotropy separation sequence. By Corollary \ref{cor:betaE}, this is the same as computing
\[\pi_{*,0}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes BPGL)\]
Our computations differ from theirs, and this is related to what seems to be an error on page 114, where they claim that $\eta_R(u_\sigma^{-1})$ is in the image of the reduction map
\[f:\pi_\star(\tilde{E}C_2\otimes F(E{C_2}_+,H\underline{{\mathbb F}_2}\otimes BP_{\mathbb R}))\to \pi_\star(\tilde{E}C_2\otimes F(E{C_2}_+,H\underline{{\mathbb F}_2}\otimes H\underline{{\mathbb F}_2}))\]
In their notation, the claim is that $f(\rho)=\eta_R(\sigma)$. As in the proof of the previous proposition, this would imply that $\tau_0$ is in the image of this map, which leads to a contradiction.
\end{remark}
The composite in Proposition \ref{propnotunit} gives \textit{some} ring map
\[H{\mathbb F}_2\to H{\mathbb F}_2[y]\]
and we identify it by showing it is structured enough to apply Mahowald's theorem on $H{\mathbb F}_2$ \cite{mahow}.
\begin{theorem}\label{thmtatefrob}
The composite
\[H{\mathbb F}_2\simeq\Gamma(BPGL[\rho^{-1}])\to \Gamma(M{\mathbb F}_2[\rho^{-1}])\xrightarrow{\mathrm{can}^{-1}}H{\mathbb F}_2[y]\simeq\prod\limits_{i\ge0}\Sigma^i H{\mathbb F}_2\]
has components $Sq^i:H{\mathbb F}_2\to \Sigma^i H{\mathbb F}_2$.
\end{theorem}
\begin{proof}
We note first that the map
\[H{\mathbb F}_2=\Gamma(i_*H{\mathbb F}_2[\rho^{-1}])\simeq\Gamma(BPGL[\rho^{-1}])\to\Gamma(M{\mathbb F}_2[\rho^{-1}])\]
is an $E_\infty$ map since it may be factored as
\[\Gamma(i_*H{\mathbb F}_2[\rho^{-1}])\simeq\Gamma(BPGL[\rho^{-1}])\to\Gamma(MGL_{(2)}[\rho^{-1}])\to \Gamma(M{\mathbb F}_2[\rho^{-1}])\]
The first map is the unit map $H{\mathbb F}_2\to MO$, which is $E_\infty$, and the map $MGL_{(2)}\to M{\mathbb F}_2$ is $E_\infty$.
Mahowald's theorem on $H{\mathbb F}_2$ implies that there is a pushout square in $E_2$-algebras in $\mathcal Sp$
\[
\begin{tikzcd}
\text{Free}_{E_2}(\mathbb S)\arrow[r,"\overline{0}"]\arrow[d,"\overline{2}"']&\mathbb S\arrow[d]\\
\mathbb S\arrow[r]&H{\mathbb F}_2
\end{tikzcd}
\]
which gives
\[\pi_0\text{Map}_{E_2}(H{\mathbb F}_2,\Gamma(M{\mathbb F}_2[\rho^{-1}]))\cong\pi_1\Gamma(M{\mathbb F}_2[\rho^{-1}])={\mathbb F}_2\]
There is thus a unique such $E_2$-map not homotopic to the unit map $\eta$. We conclude by observing that the Tate-valued Frobenius
\[\varphi_2:H{\mathbb F}_2\to H{\mathbb F}_2^{tC_2}\]
of Nikolaus--Scholze is an $E_\infty$ map that factors through the connective cover
\[H{\mathbb F}_2[y]\simeq\Gamma(M{\mathbb F}_2[\rho^{-1}])\simeq\Phi^{C_2}H\underline{{\mathbb F}_2}\]
and admits the above description by \cite[IV.1.5]{NS}.
\end{proof}
\subsection{The Mayer--Vietoris sequence for $i_*H{\mathbb F}_2\otimes BPGL$}\label{subsecMVseq}
Theorem \ref{thmtatefrob} may be used to describe explicitly the gluing map in the arithmetic square for $i_*H{\mathbb F}_2\otimes BPGL$. To determine its effect on homotopy, we need the following lemma.
\begin{lemma}\label{lemmacapproduct}
The map induced on homotopy by
\[H{\mathbb F}_2\otimes H{\mathbb F}_2\xrightarrow{1\otimes Sq^i}H{\mathbb F}_2\otimes \Sigma^i H{\mathbb F}_2\]
is the cap product
\[-\cap Sq^i:\mathcal A_*\xrightarrow{\Delta}\mathcal A_*\otimes\mathcal A_*\xrightarrow{1\otimes\langle-,Sq^i\rangle}\mathcal A_{*-i}\]
\end{lemma}
\begin{proof}
The map induced on homotopy by $1\otimes Sq^i$ is an $\mathcal A_*$-comodule map, and $\mathcal A_{*-i}$ is a cofree $\mathcal A_*$-comodule. It suffices to show the composite
\[\mathcal A_*\xrightarrow{(1\otimes Sq^i)_*}\mathcal A_{*-i}\xrightarrow{\epsilon}{\mathbb F}_2[i]\]
coincides with the composite
\[\mathcal A_*\xrightarrow{\Delta}\mathcal A_*\otimes\mathcal A_*\xrightarrow{1\otimes\langle-,Sq^i\rangle}\mathcal A_{*-i}\xrightarrow{\epsilon}{\mathbb F}_2[i]\]
where $\epsilon:\mathcal A_*\to {\mathbb F}_2$ is the counit of the Hopf algebra $\mathcal A_*$. Under the isomorphism $\mathcal A\cong\mathrm{Hom}_{{\mathbb F}_2}(\mathcal A_*,{\mathbb F}_2)$, the latter map corresponds to $Sq^i$. The same is true of the former map, as the pairing $\mathcal A_*\otimes_{{\mathbb F}_2}\mathcal A\to {\mathbb F}_2$
is induced by the map in $\mathcal Sp$
\[(H{\mathbb F}_2\otimes H{\mathbb F}_2)\otimes_{H{\mathbb F}_2}F(H{\mathbb F}_2,H{\mathbb F}_2)\to H{\mathbb F}_2\otimes H{\mathbb F}_2\xrightarrow{\mu}H{\mathbb F}_2\qedhere\]
\end{proof}
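For instance, since $Sq^j$ is dual to $\xi_1^j$ and $\Delta(\xi_i)=\sum_k\xi_{i-k}^{2^k}\otimes\xi_k$, one finds for $i\ge1$ that $\xi_i\cap Sq^0=\xi_i$, $\xi_i\cap Sq^1=\xi_{i-1}^2$, and $\xi_i\cap Sq^j=0$ for $j\ge2$; this is the computation used in the proof of Proposition \ref{proprightmap} below.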
\begin{proposition}\label{proprightmap}
The map
\[\pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes BPGL)[\rho^{-1}]\to \pi_{*,*}^{{\mathbb R}}(M{\mathbb F}_2\otimes BPGL)^{\;\widehat{}}_\rho[\tau^{-1},\rho^{-1}]\]
is given by the map
\[\varphi:\mathcal A_*[\rho^{\pm}]\to {\mathbb F}_2[\tau^{\pm},\rho,\overline{t}_i]^{\;\widehat{}}_{\rho}[\rho^{-1}]\]
of ${\mathbb F}_2[\rho^{\pm}]$-algebras sending
\[\varphi(\xi_i)=\rho^{2^i-1}\overline{t}_i+\Big(\frac{\tau^{2^{i-1}}}{\rho}\Big)\overline{t}_{i-1}\]
\end{proposition}
\begin{proof}
The diagram
\[
\begin{tikzcd}
(i_*H{\mathbb F}_2\otimes BPGL)[\rho^{-1}]\arrow[d]\arrow[r]&(M{\mathbb F}_2\otimes M{\mathbb F}_2)[\rho^{-1}]\arrow[d]\\
(M{\mathbb F}_2\otimes BPGL)^{\;\widehat{}}_\rho[\tau^{-1},\rho^{-1}]\arrow[r]&(M{\mathbb F}_2\otimes M{\mathbb F}_2)^{\;\widehat{}}_\rho[\tau^{-1},\rho^{-1}]
\end{tikzcd}
\]
is given on bigraded homotopy by
\[
\begin{tikzcd}
\mathcal A_*[\rho^{\pm}]\arrow[d,"\varphi"]\arrow[r]&\mathcal A_*[\tau,\eta_R(\tau),\rho^{\pm}]\arrow[d]\\
{\mathbb F}_2[\rho,\tau^{\pm},\oldwidehaterline{t}_i]^{\;\widehat{}}_{\rho}[\rho^{-1}]\arrow[r]&\mathcal A_*[\tau^{\pm},\eta_R(\tau)^{\pm},\rho]^{\;\widehat{}}_{\rho}[\rho^{-1}]
\end{tikzcd}
\]
Note that the top map is \textit{not} a map of $\mathcal A_*$-algebras; this is the content of Proposition \ref{propnotunit}. The bottom and righthand maps are injective, so it suffices to determine the top map. Combining Theorem \ref{thmtatefrob} and Lemma \ref{lemmacapproduct}, this is the map of ${\mathbb F}_2[\rho^{\pm}]$-algebras sending
\[\xi_i\mapsto\sum\limits_{j\ge0}(\xi_i\cap Sq^j)\cdot\bigg(\frac{\eta_R(\tau)}{\rho}\bigg)^j\]
Using the coproduct formula on $\mathcal A_*$ and the fact that $Sq^j$ is dual to $\xi_1^j$, we have
\[\xi_i\mapsto \xi_i+\xi_{i-1}^2\frac{\eta_R(\tau)}{\rho}\]
We conclude by applying the recursion formulas of Proposition \ref{propcounitimage}.
\end{proof}
We move now to weight zero and set $\chi_i:=\tau^{2^i-1}\overline{t}_i$ and $z:=\rho/\tau=y^{-1}$. We may summarize the results of Section \ref{secarithmeticsquare} thus far as follows.
\begin{proposition}\label{proparithmeticsquarecomputation}
The arithmetic square of $i_*H{\mathbb F}_2\otimes BPGL$ is given by
\[
\begin{tikzcd}
H_*\Gamma(BPGL)\arrow[r]\arrow[d]&\mathcal A_*\arrow[d,"\varphi"]&\xi_i\arrow[d,mapsto]\\
{\mathbb F}_2[z,\chi_i|i\ge1]^{\;\widehat{}}_{z}\arrow[r,hookrightarrow]&{\mathbb F}_2[z,\chi_i|i\ge1]^{\;\widehat{}}_{z}[z^{-1}]&\chi_iz^{2^i-1}+\chi_{i-1}z^{-1}
\end{tikzcd}
\]
with $|z|=-1$ and $|\chi_i|=2(2^i-1)$.
\end{proposition}
To compute the associated Mayer--Vietoris sequence, we establish the following.
\begin{proposition}\label{propbndmap}
The arithmetic square of Proposition \ref{proparithmeticsquarecomputation} satisfies the following properties:
\begin{enumerate}
\item The map $\varphi$ is injective.
\item The map ${\mathbb F}_2[z,\chi_i|i\ge 1]^{\;\widehat{}}_{z}\oplus\mathcal A_*\to {\mathbb F}_2[z,\chi_i|i\ge 1]^{\;\widehat{}}_{z}[z^{-1}]$ is injective in nonzero degrees.
\item The maps
\[H_*\Gamma(BPGL)\to H_*\Gamma(BPGL[\rho^{-1}])=\mathcal A_*\]
and
\[H_*\Gamma(BPGL)\to \pi_{*,0}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes BPGL)^{\;\widehat{}}_\rho[\tau^{-1}]\]
are both zero in nonzero degrees.
\item In positive degrees $j$, the following sequence is short exact:
\end{enumerate}
\adjustbox{scale=1,center}{
$0\to ({\mathbb F}_2[z,\chi_i|i\ge 1]^{\;\widehat{}}_{z})_{j+1}\oplus\mathcal A_{j+1}\to ({\mathbb F}_2[z,\chi_i|i\ge 1]^{\;\widehat{}}_{z}[z^{-1}])_{j+1}\xrightarrow{\partial}H_{j}\Gamma(BPGL)\to0$
}
\end{proposition}
\begin{proof}
For (1), we have a diagram
\[
\begin{tikzcd}
\pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes BPGL)[\rho^{-1}]\arrow[r]\arrow[d,"\varphi"]&\pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes M{\mathbb F}_2)[\rho^{-1}]\arrow[d]\\
\pi_{*,*}^{{\mathbb R}}(M{\mathbb F}_2\otimes BPGL)^{\;\widehat{}}_\rho[\tau^{-1},\rho^{-1}]\arrow[r]&\pi_{*,*}^{{\mathbb R}}(M{\mathbb F}_2\otimes M{\mathbb F}_2)^{\;\widehat{}}_\rho[\tau^{-1},\rho^{-1}]
\end{tikzcd}
\]
and the righthand and bottom maps are injective. We have seen that the top map is induced by the map
\[i_*H{\mathbb F}_2\otimes i_*H{\mathbb F}_2\xrightarrow{1\otimes i_*(\varphi_2)}i_*H{\mathbb F}_2\otimes i_*H{\mathbb F}_2[y]\]
The map $\varphi_2$ differs from the unit map $H{\mathbb F}_2\xrightarrow{\eta}H{\mathbb F}_2[y]$ by an automorphism of $H{\mathbb F}_2[y]$ and hence is the inclusion of a retract.
For (2), since $\varphi$ is injective, the map
\[{\mathbb F}_2[z,\chi_i|i\ge 1]^{\;\widehat{}}_{z}\oplus\mathcal A_*\to {\mathbb F}_2[z,\chi_i|i\ge 1]^{\;\widehat{}}_{z}[z^{-1}]\]
fails to be injective in degree $j\neq 0$ if and only if there exists a class $0\neq x\in\mathcal A_j$ such that
\[\varphi(x)\in\mathrm{image}\bigg({\mathbb F}_2[z,\chi_i|i\ge 1]^{\;\widehat{}}_{z}\hookrightarrow {\mathbb F}_2[z,\chi_i|i\ge 1]^{\;\widehat{}}_{z}[z^{-1}]\bigg)\]
Note, however, that since $\varphi(\xi_i)=z^{-1}(\chi_iz^{2^i}+\chi_{i-1})$, a monomial $\xi_{i_1}^{j_1}\cdots\xi_{i_n}^{j_n}$ is sent to
\aln{
\varphi(\xi_{i_1}^{j_1}\cdots\xi_{i_n}^{j_n})&=z^{-j_1}(\chi_{i_1}z^{2^{i_1}}+\chi_{i_1-1})^{j_1}\cdots z^{-j_n}(\chi_{i_n}z^{2^{i_n}}+\chi_{i_n-1})^{j_n}\\
&=z^{-(j_1+\cdots +j_n)}\chi_{i_1-1}^{j_1}\cdots\chi_{i_n-1}^{j_n}+\cdots
}
where the omitted terms are of the form $z^mr(\chi_i)$ with $m>-(j_1+\cdots +j_n)$, where $r(\chi_i)$ is some polynomial in the $\chi_i$'s. In particular, for a polynomial
\[p(\xi_i)=\sum\limits_{k=1}^N\xi_{i_{k,1}}^{j_{k,1}}\cdots\xi_{i_{k,n_k}}^{j_{k,n_k}}\]
in $\mathcal A_*$, let
\[M:=\max\{j_{k,1}+\cdots+j_{k,n_k}\text{ : } 1\le k\le N\}\]
\[q(\xi_i):=\sum\limits_{k\text{ : } j_{k,1}+\cdots+j_{k,n_k}=M}\xi_{i_{k,1}}^{j_{k,1}}\cdots\xi_{i_{k,n_k}}^{j_{k,n_k}}\]
so that $p(\xi_i)=q(\xi_i)+r(\xi_i)$ where
\[r(\xi_i):=\sum\limits_{k\text{ : } j_{k,1}+\cdots+j_{k,n_k}<M}\xi_{i_{k,1}}^{j_{k,1}}\cdots\xi_{i_{k,n_k}}^{j_{k,n_k}}\]
Then
\[\varphi(p(\xi_i))=z^{-M}q(\chi_{i-1})+\cdots\]
where the omitted terms are of the form $z^ms(\chi_i)$ with $m>-M$ and $s(\chi_i)$ some polynomial in the $\chi_i$'s. Note that $q(\chi_{i-1})\neq0$, so this sum cannot be in the image of the bottom map in the arithmetic square, as $M>0$. Parts (3) and (4) are immediate from the Mayer--Vietoris sequence.
\end{proof}
This is already enough to describe $H_*\Gamma(BPGL)$ as an $\mathcal{A}_*$-comodule. For the following discussion, cf. \cite{BPO}. Let $K(\rho)$ be defined by the cofiber sequence
\[K(\rho)\to S^{0,0}\to S^{0,0}[\rho^{-1}]\]
We have a long exact sequence of left $\mathcal A_*$-comodules
\[\cdots\xrightarrow{\partial}H_*\Gamma(BPGL\otimes K(\rho))\to H_*\Gamma(BPGL)\to\mathcal A_*\xrightarrow{\partial}H_{*-1}\Gamma(BPGL\otimes K(\rho))\to\cdots\]
Proposition \ref{propbndmap} gives a splitting of left $\mathcal A_*$-comodules
\[H_*\Gamma(BPGL)\cong {\mathbb F}_2\{1\}\oplus\coker(\partial)\]
To describe $\coker(\partial)$, since our arithmetic square is a pullback in weight zero by Proposition \ref{proparithmeticsquare} and Corollary \ref{cor:betaE}, we have a diagram
\[
\begin{tikzcd}
\mathcal A_*\arrow[r,"\partial"]\arrow[d,"\varphi"]&H_{*-1}\Gamma(BPGL\otimes K(\rho))\arrow[d,"="]\\
{\mathbb F}_2[z,\chi_i]^{\;\widehat{}}_{z}[z^{-1}]\arrow[r]&H_{*-1}\Gamma(BPGL\otimes K(\rho))
\end{tikzcd}
\]
Since ${\mathbb F}_2[z,\chi_i]^{\;\widehat{}}_{z}\to {\mathbb F}_2[z,\chi_i]^{\;\widehat{}}_{z}[z^{-1}]$ is an injection, we find that
\aln{
\Sigma H_*\Gamma(BPGL\otimes K(\rho))&=\coker({\mathbb F}_2[z,\chi_i]^{\;\widehat{}}_{z}\to {\mathbb F}_2[z,\chi_i]^{\;\widehat{}}_{z}[z^{-1}])\\
&={\mathbb F}_2[z^{-1},\chi_i]\{z^{-1}\}
}
Together with our explicit description of $\varphi$, we conclude:
\begin{theorem}\label{thmhomologybpr}
$H_*\Gamma(BPGL)$ splits as a left $\mathcal A_*$-comodule
\[H_*\Gamma(BPGL)={\mathbb F}_2\{1\}\oplus\mathrm{coker}(\partial)\]
where $\partial$ may be described as the map
\aln{
\partial:\mathcal A_*&\to \Sigma^{-1}{\mathbb F}_2[z^{-1},\chi_i]\{z^{-1}\}\\
\xi_i&\mapsto \chi_iz^{2^i-1}+\chi_{i-1}z^{-1}
}
which is to be understood as follows: extend the displayed assignment multiplicatively in the generators $\xi_i$, and then set all nonnegative powers of $z$ to $0$ in the result. The map $\partial$ is injective in positive degrees.
\end{theorem}
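For example, in low degrees the multiplicative extension sends $\xi_1\mapsto\chi_1z+z^{-1}$ (recall that $\chi_0=1$), $\xi_1^2\mapsto\chi_1^2z^2+z^{-2}$ and $\xi_2\mapsto\chi_2z^3+\chi_1z^{-1}$, so that after discarding the terms with nonnegative powers of $z$ one finds
\[\partial(\xi_1)=z^{-1},\qquad \partial(\xi_1^2)=z^{-2},\qquad \partial(\xi_2)=\chi_1z^{-1}\]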
\begin{remark}
It is possible to describe the $\mathcal A_*$-comodule structure on $\coker(\partial)$ with explicit formulas. We omit these formulas, however, as we believe they are too complicated to be useful. By contrast, as we will see, the HSSS of $BPGL$ describes $H_*\Gamma(BPGL)$ as a subquotient of an explicit and straightforward $\mathcal A_*$-comodule algebra.
We remark that it is also possible to describe $H_*\Gamma(BPGL\langle m\rangle)$ as an $\mathcal{A}_*$-comodule somewhat explicitly in terms of the boundary map in the isotropy separation sequence, as in Theorem \ref{thmhomologybpr}. Due to the complicated nature of these formulas, however, we again prefer to use the HSSS to describe the global structure of homology.
\end{remark}
\subsection{The image of the edge homomorphism for $BPGL\langle m\rangle$}\label{subsecimageedgehom}
We will see in Section \ref{seccomputations} that in the HSSS for $BPGL\langle m\rangle$, the map $E_2\to E_2[\rho^{-1}]$ to the $\rho$-localized spectral sequence is an injection on $E_2$, and this allows us to determine the image of the edge homomorphism from that of the localized spectral sequence, using our results on the arithmetic square.
The edge homomorphism is natural, giving a commutative diagram
\[
\begin{tikzcd}
\pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes BPGL\langle m\rangle)\arrow[d]\arrow[r]&\pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes BPGL\langle m\rangle)[\rho^{-1}]\arrow[d]\\
\pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes M{\mathbb Z})\arrow[r]&\pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes M{\mathbb Z})[\rho^{-1}]
\end{tikzcd}
\]
On the lefthand side of this square, we have an isomorphism in weight zero
\[\mathcal A_*\square_{\mathcal A(0)_*}{\mathbb F}_2\cong H_*H{\mathbb Z}\cong \pi_{*,0}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes M{\mathbb Z})\]
coming from the canonical map $i_*H{\mathbb Z}\to M{\mathbb Z}$. On the righthand side, using the equivalence
\[i_*H{\mathbb F}_2[y^{2^{m+1}}][\rho^{-1}]\simeq BPGL\langle m\rangle[\rho^{-1}]\]
of Lemma \ref{lemmarholocalbpgl}, the righthand map becomes the canonical inclusion
\[\mathcal A_*[y^{2^{m+1}},\rho^{\pm}]\hookrightarrow\mathcal A_*[y^{2},\rho^{\pm}]\]
Describing the bottom map in terms of these identifications therefore places us in the context of Theorem \ref{thmtatefrob}, which determines two identifications
\[H_*\Gamma(M{\mathbb F}_2[\rho^{-1}])\cong\mathcal A_*[y]\]
To be explicit, the canonical map $i_*H{\mathbb F}_2[\rho^{-1}]\to M{\mathbb F}_2[\rho^{-1}]$ gives an equivalence $H{\mathbb F}_2[y]\simeq \Gamma(M{\mathbb F}_2[\rho^{-1}])$, which determines an isomorphism of the above form denoted $\cong_{\mathrm{can}}$. On the other hand, the map $i_*H{\mathbb F}_2[\rho^{-1}]\simeq BPGL[\rho^{-1}]\to M{\mathbb F}_2[\rho^{-1}]$ determines an isomorphism of the same form, which we denote $\cong_{BP}$. Theorem \ref{thmtatefrob} and the proof of Proposition \ref{proprightmap} tell us how to translate between these identifications.
\begin{proposition}\label{prophomologytwist}
The composition
\[\mathcal A_*[y]\cong_{\mathrm{can}} H_*\Gamma(M{\mathbb F}_2[\rho^{-1}])\cong_{BP}\mathcal A_*[y]\]
sends
\aln{
y&\mapsto y\\
\zeta_i&\mapsto \zeta_i+\zeta_{i-1}y^{2^{i-1}}
}
\end{proposition}
\begin{proof}
Theorem \ref{thmtatefrob} and Lemma \ref{lemmacapproduct} describe the inverse of this composite as a sum of cap products. Using the congruence
\[\zeta_m\equiv \xi_1^{2^m-1}\mod (\xi_2,\xi_3,\ldots)\]
one has that the inverse of this composite sends
\[\zeta_m\mapsto \sum\limits_{i=0}^m\zeta_iy^{2^m-2^i}\]
which implies the claimed description.
\end{proof}
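As a consistency check, applying the assignment $\zeta_i\mapsto\zeta_i+\zeta_{i-1}y^{2^{i-1}}$ (with $y\mapsto y$ and $\zeta_0=1$) to $\sum_{i=0}^m\zeta_iy^{2^m-2^i}$ gives
\[\sum_{i=0}^{m}\zeta_iy^{2^m-2^i}+\sum_{i=1}^{m}\zeta_{i-1}y^{2^m-2^{i-1}}=\zeta_m,\]
all terms other than $\zeta_m$ cancelling in pairs, so the two assignments are indeed mutually inverse.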
\begin{corollary}\label{coredgeA(m)}
The image of the edge homomorphism in the HSSS of $BPGL\langle m\rangle$ contains $\mathcal A_*\square_{\mathcal A(m)_*}{\mathbb F}_2\subset H_*H{\mathbb Z}$.
\end{corollary}
\begin{proof}
We claim that the image of each of the generators
\[\zeta_1^{2^{m+1}},\zeta_2^{2^m},\ldots,\zeta_{m+1}^2,\zeta_{m+2},\ldots,\]
of $\mathcal A_*\square_{\mathcal A(m)_*}{\mathbb F}_2$ along the map $E_2\to E_2[\rho^{-1}]$ is in the image of the edge homomorphism for the $\rho$-localized spectral sequence. Indeed, Proposition \ref{prophomologytwist} implies that the localization map sends
\aln{
\zeta_{i}^{2^{m+2-i}}\mapsto \zeta_i^{2^{m+2-i}}+\zeta_{i-1}^{2^{m+2-i}}y^{2^{m+1}}\hspace{0.5cm}&\mathrm{ for}\hspace{0.1cm}i\le m+2\\
\zeta_i\mapsto \zeta_i+\zeta_{i-1}y^{2^{i-1}}\hspace{0.5cm}&\mathrm{ for}\hspace{0.1cm}i>m+2
}
so that each of these generators lands in the subalgebra $\mathcal{A}_*[y^{2^{m+1}}]\subset E_2[\rho^{-1}]$.
We show in Corollary \ref{corE_2description} that $E_2\to E_2[\rho^{-1}]$ is an injection, and it therefore suffices to show that each of the classes
\aln{
\zeta_i^{2^{m+2-i}}+\zeta_{i-1}^{2^{m+2-i}}y^{2^{m+1}}\hspace{0.5cm}&\mathrm{ for}\hspace{0.1cm}i\le m+2\\
\zeta_i+\zeta_{i-1}y^{2^{i-1}}\hspace{0.5cm}&\mathrm{ for }\hspace{0.1cm}i>m+2
}
admits a lift along the map
\[\pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes BPGL\langle m\rangle)\to\pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes BPGL\langle m\rangle)[\rho^{-1}]\]
The Mayer--Vietoris sequence for the arithmetic square of $i_*H{\mathbb F}_2\otimes BPGL\langle m\rangle$ implies that it suffices to show that the image of each of these classes along the map
\[\varphi_m:\pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes BPGL\langle m\rangle)[\rho^{-1}]\to\pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes BPGL\langle m\rangle)^{\;\widehat{}}_\rho[\tau^{-1},\rho^{-1}]\]
is in the image of the map
\[\pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes BPGL\langle m\rangle)^{\;\widehat{}}_\rho[\tau^{-1}]\to\pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes BPGL\langle m\rangle)^{\;\widehat{}}_\rho[\tau^{-1},\rho^{-1}]\]
The map $\varphi_m$ is described by Proposition \ref{proprightmap} along with the fact that it sends $y^{2^{m+1}}$ to $(\eta_R(\tau)/\rho)^{2^{m+1}}$.
Embedding into the case $m=-1$, the map $\varphi$ sends these classes back along the inverse of the composition in Proposition \ref{prophomologytwist}, i.e. to the image of each of the generators above along the map
\[i_*:\mathcal{A}_*\to \pi_{*,*}^{{\mathbb R}}(M {\mathbb F}_2\otimes M{\mathbb F}_2)^{\;\widehat{}}_\rho[\tau^{-1},\rho^{-1}]\]
The image of the map
\[\pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes BPGL\langle m\rangle)^{\;\widehat{}}_\rho[\tau^{-1}]\to \pi_{*,*}^{{\mathbb R}}(M {\mathbb F}_2\otimes M{\mathbb F}_2)^{\;\widehat{}}_\rho[\tau^{-1},\rho^{-1}]\]
is a sub-$\mathcal{A}_*$-comodule, so it suffices to show that its image contains
\[i_*(\zeta_{m+2}),i_*(\zeta_{m+3}),\ldots\]
Conjugating the relations \cite[Theorem 2.12]{LSWX}, one has
\[i_*(\zeta_i)=\frac{1}{\tau}\big(c(\tau_{i-1})\eta_R(\tau)^{2^{i-1}}+\rho^{2^i}c(\tau_i)\big)\]
The result now follows from Proposition \ref{prop:motivichomologybprm}.
\end{proof}
Arguing similarly, we find the following classes in the image of the edge homomorphism in weight $-1$.
\begin{corollary}\label{corx_mpermanent}
For all $m\ge0$, the class $\rho\xi_m$ admits a lift along the map
\[\pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes BPGL)\to\pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes BPGL)[\rho^{-1}]\]
Therefore any lift of $\rho\xi_m\in E_2[\rho^{-1}]$ along $E_2\to E_2[\rho^{-1}]$ is in the image of the edge homomorphism.
\end{corollary}
\begin{proof}
By Proposition \ref{proprightmap}, we have
\[\varphi(\rho\xi_m)=\rho^{2^m}\overline{t}_m+\tau^{2^{m-1}}\overline{t}_{m-1}\]
which involves no negative powers of $\rho$ and hence lies in the image of $\pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes BPGL)^{\;\widehat{}}_\rho[\tau^{-1}]$; the Mayer--Vietoris sequence then provides the desired lift, as in the proof of Corollary \ref{coredgeA(m)}.
\end{proof}
\section{The HSSS for $BPGL\langle m\rangle$}\label{secdifferentials}
\subsection{The slice \texorpdfstring{\(E_2\)}{E Two} page}\label{subsecE_2}
By Proposition \ref{propslicebp}, the HSSS of $BPGL\langle m\rangle$ has $E_2$ page
\[\pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes M{\mathbb Z}[\overline{v}_{1},\ldots,\overline{v}_{m}])=\pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes M{\mathbb Z})[\overline{v}_{1},\ldots,\overline{v}_{m}]\]
We therefore proceed to calculate the bigraded homotopy ring
\[\pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes M{\mathbb Z})\]
This has some unexpected multiplicative features; we first determine the additive structure.
\begin{proposition}\label{propE_2splitting}
The map $M{\mathbb Z}\to i_*H\mathbb{F}_2\otimes M{\mathbb Z}$ determines an isomorphism of left $\mathcal{A}_*$-comodules
\[\pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes M{\mathbb Z})\cong(\mathcal A_*\square_{\mathcal A(0)_*}{\mathbb F}_2)[\tau^2]\oplus\bigoplus\limits_{i\ge0,j>0}\mathcal A_*\{\tau^{2i}\rho^j\}\]
\end{proposition}
\begin{proof}
Voevodsky's computation \cite{voe} of $\pi_{*,*}^{{\mathbb R}}M{\mathbb Z}$ (see also \cite{GHIR}) implies that the $H{\mathbb Z}$-module
\[\Gamma\bigg(\bigoplus\limits_{b\in{\mathbb Z}}\Sigma^{0,b}M{\mathbb Z}\bigg)\]
splits as
\[\bigoplus\limits_{i\ge0}H{\mathbb Z}\{\tau^{2i}\}\oplus\bigoplus\limits_{i\ge0,j>0}H{\mathbb F}_2\{\tau^{2i}\rho^j\}\]
and the result follows upon taking homology.
\end{proof}
There are subtleties in computing the products in this ring, and these arise from the following class.
\begin{definition}\label{defx_1}
Let $x_1$ denote the class $[\rho\zeta_1]$ in bidegree $(0,-1)$. We use brackets to emphasize that $x_1$ is indecomposable in
\[\pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes M{\mathbb Z})\]
In particular, there is no class $\zeta_1$, as $\zeta_1\notin\mathcal A_*\square_{\mathcal A(0)_*}{\mathbb F}_2$, and $x_1$ is not divisible by $\rho$.
\end{definition}
Most of the ring structure in $\pi_{*,*}^{{\mathbb R}}(i_*H\mathbb{F}_2\otimes M{\mathbb Z})$ is determined by the ring maps
\[\pi_*(H{\mathbb F}_2\otimes H{\mathbb Z})\to\pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes M{\mathbb Z})\]
and
\[\pi_{*,*}^{{\mathbb R}}(M{\mathbb Z})\to\pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes M{\mathbb Z})\]
However, the class $x_1$ is not in the subalgebra generated by the images of these maps. To determine products involving $x_1$, we instead work $\rho$-locally.
\begin{proposition}\label{propE_2localized}
There is an isomorphism of bigraded rings
\[\mathcal A_*[y_2,\rho^{\pm}]\to \pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes M{\mathbb Z})[\rho^{-1}]\]
where $|y_2|=2$, and the localization map
\[\phi:\pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes M{\mathbb Z})\to \pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes M{\mathbb Z})[\rho^{-1}]\]
sends
\begin{align*}
\tau^2&\mapsto \rho^2y_2
&x_1&\mapsto \zeta_1\rho
&\zeta_1^2&\mapsto\zeta_1^2+y_2
&\zeta_i&\mapsto\zeta_i+\zeta_{i-1}y_2^{2^{i-2}}
\end{align*}
where $\zeta_1^2,\zeta_i$ are regarded as the elements in the
\[(\mathcal A_*\square_{\mathcal A(0)_*}{\mathbb F}_2)[\tau^2]\]
summand described in the previous proposition.
\end{proposition}
\begin{proof}
We define the map
\[\mathcal A_*[y_2,\rho^{\pm}]\to \pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes M{\mathbb Z})[\rho^{-1}]\]
via the map
\[\mathcal A_*[\rho^{\pm}]\cong\pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes BPGL)[\rho^{-1}]\to\pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes M{\mathbb Z})[\rho^{-1}]\]
by sending $y_2$ to the image of $\tau^2/\rho^2$ along the right unit map
\[\pi_{*,*}^{{\mathbb R}}(M{\mathbb Z})[\rho^{-1}]\to \pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes M{\mathbb Z})[\rho^{-1}]\]
This is an isomorphism as $M{\mathbb Z}[\rho^{-1}]\simeq i_*H{\mathbb F}_2[y_2][\rho^{-1}]$.
For the claims about the map $\phi$, note that Proposition \ref{propE_2splitting} implies that
\[\pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes M{\mathbb Z})\]
is $\rho$-torsion free, so $\phi$ is an injection. The map
\[\mathcal A_*\square_{\mathcal A(0)_*}{\mathbb F}_2\cong\pi_{*,0}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes M{\mathbb Z})\to \pi_{*,0}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes M{\mathbb Z})[\rho^{-1}]\cong \mathcal A_*[y_2]\]
is described by Proposition \ref{prophomologytwist} since the left and right presentations come from the canonical and $BPGL$ $H{\mathbb F}_2$-algebra structures, respectively. Now $x_1\mapsto \rho\xi_1$ since $\rho\xi_1$ is the only nonzero class in this bidegree.
\end{proof}
\begin{corollary}\label{corx_1rel}
In the ring $\pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes M{\mathbb Z})$, we have the relation
\[x_1^2=\zeta_1^2\rho^2+\tau^2\]
\end{corollary}
\begin{proof}
The map $\phi$ of the previous proposition is an injective ring map, and one has
\[\phi(\zeta_1^2\rho^2+\tau^2)=\zeta_1^2\rho^2+y_2\rho^2+y_2\rho^2=\phi(x_1^2)\qedhere\]
\end{proof}
\begin{corollary}\label{corE_2description}
The map
\[(\mathcal A_*\square_{\mathcal A(0)_*}{\mathbb F}_2)[\rho,x_1]\to\pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes M{\mathbb Z})\]
is an isomorphism of left $\mathcal A_*$-comodule algebras, where $\rho$ is primitive, and
\[\psi(x_1)=1\otimes x_1+\xi_1\otimes \rho\]
\end{corollary}
\begin{proof}
The source and target of the map are bigraded ${\mathbb F}_2$-vector spaces of the same (finite) graded dimension. It therefore suffices to show the map is surjective, and this follows from Corollary \ref{corx_1rel}, which implies that $\tau^2$ is in the image.
\end{proof}
\subsection{Differentials on subalgebras of the Steenrod algebra}\label{subsecdifferentials}
Corollary \ref{corE_2description} gives a description of our $E_2$-page \[E_2^{*,*,*}(BPGL\langle m\rangle;i_*H\mathbb F_2)\cong(\mathcal A_*\square_{\mathcal A(0)_*}{\mathbb F}_2)[\rho,x_1,\overline{v}_{1},\ldots,\overline{v}_{m}]\]
The classes $\rho,x_1,\overline{v}_{1},\ldots,\overline{v}_{m}$ are all permanent cycles, and the subalgebra
\[\mathcal A_*\square_{\mathcal A(m)_*}{\mathbb F}_2\subset \mathcal A_*\square_{\mathcal A(0)_*}{\mathbb F}_2\]
consists of permanent cycles by Corollary \ref{coredgeA(m)}. In this section, we describe a family of differentials in this spectral sequence on $\mathcal A_*\square_{\mathcal A(0)_*}{\mathbb F}_2$ that interpolate between $\mathcal A_*\square_{\mathcal A(0)_*}{\mathbb F}_2$ and $\mathcal A_*\square_{\mathcal A(m)_*}{\mathbb F}_2$.
For all $i$, $\mathcal A_*\square_{\mathcal A(i-1)_*}{\mathbb F}_2$ is a free $\mathcal A_*\square_{\mathcal A(i)_*}{\mathbb F}_2$-module given by
\[\mathcal A_*\square_{\mathcal A(i-1)_*}{\mathbb F}_2\cong (\mathcal A_*\square_{\mathcal A(i)_*}{\mathbb F}_2)\otimes E(\zeta_1^{2^{i}},\zeta_2^{2^{i-1}},\ldots,\zeta_{i+1})\]
We show that, for $i\le m$, each of these generators $\zeta_{j}^{2^{i+1-j}}$ supports a $d_{2^{i+1}-1}$ differential (Theorem \ref{differentialsthm}). Each of the squares of these generators is then a cycle, and we are left with $\mathcal A_*\square_{\mathcal A(i)_*}{\mathbb F}_2$ on $E_{2^{i+1}}$, along with various Massey products (see Definition \ref{defx_m}) due to the fact that the values of $d_{2^{i+1}-1}$ on these generators are not algebraically independent. We deduce from these differentials that the image of the edge homomorphism is \textit{precisely} $\mathcal A_*\square_{\mathcal A(m)_*}{\mathbb F}_2$ (Theorem \ref{edgethm}).
To determine these differentials, we use an inductive argument on $m$ and Corollary \ref{coredgeA(m)} to deduce that the differential $d_{2^{m+1}-1}$ must take classes in $\mathcal A_*\square_{\mathcal A(m-1)_*}{\mathbb F}_2$ to a $\rho$-torsion free part of $E_{2^{m+1}-1}$. The differential is then determined by the structure of the $\rho$-localized spectral sequence, which we determine completely (Proposition \ref{proplocalizedSS}).
In this section, we use the following notation for our HSSS's.
\aln{
E_r^{*,*,*}\langle m\rangle&:=E_r^{*,*,*}(BPGL\langle m\rangle;i_*H{\mathbb F}_2)\\
E_r^{*,*,*}\langle m\rangle[\rho^{-1}]&:=E_r^{*,*,*}(BPGL\langle m\rangle;i_*H{\mathbb F}_2[\rho^{-1}])
}
\subsubsection{The localized spectral sequence}\label{subsubseclocalized}
We begin by determining the structure of the localized spectral sequence
\[E_r[\rho^{-1}]=E_r^{*,*,*}(BPGL;i_*H{\mathbb F}_2[\rho^{-1}])\]
The spectral sequence $E_r\langle m\rangle[\rho^{-1}]$ for $m$ finite
can be read off from this by setting $f_i=0$ for $i>m$.
\begin{proposition}\label{proplocalizedSS}
In the spectral sequence $E_r[\rho^{-1}]$, we have
\begin{enumerate}
\item $E_2[\rho^{-1}]\cong\mathcal A_*[y_2,f_1,f_2,\ldots][\rho^{\pm}]$,
where
\begin{itemize}
\item $f_m=\rho^{2^m-1}\overline{v}_{m}$ so that $|f_m|=(2^m-1,0,2^m-1)$
\item $\mathcal A_*$ is the image of the edge homomorphism
\[\mathcal A_*\cong\pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes BPGL)[\rho^{-1}]\to\pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes M{\mathbb Z})[\rho^{-1}]=\mathcal A_*[y_2]\]
and $y_2$ is the image of the class $\tau^2/\rho^2$ along the map
\[\pi_{*,*}^{{\mathbb R}}(M{\mathbb Z})[\rho^{-1}]\to \pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes M{\mathbb Z})[\rho^{-1}]\]
\item If $q\in\mathcal A_i$, the tridegree of $q$ is $(i,0,-i)$, and $|y_2|=(2,0,-2)$\\
\end{itemize}
\item The differential $d_k[\rho^{-1}]=0$ unless $k=2^{m+1}-1$ for $m\ge 1$, and
\[E_{2^{m+1}-1}[\rho^{-1}]=\mathcal A_*[y_2^{2^{m-1}},f_m,f_{m+1},\ldots][\rho^{\pm}]\]
\item The differential $d_{2^{m+1}-1}[\rho^{-1}]$ is determined by
\[d_{2^{m+1}-1}[\rho^{-1}](y_2^{2^{m-1}})=f_m\]
\end{enumerate}
\end{proposition}
\begin{proof}
The description of $E_2[\rho^{-1}]$ follows from Proposition \ref{propE_2localized}. The image of the edge homomorphism consists of permanent cycles, so the only algebra generator that is not a permanent cycle is $y_2$, and since the spectral sequence must converge to
\[\pi_{*,*}^{{\mathbb R}}(i_*H{\mathbb F}_2\otimes BPGL)[\rho^{-1}]\cong\mathcal A_*[\rho^{\pm}]\]
each $f_i$ must be hit by a differential. For degree reasons, the claimed pattern of differentials is the only possibility.
\end{proof}
Proposition \ref{propE_2localized} also gives us a description of the map $E_2\to E_2[\rho^{-1}]$, and there are important permanent cycles in $E_2^{*,*,*}(BPGL;i_*H{\mathbb F}_2)$ that lift the classes $\rho\xi_m$ along this map.
\begin{definition}\label{defx_m}
Define $x_0=\rho$, $x_1$ as in Definition \ref{defx_1}, and inductively
\[x_m=\sum\limits_{i=0}^{m-1}x_i\zeta_{m-i}^{2^i}\]
for $m>1$. The tridegree of $x_m$ is $(2^m-2,-1,2-2^m)$ in $E_r^{*,*,*}(BPGL;i_*H{\mathbb F}_2)$.
\end{definition}
\begin{proposition}\label{propmaptolocalized}
The map $E_2\to E_2[\rho^{-1}]$ sends
\begin{align*}
\zeta_1^2&\mapsto \zeta_1^2+y_2
&\zeta_i &\mapsto \zeta_i+\zeta_{i-1}y_2^{2^{i-2}}
&x_m&\mapsto \rho\xi_m
\end{align*}
\end{proposition}
\begin{proof}
The first two follow immediately from Proposition \ref{propE_2localized}, and the claim
\[x_m\mapsto \rho\xi_m\]
follows by induction on $m$.
\end{proof}
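For instance, $x_2=\rho\zeta_2+x_1\zeta_1^2$, and the formulas above give
\[x_2\mapsto\rho(\zeta_2+\zeta_1y_2)+\rho\zeta_1(\zeta_1^2+y_2)=\rho(\zeta_2+\zeta_1^3)=\rho\xi_2,\]
using $\xi_2=\zeta_2+\zeta_1^3$; the inductive step for general $m$ is identical.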
The fact that $E_2\to E_2[\rho^{-1}]$ is an injection also gives us the coactions on these classes.
\begin{corollary}\label{corcoactionx_m}
On $E_2$, the classes $x_m$ satisfy the following coaction formula
\aln{
\psi(x_0)&=1\otimes x_0\\
\psi(x_m)&=\sum\limits_{i=0}^m\xi_i^{2^{m-i}}\otimes x_{m-i}
}
\end{corollary}
\subsubsection{Differentials on \texorpdfstring{\((\mathcal{A}(m)/\!/\mathcal{A}(m-1))^\ast\)}{A of m mod A of m-1}}
The cofiber sequences
\[\Sigma^{2(2^m-1),2^m-1}BPGL\langle m\rangle\xrightarrow{\cdot\overline{v}_{m}} BPGL\langle m\rangle\to BPGL\langle m-1\rangle\]
allow us to set up an inductive argument to determine a family of differentials in $E_r^{*,*,*}\langle m\rangle$. We first state a standard lemma on morphisms of spectral sequences.
\begin{lemma}\label{filteredlemma}
Suppose $E_r^{s,t}(1)\to E_r^{s,t}(2)$ is a morphism of spectral sequences with the property that the map is an isomorphism for $t<N$ when $r=2$. Then
\begin{enumerate}
\item The map $E_r^{s,t}(1)\to E_r^{s,t}(2)$ is an isomorphism whenever $t+r<N+2$.
\item The map $E_r^{s,t}(1)\to E_r^{s,t}(2)$ is an injection whenever $t<N$.
\end{enumerate}
\end{lemma}
In our case, we have the following.
\begin{proposition}\label{prop:EquivOfSlices}
For \(t< 2^{m+1}-2\), the map
\[
P^tBPGL\langle m\rangle\to P^tBPGL\langle m-1\rangle
\]
is an equivalence.
\end{proposition}
\begin{proof}
We have a cofiber sequence of \(BPGL\)-modules
\[
\Sigma^{2(2^m-1),2^m-1}BPGL\langle m\rangle\xrightarrow{\bar{v}_{m}}BPGL\langle m\rangle\to BPGL\langle m-1\rangle
\]
The source of \(\bar{v}_{m}\)-multiplication is slice \(2(2^{m}-1)\)-connective, and the result follows.
\end{proof}
We begin by showing that each of the classes $\zeta_j^{2^{i+1-j}}$ survives to $E_{2^{i+1}-1}$.
\begin{proposition}\label{dlower}
For $m\ge 1$, in $E_r^{*,*,*}\langle m\rangle$,
\[d_r(\zeta_j^{2^{i+1-j}})=0\]
for all $1\le i\le m$, $1\le j\le i+1$, and $r<2^{i+1}-1$.
\end{proposition}
\begin{proof}
We proceed by induction on $m$. When $m=1$, this follows from the fact that $d_2=0$; in fact $d_{2k}=0$ for all $k$ and $m$ as the odd slices of $BPGL\langle m\rangle$ vanish. By Proposition \ref{prop:EquivOfSlices} and Lemma \ref{filteredlemma}, the map of spectral sequences
\[E_r^{s,0,t}\langle m\rangle\to E_r^{s,0,t}\langle m-1\rangle\]
is an injection whenever $t<2^{m+1}-2$. The classes $\zeta_j^{2^{i+1-j}}$ have $t=0$, and thus the $i<m$ case of the proposition follows by the inductive hypothesis. The $i=m$ case then follows in the same way from the fact that the classes $\zeta_j^{2^{m+1-j}}$ are permanent cycles in $E_r^{*,*,*}\langle m-1\rangle$ by Corollary \ref{coredgeA(m)}.
\end{proof}
We know, therefore, that the first possible nonzero differential on $\zeta_j^{2^{i+1-j}}$ is a $d_{2^{i+1}-1}$. We show that this differential is nonzero and can be read off from the corresponding differential in the localized spectral sequence $E_r^{*,*,*}\langle m\rangle[\rho^{-1}]$. For this, we need to know that the differential lands in a $\rho$-torsion free part of the spectral sequence, for which we need the following lemma.
\begin{lemma}\label{subspacelemma}
In $E_k^{*,*,*}\langle m\rangle$, for $k<2^{m+1}-1$, there are no differentials of the form
\[d_k(x)=\overline{v}_m p(\rho,x_1)q(\zeta_i)+r(\overline{v}_{i},\rho,x_1)s(\zeta_i)\]
where
\aln{
q(\zeta_i),s(\zeta_i)&\in\mathcal A_*\square_{\mathcal A(0)_*}{\mathbb F}_2\\
p(\rho,x_1)&\in{\mathbb F}_2[\rho,x_1]\\
r(\overline{v}_{i},\rho,x_1)&\in{\mathbb F}_2[\rho,x_1,\overline{v}_{1},\ldots,\overline{v}_{m-1}]
}
such that $pq$ is nonzero.
\end{lemma}
\begin{proof}
Suppose to the contrary that, for some $k<2^{m+1}-1$, there is some nonzero differential $d_k(x)=y$ for $y$ of the above form, and let $k$ be minimal with respect to this property. It follows that $y$ is $\rho$-torsion free; since $y$ is $\rho$-torsion free on $E_2$, if we had $\rho^Ny=0\in E_k$, we must have a nonzero differential $d_{k'}(x')=\rho^Ny$ for some $k'<k$, contradicting minimality of $k$.
Since $y\in E_k$ is $\rho$-torsion free, its image in the localized spectral sequence $E_k^{*,*,*}\langle m\rangle[\rho^{-1}]$ is nonzero. By naturality, this determines a nonzero differential $d_k$ in the localized spectral sequence. This contradicts the description of the differentials given in Proposition \ref{proplocalizedSS}.
\end{proof}
\begin{remark}
In particular, taking $rs=0$ in the lemma, it follows that the subspace
\[B_m:=\overline{v}_{m}\cdot\mathrm{Sym}^{2^m-1}({\mathbb F}_2\{\rho,x_1\})\cdot (\mathcal A_*\square_{\mathcal A(0)_*}{\mathbb F}_2)\]
of $E_2$ receives no differentials $d_r$ for $r<2^{m+1}-1$. In particular, the subspace of $B_m$ consisting of $d_1,\ldots,d_r$-cycles for $r<2^{m+1}-1$ is a $\rho$-torsion free subspace of $E_{2^{m+1}-1}$, since $B_m$ is $\rho$-torsion free on $E_2$.
\end{remark}
\begin{proposition}\label{v_npure}
In $E_{2^{m+1}-1}\langle m\rangle$, the composition
\[\mathcal A_*\square_{\mathcal A(m-1)_*}{\mathbb F}_2\hookrightarrow E_{2^{m+1}-1}\xrightarrow{d_{2^{m+1}-1}}E_{2^{m+1}-1}\]
lands in the subspace of
\[\overline{v}_{m}\cdot\mathrm{Sym}^{2^m-1}({\mathbb F}_2\{\rho,x_1\})\cdot (\mathcal A_*\square_{\mathcal A(0)_*}{\mathbb F}_2)\]
that survives to $E_{2^{m+1}-1}$.
\end{proposition}
\begin{proof}
The class $z:=d_{2^{m+1}-1}(\zeta_i^{2^{m+1-i}})$ may be written as a sum of monomials of the form
\[y:=\rho^ax_1^bm(\overline{v}_{i})q(\zeta_i)\]
written as an element in a subquotient of $E_2$, where $m(\overline{v}_{i})$ is a monomial in ${\mathbb F}_2[\overline{v}_{1},\ldots,\overline{v}_{m}]$ and $q(\zeta_i)$ is a monomial in $\mathcal A_*\square_{\mathcal A(0)_*}{\mathbb F}_2$. Note that the class $\zeta_i^{2^{m+1-i}}$ has $t=0$, hence $y$ has $t=2^{m+1}-2$. It follows that $\overline{v}_{m}\cdot\overline{v}_{j}$ does not divide $m(\overline{v}_{i})$ for any $j\le m$.
We therefore have that $z$ can be written as
\[\overline{v}_{m}p(\rho,x_1)q(\zeta_i)+r(\rho,x_1,\overline{v}_{i})s(\zeta_i)\]
as in the lemma. The class $\overline{v}_{m}p(\rho,x_1)q(\zeta_i)$ lies in
\[\overline{v}_{m}\cdot\mathrm{Sym}^{2^m-1}({\mathbb F}_2\{\rho,x_1\})\cdot (\mathcal A_*\square_{\mathcal A(0)_*}{\mathbb F}_2)\]
since $z$ is in weight zero. Note that $pq$ is nonzero by naturality and the fact that
\[d_{2^{m+1}-1}[\rho^{-1}](y_2^{2^{m-1}})=f_m\]
by Proposition \ref{proplocalizedSS}. We claim that $r(\overline{v}_{i},\rho,x_1)s(\zeta_i)=0\in E_{2^{m+1}-1}$, completing the proof.
If $r(\overline{v}_{i},\rho,x_1)s(\zeta_i)\neq 0\in E_{2^{m+1}-1}$, since
\[\zeta_i^{2^{m+1-i}}\in E_r^{*,*,*}\langle m-1\rangle\]
is a permanent cycle, we must have that $r(\oldwidehaterline{v}_{i},\rho,x_1)s(\zeta_i)=0$ on $E_{2^{m+1}-1}^{*,*,*}\langle m-1\rangle$. Hence there must be a differential in the latter spectral sequence of the form
\[d_k(x)=r(\overline{v}_{i},\rho,x_1)s(\zeta_i)\]
for $k<2^{m+1}-1$. The class $x$ lies in
\[E_k^{2^{m+1-i}-1-k,0,2^{m+1}-1-k}\langle m-1\rangle\]
and by Lemma \ref{filteredlemma} (1), the map
\[E_k^{2^{m+1-i}-1-k,0,2^{m+1}-1-k}\langle m\rangle\to E_k^{2^{m+1-i}-1-k,0,2^{m+1}-1-k}\langle m-1\rangle\]
is an isomorphism. Therefore, the differential $d_k(x)$ lifts to a differential in $E_k^{*,*,*}\langle m\rangle$ of the form
\[d_k(\tilde{x})=\overline{v}_{m}p'(\rho,x_1)q'(\zeta_i)+r'(\rho,x_1,\overline{v}_{i})s'(\zeta_i)\]
But we must have $p'q'=0$ by Lemma \ref{subspacelemma}, and $E_k^{*,*,*}\langle m\rangle\to E_k^{*,*,*}\langle m-1\rangle$ sends
\[r'(\rho,x_1,\overline{v}_{i})s'(\zeta_i)\mapsto r(\rho,x_1,\overline{v}_{i})s(\zeta_i)\]
An argument similar to that of Lemma \ref{subspacelemma} shows that $r's'=rs\in E_k\langle m\rangle$.
\end{proof}
\begin{theorem}\label{differentialsthm}
In $E_{2^{i+1}-1}^{*,*,*}(BPGL\langle m\rangle;i_*H{\mathbb F}_2)$, we have the differentials
\[d_{2^{i+1}-1}(\zeta_j^{2^{i+1-j}})=\overline{v}_{i}\rho^{2^i-1}\bigg(p_{j-1}\bigg(\frac{x_1}{\rho},\ldots,\frac{x_{j-1}}{\rho}\bigg)\bigg)^{2^{i+1-j}}\]
for all $1\le i\le m$ and $1\le j\le i+1$, where $p_j$ is the polynomial
\[\zeta_{j}=p_j(\xi_1,\ldots,\xi_j)\]
given by the inversion formulas in the Hopf algebra $\mathcal A_*$.
\end{theorem}
\begin{proof}
We know from Lemma \ref{subspacelemma} and Proposition \ref{v_npure} that these differentials land in a $\rho$-torsion free part of the spectral sequence, so we use naturality and Proposition \ref{proplocalizedSS}. In particular, the map $E_2\to E_2[\rho^{-1}]$ sends
\[\zeta_j^{2^{i+1-j}}\mapsto \zeta_j^{2^{i+1-j}}+\zeta_{j-1}^{2^{i+1-j}}y_2^{2^{i-1}}\]
and
\[d_{2^{i+1}-1}[\rho^{-1}](\zeta_j^{2^{i+1-j}}+\zeta_{j-1}^{2^{i+1-j}}y_2^{2^{i-1}})=\zeta_{j-1}^{2^{i+1-j}}f_i\]
Since the map $E_2\to E_2[\rho^{-1}]$ sends $x_k\mapsto\rho\xi_k$ (Proposition \ref{propmaptolocalized}) and $p_{j-1}(\xi_1,\ldots,\xi_{j-1})=\zeta_{j-1}$, the righthand side of the claimed formula also maps to $\zeta_{j-1}^{2^{i+1-j}}f_i$, and the stated differential follows by injectivity on this $\rho$-torsion free part.
\end{proof}
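For instance, writing out the theorem in the lowest cases (with the inversion formulas $p_0=1$, $p_1(\xi_1)=\xi_1$ and $p_2(\xi_1,\xi_2)=\xi_2+\xi_1^3$), one finds
\aln{
d_3(\zeta_1^2)&=\overline{v}_{1}\rho,\qquad d_3(\zeta_2)=\overline{v}_{1}x_1,\\
d_7(\zeta_1^4)&=\overline{v}_{2}\rho^3,\qquad d_7(\zeta_2^2)=\overline{v}_{2}\rho x_1^2,\qquad d_7(\zeta_3)=\overline{v}_{2}(\rho^2x_2+x_1^3)
}
for $m\ge1$ (the $d_7$'s requiring $m\ge2$).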
\begin{remark}\label{capremark}
There is another way to view these differentials. The composition
\[\mathcal A_*\square_{\mathcal A(i-1)_*}{\mathbb F}_2\hookrightarrow E_{2^{i+1}-1}\xrightarrow{d_{2^{i+1}-1}}\Sigma E_{2^{i+1}-1}\to \Sigma E_{2^{i+1}-1}[\rho^{-1}]\]
lands in $\Sigma\mathcal A_*\cdot f_i\cong \mathcal A_{*-2^i}$, and the last map in the composition is an injection on
\[d_{2^{i+1}-1}(\mathcal A_*\square_{\mathcal A(i-1)_*}{\mathbb F}_2)\]
by Lemma \ref{subspacelemma} and Proposition \ref{v_npure}. The formulas given in the theorem tell us this map coincides with the $\mathcal A_*$-comodule map
\[\mathcal A_*\square_{\mathcal A(i-1)_*}{\mathbb F}_2\xrightarrow{-\cap Sq^{2^i}}\mathcal A_{*-2^i}\]
Indeed, we have
\aln{
\Delta(\zeta_j^{2^{i+1-j}})&=\sum\limits_{l+k=j}\zeta_l^{2^{i+1-j}}\otimes\zeta_k^{2^{i+1-j+l}}\\
&\equiv \sum\limits_{l+k=j}\zeta_l^{2^{i+1-j}}\otimes(\xi_1^{2^k-1})^{2^{i+1-j+l}}\in\mathcal A_*\otimes\mathcal A_*/(\xi_2,\xi_3,\ldots)\\
&\equiv \sum\limits_{l+k=j}\zeta_l^{2^{i+1-j}}\otimes\xi_1^{2^{i+1}-2^{i+1-j+l}}\in\mathcal A_*\otimes\mathcal A_*/(\xi_2,\xi_3,\ldots)
}
where we have used the congruence
\[\zeta_k\equiv\xi_1^{2^k-1}\mod (\xi_2,\xi_3,\ldots)\]
The right-hand tensor factor is $\xi_1^{2^i}$ if and only if $l=j-1$, so pairing on the right with $Sq^{2^i}$ gives $\zeta_{j-1}^{2^{i+1-j}}$.
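For instance, when $i=j=1$ the formula gives $\Delta(\zeta_1^2)\equiv \zeta_1^2\otimes 1+1\otimes\xi_1^2$ modulo $(\xi_2,\xi_3,\ldots)$ in the right-hand factor, and pairing on the right with $Sq^2$ returns $\zeta_0^2=1$, consistent with the differential $d_3(\zeta_1^2)=\rho\overline{v}_1$.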
\end{remark}
\begin{theorem}\label{edgethm}
The image of the edge homomorphism in weight zero in
\[E_\infty^{*,*,*}(BPGL\langle m\rangle;i_*H{\mathbb F}_2)\]
is precisely $\mathcal A_*\square_{\mathcal A(m)_*}{\mathbb F}_2$.
\end{theorem}
\begin{proof}
We proceed by induction, where the case $m=0$ is clear. By induction, Proposition \ref{dlower}, and Lemma \ref{filteredlemma} (1), we see that a class $x\in \mathcal A_*\square_{\mathcal A(0)_*}{\mathbb F}_2$ is a $d_2,\ldots,d_{2^{m+1}-2}$-cycle if and only if $x\in \mathcal A_*\square_{\mathcal A(m-1)_*}{\mathbb F}_2$. As in Remark \ref{capremark}, the restriction of $d_{2^{m+1}-1}$ to $\mathcal A_*\square_{\mathcal A(m-1)_*}{\mathbb F}_2$ may be identified with the map
\[\mathcal A_*\square_{\mathcal A(m-1)_*}{\mathbb F}_2\xrightarrow{-\cap Sq^{2^m}}\mathcal A_{*-2^m}\]
The kernel of this map is precisely $\mathcal A_*\square_{\mathcal A(m)_*}{\mathbb F}_2$ because $\mathcal A_*\square_{\mathcal A(m)_*}{\mathbb F}_2$ is dual to the quotient $\mathcal A/\!/\mathcal A(m)$, and $\mathcal{A}(m)$ is the subalgebra of the Steenrod algebra generated by the classes
\[\{Sq^{2^i}\text{ : } 0\le i\le m\}\qedhere\]
\end{proof}
\section{The homology of $\Gamma(BPGL\langle m\rangle)$ for $m\le 3$}\label{seccomputations}
In Section \ref{secdifferentials}, we determined the $E_2$ page of the HSSS for $BPGL\langle m\rangle$, the image of the edge homomorphism, and a family of differentials. Modulo comodule algebra extension problems, this reduces the computation of $H_*\Gamma(BPGL\langle m\rangle)$ to two problems:
\begin{enumerate}
\item Show that the differentials of Theorem \ref{differentialsthm} are \textit{all} the differentials in the HSSS for $BPGL\langle m\rangle$. In particular, show that this spectral sequence collapses on $E_{2^{m+1}}$.
\item Compute homology with respect to these differentials.
\end{enumerate}
We know of no way to solve problem (1) except by way of our knowledge of the image of the edge homomorphism from Section \ref{subsecimageedgehom} and sparseness arguments afforded by solving problem (2) for small values of $m$. Problem (2) is purely algebraic; however, the computation quickly becomes very complicated as $m$ grows, and it requires the use of algebraic Bockstein spectral sequences.
Nevertheless we solve both problems for $m\le 3$, completely determining the $E_\infty$ page of the HSSS as a comodule algebra. Moreover, we solve all comodule algebra extension problems for $m\le 2$. We can solve the extension problems for $m=3$ case by case in an ad hoc manner, but the number of such extensions is very large, so we do not attempt to include such a computation here.
When $m=1$, $\mathrm{kgl}$ is a form of $BPGL\langle 1\rangle$ and $\Gamma(\mathrm{kgl})\simeq \mathrm{ko}$, so this gives a quick computation of $H_*\mathrm{ko}$ that does not require knowledge of $\pi_*\mathrm{ko}$ or the Wood cofiber sequence. When $m=2$, we have an equivalence
\[\Gamma(BPGL\langle 2\rangle)\simeq BP_{\mathbb R}\langle 2\rangle^{C_2}\]
by Proposition \ref{propfixedpointsglobalsec}, and it is a theorem of Hill and Meier that $\mathrm{tmf}_1(3)$ is a form of $BP_{\mathbb R}\langle 2\rangle$ \cite{hillmeier}. Since $\mathrm{tmf}_1(3)^{C_2}\simeq \mathrm{tmf}_0(3)$, we give a complete computation of the comodule algebra
\[H_*\mathrm{tmf}_0(3)\]
which is new (Theorem \ref{thmt03}). We use this to deduce a Wood-type splitting of $\mathrm{tmf}$-modules
\[\mathrm{tmf}\wedge X\simeq \mathrm{tmf}_0(3)\]
where $X$ is a certain 10-cell complex that was predicted by Davis and Mahowald \cite{davismahowald}.
When $m=3$, it is not known if $\Gamma(BPGL\langle 3\rangle)$ even admits a ring structure. However, we may use the ring structure of $BPGL$ and the fact that the HSSS of $BPGL\langle 3\rangle$ is a module over that of $BPGL$.
\subsection{The homology of $\Gamma(BPGL\langle 1\rangle)$}\label{subsecn=1computation}
Combining Corollary \ref{corE_2description}, Theorem \ref{differentialsthm}, and Corollary \ref{coredgeA(m)}, we have the following.
\begin{theorem}
There is a spectral sequence of $\mathcal A_*$-comodule algebras with $E_2$-term given by
\[E_2^{*,*,*}={\mathbb F}_2[\zeta_1^2,\zeta_2,\ldots][\rho,x_1,\overline{v}_1]\]
that converges to $H_*\Gamma(BPGL\langle 1\rangle)$. We have $d_3$-differentials
\begin{align*}
d_3(\zeta_1^2)&=\rho\overline{v}_1
&d_3(\zeta_2)&=x_1\overline{v}_1
\end{align*}
and the subalgebra generated by
\[\rho,x_1,x_2,\overline{v}_1,\zeta_1^4,\zeta_2^2,\zeta_3,\ldots\]
consists of permanent cycles.
\end{theorem}
We display the $E_3$ page of this spectral sequence in Figure \ref{E3kr}. The black dots represent classes in the dual Steenrod algebra, and brown dots represent classes divisible by $\overline{v}_1$. Brown structure lines represent multiplication by $\rho\overline{v}_1$, which detects $\eta\in\pi_1S^0$ (see \cite{LSWX}), and we have drawn the vanishing lines of Proposition \ref{propvanishinglines} in green. It is straightforward to compute homology with respect to these differentials.
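For instance, the Leibniz rule gives $d_3(\zeta_1^2\zeta_2)=\rho\overline{v}_1\zeta_2+x_1\overline{v}_1\zeta_1^2$, while $\rho\zeta_2+x_1\zeta_1^2$ is a $d_3$-cycle whose square is $\rho^2\zeta_2^2+x_1^2\zeta_1^4$; compare the last relation in the corollary below.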
\begin{sseqdata}[ name = E3kr, Adams grading, classes = {fill, show name=below},
grid = go, xrange ={0}{7},yrange={-7}{7},xscale=1,yscale=1,x tick step =2, y tick step =2,run off differentials = {->},struct lines = red!25!brown ]
\class[name = 1](0,0)
\class[red!25!brown,name = \rho\overline{v}_1](1,1)
\DoUntilOutOfBoundsThenNMore{2}{
\class[red!25!brown](\lastx+1,\lasty+1)
\text{ : }ructline
}
\text{ : }ructline(0,0)(1,1)
\class[red!25!brown,name = x_1\overline{v}_1](2,0)
\DoUntilOutOfBoundsThenNMore{3}{
\class[red!25!brown](\lastx+1,\lasty+1)
\text{ : }ructline
}
\class[name = \zeta_1^2](2,-2)
\DoUntilOutOfBoundsThenNMore{3}{
\d3
\class[red!25!brown](\lastx+1,\lasty+1)
\text{ : }ructline
}
\class[red!25!brown](4,0)
\DoUntilOutOfBoundsThenNMore{3}{
\class[red!25!brown](\lastx+1,\lasty+1)
\text{ : }ructline
}
\class[name = \zeta_2](3,-3)
\DoUntilOutOfBoundsThenNMore{3}{
\d3
\class[red!25!brown](\lastx+1,\lasty+1)
\text{ : }ructline
}
\class[red!25!brown](4,-2)
\DoUntilOutOfBoundsThenNMore{3}{
\d3
\class[red!25!brown](\lastx+1,\lasty+1)
\text{ : }ructline
}
\class[name=\zeta_1^4](4,-4)
\DoUntilOutOfBoundsThenNMore{3}{
\class[red!25!brown](\lastx+1,\lasty+1)
\text{ : }ructline
}
\class[red!25!brown](5,-3)
\DoUntilOutOfBoundsThenNMore{3}{
\d3(\lastx,\lasty,2,2)
\class[red!25!brown](\lastx+1,\lasty+1)
\text{ : }ructline
}
\class[red!25!brown](6,-2)
\DoUntilOutOfBoundsThenNMore{3}{
\d3(\lastx,\lasty,3,2)
\class[red!25!brown](\lastx+1,\lasty+1)
\text{ : }ructline
}
\class[red!25!brown](6,0)
\DoUntilOutOfBoundsThenNMore{3}{
\class[red!25!brown](\lastx+1,\lasty+1)
\text{ : }ructline
}
\class(5,-5)
\DoUntilOutOfBoundsThenNMore{3}{
\d3
\class[red!25!brown](\lastx+1,\lasty+1)
\text{ : }ructline
}
\class[red!25!brown](6,-4)
\DoUntilOutOfBoundsThenNMore{3}{\d3
\class[red!25!brown](\lastx+1,\lasty+1)
\text{ : }ructline
}
\class[red!25!brown](7,-3)
\DoUntilOutOfBoundsThenNMore{3}{
\d3(\lastx,\lasty,3,3)
\class[red!25!brown](\lastx+1,\lasty+1)
\text{ : }ructline
}
\class(6,-6)
\DoUntilOutOfBoundsThenNMore{3}{
\d3
\class[red!25!brown](\lastx+1,\lasty+1)
\text{ : }ructline
}
\class[name=\zeta_2^2](6,-6)
\DoUntilOutOfBoundsThenNMore{3}{
\class[red!25!brown](\lastx+1,\lasty+1)
\text{ : }ructline
}
\class[red!25!brown](7,-5)
\DoUntilOutOfBoundsThenNMore{3}{\d3(\lastx,\lasty,3,2)
\class[red!25!brown](\lastx+1,\lasty+1)
\text{ : }ructline
}
\class(7,-7)
\DoUntilOutOfBoundsThenNMore{3}{
\d3
\class[red!25!brown](\lastx+1,\lasty+1)
\text{ : }ructline
}
\class(8,-8)
\DoUntilOutOfBoundsThenNMore{3}{
\d3(\lastx,\lasty,1,2)
\class[red!25!brown](\lastx+1,\lasty+1)
\text{ : }ructline
}
\class(48,48)
\class(60,-60)
\text{ : }ructline[green!38!black](0,0)(60,-60)
\end{sseqdata}
\begin{figure}
\caption{The $E_3$ page of the HSSS for $BPGL\langle 1\rangle$.}
\label{E3kr}
\end{figure}
\begin{corollary}
In the HSSS of $BPGL\langle 1\rangle$, we have
\[E_\infty=E_4=\frac{{\mathbb F}_2[\zeta_1^4,\zeta_2^2,\zeta_3,\ldots][\rho,x_1,x_2,\overline{v}_1]}{(\rho\overline{v}_1,x_1\overline{v}_1,x_2\overline{v}_1,x_2^2+\rho^2\zeta_2^2+x_1^2\zeta_1^4)}\]
\end{corollary}
In weight zero, the relations imply that the generators in nonzero weights contribute nothing to $E_\infty$, and we recover the classical computation of the homology of $\mathrm{ko}$ (displayed in Figure \ref{einftykr}). Each nonzero stem has exactly one nonzero filtration, so there are no nontrivial comodule algebra extensions.
\begin{sseqdata}[ name = Einftykr, Adams grading, classes = {fill, show name=below},
grid = go, xrange ={0}{9},yrange={-9}{1},xscale=0.5,yscale=0.5,x tick step =2, y tick step =2]
\class[name = 1](0,0)
\class[name=\zeta_1^4](4,-4)
\class[name=\zeta_2^2](6,-6)
\class[name=\zeta_3](7,-7)
\class[name=\zeta_1^8](8,-8)
\class(48,48)
\class(60,-60)
\text{ : }ructline[green!50!black](0,0)(48,48)
\text{ : }ructline[green!50!black](0,0)(60,-60)
\end{sseqdata}
\begin{figure}
\caption{The $E_\infty$ page of the HSSS for $BPGL\langle 1\rangle$.}
\label{einftykr}
\end{figure}
\begin{corollary}
There is an isomorphism of $\mathcal{A}_*$-comodule algebras
\[H_*(\mathrm{ko})\cong\mathcal{A}_*\square_{\mathcal{A}(1)_*}{\mathbb F}_2={\mathbb F}_2[\zeta_1^4,\zeta_2^2,\zeta_3,\ldots]\]
\end{corollary}
\subsection{The homology of $\Gamma(BPGL\langle 2\rangle)$}\label{subsecn=2computation}
We move to height 2, and we display the $E_7$ page of the HSSS in Figure \ref{e7bp2}. As before, black dots represent classes in the dual Steenrod algebra, and red dots represent classes divisible by $\overline{v}_2$. Red structure lines represent multiplication by $\rho^3\overline{v}_2$, which detects $\nu\in\pi_3S^0$, and we have drawn the vanishing lines of Proposition \ref{propvanishinglines} in green.
\begin{theorem}
There is a spectral sequence of $\mathcal A_*$-comodule algebras with
$E_{2}$-term given by
\[E_2^{*,*,*}={\mathbb F}_2[\zeta_1^2,\zeta_2,\ldots][\rho,x_1,\overline{v}_1,\overline{v}_2]\]
that converges to $H_*\Gamma(BPGL\langle 2\rangle)$. We have differentials
\begin{align*}
d_3(\zeta_1^2)&=\rho\overline{v}_1
&d_3(\zeta_2)&=x_1\overline{v}_1
&d_7(\zeta_1^4)&=\rho^3\overline{v}_2\\
d_7(\zeta_2^2)&=\rho x_1^2\overline{v}_2
&d_7(\zeta_3)&=(x_1^3+\rho^2x_2)\overline{v}_2
\end{align*}
The subalgebra of $E_2$ generated by the classes
\[\rho,x_1,x_2,x_3,\overline{v}_1,\overline{v}_2,\zeta_1^8,\zeta_2^4,\zeta_3^2,\zeta_4,\ldots\]
consists of permanent cycles.
\end{theorem}
The formulas for $d_3$ imply that, as a dga, one has an isomorphism
\[E_3^{*,*,*}(BPGL\langle 2\rangle;i_*H{\mathbb F}_2)\cong E_3^{*,*,*}(BPGL\langle 1\rangle;i_*H{\mathbb F}_2)[\overline{v}_2]\]
and we deduce the following.
\begin{corollary}
The $E_{4}=E_7$ page is given by
\[
\frac{{\mathbb F}_2[\zeta_1^4,\zeta_2^2,\zeta_3,\ldots]
[\rho,x_1,x_2,\overline{v}_{1},\overline{v}_{2}]}{(\rho\overline{v}_1,x_1\overline{v}_1,x_2\overline{v}_1,x_2^2+\rho^2\zeta_2^2+x_1^2\zeta_1^4)}\]
\end{corollary}
Computing homology with respect to $d_7$ is much trickier because, unlike $E_3$, the page $E_7$ is not $\rho$-torsion free, and hence there are a number of Massey products one must take into account. For example, we have the $d_7$-cycle
\[\overline{v}_1\zeta_1^4\in\langle \overline{v}_1,\rho^3,\overline{v}_2\rangle\]
We are interested primarily in the computation in weight zero, however, and none of these classes can contribute to stems of weight zero. Indeed, we observe the following.
\begin{proposition}\label{propwithrelationslist}
The map of dga's
\[E_7\to E_7/(\overline{v}_1)=\frac{{\mathbb F}_2[\zeta_1^4,\zeta_2^2,\zeta_3,\ldots]
[\rho,x_1,x_2,\overline{v}_{2}]}{(x_2^2+\rho^2\zeta_2^2+x_1^2\zeta_1^4)}\]
is an isomorphism in weights $\le0$.
\end{proposition}
\begin{proof}
The relations $\rho\overline{v}_1=x_1\overline{v}_1=x_2\overline{v}_1=0$ imply the map is an isomorphism in weights $\le0$. It is a map of dga's because the ideal $(\overline{v}_1)\subset E_7$ is contained in $\ker(d_7)$.
\end{proof}
We therefore instead compute the homology of the simpler dga $(E_7/(\overline{v}_1),d_7)$. We claim that $\ker(d_7)$ is the subalgebra of $E_7/(\overline{v}_1)$ generated by the classes
\[\rho,x_1,x_2,x_3,\overline{v}_2,\zeta_1^8,\zeta_2^4,\zeta_3^2,\zeta_4,\ldots\]
It follows that we have the presentation
\[
\ker(d_7)=\frac{{\mathbb F}_2[\zeta_1^8,\zeta_2^4,\zeta_3^2,\zeta_4,\ldots][\rho,x_1,x_2,x_3,\overline{v}_2]}{(x_2^4+\rho^4\zeta_2^4+x_1^4\zeta_1^8,x_3^2+\rho^2\zeta_3^2+x_1^2\zeta_2^4+x_2^2\zeta_1^8)}
\]
Writing our differentials in terms of this description of the kernel, we have
\begin{align*}
d_7(\zeta_1^4)&=\rho^3\overline{v}_2
&d_7(\zeta_2^2)&=\rho x_1^2\overline{v}_2\\
d_7(\zeta_3)&=(x_1^3+\rho^2x_2)\overline{v}_2
&d_7(\zeta_1^4\zeta_2^2)&=\rho x_2^2\overline{v}_2\\
d_7(\zeta_1^4\zeta_3)&=(x_1x_2^2+\rho^2x_3)\overline{v}_2
&d_7(\zeta_2^2\zeta_3)&=(x_1^2x_3+x_2^3)\overline{v}_2\\
d_7(\zeta_1^4\zeta_2^2\zeta_3)&=(x_1^2x_2\zeta_1^8+x_2^2x_3+\rho^2x_1\zeta_2^4)\overline{v}_2
\end{align*}
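For example, the formula for $d_7(\zeta_1^4\zeta_2^2)$ follows from the Leibniz rule together with the relation $x_2^2=\rho^2\zeta_2^2+x_1^2\zeta_1^4$:
\[d_7(\zeta_1^4\zeta_2^2)=\rho^3\overline{v}_2\,\zeta_2^2+\rho x_1^2\overline{v}_2\,\zeta_1^4=\rho\overline{v}_2(\rho^2\zeta_2^2+x_1^2\zeta_1^4)=\rho x_2^2\overline{v}_2,\]
and the remaining formulas are obtained in the same way.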
Letting $I_2$ be the ideal in $\ker(d_7)$ generated by this list of relations, it follows that
\[H_*(E_7/(\overline{v}_1))=\frac{{\mathbb F}_2[\zeta_1^8,\zeta_2^4,\zeta_3^2,\zeta_4,\ldots][\rho,x_1,x_2,x_3,\overline{v}_2]}{I_2+(x_2^4+\rho^4\zeta_2^4+x_1^4\zeta_1^8,x_3^2+\rho^2\zeta_3^2+x_1^2\zeta_2^4+x_2^2\zeta_1^8)}\]
It remains to verify that the claimed list of elements indeed generates $\ker(d_7)$. We achieve this by running the $\rho$-Bockstein spectral sequence, which converges since $E_7/(\overline{v}_1)$ is (graded) $\rho$-complete; here $\delta_n$ denotes the Bockstein differential induced by the part of $d_7$ that raises $\rho$-adic valuation by exactly $n$. We begin by computing the associated graded of the $\rho$-adic filtration.
\begin{proposition}
The $E_0$-page of the $\rho$-Bockstein spectral sequence is given by
\[\frac{{\mathbb F}_2[\zeta_1^4,\zeta_2^2,\zeta_3,\ldots]
[\rho,x_1,x_2,\overline{v}_{2}]}{(x_2^2+x_1^2\zeta_1^4)}\]
This spectral sequence converges to the homology of $E_7/(\oldwidehat_1)$. We have differentials
\begin{align*}
\delta_0(\zeta_3)&=x_1^3\overline{v}_2
&\delta_1(\zeta_2^2)&=\rho x_1^2\overline{v}_2
&\delta_3(\zeta_1^4)&=\rho^3\overline{v}_2
\end{align*}
\end{proposition}
The annihilator ideal of $\delta_0(\zeta_3)$ in $E_0$ is trivial, so $\delta_0$ does not create any Massey product cycles. We conclude the following.
\begin{proposition}
The $E_1$-page of the $\rho$-Bockstein spectral sequence is given by
\[\frac{{\mathbb F}_2[\zeta_1^4,\zeta_2^2,\zeta_3^2,\zeta_4,\ldots]
[\rho,x_1,x_2,\overline{v}_{2}]}{(x_2^2+x_1^2\zeta_1^4,x_1^3\overline{v}_2)}\]
\end{proposition}
To compute the $E_2$-page, we note that the class $x_3$ is a cycle in $E_7/(\overline{v}_1)$ and therefore it projects to a permanent cycle in the $\rho$-Bockstein spectral sequence. We also denote its projection
\[x_1\zeta_2^2+x_2\zeta_1^4\]
by $x_3$.
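Note that this projection is indeed a cycle on $E_1$: since $\zeta_1^4$ supports no differential until $\delta_3$, we have $\delta_1(x_1\zeta_2^2+x_2\zeta_1^4)=\rho x_1^3\overline{v}_2$, which vanishes in $E_1$ by the relation $x_1^3\overline{v}_2=0$.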
\begin{proposition}
The $E_3$-page of the $\rho$-Bockstein spectral sequence is given by
\[
\frac{{\mathbb F}_2[\zeta_1^4,\zeta_2^4,\zeta_3^2,\zeta_4,\ldots][\rho,x_1,x_2,x_3,\overline{v}_2]}{(x_3^2+x_2^2\zeta_1^8+x_1^2\zeta_2^4,x_2^2+x_1^2\zeta_1^4,x_1^3\overline{v}_2,\rho x_1^2\overline{v}_2,(x_1^2x_3+x_2^3)\overline{v}_2)}
\]
\end{proposition}
\begin{proof}
The kernel of $\delta_1$ is generated as an algebra by the classes
\[\rho,x_1,x_2,\overline{v}_2,\zeta_1^4,\zeta_2^4,\zeta_3^2,\zeta_4,\ldots\]
and $\zeta_2^2\cdot\mathrm{ann}_{E_1}(\rho x_1^2\overline{v}_2)$, and we claim $\mathrm{ann}_{E_1}(\rho x_1^2\overline{v}_2)=(x_1)$. To see this, let
\[R:=\frac{{\mathbb F}_2[\zeta_1^4,\zeta_2^2,\zeta_3^2,\zeta_4,\ldots]
[\rho,x_1,x_2,\overline{v}_{2}]}{(x_2^2+x_1^2\zeta_1^4)}\]
There is a surjection $\pi:R\to R/(x_1^3\overline{v}_2)=E_1$, so if
\[\pi(r)\cdot \rho x_1^2\overline{v}_2=0\]
then $r\rho x_1^2\overline{v}_2\in(x_1^3\overline{v}_2)\subset(x_1^3)$ in $R$. Using that $R$ is of the form $T[\rho,\overline{v}_2]$ for $T$ an $x_1$-torsion free ring, it follows that $r\in(x_1)$.
Therefore, the map
\[S:=\frac{{\mathbb F}_2[\zeta_1^4,\zeta_2^4,\zeta_3^2,\zeta_4,\ldots][\rho,x_1,x_2,x_3,\overline{v}_2]}{(x_3^2+x_2^2\zeta_1^8+x_1^2\zeta_2^4,x_2^2+x_1^2\zeta_1^4,x_1^3\overline{v}_2,(x_1^2x_3+x_2^3)\overline{v}_2)}\to\ker(\delta_1)\]
is surjective. To see that it is injective, note that the map
\[S\to S[y]/(y^2+\zeta_2^4,x_1y+x_3+x_2\zeta_1^4)\]
is injective, and the latter is easily identified with $E_1$ by setting $y=\zeta_2^2$. Now, $\mathrm{im}(\delta_1)$ is the ideal in $S$ generated by $\rho x_1^2\overline{v}_2$, and the result follows.
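Note also that under this identification the relation $x_1y+x_3+x_2\zeta_1^4=0$ becomes $x_3=x_1\zeta_2^2+x_2\zeta_1^4$, recovering the description of $x_3$ given above.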
\end{proof}
\begin{proposition}\label{propEinftyrhobocksteinn=2}
The $E_4$ page of the $\rho$-Bockstein spectral sequence is given by the quotient of
\[
{\mathbb F}_2[\zeta_1^8,\zeta_2^4,\zeta_3^2,\zeta_4,\ldots][\rho,x_1,x_2,x_3,\overline{v}_2]
\]
by the ideal generated by the elements
\begin{align*}
&x_3^2+x_2^2\zeta_1^8+x_1^2\zeta_2^4
&&x_2^4+x_1^4\zeta_1^8
&&x_1^3\overline{v}_2\\
&\rho x_1^2\overline{v}_2
&&\rho^3\overline{v}_2
&&(x_1^2x_3+x_2^3)\overline{v}_2\\
&x_2^2x_3\overline{v}_2+\zeta_1^8x_1^2x_2\overline{v}_2
&&x_2^2\rho\overline{v}_2
&&x_1x_2^2\overline{v}_2
\end{align*}
\end{proposition}
\begin{proof}
The kernel of $\delta_3$ is generated by the classes
\[\rho,x_1,x_2,x_3,\overline{v}_2,\zeta_1^8,\zeta_2^4,\zeta_3^2,\zeta_4,\ldots\]
and $\zeta_1^4\cdot\mathrm{ann}_{E_3}(\rho^3\overline{v}_2)$, and we claim $\mathrm{ann}_{E_3}(\rho^3\overline{v}_2)=(x_1^2)$. To see this, set
\[R:=\frac{{\mathbb F}_2[\zeta_1^4,\zeta_2^4,\zeta_3^2,\zeta_4,\ldots][\rho,x_1,x_2,x_3,\overline{v}_2]}{(x_3^2+x_1^2\zeta_2^4+x_2^2\zeta_1^8,x_2^2+x_1^2\zeta_1^4)}
\]
We have a surjection
\[\pi:R\to R/(\rho x_1^2\overline{v}_2,x_1^3\overline{v}_2,(x_1^2x_3+x_2^3)\overline{v}_2)=E_3\]
So if $\pi(r)\cdot \rho^3\overline{v}_2=0$, then
\[r\rho^3\overline{v}_2\in (\rho x_1^2\overline{v}_2,x_1^3\overline{v}_2,(x_1^2x_3+x_2^3)\overline{v}_2)\subset(x_1^2)\]
in $R$. Using that $R$ is of the form $T[\rho,\overline{v}_2]$ for $T$ an $x_1$-torsion free ring, we see that $r\in (x_1^2)$.
In particular, we do not pick up any new Massey product cycles as $x_1^2\zeta_1^4=x_2^2$, and therefore the map
\[S:={\mathbb F}_2[\zeta_1^8,\zeta_2^4,\zeta_3^2,\zeta_4,\ldots][\rho,x_1,x_2,x_3,\overline{v}_2]/J\to \ker(\delta_3)\]
is surjective, where $J$ is the ideal generated by the list in the statement of the proposition, with the element $\rho^3\overline{v}_2$ removed. To see that this map is injective, note that the map
\[S\to S[y]/(y^2+\zeta_1^8,x_1^2y+x_2^2)\]
is injective, and the latter is easily identified with $E_3$ by setting $y=\zeta_1^4$.
\end{proof}
The remaining algebra generators are all permanent cycles, and so the $\rho$-Bockstein spectral sequence collapses on $E_4$. This verifies the discussion following Proposition \ref{propwithrelationslist}.
\subsubsection{The homology of $\mathrm{tmf}_0(3)$} Moving to weight zero, we have the subalgebra $\mathcal A_*\square_{\mathcal A(2)_*}{\mathbb F}_2$ along with the following ten elements
\begin{align*}
1 & \in H_{0}
&\rho^{2}x_1\overline{v}_{2}
&\in H_{4}
& \rho^2 x_{2}\overline{v}_{2} &\in H_{6}\\
\rho x_1 x_2\overline{v}_{2}
&\in H_{7}
&x_1^{2}x_2\overline{v}_{2}
&\in H_{8}
&\rho^2 x_{3}\overline{v}_{2}
&\in H_{10}\\
\rho x_1x_3\overline{v}_{2}
&\in H_{11}
&x_1^{2}x_3\overline{v}_{2}
&\in H_{12}
&\rho x_{2}x_{3}\overline{v}_{2}
&\in H_{13} \\
x_1x_{2}x_{3}\overline{v}_{2}
&\in H_{14}
\end{align*}
These form an $\mathcal A(2)_*$-comodule algebra $M_2$ of dimension 10 with coactions determined by Corollary \ref{corcoactionx_m} and the fact that $\rho$ and $\overline{v}_2$ are primitive. We display $M_2$ in Figure \ref{figM2}, where we have omitted the unit class as it generates a trivial comodule summand.
\begin{theorem}\label{thmt03}
The homology of $\mathrm{tmf}_0(3)\simeq\Gamma(BPGL\langle 2\rangle)$ is isomorphic as an $\mathcal A_*$-comodule algebra to
\[\mathcal A_*\square_{\mathcal A(2)_*}M_2\]
where $\overline{M}_{2}$ is as in Figure \ref{figM2}, and the multiplication in $\overline{M}_{2}$ is square zero.
\end{theorem}
\begin{proof}
The description of the $E_\infty$ page of the HSSS for $BPGL\langle 2\rangle$ follows from the discussion following Proposition \ref{propwithrelationslist}. This page is displayed in Figure \ref{einftybp2}.
It suffices now to observe that there are no nontrivial comodule algebra extensions in the HSSS. Indeed, each class in $M_2$ is in the highest filtration in its respective stem, so the projection $H_*\mathrm{tmf}_0(3)\to M_2$ is a map of $\mathcal{A}(2)_*$-comodule algebras, which is adjoint to an isomorphism of $\mathcal{A}_*$-comodule algebras
\[H_*\mathrm{tmf}_0(3)\to \mathcal A_*\square_{\mathcal A(2)_*}M_2\qedhere\]
\end{proof}
Prior to the results of Hill--Lawson \cite{hilllawson}, it was not known how to construct a connective model $\mathrm{tmf}_0(3)$ of the periodic spectrum $\mathrm{TMF}_0(3)$ via a derived algebraic geometry approach. Nonetheless, computational aspects of such a spectrum were studied in detail, and Davis and Mahowald proposed several definitions of such a spectrum. In particular, they constructed a certain 10-cell complex $X$ and considered the spectrum $\mathrm{tmf}\wedge X$ as a good connective model. While they gave an explicit construction of $X$, we show here that it exists by Toda obstruction theory.
\begin{proposition}\label{proptoda}
Let $N$ be the $\mathcal{A}$-module with one generator in each dimension $3,5,6,7,9,10,11,12,13$, where the following Steenrod squares are nonzero on the generator $g$ of dimension 3
\[\mathrm{Sq}^2,\mathrm{Sq}^3,\mathrm{Sq}^4,\mathrm{Sq}^4\mathrm{Sq}^2,\mathrm{Sq}^5\mathrm{Sq}^2,\mathrm{Sq}^6\mathrm{Sq}^2=\mathrm{Sq}^8,\mathrm{Sq}^6\mathrm{Sq}^3,\mathrm{Sq}^7\mathrm{Sq}^3\]
and $\mathrm{Sq}^6(g)=0$. Then there exists a unique 2-complete bounded below spectrum $Y$ with $H^*Y\cong N$. Moreover, there is a map $Y\to S^0$ extending $2\nu$.
\end{proposition}
\begin{proof}
Toda obstruction theory (see \cite[Theorem 3.2]{BE} or \cite{Toda}) implies that it suffices to show that the $-2$ stem in
\[\Ext_{\mathcal{A}}^{*,*}(N,N)\]
is trivial. This is easily checked using Bruner's software, and we display the chart in Figure \ref{bruner4}. The $-1$-stem contains no classes in filtration higher than 1, so uniqueness follows. The existence of the map $Y\to S^0$ may also be checked directly from the Adams spectral sequence, and this is the argument used in \cite[Theorem 2.1(b)]{davismahowald}.
\end{proof}
\begin{figure}
\caption{$\Ext_{\mathcal{A}}^{*,*}(N,N)$.}
\label{bruner4}
\end{figure}
It is easy to check that the $\mathcal{A}$-module $N$ has the property that its restriction to $\mathcal{A}(2)$ is dual to the $\mathcal{A}(2)_*$-comodule $\Sigma^{-1}\overline{M}_{2}$; note, for instance, that the nonunit elements of $M_2$ listed above lie in degrees $4,6,7,8,10,11,12,13,14$, which after desuspension match the dimensions $3,5,6,7,9,10,11,12,13$ of the generators of $N$. We conclude the following.
\begin{corollary}\label{cortmfsplitting}
Let $X$ be the cofiber of the map $Y\to S^0$ constructed in Proposition \ref{proptoda}. There is a 2-local equivalence of $\mathrm{tmf}$-modules
\[\mathrm{tmf}\wedge X\to \mathrm{tmf}_0(3)\]
\end{corollary}
\begin{proof}
The homotopy groups of $\mathrm{tmf}$ and $\mathrm{tmf}_0(3)$ are degreewise finitely generated, so it suffices to produce a $2$-complete equivalence, which follows from the existence of any map $X\to \mathrm{tmf}_0(3)$ that induces the map of $\mathcal{A}_*$-comodules $H_*X\to \mathcal A_*\square_{\mathcal A(2)_*}M_2$ adjoint to the identity map of the $\mathcal{A}(2)_*$-comodule $M_2$.
For this, it suffices to show that in the Adams spectral sequence
\[\Ext_{\mathcal{A}_*}(H_*X,H_*\mathrm{tmf}_0(3))\cong\Ext_{\mathcal{A}(2)_*}(M_2,M_2)\implies [X,\mathrm{tmf}_0(3)]^{\hat{}}_2\]
the identity map $\iota$ is a permanent cycle. By Proposition \ref{proptoda}, the 4-skeleton of $X$ is the 2-cell complex $C(2\nu)$. Since $2\nu=0\in\pi_3\mathrm{tmf}_0(3)$ (see \cite{hillmeier} or \cite{LSWX}), the unit map $S^0\to \mathrm{tmf}_0(3)$ extends over $C(2\nu)$, which implies that the map of Adams spectral sequences
\[\Ext_{\mathcal{A}_*}(H_*X,H_*\mathrm{tmf}_0(3))\to \Ext_{\mathcal{A}_*}(H_*C(2\nu),H_*\mathrm{tmf}_0(3))\]
sends $\iota$ to a permanent cycle.
We use Bruner's ext software to produce the former in Figure \ref{bruner} and the latter in Figure \ref{bruner1}. The map is an isomorphism in bidegrees $(0,0)$, $(-1,2)$ and $(-1,3)$, sending $\iota$ to the generator of the left-hand $h_0$-tower in Figure \ref{bruner1}. It follows by naturality that $\iota$ does not support a $d_r$ for $r<4$ in Figure \ref{bruner}. We claim the generator of the right-hand $h_0$-tower in Figure \ref{bruner1} supports a nontrivial $d_2$. Given this, the nontrivial $d_2$ lifts to Figure \ref{bruner}, and it follows from $h_0$-linearity that the $-1$ stem is then trivial on the $E_3$-page.
It remains to show the claimed $d_2$ in Figure \ref{bruner1}. This follows by naturality via the map
\[\Ext_{\mathcal{A}_*}(H_*C(2\nu),{\mathbb F}_2)\to\Ext_{\mathcal{A}_*}(H_*C(2\nu),H_*\mathrm{tmf}_0(3))\]
and we display the former $E_2$ page in Figure \ref{bruner2}. Since
\[D(C(2\nu))\not\simeq S^{-4}\vee S^0\]
the class in bidegree $(0,0)$ must support a differential. It follows easily from the long exact sequence in homotopy that $\pi_{-1}D(C(2\nu))=\pi_3C(2\nu)={\mathbb Z}/2$, which implies the claimed $d_2$.
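Explicitly, the cofiber sequence $S^3\xrightarrow{2\nu}S^0\to C(2\nu)$ induces an exact sequence
\[\pi_3S^3\xrightarrow{\;\cdot 2\nu\;}\pi_3S^0\to\pi_3C(2\nu)\to\pi_2S^3=0,\]
and since $\nu$ generates $\pi_3S^0\cong{\mathbb Z}/24$, the cokernel of the first map is ${\mathbb Z}/2$.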
\end{proof}
\begin{corollary}\label{cortmf03collapse}
The Adams spectral sequence for $\mathrm{tmf}_0(3)$ does not collapse on $E_2$. In particular, we have $d_2(x)=h_0h_2$, where $x$ is the nontrivial class in bidegree $(4,0)$.
\end{corollary}
\begin{proof}
The claimed $d_2$ follows by naturality via the map $C(2\nu)\to\mathrm{tmf}_0(3)$, where the Adams spectral sequence for $C(2\nu)$ supports the nontrivial $d_2$ discussed in the proof of Corollary \ref{cortmfsplitting}. We display the Adams $E_2$-page for $\mathrm{tmf}_0(3)$ in Figure \ref{bruner3}.
\end{proof}
\begin{remark}
Corollary \ref{cortmf03collapse} stands in contrast to the motivic Adams spectral sequence, which collapses for $BPGL\langle m\rangle$ for all $m$ (similarly for the $C_2$-equivariant Adams spectral sequence for $BP_{\mathbb R}\langle m\rangle$).
\end{remark}
\begin{figure}
\caption{$\Ext_{\mathcal{A}_*}(H_*X,H_*\mathrm{tmf}_0(3))$.}
\label{bruner}
\end{figure}
\begin{figure}
\caption{$\Ext_{\mathcal{A}_*}(H_*C(2\nu),H_*\mathrm{tmf}_0(3))$.}
\label{bruner1}
\caption{$\Ext_{\mathcal{A}_*}(H_*C(2\nu),{\mathbb F}_2)$.}
\label{bruner2}
\end{figure}
\begin{figure}
\caption{The Adams $E_2$-page for $\mathrm{tmf}_0(3)$.}
\label{bruner3}
\end{figure}
\begin{sseqdata}[ name = E7bp2, Adams grading, classes = {fill, show name=below},
grid = go, xrange ={0}{14},yrange={-14}{14},xscale=0.5,yscale=0.5,x tick step =2, y tick step =2,run off differentials = {->},struct lines = red!88!black ]
\class[name = 1](0,0)
\class[red!88!black,name = \rho^3\overline{v}_2](3,3)
\DoUntilOutOfBoundsThenNMore{2}{
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\text{ : }ructline(0,0)(3,3)
\class[red!88!black](4,2)
\DoUntilOutOfBoundsThenNMore{3}{
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[red!88!black,name=\rho x_1^2\overline{v}_2](5,1)
\DoUntilOutOfBoundsThenNMore{3}{
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[red!88!black,name=x_1^3\overline{v}_2](6,0)
\DoUntilOutOfBoundsThenNMore{3}{
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[red!88!black](6,0)
\DoUntilOutOfBoundsThenNMore{3}{
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[name = \zeta_1^4](4,-4)
\DoUntilOutOfBoundsThenNMore{3}{
\d7
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[red!88!black](7,-1)
\DoUntilOutOfBoundsThenNMore{3}{
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[red!88!black](10,2)
\DoUntilOutOfBoundsThenNMore{3}{
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[red!88!black](8,-2)
\DoUntilOutOfBoundsThenNMore{1}{\d7
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[red!88!black](8,-2)
\DoUntilOutOfBoundsThenNMore{1}{
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[red!88!black](11,1)
\DoUntilOutOfBoundsThenNMore{1}{
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[name = \zeta_2^2](6,-6)
\DoUntilOutOfBoundsThenNMore{3}{
\d7
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[red!88!black](9,-3)
\DoUntilOutOfBoundsThenNMore{1}{
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[red!88!black](12,0)
\DoUntilOutOfBoundsThenNMore{1}{
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[red!88!black](12,0)
\DoUntilOutOfBoundsThenNMore{1}{
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[name = \zeta_3](7,-7)
\DoUntilOutOfBoundsThenNMore{3}{
\d7
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[red!88!black](10,-4)
\DoUntilOutOfBoundsThenNMore{1}{\d7(\lastx,\lasty,2,2)
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[red!88!black](10,-4)
\DoUntilOutOfBoundsThenNMore{1}{\d7(\lastx,\lasty,3,2)
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[red!88!black](10,-4)
\DoUntilOutOfBoundsThenNMore{1}{
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[red!88!black](13,-1)
\DoUntilOutOfBoundsThenNMore{1}{
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[name = \zeta_1^8](8,-8)
\DoUntilOutOfBoundsThenNMore{3}{
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[red!88!black](11,-5)
\DoUntilOutOfBoundsThenNMore{1}{\d7(\lastx,\lasty,2,2)
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[red!88!black](11,-5)
\DoUntilOutOfBoundsThenNMore{1}{\d7(\lastx,\lasty,3,2)
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[red!88!black](11,-5)
\DoUntilOutOfBoundsThenNMore{1}{\d7(\lastx,\lasty,4,3)
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[red!88!black](12,-6)
\DoUntilOutOfBoundsThenNMore{3}{\d7(\lastx,\lasty,1,2)
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[red!88!black](12,-6)
\DoUntilOutOfBoundsThenNMore{3}{\d7(\lastx,\lasty,2,3)
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[red!88!black](12,-6)
\DoUntilOutOfBoundsThenNMore{3}{\d7(\lastx,\lasty,3,3)
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[red!88!black](12,-6)
\DoUntilOutOfBoundsThenNMore{3}{
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[red!88!black](12,-6)
\DoUntilOutOfBoundsThenNMore{3}{
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class(10,-10)
\DoUntilOutOfBoundsThenNMore{1}{
\d7(\lastx,\lasty,1,2)
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[red!88!black](13,-7)
\DoUntilOutOfBoundsThenNMore{2}{\d7(\lastx,\lasty,2,2)
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[red!88!black](13,-7)
\DoUntilOutOfBoundsThenNMore{1}{\d7(\lastx,\lasty,3,3)
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[red!88!black](13,-7)
\DoUntilOutOfBoundsThenNMore{1}{\d7(\lastx,\lasty,4,4)
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[red!88!black](13,-7)
\DoUntilOutOfBoundsThenNMore{3}{
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class(11,-11)
\DoUntilOutOfBoundsThenNMore{3}{
\d7
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[red!88!black](14,-8)
\DoUntilOutOfBoundsThenNMore{2}{\d7(\lastx,\lasty,2,2)
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[red!88!black](14,-8)
\DoUntilOutOfBoundsThenNMore{2}{\d7(\lastx,\lasty,3,3)
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[red!88!black](14,-8)
\DoUntilOutOfBoundsThenNMore{2}{\d7(\lastx,\lasty,4,4)
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[red!88!black](14,-8)
\DoUntilOutOfBoundsThenNMore{3}{\d7
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[red!88!black](14,-8)
\DoUntilOutOfBoundsThenNMore{3}{\d7
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class(12,-12)
\DoUntilOutOfBoundsThenNMore{3}{\d7
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class(12,-12)
\DoUntilOutOfBoundsThenNMore{3}{
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[red!88!black](15,-9)
\DoUntilOutOfBoundsThenNMore{3}{
\d7(\lastx,\lasty,3,4)
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[red!88!black](15,-9)
\DoUntilOutOfBoundsThenNMore{3}{
\d7
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[red!88!black](15,-9)
\DoUntilOutOfBoundsThenNMore{3}{
\d7
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class[red!88!black](15,-9)
\DoUntilOutOfBoundsThenNMore{3}{
\d7
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class(13,-13)
\DoUntilOutOfBoundsThenNMore{3}{
\d7(\lastx,\lasty,1,5)
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class(14,-14)
\DoUntilOutOfBoundsThenNMore{1}{
\d7(\lastx,\lasty,1,5)
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class(14,-14)
\DoUntilOutOfBoundsThenNMore{3}{
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class(15,-15)
\DoUntilOutOfBoundsThenNMore{3}{
\d7(\lastx,\lasty,1,5)
\class[red!88!black](\lastx+3,\lasty+3)
\text{ : }ructline
}
\class(48,48)
\class(60,-60)
\text{ : }ructline[green!50!black](0,0)(60,-60)
\end{sseqdata}
\begin{figure}
\caption{The $E_7$ page of the HSSS for $BPGL\langle 2\rangle$.}
\label{e7bp2}
\end{figure}
\begin{sseqdata}[ name = Einftybp2, Adams grading, classes = {fill, show name=below},
grid = go, xrange ={0}{16},yrange={-16}{3},xscale=0.5,yscale=0.5,x tick step =2, y tick step =2, font = \tiny ]
\class[name = 1](0,0)
\class[name=\zeta_1^8](8,-8)
\class[name=\zeta_2^4](12,-12)
\class[name=\zeta_3^2](14,-14)
\class[red!88!black,name=\rho^2x_1\overline{v}_2](4,2)
\class[red!88!black,name=\rho^2x_2\overline{v}_2](6,0)
\class[red!88!black,name=\rho x_1x_2\overline{v}_2](7,-1)
\class[red!88!black,name=x_1^2x_2\overline{v}_2](8,-2)
\class[red!88!black,name=\rho^2x_3\overline{v}_2](10,-4)
\class[red!88!black,name=\rho x_1x_3\overline{v}_2](11,-5)
\class[red!88!black,name=x_1^2x_3\overline{v}_2](12,-6)
\class[red!88!black](12,-6)
\class[red!88!black,name=\rho x_2x_3\overline{v}_2](13,-7)
\class[red!88!black,name=x_1x_2x_3\overline{v}_2](14,-8)
\class[red!88!black](14,-8)
\class[red!88!black,name=\zeta_1^8\rho x_1x_2\overline{v}_2](15,-9)
\class[red!88!black,name=\zeta_1^8x_1^2x_2\overline{v}_2](16,-10)
\class[red!88!black](16,-10)
\class[name=\zeta_4](15,-15)
\class(48,48)
\class(60,-60)
\text{ : }ructline[green!50!black](0,0)(48,48)
\text{ : }ructline[green!50!black](0,0)(60,-60)
\end{sseqdata}
\begin{figure}
\caption{The $E_\infty$ page of the HSSS for $BPGL\langle 2\rangle$.}
\label{einftybp2}
\end{figure}
\subsection{The homology of $\Gamma(BPGL\langle 3\rangle)$}\label{subsecn=3computation}
We follow arguments similar to those of the $m=2$ case to compute the HSSS for $BPGL\langle 3\rangle$. The $E_{15}$ page is displayed in Figure \ref{e15bp3}, with conventions as before, where classes divisible by $\overline{v}_3$ are represented by blue dots, and blue structure lines indicate multiplication by $\rho^7\overline{v}_3$, which detects $\sigma\in\pi_7 S^0$.
\begin{theorem}\label{thmn=3SS}
There is a spectral sequence of $\mathcal A_*$-comodule algebras with
$E_{2}$-term given by
\[E_2^{*,*,*}={\mathbb F}_2[\zeta_1^2,\zeta_2,\ldots][\rho,x_1,\overline{v}_1,\overline{v}_2,\overline{v}_3]\]
that converges to $H_*\Gamma(BPGL\langle 3\rangle)$. We have differentials
\begin{align*}
d_3(\zeta_1^2)&=\rho\overline{v}_1
&d_3(\zeta_2)&=x_1\overline{v}_1
&d_7(\zeta_1^4)&=\rho^3\overline{v}_2\\
d_7(\zeta_2^2)&=\rho x_1^2\overline{v}_2
&d_7(\zeta_3)&=(x_1^3+\rho^2x_2)\overline{v}_2
&d_{15}(\zeta_1^8)&=\rho^7\overline{v}_3\\
d_{15}(\zeta_2^4)&=\rho^3 x_1^4\overline{v}_3
&d_{15}(\zeta_3^2)&=(\rho x_1^6+\rho^5x_2^2)\overline{v}_3
&d_{15}(\zeta_4)&=(x_1^7+\rho^2x_1^4x_2+\rho^4x_1x_2^2+\rho^6x_3)\overline{v}_3
\end{align*}
The subalgebra of $E_2$ generated by the classes
\[\rho,x_1,x_2,x_3,x_4,\overline{v}_1,\overline{v}_2,\overline{v}_3,\zeta_1^{16},\zeta_2^{8},\zeta_3^4,\zeta_4^2,\zeta_5,\ldots\]
consists of permanent cycles.
\end{theorem}
\begin{sseqdata}[ name = E15bp3, Adams grading, classes = {fill, show name=below},
grid = go, xrange ={0}{19},yrange={-19}{19},xscale=0.5,yscale=0.45,x tick step =2, y tick step =2,run off differentials = {->},struct lines = blue!85!black ]
\class[name = 1](0,0)
\class[red!88!black](4,2)
\class[red!88!black](6,0)
\class[red!88!black](7,-1)
\class[red!88!black](8,-2)
\class[red!88!black](10,-4)
\class[red!88!black](11,-5)
\class[red!88!black](12,-6)
\class[red!88!black](12,-6)
\class[red!88!black](13,-7)
\class[red!88!black](14,-8)
\class[red!88!black](14,-8)
\class[red!88!black](15,-9)
\class[red!88!black](16,-10)
\class[red!88!black](16,-10)
\class[red!88!black](18,-12)
\class[red!88!black](18,-12)
\class[red!88!black](18,-12)
\class[red!88!black](19,-13)
\class[red!88!black](19,-13)
\class[red!88!black](19,-13)
\class[blue!85!black,name = \rho^7\overline{v}_3](7,7)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\text{ : }ructline(0,0)(7,7)
\class[blue!85!black](8,6)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](9,5)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](10,4)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](10,4)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](11,3)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black,name=\rho^3x_1^4\overline{v}_3](11,3)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](12,2)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](12,2)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black,name=\rho x_1^6\overline{v}_3](13,1)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](13,1)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](13,1)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black,name=x_1^7\overline{v}_3](14,0)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](14,0)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](14,0)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](14,0)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[name=\zeta_1^8](8,-8)
\DoUntilOutOfBoundsThenNMore{3}{
\d15
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](15,-1)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](15,-1)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](15,-1)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](15,-1)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](16,-2)
\DoUntilOutOfBoundsThenNMore{3}{\d15
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](16,-2)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](16,-2)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](16,-2)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](16,-2)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](23,5)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](17,-3)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](17,-3)
\DoUntilOutOfBoundsThenNMore{3}{\d15
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](17,-3)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](17,-3)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](17,-3)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](24,4)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](18,-4)
\DoUntilOutOfBoundsThenNMore{3}{\d15
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](18,-4)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](18,-4)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](18,-4)
\DoUntilOutOfBoundsThenNMore{3}{\d15(\lastx,\lasty,4,2)
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](18,-4)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](18,-4)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[name=\zeta_2^4](12,-12)
\DoUntilOutOfBoundsThenNMore{3}{
\d15
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](19,-5)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](19,-5)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](19,-5)
\DoUntilOutOfBoundsThenNMore{3}{\d15(\lastx,\lasty,4,2)
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](19,-5)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](19,-5)
\DoUntilOutOfBoundsThenNMore{3}{
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[name=\zeta_3^2](14,-14)
\DoUntilOutOfBoundsThenNMore{3}{
\d15
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[name=\zeta_4](15,-15)
\DoUntilOutOfBoundsThenNMore{3}{
\d15
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[name=\zeta_1^{16}](16,-16)
\DoUntilOutOfBoundsThenNMore{3}{
\d15
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class(20,-20)
\DoUntilOutOfBoundsThenNMore{3}{
\d15
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](20,-6)
\DoUntilOutOfBoundsThenNMore{3}{\d15
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class[blue!85!black](20,-6)
\DoUntilOutOfBoundsThenNMore{3}{\d15(\lastx,\lasty,2,2)
\class[blue!85!black](\lastx+7,\lasty+7)
\text{ : }ructline
}
\class(48,48)
\class(60,-60)
\text{ : }ructline[green!50!black](0,0)(60,-60)
\end{sseqdata}
\begin{figure}
\caption{The $E_{15}$ page of the HSSS for $BPGL\langle 3\rangle$.}
\label{e15bp3}
\end{figure}
The formulas for $d_{\le 7}$ imply that, as a dga, one has an isomorphism
\[E_{15}^{*,*,*}(BPGL\langle 3\rangle;i_*H{\mathbb F}_2)\cong E_8^{*,*,*}(BPGL\langle 2\rangle;i_*H{\mathbb F}_2)[\overline{v}_3]\]
The ideal $(\overline{v}_{1},\overline{v}_{2})\subset E_{15}$ is contained in $\ker(d_{15})$, and we have maps of dga's
\[E_{15}\to E_{15}/(\overline{v}_{1},\overline{v}_{2}^2)\to E_{15}/(\overline{v}_{1},\overline{v}_{2})\]
There is nothing in weight 0 contributed by $\overline{v}_{1}$ because of the $d_3$'s, and there is nothing in weight 0 contributed by $\overline{v}_{2}^2$ because of the $d_7$'s. In fact, the first map is an isomorphism in non-positive weights, and the second map is surjective with square zero kernel consisting of $d_{15}$-cycles (for degree reasons). Using this, it is not difficult to recover the $E_{16}$ page from $H_*(E_{15}/(\overline{v}_{1},\overline{v}_{2}))$, and we have an isomorphism
\[E_{15}/(\overline{v}_{1},\overline{v}_{2})\cong \frac{{\mathbb F}_2[\zeta_1^8,\zeta_2^4,\zeta_3^2,\zeta_4,\ldots][\rho,x_1,x_2,x_3,\overline{v}_{3}]}{(x_2^4+\zeta_1^8x_1^4+\zeta_2^4\rho^4,x_3^2+\zeta_1^8x_2^2+\zeta_2^4x_1^2+\zeta_3^2\rho^2)}\]
with $d_{15}$ determined by the four explicit differentials given above.
We claim that $\ker(d_{15})$ is the subalgebra of $E_{15}/(\overline{v}_1,\overline{v}_2)$ generated by the classes
\[\zeta_1^{16},\zeta_2^8,\zeta_3^4,\zeta_4^2,\zeta_5,\ldots,\rho,x_1,x_2,x_3,x_4,\overline{v}_3\]
It follows that we have the presentation
\[\ker(d_{15})=\frac{{\mathbb F}_2[\zeta_1^{16},\zeta_2^8,\zeta_3^4,\zeta_4^2,\zeta_5,\ldots][\rho,x_1,x_2,x_3,x_4,\overline{v}_3]}{(x_2^8+\rho^8\zeta_2^8+x_1^8\zeta_1^{16},x_3^4+\rho^4\zeta_3^4+x_1^4\zeta_2^8+x_2^4\zeta_1^{16},x_4^2+\rho^2\zeta_4^2+x_1^2\zeta_3^4+x_2^2\zeta_2^8+x_3^2\zeta_1^{16})}\]
Writing our differentials in terms of this description of the kernel, we have
{
\allowdisplaybreaks
\aln{
d_{15}(\zeta_1^8)&=\rho^7\overline{v}_3\\
d_{15}(\zeta_2^4)&=\rho^3 x_1^4\overline{v}_3\\
d_{15}(\zeta_3^2)&=(\rho x_1^6+\rho^5 x_2^2)\overline{v}_3\\
d_{15}(\zeta_4)&=(x_1^7+\rho^2x_1^4x_2+\rho^4x_1x_2^2+\rho^6x_3)\overline{v}_3\\
d_{15}(\zeta_1^8\zeta_2^4)&=\rho^3x_2^4\overline{v}_3\\
d_{15}(\zeta_1^8\zeta_3^2)&=(\rho^5x_3^2+\rho x_1^2x_2^4)\overline{v}_3\\
d_{15}(\zeta_1^8\zeta_4)&=(x_1^3x_2^4+\rho^2x_2^5+\rho^4x_1x_3^2+\rho^6x_4)\overline{v}_3\\
d_{15}(\zeta_2^4\zeta_3^2)&=(\rho x_1^4x_3^2+\rho x_2^6)\overline{v}_3\\
d_{15}(\zeta_2^4\zeta_4)&=(x_1^5x_3^2+x_1x_2^6+\rho^2x_1^4x_4+\rho^2x_2^4x_3)\overline{v}_3\\
d_{15}(\zeta_3^2\zeta_4)&=(x_2^7+x_1^6x_4+x_1^4x_2x_3^2+x_1^2x_2^4x_3+\rho^4x_2^2x_4+\rho^4x_3^3)\overline{v}_3\\
d_{15}(\zeta_1^8\zeta_2^4\zeta_3^2)&=(\rho x_2^4x_3^2+\rho x_1^4x_2^2\zeta_1^{16}+\rho^5x_1^2\zeta_2^8)\overline{v}_3\\
d_{15}(\zeta_1^8\zeta_2^4\zeta_4)&=(x_1x_2^4x_3^2+x_1^5x_2^2\zeta_1^{16}+\rho^2x_2^4x_4+\rho^2x_1^4x_3\zeta_1^{16}+\rho^4x_1^3\zeta_2^8+\rho^6x_2\zeta_2^8)\overline{v}_3\\
d_{15}(\zeta_1^8\zeta_3^2\zeta_4)&=(x_1^2x_2^4x_4+x_2^5x_3^2+x_1^4x_2^3\zeta_1^{16}+\rho^4x_3^3x_4+\rho^4x_2^2x_3\zeta_1^{16}+\rho^4x_1^2x_2\zeta_2^8+\rho^6x_1\zeta_3^4)\overline{v}_3\\
d_{15}(\zeta_2^4\zeta_3^2\zeta_4)&=(x_1^4x_3^2x_4+x_2^6x_4+x_2^4x_3^3+x_1^4x_2^2x_3\zeta_1^{16}+(x_1^6x_2+\rho^4x_1^2x_3+\rho^4x_2^3)\zeta_2^8+\rho^2x_1^5\zeta_3^4)\overline{v}_3\\
d_{15}(\zeta_1^8\zeta_2^4\zeta_3^2\zeta_4)&=(x_2^4x_3^2x_4+x_1^2x_2^5\zeta_2^8+x_1^4x_2^2x_4\zeta_1^{16}+x_1^4x_3^3\zeta_1^{16}+x_2^6x_3\zeta_1^{16}+\rho^2x_1x_2^4\zeta_3^4\\
&+\rho^4x_1^2x_4\zeta_2^8+\rho^4x_2x_3^2\zeta_2^8)\overline{v}_3
}
}
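For example, the formula for $d_{15}(\zeta_1^8\zeta_2^4)$ follows from the Leibniz rule and the relation $x_2^4=\rho^4\zeta_2^4+x_1^4\zeta_1^8$:
\[d_{15}(\zeta_1^8\zeta_2^4)=\rho^7\overline{v}_3\,\zeta_2^4+\rho^3x_1^4\overline{v}_3\,\zeta_1^8=\rho^3\overline{v}_3(\rho^4\zeta_2^4+x_1^4\zeta_1^8)=\rho^3x_2^4\overline{v}_3,\]
and the remaining formulas are obtained similarly.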
Letting $I_3$ be the ideal in $\ker(d_{15})$ generated by this list of relations, it follows that $H_*(E_{15}/(\overline{v}_1,\overline{v}_2))=\ker(d_{15})/I_3$. It remains to verify that the claimed list of elements indeed generates $\ker(d_{15})$. We achieve this by running the $\rho$-Bockstein spectral sequence as before, and our arguments follow closely those of the $m=2$ case.
\begin{proposition}\label{propn=3list}
The $E_0$-page of the $\rho$-Bockstein spectral sequence is given by
\[\frac{{\mathbb F}_2[\zeta_1^8,\zeta_2^4,\zeta_3^2,\zeta_4,\ldots][\rho,x_1,x_2,x_3,\overline{v}_3]}{(x_2^4+\zeta_1^8x_1^4,x_3^2+\zeta_1^8x_2^2+\zeta_2^4x_1^2)}\]
We have differentials
\begin{align*}
\delta_0(\zeta_4)&=x_1^7\overline{v}_3
&\delta_1(\zeta_3^2)&=\rho x_1^6\overline{v}_3
&\delta_3(\zeta_2^4)&=\rho^3 x_1^4\overline{v}_3
&\delta_7(\zeta_1^8)&=\rho^7\overline{v}_3
\end{align*}
\end{proposition}
The annihilator ideal of $\delta_0(\zeta_4)$ in $E_0$ is trivial, so $\delta_0$ does not create any Massey product cycles. We conclude the following.
\begin{proposition}
The $E_1$-page of the $\rho$-Bockstein spectral sequence is given by
\[\frac{{\mathbb F}_2[\zeta_1^8,\zeta_2^4,\zeta_3^2,\zeta_4^2,\ldots][\rho,x_1,x_2,x_3,\overline{v}_3]}{(x_2^4+\zeta_1^8x_1^4,x_3^2+\zeta_1^8x_2^2+\zeta_2^4x_1^2,x_1^7\overline{v}_{3})}\]
\end{proposition}
The annihilator ideal of $\delta_1(\zeta_3^2)$ in $E_1$ is $(x_1)$, which gives our Massey product $x_4$.
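Explicitly, in the identification of $E_1$ used in the proof of the next proposition, $x_4$ corresponds to the class $x_1\zeta_3^2+x_2\zeta_2^4+x_3\zeta_1^8$, which is a $\delta_1$-cycle since its image $\rho x_1^7\overline{v}_3$ vanishes in $E_1$.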
\begin{proposition}
The $E_3$-page of the $\rho$-Bockstein spectral sequence is given by the quotient of the polynomial ring
\[{\mathbb F}_2[\zeta_1^8,\zeta_2^4,\zeta_3^4,\zeta_4^2,\zeta_5,\ldots][\rho,x_1,x_2,x_3,x_4,\overline{v}_3]\]
by the ideal generated by the elements
\begin{align*}
&x_2^4+\zeta_1^8x_1^4,
&&x_3^2+\zeta_1^8x_2^2+\zeta_2^4x_1^2,\\
&x_4^2+x_1^2\zeta_3^4+x_2^2\zeta_2^8+x_3^2\zeta_1^{16},
&&x_1^7\overline{v}_3,\\
&(x_2^7+x_1^6x_4+x_1^4x_2x_3^2+x_1^2x_2^4x_3)\overline{v}_3,
&&\rho x_1^6\overline{v}_3.
\end{align*}
\end{proposition}
\begin{proof}
The description of $\mathrm{ann}_{E_1}(\rho x_1^6\overline{v}_{3})$ implies that the map
\[S:={\mathbb F}_2[\zeta_1^8,\zeta_2^4,\zeta_3^4,\zeta_4^2,\zeta_5,\ldots][\rho,x_1,x_2,x_3,x_4,\overline{v}_3]/J\to \ker(\delta_1)\]
is surjective, where $J$ is the ideal generated by the list of elements in the statement of the proposition, with $\rho x_1^6\overline{v}_3$ removed. To see that it is injective, note that the map
\[S\to S[y]/(y^2+\zeta_3^4,x_1y+x_2\zeta_2^4+x_3\zeta_1^8+x_4)\]
is injective, and the latter is easily identified with $E_1$ by setting $y=\zeta_3^2$.
\end{proof}
\begin{proposition}
The $E_7$-page of the $\rho$-Bockstein spectral sequence is given by the quotient of the polynomial ring
\[{\mathbb F}_2[\zeta_1^8,\zeta_2^8,\zeta_3^4,\zeta_4^2,\zeta_5,\ldots][\rho,x_1,x_2,x_3,x_4,\overline{v}_3]\]
by the ideal generated by the elements
\begin{align*}
&x_2^4+\zeta_1^8x_1^4,
&&x_3^4+\zeta_1^{16}x_2^4+\zeta_2^8x_1^4,\\
&x_4^2+x_1^2\zeta_3^4+x_2^2\zeta_2^8+x_3^2\zeta_1^{16},
&&x_1^7\overline{v}_3,\\
&(x_2^7+x_1^6x_4+x_1^4x_2x_3^2+x_1^2x_2^4x_3)\overline{v}_3,
&&\rho x_1^6\overline{v}_3,\\
&\rho^3 x_1^4\overline{v}_3,
&&(x_1^5x_3^2+x_1x_2^6)\overline{v}_3,\\
&(\rho x_1^4x_3^2+\rho x_2^6)\overline{v}_3,
&&(x_1^4x_3^2x_4+x_2^6x_4+x_2^4x_3^3+x_1^4x_2^2x_3\zeta_1^{16}+x_1^6x_2\zeta_2^8)\overline{v}_3.
\end{align*}
\end{proposition}
\begin{proof}
We claim that $\mathrm{ann}_{E_3}(\rho^3 x_1^4\overline{v}_3)=(x_1^2)$. To see this, let
\[R=\frac{{\mathbb F}_2[\zeta_1^8,\zeta_2^4,\zeta_3^4,\zeta_4^2,\ldots][\rho,x_1,x_2,x_3,x_4,\overline{v}_3]}{(x_2^4+\zeta_1^8x_1^4,x_3^2+\zeta_1^8x_2^2+\zeta_2^4x_1^2,x_4^2+x_1^2\zeta_3^4+x_2^2\zeta_2^8+x_3^2\zeta_1^{16})}\]
There is a surjection
\[\pi:R\to R/(x_1^7\overline{v}_{3},\rho x_1^6\overline{v}_3,(x_2^7+x_1^6x_4+x_1^4x_2x_3^2+x_1^2x_2^4x_3)\overline{v}_3)\cong E_3\]
So if $\pi(r)\cdot \rho^3 x_1^4\overline{v}_3=0$, we have
\[r\rho^3 x_1^4\overline{v}_3\in (x_1^7\overline{v}_{3},\rho x_1^6\overline{v}_3,(x_2^7+x_1^6x_4+x_1^4x_2x_3^2+x_1^2x_2^4x_3)\overline{v}_3)\subset (x_1^6)\]
Using that $R=T[\rho,\overline{v}_3]$ for $T$ an $x_1$-torsion free ring, it follows that $rx_1^4\in (x_1^6)$ and therefore $r\in (x_1^2)$. Since $x_1^2\zeta_2^4=x_3^2+\zeta_1^8x_2^2$, this creates no new cycles.
It follows that the map
\[S:={\mathbb F}_2[\zeta_1^8,\zeta_2^8,\zeta_3^4,\zeta_4^2,\zeta_5,\ldots][\rho,x_1,x_2,x_3,x_4,\overline{v}_3]/J\to \ker(\delta_3)\]
is surjective, where $J$ is the ideal generated by the list of elements in the statement of the proposition, with $\rho^3 x_1^4\overline{v}_3$ removed. To see that it is injective, note that the map
\[S\to S[y]/(y^2+\zeta_2^8,x_1^2y+x_2^2\zeta_1^8+x_3^2)\]
is injective, and the latter is easily identified with $E_3$ by setting $y=\zeta_2^4$.
\end{proof}
\begin{proposition}
The $E_8$-page of the $\rho$-Bockstein spectral sequence is given by the quotient of the polynomial ring
\[{\mathbb F}_2[\zeta_1^{16},\zeta_2^8,\zeta_3^4,\zeta_4^2,\zeta_5,\ldots][\rho,x_1,x_2,x_3,x_4,\overline{v}_3]\]
by the ideal generated by the elements
\begin{align*}
&x_2^8+\zeta_1^{16}x_1^8,
&&x_3^4+\zeta_1^{16}x_2^4+\zeta_2^8x_1^4,\\
&x_4^2+x_1^2\zeta_3^4+x_2^2\zeta_2^8+x_3^2\zeta_1^{16},
&&x_1^7\overline{v}_3,\\
&(x_2^7+x_1^6x_4+x_1^4x_2x_3^2+x_1^2x_2^4x_3)\overline{v}_3,
&&\rho x_1^6\overline{v}_3,\\
&\rho^3 x_1^4\overline{v}_3,
&&(x_1^5x_3^2+x_1x_2^6)\overline{v}_3,\\
&(\rho x_1^4x_3^2+\rho x_2^6)\overline{v}_3,
&&(x_1^4x_3^2x_4+x_2^6x_4+x_2^4x_3^3+x_1^4x_2^2x_3\zeta_1^{16}+x_1^6x_2\zeta_2^8)\overline{v}_3,\\
&\rho^7\overline{v}_3,
&&\rho^3x_2^4\overline{v}_3,\\
&(\rho^5x_3^2+\rho x_1^2x_2^4)\overline{v}_3,
&&(x_1^3x_2^4+\rho^2x_2^5+\rho^4x_1x_3^2+\rho^6x_4)\overline{v}_3,\\
&(\rho x_2^4x_3^2+\rho x_1^4x_2^2\zeta_1^{16}+\rho^5x_1^2\zeta_2^8)\overline{v}_3,
\end{align*}
\begin{align*}
&(x_1x_2^4x_3^2+x_1^5x_2^2\zeta_1^{16}+\rho^2x_2^4x_4+\rho^2x_1^4x_3\zeta_1^{16}+\rho^4x_1^3\zeta_2^8+\rho^6x_2\zeta_2^8)\overline{v}_3,\\
&(x_1^2x_2^4x_4+x_2^5x_3^2+x_1^4x_2^3\zeta_1^{16}+\rho^4x_3^3x_4+\rho^4x_2^2x_3\zeta_1^{16}+\rho^4x_1^2x_2\zeta_2^8+\rho^6x_1\zeta_3^4)\overline{v}_3,\\
&(x_2^4x_3^2x_4+x_1^2x_2^5\zeta_2^8+x_1^4x_2^2x_4\zeta_1^{16}+x_1^4x_3^3\zeta_1^{16}+x_2^6x_3\zeta_1^{16}+\rho^2x_1x_2^4\zeta_3^4+\rho^4x_1^2x_4\zeta_2^8+\rho^4x_2x_3^2\zeta_2^8)\overline{v}_3.
\end{align*}
\end{proposition}
\begin{proof}
We claim that $\mathrm{ann}_{E_7}(\rho^7\overline{v}_3)=(x_1^4)$. To see this, let
\[R=\frac{{\mathbb F}_2[\zeta_1^8,\zeta_2^8,\zeta_3^4,\zeta_4^2,\ldots][\rho,x_1,x_2,x_3,x_4,\overline{v}_3]}{(x_2^4+\zeta_1^8x_1^4,x_3^4+\zeta_1^{16}x_2^4+\zeta_2^8x_1^4,x_4^2+x_1^2\zeta_3^4+x_2^2\zeta_2^8+x_3^2\zeta_1^{16})}\]
There is a surjection $\pi:R\to R/I\cong E_7$ where $I$ is the ideal generated by the elements
\begin{align*}
&x_1^7\overline{v}_3,
&&(x_2^7+x_1^6x_4+x_1^4x_2x_3^2+x_1^2x_2^4x_3)\overline{v}_3,\\
&\rho x_1^6\overline{v}_3,
&&\rho^3 x_1^4\overline{v}_3,\\
&(x_1^5x_3^2+x_1x_2^6)\overline{v}_3,
&&(\rho x_1^4x_3^2+\rho x_2^6)\overline{v}_3,\\
&(x_1^4x_3^2x_4+x_2^6x_4+x_2^4x_3^3+x_1^4x_2^2x_3\zeta_1^{16}+x_1^6x_2\zeta_2^8)\overline{v}_3.
\end{align*}
So if $\pi(r)\cdot \rho^7\overline{v}_3=0\in E_7$, then $r\rho^7\overline{v}_3\in I$. It is not hard to check that $I\subset (x_1^4)$, so since $R$ is of the form $T[\rho,\overline{v}_3]$ for $T$ an $x_1$-torsion free ring we have $r\in (x_1^4)$. Arguing as before, we arrive at the claimed presentation.
\end{proof}
The remaining algebra generators are all permanent cycles, and so the $\rho$-Bockstein spectral sequence collapses on $E_8$. This verifies the discussion preceding Proposition \ref{propn=3list}, so that $H_*(E_{15}/(\overline{v}_1,\overline{v}_2))$ has the claimed description as an $\mathcal{A}_*$-comodule algebra. In weight zero, we have
\[H_*(E_{15}/(\overline{v}_1,\overline{v}_2))\cong \mathcal{A}_*\square_{\mathcal{A}(3)_*}M_3\]
and machine computation gives the following basis of the $\mathcal{A}(3)_*$-comodule algebra $M_3$, which has dimension 165.
{
\allowdisplaybreaks
\begin{align*}
1&\in H_0
&\rho^6x_1 \overline{v}_3 &\in H_8
&\rho^5x_1^2 \overline{v}_3 &\in H_9\\
\rho^6x_2 \overline{v}_3 &\in H_{10}
&\rho^4x_1^3 \overline{v}_3 &\in H_{10}
&\rho^5x_1x_2 \overline{v}_3 &\in H_{11}\\
\rho^4x_1^2x_2 \overline{v}_3 &\in H_{12}
&\rho^2x_1^5 \overline{v}_3 &\in H_{12}
&\rho^5x_2^2 \overline{v}_3 &\in H_{13}\\
\rho^3x_1^3x_2 \overline{v}_3 &\in H_{13}
&\rho^6x_3 \overline{v}_3 &\in H_{14}
&\rho^4x_1x_2^2 \overline{v}_3 &\in H_{14}\\
\rho^2x_1^4x_2 \overline{v}_3 &\in H_{14}
&\rho^5x_1x_3 \overline{v}_3 &\in H_{15}
&\rho^3x_1^2x_2^2 \overline{v}_3 &\in H_{15}\\
\rho x_1^5x_2 \overline{v}_3 &\in H_{15}
&\rho^4x_1^2x_3 \overline{v}_3 &\in H_{16}
&\rho^4x_2^3 \overline{v}_3 &\in H_{16}\\
\rho^2x_1^3x_2^2 \overline{v}_3 &\in H_{16}
&x_1^6x_2 \overline{v}_3 &\in H_{16}
&\rho^5x_2x_3 \overline{v}_3 &\in H_{17}\\
\rho^3x_1^3x_3 \overline{v}_3 &\in H_{17}
&\rho^3x_1x_2^3 \overline{v}_3 &\in H_{17}
&\rho x_1^4x_2^2 \overline{v}_3 &\in H_{17}\\
\rho^4x_1x_2x_3 \overline{v}_3 &\in H_{18}
&\rho^2x_1^4x_3 \overline{v}_3 &\in H_{18}
&\rho^2x_1^2x_2^3 \overline{v}_3 &\in H_{18}\\
x_1^5x_2^2 \overline{v}_3 &\in H_{18}
&\rho^3x_1^2x_2x_3 \overline{v}_3 &\in H_{19}
&\rho x_1^5x_3 \overline{v}_3 &\in H_{19}\\
\rho x_1^3x_2^3 \overline{v}_3 &\in H_{19}
&\rho^4x_2^2x_3 \overline{v}_3 &\in H_{20}
&\rho^2x_1^3x_2x_3 \overline{v}_3 &\in H_{20}\\
\rho^2x_1x_2^4 \overline{v}_3 &\in H_{20}
&x_1^6x_3 \overline{v}_3 &\in H_{20}
&x_1^4x_2^3 \overline{v}_3 &\in H_{20}\\
\rho^5x_3^2 \overline{v}_3 &\in H_{21}
&\rho^3x_1x_2^2x_3 \overline{v}_3 &\in H_{21}
&\rho x_1^4x_2x_3 \overline{v}_3 &\in H_{21}\\
\rho^6x_4 \overline{v}_3 &\in H_{22}
&\rho^4x_1x_3^2 \overline{v}_3 &\in H_{22}
&\rho^2x_1^2x_2^2x_3 \overline{v}_3 &\in H_{22}\\
\rho^2x_2^5 \overline{v}_3 &\in H_{22}
&x_1^5x_2x_3 \overline{v}_3 &\in H_{22}
&\rho^5x_1x_4 \overline{v}_3 &\in H_{23}\\
\rho^3x_1^2x_3^2 \overline{v}_3 &\in H_{23}
&\rho^3x_2^3x_3 \overline{v}_3 &\in H_{23}
&\rho x_1^3x_2^2x_3 \overline{v}_3 &\in H_{23}\\
\rho x_1x_2^5 \overline{v}_3 &\in H_{23}
&\rho^4x_1^2x_4 \overline{v}_3 &\in H_{24}
&\rho^4x_2x_3^2 \overline{v}_3 &\in H_{24}\\
\rho^2x_1^3x_3^2 \overline{v}_3 &\in H_{24}
&\rho^2x_1x_2^3x_3 \overline{v}_3 &\in H_{24}
&x_1^4x_2^2x_3 \overline{v}_3 &\in H_{24}\\
x_1^2x_2^5 \overline{v}_3 &\in H_{24}
&\rho^5x_2x_4 \overline{v}_3 &\in H_{25}
&\rho^3x_1^3x_4 \overline{v}_3 &\in H_{25}\\
\rho^3x_1x_2x_3^2 \overline{v}_3 &\in H_{25}
&\rho x_1^4x_3^2 \overline{v}_3 &\in H_{25}
&\rho x_1^2x_2^3x_3 \overline{v}_3 &\in H_{25}\\
\rho^4x_1x_2x_4 \overline{v}_3 &\in H_{26}
&\rho^2x_1^4x_4 \overline{v}_3 &\in H_{26}
&\rho^2x_1^2x_2x_3^2 \overline{v}_3 &\in H_{26}\\
\rho^2x_2^4x_3 \overline{v}_3 &\in H_{26}
&x_1^5x_3^2 \overline{v}_3 &\in H_{26}
&x_1^3x_2^3x_3 \overline{v}_3 &\in H_{26}\\
\rho^3x_1^2x_2x_4 \overline{v}_3 &\in H_{27}
&\rho^3x_2^2x_3^2 \overline{v}_3 &\in H_{27}
&\rho x_1^5x_4 \overline{v}_3 &\in H_{27}\\
\rho x_1^3x_2x_3^2 \overline{v}_3 &\in H_{27}
&\rho x_1x_2^4x_3 \overline{v}_3 &\in H_{27}
&\rho^4x_3^3 \overline{v}_3 &\in H_{28}\\
\rho^2x_1^3x_2x_4 \overline{v}_3 &\in H_{28}
&\rho^2x_1x_2^2x_3^2 \overline{v}_3 &\in H_{28}
&x_1^6x_4 \overline{v}_3 &\in H_{28}\\
x_1^4x_2x_3^2 \overline{v}_3 &\in H_{28}
&x_1^2x_2^4x_3 \overline{v}_3 &\in H_{28}
&\rho^4x_2^2x_4\overline{v}_3&\in H_{28}\\
\rho^3x_1x_2^2x_4\overline{v}_3&\in H_{29}
&\rho^5x_3x_4 \overline{v}_3 &\in H_{29}
&\rho^3x_1x_3^3 \overline{v}_3 &\in H_{29}\\
\rho x_1^4x_2x_4 \overline{v}_3 &\in H_{29}
&\rho x_1^2x_2^2x_3^2 \overline{v}_3 &\in H_{29}
&\rho x_2^5x_3 \overline{v}_3 &\in H_{29}\\
\rho^2x_1^2x_2^2x_4\overline{v}_3&\in H_{30}
&\rho^4x_1x_3x_4 \overline{v}_3 &\in H_{30}
&\rho^2x_1^2x_3^3 \overline{v}_3 &\in H_{30}\\
\rho^2x_2^3x_3^2 \overline{v}_3 &\in H_{30}
&x_1^5x_2x_4 \overline{v}_3 &\in H_{30}
&x_1^3x_2^2x_3^2 \overline{v}_3 &\in H_{30}\\
x_1x_2^5x_3 \overline{v}_3 &\in H_{30}
&\rho^3x_1^2x_3x_4 \overline{v}_3 &\in H_{31}
&\rho^3x_2x_3^3 \overline{v}_3 &\in H_{31}\\
\rho x_1^3x_2^2x_4 \overline{v}_3 &\in H_{31}
&\rho x_1^3x_3^3 \overline{v}_3 &\in H_{31}
&\rho x_1x_2^3x_3^2 \overline{v}_3 &\in H_{31}\\
\rho^3x_2^3x_4\overline{v}_3&\in H_{31}
&\rho^4x_2x_3x_4 \overline{v}_3 &\in H_{32}
&\rho^2x_1^3x_3x_4 \overline{v}_3 &\in H_{32}\\
\rho^2x_1x_2^3x_4 \overline{v}_3 &\in H_{32}
&\rho^2x_1x_2x_3^3 \overline{v}_3 &\in H_{32}
&x_1^4x_2^2x_4 \overline{v}_3 &\in H_{32}\\
x_1^4x_3^3 \overline{v}_3 &\in H_{32}
&x_1^2x_2^3x_3^2 \overline{v}_3 &\in H_{32}
&x_2^6x_3 \overline{v}_3 &\in H_{32}\\
\rho^3x_1x_2x_3x_4 \overline{v}_3 &\in H_{33}
&\rho x_1^4x_3x_4 \overline{v}_3 &\in H_{33}
&\rho x_1^2x_2^3x_4 \overline{v}_3 &\in H_{33}\\
\rho x_1^2x_2x_3^3 \overline{v}_3 &\in H_{33}
&\rho^2x_1^2x_2x_3x_4 \overline{v}_3 &\in H_{34}
&\rho^2x_2^2x_3^3 \overline{v}_3 &\in H_{34}\\
x_1^5x_3x_4 \overline{v}_3 &\in H_{34}
&x_1^3x_2^3x_4 \overline{v}_3 &\in H_{34}
&x_1^3x_2x_3^3 \overline{v}_3 &\in H_{34}\\
\rho^2x_2^4x_4\overline{v}_3&\in H_{34}
&\rho x_1^3x_2x_3x_4 \overline{v}_3 &\in H_{35}
&\rho x_1x_2^4x_4 \overline{v}_3 &\in H_{35}\\
\rho x_1x_2^2x_3^3 \overline{v}_3 &\in H_{35}
&\rho^3x_2^2x_3x_4\oldwidehaterline{v}_3&\in H_{35}
&\rho^4x_3^2x_4 \oldwidehat_3 &\in H_{36}\\
x_1^4x_2x_3x_4 \oldwidehat_3 &\in H_{36}
&x_1^2x_2^4x_4 \oldwidehat_3 &\in H_{36}
&x_1^2x_2^2x_3^3 \oldwidehat_3 &\in H_{36}\\
\rho^2x_1x_2^2x_3x_4\oldwidehaterline{v}_3&\in H_{36}
&\rho^3x_1x_3^2x_4 \oldwidehat_3 &\in H_{37}
&\rho x_1^2x_2^2x_3x_4 \oldwidehat_3 &\in H_{37}\\
\rho x_2^5x_4 \oldwidehat_3 &\in H_{37}
&\rho x_2^3x_3^3 \oldwidehat_3 &\in H_{37}
&\rho^2x_1^2x_3^2x_4 \oldwidehat_3 &\in H_{38}\\
x_1^3x_2^2x_3x_4 \oldwidehat_3 &\in H_{38}
&x_1x_2^5x_4 \oldwidehat_3 &\in H_{38}
&x_1x_2^3x_3^3 \oldwidehat_3 &\in H_{38}\\
\rho^2x_2^3x_3x_4\oldwidehaterline{v}_3&\in H_{38}
&\rho^3x_2x_3^2x_4 \oldwidehat_3 &\in H_{39}
&\rho x_1^3x_3^2x_4 \oldwidehat_3 &\in H_{39}\\
\rho x_1x_2^3x_3x_4 \oldwidehat_3 &\in H_{39}
&\rho^2x_1x_2x_3^2x_4\oldwidehat_3&\in H_{40}
&x_1^4x_3^2x_4\oldwidehat_3&\in H_{40}\\
x_1^2x_2^3x_3x_4\oldwidehat_3&\in H_{40}
&x_2^6x_4\oldwidehat_3&\in H_{40}
&\rho^6x_1^3x_2^3x_3^2\oldwidehat_3^2&\in H_{40}\\
\rho x_1^2x_2x_3^2x_4\oldwidehat_3&\in H_{41}
&\rho x_2^4x_3x_4\oldwidehat_3&\in H_{41}
&x_1^3x_2x_3^2x_4\oldwidehat_3&\in H_{42}\\
x_1x_2^4x_3x_4\oldwidehat_3&\in H_{42}
&\rho^2x_2^2x_3^2x_4\oldwidehaterline{v}_3&\in H_{42}
&\rho^3x_3^3x_4\oldwidehat_3&\in H_{43}\\
\rho x_1x_2^2x_3^2x_4\oldwidehat_3&\in H_{43}
&\rho^2x_1x_3^3x_4\oldwidehat_3&\in H_{44}
&x_1^2x_2^2x_3^2x_4\oldwidehat_3&\in H_{44}\\
x_2^5x_3x_4\oldwidehat_3&\in H_{44}
&\rho^6x_1^3x_2^2x_3^3\oldwidehat_3^2&\in H_{44}
&\rho x_1^2x_3^3x_4\oldwidehat_3&\in H_{45}\\
\rho x_2^3x_3^2x_4\oldwidehat_3&\in H_{45}
&\rho^2x_2x_3^3x_4\oldwidehat_3&\in H_{46}
&x_1^3x_3^3x_4\oldwidehat_3&\in H_{46}\\
x_1x_2^3x_3^2x_4\oldwidehat_3&\in H_{46}
&\rho^6x_1^2x_2^3x_3^3\oldwidehat_3^2&\in H_{46}
&\rho x_1x_2x_3^3x_4\oldwidehat_3&\in H_{47}\\
\rho^5x_1^3x_2^3x_3^3\oldwidehat_3^2&\in H_{47}
&x_1^2x_2x_3^3x_4\oldwidehat_3&\in H_{48}
&\rho^6x_1^3x_2^3x_3x_4\oldwidehaterline{v}_3^2&\in H_{48}\\
\rho x_2^2x_3^3x_4\oldwidehat_3&\in H_{49}
&x_1x_2^2x_3^3x_4\oldwidehat_3&\in H_{50}
&x_2^3x_3^3x_4\oldwidehat_3&\in H_{52}
\end{align*}
}
Note that multiplication in $M_3$ is not square zero, as there are five generators divisible by $\oldwidehat_3^2$. For example, there is the nontrivial product $H_8\otimes H_{32}\to H_{40}$
\[\rho^6x_1\oldwidehat_3\cdot x_1^2x_2^3x_3^2\oldwidehat_3=\rho^6x_1^3x_2^3x_3^2\oldwidehat_3^2\]
However, all products in $M_3$ are determined by the relations in the discussion preceding Proposition \ref{propn=3list}. The coactions follow from Corollary \ref{corcoactionx_m} and the fact that $\rho$ and $\oldwidehat_3$ are primitive.
For the following, we refer to Figure \ref{einftybp3}. There we use a rectangle in a given bidegree when the group in that bidegree has rank at least $3$.
\begin{theorem}\label{einftym=3}
The HSSS for $BPGL\langle 3\rangle$ collapses on $E_{16}$, and there is an isomorphism of $\mathcal{A}_*$-comodule algebras (in weight zero)
\[E_\infty\cong(\mathcal{A}_*\square_{\mathcal{A}(3)_*}M_3)\oplus(\mathcal{A}_*\square_{\mathcal{A}(2)_*}\overline{M}_{2})\]
where the latter summand is a square zero extension with $\overline{M}_{3}\cdot\overline{M}_{2}=0$; here $\overline{M}_{i}$ denotes the augmentation ideal in $M_i$.
\end{theorem}
\begin{proof}
We run the $\oldwidehat_2$-Bockstein spectral sequence to calculate $E_{16}$, using that we have an isomorphism in weight zero
\[E_{16}\cong H_*(E_{15}/(\oldwidehat_1,\oldwidehat_2^2))\]
The description of the $d_{15}$'s implies that, on $E_{15}$
\[\mathrm{image}(d_{15})\cap(\oldwidehat_2)=0\]
and so this spectral sequence collapses. All of the relations in $H_*(E_{15}/(\oldwidehat_2))$ hold in $E_{16}$ again by the differentials given in Theorem \ref{thmn=3SS}, and the claimed description follows for $E_{16}$.
It remains to show that the HSSS collapses on $E_{16}$. The only classes on $E_{16}$ that are not in the subalgebra of permanent cycles described in Theorem \ref{thmn=3SS} lie in the subspace
\[V\cdot E(\zeta_1^8,\zeta_2^4,\zeta_3^2,\zeta_4)\]
where $V$ is the subspace of $E_{16}$ generated by classes divisible by $\oldwidehat_2$, which lie on the line of red dots (the line $y=6-x$) in Figure \ref{einftybp3}. It suffices therefore to show that the subspace
\[
\oldwidehat_2\cdot E(\zeta_1^8,\zeta_2^4,\zeta_3^2,\zeta_4)
\]
of $E_2$ consists of permanent cycles. Since this is a spectral sequence of $\mathcal{A}_*$-comodules, the coproduct formulas imply that it suffices to show the class
\[\oldwidehat_2\zeta_1^8\zeta_2^4\zeta_3^2\zeta_4\]
is a permanent cycle. It follows for degree reasons that the only possible differential on this class is a $d_{23}$ with target a sum of monomials of the form
\[p(\zeta_i)\rho^a x_1^b\oldwidehat_3^2\]
written as an element of $E_2$. It is straightforward to compute the $\oldwidehat_3^2$ part of $E_{16}$ in this weight by closely following the computation above, and a machine computation then shows that there is no class in the required degree on $E_{16}$.
\end{proof}
\begin{remark}
Modulo comodule algebra extension problems, this describes
\[H_*\Gamma(BPGL\langle 3\rangle)\]
Unlike the $m=2$ computation, there is room here for nontrivial comodule algebra extensions. We can resolve these case by case using ad hoc restriction and transfer arguments, but we do not include this, as the size of $M_3$ makes this prohibitively lengthy.
For the same reasons, we do not include a schematic for the comodule $M_3$, although all of its structure may be determined from the coactions on the $x_m$'s described in Corollary \ref{corcoactionx_m}.
\end{remark}
\begin{sseqdata}[ name = Einftybp3, Adams grading, classes = {fill, show name=below},
grid = go, xrange ={0}{50},yrange={-50}{6},xscale=0.2,yscale=0.2,x tick step =2, y tick step =2, font = \tiny ]
\class[name = 1](0,0)
\class[name=\zeta_1^{16}](16,-16)
\class[name=\zeta_2^8](24,-24)
\class[name=\zeta_3^4](28,-28)
\class[name=\zeta_4^2](30,-30)
\class[name=\zeta_5](31,-31)
\class(32,-32)
\class(40,-40)
\class(44,-44)
\class(46,-46)
\class(47,-47)
\class(48,-48)
\class(48,-48)
\class(52,-52)
\class[red!88!black](4,2)
\class[red!88!black](6,0)
\class[red!88!black](7,-1)
\class[red!88!black](8,-2)
\class[red!88!black](10,-4)
\class[red!88!black](11,-5)
\class[red!88!black](12,-6)
\class[red!88!black](12,-6)
\class[red!88!black](13,-7)
\class[red!88!black](14,-8)
\class[red!88!black](14,-8)
\class[red!88!black](15,-9)
\class[red!88!black](16,-10)
\class[red!88!black](16,-10)
\class[red!88!black,rectangle](18,-12)
\DoUntilOutOfBounds{
\class[red!88!black,rectangle](\lastx+1,\lasty-1)
}
\class[blue!85!black](8,6)
\class[blue!85!black](9,5)
\class[blue!85!black](10,4)
\class[blue!85!black](10,4)
\class[blue!85!black](11,3)
\class[blue!85!black](12,2)
\class[blue!85!black](12,2)
\class[blue!85!black](13,1)
\class[blue!85!black](13,1)
\class[blue!85!black,rectangle](14,0)
\DoUntilOutOfBounds{
\class[blue!85!black,rectangle](\lastx+1,\lasty-1)
}
\class[blue!85!black](40,-12)
\class[blue!85!black](44,-16)
\class[blue!85!black](46,-18)
\class[blue!85!black](47,-19)
\class[blue!85!black](48,-20)
\class(48,48)
\class(60,-60)
\structline[green!50!black](0,0)(48,48)
\structline[green!50!black](0,0)(60,-60)
\end{sseqdata}
\begin{figure}
\printpage[ name = Einftybp3 ]
\caption{The $E_{\infty}$-page of the HSSS for $BPGL\langle 3\rangle$ in weight zero.}
\label{einftybp3}
\end{figure}
\section{The HSSS in equivariant homotopy}\label{secequivariant}
In the equivariant stable homotopy category $\mathcal Sp^G$ for a finite group $G$, Hill--Hopkins--Ravenel \cite{HHR} constructed a filtration analogous to Voevodsky's slice filtration. Later, Ullman \cite{ullman} constructed a filtration with better multiplicative properties called the \textit{regular} slice filtration. To fix conventions, we work with Ullman's filtration, though for the spectra we consider the two filtrations coincide.
Most of our results on the arithmetic square and the HSSS for $BPGL\langle m\rangle$ apply without change in the equivariant setting for $BPRn$ by applying Betti realization and using Corollary \ref{cor:betaE}. As in Section \ref{sec2}, we can smash the slice tower of $E$ with any $G$-spectrum $K$, obtaining a spectral sequence. The proofs in Section \ref{sec2} go through in this setting essentially verbatim, so we omit details. Whereas the slice spectral sequence gives a very powerful tool to compute the homotopy groups of the fixed points $E^G$ of a $G$-spectrum, our spectral sequence computes the \textit{homology} of $E^G$.
Computations in ${\mathbb R}$-motivic homotopy and $C_2$-equivariant homotopy often differ due to the presence of the negative cone in the equivariant homology of a point. We fully compute the $RO(C_2)$-graded HSSS for $k_{\mathbb R}$ and we find an exotic differential (Corollary \ref{corexoticdiff}) of the form
\[d_5(2u_{2\sigma}^{-1}\cdot \zeta_1^2\zeta_2)=\overline{v}_{1}^2\]
This differential, in particular, originates in the negative cone and lands in the positive cone. We deduce this differential from the following fact.
\begin{proposition}\label{v_nnilpotent}
The map
\[\pi_\star BPR\to\pi_\star(i_*H\mathbb F_2\otimes BPR)\]
sends $\overline{v}_{m}^{2^m}\mapsto0$.
\end{proposition}
\begin{proof}
By Theorem \ref{differentialsthm}, we have a differential
\[
d_{2^{m+1}-1}(\zeta_1^{2^m})=a_\sigma^{2^m-1}\overline{v}_{m}
\]
on $E_{2^{m+1}-1}(BPR;i_*H\mathbb F_2)$. There is nothing in higher filtration in this stem, so we have the relation
\[
a_\sigma^{2^m-1}\overline{v}_{m}=0\in \pi_\star(i_*H\mathbb F_2\otimes BPR)
\]
Arguing by induction on $i$, suppose that $a_\sigma^{2^m-i}\overline{v}_{m}^i=0$ for some $1\le i<2^m$; this implies that the class $a_\sigma^{2^m-(i+1)}\overline{v}_{m}^i$ is in the image of the transfer. Since the restriction of $\overline{v}_{m}$ is zero, the Frobenius relation implies that $a_\sigma^{2^m-(i+1)}\overline{v}_{m}^{i+1}=0$.
\end{proof}
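For instance, the first step of the induction above can be spelled out explicitly: since $a_\sigma^{2^m-1}\overline{v}_{m}=0$, the class $a_\sigma^{2^m-2}\overline{v}_{m}$ is a transfer, say $a_\sigma^{2^m-2}\overline{v}_{m}=\mathrm{tr}(y)$, and the Frobenius relation then gives
\[
a_\sigma^{2^m-2}\overline{v}_{m}^{2}=\mathrm{tr}(y)\cdot\overline{v}_{m}=\mathrm{tr}\bigl(y\cdot\mathrm{res}(\overline{v}_{m})\bigr)=\mathrm{tr}(0)=0.
\]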
\begin{remark}
Note that the map
\[\pi_\star BPR\to\pi_\star(H\underline{{\mathbb F}_2}\otimes BPR)\]
sends $\overline{v}_{m}\mapsto 0$, since the latter is a constant Mackey functor in degrees $*\rho$, and $v_m=0$ in $H_*(BP;{\mathbb F}_2)$. However, it is not hard to see from the HSSS that
\[\overline{v}_{m}\neq0\in \pi_\star(i_*H\mathbb F_2\otimes BPR)\]
It seems likely that $2^m$ is the minimal nilpotence degree for all $m$.
\end{remark}
The computation of the $E_2$-page of the homological slice spectral sequence in the equivariant setting follows in the same way, where now we include the negative cone. We recall Stong's computation of $\pi_\star H\m{\Z}$.
\begin{proposition}\label{propstong}
The $RO(C_2)$-graded homotopy $\pi_\star H\m{\Z}$ is the square zero extension of the ring ${\mathbb Z}[a_\sigma,u_{2\sigma}]/(2a_\sigma)$ by the module
\[NC:={\mathbb Z}\{e_{2n\sigma}|n>0\}\oplus{\mathbb F}_2\{e_{(2n+1)\sigma}/a_\sigma^j|n>0,j\ge0\}\]
where $e_{k\sigma}=\mathrm{tr}(u_\sigma^{-k})$. The module structure on $NC$ is determined by the relations
\[a_\sigma e_{k\sigma}=u_{2\sigma}e_{2\sigma}=u_{2\sigma}e_{3\sigma}=0\]
and
\[u_{2\sigma}e_{k\sigma}=e_{(k-2)\sigma}\]
for $k\ge 4$.
\end{proposition}
\begin{proposition}
There is an isomorphism of $RO(C_2)$-graded rings
\[
\pi_\star(i_*H\mathbb F_2\otimes H\m{\Z})\cong (\mathcal A_*\square_{\mathcal A(0)_*}{\mathbb F}_2)[a_\sigma,x_1]\oplus NC\]
where the right-hand side is a square zero extension of the ring $(\mathcal A_*\square_{\mathcal A(0)_*}{\mathbb F}_2)[a_\sigma,x_1]$ by the module
\[NC:=(\mathcal A_*\square_{\mathcal A(0)_*}{\mathbb F}_2)\{e_{(2i+1)\sigma}/a_\sigma^j,x_1\cdot e_{(2i+1)\sigma}/a_\sigma^j\text{ : } i\ge1,j\ge0\}\]
defined by the relation
\[x_1\cdot (x_1\cdot e_{(2i+1)\sigma}/a_\sigma^j)=\zeta_1^2\cdot e_{(2i+1)\sigma}/a_\sigma^{j-2}+e_{(2i-1)\sigma}/a_\sigma^j\]
and the usual relations in $\pi_\star H\m{\Z}$.
\end{proposition}
\begin{proof}
This follows from Proposition \ref{propstong} and the isomorphisms
\aln{
\mathcal A_*\{e_{(2i+1)\sigma}/a_\sigma^j\}&\cong (\mathcal A_*\square_{\mathcal A(0)_*}{\mathbb F}_2)\{e_{(2i+1)\sigma}/a_\sigma^j\}\oplus \zeta_1\cdot(\mathcal A_*\square_{\mathcal A(0)_*}{\mathbb F}_2)\{e_{(2i+1)\sigma}/a_\sigma^j\}\\
&\cong(\mathcal A_*\square_{\mathcal A(0)_*}{\mathbb F}_2)\{e_{(2i+1)\sigma}/a_\sigma^j\}\oplus x_1\cdot(\mathcal A_*\square_{\mathcal A(0)_*}{\mathbb F}_2)\{e_{(2i+1)\sigma}/a_\sigma^{j+1}\}
}
and
\[(\mathcal A_*\square_{\mathcal A(0)_*}{\mathbb F}_2)\{e_{2i\sigma}\}\cong (\mathcal A_*\square_{\mathcal A(0)_*}{\mathbb F}_2)\{x_1\cdot e_{(2i+1)\sigma}\}\]
For the latter, we are using that
\[x_1\cdot e_{(2i+1)\sigma}=e_{2i\sigma}\]
as follows from the Frobenius relation and that $x_1$ restricts to $u_\sigma$. The relations (and the fact that $NC$ is square zero) then follow from the fact that
\[\pi_\star H\m{\Z}\to \pi_\star(i_*H\mathbb F_2\otimes H\m{\Z})\]
is a ring map, along with Corollary \ref{corx_1rel}.
\end{proof}
\begin{corollary}
We have an isomorphism
\[E_2^{*,\star}(BPRn;i_*H\mathbb F_2)\cong \bigg((\mathcal A_*\square_{\mathcal A(0)_*}{\mathbb F}_2)[a_\sigma,x_1]\oplus NC\bigg)[\overline{v}_{1},\ldots,\overline{v}_{m}]\]
An element $p\in(\mathcal A_*\square_{\mathcal A(0)_*}{\mathbb F}_2)_i$ has bidegree $(i,-i)$, and we have the following bidegrees
\aln{
|\overline{v}_{i}|&=((2^i-1)\rho,0)\\
|a_\sigma|&=(-\sigma,1)\\
|x_1|&=(1-\sigma,0)\\
|e_{(2i+1)\sigma}/a_\sigma^j|&=(-(2i+1)+(2i+j+1)\sigma,-j)
}
\end{corollary}
\begin{proposition}\label{prop:ROpermcycles}
In the spectral sequence
\[E_r^{*,\star}(k_{\mathbb R};i_*H\mathbb F_2)\]
the subalgebra
\[\bigg((\mathcal A_*\square_{\mathcal A(1)_*}{\mathbb F}_2)[a_\sigma,x_1]\oplus NC\bigg)[\overline{v}_{1}]\]
of the $E_2$-page consists of permanent cycles.
\end{proposition}
\begin{proof}
This follows from Corollary \ref{corx_mpermanent}, Theorem \ref{edgethm}, and the fact that the negative cone in $\pi_\star H\m{\Z}$ consists of permanent cycles in the slice spectral sequence for $BPR$.
\end{proof}
By Theorem \ref{differentialsthm}, the first differentials we see are
\aln{
d_3(\zeta_1^2)&=a_\sigma\overline{v}_{1}\\
d_3(\zeta_2)&=x_1\overline{v}_{1}
}
and $d_3$ vanishes on the rest of the algebra generators by Proposition \ref{prop:ROpermcycles}. This gives the following.
\begin{proposition}
$E_4^{*,\star}(k_{\mathbb R};i_*H\mathbb F_2)$ is the subalgebra of
\[\frac{\bigg((\mathcal A_*\square_{\mathcal A(0)_*}{\mathbb F}_2)[a_\sigma,x_1]\oplus NC\bigg)[\overline{v}_{1}]}{(a_\sigma\overline{v}_{1},x_1\overline{v}_{1})}\]
generated by
\[\frac{\bigg((\mathcal A_*\square_{\mathcal A(1)_*}{\mathbb F}_2)[a_\sigma,x_1]\oplus NC\bigg)[\overline{v}_{1}]}{(a_\sigma\overline{v}_{1},x_1\overline{v}_{1})}\]
and the classes
\aln{
x_2&:=x_1\zeta_1^2+a_\sigma\zeta_2\in\langle a_\sigma,\overline{v}_{1},x_1\rangle\\
y_n&:=e_{n\sigma}\zeta_1^2=\mathrm{tr}(u_\sigma^{-n}\zeta_1^2)\\
z_m&:=x_1\cdot\frac{e_{3\sigma}}{a_\sigma^m}\zeta_2\\
w&:=e_{2\sigma}\zeta_1^2\zeta_2=\mathrm{tr}(u_\sigma^{-2}\zeta_1^2\zeta_2)
}
for $n\ge 2$ and $m\ge0$
\end{proposition}
The subalgebra
\[\frac{\bigg((\mathcal A_*\square_{\mathcal A(1)_*}{\mathbb F}_2)[a_\sigma,x_1]\oplus NC\bigg)[\overline{v}_{1}]}{(a_\sigma\overline{v}_{1},x_1\overline{v}_{1})}\]
consists of permanent cycles by Proposition \ref{prop:ROpermcycles}, $x_2$ is a permanent cycle by Corollary \ref{corx_mpermanent}, the $y_n$'s are permanent cycles since they are transfers of permanent cycles, and we show below that the $z_m$'s are permanent cycles for degree reasons.
\begin{proposition}\label{prop:z_mperm}
The classes $z_m$ are permanent cycles for all $m\ge0$.
\end{proposition}
\begin{proof}
For this, note that
\[|z_m|=(1+(m+2)\sigma,-3-m)\]
So that
\[|d_r(z_m)|=((m+2)\sigma,-3-m+r)\]
Using our description of $E_4$, and the relations
\[\overline{v}_{1}x_2=\overline{v}_{1}y_n=\overline{v}_{1}z_m=\overline{v}_{1}w=0\]
the target $d_r(z_m)$ can be written as a sum of monomials of the form
\[p(\zeta_i)\cdot x_1^\epsilon\cdot \frac{e_{(2i+1)\sigma}}{a_\sigma^j}\]
written as elements in a subquotient of $E_2$, for $\epsilon=0,1$ and $p(\zeta_i)$ a monomial in $\mathcal A_*\square_{\mathcal A(0)_*}{\mathbb F}_2$. If $p\in (\mathcal A_*\square_{\mathcal A(0)_*}{\mathbb F}_2)_k$, this monomial has bidegree
\[(k,-k)+(-(2i+1-\epsilon)+(2i+1-\epsilon+j)\sigma,-j)\]
It follows that $k=2i+1-\epsilon$ so that the filtration is $-(2i+1-\epsilon+j)$. On the other hand, looking at the $\sigma$ degree, we have
\[2i+1-\epsilon+j=m+2\]
This is a contradiction, as $r\ge 4$ tells us the filtration must be at least $1-m$.
\end{proof}
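For instance, when $m=0$ the target $d_r(z_0)$ has degree $(2\sigma,-3+r)$, so a contributing monomial must have $2i+1-\epsilon+j=2$ and hence filtration $-(2i+1-\epsilon+j)=-2$, while $r\ge4$ forces the filtration of $d_r(z_0)$ to be at least $1$.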
\begin{corollary}\label{corexoticdiff}
The differential $d_5$ is determined by
\[d_5(w)=\overline{v}_{1}^2\]
\end{corollary}
\begin{proof}
By Proposition \ref{prop:z_mperm}, the only algebra generator of $E_4$ that is not a permanent cycle is $w$. By Proposition \ref{v_nnilpotent}, there must be a differential killing $\overline{v}_{1}^2$, and for degree reasons, the stated differential is the only possibility.
\end{proof}
\begin{corollary}
The spectral sequence $E_r^{*,\star}(k_{\mathbb R};i_*H\mathbb F_2)$ collapses on $E_6$, which is the subalgebra of
\[\frac{\bigg((\mathcal A_*\square_{\mathcal A(0)_*}{\mathbb F}_2)[a_\sigma,x_1]\oplus NC\bigg)[\overline{v}_{1}]}{(a_\sigma\overline{v}_{1},x_1\overline{v}_{1},\overline{v}_{1}^2)}\]
generated by
\[\frac{\bigg((\mathcal A_*\square_{\mathcal A(1)_*}{\mathbb F}_2)[a_\sigma,x_1]\oplus NC\bigg)[\overline{v}_{1}]}{(a_\sigma\overline{v}_{1},x_1\overline{v}_{1},\overline{v}_{1}^2)}\]
and the classes $x_2,y_n,z_m$ for $n\ge 2$ and $m\ge0$.
\end{corollary}
\begin{proof}
There are no new cycles because
\[w^2=a_\sigma w=u_\sigma w=wz_m=wy_n=wx_2=0\in E_6\]
since these relations hold on $E_2$. The generators are all permanent cycles, so $E_6=E_\infty$.
\end{proof}
\begin{remark}
In this section, we relied heavily on ad hoc degree arguments to control the algebra generators of the $RO(C_2)$-graded HSSS for $k_{\mathbb R}$. For larger values of $m$, it becomes harder to determine when classes coming from the negative cone in the HSSS for $BPRn$ are permanent cycles, so it seems that one needs additional input in these cases. However, by Proposition \ref{v_nnilpotent} there must be some pattern of differentials leaving the negative cone that kill powers of $\overline{v}_{m}$.
\end{remark}
\printbibliography
\end{document}
\begin{document}
\title[Sandpiles and Dominos]{Sandpiles and Dominos}
\author[Florescu]{Laura Florescu}
\address{Courant Institute, NYU, New York}
\email{[email protected]}
\author[Morar]{Daniela Morar}
\address{Department of Economics, University of Michigan, Ann Arbor}
\email{[email protected]}
\author[Perkinson]{David Perkinson}
\address{Department of Mathematics, Reed College}
\email{[email protected]}
\author[Salter]{Nick Salter}
\address{Department of Mathematics, University of Chicago}
\email{[email protected]}
\author[Xu]{Tianyuan Xu}
\address{Department of Mathematics, University of Oregon}
\email{[email protected]}
\date{\today}
\begin{abstract}
We consider the subgroup of the abelian sandpile group of the grid graph
consisting of configurations of sand that are symmetric with respect to
central vertical and horizontal axes. We show that the size of this group
is (i) the number of domino tilings of a corresponding weighted rectangular
checkerboard; (ii) a product of special values of Chebyshev polynomials; and
(iii) a double-product whose factors are sums of squares of values of
trigonometric functions. We provide a new derivation of the formula due to
Kasteleyn and to Temperley and Fisher for counting the number of domino tilings of
a $2m\times 2n$ rectangular checkerboard and a new way of counting the number
of domino tilings of a $2m\times 2n$ checkerboard on a M\"obius strip.
\end{abstract}
\maketitle
\section{Introduction}\label{section:Introduction}
This paper relates the Abelian Sandpile Model (ASM) on a grid graph to domino
tilings of checkerboards. The ASM is, roughly, a game in which one places
grains of sand on the vertices of a graph, $\Gamma$, whose vertices and edges we
assume to be finite in number. If the amount of sand on a vertex reaches a
certain threshold, the vertex becomes unstable and fires, sending a grain of
sand to each of its neighbors. Some of these neighbors, in turn, may now be
unstable. Thus, adding a grain of sand to the system may set off a cascade of
vertex firings. The resulting ``avalanche'' eventually subsides, even
though our graph is finite, since the system is not conservative: there is a
special vertex that serves as a sink, absorbing any sand that reaches it. It is
assumed that every vertex is connected to the sink by a path of edges, so as a
consequence, every pile of sand placed on the graph stabilizes after a finite
number of vertex firings. It turns out that this stable state only depends on
the initial configuration of sand, not on the order of the firings of unstable
vertices, which accounts for the use of the word ``abelian.''
Now imagine starting with no sand on $\Gamma$ then repeatedly choosing a vertex
at random, adding a grain of sand, and allowing the pile of sand to stabilize.
In the resulting sequence of configurations of sand, certain configurations will
appear infinitely often. These are the so-called ``recurrent'' configurations.
A basic theorem in the theory of sandpiles is that the collection of recurrent
configurations forms an additive group, where addition is defined as vertex-wise
addition of grains of sand, followed by stabilization. This group is called the
{\em sandpile group} or {\em critical group} of $\Gamma$. Equivalent versions
of the sandpile group have arisen independently. For a history and as a general
reference, see~\cite{Holroyd}.
In their seminal 1987 paper, Bak, Tang, and Wiesenfeld (BTW),~\cite{BTW}, studied
sandpile dynamics in the case of what we call the {\em sandpile grid graph}. To
construct the $m\times n$ sandpile grid graph, start with the ordinary grid
graph with vertices $[m]\times[n]$ and edges $\{(i,j),(i',j')\}$ such that
$|i-i'|+|j-j'|=1$. Then add a new vertex to serve as a sink, and add edges from
the boundary vertices to the sink so that each vertex on the grid has
degree~$4$. Thus, corner vertices have two edges to the sink (assuming $m$ and
$n$ are greater than~$1$), as on the left in Figure~\ref{fig:sandpile grid
graph}. Dropping one grain of sand at a time onto a sandpile grid graph and
letting the system stabilize, BTW experimentally finds that eventually the
system evolves into a barely stable ``self-organized critical'' state. This
critical state is characterized by the property that the sizes of avalanches
caused by dropping a single grain---measured either temporally (by the number of
ensuing vertex firings) or spatially (by the number of different vertices that
fire)---obey a power law. The power-laws observed by BTW in the case of some
sandpile grid graphs have not yet been proven.
The ASM, due to Dhar~\cite{Dhar1}, is a generalization of the BTW model to a
wider class of graphs. It was Dhar who made the key observation of its abelian
property and who coined the term ``sandpile group'' for the collection of
recurrent configurations of sand. In terms of the ASM, the evolution to a
critical state observed by BTW comes from the fact that by repeatedly adding a
grain of sand to a graph and stabilizing, one eventually reaches a configuration
that is recurrent. Past this point, each configuration reached by adding sand
and stabilizing is again recurrent.
Other work on ASM and statistical physics includes~\cite{Fey}. In addition, the
ASM has been shown to have connections with a wide range of mathematics,
including algebraic geometry and commutative algebra
(\cite{Baker},~\cite{Dochtermann},~\cite{Payne},~\cite{Madhu},~\cite{Madhu2},~\cite{Wilmes}),
pattern formation (\cite{Ostojic},\cite{Paoletti},\cite{Pedgen1},
\cite{Pedgen2},\cite{Sadhu}), potential theory
(\cite{Baker2},\cite{Biggs},\cite{Levine}), combinatorics
(~\cite{Hopkins2},~\cite{Hopkins1},\cite{Merino},~\cite{Postnikov}), and number
theory (\cite{Musiker}). The citations here are by no means exhaustive. One
might argue that the underlying reason for these connections is that the firing
rules for the ASM encode the discrete Laplacian matrix of the graph (as
explained in Section~\ref{section:Sandpiles}). Thus, the ASM is a means of
realizing the dynamics implicit in the discrete Laplacian.
\begin{figure}[ht]
\begin{tikzpicture}[scale=0.5]
\def\x{10};
\def\y{3};
\node at (\x+0.5,\y+0.95){\# grains};
\node at (0,0){\includegraphics[height=3.0in]{400x400identity300dpi.png}};
\draw[fill=black] (\x,\y) rectangle (\x+0.5,\y-0.5);
\node at (\x+1.4,\y-0.25){$=0$};
\draw[fill=yellow] (\x,\y-1) rectangle (\x+0.5,\y-1.5);
\node at (\x+1.4,\y-1.25){$=1$};
\draw[fill=blue] (\x,\y-2) rectangle (\x+0.5,\y-2.5);
\node at (\x+1.4,\y-2.25){$=2$};
\draw[fill=red] (\x,\y-3) rectangle (\x+0.5,\y-3.5);
\node at (\x+1.4,\y-3.25){$=3$};
\end{tikzpicture}
\caption{Identity element for the sandpile group of the $400\times400$
sandpile grid graph.}\label{fig:400x400identity}
\end{figure}
The initial motivation for our work was a question posed to the second and third
authors by Irena Swanson. She was looking at an online computer
program~\cite{Maslov} for visualizing the ASM on a sandpile grid graph. By
pushing a button, the program adds one grain of sand to each of the nonsink
vertices then stabilizes the resulting configuration. Swanson asked, ``Starting
with no sand, how many times would I need to push this button to get the
identity of the sandpile group?'' A technicality arises here: the configuration
consisting of one grain of sand on each vertex is not recurrent, hence, not in
the group. However, the all-$2$s configuration, having two grains at each
vertex, is recurrent. So for the sake of this introduction, we reword the
question as: ``What is the order of the all-$2$s configuration?''
Looking at data (cf.~Section~\ref{section:order of all-2s},
Table~\ref{table:all-2s}), one is naturally led to the special case of the
all-$2$s configuration on the $2n\times2n$ sandpile grid graph, which we denote
by $\vec{2}_{2n\times 2n}$. The orders for $\vec{2}_{2n\times2n}$ for
$n=1,\dots,5$ are
\[
1,3,29,901,89893.
\]
Plugging these numbers into the Online Encyclopedia of Integer Sequences
yields a single match, sequence A065072 (\cite{OEIS}): the sequence of odd integers
$(a_n)_{n\geq1}$ such that $2^na_n^2$ is the number of domino tilings of the
$2n\times 2n$ checkerboard.\footnote{By a {\em checkerboard} we mean a rectangular array of squares. A {\em domino} is a $1\times2$ or $2\times1$ array of squares. A {\em domino tiling} of a
checkerboard consists of covering all of the squares of the checkerboard---each
domino covers two---with dominos.}
(Some background on this sequence is included in
Section~\ref{section:order of all-2s}.) So we conjectured that the order of
$\vec{2}_{2n\times2n}$ is equal to $a_n$, and trying to prove this is what first
led to the ideas presented here. Difficulty in finishing our
proof of the conjecture led to further computation, at which time we
(embarrassingly) found that the order of $\vec{2}_{2n\times2n}$ for $n=6$ is,
actually, $5758715=a_6/5$. Thus, the conjecture is false, and there are
apparently at least two natural sequences that start $1,3,29,901, 89893$!
Theorem~\ref{thm4} shows that the cyclic group generated
by $\vec{2}_{2n\times2n}$ is isomorphic to a subgroup of a sandpile group whose
order is $a_n$, and therefore the order of $\vec{2}_{2n\times2n}$ divides $a_n$.
We do not know when equality holds, and we have not yet answered Irena Swanson's question.
On the other hand, further experimentation using the mathematical software Sage
led us to a more fundamental connection between the sandpile group and domino
tilings of the grid graph. The connection is due to a property that is a
notable feature of the elements of the subgroup generated by the all-$2$s
configuration---{\em symmetry} with respect to the central horizontal and
vertical axes. The recurrent identity element for the sandpile grid graph, as
exhibited in Figure~\ref{fig:400x400identity}, also has this symmetry.\footnote{For
square grids, the identity is symmetric with respect to the dihedral group of
order $8$, but this phenomenon is of course not present in the rectangular grids
that we also consider.} If $\Gamma$ is any graph equipped with an
action of a finite group~$G$, it is natural to consider the collection of
$G$-invariant configurations. Proposition~\ref{prop:symmetric configs}
establishes that the symmetric recurrent configurations form a subgroup of the
sandpile group for $\Gamma$. The central purpose of this paper is to explain
how symmetry links the sandpile group of the grid graph to domino tilings.
We now describe our main results. We study the recurrent configurations on the sandpile grid graph having $\mathbb{Z}/2 \times \mathbb{Z}/2$ symmetry with respect to the central horizontal and vertical axes. The cases of even$\times$even-, even$\times$odd-, and
odd$\times$odd-dimensional grids each have their own particularities, and so we divide their analysis into separate cases, resulting in Theorems~\ref{thm1},~\ref{thm2},
and~\ref{thm3}, respectively. In each case, we compute the number of symmetric
recurrents as (i) the number of domino tilings of corresponding
(weighted) rectangular checkerboards; (ii) a product of special values of Chebyshev
polynomials; and (iii) a double-product whose factors are sums of squares of
values of trigonometric functions.
For instance, of the $557,568,000$ elements of the sandpile group of the $4
\times 4$ grid graph, only the $36$ configurations displayed in
Figure~\ref{fig:checker4x4} are up-down and left-right symmetric. In accordance
with Theorem~\ref{thm1},
\begin{align}\label{intro:double product}
36&=U_4(i\cos(\pi/5))\,U_4(i\cos(2\pi/5))\notag\\[5pt]
&=\prod_{h=1}^{2}\prod_{k=1}^2\left(4\,\cos^2(h\pi/5)+4\,\cos^2(k\pi/5)\right),
\end{align}
where $U_4(x)=16x^4-12x^2+1$ is the fourth Chebyshev polynomial of the second
kind.
The double-product in equation~\eqref{intro:double product} is an instance of
the famous formula due to Kasteleyn~\cite{Kasteleyn} and to Temperley and
Fisher~\cite{Temperley} for the number of domino tilings of a $2m\times 2n$
checkerboard:
\[
\prod_{h=1}^m\prod_{k=1}^n\left(4\,\cos^2\frac{h\pi}{2m+1}+
4\,\cos^2\frac{k\pi}{2n+1}\right),
\]
for which Theorem~\ref{thm1} provides a new proof.
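As a quick numerical illustration (not part of the proofs, and not the Sage code used for the computations in this paper), the following Python sketch evaluates this double product; it reproduces the value $36$ of equation~\eqref{intro:double product} for the $4\times4$ checkerboard ($m=n=2$) and the count $2^4\cdot 901^2=12988816$ for the $8\times8$ checkerboard ($m=n=4$).
\begin{verbatim}
# Illustration only: evaluate the Kasteleyn--Temperley--Fisher double product
# for the number of domino tilings of a 2m x 2n checkerboard.
from math import cos, pi

def domino_count(m, n):
    p = 1.0
    for h in range(1, m + 1):
        for k in range(1, n + 1):
            p *= 4*cos(h*pi/(2*m + 1))**2 + 4*cos(k*pi/(2*n + 1))**2
    return round(p)

print(domino_count(2, 2))   # 36        (4 x 4 checkerboard)
print(domino_count(4, 4))   # 12988816  (8 x 8 checkerboard)
\end{verbatim}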
In the case of the even$\times$odd grid, there is an extra ``twist'': the
double-product in Theorem~\ref{thm2} for the even$\times$odd grid is (a slight
re-writing of) the formula of Lu and Wu~\cite{LW} for the number of domino
tilings of a checkerboard on a M\"obius strip.
To sketch the main idea behind the proofs of these theorems, suppose a group~$G$
acts on a graph $\Gamma$ with fixed sink vertex
(cf.~Section~\ref{subsection:Symmetric configurations}). To study symmetric
configurations with respect to the action of $G$, one considers a new firing
rule in which a vertex only fires simultaneously with all other vertices in its
orbit under~$G$. This new firing rule can be encoded in an $m\times m$ matrix
$D$ where $m$ is the number of orbits of nonsink vertices of $G$. We show in
Corollary~\ref{cor:nsr} that $\det(D)$ is the number of symmetric recurrents on
$G$. Suppose, as is the case for for sandpile grid graphs, that either $D$ or
its transpose happens to be the (reduced) Laplacian of an associated graph
$\Gamma'$. The nonsink vertices correspond to the orbits of vertices of the
original graph. The well-known matrix-tree theorem says that the determinant of
$D$ is the number of spanning trees of $\Gamma'$. Then the generalized
Temperley bijection~\cite{KPW} says these spanning trees correspond with perfect
matchings of a third graph $\Gamma''$. In this way, the symmetric recurrents on
$\Gamma$ can be put into correspondence with the perfect matchings of
$\Gamma''$. In the case where $\Gamma$ is a sandpile grid graph, $\Gamma''$ is
a weighted grid graph, and perfect matchings of it correspond to weighted
tilings of a checkerboard. Also, in this case, the matrix $D$ has a nice block
triangular form (cf.~Lemma~\ref{lemma:tridiagonal}), which leads to a recursive
formula for its determinant and a connection with Chebyshev polynomials.
\centerline{\sc outline}
\hangindent1em
\hangafter=0
\noindent\ref{section:Introduction} {\bf Introduction.}\vskip4pt
\hangindent1em
\hangafter=0
\noindent\ref{section:Sandpiles} {\bf Sandpiles.}\vskip2pt
\hangindent2em
\hangafter=0
\noindent\ref{subsection:Basics} {\bf Basics.} A summary of the basic theory of
sandpiles needed for this paper.\vskip2pt
\hangindent2em
\hangafter=0
\noindent\ref{subsection:Symmetric configurations} {\bf Symmetric
configurations.} Group actions on sandpile graphs.
Proposition~\ref{prop:symmetric configs} shows that the collection of symmetric
recurrents forms a subgroup of the sandpile group. We introduce the {\em
symmetrized reduced Laplacian} operator,~$\widetilde{\Delta}^G$, and use it to determine the structure of
this subgroup in Proposition~\ref{prop:symmetric subgroup iso}. An important
consequence is Corollary~\ref{cor:nsr}, which shows that the number of
symmetric recurrents equals $\det\widetilde{\Delta}^G$.\vskip4pt
\hangindent1em
\hangafter=0
\noindent\ref{section:Matchings and trees} {\bf Matchings and trees.} A
description of the generalized Temperley bijection~\cite{KPW} between
weighted spanning trees of a planar graph and weighted perfect matchings of a
related graph.\vskip4pt
\hangindent1em
\hangafter=0
\noindent\ref{section:symmetric recurrents} {\bf Symmetric recurrents on the sandpile grid
graph.} We count symmetric recurrents on sandpile grid graphs using weighted
tilings of checkerboards, Chebyshev polynomials, and Kasteleyn-type formulae.
The problem is split into three cases.\vskip2pt
\hangindent2em \hangafter=0 \noindent\ref{subsection:tridiagonal} {\bf Some
tridiagonal matrices.} A summary of some properties of Chebyshev polynomials and
a proof of Lemma~\ref{lemma:tridiagonal}, which calculates the determinant of a
certain form of tridiagonal block matrix. The symmetrized reduced Laplacian
matrices for the three classes of sandpile grid graphs, below, have this form.
Their determinants count symmetric recurrents.\vskip2pt
\hangindent2em
\hangafter=0
\noindent\ref{subsection:symmetric recurrents on evenxeven grid} {\bf Symmetric
recurrents on a $2m\times 2n$ sandpile grid graph.} See Theorem~\ref{thm1}.\vskip2pt
\hangindent2em
\hangafter=0
\noindent\ref{subsection:symmetric recurrents on evenxodd grid} {\bf Symmetric
recurrents on a $2m\times(2n-1)$ sandpile grid graph.} See
Theorem~\ref{thm2}.\vskip2pt
\hangindent2em
\hangafter=0
\noindent\ref{subsection:symmetric recurrents on oddxodd grid} {\bf Symmetric
recurrents on a $(2m-1)\times(2n-1)$ sandpile grid graph.} See
Theorem~\ref{thm3}.\vskip4pt
\hangindent1em \hangafter=0 \noindent\ref{section:order of all-2s} {\bf The
order of the all-twos configuration.} Corollary~\ref{cor:all-2s order}: the
order of the all-$2$s configuration on the $2n\times2n$ sandpile grid divides
the odd number $a_n$ such that $2^na_n^2$ is the number of domino tilings of the
$2n\times2n$ checkerboard.\vskip4pt
\hangindent1em
\hangafter=0
\noindent\ref{section:conclusion} {\bf Conclusion.} A list of open problems.
\noindent{\bf Acknowledgments.} We thank Irena Swanson for providing initial
motivation. We thank the organizers of the Special Session on Laplacian Growth
at the Joint Mathematics Meeting, New Orleans, LA, 2011 at which some of this
work was presented, and we thank Lionel Levine, in particular, for encouragement
and helpful remarks. Finally, we would like to acknowledge the mathematical
software Sage~\cite{sage} and the Online Encyclopedia of Integer
Sequences~\cite{OEIS} which were both essential for our investigations.
\section{Sandpiles}\label{section:Sandpiles}
\subsection{Basics}\label{subsection:Basics} In this section, we recall the basic
theory of sandpile groups. The reader is referred to \cite{Holroyd} for
details. Let $\Gamma=(V,E,\wt,s)$ be a directed graph with vertices $V$, edges
$E$, edge-weight function $\wt\colon V\times V\to \mathbb{N}:=\{0,1,2,\dots\}$, and
special vertex $s\in V$. For each pair $v,w\in V$, we think of $\wt(v,w)$ as
the number of edges running from $v$ to $w$. In particular, $\wt(v,w)>0$ if and
only if $(v,w)\in E$. The vertex $s$ is called the {\em sink} of $\Gamma$, and
it is assumed that each vertex of $\Gamma$ has a directed path to $s$. Let $\widetilde{V}
:= V\setminus\{s\}$ be the set of non-sink vertices. A {\em (sandpile)
configuration} on $\Gamma$ is an element of $\mathbb{N}\widetilde{V}$, the free monoid on $\widetilde{V}$.
If $c=\sum_{v\in\widetilde{V}}c_v\,v$ is a configuration, we think of each component,
$c_v$, as a number of grains of sand stacked on vertex $v$. The vertex
$v\in\widetilde{V}$ is {\em unstable} in $c$ if $c_v\geq \outdeg(v)$, where
$\outdeg(v):=\sum_{w\in V}\wt(v,w)$ is the {\em out-degree} of $v$, i.e., the
number of directed edges emanating from~$v$. If $v$ is unstable in $c$, we may
{\em fire} ({\em topple}) $c$ at $v$ to get a new configuration $c'$ defined for
each $w\in\widetilde{V}$ by
\[
c'_w=
\begin{cases}
c_v-\outdeg(v)+\wt(v,v)&\mbox{if $w=v$,}\\
c_w+\wt(v,w)&\mbox{if $w\neq v$}.
\end{cases}
\]
In other words,
\[
c' = c - \outdeg(v)v+\textstyle\sum_{w\in\widetilde{V}}\wt(v,w)\,w.
\]
If the configuration $\tilde{c}$ is obtained from $c$ by a sequence of firings
of unstable vertices, we write
\[
c\to\tilde{c}.
\]
Since each vertex has a path to the sink, $s$, it turns out that by repeatedly
firing unstable vertices each configuration relaxes to a stable configuration.
Moreover, this stable configuration is independent of the ordering of firing of
unstable vertices. Thus, we may talk about {\em the} stabilization of a
configuration $c$, which we denote by~$c^{\circ}$. Define the binary operation
of {\em stable addition} on the set of all configurations as component-wise
addition followed by stabilization. In other words, the stable addition of
configurations $a$ and $b$ is given by
\[
(a+b)^{\circ}.
\]
Let $\mathcal{M}$ denote the collection of stable
configurations on $\Gamma$. Then stable addition restricted to $\mathcal{M}$
makes $\mathcal{M}$ into a commutative monoid.
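To make the firing rule concrete, here is a minimal Python sketch of stabilization and stable addition (an illustration only; it is not the Sage code used for the computations reported in this paper). The encoding is an assumption of the sketch: a graph is a dictionary \texttt{wt} with \texttt{wt[v][w]} the number of edges from \texttt{v} to \texttt{w}, and a configuration is a dictionary whose keys are all the non-sink vertices.
\begin{verbatim}
def outdeg(wt, v):
    return sum(wt[v].values())

def stabilize(wt, sink, c):
    # Repeatedly fire unstable non-sink vertices; sand sent to the sink is
    # absorbed.  Assumes every non-sink vertex is a key of c and that every
    # vertex has a directed path to the sink (so the loop terminates).
    c = dict(c)
    stack = [v for v in c if c[v] >= outdeg(wt, v)]
    while stack:
        v = stack.pop()
        d = outdeg(wt, v)
        if c[v] < d:
            continue                  # became stable in the meantime
        c[v] -= d
        for w, k in wt[v].items():
            if w != sink:
                c[w] += k
                if c[w] >= outdeg(wt, w):
                    stack.append(w)
        if c[v] >= d:
            stack.append(v)
    return c

def stable_add(wt, sink, a, b):
    # Vertex-wise addition followed by stabilization.
    return stabilize(wt, sink, {v: a[v] + b[v] for v in a})
\end{verbatim}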
A configuration $c$ on $\Gamma$ is {\em recurrent} if: (1) it is stable, and (2)
given any configuration $a$, there is a configuration $b$ such that
$(a+b)^{\circ}=c$. The {\em maximal stable configuration}, $c_{\mathrm{max}}$, is defined
by
\[
c_{\mathrm{max}}:=\sum_{v\in\widetilde{V}}(\outdeg(v)-1)\,v.
\]
It turns out that the collection of recurrent configurations forms
a principal semi-ideal of $\mathcal{M}$ generated by $c_{\mathrm{max}}$. This
means that the recurrent configurations are exactly those obtained by adding
sand to the maximal stable configuration and stabilizing. Further, the
collection of recurrent configurations forms a group, $\mathcal{S}(\Gamma)$,
called the {\em sandpile group} for $\Gamma$. Note that the identity for
$\mathcal{S}(\Gamma)$ is not usually the zero-configuration, $\vec{0}\in\mathbb{N}\widetilde{V}$.
For an undirected graph, i.e., a graph for which $\wt(u,v)=\wt(v,u)$ for each
pair of vertices $u$ and $v$, one may use the {\em burning algorithm}, due to
Dhar~\cite{Dhar2}, to determine whether a configuration is recurrent (for a
generalization to directed graphs, see~\cite{Speer}):
\begin{thm}[{\cite{Dhar2},\cite[Lemma~4.1]{Holroyd}}]\label{thm:Dhar} Let $c$ be
a stable configuration on an undirected graph $\Gamma$. Define the {\em burning configuration}
on~$\Gamma$ to be the configuration obtained by {\em firing the sink vertex}:
\[
b:=\sum_{v\in\widetilde{V}}\wt(s,v)\,v.
\]
Then in the stabilization of $b+c$, each vertex fires at most once, and
the following are equivalent:
\begin{enumerate}
\item $c$ is recurrent;
\item $(b+c)^{\circ}=c$;
\item in the stabilization of $b+c$, each non-sink vertex fires.
\end{enumerate}
\end{thm}
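In the same illustrative spirit, Dhar's burning test can be phrased as a short check on top of the \texttt{stabilize} sketch above (again an assumption-laden illustration, not the code used for our computations): fire the sink once and see whether the configuration returns to itself.
\begin{verbatim}
def is_recurrent(wt, sink, c):
    # c is assumed stable and the graph undirected, as in the theorem above.
    b = {v: wt[sink].get(v, 0) for v in c}     # burning configuration
    return stabilize(wt, sink, {v: c[v] + b[v] for v in c}) == c
\end{verbatim}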
Define the {\em proper Laplacian}, $L\colon \mathbb{Z}^V\to\mathbb{Z}^V$, of $\Gamma$ by
\[
L(f)(v):=\sum_{w\in V}\wt(v,w)(f(v)-f(w))
\]
for each function $f\in\mathbb{Z}^V$. Taking the $\mathbb{Z}$-dual (applying the functor
$\mathrm{Hom}(\ \cdot\ ,\mathbb{Z}))$ gives the mapping of free abelian groups
\[
\Delta\colon\mathbb{Z} V\to\mathbb{Z} V
\]
defined on vertices $v\in V$ by
\[
\Delta(v)=\outdeg(v)\,v-\sum_{w\in V}\wt(v,w)\,w.
\]
We call $\Delta$ the {\em Laplacian} of $\Gamma$. Restricting $\Delta$ to
$\mathbb{Z}\widetilde{V}$ and setting the component of~$s$ equal to $0$ gives the {\em reduced
Laplacian}, $\widetilde{\Delta}\colon\mathbb{Z}\widetilde{V}\to\mathbb{Z}\widetilde{V}$. If $v$ is an unstable vertex in a
configuration $c$, firing $v$ gives the new configuration
\[
c-\widetilde{\Delta} v.
\]
There is a well-known isomorphism
\begin{align}\label{basic iso}
\mathcal{S}(\Gamma)&\to\mathbb{Z}\widetilde{V}/\mathrm{image}(\widetilde{\Delta})\\
c&\mapsto c.\nonumber
\end{align}
While there may be many stable configurations in each equivalence class of
$\mathbb{Z}\widetilde{V}$ modulo $\mathrm{image}(\widetilde{\Delta})$, there is only one that is recurrent. For
instance, the recurrent element in the equivalence class of $\vec{0}$ is the
identity of $\mathcal{S}(\Gamma)$.
A {\em spanning tree of $\Gamma$ rooted at $s$} is a directed subgraph
containing all the vertices, having no directed cycles, and for which $s$ has
no out-going edges while every other vertex has exactly one out-going edge. The
weights of the edges of a spanning tree are the same as they
are for $\Gamma$, and the {\em weight} of a spanning tree is the product of the
weights of its edges. The matrix-tree theorem says the sum of the weights of
the set of all spanning trees of $\Gamma$ rooted at $s$ is equal to $\det\widetilde{\Delta}$, the
determinant of the reduced Laplacian. It then follows from (\ref{basic iso})
that the number of elements of the sandpile group is also the sum of the weights
of the spanning trees rooted at $s$.
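For a small illustration of this determinant computation (not part of the proofs), the reduced Laplacian of the $2\times2$ sandpile grid graph is written out below; its determinant $192$ is both the number of spanning trees rooted at the sink and the order of the sandpile group.
\begin{verbatim}
from fractions import Fraction

def det(M):
    # Exact determinant of a square integer matrix via rational elimination.
    A = [[Fraction(x) for x in row] for row in M]
    n, d = len(A), Fraction(1)
    for i in range(n):
        p = next((r for r in range(i, n) if A[r][i] != 0), None)
        if p is None:
            return 0
        if p != i:
            A[i], A[p] = A[p], A[i]
            d = -d
        d *= A[i][i]
        for r in range(i + 1, n):
            f = A[r][i] / A[i][i]
            for col in range(i, n):
                A[r][col] -= f * A[i][col]
    return int(d)

# Reduced Laplacian of the 2x2 sandpile grid graph: every vertex has degree 4,
# and each corner has two extra edges to the sink.  Vertex order:
# (1,1),(1,2),(2,1),(2,2).
L = [[ 4, -1, -1,  0],
     [-1,  4,  0, -1],
     [-1,  0,  4, -1],
     [ 0, -1, -1,  4]]
print(det(L))   # 192
\end{verbatim}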
\subsection{Symmetric configurations}\label{subsection:Symmetric configurations}
Preliminary versions of the results in this section appear in \cite{Durgin}.
Let $G$ be a finite group. An {\em action} of $G$ on $\Gamma$ is an action of
$G$ on $V$ fixing $s$, sending edges to edges, and preserving edge-weights. In
detail, it is a mapping
\begin{eqnarray*}
G\times V&\to&V\\
(g,v)&\mapsto&gv
\end{eqnarray*}
satisfying
\begin{enumerate}
\item if $e$ is the identity of $G$, then $ev=v$ for all $v\in V$;
\item $g(hv)=(gh)v$ for all $g,h\in G$ and $v\in V$;
\item $gs=s$ for all $g\in G$;
\item if $(v,w)\in E$, then $(gv,gw)\in E$ and both edges have the same
weight.
\end{enumerate}
Note that these conditions imply that $\outdeg(v)=\outdeg(gv)$ for all
$v\in V$ and $g\in G$.
For the rest of this section, let $G$ be a group acting on $\Gamma$.
By linearity, the action of $G$ extends to an action on $\mathbb{N} V$ and $\mathbb{Z} V$.
Since~$G$ fixes the sink, $G$ acts on configurations and each element of
$G$ induces an automorphism of $\mathcal{S}(\Gamma)$ (cf.~\ref{cor:preserved}).
We say a configuration $c$ is {\em symmetric} (with respect to the action
by $G$) if $gc=c$ for all $g\in G$.
\begin{prop}
The action of $G$ commutes with stabilization. That is, if $c$ is any
configuration on $\Gamma$ and $g\in G$, then $g(c^{\circ})=(gc)^{\circ}$.
\end{prop}
\begin{proof} Suppose that $c$ is stabilized by firing the sequence of vertices
$v_1,\dots,v_t$. Then
\[
c^{\circ}=c-\sum_{i=1}^t\widetilde{\Delta} v_i.
\]
At the $k$-th step in the stabilization process, $c$ has relaxed to the
configuration $c':=c-\sum_{i=1}^k\widetilde{\Delta} v_i$. A vertex $v$ is unstable in
$c'$ if and only if $gv$ is unstable in
$gc'=gc-\sum_{i=1}^k\widetilde{\Delta} (gv_i)$. Thus, we can fire the sequence of
vertices $gv_1,\dots,gv_t$ in~$gc$, resulting in the stable configuration
\[
(gc)^{\circ}=gc-\sum_{i=1}^t\widetilde{\Delta} (g v_i).
\]
\end{proof}
\begin{cor}\label{cor:preserved}
The action of $G$ preserves recurrent configurations, i.e., if $c$
is a recurrent configuration and $g\in G$, then $gc$
is recurrent.
\end{cor}
\begin{proof}
If $c$ is recurrent, we can find a configuration $b$ such that
$c=(b+c_{\mathrm{max}})^{\circ}$. Then,
\[
gc=g(b+c_{\mathrm{max}})^{\circ}=(gb+gc_{\mathrm{max}})^{\circ}=(gb+c_{\mathrm{max}})^{\circ}.
\]
Hence, $gc$ is recurrent.
\end{proof}
\begin{cor}\label{cor:symm-stab}
If $c$ is a symmetric configuration, then so is its stabilization.
\end{cor}
\begin{proof}
For all $g\in G$, if $gc=c$, then
$g(c^{\circ})=(gc)^{\circ}=c^{\circ}$.
\end{proof}
\begin{remark}
In fact, if $c$ is a symmetric configuration, one may find a sequence of
symmetric configurations, $c_1, \dots,c_t$ with $c_t=c^{\circ}$ such that
$c\to c_1\to\cdots\to c_t$. This follows since in a symmetric configuration
a vertex $v$ is unstable if and only if $gv$ is unstable for all $g\in G$. To
construct $c_{i+1}$ from $c_i$, simultaneously fire all unstable vertices of
$c_i$ (an alternative is to pick any vertex $v$, unstable in $c_{i}$, and
simultaneously fire the vertices in $\{gv:g\in G\}$).
\end{remark}
\begin{prop}\label{prop:symmetric configs}
The collection of symmetric recurrent configurations forms a subgroup of the
sandpile group $\mathcal{S}(\Gamma)$.
\end{prop}
\begin{proof}
Since the group action respects addition in $\mathbb{N}\widetilde{V}$ and
stabilization, the sum of two symmetric recurrent configurations is again
symmetric and recurrent. There is at least one symmetric recurrent
configuration, namely, $c_{\mathrm{max}}$. Since the sandpile group is
finite, it follows that these configurations form a subgroup.
\end{proof}
\begin{notation}
The subgroup of symmetric recurrent configurations on $\Gamma$ with respect to
the action of the group $G$ is denoted $\mathcal{S}(\Gamma)^G$.
\end{notation}
\begin{prop}
If $c$ is symmetric and recurrent then $c=(a+c_{\mathrm{max}})^{\circ}$ for some
symmetric configuration~$a$.
\end{prop}
\begin{proof} By \cite{Speer} there exists an element $b$ in the image of
$\widetilde{\Delta}$ such that: (1) $b_v\geq0$ for all $v\in\widetilde{V}$, and (2) for each
vertex $w\in\widetilde{V}$, there is a directed path to $w$ from some $v\in\widetilde{V}$ such that
$b_v>0$, i.e., from some $v$ in the {\em support} of $b$. (If $\Gamma$ is
undirected, one may find such a $b$ by applying
$\widetilde{\Delta}$ to the vector whose components are all~$1$s). Define
\[
b^G=\sum_{g\in G}gb.
\]
Then $b^G$ is symmetric and equal to zero modulo the image of $\widetilde{\Delta} $. Take a
large positive integer $N$ and consider $Nb^G$, the vertex-wise addition of $b^G$
with itself~$N$ times without stabilizing. Every vertex of $\Gamma$ is
connected by a path from a vertex in the support of~$b$, and hence, the same is
true of $Nb^G$. Thus, by choosing~$N$ large enough and by firing symmetric
vertices of $Nb^G$, we obtain a symmetric configuration $b'$ such that $b'_v\geq
c_{\mathrm{max},v}$ for all $v$ and such that $b'$ is zero modulo the image of
$\widetilde{\Delta}$. Define $a=b'-c_{\mathrm{max}}+c$, by construction a symmetric configuration.
The unique recurrent element in the equivalence
class of $b'+c$ modulo the image of $\widetilde{\Delta}$ is $c$. Therefore,
\[
(a+c_{\mathrm{max}})^{\circ}=(b'+c)^{\circ}=c.
\]
\end{proof}
The {\em orbit} of $v\in V$ under~$G$ is the set
\[
Gv=\{gv:g\in G\}.
\]
Let $\mathcal{O}=\mathcal{O}(\Gamma,G)=\{Gv:v\in \widetilde{V}\}$ denote the set of orbits
of the non-sink vertices. The {\em symmetrized reduced Laplacian} is the $\mathbb{Z}$-linear mapping
\begin{equation}\label{eqn:srl}
\widetilde{\Delta}^G\colon\mathbb{Z}\mathcal{O}\to\mathbb{Z}\mathcal{O}
\end{equation}
such that for all $v,w\in\widetilde{V}$, the $Gw$-th component of $\widetilde{\Delta}^G(Gv)$
is
\[
\left(\textstyle\sum_{u\in Gv}\widetilde{\Delta}(u)\right)_w.
\]
\begin{remark}\label{orbit correspondence} If $c\in\mathbb{Z}\widetilde{V}$ is symmetric, then
define $[c]\in\mathbb{Z}\mathcal{O}$ by $[c]_{Gv}:=c_v$ for all $v\in\widetilde{V}$, thus
obtaining a bijection between symmetric elements of $\mathbb{Z}\widetilde{V}$ and
$\mathbb{Z}\mathcal{O}$.
The mapping $\widetilde{\Delta}^G$ is defined so that if $c$ is a symmetric
configuration and $v\in \widetilde{V}$, then $[c]-\widetilde{\Delta}^G(Gv)$ is the element of $\mathbb{Z}\mathcal{O}$
corresponding to
\[
c-\widetilde{\Delta}(\textstyle\sum_{w\in Gv}w),
\]
the symmetric configuration obtained from $c$ by firing all vertices in the
orbit of $v$.
\end{remark}
For the following let $r\colon\mathbb{Z}\widetilde{V}/\mathrm{image}(\widetilde{\Delta})\to\mathcal{S}(\Gamma)$
denote the inverse of the isomorphism in (\ref{basic iso}).
\begin{prop}\label{prop:symmetric subgroup iso} There is an isomorphism of groups,
\[
\phi\colon\mathbb{Z}\mathcal{O}/\mathrm{image}(\widetilde{\Delta}^G)\to\mathcal{S}(\Gamma)^G,
\]
determined by $Gv\mapsto r(\textstyle\sum_{w\in Gv}w)$ for $v\in\widetilde{V}$.
\end{prop}
\begin{proof}
The homomorphism $\lambda\colon\mathbb{Z}\mathcal{O}\to\mathbb{Z}\widetilde{V}$ determined by
\[
\lambda(Gv):=\sum_{w\in Gv}w
\]
for $v\in\widetilde{V}$ induces the (well-defined) mapping
\[
\Lambda\colon\mathbb{Z}\mathcal{O}/\mathrm{image}(\widetilde{\Delta}^G)\to\mathbb{Z}\widetilde{V}/\mathrm{image}(\widetilde{\Delta}).
\]
To see that the image of $r\circ\Lambda$ is symmetric, consider
the symmetric configuration $|\mathcal{S}(\Gamma)|\cdot c_{\mathrm{max}}\in\mathbb{Z}\widetilde{V}$, a
configuration in the image of $\widetilde{\Delta}$. For
each $v\in\widetilde{V}$,
\[
\phi(Gv)=r(\Lambda(Gv))=\left(\,|\mathcal{S}(\Gamma)|\cdot c_{\mathrm{max}}+{\lambda}(Gv)\right)^{\circ},
\]
which is symmetric by Corollary~\ref{cor:symm-stab}.
The mapping $c\mapsto[c]$, introduced in Remark \ref{orbit correspondence}, is a
left inverse to $\lambda$. Thus, if $c\in\mathcal{S}(\Gamma)^G$, then
$\phi([c])=c$, and hence $\phi$ is surjective. To show that $\phi$ is
injective, it suffices to show that $\Lambda$ is injective. So suppose that
$a=\lambda(o)$ for some $o\in\mathbb{Z}\mathcal{O}$ and that
$a=\widetilde{\Delta}(b)$ for some $b\in\mathbb{Z}\widetilde{V}$. Fix $g\in G$, and consider the isomorphism
$g\colon \mathbb{Z}\widetilde{V}\to\mathbb{Z}\widetilde{V}$ determined by the action of $g$ on vertices. A
straightforward calculation shows that $\widetilde{\Delta}=g\widetilde{\Delta} g^{-1}$. It follows that
\[
\widetilde{\Delta}(b)= a = ga = g\widetilde{\Delta} b = (g\widetilde{\Delta} g^{-1})(gb) = \widetilde{\Delta}(gb).
\]
Since $\widetilde{\Delta}$ is invertible, it follows that $b=gb$ for all $g\in G$, i.e., $b$ is
symmetric. Hence, $o=[a]=\widetilde{\Delta}^G([b])$, as required.
\end{proof}
\begin{cor}\label{cor:nsr} The number of symmetric recurrent configurations is
\[
|\mathcal{S}(\Gamma)^G|=\det\widetilde{\Delta}^G.
\]
\end{cor}
\begin{remark}
We have not assumed that the action of $G$ on $\Gamma$ is faithful. If $K$ is
the kernel of the action of $G$, then
$\mathcal{O}(\Gamma,G)=\mathcal{O}(\Gamma,G/K)$
and $\mathcal{S}^G=\mathcal{S}^{G/K}$. We also have $\widetilde{\Delta}^G=\widetilde{\Delta}^{G/K}$.
\end{remark}
\begin{example}\label{symmetry example}
Consider the graph $\Gamma$ of Figure~\ref{fig:symmetry example} with sink $s$
and with each edge having weight~$1$.
\begin{figure}[ht]
\begin{tikzpicture}[scale=1.0]
\SetVertexMath
\GraphInit[vstyle=Art]
\SetUpVertex[MinSize=3pt]
\SetVertexLabel
\tikzset{VertexStyle/.style = {
shape = circle,
shading = ball,
ball color = black,
inner sep = 1.5pt
}}
\SetUpEdge[color=black]
\Vertex[LabelOut=true,Lpos=180,x=0,y=0]{u}
\Vertex[LabelOut=true,Lpos=0,x=1,y=0]{v}
\Vertex[LabelOut=true,Lpos=90,x=0.5,y=0.866]{w}
\Vertex[LabelOut=true,Lpos=270,x=0.5,y=-0.866]{s}
\Edges(u,s,v,w,u)
\Edge[](u)(v)
\end{tikzpicture}
\caption{The graph $\Gamma$ for Example~\ref{symmetry
example}.}\label{fig:symmetry example}
\end{figure}
Let $G=\{e,g\}$ be the group of order $2$ with identity $e$. Consider the action
of~$G$ on $\Gamma$ for which $g$ swaps vertices $u$ and $v$ and fixes vertices
$w$ and $s$. Ordering the vertices of $\Gamma$ as $u,v,w$ and ordering the
orbits, $\mathcal{O}$, as $Gu$, $Gw$, the reduced Laplacian and the symmetrized
reduced Laplacian for $\Gamma$ become
\begin{center}
\begin{tikzpicture}
\draw (0,0) node{$
\widetilde{\Delta}=
\left[
\begin{array}{rrr}
3&-1&-1\\
-1&3&-1\\
-1&-1&2
\end{array}
\right]$,};
\draw (-0.4,-0.9) node{$u$};
\draw (0.4,-0.9) node{$v$};
\draw (1.2,-0.9) node{$w$};
\draw (5,0) node{$
\widetilde{\Delta}^G=
\left[
\begin{array}{rr}
2&-1\\
-2&2
\end{array}
\right]$,};
\draw (5.1,-0.8) node{$Gu$};
\draw (6.0,-0.8) node{$Gw$};
\end{tikzpicture}
\end{center}
where we have labeled the columns by their corresponding vertices or orbits for
convenience. To illustrate how one would compute the columns of the symmetrized
reduced Laplacian in general, consider the column of $\widetilde{\Delta}^G$ corresponding to
$Gu=\{u,v\}$. It was computed by first adding the $u$- and $v$-columns of $\widetilde{\Delta}$
to get the $3$-vector $\ell=(2,2,-2)$, then taking the $u$ and $w$ components of $\ell$
since $u$ and $w$ were chosen as orbit representatives.
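Similarly, the $Gw$-column of $\widetilde{\Delta}^G$ is obtained from the $w$-column of $\widetilde{\Delta}$, namely $(-1,-1,2)$, by taking its $u$ and $w$ components, giving $(-1,2)$; in particular, $\det\widetilde{\Delta}^G=2\cdot2-(-1)(-2)=2$.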
There are $8=\det\widetilde{\Delta}$ recurrent elements $(c_u,c_v,c_w)$ of $\Gamma$:
\[
(0,2,1),(1,2,0),(1,2,1),(2,0,1),(2,1,0),(2,1,1),(2,2,0),(2,2,1),
\]
and $(2,2,0)$ is the identity of $\mathcal{S}(\Gamma)$. In accordance with
Corollary~\ref{cor:nsr}, there are $2=\det\widetilde{\Delta}^G$ symmetric recurrent elements:
$(2,2,0)$ and $(2,2,1)$.
\end{example}
\section{Matchings and trees}\label{section:Matchings and trees}
In this section, assume that $\Gamma=(V,E,\wt,s)$ is embedded in the plane, and
fix a face $f_s$ containing the sink vertex,
$s$. In \S\ref{section:symmetric recurrents} and~\S\ref{section:order
of all-2s}, we always take $f_s$ to be the unbounded face. We recall the generalized Temperley bijection, due to \cite{KPW}, between
directed spanning trees of~$\Gamma$ rooted at $s$ and perfect matchings of a
related weighted undirected graph,~$\mathcal{H}(\Gamma)$. (The graph $\mathcal{H}(\Gamma)$ would be denoted
$\mathcal{H}(s,f_s)$ in \cite{KPW}.)
It is sometimes convenient to allow an edge $e=(u,v)$ to be represented in the embedding
by distinct weighted edges $e_1,\dots,e_k$, each with tail $u$ and head $v$,
such that $\sum_{i=1}^k\wt(e_i)=\wt(e)$. Also, we would like to be able to
embed a pair of oppositely oriented edges between the same vertices so that they coincide
in the plane. For these purposes then, we work in the more general category of
weighted directed {\em multi}-graphs by allowing $E$ to be a {\em multiset} of
edges in which an edge $e$ with endpoints $u$ and $v$ is represented as the set
$e=\{u,v\}$ with a pair of weights $\wt(e,(u,v))$ and $\wt(e,(v,u))$, at least
one of which is nonzero. Each edge in the embedding is then represented by a double-headed arrow
with two weight labels (the label $\wt(e,(u,v))$ being placed next to the head
vertex, $v$). Figure~\ref{fig:embedded graph} shows a pair of edges $e=\{u,v\}$
and $e'=\{u,v\}$ where $\wt(e,(u,v))=2$, $\wt(e,(v,u))=0$, $\wt(e',(u,v))=3$,
and $\wt(e',(v,u))=1$. The top edge,~$e$, represents a single directed edge
$(u,v)$ of weight~$2$, and the bottom edge represents a pair of directed edges
of weights $3$ and $1$. The two edges combine to represent a pair of directed
edges, $(u,v)$ of weight~$5$ and $(v,u)$ of weight~$1$.
\begin{figure}[ht]
\begin{tikzpicture}[scale=1.0]
\SetVertexMath
\GraphInit[vstyle=Art]
\SetUpVertex[MinSize=3pt]
\SetVertexLabel
\tikzset{VertexStyle/.style = {
shape = circle,
shading = ball,
ball color = black,
inner sep = 1.5pt
}}
\SetUpEdge[color=black]
\tikzset{EdgeStyle/.append style = {<->,>=mytip,semithick}}
\Vertex[LabelOut=true,Lpos=180,x=0,y=0]{u}
\Vertex[LabelOut=true,Lpos=0,x=3,y=0]{v}
\Edge[style={bend left = 20}](u)(v)
\Edge[style={bend left = 20}](v)(u)
\draw (0.4,0.5) node{0};
\draw (0.4,-0.5) node{1};
\draw (2.6,0.5) node{2};
\draw (2.6,-0.5) node{3};
\end{tikzpicture}
\caption{Edges for a planar embedding of a weighted directed graph.}\label{fig:embedded graph}
\end{figure}
The rough idea of the construction of the weighted undirected graph~$\mathcal{H}(\Gamma)$ is to
overlay the embedded graph~$\Gamma$ with its dual, forgetting the orientation of
the edges and introducing new vertices where their edges cross. Then remove $s$
and the vertex corresponding to the chosen face $f_s$, and remove their incident
edges. In detail, the vertices of $\mathcal{H}(\Gamma)$ are
\[
V_{\mathcal{H}(\Gamma)}:=\{t_v:v\in V\setminus\{s\}\}\cup\{t_e: e\in
E\}\cup\{t_f:f\in F\setminus\{f_s\}\},
\]
where $F$ is the set of faces of $\Gamma$, including the unbounded face, and the
edges of~$\mathcal{H}(\Gamma)$ are
\[
E_{\mathcal{H}(\Gamma)}:=\{\{t_u,t_e\}: u\in V\setminus\{s\},\ u\in e\in E\}\cup\{\{t_e,t_f\}:
e\in E,\ e\in f\in F\setminus\{f_s\}\}.
\]
The weight of each edge of the form $\{t_u,t_e\}$ with $e=\{u,v\}\in E$ is defined to be
$\wt(e,(u,v))$, and the weight of each edge of the form $\{t_e,t_f\}$ with $f\in
F$ is defined to be $1$.
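Note that when $\Gamma$ is connected, Euler's formula gives $|V|-|E|+|F|=2$, so $|V_{\mathcal{H}(\Gamma)}|=(|V|-1)+|E|+(|F|-1)=2|E|$; in particular, $\mathcal{H}(\Gamma)$ has an even number of vertices, as is necessary for a perfect matching to exist.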
Figure~\ref{fig:h-gamma} depicts a graph $\Gamma$ embedded in the plane (for
which the multiset $E$ is actually just a set). The graph displayed in the
middle is the superposition of~$\Gamma$ with its dual, $\Gamma^{\perp}$. The
unbounded face is chosen as~$f_s$. For convenience, its corresponding vertex is omitted
from the middle graph, and its incident edges are only partially drawn.
\begin{figure}[ht]
\begin{tikzpicture}[scale=1.0]
\SetVertexMath
\GraphInit[vstyle=Art]
\SetUpVertex[MinSize=3pt]
\SetVertexLabel
\tikzset{VertexStyle/.style = {
shape = circle,
shading = ball,
ball color = black,
inner sep = 1.5pt
}}
\SetUpEdge[color=black]
\tikzset{EdgeStyle/.append style = {->,>=mytip,semithick}}
\Vertex[NoLabel,x=0,y=4]{v_1}
\Vertex[NoLabel,x=0,y=2]{v_2}
\Vertex[NoLabel,x=0,y=0]{v_3}
\Vertex[LabelOut=true,Lpos=270,L=s,x=1,y=-1.732]{v_4}
\Vertex[NoLabel,x=2,y=2]{v_5}
\Vertex[NoLabel,x=2,y=0]{v_6}
\Edge[](v_1)(v_2)
\Edge[](v_2)(v_1)
\Edge[](v_2)(v_5)
\Edge[](v_5)(v_2)
\Edge[](v_2)(v_3)
\Edge[](v_3)(v_2)
\Edge[](v_3)(v_6)
\Edge[](v_6)(v_3)
\Edge[](v_5)(v_6)
\Edge[](v_6)(v_5)
\Edge[](v_3)(v_4)
\Edge[](v_4)(v_3)
\Edge[](v_6)(v_4)
\Edge[](v_4)(v_6)
\draw (-0.3,3.48) node{0};
\draw (-0.3,2.48) node{1};
\draw (-0.3,1.48) node{2};
\draw (-0.3,0.48) node{1};
\draw (1.48,0.3) node{1};
\draw (0.48,0.3) node{1};
\draw (2.3,1.48) node{3};
\draw (2.3,0.48) node{3};
\draw (1.48,2.3) node{5};
\draw (0.48,2.3) node{2};
\draw (0.04,-0.6) node{2};
\draw (0.48,-1.48) node{1};
\draw (1.48,-1.48) node{2};
\draw (1.98,-0.60) node{0};
\draw [dashed] (3.0,-0.7) -- (3.0,3.7);
\tikzset{EdgeStyle/.append style = {-}}
\Vertex[NoLabel,x=4,y=4]{v_1}
\Vertex[NoLabel,empty=true,x=4,y=3]{x03}
\Vertex[NoLabel,x=4,y=2]{v_2}
\Vertex[NoLabel,x=4,y=0]{v_3}
\Vertex[LabelOut=true,Lpos=270,L=s,x=5,y=-1.732]{v_4}
\Vertex[NoLabel,x=6,y=2]{v_5}
\Vertex[NoLabel,x=6,y=0]{v_6}
\tikzset{VertexStyle/.append style={shape=rectangle}}
\Vertex[NoLabel,x=4,y=3]{x03}
\Vertex[NoLabel,x=4,y=1]{x01}
\Vertex[NoLabel,x=5,y=2]{x12}
\Vertex[NoLabel,x=6,y=1]{x21}
\Vertex[NoLabel,x=5,y=0]{x10}
\Vertex[NoLabel,x=4.5,y=-0.866]{xneg1}
\Vertex[NoLabel,x=5.490,y=-0.88335]{xneg2}
\tikzset{VertexStyle/.append style={shape=circle}}
\Edge[](v_1)(x03)
\Edge[](v_2)(x03)
\Edge[](v_2)(v_3)
\Edge[](v_2)(v_5)
\Edge[](v_3)(v_6)
\Edge[](v_3)(v_4)
\Edge[](v_4)(xneg2)
\Edge[](v_5)(v_6)
\Edge[](v_6)(xneg2)
\draw (3.8,2.5) node{0};
\draw (3.8,0.5) node{2};
\draw (6.2,1.5) node{3};
\draw (6.2,0.5) node{3};
\draw (4.5,2.2) node{5};
\draw (5.5,2.2) node{2};
\draw (4.50,-1.3) node{2};
\draw (5.92,-0.5) node{2};
\draw (5.5,-1.3) node{0};
\Vertex[NoLabel,x=5,y=1]{mt}
\Vertex[NoLabel,x=5,y=-0.667]{mb}
\draw [blue] (3.5,3.01) -- (4.5,3.01);
\draw [blue] (5,1) -- (5,2.5);
\draw [blue] (5,1) -- (5,-0.666);
\draw [blue] (5,1) -- (3.5,1);
\draw [blue] (5,1) -- (6.5,1);
\draw [blue] (5,-0.666) -- (4,-1.0654);
\draw [blue] (5,-0.666) -- (6,-1.0654);
\draw [dashed] (7.0,-0.7) -- (7.0,3.7);
\Vertex[NoLabel,x=8,y=4]{v_1}
\Vertex[NoLabel,x=8,y=3]{x03}
\Vertex[NoLabel,x=8,y=2]{v_2}
\Vertex[NoLabel,x=8,y=1]{x01}
\Vertex[NoLabel,x=8,y=0]{v_3}
\Vertex[NoLabel,x=8.5,y=-0.866]{xneg1}
\Vertex[NoLabel,x=9,y=2]{x12}
\Vertex[NoLabel,x=9,y=0]{x10}
\Vertex[NoLabel,x=9.490,y=-0.88335]{xneg2}
\Vertex[NoLabel,x=10,y=2]{v_5}
\Vertex[NoLabel,x=10,y=1]{x21}
\Vertex[NoLabel,x=10,y=0]{v_6}
\Vertex[NoLabel,x=9,y=1]{mt}
\Vertex[NoLabel,x=9,y=-0.667]{mb}
\Edge[](v_1)(x03)
\Edge[](v_2)(x01)
\Edge[](v_2)(x12)
\Edge[](v_3)(x01)
\Edge[](v_3)(x10)
\Edge[](v_3)(xneg1)
\Edge[](v_5)(x12)
\Edge[](v_5)(x21)
\Edge[](v_6)(x21)
\Edge[](v_6)(x10)
\Edge[](v_6)(xneg2)
\Edge[](mt)(x12)
\Edge[](mt)(x10)
\Edge[](mt)(x01)
\Edge[](mt)(x21)
\Edge[](mb)(x10)
\Edge[](mb)(xneg1)
\Edge[](mb)(xneg2)
\draw (7.8,0.5) node{2};
\draw (10.2,1.5) node{3};
\draw (10.2,0.5) node{3};
\draw (8.5,2.2) node{5};
\draw (9.5,2.2) node{2};
\draw (9.92,-0.5) node{2};
\draw (1,3.8) node(G){$\Gamma$};
\draw (5.5,3.8) node(G){$\Gamma\cup\Gamma^{\perp}$};
\draw (9.5,3.8) node(G){$\mathcal{H}(\Gamma)$};
\end{tikzpicture}
\caption{Construction of $\mathcal{H}(\Gamma)$. (Unlabeled edges have weight~$1$.)}\label{fig:h-gamma}
\end{figure}
A {\em perfect matching} of a weighted undirected graph is a subset of its edges such that
each vertex of the graph is incident with exactly one edge in the subset. The {\em weight}
of a perfect matching is the product of the weights of its edges.
We now describe the weight-preserving bijection between perfect matchings
of $\mathcal{H}(\Gamma)$ and directed spanning trees of $\Gamma$ rooted at $s$ due to
\cite{KPW}. Let $T$ be a directed spanning tree of $\Gamma$ rooted at $s$, and
let $\widetilde{T}$ be the corresponding directed spanning tree of
$\Gamma^{\perp}$, the dual of $\Gamma$, rooted at~$f_s$. (The tree
$\widetilde{T}$ is obtained by properly orienting the edges of~$\Gamma^{\perp}$ that do not cross
edges of $T$ in $\Gamma\cup\Gamma^{\perp}$.) The perfect matching
of $\mathcal{H}(\Gamma)$ corresponding to $T$ consists of the following:
\begin{enumerate}
\item an edge $\{t_u,t_e\}$ of weight $\wt(e)$ for each $e=(u,v)\in T$;
\item an edge $\{t_f,t_e\}$ of weight $1$ for each $\tilde{e}=(f,f')\in
\widetilde{T}$, where $e$ is the edge in $\Gamma$ that is crossed by $\tilde{e}$.
\end{enumerate}
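Since the edges of the second type all have weight $1$, the weight of this matching is $\prod_{e\in T}\wt(e)$, the weight of the tree $T$; this is the sense in which the correspondence is weight-preserving.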
See Figure~\ref{fig:h-gamma2} for an example continuing the example from Figure~\ref{fig:h-gamma}.
\begin{figure}[ht]
\begin{tikzpicture}[scale=1.0]
\SetVertexMath
\GraphInit[vstyle=Art]
\SetUpVertex[MinSize=3pt]
\SetVertexLabel
\tikzset{VertexStyle/.style = {
shape = circle,
shading = ball,
ball color = black,
inner sep = 1.9pt
}}
\SetUpEdge[color=black]
\tikzset{EdgeStyle/.append style = {->,>=mytip,ultra thick}}
\Vertex[NoLabel,x=4,y=4]{v_1}
\Vertex[NoLabel,empty=true,x=4,y=3]{x03}
\Vertex[NoLabel,x=4,y=2]{v_2}
\Vertex[NoLabel,x=4,y=0]{v_3}
\Vertex[LabelOut=true,Lpos=270,L=s,x=5,y=-1.732]{v_4}
\Vertex[NoLabel,x=6,y=2]{v_5}
\Vertex[NoLabel,x=6,y=0]{v_6}
\tikzset{VertexStyle/.append style={shape=rectangle}}
\Vertex[NoLabel,x=4,y=3]{x03}
\Vertex[NoLabel,x=4,y=1]{x01}
\Vertex[NoLabel,x=5,y=2]{x12}
\Vertex[NoLabel,x=6,y=1]{x21}
\Vertex[NoLabel,x=5,y=0]{x10}
\Vertex[NoLabel,x=4.5,y=-0.866]{xneg1}
\Vertex[NoLabel,x=5.490,y=-0.88335]{xneg2}
\tikzset{VertexStyle/.append style={shape=circle}}
\Vertex[NoLabel,x=5,y=1]{mt}
\Vertex[NoLabel,x=5,y=-0.667]{mb}
\Vertex[NoLabel,empty=true,x=3.2,y=1]{outleft}
\Edge[](v_1)(v_2)
\Edge[](v_2)(v_5)
\Edge[](v_5)(v_6)
\Edge[](v_6)(v_4)
\Edge[](v_3)(v_4)
\Edge[style={color=blue}](mb)(mt)
\Edge[style={color=blue}](mt)(outleft)
\draw (6.2,1.2) node{3};
\draw (4.8,2.23) node{5};
\draw (5.83,-0.74) node{2};
\draw [blue] (3.5,3.01) -- (4.5,3.01);
\draw [blue] (5,1) -- (5,2.5);
\draw [blue] (5,1) -- (5,-0.666);
\draw [blue] (5,1) -- (3.5,1);
\draw [blue] (5,1) -- (6.5,1);
\draw [blue] (5,-0.666) -- (4,-1.0654);
\draw [blue] (5,-0.666) -- (6,-1.0654);
\draw [dashed] (7.5,-0.7) -- (7.5,3.7);
\tikzset{VertexStyle/.style = {
shape = circle,
shading = ball,
ball color = black,
inner sep = 1.5pt
}}
\Vertex[NoLabel,x=9,y=4]{v_1}
\Vertex[NoLabel,x=9,y=3]{x03}
\Vertex[NoLabel,x=9,y=2]{v_2}
\Vertex[NoLabel,x=9,y=1]{x01}
\Vertex[NoLabel,x=9,y=0]{v_3}
\Vertex[NoLabel,x=9.5,y=-0.866]{xneg1}
\Vertex[NoLabel,x=10,y=2]{x12}
\Vertex[NoLabel,x=10,y=0]{x10}
\Vertex[NoLabel,x=10.490,y=-0.88335]{xneg2}
\Vertex[NoLabel,x=11,y=2]{v_5}
\Vertex[NoLabel,x=11,y=1]{x21}
\Vertex[NoLabel,x=11,y=0]{v_6}
\Vertex[NoLabel,x=10,y=1]{mt}
\Vertex[NoLabel,x=10,y=-0.667]{mb}
\draw[thick] (v_1) -- (x03);
\draw[thick] (v_2) -- (x12);
\draw[thick] (v_5) -- (x21);
\draw[thick] (v_6) -- (xneg2);
\draw[thick] (v_3) -- (xneg1);
\draw[thick] (mb) -- (x10);
\draw[thick] (mt) -- (x01);
\draw (11.2,1.5) node{3};
\draw (9.5,2.2) node{5};
\draw (10.92,-0.5) node{2};
\draw (10.6,3.8) node(G){$\mathcal{H}(\Gamma)$};
\end{tikzpicture}
\caption{A spanning tree of $\Gamma$ determines a dual spanning tree for
$\Gamma^{\perp}$ and a perfect matching for $\mathcal{H}(\Gamma)$. (See
Figure~\ref{fig:h-gamma}. Unlabeled edges have weight $1$.)}\label{fig:h-gamma2}
\end{figure}
As discussed in~\cite{KPW}, although $\mathcal{H}(\Gamma)$ depends on the
embedding of $\Gamma$ and on the choice of $f_s$, the number of spanning trees
of $\Gamma$ rooted at $s$ (and hence, the number of perfect matchings of
$\mathcal{H}(\Gamma)$), counted according to weight, does not change. In what
follows, we will always choose $f_s$ to be the unbounded face.
\section{Symmetric recurrents on the sandpile grid
graph}\label{section:symmetric recurrents} The {\em ordinary $m\times n$ grid
graph} is the undirected graph $\Gamma_{m\times n}$ with vertices $[m]\times[n]$
and edges $\{(i,j),(i',j')\}$ such that $|i-i'|+|j-j'|=1$. The $m\times n$ {\em
sandpile grid graph}, $\mathrm{S}\Gamma_{m\times n}$, is formed from $\Gamma_{m\times n}$ by
adding a (disjoint) sink vertex, $s$, then edges incident to $s$ so that every
non-sink vertex of the resulting graph has degree~$4$. For instance, each of the four
corners of the sandpile grid graph shares an edge of weight~$2$ with~$s$ in the
case where $m\geq2$ and $n\geq 2$, as on the left in Figure~\ref{fig:sandpile
grid graph}.
We embed $\Gamma_{m\times n}$ in the plane as the standard grid with vertices
arranged as in a matrix, with $(1,1)$ in the upper left and $(m,n)$ in the lower
right. We embed $\mathrm{S}\Gamma_{m\times n}$ similarly, but usually identify the sink
vertex, $s$, with the unbounded face of $\Gamma_{m\times n}$ for convenience in
drawing, as on the left-hand side in Figure~\ref{fig:sandpile grid graph}. The
edges leading to the sink are sometimes entirely omitted from the drawing, as in
Figure~\ref{fig:8x6}.
\begin{figure}[ht]
\begin{tikzpicture}[scale=0.7]
\draw[style=very thick] (1,1) grid (5,4);
\draw (1,1) -- (0.2,0.2);
\draw (1,4) -- (0.2,4.8);
\draw (5,1) -- (5.8,0.2);
\draw (5,4) -- (5.8,4.8);
\foreach \i in {2,3,4}{
\draw (\i,1) -- (\i, 0.2);
\draw (\i,4) -- (\i, 4.8);
}
\foreach \i in {2,3}{
\draw (1,\i) -- (0.2,\i);
\draw (5,\i) -- (5.8,\i);
}
\draw[fill=white, draw=none] (0.6,0.6) circle [radius=2.8mm];
\draw (0.6,0.6) node{$2$};
\draw[fill=white, draw=none] (0.6,4.4) circle [radius=2.8mm];
\draw (0.6,4.4) node{$2$};
\draw[fill=white, draw=none] (5.4,0.6) circle [radius=2.8mm];
\draw (5.4,0.6) node{$2$};
\draw[fill=white, draw=none] (5.4,4.4) circle [radius=2.8mm];
\draw (5.4,4.4) node{$2$};
\node at (3.3,-0.4){$\mathrm{S}\Gamma_{4\times 5}$};
\def\x{10}
\draw[style=very thick] (\x+3,1) -- (\x+3,4);
\draw (\x,2.5) .. controls +(280:20pt) and +(180:20pt) .. (\x+3,1);
\draw (\x,2.5) .. controls +(330:20pt) and +(180:20pt) .. (\x+3,2);
\draw (\x,2.5) .. controls +(30:20pt) and +(180:20pt) .. (\x+3,3);
\draw (\x,2.5) .. controls +(80:20pt) and +(180:20pt) .. (\x+3,4);
\draw[fill=white, draw=none] (\x+1,3.4) circle [radius=2.8mm];
\draw (\x+1,3.4) node{$3$};
\draw[fill=white, draw=none] (\x+1.7,2.9) circle [radius=2.8mm];
\draw (\x+1.7,2.9) node{$2$};
\draw[fill=white, draw=none] (\x+1.7,2.1) circle [radius=2.8mm];
\draw (\x+1.7,2.1) node{$2$};
\draw[fill=white, draw=none] (\x+1.1,1.53) circle [radius=2.8mm];
\draw (\x+1.1,1.53) node{$3$};
\draw[fill] (\x,2.5) circle [radius=2pt];
\node at (\x-0.5,2.5){$s$};
\node at (\x+1.5,-0.4){$\mathrm{S}\Gamma_{4\times 1}$};
\end{tikzpicture}
\caption{Two sandpile grid graphs. (The sink for $\mathrm{S}\Gamma_{4\times 5}$ is not
drawn.)}\label{fig:sandpile grid graph}
\end{figure}
In this section, {\em symmetric recurrent} will always refer to a recurrent
element on $\mathrm{S}\Gamma_{m\times n}$ with horizontal and vertical symmetry, i.e., an
element of $\mathcal{S}(\mathrm{S}\Gamma_{m\times n})^G$ where~$G$ is the Klein $4$-group,
\[
G=\langle\sigma,\tau:\sigma^2=\tau^2=1,\ \sigma\tau=\tau\sigma\rangle,
\]
acting on $\mathrm{S}\Gamma_{m\times n}$ by
\[
\sigma(i,j)=(i,n-j+1),\quad
\tau(i,j)=(m-i+1,j),\quad\mbox{and $\sigma(s)=\tau(s)=s$}.
\]
Our main goal in this section is to study the symmetric recurrent configurations
on the sandpile grid graph. After collecting some basic facts about certain
tridiagonal matrices, we divide the study into three cases: even$\times$even-,
even$\times$odd-, and odd$\times$odd-dimensional grids. In each case we
provide a formula for the number of symmetric recurrents using Chebyshev
polynomials and show how these configurations are related to domino tilings of
various types of checkerboards.
\subsection{Some tridiagonal matrices.}\label{subsection:tridiagonal}
Recall that {\em Chebyshev polynomials of the first kind} are defined by the
recurrence
\begin{align}\label{eqn:1st}
\nonumber
T_0(x)&=1\\
T_1(x)&=x\\
\nonumber
T_{j}(x)&=2x\,T_{j-1}(x)-T_{j-2}(x)\quad\text{for $j\geq2$},
\end{align}
and {\em Chebyshev polynomials of the second kind} are defined by
\begin{align}\label{eqn:2nd}
\nonumber
U_0(x)&=1\\
U_1(x)&=2x\\
\nonumber
U_{j}(x)&=2x\,U_{j-1}(x)-U_{j-2}(x)\quad\text{for $j\geq2$}.
\end{align}
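For reference, the first few are $T_2(x)=2x^2-1$, $T_3(x)=4x^3-3x$, $U_2(x)=4x^2-1$, and $U_3(x)=8x^3-4x$.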
Two references are~\cite{MH} and~\cite{wiki-chebyshev}.
It follows from the recurrences that these polynomials may be expressed as
determinants of $j\times j$ tridiagonal matrices:
\[
T_j(x) = \det
\resizebox{.3\textwidth}{!}{
$
\begin{bmatrix}
x&1&&&&\\
1&2x&1&&&\\
&1&2x&1&&\\
&&&\ddots&&\\
&&&1&2x&1\\
&&&&1&2x
\end{bmatrix},
$
}
\quad
U_j(x) = \det
\resizebox{.3\textwidth}{!}{
$
\begin{bmatrix}
2x&1&&&&\\
1&2x&1&&&\\
&1&2x&1&&\\
&&&\ddots&&\\
&&&1&2x&1\\
&&&&1&2x
\end{bmatrix},
$
}
\]
and, hence, $T_j(-x)=(-1)^j\,T_j(x)$ and $U_j(-x)=(-1)^j\,U_j(x)$.
We have the well-known factorizations:
\begin{align}
T_j(x)&=2^{j-1}\prod_{k=1}^j\left(x-\cos\left(\frac{(2k-1)\pi}{2j} \right)
\right)\label{T-factorization}\\[5pt]
U_j(x)&=2^{j}\prod_{k=1}^j\left(x-\cos\left(\frac{k\pi}{j+1} \right) \right).
\label{U-factorization}
\end{align}
We will also use the following well-known identities:
\begin{align}
T_{2j}(x)&=T_j(2x^2-1)=(-1)^j\,T_j(1-2x^2)\label{eqn:half-angle}\\
2\,T_j(x)&= U_j(x)-U_{j-2}(x).\label{eqn:sum formula}
\end{align}
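For example, with $j=2$ these read $T_4(x)=T_2(2x^2-1)=2(2x^2-1)^2-1=8x^4-8x^2+1$ and $2\,T_2(x)=4x^2-2=(4x^2-1)-1=U_2(x)-U_0(x)$.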
Corollary~\ref{cor:nsr} will be used to count the symmetric recurrents on
sandpile grid graphs. The form of the determinant that arises is treated by the
following.
\begin{lemma}\label{lemma:tridiagonal} Let $m$ and $n$ be positive integers.
Let $A$, $B$, and $C$ be $n\times n$ matrices over the complex numbers, and
let $I_n$ be the $n\times n$ identity matrix. Define the $mn\times mn$
tridiagonal block matrix
\[
D(m) =
\left[
\begin{array}{ccccc}
A&-I_n&&&\\
-I_n&A&-I_n&&\\
&&\ddots&&\\
&&-I_n&A&-I_n\\
&&&-C&B
\end{array}
\right],
\]
where the super- and sub-diagonal blocks are all $-I_n$ except for the
one displayed block consisting of $-C$; all omitted entries in the matrix are zero. Take $D(1) = B$. Then
\[
\det D(m)=(-1)^n\det(T),
\]
where
\[
T = -B\,U_{m-1}\left(\frac{1}{2}A\right)+C\,U_{m-2}\left(\frac{1}{2}A\right),
\]
letting $U_{-1}(x):=0$.
\end{lemma}
\begin{proof} The case $m=1$ is immediate: since $U_0(\frac{1}{2}A)=I_n$ and $U_{-1}=0$, we have $T=-B$, so $(-1)^n\det(T)=(-1)^n\det(-B)=\det B=\det D(1)$. For $m>1$, Theorem~2 of
\cite{Molinari} gives a formula for calculating the determinant of a general
tridiagonal block matrix. In our case, it says
\begin{equation}
\det D(m) = (-1)^n\det E_{\boldsymbol{t}},
\label{eqn:Et}
\end{equation}
where $E_{\boldsymbol{t}}$ is the top-left block of size
$n\times n$ of the matrix
\[
E:=
\begin{bmatrix}
-B&C\\
I_n&0
\end{bmatrix}
\begin{bmatrix}
A&-I_n\\
I_n&0
\end{bmatrix}^{m-2}
\begin{bmatrix}
A&I_n\\
I_n&0
\end{bmatrix}.
\]
Set $S_0=I_n$, and for all positive integers $j$, define
\[
S_j = \left( \begin{bmatrix}A& -I_n \\ I_n &
0\end{bmatrix}^{j-1}\begin{bmatrix}A& I_n \\ I_n & 0\end{bmatrix}
\right)_{\boldsymbol{t}}
\]
and
\[
S'_j = \left (\begin{bmatrix}A& -I_n \\ I_n &
0\end{bmatrix}^{j-1}\begin{bmatrix}A& I_n \\ I_n & 0\end{bmatrix}
\right)_{\boldsymbol{b}},
\]
where the subscripts $\boldsymbol{t}$ and $\boldsymbol{b}$ denote taking the
top-left and bottom-left blocks of size $n \times n$, respectively.
It follows that
\begin{equation}\label{eqn:srecurrence}
S_0 = I_n ,\quad S_1= A,\quad\text{and $\quad S_j= A\,S_{j-1} - S_{j-2}$ for $j\geq
2$},
\end{equation}
and
\[
S'_j = S_{j-1}\;\,\text{for all}\;\, j \ge 1.
\]
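Indeed, writing $M_j$ for the matrix product appearing in these definitions, we have $M_j=\begin{bmatrix}A&-I_n\\I_n&0\end{bmatrix}M_{j-1}$ for $j\geq2$; comparing top-left and bottom-left blocks gives $S_j=A\,S_{j-1}-S'_{j-1}$ and $S'_j=S_{j-1}$, while $S'_1=I_n=S_0$ directly.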
By \eqref{eqn:2nd} and \eqref{eqn:srecurrence}, $S_{j}= U_{j} (\frac{1}{2}A)$.
Hence,
\[
E_{\boldsymbol{t}}= -B\,S_{m-1}+C\,S'_{m-1} =
-B\,U_{m-1}\left(\frac{1}{2}A\right)+C\,U_{m-2}\left(\frac{1}{2}A\right),
\]
as required.
\end{proof}
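For instance, taking $m=2$, the lemma asserts that $\det\begin{bmatrix}A&-I_n\\-C&B\end{bmatrix}=(-1)^n\det(C-BA)$, which can also be checked directly by clearing the top-left block with a block column operation.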
\subsection{Symmetric recurrents on a $2m\times 2n$ sandpile grid
graph.}\label{subsection:symmetric recurrents on evenxeven grid} A {\em
checkerboard} is a rectangular array of squares. A domino is a $1\times 2$ or
$2\times 1$ array of squares and, thus, covers exactly two adjacent squares of
the checkerboard. A {\em domino tiling of the checkerboard} consists of placing
non-overlapping dominos on the checkerboard, covering every square. As is usually
done, and exhibited in Figure~\ref{fig:perfect matching}, we identify domino
tilings of an $m\times n$ checkerboard with perfect matchings of
$\Gamma_{m\times n}$. Figure~\ref{fig:checker4x4} exhibits the $36$ domino
tilings of a $4\times4$ checkerboard.
\begin{figure}[ht]
\begin{tikzpicture}[scale=0.7]
\newcommand{\vertRect}[2]{\draw[fill=gray!20,rounded corners=0.4mm](#1+0.2,#2+0.2) rectangle (#1+0.8,#2+1.8);}
\newcommand{\horizRect}[2]{\draw[fill=gray!20,rounded corners=0.4mm] (#1+0.2,#2+0.2) rectangle (#1+1.8,#2+0.8);}
\draw (0,0) grid (4,3);
\horizRect{0}{0};
\vertRect{2}{0};
\vertRect{3}{0};
\vertRect{0}{1};
\vertRect{1}{1};
\horizRect{2}{2};
\foreach \x in {0,1,2,3}{
\foreach \y in {0,1,2}{
\draw[fill] (\x+0.5,\y+0.5) circle [radius=2pt];
}
}
\foreach \y in {0,1,2}{
\draw[style=very thin,color=gray!90] (0.5,\y+0.5) -- (3.5,\y+0.5);
}
\foreach \x in {0,1,2,3}{
\draw[very thin,color=gray!90] (\x+0.5,0.5) -- (\x+0.5,2.5);
}
\draw[ultra thick] (0.5,0.5) -- (1.5,0.5);
\draw[ultra thick] (2.5,0.5) -- (2.5,1.5);
\draw[ultra thick] (3.5,0.5) -- (3.5,1.5);
\draw[ultra thick] (0.5,1.5) -- (0.5,2.5);
\draw[ultra thick] (1.5,1.5) -- (1.5,2.5);
\draw[ultra thick] (2.5,2.5) -- (3.5,2.5);
\end{tikzpicture}
\caption{Correspondence between a perfect matching of~$\Gamma_{3\times 4}$ and
a domino tiling of its corresponding checkerboard.}\label{fig:perfect matching}
\end{figure}
\begin{figure}[ht]
\begin{tikzpicture}[scale=0.3]
\newcommand{\vertRect}[3]{\draw[fill=#3,rounded corners=0.4mm](#1+0.2,#2+0.2) rectangle (#1+0.8,#2+1.8);}
\newcommand{\horizRect}[3]{\draw[fill=#3,rounded corners=0.4mm] (#1+0.2,#2+0.2) rectangle (#1+1.8,#2+0.8);}
\foreach \x in {0,...,5}{
\foreach \y in {0,...,5} {
\draw (5*\x,5*\y) grid (5*\x+4,5*\y+4);
}
}
\def7{25}
\horizRect{0}{7+3}{gray} \horizRect{2}{7+3}{gray};
\horizRect{0}{7+2}{gray} \horizRect{2}{7+2}{blue};
\horizRect{0}{7+1}{gray} \horizRect{2}{7+1}{gray};
\horizRect{0}{7}{gray} \horizRect{2}{7}{blue};
\horizRect{5}{7+3}{gray} \vertRect{7}{7+2}{gray};
\horizRect{5}{7+2}{gray} \vertRect{8}{7+2}{gray};
\horizRect{5}{7+1}{gray} \horizRect{7}{7+1}{gray};
\horizRect{5}{7}{gray} \horizRect{7}{7}{blue};
\horizRect{10}{7+3}{gray} \vertRect{12}{7+2}{gray};
\horizRect{10}{7+2}{gray} \vertRect{13}{7+2}{gray};
\horizRect{10}{7+1}{gray} \vertRect{13}{7}{gray};
\horizRect{10}{7}{gray} \vertRect{12}{7}{gray};
\horizRect{15}{7+3}{gray} \horizRect{17}{7+3}{gray};
\horizRect{15}{7+2}{gray} \vertRect{17}{7+1}{gray};
\horizRect{15}{7+1}{gray} \vertRect{18}{7+1}{gray};
\horizRect{15}{7}{gray} \horizRect{17}{7}{blue};
\horizRect{20}{7+3}{gray} \horizRect{22}{7+3}{gray};
\horizRect{20}{7+2}{gray} \horizRect{22}{7+2}{blue};
\horizRect{20}{7+1}{gray} \vertRect{22}{7}{gray};
\horizRect{20}{7}{gray} \vertRect{23}{7}{gray};
\vertRect{25}{7+2}{gray} \horizRect{26}{7+3}{gray};
\horizRect{26}{7+2}{gray} \vertRect{28}{7+2}{gray};
\horizRect{25}{7}{gray} \horizRect{27}{7}{blue};
\horizRect{25}{7+1}{gray} \horizRect{27}{7+1}{gray};
\def7{20}
\vertRect{0}{7+2}{gray} \horizRect{2}{7+3}{gray};
\vertRect{1}{7+2}{gray} \horizRect{2}{7+2}{blue};
\horizRect{0}{7+1}{gray} \horizRect{2}{7+1}{gray};
\horizRect{0}{7}{gray} \horizRect{2}{7}{blue};
\vertRect{5}{7+2}{gray} \vertRect{7}{7+2}{gray};
\vertRect{6}{7+2}{gray} \vertRect{8}{7+2}{gray};
\horizRect{5}{7+1}{gray} \horizRect{7}{7+1}{gray};
\horizRect{5}{7}{gray} \horizRect{7}{7}{blue};
\vertRect{10}{7+2}{gray} \vertRect{12}{7+2}{gray};
\vertRect{11}{7+2}{gray} \vertRect{13}{7+2}{gray};
\horizRect{10}{7+1}{gray} \vertRect{13}{7}{gray};
\horizRect{10}{7}{gray} \vertRect{12}{7}{gray};
\vertRect{15}{7+2}{gray} \horizRect{17}{7+3}{gray};
\vertRect{16}{7+2}{gray} \vertRect{17}{7+1}{gray};
\horizRect{15}{7+1}{gray} \vertRect{18}{7+1}{gray};
\horizRect{15}{7}{gray} \horizRect{17}{7}{blue};
\vertRect{20}{7+2}{gray} \horizRect{22}{7+3}{gray};
\vertRect{21}{7+2}{gray} \horizRect{22}{7+2}{blue};
\horizRect{20}{7+1}{gray} \vertRect{22}{7}{gray};
\horizRect{20}{7}{gray} \vertRect{23}{7}{gray};
\vertRect{25}{7+2}{gray} \horizRect{26}{7+3}{gray};
\horizRect{26}{7+2}{gray} \vertRect{28}{7+2}{gray};
\horizRect{25}{7}{gray} \vertRect{27}{7}{gray};
\horizRect{25}{7+1}{gray} \vertRect{28}{7}{gray};
\def7{15}
\vertRect{0}{7+2}{gray} \horizRect{2}{7+3}{gray};
\vertRect{1}{7+2}{gray} \horizRect{2}{7+2}{blue};
\vertRect{0}{7}{gray} \horizRect{2}{7+1}{gray};
\vertRect{1}{7}{gray} \horizRect{2}{7}{blue};
\vertRect{5}{7+2}{gray} \vertRect{7}{7+2}{gray};
\vertRect{6}{7+2}{gray} \vertRect{8}{7+2}{gray};
\vertRect{5}{7}{gray} \horizRect{7}{7+1}{gray};
\vertRect{6}{7}{gray} \horizRect{7}{7}{blue};
\vertRect{10}{7+2}{gray} \vertRect{12}{7+2}{gray};
\vertRect{11}{7+2}{gray} \vertRect{13}{7+2}{gray};
\vertRect{10}{7}{gray} \vertRect{13}{7}{gray};
\vertRect{11}{7}{gray} \vertRect{12}{7}{gray};
\vertRect{15}{7+2}{gray} \horizRect{17}{7+3}{gray};
\vertRect{16}{7+2}{gray} \vertRect{17}{7+1}{gray};
\vertRect{15}{7}{gray} \vertRect{18}{7+1}{gray};
\vertRect{16}{7}{gray} \horizRect{17}{7}{blue};
\vertRect{20}{7+2}{gray} \horizRect{22}{7+3}{gray};
\vertRect{21}{7+2}{gray} \horizRect{22}{7+2}{blue};
\vertRect{20}{7}{gray} \vertRect{22}{7}{gray};
\vertRect{21}{7}{gray} \vertRect{23}{7}{gray};
\vertRect{25}{7+2}{gray} \horizRect{26}{7+3}{gray};
\horizRect{26}{7+2}{gray} \vertRect{28}{7+2}{gray};
\vertRect{25}{7}{gray} \horizRect{27}{7}{blue};
\vertRect{26}{7}{gray} \horizRect{27}{7+1}{gray};
\def7{10}
\horizRect{0}{7+3}{gray} \horizRect{2}{7+3}{gray};
\vertRect{0}{7+1}{gray} \horizRect{2}{7+2}{blue};
\vertRect{1}{7+1}{gray} \horizRect{2}{7+1}{gray};
\horizRect{0}{7}{gray} \horizRect{2}{7}{blue};
\horizRect{5}{7+3}{gray} \vertRect{7}{7+2}{gray};
\vertRect{5}{7+1}{gray} \vertRect{8}{7+2}{gray};
\vertRect{6}{7+1}{gray} \horizRect{7}{7+1}{gray};
\horizRect{5}{7}{gray} \horizRect{7}{7}{blue};
\horizRect{10}{7+3}{gray} \vertRect{12}{7+2}{gray};
\vertRect{10}{7+1}{gray} \vertRect{13}{7+2}{gray};
\vertRect{11}{7+1}{gray} \vertRect{13}{7}{gray};
\horizRect{10}{7}{gray} \vertRect{12}{7}{gray};
\horizRect{15}{7+3}{gray} \horizRect{17}{7+3}{gray};
\vertRect{15}{7+1}{gray} \vertRect{17}{7+1}{gray};
\vertRect{16}{7+1}{gray} \vertRect{18}{7+1}{gray};
\horizRect{15}{7}{gray} \horizRect{17}{7}{blue};
\horizRect{20}{7+3}{gray} \horizRect{22}{7+3}{gray};
\vertRect{20}{7+1}{gray} \horizRect{22}{7+2}{blue};
\vertRect{21}{7+1}{gray} \vertRect{22}{7}{gray};
\horizRect{20}{7}{gray} \vertRect{23}{7}{gray};
\horizRect{25}{7+3}{gray} \horizRect{27}{7+3}{gray};
\horizRect{25}{7+2}{gray} \horizRect{27}{7+2}{blue};
\vertRect{25}{7}{gray} \horizRect{26}{7}{gray};
\vertRect{28}{7}{gray} \horizRect{26}{7+1}{gray};
\def7{5}
\horizRect{0}{7+3}{gray} \horizRect{2}{7+3}{gray};
\horizRect{0}{7+2}{gray} \horizRect{2}{7+2}{blue};
\vertRect{0}{7}{gray} \horizRect{2}{7+1}{gray};
\vertRect{1}{7}{gray} \horizRect{2}{7}{blue};
\horizRect{5}{7+3}{gray} \vertRect{7}{7+2}{gray};
\horizRect{5}{7+2}{gray} \vertRect{8}{7+2}{gray};
\vertRect{5}{7}{gray} \horizRect{7}{7+1}{gray};
\vertRect{6}{7}{gray} \horizRect{7}{7}{blue};
\horizRect{10}{7+3}{gray} \vertRect{12}{7+2}{gray};
\horizRect{10}{7+2}{gray} \vertRect{13}{7+2}{gray};
\vertRect{10}{7}{gray} \vertRect{13}{7}{gray};
\vertRect{11}{7}{gray} \vertRect{12}{7}{gray};
\horizRect{15}{7+3}{gray} \horizRect{17}{7+3}{gray};
\horizRect{15}{7+2}{gray} \vertRect{17}{7+1}{gray};
\vertRect{15}{7}{gray} \vertRect{18}{7+1}{gray};
\vertRect{16}{7}{gray} \horizRect{17}{7}{blue};
\horizRect{20}{7+3}{gray} \horizRect{22}{7+3}{gray};
\horizRect{20}{7+2}{gray} \horizRect{22}{7+2}{blue};
\vertRect{20}{7}{gray} \vertRect{22}{7}{gray};
\vertRect{21}{7}{gray} \vertRect{23}{7}{gray};
\horizRect{25}{7+3}{gray} \vertRect{27}{7+2}{gray};
\horizRect{25}{7+2}{gray} \vertRect{28}{7+2}{gray};
\vertRect{25}{7}{gray} \horizRect{26}{7}{gray};
\vertRect{28}{7}{gray} \horizRect{26}{7+1}{gray};
\def7{0}
\vertRect{0}{7}{gray} \vertRect{0}{7+2}{gray};
\vertRect{3}{7}{gray} \vertRect{3}{7+2}{gray};
\horizRect{1}{7}{gray} \vertRect{1}{7+2}{gray};
\horizRect{1}{7+1}{gray} \vertRect{2}{7+2}{gray};
\vertRect{5}{7}{gray} \vertRect{5}{7+2}{gray};
\vertRect{8}{7}{gray} \vertRect{8}{7+2}{gray};
\vertRect{6}{7}{gray} \horizRect{6}{7+2}{gray};
\vertRect{7}{7}{gray} \horizRect{6}{7+3}{gray};
\vertRect{10}{7}{gray} \vertRect{10}{7+2}{gray};
\vertRect{13}{7}{gray} \vertRect{13}{7+2}{gray};
\horizRect{11}{7}{gray} \vertRect{11}{7+1}{gray};
\vertRect{12}{7+1}{gray} \horizRect{11}{7+3}{gray};
\vertRect{15}{7}{gray} \vertRect{15}{7+2}{gray};
\vertRect{18}{7}{gray} \vertRect{18}{7+2}{gray};
\horizRect{16}{7}{gray} \horizRect{16}{7+1}{gray};
\horizRect{16}{7+2}{gray} \horizRect{16}{7+3}{gray};
\horizRect{20}{7+3}{gray} \horizRect{22}{7+3}{gray};
\vertRect{20}{7+1}{gray} \horizRect{21}{7+2}{gray};
\vertRect{23}{7+1}{gray} \horizRect{21}{7+1}{gray};
\horizRect{20}{7}{gray} \horizRect{22}{7}{blue};
\horizRect{27}{7+3}{gray} \vertRect{25}{7+2}{gray};
\horizRect{27}{7+2}{blue} \vertRect{26}{7+2}{gray};
\vertRect{25}{7}{gray} \horizRect{26}{7}{gray};
\vertRect{28}{7}{gray} \horizRect{26}{7+1}{gray};
\end{tikzpicture}
\caption{The $36$ domino tilings of a $4\times 4$
checkerboard. The blue dominos are assigned weight $2$ for the purposes of
Theorem~\ref{thm2}.}\label{fig:checker4x4}
\end{figure}
Part~(\ref{thm1-4}) of the following theorem is the well-known formula due to
Kasteleyn {\cite{Kasteleyn}} and to Temperley and Fisher~\cite{Temperley} for
the number of domino tilings of a checkerboard. We provide a new proof.
\begin{thm}\label{thm1} Let $U_j(x)$ denote the $j$-th Chebyshev polynomial of
the second kind, and let
\[
\xi_{h,d}:=\cos\left(\frac{h\pi}{2d+1}\right),
\]
for all integers $h$ and $d$.
Then for all integers $m,n\geq 1$, the following are equal:
\begin{enumerate}
\item\label{thm1-1} the number of symmetric recurrents on $\mathrm{S}\Gamma_{2m\times 2n}$;
\item\label{thm1-2} the number of domino tilings of a $2m\times 2n$ checkerboard;
\item\label{thm1-3} \
\[
(-1)^{mn}\prod_{h=1}^m U_{2n}(i\,\xi_{h,m});
\]
\item\label{thm1-4} \
\[
\prod_{h=1}^{m}\prod_{k=1}^{n}\left(4\,\xi_{h,m}^2+4\,\xi_{k,n}^2\right).
\]
\end{enumerate}
\end{thm}
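For instance, when $m=n=1$ each of these quantities equals $2$: the symmetric recurrents on $\mathrm{S}\Gamma_{2\times2}$ are the two constant configurations with $2$ or $3$ grains on every vertex, the $2\times2$ checkerboard has exactly two domino tilings, and since $\xi_{1,1}=\cos(\pi/3)=\frac12$, parts~(\ref{thm1-3}) and~(\ref{thm1-4}) evaluate to $-U_2(i/2)=2$ and $8\,\xi_{1,1}^2=2$, respectively.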
\begin{proof} It may be helpful to read Example~\ref{example:main1} in parallel
with this proof.
Let $A_n=(a_{h,k})$ be the $n\times n$ tridiagonal matrix with entries
\[
a_{h,k} =
\begin{cases}
4 & \quad \text{if $h=k\neq n$},\\
3 & \quad \text{if $h=k=n$},\\
-1& \quad \text{if $|h-k| = 1$},\\
0 & \quad \text{if $|h-k|\geq 2$}.
\end{cases}
\]
In particular, $A_1=[3]$. Take the vertices $[m]\times[n]$ as representatives
for the orbits of $G$ acting on the non-sink vertices of $\mathrm{S}\Gamma_{2m\times 2n}$.
Ordering these representatives lexicographically, i.e., left-to-right then
top-to-bottom, the symmetrized reduced Laplacian~\eqref{eqn:srl} is given by the
$mn\times mn$ tridiagonal block matrix
\begin{equation}\label{eqn:deltaG}
\widetilde{\Delta}^G = \begin{bmatrix}
A_n & -I_n & & & \cdots & & 0 \\
-I_n & A_n & -I_n & & & & \\
& \ddots & \ddots & \ddots & & & \vdots \\
& & -I_n & A_n & -I_n & & \\
\vdots & & & \ddots & \ddots & \ddots & \\
& & & & -I_n & A_n & -I_n \\
0 & & \cdots & & & -I_n & B_n\\
\end{bmatrix}
\end{equation}
where $I_n$ is the $n\times n$ identity matrix and $B_n:=A_n-I_n$. If $m=1$,
then $\widetilde{\Delta}^G:=B_n$.
\noindent[{\bf(\ref{thm1-1}) $=$ (\ref{thm1-2})}]: The matrix $\widetilde{\Delta}^G$ is the
reduced Laplacian of a sandpile graph we now describe. Let $D_{m\times n}$ be
the graph obtained from $\Gamma_{m\times n}$, the ordinary grid graph, by adding
(i) a sink vertex, $s'$, (ii) an edge of weight~$2$ from the vertex $(1,1)$
to~$s'$, and (iii) edges of weight $1$ from each of the other vertices along the
left and top sides to $s'$, i.e., $\{(h,1),s'\}$ for $1<h\leq m$ and
$\{(1,k),s'\}$ for $1<k\leq n$. We embed $D_{m\times n}$ in the plane so that
the non-sink vertices form an ordinary grid, and the edge of weight $2$ is
represented by a pair of edges of weight $1$, forming a digon. Then,
$\mathcal{H}(D_{m\times n})=\Gamma_{2m\times 2n}$ (see Figure~\ref{fig:4x3}).
Since $\widetilde{\Delta}^G=\widetilde{\Delta}_{D_{m\times n}}$, taking determinants shows that the number of
symmetric recurrents on $\mathrm{S}\Gamma_{2m\times 2n}$ is equal to the size of the sandpile
group of $D_{m\times n}$, and hence to the number of spanning trees of
$D_{m\times n}$ rooted at $s'$, counted according to weight. These spanning
trees are, in turn, in bijection with the perfect matchings of the graph
$\mathcal{H}(D_{m\times n})=\Gamma_{2m\times 2n}$ obtained from the generalized
Temperley bijection of Section~\ref{section:Matchings and trees}. Hence, the
numbers in parts~(\ref{thm1-1}) and~(\ref{thm1-2}) are equal.
\noindent [{\bf(\ref{thm1-1}) $=$ (\ref{thm1-3})}]: By Corollary~\ref{cor:nsr},
$\det\widetilde{\Delta}^G$ is the number of symmetric recurrents on $\mathrm{S}\Gamma_{2m\times 2n}$. By
Lemma~\ref{lemma:tridiagonal},
\begin{equation}\label{eqn:thm1-det}
\det\widetilde{\Delta}^G = (-1)^n\det(T),
\end{equation}
where
\begin{align*}
T&= -B_n\,U_{m-1}\left(\frac{A_n}{2}\right)+
U_{m-2}\left(\frac{A_n}{2}\right)\\[5pt]
&= -(A_n-I_n)\,U_{m-1}\left(\frac{A_n}{2}\right)+U_{m-2}\left(\frac{A_n}{2}\right)\\[5pt]
&= U_{m-1}\left(\frac{A_n}{2}\right)-\left(A_n\,U_{m-1}\left(\frac{A_n}{2}\right) -
U_{m-2}\left(\frac{A_n}{2}\right)\right)\\[5pt]
&=U_{m-1}\left(\frac{A_n}{2}\right) - U_{m}\left(\frac{A_n}{2}\right).
\end{align*}
Using~(\ref{U-factorization}) and the fact that the Chebyshev polynomials of the
second kind satisfy
\[
U_j(\cos\theta) = \dfrac{\sin((j+1)\theta)}{\sin\theta},
\]
it is easy to check that the polynomial
\[
p(x):= U_{m}\left(\frac{x}{2}\right) - U_{m-1}\left(\frac{x}{2}\right)
\]
is a monic polynomial of degree $m$ with zeros
\[
t_{h,m}:=2\cos\dfrac{(2h+1)\pi}{2m+1},\quad 0\leq h\leq m-1.
\]
Thus,
\[
T= -p(A_n) =
-\prod_{h=0}^{m-1}\left({A_n-t_{h,m} I_n}\right),
\]
and by equation~\eqref{eqn:thm1-det},
\[
\det\widetilde{\Delta}^G=\prod_{h=0}^{m-1}\chi_n(t_{h,m}),
\]
where $\chi_n(x)$ is the characteristic polynomial of $A_n$. Therefore, to show
that the expressions in parts~(\ref{thm1-1}) and~(\ref{thm1-3}) are equal, it
suffices to show that
\begin{equation}\label{eqn:claim1}
\chi_n(t_{h,m})=(-1)^n\,U_{2n}(i\,\xi_{m-h,m})
\end{equation}
for each $h\in \{0,1,\dots, m-1\}$, which we do by showing that both sides of the equation satisfy the same
recurrence.
Define $\chi_0(x):=1$. Expanding the
determinant defining~$\chi_n(x)$, starting along the first row, leads to
a recursive formula for $\chi_n(x)$:
\begin{align}\label{chi-recurrence}
\nonumber
\chi_0(x)&=1\\
\chi_1(x)&=3-x\\
\nonumber
\chi_{j}(x)&=(4-x)\chi_{j-1}(x)-\chi_{j-2}(x)\quad \text{for $j\geq2$}.
\end{align}
On the other hand, defining $C_j(x):=(-1)^j\,U_{2j}(x)$, it follows from~\eqref{eqn:2nd} that
\begin{align}\label{c-recurrence}
\nonumber
C_0(x)&=1\\
C_1(x)&=1-4x^2\\
\nonumber
C_{j}(x)&=(2-4x^2)C_{j-1}(x)-C_{j-2}(x)\quad \text{for $j\geq2$}.
\end{align}
The result now follows by letting $x=t_{h,m}$ in~(\ref{chi-recurrence}),
letting $x=i\,\xi_{m-h,m}$ in~(\ref{c-recurrence}), and using the fact that
\begin{equation}\label{eqn:t-chi}
t_{h,m}= 2-4\,\xi_{m-h,m}^2.
\end{equation}
(Equation~\eqref{eqn:t-chi} can be verified using, for example, the double-angle
formula for cosine and the relation among angles, $(2h+1)\pi/(2m+1)=\pi-2(m-h)\pi/(2m+1)$.)
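Explicitly, $t_{h,m}=2\cos\bigl(\pi-\tfrac{2(m-h)\pi}{2m+1}\bigr)=-2\cos\tfrac{2(m-h)\pi}{2m+1}=2-4\cos^2\tfrac{(m-h)\pi}{2m+1}=2-4\,\xi_{m-h,m}^2$.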
\noindent [{\bf(\ref{thm1-3}) $=$ (\ref{thm1-4})}]:
Using~(\ref{U-factorization}),
\begin{align*}
(-1)^{mn}&\prod_{h=1}^{m}U_{2n}(i\,\xi_{h,m})\\
&=(-1)^{mn}\prod_{h=1}^m\prod_{k=1}^{2n}(2i\,\xi_{h,m}-2\,\xi_{k,n})\\
&=(-1)^{mn}\prod_{h=1}^m\prod_{k=1}^{n}
(2i\,\xi_{h,m}-2\,\xi_{k,n})
(2i\,\xi_{h,m}+2\,\xi_{k,n})\\
&=\prod_{h=1}^m\prod_{k=1}^{n}(4\,\xi_{h,m}^2+4\,\xi_{k,n}^2).
\end{align*}
\end{proof}
\begin{example}\label{example:symm4x4} Figure~\ref{fig:symm4x4} lists the $36$
symmetric recurrents on $\mathrm{S}\Gamma_{4\times 4}$ in no particular order. Given a
symmetric recurrent, $c$, let $\tilde{c}$ be the restriction of $c$ to the
vertices $(1,1)$, $(1,2)$, $(2,1)$, and $(2,2)$, representing the orbits of
the Klein $4$-group action on~$\mathrm{S}\Gamma_{4\times 4}$. We regard $\tilde{c}$ as a
configuration on $D_{2\times2}$, the sandpile graph introduced in the proof of
Theorem~\ref{thm1}. Let $\iota(c)$ be the recurrent element of the sandpile
graph $D_{2\times 2}$ equivalent to $\tilde{c}$ modulo the reduced Laplacian
of $D_{2\times 2}$. Then $c\mapsto\iota(c)$ determines a bijection between
the symmetric recurrents of $\mathrm{S}\Gamma_{4\times 4}$ and the recurrents of
$D_{2\times 2}$. In~\cite{Holroyd}, it is shown that the sandpile group of a
graph acts freely and transitively on the set of spanning trees of the graph
rooted at the sink, i.e., this set of spanning trees is a {\em torsor} for the
sandpile group. Thus, via the Temperley bijection, the set of domino tilings of the
$4\times 4$ checkerboard forms a torsor for the group of symmetric recurrents
on $\mathrm{S}\Gamma_{4\times 4}$.
\end{example}
\begin{figure}[ht]
\begin{tikzpicture}[scale=0.5]
\def\x{12};
\def\y{0};
\node at (\x+0.5,\y+0.95){\# grains};
\node at (0,0){\includegraphics[height=2.5in]{symm4x4.png}};
\draw[fill=black] (\x,\y) rectangle (\x+0.5,\y-0.5);
\node at (\x+1.4,\y-0.25){$=0$};
\draw[fill=yellow] (\x,\y-1) rectangle (\x+0.5,\y-1.5);
\node at (\x+1.4,\y-1.25){$=1$};
\draw[fill=blue] (\x,\y-2) rectangle (\x+0.5,\y-2.5);
\node at (\x+1.4,\y-2.25){$=2$};
\draw[fill=red] (\x,\y-3) rectangle (\x+0.5,\y-3.5);
\node at (\x+1.4,\y-3.25){$=3$};
\end{tikzpicture}
\caption{The $36$ symmetric recurrents on $\mathrm{S}\Gamma_{4\times4}$.}\label{fig:symm4x4}
\end{figure}
\begin{example}\label{example:main1} This example illustrates part of the proof
of Theorem~\ref{thm1} for the case $m=4$ and $n=3$. Figure~\ref{fig:8x6}
shows the graph $\mathrm{S}\Gamma_{8\times 6}$. The boxed $4\times 3$ block of vertices in
the upper left are representatives of the orbits of the Klein $4$-group
action. Order these from left-to-right, top-to-bottom, to get the matrix for
the symmetrized reduced Laplacian, $\widetilde{\Delta}^G_{8\times 6}$. The vertex $(2,3)$ of
$\mathrm{S}\Gamma_{8\times 6}$ in Figure~\ref{fig:8x6} is colored blue. If this vertex is
fired simultaneously with the other vertices in its orbit, it will lose $4$
grains of sand to its neighbors but gain $1$ grain of sand from the adjacent
vertex in its orbit. This firing-rule is encoded in the sixth column of
$\widetilde{\Delta}^G_{8\times6}$ (shaded blue).
\begin{figure}[ht]
\begin{tikzpicture}[scale=0.5]
\begin{scope}[shift={(0,-1.8)}]
\draw (0,0) grid (5,7);
\draw[line width=0.6mm,color=green!70] (-0.3,7.3) -- (2.3,7.3) -- (2.3,3.7) -- (-0.3,3.7) -- (-0.3,7.3);
\fill[color=blue] (2,6) circle (3pt);
\fill[color=blue] (3,6) circle (3pt);
\fill[color=blue] (2,2) circle (3pt);
\fill[color=blue] (3,2) circle (3pt);
\end{scope}
\draw (16,3.5) node(symlap){
\resizebox{0.67\columnwidth}{!}{
$
\renewcommand{\arraystretch}{1.3}
\left[
\begin{array}{rrrrrrrrrrrr}
4&-1& 0&-1& 0& 0& 0& 0& 0& 0& 0& 0\\
-1& 4&-1& 0&-1& 0& 0& 0& 0& 0& 0& 0\\
0&-1& 3& 0& 0&-1& 0& 0& 0& 0& 0& 0\\
-1& 0& 0& 4&-1& 0&-1& 0& 0& 0& 0& 0\\
0&-1& 0&-1& 4&-1& 0&-1& 0& 0& 0& 0\\
0& 0&-1& 0&-1& 3& 0& 0&-1& 0& 0& 0\\
0& 0& 0&-1& 0& 0& 4&-1& 0&-1& 0& 0\\
0& 0& 0& 0&-1& 0&-1& 4&-1& 0&-1& 0\\
0& 0& 0& 0& 0&-1& 0&-1& 3& 0& 0&-1\\
0& 0& 0& 0& 0& 0&-1& 0& 0& 3&-1& 0\\
0& 0& 0& 0& 0& 0& 0&-1& 0&-1& 3&-1\\
0& 0& 0& 0& 0& 0& 0& 0&-1& 0&-1& 2\\
\end{array}
\right]
$
}
};
\fill[color=blue!30,opacity=0.4] (14.85,-1.9) rectangle (15.95,8.9);
\foreach \i in {1,2,3} {
\draw[line width=0.02cm, dotted] (8.0,2.72*\i-1.95) --(24,2.72*\i-1.95);
}
\draw[line width=0.02cm, dotted] (12.2,-1.8) --(12.2,8.8);
\draw[line width=0.02cm, dotted] (16.15,-1.8) --(16.15,8.8);
\draw[line width=0.02cm, dotted] (20.1,-1.8) --(20.1,8.8);
\draw (2.5,-3) node(a) {$\mathrm{S}\Gamma_{8\times 6}$};
\draw (16,-3) node(b) {$\widetilde{\Delta}^{G}_{8\times6}$};
\end{tikzpicture}
\caption{A sandpile grid graph and its symmetrized reduced Laplacian.}\label{fig:8x6}
\end{figure}
The matrix $\widetilde{\Delta}^G_{8\times 6}$ is the reduced Laplacian of the graph $D_{4\times
3}$, shown in Figure~\ref{fig:4x3}. To form $\mathcal{H}(D_{4\times
3})=\Gamma_{8\times 6}$, we first overlay $D_{4\times 3}$ with its dual, as
shown, then remove the vertices $s$ and $\tilde{s}$ and their incident edges.
Figure~\ref{fig:entwined} shows how a spanning tree of $D_{4\times 3}$ (in black)
determines a spanning tree of the dual graph (in blue) and a domino tiling of
the $8\times 6$ checkerboard.
\begin{figure}[ht]
\begin{tikzpicture}[scale=0.8]
\foreach \i in {0,1,2} {
\draw (\i,0) -- (\i,3.6);
\draw (\i,3.6) .. controls +(90:20pt) and +(0:20pt) .. (-1.60,4.6);
}
\foreach \i in {0,1,2,3} {
\draw (-0.60,\i) -- (2,\i);
\draw (-0.60,\i) .. controls +(180:20pt) and +(270:20pt) .. (-1.60,4.6);
}
\draw (-2,4.6) node(s){$s$};
\draw (1,-2) node(d){$D_{4\times3}$};
\foreach \i in {0,1,2} {
\draw (\i+7,0) -- (\i+7,3.6);
\draw (\i+7,3.6) .. controls +(90:20pt) and +(0:20pt) .. (-1.60+7,4.6);
}
\foreach \i in {0,1,2,3} {
\draw (-0.60+7,\i) -- (2+7,\i);
\draw (-0.60+7,\i) .. controls +(180:20pt) and +(270:20pt) .. (-1.60+7,4.6);
}
\draw (-2+7,4.6) node(s){$s$};
\foreach \i in {0,1,2} {
\draw[color=blue] (\i+7-0.5,-0.2) -- (\i+7-0.5,3.5);
\draw[color=blue] (\i+7-0.5,-0.2) .. controls +(270:20pt) and +(180:20pt) .. (3.1+7,-1.2);
}
\foreach \i in {0,1,2,3} {
\draw[color=blue] (2.10+7,\i+0.5) -- (-0.50+7,\i+0.5);
\draw[color=blue] (2.10+7,\i+0.5) .. controls +(0:20pt) and +(90:20pt) .. (3.1+7,-1.2);
}
}
\draw (3.1+7+0.4,-1.2) node(s){$\color{blue}\tilde{s}$};
\draw (1+7,-2) node(e){$D_{4\times3}\cup D_{4\times3}^{\perp}$};
\end{tikzpicture}
\caption{The symmetrized reduced Laplacian for $\mathrm{S}\Gamma_{8\times 6}$ is the reduced
Laplacian for $D_{4\times 3}$. Removing $s$ and $\tilde{s}$ and their incident
edges from the graph on the right shows $\mathcal{H}(D_{4\times
3})=\Gamma_{8\times 6}$.}\label{fig:4x3}
\end{figure}
\begin{figure}[ht]
\begin{tikzpicture}[scale=0.8]
\foreach \x in {0,2,4} {
\foreach \y in {0,2,4,6} {
\draw[fill=white] (\x,\y) rectangle (\x+1,\y+1);
\draw[fill=black!65] (\x+1,\y) rectangle (\x+2,\y+1);
\draw[fill=blue!65] (\x,\y+1) rectangle (\x+1,\y+2);
\draw[fill=white] (\x+1,\y+1) rectangle (\x+2,\y+2);
}
}
\filldraw[draw=black,style=ultra thick,fill=blue,rounded corners] (0.2,0.2) rectangle (0.8,1.8);
\filldraw[draw=black,style=ultra thick,fill=blue,rounded corners] (0.2,2.2) rectangle (0.8,3.8);
\filldraw[draw=black,style=ultra thick,fill=blue,rounded corners] (0.2,5.2) rectangle (1.8,5.8);
\filldraw[draw=black,style=ultra thick,fill=blue,rounded corners] (0.2,6.2) rectangle (0.8,7.8);
\filldraw[draw=white,fill=black,rounded corners] (1.2,6.2) rectangle (1.8,7.8);
\filldraw[draw=white,fill=black,rounded corners] (0.2,4.2) rectangle (1.8,4.8);
\filldraw[draw=white,fill=black,rounded corners] (1.2,2.2) rectangle (1.8,3.8);
\filldraw[draw=black,style=ultra thick,fill=blue,rounded corners] (1.2,1.2) rectangle (2.8,1.8);
\filldraw[draw=white,fill=black,rounded corners] (1.2,0.2) rectangle (2.8,0.8);
\filldraw[draw=black,style=ultra thick,fill=blue,rounded corners] (2.2,7.2) rectangle (3.8,7.8);
\filldraw[draw=white,fill=black,rounded corners] (2.2,6.2) rectangle (3.8,6.8);
\filldraw[draw=white,fill=black,rounded corners] (3.2,4.2) rectangle (3.8,5.8);
\filldraw[draw=black,style=ultra thick,fill=blue,rounded corners] (2.2,4.2) rectangle (2.8,5.8);
\filldraw[draw=black,style=ultra thick,fill=blue,rounded corners] (2.2,3.2) rectangle (3.8,3.8);
\filldraw[draw=white,fill=black,rounded corners] (2.2,2.2) rectangle (3.8,2.8);
\filldraw[draw=white,fill=black,rounded corners] (3.2,0.2) rectangle (3.8,1.8);
\filldraw[draw=white,fill=black,rounded corners] (4.2,0.2) rectangle (5.8,0.8);
\filldraw[draw=black,style=ultra thick,fill=blue,rounded corners] (4.2,7.2) rectangle (5.8,7.8);
\filldraw[draw=black,style=ultra thick,fill=blue,rounded corners] (4.2,5.2) rectangle (4.8,6.8);
\filldraw[draw=white,fill=black,rounded corners] (5.2,5.2) rectangle (5.8,6.8);
\filldraw[draw=white,fill=black,rounded corners] (4.2,4.2) rectangle (5.8,4.8);
\filldraw[draw=black,style=ultra thick,fill=blue,rounded corners] (4.2,2.2) rectangle (4.8,3.8);
\filldraw[draw=white,fill=black,rounded corners] (5.2,2.2) rectangle (5.8,3.8);
\filldraw[draw=black,style=ultra thick,fill=blue,rounded corners] (4.2,1.2) rectangle (5.8,1.8);
\draw[fill=black] (-0.8,8.8) circle (1.5pt);
\draw[style=ultra thick] (-0.8,8.8) .. controls (1.5,8.4) .. (1.5,8);
\draw[style=thin] (-0.8,8.8) .. controls (3.5,8.4) .. (3.5,8);
\draw[style=thin] (-0.8,8.8) .. controls (5.5,8.4) .. (5.5,8);
\draw[style=thin] (-0.8,8.8) .. controls (-0.4,6.5) .. (0,6.5);
\draw[style=ultra thick] (-0.8,8.8) .. controls (-0.4,4.5) .. (0,4.5);
\draw[style=thin] (-0.8,8.8) .. controls (-0.4,2.5) .. (0,2.5);
\draw[style=thin] (-0.8,8.8) .. controls (-0.4,0.5) .. (0,0.5);
\draw[style=ultra thick] (1.5,8) -- (1.5,7.8);
\draw[style=ultra thick] (1.8,6.5) -- (2.2,6.5);
\draw[style=ultra thick] (3.5,6.2) -- (3.5,5.8);
\draw[style=ultra thick] (3.8,4.5) -- (4.2,4.5);
\draw[style=ultra thick] (5.5,4.8) -- (5.5,5.2);
\draw[style=ultra thick] (5.5,4.2) -- (5.5,3.8);
\draw[style=ultra thick] (0.0,4.5) -- (0.2,4.5);
\draw[style=ultra thick] (1.5,4.2) -- (1.5,3.8);
\draw[style=ultra thick] (1.8,2.5) -- (2.2,2.5);
\draw[style=ultra thick] (3.5,2.2) -- (3.5,1.8);
\draw[style=ultra thick] (2.8,0.5) -- (3.2,0.5);
\draw[style=ultra thick] (3.8,0.5) -- (4.2,0.5);
\draw[fill=blue] (6.8,-0.8) circle (1.5pt);
\draw[style=ultra thick,color=blue] (6.8,-0.8) .. controls (6.4,1.5) .. (6,1.5);
\draw[style=thin,color=blue] (6.8,-0.8) .. controls (6.4,3.5) .. (6,3.5);
\draw[style=thin,color=blue] (6.8,-0.8) .. controls (6.4,5.5) .. (6,5.5);
\draw[style=ultra thick,color=blue] (6.8,-0.8) .. controls (6.4,7.5) .. (6,7.5);
\draw[style=thin,color=blue] (6.8,-0.8) .. controls (4.5,-0.4) .. (4.5,0);
\draw[style=thin,color=blue] (6.8,-0.8) .. controls (2.5,-0.4) .. (2.5,0);
\draw[style=ultra thick,color=blue] (6.8,-0.8) .. controls (0.5,-0.4) .. (0.5,0);
\draw[style=ultra thick,color=blue] (0.5,6.2) -- (0.5,5.8);
\draw[style=ultra thick,color=blue] (1.8,5.5) -- (2.2,5.5);
\draw[style=ultra thick,color=blue] (2.5,4.2) -- (2.5,3.8);
\draw[style=ultra thick,color=blue] (3.8,3.5) -- (4.2,3.5);
\draw[style=ultra thick,color=blue] (4.5,2.2) -- (4.5,1.8);
\draw[style=ultra thick,color=blue] (5.8,1.5) -- (6.0,1.5);
\draw[style=ultra thick,color=blue] (0.5,2.2) -- (0.5,1.8);
\draw[style=ultra thick,color=blue] (0.8,1.5) -- (1.2,1.5);
\draw[style=ultra thick,color=blue] (0.5,0.2) -- (0.5,0.0);
\draw[style=ultra thick,color=blue] (0.5,0.2) -- (0.5,0.0);
\draw[style=ultra thick,color=blue] (3.8,7.5) -- (4.2,7.5);
\draw[style=ultra thick,color=blue] (5.8,7.5) -- (6.0,7.5);
\draw[style=ultra thick,color=blue] (4.5,7.2) -- (4.5,6.8);
\end{tikzpicture}
\caption{Every domino tiling of an even-sided checkerboard consists of a spanning
tree entwined with its dual spanning tree.}\label{fig:entwined}
\end{figure}
\end{example}
\subsection{Symmetric recurrents on a $2m\times (2n-1)$ sandpile grid
graph.}\label{subsection:symmetric recurrents on evenxodd grid} The {\em
$m\times n$ M\"obius grid graph}, $\Gamma^{\mathrm{mob}}_{m\times n}$, is the graph formed from
the ordinary $m\times n$ grid graph, $\Gamma_{m\times n}$, by adding the edges
$\{(h,1),(m-h+1,n)\}$ for $1\leq h\leq m$. A {\em M\"obius checkerboard} is an
ordinary checkerboard with its left and right sides glued with a twist. Domino
tilings of an $m\times n$ M\"obius checkerboard are identified with perfect
matchings of $\Gamma^{\mathrm{mob}}_{m\times n}$. See Figure~\ref{fig:Moebius} for examples.
\begin{figure}[ht]
\begin{tikzpicture}[scale=0.7]
\SetVertexMath
\GraphInit[vstyle=Art]
\SetUpVertex[MinSize=3pt]
\SetVertexLabel
\tikzset{VertexStyle/.style = {
shape = circle,
shading = ball,
ball color = black,
inner sep = 0pt
}}
\SetUpEdge[color=black]
\foreach \i in {0,...,3} {
\draw (\i,0) -- (\i,3);
\draw (-0.5,\i) -- (3.5,\i);
\draw[dotted, thick] (3.5,\i) -- (4.0,\i);
\draw[dotted, thick] (-1.0,\i) -- (-0.5,\i);
}
\foreach \x in {0,1,2,3}{
\foreach \y in {0,1,2,3}{
\Vertex[NoLabel,x=\x,y=\y]{}
}
}
\node [right] at (4,3){d};
\node [left] at (-1.0,0){d};
\node [right] at (4,2){c};
\node [left] at (-1.0,1){c};
\node [right] at (4,1){b};
\node [left] at (-1.0,2){b};
\node [right] at (4,0){a};
\node [left] at (-1.0,3){a};
\node at (1.5,-1) {(i)};
\foreach \i in {0,...,4} {
\draw (7+\i,-0.5+0) -- (7+\i,-0.5+4);
\draw (7+0,-0.5+\i) -- (7+4,-0.5+\i);
}
\draw[fill=black!10,rounded corners] (7-0.9,-0.5+3+0.1) rectangle (7+0.9,-0.5+3+0.9);
\draw[fill=black!10,rounded corners] (7-0.9,-0.5+2+0.1) rectangle (7+0.9,-0.5+2+0.9);
\draw[fill=black!10,rounded corners] (7+1+0.1,-0.5+2+0.1) rectangle (7+1+0.9,-0.5+3+0.9);
\draw[fill=black!10,rounded corners] (7+2+0.1,-0.5+2+0.1) rectangle (7+3+0.9,-0.5+2+0.9);
\draw[fill=black!10,rounded corners] (7+2+0.1,-0.5+3+0.1) rectangle (7+3+0.9,-0.5+3+0.9);
\draw[fill=black!10,rounded corners] (7+0.1,-0.5+0.1) rectangle (7+0.9,-0.5+1+0.9);
\draw[fill=black!10,rounded corners] (7+1+0.1,-0.5+0.1) rectangle (7+2+0.9,-0.5+0.9);
\draw[fill=black!10,rounded corners] (7+1+0.1,-0.5+1+0.1) rectangle (7+2+0.9,-0.5+1+0.9);
\draw[fill=black!10,rounded corners] (7+3+0.1,-0.5+0.1) rectangle (7+4+0.9,-0.5+0.9);
\draw[fill=black!10,rounded corners] (7+3+0.1,-0.5+1+0.1) rectangle (7+4+0.9,-0.5+1+0.9);
\node at (7+0,-0.5+3.5) {A};
\node at (7+0,-0.5+2.5) {B};
\node at (7+4,-0.5+0.5) {A};
\node at (7+4,-0.5+1.5) {B};
\node at (7+2,-0.5-0.5) {(ii)};
\end{tikzpicture}
\caption{(i) The $4\times4$ M\"obius grid graph,
$\Gamma^{\mathrm{mob}}_{4\times4}$; (ii) A tiling of the $4\times4$ M\"obius
checkerboard.}\label{fig:Moebius}
\end{figure}
As part of Theorem~\ref{thm2}, we will show that the domino tilings of a
$2m\times 2n$ M\"obius checkerboard can be counted using weighted domino tilings
of an associated ordinary checkerboard, which we now describe. Define the {\em
M\"obius-weighted $m\times n$ grid graph}, $\text{M}\Gamma_{m\times n}$, as the ordinary
$m\times n$ grid graph but with each edge of the form $\{(m-2h,n-1),(m-2h,n)\}$
for $0\leq h<\lfloor\frac{m}{2}\rfloor$ assigned the weight~$2$, and, if~$m$
is odd, then in addition assign the edge $\{(1,n-1),(1,n)\}$ the weight~$3$ (and all
other edges have weight~$1$). (In the case $m=1$, the weight of the edge
$\{(1,n-1),(1,n)\}$ is defined to be $3$.)
See Figure~\ref{fig:mobius-weighted} for examples.
\begin{figure}[ht]
\begin{tikzpicture}[scale=0.5]
\draw (0,0) grid(3,3);
\foreach \y in {0,2}{
\draw[fill=white, draw=none] (2.5,\y) circle [radius=2.1mm];
\draw (2.5,\y) node{$2$};
}
\node at (1.8,-1){$\text{M}\Gamma_{4\times 4}$};
\def\x{8}
\draw (\x+0,0) grid(\x+1,4);
\foreach \y in {0,2}{
\draw[fill=white, draw=none] (\x+0.5,\y) circle [radius=2.8mm];
\draw (\x+0.5,\y) node{$2$};
}
\draw[fill=white, draw=none] (\x+0.5,4) circle [radius=2.8mm];
\draw (\x+0.5,4) node{$3$};
\node at (\x+0.8,-1){$\text{M}\Gamma_{5\times 2}$};
\end{tikzpicture}
\caption{Two M\"obius-weighted grid graphs.}0.9abel{fig:mobius-weighted}
\end{figure}
The {\em M\"obius-weighted $m\times n$ checkerboard} is the ordinary $m\times n$
checkerboard but for which the weight of a domino tiling is taken to be the
weight of the corresponding perfect matching of $\text{M}\Gamma_{m\times n}$. In
Figure~\ref{fig:checker4x4}, the dominos corresponding to edges of weight~$2$
are shaded. Thus, the first three tilings in the first row of
Figure~\ref{fig:checker4x4} have weights $4$, $2$, and $1$, respectively.
Example~\ref{example:mobius 3x1} considers a case for which $m$ is odd.
\begin{thm}\label{thm2} Let $T_j(x)$ denote the $j$-th Chebyshev polynomial of
the first kind, and let
\[
\xi_{h,d}:=\cos\left(\frac{h\pi}{2d+1}\right)\quad\text{and}\quad
\zeta_{h,d}:=\cos\left(\frac{(2h-1)\pi}{4d}\right)
\]
for all integers $h$ and $d\neq0$.
Then for all integers $m,n\geq 1$, the following are equal:
\begin{enumerate}
\item\label{thm2-1} the number of symmetric recurrents on $\mathrm{S}\Gamma_{2m\times(2n-1)}$;
\item\label{thm2-2} if $n>1$, the number of domino tilings of the
M\"obius-weighted $2m\times 2n$ checkerboard, and if $n=1$, the number of
domino tilings of the M\"obius-weighted $(2m-1)\times 2$ checkerboard,
counted according to weight;
\item\label{thm2-3} \
\[
(-1)^{mn}\,2^m\prod_{h=1}^m T_{2n}(i\,\xi_{h,m});
\]
\item\label{thm2-4} \
\[
\prod_{h=1}^{m}\prod_{k=1}^{n}\left(4\,\xi_{h,m}^2+4\,\zeta_{k,n}^2\right);
\]
\item\label{thm2-5} the number of domino tilings of a $2m\times 2n$
M\"obius checkerboard.
\end{enumerate}
\end{thm}
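For instance, when $m=n=1$ each of these quantities equals $3$: the symmetric recurrents on $\mathrm{S}\Gamma_{2\times1}$ are the three constant configurations with $1$, $2$, or $3$ grains on each vertex, the unique tiling of the M\"obius-weighted $1\times2$ checkerboard has weight $3$, and since $\xi_{1,1}=\frac12$ and $\zeta_{1,1}=\cos(\pi/4)$, part~(\ref{thm2-4}) evaluates to $4\cdot\frac14+4\cdot\frac12=3$.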
\begin{remark}\label{remark:chebT}
By identity~(\ref{eqn:half-angle}),
\[
T_{2n}(i\,\xi_{h,m})=(-1)^n\,T_n(1+2\,\xi_{h,m}^2),
\]
from which it follows, after proving Theorem~\ref{thm2}, that
\[
2^m\prod_{h=1}^mT_n(1+2\,\xi_{h,m}^2)
\]
is another way to express the numbers in parts (1)--(5).
\end{remark}
\begin{proof}[Proof of Theorem~\ref{thm2}.] The proof is similar to that of
Theorem~\ref{thm1} after altering the definitions of the matrices $A_n$ and
$B_n$ used there. This time, for $n>1$, let $A'_n=(a'_{h,k})$ be the $n\times n$
tridiagonal matrix with entries
\[
a'_{h,k} =
\begin{cases}
4 & \quad \text{if $h=k$},\\
-1 & \quad \text{if $|h-k|=1$ and $h\neq n$},\\
-2& \quad \text{if $h=n$ and $k=n-1$},\\
0 & \quad \text{if $|h-k|\geq 2$}.
\end{cases}
\]
In particular, $A'_1=[4]$. Define the matrix $B'_n=(b'_{h,k})$ by
\[
b'_{h,k} =
\begin{cases}
\ 3 & \quad \text{if $h=k$},\\
\ a'_{h,k} & \quad \text{otherwise}.
\end{cases}
\]
Thus, for instance,
\[
A'_3=
\left[\begin{array}{rrr}
4 & -1 & 0 \\
-1 & 4 & -1 \\
0 & -2 & 4
\end{array}\right],\qquad
B'_3=
\left[\begin{array}{rrr}
3 & -1 & 0 \\
-1 & 3 & -1 \\
0 & -2 & 3
\end{array}\right].
\]
If $n=1$, take $A'_1=[4]$ and $B'_1=[3]$.
\noindent [{\bf(\ref{thm2-1}) $=$ (\ref{thm2-2})}]: Reasoning as in the proof of
Theorem~\ref{thm1}, equation~\eqref{eqn:deltaG} with $A'_n$ and~$B'_n$
substituted for $A_n$ and $B_n$ gives the symmetrized reduced Laplacian,
$\widetilde{\Delta}^G$, of $\mathrm{S}\Gamma_{2m\times(2n-1)}$. Unless $n=1$, the matrix $\widetilde{\Delta}^G$ is {\em
not} the reduced Laplacian matrix of a sandpile graph since the sum of the
elements in its penultimate column is~$-1$ whereas the sum of the elements in
any column of the reduced Laplacian of a sandpile graph must be nonnegative.
However, in any case, the transpose $(\widetilde{\Delta}^G)^t$ is the reduced Laplacian of a
sandpile graph, which we call~$D'_{m\times n}$. We embed it in the plane as a
grid as we did previously with $D_{m\times n}$ in the proof of
Theorem~\ref{thm1}, but this time with some edge-weights not equal to $1$.
Figure~\ref{fig:mobius 4x3} shows~$D'_{4\times3}$. It is the same as
$D_{4\times3}$ as depicted in Figure~\ref{fig:4x3}, except that arrowed edges,
\raisebox{0.5ex}{\tikz\draw[<<->,>=angle 60] (0,0)--(0.8,0);}, have been
substituted for certain edges. Each represents a pair of arrows---one from
right-to-left of weight~$2$ and one from left-to-right of weight~$1$---embedded
so that they coincide, as discussed in Section~\ref{section:Matchings and
trees}.
\begin{figure}[ht]
\begin{tikzpicture}[scale=1]
\foreach \i in {0,1,2} {
\draw (\i,0) -- (\i,3.6);
\draw (\i,3.6) .. controls +(90:20pt) and +(0:20pt) .. (-1.60,4.6);
}
\foreach \i in {0,1,2,3} {
\draw (-0.60,\i) -- (1,\i);
\draw[<<->,>=mytip] (1,\i) -- (2,\i);
\draw (-0.60,\i) .. controls +(180:20pt) and +(270:20pt) .. (-1.60,4.6);
}
\draw (-2,4.6) node(s){$s$};
\draw (1,-0.7) node(d){$D'_{4\times3}$};
\end{tikzpicture}
\caption{The symmetrized reduced Laplacian for $\mathrm{S}\Gamma_{8\times 5}$ is the reduced
Laplacian for $D'_{4\times 3}$. Arrowed edges each represent a pair of directed
edges of weights~$1$ and $2$, respectively, as indicated by the number of arrow
heads. All other edges have weight $1$.}\label{fig:mobius 4x3}
\end{figure}
Reasoning as in the proof of Theorem~\ref{thm1}, we see that the number of
perfect matchings of $\mathcal{H}(D'_{m\times n})$ is equal to the number of
perfect matchings of $\text{M}\Gamma_{2m\times(2n-1)}$, each counted according to weight.
This number is $\det(\widetilde{\Delta}^G)^t=\det\widetilde{\Delta}^G$, which is the number of symmetric
recurrents on $\mathrm{S}\Gamma_{2m\times(2n-1)}$ by Corollary~\ref{cor:nsr}.
\noindent [{\bf(\ref{thm2-1}) $=$ (\ref{thm2-3})}]: Exactly the same argument as
given in the proof of Theorem~\ref{thm1} shows that
\[
\det\widetilde{\Delta}^G=\prod_{h=0}^{m-1}\chi_n(t_{h,m}),
\]
where $t_{h,m}$ is as before, but now $\chi_n(x)$ is the characteristic
polynomial of $A'_n$. In light of Remark~\ref{remark:chebT}, it suffices to
show
\[
\chi_n(t_{h,m})=2\,T_n(1+2\,\xi_{m-h,m}^2)
\]
for each $h\in \{0,1,\cdots, m-1\}$, which we now do as before, by showing both sides of the equation satisfy the same
recurrence.
Defining $\chi_0(x):=2$ and expanding the determinant defining $\chi_n(x)$ yields
\begin{align}
\chi_0(x)&=2\notag\\
\chi_1(x)&=4-x\label{thm2-chi}\\
\chi_j(x)&=(4-x)\chi_{j-1}(x)-\chi_{j-2}(x)\quad \text{for $j\geq2$}.\notag
\end{align}
On the other hand, defining $C_j(x):=2\,T_{j}(x)$, it follows from~\eqref{eqn:1st} that
\begin{align}
C_0(x)&=2\notag\\
C_1(x)&=2x\label{thm2-C}\\
C_{j}(x)&=2x\,C_{j-1}(x)-C_{j-2}(x)\quad \text{for $j\geq2$}.\notag
\end{align}
The result now follows as before, using equation~\eqref{eqn:t-chi}.
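Concretely, the substitution matching the two recurrences is $x=4-2y$: then
$\chi_0(x)=2=C_0(y)$, $\chi_1(4-2y)=2y=C_1(y)$, and both sequences satisfy the
same recursion, so $\chi_n(4-2y)=2\,T_n(y)$ for all $n\geq0$. A short numerical
sanity check of this identity (illustrative only, built directly from the
recurrences~(\ref{thm2-chi}) and~(\ref{thm2-C})):
\begin{verbatim}
def chi(n, x):
    # recurrence (thm2-chi): chi_0 = 2, chi_1 = 4-x, chi_j = (4-x)chi_{j-1} - chi_{j-2}
    a, b = 2, 4 - x
    if n == 0:
        return a
    for _ in range(n - 1):
        a, b = b, (4 - x) * b - a
    return b

def twoT(n, y):
    # recurrence (thm2-C): C_0 = 2, C_1 = 2y, C_j = 2y C_{j-1} - C_{j-2}, so C_n = 2 T_n
    a, b = 2, 2 * y
    if n == 0:
        return a
    for _ in range(n - 1):
        a, b = b, 2 * y * b - a
    return b

for n in range(8):
    for y in (0.25, 1.0, 1.75, 3.5):
        assert abs(chi(n, 4 - 2 * y) - twoT(n, y)) < 1e-9 * max(1.0, abs(twoT(n, y)))
print("chi_n(4 - 2y) = 2 T_n(y) checked for small n")
\end{verbatim}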
\noindent [{\bf(\ref{thm2-3}) $=$ (\ref{thm2-4})}]: The numbers given in
parts~(\ref{thm2-3}) and~(\ref{thm2-4}) are equal by a straightforward
calculation, similar to that in the proof of the analogous result in
Theorem~\ref{thm1}, this time using~(\ref{T-factorization}).
\noindent [{\bf(\ref{thm2-4}) $=$ (\ref{thm2-5})}]: Formula~(2)
in~\cite{LW} gives the number of domino tilings of a $2m\times2n$ M\"obius
checkerboard. That formula is identical to our double-product in
part~(\ref{thm2-4}) but with $\sin((4k-1)\pi/(4n))$ substituted for
$\zeta_{k,n}$. Now,
\[
\sin\left( \frac{(4k-1)\pi}{4n}\right)=\cos\left(\frac{(4k-1-2n)\pi}{4n}\right).
\]
Defining $\theta(k)=(2k-1)\pi/(4n)$ and $\psi(k)=(4k-1-2n)\pi/(4n)$, it
therefore suffices to show that there is a permutation $\sigma$ of
$\{1,\dots,n\}$ such that $\theta(k)=\pm\psi(\sigma(k))$ for $k=1,\dots,n$
because, in that case, $\zeta_{k,n}=\cos(\theta(k))=\cos(\psi(\sigma(k)))$.
Such a permutation exists, for if $n=2t$, then
\begin{align*}
\theta(2\ell-1)&=-\psi(t-\ell+1),\qquad1\leq \ell\leq t,\\
\theta(2\ell)&=\psi(\ell+t),\qquad1\leq \ell\leq t,
\end{align*}
and if $n=2t-1$, then
\begin{align*}
\theta(2\ell-1)&=\psi(t+\ell-1),\qquad1\leq \ell\leq t,\\
\theta(2\ell)&=-\psi(t-\ell),\qquad1\leq \ell\leq t-1.
\end{align*}
\end{proof}
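The last step above amounts to saying that the multisets
$\{\cos\theta(k)\}_{k=1}^{n}$ and $\{\cos\psi(k)\}_{k=1}^{n}$ coincide. This is
easy to confirm numerically (an illustration, not part of the proof):
\begin{verbatim}
from math import cos, pi

def multisets_agree(n, tol=1e-12):
    theta = sorted(cos((2 * k - 1) * pi / (4 * n)) for k in range(1, n + 1))
    psi   = sorted(cos((4 * k - 1 - 2 * n) * pi / (4 * n)) for k in range(1, n + 1))
    return all(abs(a - b) < tol for a, b in zip(theta, psi))

print(all(multisets_agree(n) for n in range(1, 30)))  # expected: True
\end{verbatim}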
\begin{remark}\label{remark:Lu-Wu} In the proof of Theorem~\ref{thm2}, we
rewrote the double-product in part~(\ref{thm2-4}) as the Lu-Wu formula
((2)~in~\cite{LW}) for the number of domino tilings of the $2m\times2n$
M\"obius checkerboard:
\[
\prod_{h=1}^{m}\prod_{k=1}^{n}\left(4\,\xi_{h,m}^2+4\,\mu_{k,n}^2\right),
\]
where
$\mu_{k,n}:=\sin( (4k-1)\pi/(4n))$. Thus, it is the work of Lu and Wu
that allowed us to add part~(\ref{thm2-5}) to Theorem~\ref{thm2}. This is in
contrast to Theorem~\ref{thm1}, which gave an independent proof of the
Kasteleyn and Temperley-Fisher formula for the number of tilings of the
ordinary $2m\times2n$ checkerboard.
\end{remark}
\begin{example}
The $36$ tilings of the ordinary $4\times4$ checkerboard are listed in
Figure~\ref{fig:checker4x4}. Considering these as tilings of the
M\"obius-weighted $4\times 4$ checkerboard, the sum of the weights of the
tilings is $71$, which is the number of tilings of the $4\times 4$ M\"obius
checkerboard and the number of symmetric recurrents on $\mathrm{S}\Gamma_{4\times 3}$, in
accordance with Theorem~\ref{thm2}.
\end{example}
\begin{example}\label{example:mobius 3x1}
Figure~\ref{fig:m=3 n=1} shows the domino tilings of the M\"obius-weighted
$5\times 2$ checkerboard. The total number of tilings, counted according to
weight, is $41$, which is the number of domino tilings of a $6\times 2$
M\"obius checkerboard, in agreement with case $m=3$ and
$n=1$ of Theorem~\ref{thm2}.
\end{example}
\begin{figure}[ht]
\begin{tikzpicture}[scale=0.4]
\newcommand{\horizRect}[3]{
\draw[fill=gray!25,rounded corners=0.4mm] (#1+0.15,#2+0.15) rectangle (#1+1.85,#2+0.85);
\ifnum#3=1
\draw[fill=black] (#1+1.0,#2+0.5) circle[radius=0.1];
\else
\ifnum#3=2
\draw[fill=black] (#1+0.666,#2+0.5) circle[radius=0.1];
\draw[fill=black] (#1+1.333,#2+0.5) circle[radius=0.1];
\else
\ifnum#3=3
\draw[fill=black] (#1+0.5,#2+0.5) circle[radius=0.1];
\draw[fill=black] (#1+1.0,#2+0.5) circle[radius=0.1];
\draw[fill=black] (#1+1.5,#2+0.5) circle[radius=0.1];
\fi
\fi
\fi
}
\newcommand{\vertRect}[3]{
\draw[fill=gray!25,rounded corners=0.4mm] (#1+0.15,#2+0.15) rectangle (#1+0.85,#2+1.85);
\ifnum#3=1
\draw[fill=black] (#1+0.5,#2+1.0) circle[radius=0.1];
\else
\ifnum#3=2
\draw[fill=black] (#1+0.5,#2+0.666) circle[radius=0.1];
\draw[fill=black] (#1+0.5,#2+1.333) circle[radius=0.1];
\else
\ifnum#3=3
\draw[fill=black] (#1+0.5,#2+0.5) circle[radius=0.1];
\draw[fill=black] (#1+0.5,#2+1.0) circle[radius=0.1];
\draw[fill=black] (#1+0.5,#2+1.5) circle[radius=0.1];
\fi
\fi
\fi
}
\foreach \i in {0,1,2,3,4,5,6,7}{
\foreach \j in {0} {
\draw (4*\i,7*\j) grid (4*\i+2,7*\j+5);
}
}
\def\i{0}
\horizRect{\i}{4}{3};
\horizRect{\i}{3}{1};
\horizRect{\i}{2}{2};
\horizRect{\i}{1}{1};
\horizRect{\i}{0}{2};
\draw (\i+1,-1) node{$12$};
\def\i{4}
\horizRect{\i}{4}{3};
\horizRect{\i}{3}{1};
\horizRect{\i}{2}{2};
\vertRect{\i}{0}{1};
\vertRect{\i+1}{0}{1};
\draw (\i+1,-1) node{$6$};
\def\i{8}
\horizRect{\i}{4}{3};
\vertRect{\i}{2}{1};
\vertRect{\i+1}{2}{1};
\horizRect{\i}{1}{1};
\horizRect{\i}{0}{2};
\draw (\i+1,-1) node{$6$};
\def\i{12}
\horizRect{\i}{4}{3};
\vertRect{\i}{2}{1};
\vertRect{\i+1}{2}{1};
\vertRect{\i}{0}{1};
\vertRect{\i+1}{0}{1};
\draw (\i+1,-1) node{$3$};
\def\i{16}
\horizRect{\i}{4}{3};
\horizRect{\i}{3}{1};
\vertRect{\i}{1}{1};
\vertRect{\i+1}{1}{1};
\horizRect{\i}{0}{2};
\draw (\i+1,-1) node{$6$};
\def\i{20}
\vertRect{\i}{3}{1};
\vertRect{\i+1}{3}{1};
\horizRect{\i}{2}{2};
\horizRect{\i}{1}{1};
\horizRect{\i}{0}{2};
\draw (\i+1,-1) node{$4$};
\def\i{24}
\vertRect{\i}{3}{1};
\vertRect{\i+1}{3}{1};
\horizRect{\i}{2}{2};
\vertRect{\i}{0}{1};
\vertRect{\i+1}{0}{1};
\draw (\i+1,-1) node{$2$};
\def\i{28}
\vertRect{\i}{3}{1};
\vertRect{\i+1}{3}{1};
\vertRect{\i}{1}{1};
\vertRect{\i+1}{1}{1};
\horizRect{\i}{0}{2};
\draw (\i+1,-1) node{$2$};
\end{tikzpicture}
\caption{Domino tilings of the M\"obius-weighted $5\times2$ checkerboard. The
number of dots on each domino indicates its weight. The weight of each tiling
appears underneath.}\label{fig:m=3 n=1}
\end{figure}
\subsection{Symmetric recurrents on a $(2m-1)\times (2n-1)$ sandpile grid
graph.}\label{subsection:symmetric recurrents on oddxodd grid} The {\em
$2$-weighted $2m\times 2n$ grid graph}, $2$-$\Gamma_{2m\times2n}$, is the
ordinary $2m\times 2n$ grid graph but where each horizontal edge of the form
$\{(2m-2h,2n-1),(2m-2h,2n)\}$ for $0\leq h< m$ and each vertical edge of the
form $\{(2m-1,2n-2k),(2m,2n-2k)\}$ for $0\leq k< n$ is assigned the weight~$2$ (and all other
edges have weight~$1$). See Figure~\ref{fig:2-weighted} for an example.
The {\em $2$-weighted $2m\times 2n$ checkerboard} is the ordinary $2m\times 2n$
checkerboard but for which the weight of a domino tiling is taken to be the
weight of the corresponding perfect matching of $2$-$\Gamma_{2m\times 2n}$.
\begin{figure}[ht]
\begin{tikzpicture}[scale=0.6]
\draw (0,0) grid(5,3);
\foreach \j in {0,2}{
\draw[fill=white, draw=none] (4.5,\j) circle [radius=2.1mm];
\draw (4.5,\j) node{$2$};
}
\foreach \i in {1,3,5}{
\draw[fill=white, draw=none] (\i,0.5) circle [radius=2.8mm];
\draw (\i,0.5) node{$2$};
}
\node at (2.5,-1){$2$-$\Gamma_{4\times 6}$};
\end{tikzpicture}
\caption{A $2$-weighted grid graph.}\label{fig:2-weighted}
\end{figure}
\begin{thm}\label{thm3} Let $T_j(x)$ denote the $j$-th Chebyshev polynomial of
the first kind, and let
\[
\zeta_{h,d}:=\cos\left(\frac{(2h-1)\pi}{4d}\right)
\]
for all integers $h$ and $d\neq 0$.
Then for all integers $m,n\geq 1$, the following are equal:
\begin{enumerate}
\item\label{thm3-1} the number of symmetric recurrents on $\mathrm{S}\Gamma_{(2m-1)\times(2n-1)}$;
\item\label{thm3-2} the number of domino tilings of the $2$-weighted
checkerboard of size $2m\times 2n$;
\item\label{thm3-3} \
\[
(-1)^{mn}\,2^m\prod_{h=1}^m T_{2n}(i\,\zeta_{h,m});
\]
\item\label{thm3-4} \
\[
\prod_{h=1}^{m}\prod_{k=1}^{n}\left(4\,\zeta_{h,m}^2+4\,\zeta_{k,n}^2\right).
\]
\end{enumerate}
\end{thm}
\begin{remark}\label{remark:chebT2}
As in Remark~\ref{remark:chebT}, we use
identity~(\ref{eqn:half-angle}), this time to get
\[
T_{2n}(i\,\zeta_{h,m})=(-1)^n\,T_n(1+2\,\zeta_{h,m}^2),
\]
allowing us to equate the formula in part~(\ref{thm3-3}) with
\[
2^m\prod_{h=1}^mT_n(1+2\,\zeta_{h,m}^2).
\]
(We do not know of an analogous expression for the formula in
Theorem~\ref{thm1}~(\ref{thm1-3}) in terms of products of $n$-th Chebyshev polynomials.)
\end{remark}
\begin{proof} The proof is similar to those for Theorem~\ref{thm1} and
Theorem~\ref{thm2}. Let $A_n'$ be the matrix defined at the beginning of the
proof of Theorem~\ref{thm2}. Then the symmetrized reduced Laplacian, $\widetilde{\Delta}^G$,
for $\mathrm{S}\Gamma_{(2m-1)\times(2n-1)}$ is the matrix $D(m)$ displayed in the
statement of Lemma~\ref{lemma:tridiagonal} after setting $A=B=A_n'$ and
$C=2I_n$.
\noindent [{\bf(\ref{thm3-1}) $=$ (\ref{thm3-2})}]: The transpose $(\widetilde{\Delta}^G)^t$
is the reduced Laplacian of a sandpile graph, which we denote by $D''_{m\times
n}$ and embed in the plane as we did previously for $D_{m\times n}$ and
$D'_{m\times n}$ in Theorems~\ref{thm1} and~\ref{thm2}. The embedding
of $D''_{m\times n}$ differs from that of $D'_{m\times n}$ only in
that each edge of the form $((m,i),(m-1,i))$ where $i\in [n]$ now
carries weight 2, again embedded as one edge coincident with the edge
$((m-1,i),(m,i))$ in the plane (Figure~\ref{fig:oddxodd 4x3} displays
$D''_{4\times 3}$).
\begin{figure}[ht]
\begin{tikzpicture}[scale=1]
\foreach \i in {0,1,2} {
\draw (\i,0) -- (\i,3.6);
\draw (\i,3.6) .. controls +(90:20pt) and +(0:20pt) .. (-1.60,4.6);
}
\foreach \j in {0,1,2,3} {
\draw (-0.60,\j) -- (1,\j);
\draw[<<->,>=mytip] (1,\j) -- (2,\j);
\draw (-0.60,\j) .. controls +(180:20pt) and +(270:20pt) .. (-1.60,4.6);
}
\foreach \i in {0,1,2}{
\draw[<->>,>=mytip] (\i,0) -- (\i,1);
}
\draw (-2,4.6) node(s){$s$};
\draw (1,-0.8) node(d){$D''_{4\times3}$};
\end{tikzpicture}
\caption{The symmetrized reduced Laplacian for $\mathrm{S}\Gamma_{7\times 5}$ is the reduced
Laplacian for $D''_{4\times 3}$. (The edge weights are encoded as in
Figure~\ref{fig:mobius 4x3}).}\label{fig:oddxodd 4x3}
\end{figure}
The result for this
section of the proof now follows just as it did in the proof of
Theorem~\ref{thm2}.
\noindent [{\bf(\ref{thm3-1}) $=$ (\ref{thm3-3})}]: By Corollary~\ref{cor:nsr}
and Lemma~\ref{lemma:tridiagonal}, the number of symmetric recurrents on
$\mathrm{S}\Gamma_{(2m-1)\times(2n-1)}$ is
\[
\det\widetilde{\Delta}^G=(-1)^n\,\det(T),
\]
where
\[
T
=-A_n'\,U_{m-1}\left(\frac{A_n'}{2}\right)+2\,U_{m-2}\left(\frac{A_n'}{2}\right).
\]
Define
\[
s_{h,m}:=\cos\frac{(2h-1)\pi}{2m},\quad 1\leq h\leq m.
\]
Then, using identities from Section~\ref{subsection:tridiagonal},
\begin{align*}
T&=-U_{m}\left(\frac{A_n'}{2}\right)+U_{m-2}\left(\frac{A_n'}{2}\right)\\[5pt]
&=-2\,T_{m}\left(\frac{A_n'}{2}\right)\\
&=-\prod_{h=1}^{m}(A_n'-2\,s_{h,m}I_n).
\end{align*}
Thus,
\[
\det\widetilde{\Delta}^G=\prod_{h=1}^{m}\chi_n(2\,s_{h,m}),
\]
where $\chi_n$ is the characteristic polynomial of $A_n'$. Now consider the
recurrences~(\ref{thm2-chi}) and~(\ref{thm2-C}) in the proof of Theorem~\ref{thm2}.
Substituting $2s_{h,m}$ for $x$ in the former and $2-s_{h,m}$ for $x$ in the
latter, the two recurrences become the same. It follows that
$\chi_n(2\,s_{h,m})=2\,T_n(2-s_{h,m})$. Then using a double-angle formula for cosine
and identity~(\ref{eqn:half-angle}),
\[
\chi_n(2\,s_{h,m})=2\,T_n(2-s_{h,m})=2\,T_n(1+2\,\zeta_{m-h+1,m}^2),
\]
and the result follows from Remark~\ref{remark:chebT2}.
\noindent [{\bf(\ref{thm3-3}) $=$ (\ref{thm3-4})}]: The numbers given in
parts~(\ref{thm3-3}) and~(\ref{thm3-4}) are equal by a straightforward
calculation, as in the proof of the analogous results in
Theorems~\ref{thm1} and \ref{thm2}.
\end{proof}
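The equality of parts~(\ref{thm3-3}) and~(\ref{thm3-4}) can likewise be
spot-checked numerically; the sketch below (illustrative only) evaluates
part~(\ref{thm3-3}) directly with complex arithmetic and compares it with the
double product of part~(\ref{thm3-4}).
\begin{verbatim}
from math import cos, pi, prod

def T(j, x):
    # Chebyshev polynomial of the first kind via T_j = 2x T_{j-1} - T_{j-2}
    a, b = 1, x
    if j == 0:
        return a
    for _ in range(j - 1):
        a, b = b, 2 * x * b - a
    return b

zeta = lambda h, d: cos((2 * h - 1) * pi / (4 * d))

def part3(m, n):
    val = complex((-1) ** (m * n) * 2 ** m)
    for h in range(1, m + 1):
        val *= T(2 * n, 1j * zeta(h, m))
    return val.real

def part4(m, n):
    return prod(4 * zeta(h, m) ** 2 + 4 * zeta(k, n) ** 2
                for h in range(1, m + 1) for k in range(1, n + 1))

for m in range(1, 5):
    for n in range(1, 5):
        assert abs(part3(m, n) - part4(m, n)) < 1e-6 * part4(m, n)
print("Theorem 3, parts (3) and (4), agree for 1 <= m, n <= 4")
\end{verbatim}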
\begin{remark} Identities among trigonometric functions and among Chebyshev
polynomials allow our formulae to be recast many ways.
Remarks~\ref{remark:chebT},~\ref{remark:Lu-Wu}, and~\ref{remark:chebT2} have
already provided some examples. In addition, we note that in
part~(\ref{thm2-4}) of Theorem~\ref{thm2} and in parts~(\ref{thm3-3})
and~(\ref{thm3-4}) of Theorem~\ref{thm3}, one may replace each $\zeta_{h,n}$
with $\sin((2h-1)\pi/(4n))$ or, as discussed at the end of the proof of
Theorem~\ref{thm2}, with $\sin( (4h-1)\pi/(4n))$.
\end{remark}
\section{The order of the all-twos configuration}\label{section:order of all-2s}
Let $c$ be a configuration on a sandpile graph $\Gamma$, not necessarily an
element of $\mathcal{S}(\Gamma)$, the sandpile group. If $k$ is a nonnegative
integer, let $k\cdot c$ denote the vertex-wise addition of $c$ with itself $k$
times, without stabilizing. The {\em order} of $c$, denoted $\mbox{order}(c)$,
is the smallest positive integer $k$ such that $k\cdot c$ is in the image of the
reduced Laplacian of $\Gamma$. If $c$ is recurrent, then the order of $c$ is
the same as its order as an element of $\mathcal{S}(\Gamma)$ according to the
isomorphism~(\ref{basic iso}).
Consider the sandpile grid graph, $\mathrm{S}\Gamma_{m\times n}$, with $m,n\geq 2$. For each
nonnegative integer $k$, let $\vec{k}_{m\times n}=k\cdot\vec{1}_{m\times n}$ be
the {\em all-$k$s} configuration on $\mathrm{S}\Gamma_{m\times n}$ consisting of~$k$ grains
of sand on each vertex. The motivating question for this section is: what is
the order of $\vec{1}_{m\times n}$? Since $\vec{1}_{m\times n}$ has
up-down and left-right symmetry, its order must divide the order of the group of
symmetric recurrents on $\mathrm{S}\Gamma_{m\times n}$ calculated in
Theorems~\ref{thm1},~\ref{thm2}, and~\ref{thm3}. The number of domino tilings
of a $2n\times 2n$ checkerboard can be written as $2^na_n^2$ where $a_n$ is an
odd integer (cf.~Proposition~\ref{prop:a_n}). Our main result is
Theorem~\ref{thm4} which, through Corollary~\ref{cor:all-2s order}, says that the
order of $\vec{2}_{2n\times 2n}$ divides $a_n$.
\begin{prop}\label{prop:all-ones} Let $m,n\geq2$.
\begin{enumerate}
\item The configuration $\vec{1}_{m\times n}$ is not recurrent.
\item The configuration $\vec{2}_{m\times n}$ is recurrent.
\item\label{prop:all-ones3} The order of $\vec{1}_{m\times n}$ is either
$\mathrm{order}(\vec{2}_{m\times n})$ or $2\,\mathrm{order}(\vec{2}_{m\times
n})$.
\item Let $\widetilde{\Delta}_{m\times n}$ be the reduced Laplacian of $\mathrm{S}\Gamma_{m\times n}$.
The order of $\vec{1}_{m\times n}$ is the smallest integer $k$ such that
$k\cdot\widetilde{\Delta}_{m\times n}^{-1}\vec{1}_{m\times n}$ is
an integer vector.
\end{enumerate}
\end{prop}
\begin{proof}
Part~(1) follows immediately from the burning algorithm (Theorem~\ref{thm:Dhar}).
For part~(2), we start by orienting some of the edges of $\mathrm{S}\Gamma_{m\times n}$ as shown in
Figure~\ref{fig:orientation}.
\begin{figure}[ht]
\begin{tikzpicture}[scale=0.7]
\SetVertexMath
\GraphInit[vstyle=Art]
\SetVertexNoLabel
\tikzset{VertexStyle/.style = {
shape = circle,
shading = ball,
ball color = black,
inner sep = 1.5pt,
}}
\SetUpEdge[color=black]
\foreach \i in {1,2,3,4,5}{
\foreach \j in {1,2,3,4}{
\Vertex[x=\i,y=\j]{v\i\j}
}
}
\Edge[style={->,>=mytip, semithick}](v14)(v13)
\Edge[style={->,>=mytip, semithick}](v24)(v23)
\Edge[style={->,>=mytip, semithick}](v34)(v33)
\Edge[style={->,>=mytip, semithick}](v44)(v43)
\Edge[style={->,>=mytip, semithick}](v54)(v53)
\Edge[style={->,>=mytip, semithick}](v13)(v12)
\Edge[style={->,>=mytip, semithick}](v23)(v22)
\Edge[style={->,>=mytip, semithick}](v33)(v32)
\Edge[style={->,>=mytip, semithick}](v43)(v42)
\Edge[style={->,>=mytip, semithick}](v53)(v52)
\Edge[](v12)(v11)
\Edge[](v22)(v21)
\Edge[](v32)(v31)
\Edge[](v42)(v41)
\Edge[](v52)(v51)
\Edge[style={->,>=mytip, semithick}](v11)(v21)
\Edge[style={->,>=mytip, semithick}](v12)(v22)
\Edge[style={->,>=mytip, semithick}](v13)(v23)
\Edge[style={->,>=mytip, semithick}](v14)(v24)
\Edge[style={->,>=mytip, semithick}](v21)(v31)
\Edge[style={->,>=mytip, semithick}](v22)(v32)
\Edge[style={->,>=mytip, semithick}](v23)(v33)
\Edge[style={->,>=mytip, semithick}](v24)(v34)
\Edge[style={->,>=mytip, semithick}](v31)(v41)
\Edge[style={->,>=mytip, semithick}](v32)(v42)
\Edge[style={->,>=mytip, semithick}](v33)(v43)
\Edge[style={->,>=mytip, semithick}](v34)(v44)
\Edge[](v41)(v51)
\Edge[](v42)(v52)
\Edge[](v43)(v53)
\Edge[](v44)(v54)
\tikzset{VertexStyle/.style = {inner sep = 0.0pt,}}
\foreach \i in {1,2,3,4,5}{
\Vertex[x=\i,y=0]{s\i1}
\Edge[style={->,>=mytip, semithick}](s\i1)(v\i1)
\Vertex[x=\i,y=5]{s\i4}
\Edge[style={->,>=mytip, semithick}](s\i4)(v\i4)
}
\foreach \j in {1,2,3,4}{
\Vertex[x=0,y=\j]{s1\j}
\Edge[style={->,>=mytip, semithick}](s1\j)(v1\j)
\Vertex[x=6,y=\j]{s5\j}
\Edge[style={->,>=mytip, semithick}](s5\j)(v5\j)
}
\end{tikzpicture}
\caption{Partial orientation of $\mathrm{S}\Gamma_{4\times5}$. Arrows pointing into the grid
from the outside represent edges from the sink vertex.}\label{fig:orientation}
\end{figure}
First, orient all the edges containing the sink, $s$, so that they point away
from~$s$. Next, orient all the horizontal edges to point to the right except
for the last column of horizontal arrows. Finally, orient all the vertical
edges down except for the last row of vertical arrows. More formally, define
the {\em partial orientation} of $\mathrm{S}\Gamma_{m\times n}$,
\begin{align*}
\mathcal{O}:=
&\{( s,(i,j) ): 1\leq i\leq m,j\in\{1,n\} \}\\
&\cup\{( s,(i,j) ): i\in\{1,m\}, 1\leq j\leq n\}\\
&\cup\{( (i,j),(i,j+1) ):1\leq j\leq n-2\}\\
&\cup\{( (i,j),(i+1,j) ): 1\leq i\leq m-2\}.
\end{align*}
Use $\mathcal{O}$ to define a poset $P$ on the vertices of $\mathrm{S}\Gamma_{m\times n}$ by first setting
$u<_P v$ if $(u,v)\in\mathcal{O}$, then taking the transitive closure. Now list the
vertices of $\mathrm{S}\Gamma_{m\times n}$ in any order $v_1,v_2,\dots$ such that $v_i<_Pv_j$
implies $i<j$. Thus, $v_1=s$ and $v_2,v_3,v_4,v_5$ are the four corners of the
grid, in some order. Starting from $\vec{2}_{m\times n}$, fire $v_1$.
This has the effect of adding the burning configuration to $\vec{2}_{m\times
n}$. Since the indegree of each non-sink vertex with respect to $\mathcal{O}$ is $2$,
after $v_1,\dots,v_{i-1}$ have fired,~$v_i$ is unstable. Thus, after firing the
sink, every vertex will fire while stabilizing the resulting configuration. So
$\vec{2}_{m\times n}$ is recurrent by the burning algorithm.
\noindent[{\sc note:} One way to think about listing the vertices, as prescribed
above, is as follows. Let $P_{-1}:=\{s\}$, and for $i\geq 0$, let $P_i$ be
those elements whose distance from some corner vertex is~$i$. (By {\em
distance} from a corner vertex, we mean the length of a longest chain in $P$ or
the length of any path in $\mathcal{O}$ starting from a corner vertex.) For instance, $P_0$
consists of the four corners. After firing the vertices in $P_{-1},P_0,\dots,P_{i-1}$,
all of the vertices in $P_i$ are unstable and can be fired in any order.]
For part~(3), let $\alpha=\mathrm{order}(\vec{1}_{m\times n})$ and
$\beta=\mathrm{order}(\vec{2}_{m\times n})$, and let $e$ be the identity of
$\mathcal{S}(\mathrm{S}\Gamma_{m\times n})$. Let $\widetilde{\mathcal{L}}$ denote the image of the reduced Laplacian,
$\widetilde{\Delta}$, of $\mathrm{S}\Gamma_{m\times n}$. Since $e=(2\alpha\cdot\vec{1}_{m\times
n})^{\circ}=(\alpha\cdot\vec{2}_{m\times n})^{\circ}$ and
$e=(\beta\cdot\vec{2}_{m\times n})^{\circ}=(2\beta\cdot\vec{1}_{m\times
n})^{\circ}$, we have
\begin{equation}\label{eqn:alpha beta}
2\beta\geq\alpha\geq\beta.
\end{equation}
We have $(2\beta-\alpha)\cdot\vec{1}_{m\times n}=0\bmod\widetilde{\mathcal{L}}$. Suppose
$\alpha\neq2\beta$. It cannot be that $2\beta-\alpha=1$. Otherwise,
$\vec{1}_{m\times n}=0\bmod\widetilde{\mathcal{L}}$. It would then follow that $\vec{2}_{m\times
n}$ and $\vec{3}_{m\times n}$ are recurrent elements equivalent to $0$ modulo
$\widetilde{\mathcal{L}}$, whence, $\vec{2}_{m\times n}=\vec{3}_{m\times n}=e$, a contradiction.
Thus, $(2\beta-\alpha)\cdot \vec{1}_{m\times n}\geq\vec{2}_{m\times n}$. Since
$\vec{2}_{m\times n}$ is recurrent, $((2\beta-\alpha)\cdot \vec{1}_{m\times
n})^{\circ}$ is recurrent and equivalent to $0$ modulo $\widetilde{\mathcal{L}}$, and thus must be
$e$. So $2\beta-\alpha\geq\alpha$, and the right side of~\eqref{eqn:alpha beta} implies
$\alpha=\beta$, as required.
Now consider part~(4). The order of $\vec{1}_{m\times n}$ is the smallest
positive integer $k$ such that $k\cdot\vec{1}_{m\times n}=0\bmod\widetilde{\mathcal{L}}$, i.e., for
which there exists an integer vector~$v$ such that $k\cdot\vec{1}_{m\times
n}=\widetilde{\Delta}_{m\times n}\,v$. The result follows.
\end{proof}
\begin{example} We have $\mathrm{order}(\vec{1}_{2\times
2})=2\,\mathrm{order}(\vec{2}_{2\times 2})=2$, and
$\mathrm{order}(\vec{1}_{2\times 3})=\mathrm{order}(\vec{2}_{2\times 3})=7$.
In general, we do not know which case will hold in part~\ref{prop:all-ones3}
of Proposition~\ref{prop:all-ones}.
\end{example}
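Part~(4) of Proposition~\ref{prop:all-ones} suggests a direct way to compute
such orders: solve $\widetilde{\Delta}_{m\times n}x=\vec{1}_{m\times n}$ over $\mathbb{Q}$ and take
the least common multiple of the denominators of $x$ (and of $2x$ for the
all-$2$s configuration). The Python sketch below does this with exact rational
arithmetic; it assumes the usual convention that every non-sink vertex of
$\mathrm{S}\Gamma_{m\times n}$ has degree~$4$, with boundary vertices wired to the sink, so
that the reduced Laplacian is $4I$ minus the adjacency matrix of the $m\times
n$ grid. It reproduces the values $2$, $1$, $7$, $7$ quoted above.
\begin{verbatim}
from fractions import Fraction
from math import lcm

def reduced_laplacian(m, n):
    # 4*I minus the adjacency matrix of the m x n grid graph
    idx = {(i, j): i * n + j for i in range(m) for j in range(n)}
    N = m * n
    L = [[Fraction(0)] * N for _ in range(N)]
    for (i, j), a in idx.items():
        L[a][a] = Fraction(4)
        for di, dj in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            b = idx.get((i + di, j + dj))
            if b is not None:
                L[a][b] = Fraction(-1)
    return L

def solve(L, rhs):
    # Gaussian elimination over the rationals
    N = len(L)
    A = [row[:] + [rhs[i]] for i, row in enumerate(L)]
    for c in range(N):
        p = next(r for r in range(c, N) if A[r][c] != 0)
        A[c], A[p] = A[p], A[c]
        piv = A[c][c]
        A[c] = [v / piv for v in A[c]]
        for r in range(N):
            if r != c and A[r][c] != 0:
                f = A[r][c]
                A[r] = [u - f * v for u, v in zip(A[r], A[c])]
    return [A[r][N] for r in range(N)]

def order_all_ks(m, n, k):
    # smallest j with j * Delta^{-1} (k,...,k) an integer vector
    x = solve(reduced_laplacian(m, n), [Fraction(k)] * (m * n))
    return lcm(*[v.denominator for v in x])

print(order_all_ks(2, 2, 1), order_all_ks(2, 2, 2))  # expected: 2 1
print(order_all_ks(2, 3, 1), order_all_ks(2, 3, 2))  # expected: 7 7
\end{verbatim}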
Table~\ref{table:all-2s} records the order of $\vec{2}_{m\times n}$ for
$m,n\in\{2,3,\dots,10\}$.
\begin{table}[ht]
\centering
\begin{tabular}{c|lllllllll}
$m\backslash n$&2&3&4&5&6&7&8&9&10\\\hline
2&1&7&5&9&13&47&17&123&89\\
3&$\cdot$&8&71&679&769&3713&8449&81767&93127\\
4&$\cdot$&$\cdot$&3&77&281&4271&2245&8569&18061\\
5&$\cdot$&$\cdot$&$\cdot$&52&17753&726433&33507&24852386&20721019\\
6&$\cdot$&$\cdot$&$\cdot$&$\cdot$&29&434657&167089&265721&4213133\\
7&$\cdot$&$\cdot$&$\cdot$&$\cdot$&$\cdot$&272&46069729&8118481057&4974089647\\
8&$\cdot$&$\cdot$&$\cdot$&$\cdot$&$\cdot$&$\cdot$&901&190818387&1031151241\\
9&$\cdot$&$\cdot$&$\cdot$&$\cdot$&$\cdot$&$\cdot$&$\cdot$&73124&1234496016491\\
10&$\cdot$&$\cdot$&$\cdot$&$\cdot$&$\cdot$&$\cdot$&$\cdot$&$\cdot$&89893
\end{tabular}
\caption{Order of the all-2s element on $\mathrm{S}\Gamma_{m\times n}$ (symmetric in $m$
and $n$).}
\label{table:all-2s}
\end{table}
Perhaps the most striking feature of Table~\ref{table:all-2s} is the relatively
small size of the numbers along the diagonal ($m=n$). It seems natural to
group these according to parity. The sequence $\{\vec{2}_{2n\times
2n}\}_{n\geq1}$ starts $1,3,29,901,89893,\dots$, which is the beginning of the
famous sequence, $(a_n)_{n\geq1}$, we now describe. The following was
established independently by several people (cf.~\cite{JSZ}):
\begin{prop}\label{prop:a_n}
The number of domino tilings of a $2n\times 2n$ checkerboard has the form
\[
2^na_n^2
\]
where $a_n$ is an odd integer.
\end{prop}
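For the reader who wants to tabulate $a_n$, the following sketch evaluates the
classical product formula for the number of tilings of a $2n\times 2n$
checkerboard (the Kasteleyn--Temperley--Fisher count referred to in
Remark~\ref{remark:Lu-Wu}; the formula is stated here as an assumption rather
than rederived) and extracts $a_n$; it returns $1,3,29,901,89893$ for
$n=1,\dots,5$.
\begin{verbatim}
from math import cos, pi, prod, isqrt

def tilings_2n_by_2n(n):
    # assumed Kasteleyn--Temperley--Fisher formula:
    # prod_{h,k} (4 cos^2(h pi/(2n+1)) + 4 cos^2(k pi/(2n+1)))
    val = prod(4 * cos(h * pi / (2 * n + 1)) ** 2 + 4 * cos(k * pi / (2 * n + 1)) ** 2
               for h in range(1, n + 1) for k in range(1, n + 1))
    return round(val)

for n in range(1, 6):
    t = tilings_2n_by_2n(n)
    a = isqrt(t // 2 ** n)            # t = 2^n * a_n^2 with a_n odd
    assert t == 2 ** n * a * a and a % 2 == 1
    print(n, t, a)                    # a_n = 1, 3, 29, 901, 89893
\end{verbatim}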
For each positive integer $n$, let $P_n$ be the sandpile graph with vertices
\[
V(P_n)=\{v_{i,j}:1\leq i\leq n\mbox{ and }1\leq j \leq i\}\cup\{s\}.
\]
Each $v_{i,j}$ is connected to those vertices $v_{i',j'}$ such that
$|i-i'|+|j-j'|=1$. In addition, every vertex of the form $v_{i,n}$ is connected
to the sink vertex, $s$. The first few cases are illustrated in
Figure~\ref{fig:Pn}.
\begin{figure}[ht]
\begin{tikzpicture}[scale=0.8]
\SetVertexMath
\GraphInit[vstyle=Art]
\SetUpVertex[MinSize=3pt]
\SetVertexLabel
\tikzset{VertexStyle/.style = {
shape = circle,
shading = ball,
ball color = black,
inner sep = 1.5pt
}}
\SetUpEdge[color=black]
\Vertex[NoLabel,x=0,y=0]{a11}
\Vertex[LabelOut=true,Lpos=0,L={s},x=1,y=0]{as}
\Edge[](a11)(as)
\Vertex[NoLabel,x=3,y=0]{b11}
\Vertex[NoLabel,x=4,y=0]{b12}
\Vertex[NoLabel,x=4,y=1]{b21}
\Vertex[LabelOut=true,Lpos=0,L={s},x=5,y=0.5]{bs}
\Edges(b11,b12,b21)
\Edge[](b12)(bs)
\Edge[](b21)(bs)
\Vertex[NoLabel,x=7,y=0]{c11}
\Vertex[NoLabel,x=8,y=0]{c12}
\Vertex[NoLabel,x=9,y=0]{c13}
\Vertex[NoLabel,x=8,y=1]{c22}
\Vertex[NoLabel,x=9,y=1]{c23}
\Vertex[NoLabel,x=9,y=2]{c33}
\Vertex[LabelOut=true,Lpos=0,L={s},x=10,y=1]{cs}
\Edges(c11,c12,c13,c23,c33)
\Edges(c12,c22,c23)
\Edge[](c13)(cs)
\Edge[](c23)(cs)
\Edge[](c33)(cs)
\draw (0.5,-0.8) node(G){$P_1$};
\draw (4,-0.8) node(G){$P_2$};
\draw (8.5,-0.8) node(G){$P_3$};
\end{tikzpicture}
\caption{}\label{fig:Pn}
\end{figure}
Next define a family of triangular checkerboards, $H_n$, as in
Figure~\ref{fig:Hn}. The checkerboard $H_n$ for $n\geq 2$ is formed by adding
a $2\times (2n-1)$ array (width-by-height) of squares to the right of
$H_{n-1}$.
\begin{figure}[ht]
\begin{tikzpicture}[scale=0.5]
\draw (0,0) -- (2,0) -- (2,1) -- (0,1) -- (0,0);
\draw (1,0) -- (1,1);
\draw (4,0) -- (8,0);
\draw (4,1) -- (8,1);
\draw (6,2) -- (8 ,2);
\draw (6,3) -- (8 ,3);
\draw (4,0) -- (4,1);
\draw (5,0) -- (5,1);
\draw (6,0) -- (6,3);
\draw (7,0) -- (7,3);
\draw (8,0) -- (8,3);
\draw (10,0) -- (16,0);
\draw (10,1) -- (16,1);
\draw (12,2) -- (16,2);
\draw (12,3) -- (16,3);
\draw (14,4) -- (16,4);
\draw (14,5) -- (16,5);
\draw (10,0) -- (10,1);
\draw (11,0) -- (11,1);
\draw (12,0) -- (12,3);
\draw (13,0) -- (13,3);
\draw (14,0) -- (14,5);
\draw (15,0) -- (15,5);
\draw (16,0) -- (16,5);
\draw (1,-0.8) node(G){$H_1$};
\draw (6,-0.8) node(G){$H_2$};
\draw (13,-0.8) node(G){$H_3$};
\end{tikzpicture}
\caption{}\label{fig:Hn}
\end{figure}
These graphs were introduced by M.~Ciucu~\cite{Ciucu} and later used by
L.~Pachter~\cite{Pachter} to give the first combinatorial proof of
Proposition~\ref{prop:a_n}. As part of his proof, Pachter shows that~$a_n$
is the number of domino tilings of $H_n$.
As noted in~\cite{KPW}, considering $H_n$ as a planar graph and taking its dual
(forgetting about the unbounded face of $H_n$) gives the graph
$\mathcal{H}(P_n)$ corresponding to $P_n$ under the generalized Temperley
bijection of Section~\ref{section:Matchings and trees}. See Figure~\ref{fig:Hn and H(Pn)}.
\begin{figure}[ht]
\begin{tikzpicture}[scale=0.4]
\SetVertexMath
\GraphInit[vstyle=Art]
\SetUpVertex[MinSize=3pt]
\SetVertexLabel
\tikzset{VertexStyle/.style = {
shape = circle,
shading = ball,
ball color = black,
inner sep = 1.1pt
}}
\SetUpEdge[color=black]
\Vertex[NoLabel,x=0.5,y=0.5]{a1}
\Vertex[NoLabel,x=1.5,y=0.5]{a2}
\Vertex[NoLabel,x=2.5,y=0.5]{a3}
\Vertex[NoLabel,x=3.5,y=0.5]{a4}
\Vertex[NoLabel,x=4.5,y=0.5]{a5}
\Vertex[NoLabel,x=5.5,y=0.5]{a6}
\Edges(a1,a2,a3,a4,a5,a6)
\Vertex[NoLabel,x=2.5,y=1.5]{b3}
\Vertex[NoLabel,x=3.5,y=1.5]{b4}
\Vertex[NoLabel,x=4.5,y=1.5]{b5}
\Vertex[NoLabel,x=5.5,y=1.5]{b6}
\Edges(b3,b4,b5,b6)
\Vertex[NoLabel,x=2.5,y=2.5]{c3}
\Vertex[NoLabel,x=3.5,y=2.5]{c4}
\Vertex[NoLabel,x=4.5,y=2.5]{c5}
\Vertex[NoLabel,x=5.5,y=2.5]{c6}
\Edges(c3,c4,c5,c6)
\Vertex[NoLabel,x=4.5,y=3.5]{d5}
\Vertex[NoLabel,x=5.5,y=3.5]{d6}
\Edges(d5,d6)
\Vertex[NoLabel,x=4.5,y=4.5]{e5}
\Vertex[NoLabel,x=5.5,y=4.5]{e6}
\Edges(e5,e6)
\Edges(a3,b3,c3)
\Edges(a4,b4,c4)
\Edges(a5,b5,c5,d5,e5)
\Edges(a6,b6,c6,d6,e6)
\draw (0,0) -- (6,0);
\draw (0,1) -- (6,1);
\draw (2,2) -- (6,2);
\draw (2,3) -- (6,3);
\draw (4,4) -- (6,4);
\draw (4,5) -- (6,5);
\draw (0,0) -- (0,1);
\draw (1,0) -- (1,1);
\draw (2,0) -- (2,3);
\draw (3,0) -- (3,3);
\draw (4,0) -- (4,5);
\draw (5,0) -- (5,5);
\draw (6,0) -- (6,5);
\end{tikzpicture}
\caption{$H_3$ and $\mathcal{H}(P_3)$.}\label{fig:Hn and H(Pn)}
\end{figure}
\begin{prop}\label{prop:an} The number of elements in the sandpile group for $P_n$ is
\[
\#\,\mathcal{S}(P_n) =a_n,
\]
where $a_n$ is as in Proposition~\ref{prop:a_n}.
\end{prop}
\begin{proof}
The number of domino tilings of $H_n$ equals the number of perfect matchings
of $\mathcal{H}(P_n)$. By the generalized Temperley bijection, the latter
is the number of spanning trees of $P_n$, and hence, the order of the sandpile group of
$P_n$. As mentioned above, Pachter shows in~\cite{Pachter} that~$a_n$ is the
number of domino tilings of $H_n$.
\end{proof}
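The matrix-tree computation behind this proof is easy to carry out explicitly:
build the reduced Laplacian of $P_n$ and take its determinant. The sketch below
uses the triangular realisation of $P_n$ suggested by Figure~\ref{fig:Pn}
(vertices $\{(i,j):1\leq i\leq j\leq n\}$ with nearest-neighbour edges and the
column $j=n$ joined to the sink; this coordinate choice is an assumption made
only for the illustration) and recovers $a_1,\dots,a_4=1,3,29,901$.
\begin{verbatim}
from fractions import Fraction

def spanning_trees_Pn(n):
    verts = [(i, j) for j in range(1, n + 1) for i in range(1, j + 1)]
    idx = {v: k for k, v in enumerate(verts)}
    N = len(verts)
    L = [[Fraction(0)] * N for _ in range(N)]
    for (i, j) in verts:
        a = idx[(i, j)]
        for di, dj in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            w = (i + di, j + dj)
            if w in idx:                  # edge inside the triangle
                L[a][a] += 1
                L[a][idx[w]] -= 1
        if j == n:                        # edge to the sink
            L[a][a] += 1
    # determinant by exact Gaussian elimination over Q
    det = Fraction(1)
    for c in range(N):
        p = next(r for r in range(c, N) if L[r][c] != 0)
        if p != c:
            L[c], L[p] = L[p], L[c]
            det = -det
        det *= L[c][c]
        inv = 1 / L[c][c]
        for r in range(c + 1, N):
            f = L[r][c] * inv
            if f:
                L[r] = [x - f * y for x, y in zip(L[r], L[c])]
    return int(det)

print([spanning_trees_Pn(n) for n in range(1, 5)])  # expected [1, 3, 29, 901]
\end{verbatim}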
The main result of this section is the following:
\begin{thm}\label{thm4}
Let $\langle\vec{2}_{2n\times 2n}\rangle$ be the cyclic subgroup of
$\mathcal{S}(\mathrm{S}\Gamma_{2n\times 2n})$ generated by the all-$2$s element of
$\mathrm{S}\Gamma_{2n\times 2n}$, and let
$\vec{2}_{n}$ denote the all-$2$s element on $P_n$. Then the mapping
\[
\psi\colon\langle\vec{2}_{2n\times 2n}\rangle\to\mathcal{S}(P_n),
\]
determined by $\psi(\vec{2}_{2n\times 2n})=\vec{2}_n$, is a well-defined
injection of groups.
\end{thm}
\begin{proof}
Let $\widetilde{V}_n$ and $\widetilde{V}_{2n\times 2n}$ denote the non-sink vertices of $P_n$ and
$\mathrm{S}\Gamma_{2n\times 2n}$, respectively. We view configurations on $P_n$ as triangular
arrays of natural numbers and configurations on $\mathrm{S}\Gamma_{2n\times 2n}$ as
$2n\times 2n$ square arrays of natural numbers. Divide the $2n\times 2n$ grid
by drawing bisecting horizontal, vertical, and diagonal lines, creating eight
wedges. Define $\phi\colon\mathbb{Z} \widetilde{V}_n\to \mathbb{Z}\widetilde{V}_{2n\times 2n}$ by placing a
triangular array in the position of one of these wedges, then flipping about
lines, creating a configuration on $\mathrm{S}\Gamma_{2n\times 2n}$ with dihedral symmetry.
Figure~\ref{fig:phi} illustrates the case $n=4$.
\begin{figure}[ht]
\begin{tikzpicture}[scale=0.4]
\draw (-1,0) node(a){
$
\begin{array}{cccc}
&&&j\\
&&h&i\\
&e&f&g\\
a&b&c&d
\end{array}
$
};
\draw (14,0) node(b){
$
\begin{array}{cccccccc}
j&i&g&d&d&g&i&j\\
i&h&f&c&c&f&h&i\\
g&f&e&b&b&e&f&g\\
d&c&b&a&a&b&c&d\\
d&c&b&a&a&b&c&d\\
g&f&e&b&b&e&f&g\\
i&h&f&c&c&f&h&i\\
j&i&g&d&d&g&i&j
\end{array}
$
};
\draw [dashed] (14,-4.3) -- (14,4.3);
\draw [dashed] (8.5,0) -- (19.5,0);
\draw [dashed] (8.5,-4.3) -- (19.5,4.3);
\draw [dashed] (8.5,4.3) -- (19.5,-4.3);
\draw [fill=blue!20, opacity=0.2] (14,0) -- (19,4) -- (19,0) -- (14,0);
\path[|->] (3,0) edge node[above]{$\phi$} (7,0);
\end{tikzpicture}
\caption{$\phi\colon\mathbb{Z} P_4\to\mathbb{Z} \mathrm{S}\Gamma_{8\times 8}$.}\label{fig:phi}
\end{figure}
We define special types of configurations on $P_n$. First, let $s_n$ be
the configuration in which the number of grains of sand on each vertex records that
vertex's distance to the sink; then let $t_n$ denote the sandpile with no sand
except for one grain on each vertex along the boundary diagonal, i.e., those
vertices with degree less than~$3$. Figure~\ref{fig:special} illustrates the
case $n=4$.
\begin{figure}[ht]
\begin{tikzpicture}[scale=0.4]
\draw (0,0) node(a){
$
\begin{array}{cccc}
&&&1\\
&&2&1\\
&3&2&1\\
4&3&2&1
\end{array}
$
};
\draw (9,0) node(b){
$
\begin{array}{cccc}
&&&1\\
&&1&0\\
&1&0&0\\
1&0&0&0
\end{array}
$
};
\draw (0,-3) node{$s_4$};
\draw (9,-3) node{$t_4$};
\end{tikzpicture}
\caption{Special configurations on $P_4$.}\label{fig:special}
\end{figure}
Let $\widetilde{\Delta}_{n}$ and $\widetilde{\Delta}_{2n\times 2n}$ be the reduced Laplacians for $P_n$ and
$\mathrm{S}\Gamma_{2n\times 2n}$, respectively. The following are straightforward
calculations:
\begin{enumerate}
\item $\widetilde{\Delta}_n s_n=t_n$.
\item If $c\in\mathbb{Z} P_n$, then $\widetilde{\Delta}_{2n\times 2n}(\phi(c))$ equals
$\phi(\widetilde{\Delta}_n(c))$ at all non-sink vertices of $\mathrm{S}\Gamma_{2n\times 2n}$ except along the
diagonal and anti-diagonal, where they differ by a factor of~$2$:
\[
\widetilde{\Delta}_{2n\times 2n}(\phi(c))_{ij}=
\begin{cases}
2\,\phi(\widetilde{\Delta}_n(c))_{ij}&\text{for $i=j$ or $i+j=2n+1$,}\\
\phi(\widetilde{\Delta}_n(c))_{ij}&\text{otherwise.}
\end{cases}
\]
\end{enumerate}
Let $\widetilde{\mathcal{L}}_n\subset\mathbb{Z} V_n$ and $\widetilde{\mathcal{L}}_{2n\times 2n}\subset\mathbb{Z} V_{2n\times 2n}$ denote
the images of $\widetilde{\Delta}_n$ and $\widetilde{\Delta}_{2n\times 2n}$, respectively. Identify the
sandpile groups of $P_n$ and $\mathrm{S}\Gamma_{2n\times 2n}$ with $\mathbb{Z} V_n/\widetilde{\mathcal{L}}_n$ and $\mathbb{Z}
V_{2n\times 2n}/\widetilde{\mathcal{L}}_{2n\times 2n}$, respectively. To show that $\psi$ is
well-defined and injective, we need to show that $k\,\vec{2}_n\in\widetilde{\mathcal{L}}_n$ for some
integer $k$ if and only if $k\,\vec{2}_{2n\times 2n}\in\widetilde{\mathcal{L}}_{2n\times 2n}$.
Since the reduced Laplacians are invertible over $\mathbb{Q}$, there exist unique
vectors $x$ and~$y$ defined over the rationals such that
\[
\widetilde{\Delta}_nx=\vec{2}_n\quad\text{and}\quad \widetilde{\Delta}_{2n\times 2n}y=\vec{2}_{2n\times 2n}.
\]
Using the special configurations $s_n$ and $t_n$ and the two calculations noted
above,
\[
\widetilde{\Delta}_nx=\vec{2}_n\quad\Longrightarrow\quad \widetilde{\Delta}_n(x-s_n)=\vec{2}_n-t_n
\quad\Longrightarrow\quad \widetilde{\Delta}_{2n\times 2n}\phi(x-s_n)=\vec{2}_{2n\times 2n}.
\]
In other words,
\begin{equation}
y=\phi(x-s_n).
\label{eqn:y}
\end{equation}
Using the fact that $\widetilde{\Delta}_n$ is invertible over $\mathbb{Q}$, we see that
$k\,\vec{2}_n\in\widetilde{\mathcal{L}}_n$ if and only if $kx$ has integer coordinates.
By~(\ref{eqn:y}), this is the same as saying $ky$ has integer components, which
in turn is equivalent to $k\,\vec{2}_{2n\times 2n}\in\widetilde{\mathcal{L}}_{2n\times 2n}$, as required.
\end{proof}
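The first of the two calculations above, $\widetilde{\Delta}_n s_n=t_n$, can be
machine-checked for small $n$. The sketch below again uses the triangular
realisation of $P_n$ from Figure~\ref{fig:Pn} (a coordinate convention assumed
only for the illustration), with $s_n(v)$ the distance from $v$ to the sink and
$t_n$ the indicator of the vertices of degree less than~$3$.
\begin{verbatim}
def check_Delta_s_equals_t(n):
    verts = [(i, j) for j in range(1, n + 1) for i in range(1, j + 1)]
    vset = set(verts)
    nbrs = {v: [(v[0] + di, v[1] + dj)
                for di, dj in ((1, 0), (-1, 0), (0, 1), (0, -1))
                if (v[0] + di, v[1] + dj) in vset]
            for v in verts}
    deg = {v: len(nbrs[v]) + (1 if v[1] == n else 0) for v in verts}  # +1: sink edge
    s = {v: n - v[1] + 1 for v in verts}      # distance to the sink
    t = {v: 1 if deg[v] < 3 else 0 for v in verts}
    for v in verts:
        if deg[v] * s[v] - sum(s[w] for w in nbrs[v]) != t[v]:
            return False
    return True

print(all(check_Delta_s_equals_t(n) for n in range(1, 9)))  # expected: True
\end{verbatim}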
Combining this result with Proposition~\ref{prop:an} gives
\begin{cor}\label{cor:all-2s order}
The order of $\vec{2}_{2n\times 2n}$ divides $a_n$.
\end{cor}
\section{Conclusion}\label{section:conclusion}
We conclude with a list of suggestions for further work.
\noindent{\bf 1.} Theorem~\ref{thm2} states that the number of domino tilings of
a M\"obius checkerboard equals the number of domino tilings of an associated
ordinary checkerboard after assigning weights to certain domino positions. We
would like to see a direct bijective proof---one that does not rely on the Lu-Wu
formula (and thus giving a new proof of that formula). For instance, consider
the tiling of the $4\times4$ checkerboard that appears second in the top row of
Figure~\ref{fig:checker4x4}. It has one domino of weight~$2$. So this weighted
tiling should correspond to two tilings of the $4\times4$ M\"obius checkerboard.
Presumably, one of these two tilings is just the unweighted version of the given
tiling. One might imagine that the other tiling would result from pushing the
single blue domino to the right one square so that it now wraps around on the
M\"obius checkerboard, and then making room for this displacement by
systematically shifting the other dominos.
\smallskip
\noindent{\bf 2.} Section~\ref{section:order of all-2s} is motivated by Irena
Swanson's question: what is the order of the all-$1$s configuration,
$\vec{1}_{m\times n}$, on the $m\times n$ sandpile grid graph?
Proposition~\ref{prop:all-ones}~(\ref{prop:all-ones3}) shows this order is
either the same as or twice the order of the all-$2$s
configuration,~$\vec{2}_{m\times n}$. It would be nice to know when each case
holds. Corollary~\ref{cor:all-2s order} says the order of $\vec{2}_{2n\times
2n}$ divides the integer $a_n$ of Proposition~\ref{prop:a_n}, connected with
domino tilings. When is this order equal to $a_n$? Ultimately, of course, we
would like to know the answer to Swanson's original question.
\noindent{\bf 3.} Example~\ref{example:symm4x4} introduces an action of the
sandpile group of the $2m\times 2n$ sandpile grid graph on the domino tilings of
the $2m\times 2n$ checkerboard. Perhaps this group action deserves further study.
\noindent{\bf 4.} To summarize some of the main ideas of this paper, suppose a
group acts on an arbitrary sandpile graph $\Gamma$. If the corresponding
symmetrized reduced Laplacian or its transpose is the (ordinary) reduced
Laplacian of a sandpile graph~$\Gamma'$, then Proposition~\ref{prop:symmetric
subgroup iso} yields a group isomorphism between the symmetric configurations on
$\Gamma$ and the sandpile group $\mathcal{S}(\Gamma')$ of $\Gamma'$. By the
matrix-tree theorem, the size of the latter group is the number of spanning
trees of $\Gamma'$ (and, in fact, as mentioned earlier, $\mathcal{S}(\Gamma')$ is
well-known to act freely and transitively on the set of spanning trees of
$\Gamma'$). The generalized Temperley bijection then gives a correspondence
between the spanning trees of $\Gamma'$ and perfect matchings of a corresponding
graph, $\mathcal{H}(\Gamma')$. Thus, the number of symmetric recurrents
on $\Gamma$ equals the number of perfect matchings of $\mathcal{H}(\Gamma')$.
We have applied this idea to the case of a particular group acting on sandpile
grid graphs. Does it lead to anything interesting when applied to other classes
of graphs with group action? The Bachelor's thesis of the first
author~\cite{Florescu} includes a discussion of the case of a dihedral action on
sandpile grid graphs.
{}
\end{document}
\begin{document}
\title{There are no socialist primes less than $10^{9}$}
\begin{abstract}
\noindent
There are no primes $p$ with $5<p<10^{9}$ for which $2!, 3!, \ldots, (p-1)!$ are all distinct modulo $p$; it is conjectured that there are no such primes.
\end{abstract}
\section{The problem}
Erd\H{o}s asked whether there exist any primes $p>5$ for which the numbers $2!, 3!, \ldots, (p-1)!$ are all distinct modulo $p$. Were these $p-2$ factorials all distinct, then each of the $p-1$ non-zero residue classes modulo $p$ would contain at most one of them. Motivated by this redistribution of resources amongst classes, I shall call such a prime $p$ a \textit{socialist prime}.
Rokowska and Schinzel \cite{RokS}\footnote{This problem also appears as \textbf{F11} in Richard Guy's insuperable book \cite{GuyBook}.}
proved that $p$ is a socialist prime only if $p \equiv 5 \pmod 8$, and
\begin{equation}\label{con}
\left(\frac{5}{p}\right) = -1, \quad \left( \frac{-23}{p}\right) = 1.
\end{equation}
Moreover, if a socialist prime exists then none of the numbers $2!, 3!, \ldots, (p-1)!$ is congruent to $-((p-1)/2)!$.
The proof given by Rokowska and Schinzel is fairly straightforward.
One may dismiss primes of the form $p \equiv 3 \pmod 4$, since such primes have the property \cite[Thm 114]{HW} that $((p-1)/2)! \equiv \pm 1 \pmod p$. By Wilson's theorem, $(p-1)! \equiv -1 \pmod p$ and $(p-2)! \equiv (p-1)! (p-1)^{-1} \equiv +1 \pmod p$, conditions which, when taken together, prohibit $p$ from being a socialist prime. Henceforth consider $p \equiv 1 \pmod 4,$ in which case
\begin{equation}\label{gg}
\left\{ \left( \frac{p-1}{2}\right)! \right\}^{2} \equiv -1 \pmod p.
\end{equation}
If $2!, 3!, \ldots, (p-1)!$ are all distinct modulo $p$ then they must be permutations of the numbers $1,2,\ldots, p-1$ with the exception of some $r$, with $1 \leq r \leq p-1$, whence
$$\prod_{n=2}^{p-1} n! \equiv \frac{(p-1)!}{r} \pmod p,$$
so that
$$1 \equiv r \prod_{n=1}^{p-2} n! \equiv r((p-1)/2)! \prod_{1 \leq k < \frac{p-1}{2}} k! (p-k-1)! \pmod p.$$
Applying (\ref{gg}) and Wilson's theorem gives\begin{equation*}\label{g2}
r \prod_{1 \leq k < \frac{p-1}{2}} (-1)^{k+1} \equiv - \left( \frac{p-1}{2}\right) ! \pmod p,
\end{equation*}
so that $r \equiv \pm ((p-1)/2)! \pmod p$. One may dismiss the positive root, since $r$ is not congruent to any $j!$ for $1\leq j \leq p-1$. Hence
\begin{equation*}
\prod_{1 \leq k < \frac{p-1}{2}} (-1)^{k+1} \equiv 1 \pmod p.
\end{equation*}
Equating powers of $(-1)$ gives
$$ \sum_{1\leq k < \frac{p-1}{2}} (k+1) = \frac{(p-3)(p+3)}{8} \equiv 0 \pmod 2,$$
whence, since $p\equiv 1 \pmod 4$, one may conclude that $p \equiv 5 \pmod 8.$
The conditions in (\ref{con}) are a little more subtle. Consider a polynomial $F(x) = x^{n} + a_{1} x^{n-1} + \ldots + a_{0}$ with integral coefficients and discriminant $D$. A theorem by Stickelberger (see, e.g.\ \cite[p.\ 249]{Dickson}) gives $\left( \frac{D}{p}\right) = (-1)^{n - \nu}$, where $\nu$ is the number of factors of $F(x)$ that are irreducible modulo $p$.
Consider the two congruences
$$x(x+1) -1 \equiv 0 \pmod p, \quad x(x+1)(x+2) -1 \equiv 0 \pmod p,$$
the polynomials in which have discriminants 5 and $-23$.
For the former, if $\left(\frac{5}{p}\right) = 1$, then, by Stickelberger's theorem, there are two irreducible factors, whence the congruence factors and has a solution. Therefore $(x+1)! \equiv (x-1)! \pmod p$ and $p$ is not a socialist prime.
Likewise for the latter: if $\left(\frac{-23}{p}\right) = -1$ then there are two irreducible factors, whence $(x+2)! \equiv (x-1)! \pmod p$.
One cannot continue down this path directly. Consider $x(x+1)(x+2)(x+3) -1 \equiv 0 \pmod p$ which has a solution if and only if $y(y+2) -1 \equiv 0 \pmod p$ has a solution, where $y = x(x+3)$. Hence $(y+1)^{2} \equiv 2 \pmod p$, which implies 2 is a quadratic residue modulo $p$ --- a contradiction since $p \equiv 5 \pmod 8$.
Instead one can consider the congruence
$$x(x+1)(x+2)(x+3)(x+4)(x+5) -1 \equiv 0 \pmod p,$$
which is soluble precisely when $y(y+4)(y+6) -1\equiv 0 \pmod p$ is soluble, where $y = x(x+5)$. The cubic congruence in $y$ has discriminant $1957$, whence, by Stickelberger's theorem, if $\left(\frac{1957}{p}\right) = -1$ then $y(y+4)(y+6)$ has a linear factor. To deduce that $(x+5)! \equiv (x-1)! \pmod p$ we need to know that
$y \equiv x(x+5) \pmod p$ is soluble, that is, we need to know that $4y + 25$ is a quadratic residue modulo $p$. We can therefore add a condition to (\ref{con}), namely, a necessary condition that $p$ be a socialist prime is
\begin{equation}\label{extrac}
\begin{split}
\left(\frac{1957}{p}\right) &= 1, \quad \textrm{or}\\
\left(\frac{1957}{p}\right) &= -1 \quad \& \quad \left(\frac{4y + 25}{p}\right) = -1,\\
&\qquad\qquad\qquad\textrm{for all $y$ satisfying}\quad y(y+4)(y+6) -1 \equiv 0 \pmod p
\end{split}
\end{equation}
\section{Computation and conclusion}
Rokowska and Schinzel showed that the only primes $5<p<1000$ satisfying $p \equiv 5 \pmod 8$ and (\ref{con}) were
$$13, 173, 197, 277, 317, 397, 653, 853, 877, 997.$$
Using Jacobi's \textit{Canon arithmeticus} they showed that for each prime there existed $1<k<j\leq p-1$ for which $k! \equiv j! \pmod p$.
I am grateful to Dr David Harvey who extended this to show that there are no socialist primes less than $10^{6}$. This computation took 45 minutes on a 1.7 GHz Intel Core i7 machine. Professor Tom\'{a}s Oliveira e Silva extended this to $p< 10^{9}$, a calculation which took 3 days.
The following example shows the utility of adding the condition (\ref{extrac}). Using the conditions $p\equiv 5 \pmod 8$ and (\ref{con}), it is easy to check that there are at most $4908$ candidate primes up to $10^{6}$. These need to be checked to see whether there are values of $k$ and $j$ for which $k! \equiv j! \pmod p$. Including the condition (\ref{extrac}) reduces this to at most $3662$ candidates up to $10^{6}$ that need to be checked.
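The following Python sketch reproduces a much smaller version of these
computations: it applies the conditions $p\equiv 5\pmod 8$ and (\ref{con}) to
thin the primes and then runs the direct factorial test. The bound $10^{5}$ is
chosen only so that the run finishes quickly, and \texttt{sympy} is used only
to generate primes; this is an illustration of the method, not the computation
reported above.
\begin{verbatim}
from sympy import primerange

def legendre(a, p):
    # Euler's criterion: 1 if a is a QR mod p, p-1 if not, 0 if p | a
    return pow(a % p, (p - 1) // 2, p)

def is_socialist(p):
    # direct test: are 2!, 3!, ..., (p-1)! pairwise distinct mod p?
    seen = bytearray(p)
    f = 1
    for k in range(2, p):
        f = f * k % p
        if seen[f]:
            return False
        seen[f] = 1
    return True

bound = 10**5
candidates = [p for p in primerange(7, bound)
              if p % 8 == 5 and legendre(5, p) == p - 1 and legendre(-23, p) == 1]
print(len(candidates), "candidates below", bound)
print([p for p in candidates if is_socialist(p)])   # expected: []
\end{verbatim}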
To extend the range of computation beyond $10^{9}$ it would be desirable to add another condition arising from a suitable congruence. The congruence leading to (\ref{extrac}) was of degree 6; no other suitable congruence was found for degrees 8 and~9.
In \cite{BanksLucaSS} the authors consider $F(p)$ defined to be the number of distinct residue classes modulo $p$ that are not contained in the sequence $1!, 2!, 3!, \ldots$. They show that $\limsup_{p\rightarrow\infty} F(p) = \infty$; for the problem involving socialist primes one wishes to show that $F(p)= 2$ never occurs. It would therefore be of interest to study small values of $F(p)$.
Finally, one may examine the problem na\"{\i}vely as follows. Ignore the conditions $p \equiv 5 \pmod 8$ and (\ref{con}) --- including these only reduces the likelihood of there being socialist primes. For $2\leq k \neq j \leq p-2$ we want $p\nmid j! - k!$. There are $\binom{p-3}{2} = (p-3)(p-4)/2$ admissible values of $(k, j)$. Assuming, speciously, that the probability that $p$ does not divide $N$ `random' integers is $(1-1/p)^{N}$ one concludes that the probability of finding a socialist prime is
\begin{equation*}
\left( 1 - \frac{1}{p}\right)^{\frac{(p-3)(p-4)}{2}} \rightarrow e^{\frac{(7-p)}{2}},
\end{equation*}
for large $p$.
Given this estimate, and the computational data, it seems reasonable to conjecture that there are no socialist primes.
\end{document}
\begin{document}
\title {On Triple Lines and Cubic Curves\\ --- the Orchard Problem revisited}
\author{Gy\"orgy Elekes ~and Endre Szab\'o}
\address{Gy\"orgy Elekes\\E\"otv\"os University, Budapest}
\email{[email protected]}
\thanks{Elekes is partially supported by HU-NSF grants
OTKA T014105 T014302 and T019367}
\address{Endre Szab\'o\\
R\'enyi Institute of the Hungarian Academy of Sciences}
\email{[email protected]}
\thanks{Szab\'o is partially supported by
OTKA grants \EGYIK~and \MASIK}
\begin{abstract}
Planar point sets with many triple lines
(which contain at least three distinct points of the set)
have been studied for 180 years, starting with Jackson
\cite{Jackson:1821} and followed by Sylvester \cite{Sylvester:2473}.
Green and Tao~\cite{Green-Tao:sets-with-few-lines:2012}
have shown recently that
the maximum possible number of triple lines for an $n$ element set is
$\lfloor n(n - 3)/6\rfloor + 1$.
Here we address the related problem of describing the structure of the
\emph{asymptotically near-optimal\/} configurations, i.e., of those
for which the number of straight lines, which go through three or more points,
has a quadratic (i.e., best possible) order of magnitude.
We pose the problem whether such point sets must always be related to cubic
curves.
To support this conjecture we settle various special cases; some of them
(Theorems \ref{thm:NoFourOnAlgebraicThm} and \ref{thm:FourInALineThm})
are also related to the four-in-a-line problem of Erd\H{o}s.
\end{abstract}
\maketitle
\section {Introduction}
Given $n$ points in the plane $\BR^2$,
a line is \emph{$3$-rich}, if it contains precisely $3$ of the given points.
One of the oldest problems of combinatorial geometry,
the so-called Orchard Problem,
is to maximise the number of $3$-rich lines
(see Jackson~\cite{Jackson:1821} and
Sylvester~\cite{Sylvester:2473}).
Sylvester showed that the maximum number of $3$-rich lines is $n^2/6 + \CO(n)$,
and recently
Green and Tao~\cite{Green-Tao:sets-with-few-lines:2012}
have found the precise value of the maximum.
\begin{thm}[Orchard Problem. Green--Tao]
\label{thm:orch-probl-Green-Tao}
Suppose that $\CH$ is a finite set of $n$ points in the plane.
Suppose that $n\ge n_0$ for some sufficiently large absolute
constant $n_0$. Then there are no more than
$\big\lfloor n(n - 3)/6\big\rfloor + 1$
lines that are $3$-rich, that is, they contain precisely $3$ points
of $\CH$.
\end{thm}
Here we address the related problem of describing the structure of the
\emph{asymptotically near-optimal\/} configurations, i.e., of those
for which the number of straight lines, which go through three or more points,
has a quadratic (i.e., best possible) order of magnitude.
\begin{defn}
Let $\CH$ be a subset of the plane $\BR^2$.
A straight line $l$ is called a {\sl triple line} with respect to
$\CH$ if there exist three distinct points
$P_1, P_2, P_3\in l \cap \CH$.
We shall also use the notation
$$
\harompt{\CH} \buildrel{\scriptstyle\rm def}\over=\; \{l\ ;\ \size{l \cap \CH}\ge 3 \}.
$$
We extended the notion of \emph{triple line},
without any change in the definition,
to subsets of the projective plane.
\end{defn}
Note that $\harompt{\CH}$ is a set of {\sl lines}, not
a set of triples; e.g.\ if $\CH$ is a collinear set of 3 or more points
then $|\harompt{\CH}|=1$.
Triple lines are not necessarily $3$-rich (as they may be $4$-rich,
$5$-rich, and so on), hence
\fref{thm:orch-probl-Green-Tao} does not directly bound the size of
$\harompt{\CH}$.
In any case, it is easy to find a (non-sharp) quadratic upper bound.
Indeed, each line with three or more points contains at least three of the
${n \choose 2}$ segments which connect pairs of points of $\CH$,
hence
$$
\big|\harompt{\CH}\big|\le {1 \over 3}{n \choose 2} = {n^2 \over 6}- n/6.
$$
The following examples show four simple configurations for which the
quadratic order of magnitude can really be attained.
Two of them consist of three collinear point sets each, the third one
is located on a conic and a straight line, while the fourth one on a cubic.
\begin{ex}
\label{ex:FirstExa}
If $\CH_1$, $\CH_2$, $\CH_3$ are three copies of an arithmetic progression on
three equidistant parallel lines then $|\harompt{\CH_1\CH_2\CH_3}|\approx
N^2/18$, where $N$ denotes the total number of points and
$\harompt{\CH_1\CH_2\CH_3}$ denotes the set of lines $l$ such that
there exist three distinct points $P_i\in{l \cap \CH_i}$ for $i=1,2,3$. \\
(It is slightly better to place a point set of ``double density''
on the middle line.)
\end{ex}
\begin{ex}
\label{ex:SecondExa}
Let $P_1,P_2,P_3$ be the vertices of a non--degenerate triangle, and
$\CH_i$ ($i=1,2,3$) point sets on the line through
the vertices $P_{i-1}$ and $P_{i+1}$, defined by
\begin{equation}
\CH_i=\Bigl\{X\ ;\
\frac{\overline{P_{i-1}X}}{\overline{XP_{i+1}}}\in
\{\pm1,\pm2^{\pm1},\pm4^{\pm1},\ldots,\pm2^{\pm (n-1)}\}\Bigr\},
\label{eq:GEPTriple}
\end{equation}
where $i\pm1$ is used mod 3 in the indices of the $P_i$.
(See \fref{fig:TriangleFig}.)
\begin{figure}
\caption{Portion of a triangular configuration with some triple lines marked.}
\label{fig:TriangleFig}
\end{figure}
\par\noindent
Here again $|\harompt{\CH_1\CH_2\CH_3}|\approx
N^2/18$, where $N$ denotes the total number of points.\\
(The observant reader may have noticed that we allowed $(-1)$ among the ratios,
i.e., $X$ may be a point at infinity.)
\end{ex}
\begin{ex}
\label{ex:ThirdExa}
The $n\choose2$ segments which connect pairs of vertices of a regular $n$--gon
$C$ only determine $n$ distinct slopes. Let $D$ be the set of points on the
line at infinity which correspond to these directions. Then
$|\ketpt{C}\egypt{D}|\approx N^2/8$, where $N=|C\cup D|=2n$ and
$\ketpt{C}\egypt{D}$ stands for $\harompt{CCD}$.
\end{ex}
\begin{ex}
\label{ex:FourthExa}
The point set $\CH=\{(i,i^3)\ ;\ i=-n,\ldots,n\}$ on the curve $y=x^3$
satisfies $|\harompt{\CH}|\approx N^2/8$, where $N=2n+1$.
This can easily be demonstrated by
making use of the fact that three points $(a,a^3)$, $(b,b^3)$ and $(c,c^3)$
are collinear iff $a+b+c=0$.
\end{ex}
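This count is easy to verify by brute force: a line meets the cubic $y=x^{3}$
in at most three points, so the triple lines of $\CH$ correspond exactly to the
unordered triples $\{a,b,c\}\subseteq\{-n,\dots,n\}$ of distinct elements with
$a+b+c=0$. A short illustrative computation:
\begin{verbatim}
from itertools import combinations

def triple_lines(n):
    pts = range(-n, n + 1)
    return sum(1 for a, b, c in combinations(pts, 3) if a + b + c == 0)

for n in (10, 20, 40):
    N = 2 * n + 1
    print(n, triple_lines(n), N * N / 8)   # the two counts agree up to O(N)
\end{verbatim}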
\par
The goal of this paper
is to show that point sets with many triple lines are,
from several points of view, closely related to cubics.
\section {Problems and results}
\subsection*{A conjecture}
Since all the above examples with a quadratic order of magnitude of the triple
lines involve cubic curves (some of which are degenerate), it is natural to
believe the following.
\begin{conj}\label{conj:TenConj}
If $|\harompt{\CH}| \ge c\size{\CH}^2 $
then ten or more points of $\CH$ lie on a (possibly degenerate)
cubic, provided that $\size{\CH}>n_0(c)$.\\ \null \\
\end{conj}
Here the ``magic number'' 10 is the least non-trivial value since any nine
points of $\BR^2$ lie on a cubic. Perhaps even a stronger version may hold:
{\sl
for every $c>0$ and positive integer $k$ there exist $c^*=c^*(c,k)>0$ and
$n_0=n_0(c,k)$, such that, if
$\;|\harompt{\CH}| \ge c\size{\CH}^2 $
then there is a con-cubic $\CH^*\subset\CH$ with $\size{\CH^*}\ge k$ and
$|\harompt{\CH^*}| \ge c^*\size{\CH^*}^2 $,
provided that $\size{\CH}\ge n_0$.
}
\par\noindent
It is very likely that in place of $k$ above,
even $c^*\size{\CH}^{\alpha}$ con-cubic points exist
(for some $c^*=c^*(c)>0$ and $\alpha=\alpha(c)>0$).
An example with only $O(\sqrt{\size{\CH}})$ such points is a
$k\times k$ square or parallelogram lattice where the points of three parallel
lines provide the set located on a (degenerate) cubic.
Similarly,
projections of $d$~dimensional cube lattices to $\BR^2$
form structures with only $O(\size{\CH}^{1/d})$ con-cubic points.
\par
Moreover, if we assume that $\CH$ has no four--in--a--line and
$|\harompt{\CH}| \ge c\size{\CH}^2 $, then perhaps as
many as $c^*\size{\CH}$ of its points will lie on an irreducible cubic.
\subsection*{Results}
In order to support the above conjecture, we
settle various special cases in the affirmative.
Our main result is the following.
\begin{thm}\label{thm:CubicThm}
In $\BR^2$,
if an irreducible algebraic curve of degree $d$
contains a set $\CH$
of $n$ points with $|\harompt{\CH}|\ge cn^2$ then the curve is a cubic ---
provided that $n>n_0(c,d)$.
\end{thm}
Two simple applications of the forthcoming slightly more general
\fref{thm:ThreeCurveThm} are the following.
\begin{thm}\label{thm:NoFourOnAlgebraicThm}
In $\BR^2$, no irreducible algebraic curve of degree $d$
can accommodate $n$ points with
$cn^2$ \emph{quadruple} lines if $n>n_0(c,d)$.
\end{thm}
\begin{thm}\label{thm:FewDirectionsThm}
In $\BR^2$, if a set of $n$ points
located on an irreducible algebraic curve of degree $d$
only determines $Cn$ distinct directions
then the curve is a conic --- provided that $n>n_0(d,C)$.
\end{thm}
The above theorems are of algebraic geometric nature,
therefore it is natural to ask analogous questions in complex geometry
(i.e. when the point set and the algebraic curves live in $\BC^2$).
However, in this paper
we restrict our attention to the real plane $\BR^2$.
In some other results (see \fref{sec:ConicSection}) we allow part of the
points (a positive proportion) to be arbitrary and only restrict the rest of
them to a conic. In this case it will turn out that
a large subset of the first
part must be collinear. (Here again, the conic and the straight line,
together, form a degenerate cubic.) The following is the essence of Theorems
\ref{thm:DegenerateConicThm} and \ref{thm:NonDegenerateConicThm}.
\result{Let $\CH=\CH_1\cup\CH_2$ and assume that $\CH_1$ lies on a (possibly
degenerate) conic $\Gamma$ while $\CH_2\cap\Gamma=\emptyset$.
If $n\le|\CH_1|,|\CH_2|\le Cn$ and $|\ketpt{\CH_1}\egypt{\CH_2}|\ge cn^2$ then
some $c^*n$ points of $\CH_2$ are collinear. (Here $c^*=c^*(c,C)$ does not
depend on $n$.)}
\noindent
We also mention a theorem of Jamison \cite{Ja:84} which can be considered
as another result in the direction of our \fref{conj:TenConj}:
if the diagonals and sides of a convex $n$--gon only determine
$n$ distinct slopes (which is smallest possible), then the vertices of the
polygon all lie on an ellipse. In terms of triple lines (and a degenerate
cubic formed by a straight line and an ellipse) this can be formulated
as follows:
\result{(Jamison's Theorem) if\/ $\CH_1$
is the vertex set of a convex polygon
and $\CH_2$
lies on the line at infinity
with $|\CH_1|=|\CH_2|=n$ then
$|\ketpt{\CH_1}\egypt{\CH_2}|={n \choose 2}$
implies that $\CH_1$ lies on an ellipse.
}
\noindent
A similar statement was proven by Wettl \cite{We:87} for finite projective
planes.
\par\noindent
\subsection*{The structure of the paper}
The aforementioned results
(usually in stronger form) are presented in detail in the last two sections.
Before that, we list some basic facts on the relation between continuous
curves, collinearity and Abelian groups, concluding with the fundamental
observation \fref{lem:MainLemma}.
\section{Collinearity and groups}\label{sec:CurveSection}
\subsection*{Collinearity on cubics}
\begin{defn}
Let $\Gamma_1$, $\Gamma_2$, $\Gamma_3$ be three (not necessarily
distinct) Jordan curves (i.e., bijective continuous images of an
interval or a circle)
in the projective plane,
and $\Agroup$ an Abelian topological group.
We say that \emph{collinearity between $\Gamma_1$, $\Gamma_2$ and $\Gamma_3$
can be described by the group operation $\oplus$}, if, for $i=1,2,3$,
there are homeomorphic monomorphisms (i.e., continuous injections
whose inverses are also continuous)
\[
f_i: \Gamma_i \rightarrow \CA
\]
--- in other words, ``parametrisations'' of the $\Gamma_i$ by $\CA$ ---
such that three distinct points $P_1 \in \Gamma_1$, $P_2 \in \Gamma_2$, $P_3 \in \Gamma_3$
are collinear if and only if
\[
f_1(P_1) \oplus f_2(P_2) \oplus f_3 (P_3) = 0 \in \CA.
\]
\end{defn}
The curves we consider will usually be irreducible components of algebraic
curves in $\BR^2$ --- or subsets thereof.
However, sometimes we must study general
continuous curves as well.
\par
In what follows we denote the set of regular points of an algebraic curve
$\Gamma$ by $\text{Reg}(\Gamma)$. The connected components of $\text{Reg}(\Gamma)$
are Jordan curves.
\begin{prop}\label{prop:CollinearityOnCubics}
Let $\CC$ be a cubic curve in the projective plane.
If $\Gamma_1$, $\Gamma_2$, $\Gamma_3$ are (not necessarily distinct)
connected components of $\text{Reg}(\CC)$,
then collinearity between them
can be described by a commutative group operation
--- unless two of the $\Gamma_i$ are identical straight lines.
\end{prop}
Indeed, for reducible cubics,
Figures~\ref{fig:ConicFig} and \ref{fig:ThreeLinesFig} show
appropriate parametrisations in the real plane. (Any other reducible cubic
is projectively equivalent to one of these.)
The groups used are
\mbox{$\group{\BR}{+}/2\pi\BZ$}, \mbox{$\group{\BR}{+}$},
\mbox{$\group{\BR\setminus\{0\}}{\;\cdot\;}$}
in \fref{fig:ConicFig}
and \mbox{$\group{\BR}{+}$}, \mbox{$\group{\BR\setminus\{0\}}{\;\cdot\;}$}
in \fref{fig:ThreeLinesFig}, respectively.
If $\Gamma_1=\Gamma_2=\Gamma_3=\CC=\{(x,x^3)\;;\;x\in\BR\}$ then the
parametrisation $f(x,x^3)=x$ works well.
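To see this directly, note that three distinct points $(a,a^3)$, $(b,b^3)$, $(c,c^3)$
of this cubic lie on a line $y=mx+k$ exactly when $a$, $b$ and $c$ are the roots
of $x^3-mx-k=0$; as this polynomial has no quadratic term, its roots sum to zero,
so the three points are collinear if and only if $a+b+c=0$.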
It is also well-known that for irreducible cubics
(i.e. elliptic curves),
suitable parametrisations exist (see, e.g., \cite{Reid:UAG}).
\par
\begin{figure}
\caption{Parametrisation of reducible cubics: a conic plus the line at
infinity. (Due to lack of sufficient space the line at infinity is
depicted as a bent curve.)}
\label{fig:ConicFig}
\end{figure}
\begin{figure}
\caption{Parametrisation of reducible cubics: three straight lines.}
\label{fig:ThreeLinesFig}
\end{figure}
\begin{rem}
Note that in all cases only regular points are parametrised. This will cause
no confusion since singular (e.g., multiple) points of a cubic never occur in
proper collinear triples.
\end{rem}
\subsection*{Collinearity on continuous curves}
Throughout this section we consider the graphs of three continuous real
functions.
\begin{defn}
\label{defn:ssocrfDef}
We call $\alpha$, $\beta$ and $\gamma$ a
\emph{standard system of continuous real functions} if
\begin{enumerate}[(i)]
\item
they are defined in a neighbourhood $\CD$ of 0;
\item
$\alpha(x)<\beta(x)<\gamma(x)$ for all $x\in\CD$;
\item
any straight line through
any point of the graph of any of the three functions intersects the other two
graphs in at most one point each.
\end{enumerate}
For such functions $\alpha$, $\beta$ and $\gamma$
we denote their graphs (which are Jordan arcs)
by $\overline\alpha$, $\overline\beta$ and $\overline\gamma$.
\end{defn}
\begin{rem}\label{rem:DerivativeRem}
Assumption (iii) is not a very strong requirement;
e.g., if the functions are differentiable at 0 (elsewhere they need not even
be smooth) then $\CD$ can be restricted to a sufficiently small
neighbourhood of 0 so that (iii) is satisfied there.
\end{rem}
\begin{prop}\label{prop:ContinuityPro}
Let $P(x,\beta(x))$ be a point of the ``middle'' graph $\overline\beta$.
Connect it with lines to the two
points $A_0(0,\alpha(0))$ and $C_0(0,\gamma(0))$; moreover, denote by $C(P)$
and $A(P)$ the points of intersection of these lines with the graphs
$\overline\gamma$ and $\overline\alpha$, respectively (if they exist).
Finally, let $B(P)$ be the intersection of the line through $A(P)$ and $C(P)$
with the graph $\overline\beta$. Then
\begin{enumerate}[(i)]
\item
if $x$ is sufficiently close to 0 then $A(P)$, $B(P)$ and $C(P)$ really exist;
and the composite mappings
\[
x\;\mapsto\; P=P(x,\beta(x)) \; \mapsto \ \begin{cases}
A(P) \text{ or}\\
B(P) \text{ or}\\
C(P) \\
\end{cases}
\]
are continuous functions $\BR\rightarrow\BR^2$;
\item
for every point $\hat B$ of the graph $\overline\beta$,
sufficiently close to the $y$--axis, there is a $P$ for which
$\hat B = B(P)$.
\end{enumerate}
\end{prop}
The straightforward proof, using elementary calculus --- together with the
Intermediate Value Theorem for (ii) --- is left to the reader.
\emph{Proof \/}end
\par\par\noindent
Next we shall study when collinearity between
$\overline\alpha$, $\overline\beta$ and $\overline\gamma$
can be described by an Abelian topological group $\CA$;
to this end we will search for parametrisations
$f_\alpha:\overline\alpha\to\CA$,
$f_\beta:\overline\beta\to\CA$ and
$f_\gamma:\overline\gamma\to\CA$.
Part (iii) of \fref{defn:ssocrfDef} also implies that the curves
$\overline\alpha$, $\overline\beta$ and $\overline\gamma$
must be pairwise disjoint.
That is why, in what follows, we shall only use one notation
$$
f:=(f_\alpha\cup f_\beta\cup f_\gamma)\;:\;
(\overline\alpha\cup\overline\beta\cup\overline\gamma)
\rightarrow \CA
$$
in place of three.
\begin{lem}[``Parameter--halving lemma'']\label{lem:ParameterHalvingLemma}
Let $\alpha$, $\beta$ and $\gamma$ form a standard system of continuous real functions. Moreover, let
$B_0=(0,\beta(0))$ and $A_0$, $C_0$, $P$, $A=A(P)$, $B=B(P)$ and $C=C(P)$
be as above.
Assume that collinearity between the three graphs is described by
a group operation
$\group{\CA}{\oplus}$ and mapping (parametrisation) $f$.
Then
\begin{enumerate}[(i)]
\item
if
\[
\begin{aligned}
f(P)&=f(B_0) \oplus p \text{\quad and }\cr
f(B)&=f(B_0) \oplus b
\end{aligned}
\]
then $p=b/2$, i.e., $b=p\oplus p$.
\item
if $B$ is sufficiently close to $B_0$ then there really exists a $P$ for which
$f(P)=f(B_0) \oplus b/2$.
\end{enumerate}
\end{lem}
\emph{Proof \/}
(i) Note that
$$
f(A_0) \oplus f(B_0) \oplus f(C_0) = 0 \in \CA.
$$
Moreover, the collinearity of the triples $C_0PA$ and $CPA_0$
implies
\[
\begin{aligned}
f(A)&=f(A_0)\ominus p;\\
f(C)&=f(C_0)\ominus p,
\end{aligned}
\]
respectively; therefore
\[
\begin{aligned}
f(B)&= p \oplus p \ominus f(A_0)\ominus f(C_0) = \\
&= p \oplus p \oplus f(B_0),
\end{aligned}
\]
whence the required identity.
\par
(ii) is obvious from \fref{prop:ContinuityPro}(ii).
\emph{Proof \/}end
\subsection*{A fundamental lemma}
The forthcoming \fref{lem:MainLemma}
will work as our first tool for proving
\fref{thm:CubicThm} and the slightly more general
\fref{thm:ThreeCurveThm}.
The basic idea is to use the well-known construction
of the group structure on cubics.
If we know a few points on a cubic,
then just by drawing specific lines and marking specific
intersection points we can construct infinitely many new points on that cubic.
The essence of the following statement is that only on
cubics can Abelian groups describe collinearity.
\begin{lem}\label{lem:MainLemma}
Let $\alpha$, $\beta$, $\gamma$ be a standard system of continuous
functions defined in a neighbourhood of 0.
Assume that collinearity between the three graphs is described by
a group operation.
Then their union
$\overline\alpha\cup\overline\beta\cup\overline\gamma$
is contained in a (possibly reducible) cubic.
\end{lem}
For the proof we need certain special structures; they will be the topic of
the next subsection.
The proof itself comes then in the subsection afterwards.
\subsection*{Ten point configurations and cantilevers}
Two types of point-line configurations will play special roles in what
follows. The first one consists of ten points and a certain structure of
triple lines while the latter will extend the former one.
Given $\overline\alpha$, $\overline\beta$, $\overline\gamma$
as in \fref{lem:MainLemma}, we define ten
point configurations as follows.
Denote, again, by $A_0$, $B_0$ and $C_0$ the points of intersection of the
$y$--axis with the three graphs, respectively.
Choose $B_1$ on $\overline\beta$ sufficiently close to $B_0$
in order to make sure that all the forthcoming points exist.
(This will be described later in more detail.)
Let $A_1$ (resp.~$C_1$) be the point of
intersection of $\overline\alpha$ with the line through $B_1$ and $C_0$
(resp. that of $\overline\gamma$ with the line through $B_1$ and $A_0$).
Define $B_2$ to be the point of intersection of $\overline\beta$ with the
line through $A_1$ and $C_1$.
Let $A_2$ (resp.~$C_2$) be the point of
intersection of $\overline\alpha$
with the line through $B_2$ and $C_0$
(resp. that of $\overline\gamma$
with the line through $B_2$ and $A_0$).
\par
The definition of $B_3$ is asymmetric: it will be the intersection of
$\overline\beta$
with the line through $A_1$ and $C_2$. Finally, $B_4$ is, again, defined in a
symmetric manner: the intersection of $\overline\beta$
with the line through $A_2$ and $C_2$
(see \fref{fig:ElevenPoints}).
Note that by iterated application of
\fref{prop:ContinuityPro}, the rest
of the points will all exist if $B_1$ is close enough to $B_0$.
\begin{figure}
\caption{The straight line $A_2B_3C_1$ is not used in the definition of the
points.}
\label{fig:ElevenPoints}
\end{figure}
\par
The observant reader may have noticed that we defined eleven points
altogether (instead of just ten).
However, $B_0$ will NOT be in our configuration.
\begin{defn}
\label{defn:TenPtsDef}
Given $\overline\alpha$, $\overline\beta$, $\overline\gamma$
as in \fref{lem:MainLemma}, we call the above
$$\langle A_0,A_1,A_2,B_1,B_2,B_3,B_4,C_0,C_1,C_2\rangle$$
a \emph{ten point configuration}\/ defined by $B_1$.
\end{defn}
\begin{prop}\label{prop:TenPointStructurePro}
If $\alpha$, $\beta$, $\gamma$
is a standard system of continuous real functions and
collinearity between their graphs
is described by $\Agroup$ and mapping $f$ then
\begin{enumerate}[(i)]
\item
$A_2$, $B_3$ and $C_1$ are collinear.
\item
More generally, $A_i$, $B_j$ and $C_k$ are collinear iff $i+k=j$.
\item
There is a
$\Delta\in\CA$ such that $f(A_i)=f(A_0)\ominus i\Delta$,
$f(B_i)=f(B_0)\oplus i\Delta$, and
$f(C_i)=f(C_0)\ominus i\Delta$.
\end{enumerate}
\end{prop}
\emph{Proof \/}
Indeed, the collinearities required in statement (ii) --- with the exception of the one in (i) --- hold by the definition of the configuration.
For $\Delta\buildrel{\scriptstyle\rm def}\over=\; f(B_1)\ominus f(B_0)$, this implies statement (iii)
by group identities. Finally, (i) follows from (iii), using
$f(A_0) \oplus f(B_0) \oplus f(C_0)=0$, which, together with (iii), implies
$f(A_2) \oplus f(B_3) \oplus f(C_1)=0$.
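In detail, using the formulas in (iii),
$$
f(A_2) \oplus f(B_3) \oplus f(C_1) =
\bigl(f(A_0)\ominus 2\Delta\bigr) \oplus \bigl(f(B_0)\oplus 3\Delta\bigr)
\oplus \bigl(f(C_0)\ominus \Delta\bigr) =
f(A_0) \oplus f(B_0) \oplus f(C_0) = 0.
$$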
\emph{Proof \/}end
\begin{lem}[Ten point Lemma]\label{lem:TenPointLemma}
Let $\overline\alpha$, $\overline\beta$, $\overline\gamma$ be
as in \fref{lem:MainLemma}. Assume, moreover, that a ten point
configuration defined on them is contained in two (possibly reducible) cubics
$\CC_1$ and $\CC_2$. Then $\CC_1=\CC_2$.
\end{lem}
\emph{Proof \/}
According to the definition of a standard system of continuous functions,
if a straight line $l$ contains two points of any of the three graphs then $l$
is disjoint from the other two. This leaves us three possibilities for a cubic
$\CC_j$ ($j=1,2$):
\begin{enumerate}[Type 1.]
\item
three straight lines, one through the $A_i$, one through the $B_i$, and one through the $C_i$;
\item
a straight line through all (three or four) points of one of the graphs and a
non-degenerate conic through the rest of them;
\item
an irreducible cubic through all the points.
\end{enumerate}
According to B\'ezout's Theorem \cite{Fulton:AlgCurves}, two distinct
irreducible algebraic curves of degree $k$ and $m$, respectively, can only
intersect in at most $k m$ points. This immediately implies the Lemma.
Indeed, if we assume $\CC_1\ne\CC_2$ for a contradiction, then e.g., if
$\CC_1$ is of type 2 and $\CC_2$ of type 3 then either $\CC_2$ and a
straight line component of $\CC_1$ intersect in four or more points, or
$\CC_2$ and a conic component of $\CC_1$ intersect in seven or more points
--- a contradiction anyway. (The other pairs of types are easier.)
\emph{Proof \/}end
\begin{lem}[Nine Point Lemma]\label{lem:NinePointLemma}
Let $\overline\alpha$, $\overline\beta$, $\overline\gamma$ be
as in \fref{lem:MainLemma},
and consider a ten point configuration on them.
If a (possibly reducible) cubic $\CC$ contains,
with the exception of $B_3$, the
other nine points, then it must also contain $B_3$.
Moreover, all ten points must belong to $\text{Reg}(\CC)$.
\end{lem}
\emph{Proof \/}
Define $\delta\buildrel{\scriptstyle\rm def}\over=\; f(A_0)\oplus f(B_1) \oplus f(C_0)\in\CA$.
Then $\delta\ne0$ since $A_0$, $B_1$ and $C_0$ are not collinear.
Note that $\delta=f(B_1)\ominus f(B_0)$, since $A_0$, $B_0$ and $C_0$ all lie on
the $y$--axis and are therefore collinear.
Which point $X\in\overline\beta$ satisfies $f(X)=f(B_0)\oplus3\delta$?
According to \fref{prop:TenPointStructurePro}, it must be $B_3$, the
point of intersection of the two straight lines $\overline{C_1A_2}$ and
$\overline{C_2A_1}$.
Finally,
lines passing through a singular point $P\in\CC$, if it has any,
may contain at most two points of $\CC$,
so the lines in our ten point configuration may not pass through $P$.
In particular, $P$ cannot belong to a ten point configuration.
\emph{Proof \/}end
\begin{rem}
Note that Lemmas \ref{lem:TenPointLemma} and \ref{lem:NinePointLemma}
also imply
that two cubics must coincide if they both contain the nine points (with
the exception of $B_3$). However, we shall not need this fact.
\end{rem}
\par\noindent
Now we extend ten point configurations to what we call
\emph{``cantilevers''}.\footnote
{Cantilever [noun]: a projecting beam or structure supported
only at one end. (The Merriam--Webster Dictionary).}
\\
(We hope that the shape of these structures will really justify this
non-conven\-tional notion.)
Starting from a ten point configuration on $\alpha$, $\beta$, $\gamma$, we
proceed recursively as follows.
Assume that $B_i$ and $B_{i+1}$ have already been defined for an $i\ge 3$.
Then let $C_i$ be the intersection of the lines $\overline{A_0B_i}$ and
$\overline{A_1B_{i+1}}$ while
$A_i$ the intersection of the lines $\overline{C_0B_i}$ and
$\overline{C_1B_{i+1}}$. Finally, define $B_{i+2}$ to be the
intersection of $\overline{A_2C_i}$ and $\overline{C_2A_i}$.
(See \fref{fig:FourteenPoints}.)
It is important to note that the construction of cantilevers
uses only the ten points, and does not depend on the three curves.
\begin{figure}\label{fig:FourteenPoints}
\end{figure}
\begin{rem}
Formally, here we work in the projective plane and even allow points of
intersection located on the line at infinity. However, whenever we apply this
construction, all points will lie on the curves
$\overline\alpha$, $\overline\beta$, and $\overline\gamma$.
\end{rem}
\begin{lem} \label{lem:CantileverOnCurves}
If the straight lines $\overline{A_0B_i}$ and $\overline{A_1B_{i+1}}$
intersect $\overline\gamma$ then this must happen at $C_i$, and similarly
for $\overline{C_0B_i}$, $\overline{C_1B_{i+1}}$, $\overline\alpha$ and $A_i$.
Moreover, if the above intersections all exist (and coincide with
the $C_i$ and the $A_i$, respectively), then $B_{i+2}$ is located on
$\overline\beta$.
\end{lem}
\emph{Proof \/}
Denote by $X$ and $Y$ the points of intersection of $\overline\gamma$ with
$\overline{A_0B_i}$ and $\overline{A_1B_{i+1}}$, respectively. What is $f(X)$
then?
By \fref{prop:TenPointStructurePro},
\[
f(X)=\ominus f(A_0) \ominus f(B_i) =
\ominus f(A_0) \ominus f(B_0) \ominus i\Delta =
f(C_0) \ominus i\Delta.
\]
Similarly, $f(Y) = f(C_0) \ominus (i+1-1)\Delta =f(X)$, whence $X=Y$.
Therefore, also $C_i$ must coincide with these points.\\
A similar argument proves the statement on $B_{i+2}$, too, since in that case
the lines which define it must always intersect $\overline\beta$.
\emph{Proof \/}end
\begin{lem} \label{lem:CantileverOnCubics}
If a cubic $\CC$ contains the nine points
$A_0$, $A_1$, $A_2$, $B_1$, $B_2$, $B_4$, $C_0$, $C_1$, $C_2$
of a ten point configuration
then the entire cantilever (of infinite length)
built from this configuration
is contained in $\text{Reg}(\CC)$.
\end{lem}
\emph{Proof \/}
By \fref{lem:NinePointLemma} the entire ten point configuration
is contained in $\text{Reg}(\CC)$.
Let $\Gamma_1$, $\Gamma_2$, and $\Gamma_3$
denote the connected components of $\text{Reg}(\CC)$ containing
$A_0$, $B_1$, and $C_0$, respectively.
By \fref{prop:CollinearityOnCubics} the collinearity between the $\Gamma_i$
is described by a group operation,
let $f_1$, $f_2$, $f_3$ denote the parametrisations.
In this case (i.e. for cubics) all $f_i$ are bijections,
hence they have inverse functions.
Consider the group element $\Delta=f_3(C_0)\ominus f_3(C_1)$.
For all $n\ge 0$ we define the following points on $\CC$:
\begin{eqnarray*}
A_n' &=& f_1^{-1}\big(f_1(A_0)\ominus n\Delta\big)\\
B_n' &=& f_2^{-1}\big(f_2(B_1)\oplus (n-1)\Delta\big)\\
C_n' &=& f_3^{-1}\big(f_3(C_1)\ominus (n-1)\Delta\big)
\end{eqnarray*}
Plugging in $n=0$ and $n=1$ we obtain that
$$
A_0'=A_0,\quad
B_1'=B_1,\quad
C_0'=C_0,\quad
C_1'=C_1.
$$
By construction $A_0,B_1,C_1$ are collinear, hence
$f_1(A_0)\oplus f_2(B_1)\oplus f_3(C_1) =0$. This implies that
$$
f_1(A_i')\oplus f_2(B_j')\oplus f_3(C_k') =
\ominus i\Delta\oplus (j-1)\Delta\ominus(k-1)\Delta =
(j-i-k)\Delta
$$
hence $A_i',B_j',C_k'$ are collinear iff $i+k=j$.
Moreover, a line not contained in $\CC$ intersects it in at most three points,
counted with multiplicity; hence if two of the intersection points are regular
then all of them must be regular.
Apply this to the line $\overline{C_0B_1}=\overline{C_0'B_1'}$.
The third intersection point of this line with $\text{Reg}(\CC)$
must be $A_1$ by \fref{prop:TenPointStructurePro},
but above we proved it is $A_1'$.
Therefore $A_1'=A_1$.
Similarly, the third intersection point of the line
$\overline{A_1C_1}=\overline{A_1'C_1'}$ with $\text{Reg}(\CC)$
must be $B_2$ on the one hand, and $B_2'$ on the other hand,
which implies $B_2=B_2'$.
Finally apply the same argument to the lines
$\overline{C_0B_2}=\overline{C_0'B_2'}$
and $\overline{A_0B_2}=\overline{A_0'B_2'}$
to obtain that $A_2=A_2'$ and $C_2=C_2'$.
To prove the lemma it is enough to show that $A_n'=A_n$, $B_n'=B_n$
and $C_n'=C_n$ for all $n\ge1$.
We prove it by induction on $n$.
However, it is easier to do the
induction with a slightly stronger statement.
So we shall prove that
$$
A_n'=A_n \;,\quad
B_{n+1}'=B_{n+1} ,\quad
B_{n+2}'=B_{n+2} \;,\quad
C_{n}'=C_{n}
$$
for all $n\ge 0$.
For $n=0$ we have already seen this.
Assume now that it is true for $n$; we prove it for $n+1$.
Consider the intersection point of the lines
$\overline{C_0B_{n+1}}=\overline{C_0'B_{n+1}'}$
and $\overline{C_1B_{n+2}}=\overline{C_1'B_{n+2}'}$.
On the one hand it must be $A_{n+1}$,
on the other hand it is $A_{n+1}'$,
hence $A_{n+1}'=A_{n+1}$.
Similarly, the intersection point of the lines
$\overline{A_0B_{n+1}}=\overline{A_0'B_{n+1}'}$
and $\overline{A_1B_{n+2}}=\overline{A_1'B_{n+2}'}$
must be $C_{n+1}'=C_{n+1}$.
Finally, the intersection point of
$\overline{C_2A_{n+1}}=\overline{C_2'A_{n+1}'}$
and $\overline{A_2C_{n+1}}=\overline{A_2'C_{n+1}'}$
must be $B_{n+3}=B_{n+3}'$.
This completes the induction step.
\emph{Proof \/}end
\subsection*{Proof of \fref{lem:MainLemma}}
\label{subsec:MainLemmaProofSection}
It suffices to show that, for any $x_0$ in the (common) domain $\CD$ of the
functions $\alpha$, $\beta$, and $\gamma$, there exists a cubic $\CC$ which
contains the three graphs \emph{restricted to a sufficiently small
neighbourhood} of $x_0$.
Indeed, if we have such a neighbourhood (for each $x_0$) then it is possible to
extend any of them as follows.
Let $x_1\in\CD$ be one of the endpoints of this neighbourhood (interval)
and consider a cubic
$\CC_1$ which contains the graphs in a neighbourhood of $x_1$.
Within the intersection of the two intervals one can find a ten point
configuration contained in both $\CC$ and $\CC_1$. By the Ten Point Lemma
(\fref{lem:TenPointLemma}), $\CC=\CC_1$, i.e., we have a longer neighbourhood
of $x_0$. Thus the \emph{maximal\/} such neighbourhood must be $\CD$
itself.
\par
Now we find an appropriate cubic in a neighbourhood of (without loss of generality) $x_0=0$.
To start with, we select a ten point configuration, also include $B_0$,
and extend it to the
other side as follows. Start ``backwards'' from the collinear triple
$A_2$, $B_4$, $C_2$ and define (using $B_3$ in place of the original $B_1$)
a $5+9+5$ point cantilever --- with $A_0$, $B_0$ and $C_0$ in the
``middle''. We shall denote this structure by $\CH $.
\par
Define $B_{1/2}$ as in the Parameter Halving Lemma
(\fref{lem:ParameterHalvingLemma}) and, starting from
$A_0$, $B_0$ and $C_0$, using this $B_{1/2}$ as reference point,
define a cantilever with points $A_i$ ($i=0,\ldots,4$),
$B_i$ ($i=0,\ldots,8$) and $C_i$ ($i=0,\ldots,4$).
Of course, the new points will include the old ones, as well, by
\fref{prop:TenPointStructurePro}(iii).
Also continue the structure ``to the left'' and
denote this refined (halved) cantilever of 35 points by
$\CH _1$. Keep on defining $B_{1/2^n}$ and $\CH _n$ by recursive
halving, where
the latter consists of $(2^{n+2}+1)+(2^{n+3}+1)+(2^{n+2}+1)=2^{n+4}+3$ points.
\par
For each $n$, consider a cubic $\CC_n$ which passes through
$A_0$, $A_{1/2^n}$, $A_{2/2^n}$, $B_{1/2^n}$, $B_{2/2^n}$, $B_{4/2^n}$, $C_0$,
$C_{1/2^n}$, and
$C_{2/2^n}$. By \fref{lem:CantileverOnCubics}
this cubic contains all points of $\CH_n$.
In particular, all $\CC_n$ must contain the ten point configuration we
started with, hence all these cubics are identical by the Ten Point Lemma
(\fref{lem:TenPointLemma}).
\par
At this point we have a cubic $\CC$ for which
\[
\bigcup_n\CH _n\subset\CC.
\]
On it, the halving process (starting from $\CH _0$) gives exactly the same
$\CH _n$, whence the parameters which occur in $\cup_n\CH _n$ are
dense in an open set $\CU$ of the topological group $\CA$.
Hence the point set itself is dense in three corresponding arcs of $\CC$
(i.e., in the homeomorphic pre--images of $\CU$).
By the continuity of $\alpha$, $\beta$, $\gamma$ (and
$\cup_n\CH_n\subset\CC$), these arcs are completely on $\CC$, as well,
thus providing the required common
parts.
\emph{Proof \/}end
\subsection*{Surfaces and groups}
Let $F\in \BR[x,y,z]$ be a polynomial of three real variables.
Denote by
\[
S=S_F \buildrel{\scriptstyle\rm def}\over=\; \{ (x,y,z)\in\BR^3\ ;\ F(x,y,z)=0 \}
\]
its zero set, i.e., the algebraic surface described by the equation $F=0$.
The degree of $S_F$ is the (total) degree of its defining polynomial $F$.
\begin{defn}
We say that a surface $S\subset\BR^3$ is
\emph{described by a commutative group operation}\/ $\Agroup$
if there are mappings (``parametrisations'') $f_i:\BR\to \CA$
\ for $i=1,2,3$ \ such that
\[
(x_1,x_2,x_3)\in S \ \Leftrightarrow \
f_1(x_1) \oplus f_2(x_2) \oplus f_3(x_3) =0.
\]
\end{defn}
E.g., the sphere of equation $x^2+y^2+z^2= 1$ is described by the additive group
through the mappings $f_i(t)=t^2-1/3$ \ ($i=1,2,3$).
\par\par\noindent
One of the main ingredients of our proof is \fref{thm:SpecialSurfaceOrchardThm}
below, proven in \cite{How-to-find-groups}.
Assume we consider a plane $\alpha x+ \beta y+\gamma z=\delta$,
intersecting the cube $[0,n]^3$.
If the coefficients $\alpha, \beta, \gamma, \delta$ are rationals with
small numerators and denominators then
this plane will contain $\sim n^2$ lattice points. If we apply
independent uni-variate
transformations in the three coordinates, $x,y,z$, then we can
easily produce 2-dimensional surfaces --- described by some equation
$f(x)+g(y)+h(z)=\delta$ --- containing a quadratic number of points from a
product set $X\times Y \times Z$, where $|X|=|Y|=|Z|=n$.
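As a purely illustrative sketch (the specific numbers below are chosen only for
this illustration and are not part of the argument), one can count such lattice
points directly and then transport them by a uni-variate transformation:
\begin{verbatim}
# Illustration: the plane x + y + z = s, with s about 3n/2, contains on
# the order of n^2 points of the grid {1,...,n}^3.  Applying t -> t**3
# in each coordinate carries these points to the surface
# x^(1/3) + y^(1/3) + z^(1/3) = s, which therefore contains just as many
# points of the product set X x Y x Z with X = Y = Z = {1, 8, 27, ...}.
def count_plane_points(n, s):
    """Number of (i, j, k) in {1,...,n}^3 with i + j + k == s."""
    return sum(1 for i in range(1, n + 1) for j in range(1, n + 1)
               if 1 <= s - i - j <= n)

n = 100
print(count_plane_points(n, 3 * (n + 1) // 2))   # 7500 = 3*n*n/4
\end{verbatim}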
The main result of \cite{How-to-find-groups} asserts that if
some appropriate algebraicity conditions hold then (apart from
being a cylinder) this is the only way for a surface $F(x,y,z)=0$ to contain
a near--quadratic number of points from such a product set
$X\times Y\times Z$.
As usual, we call a function of one or two variable(s)
\emph{analytic\/} at a point if it can be expressed as a convergent power
series in a neighbourhood.
Also, it is analytic on an open set if it is analytic at each of its points.
\begin{thm}[``Surface Theorem'', see \cite{How-to-find-groups}, Theorem~3.]
\label{thm:SpecialSurfaceOrchardThm}
For any $c>0$ and any positive integer $d$ there exist positive constants
$\eta=\eta(c,d)$, $\lambda=\lambda(c,d)$ and
$n_0=n_0(c,d)$ with the following property.\\
If $V\subset\BR^3$ is an algebraic surface (i.e. each component is two
dimensional) of degree $\le d$ then the following are equivalent:
\begin{enumerate}[(a)]
\item \label{item:18}
For at least one $n>n_0(c,d)$ there exist $X,Y,Z \subset\BR$ such that
$|X|=|Y|=|Z|=n$ and
$$
|V\cap(X\times Y\times Z)|\ge c n^{2-\eta };
$$
\item \label{item:19}
Let $D$ denote the interval $(-1,1)$.
Then either $V$ contains a cylinder over a curve
$F(x,y)=0$ or $F(x,z)=0$ or $F(y,z)=0$ or, otherwise,
there are one-to-one analytic functions $f,g,h:D\to\BR$
with analytic inverses
such that $V$ contains the
$f\times g\times h$-image of a part of the plane
$x+y+z=0$ near the origin:
$$
V\supseteq
\Big\{\,\Big(f(x),g(y),h(z)\Big)\in\BR^3 \ ;\
x,y,z\in D\;;\;x+y+z=0\Big\};
$$
\item \label{item:21}
The statement in \eqref{item:19} can be localised as follows.
There is a finite subset $H\subset\BR$ and an irreducible component
$V_0\subseteq V$ such that whenever $P\in V_0$ is a point whose
coordinates are not in $H$, then one may require that
$\Big(f(0),g(0),h(0)\Big)=P$.
\end{enumerate}
\end{thm}
This result indicates a significant ``jump'':
either $V$ has the special form described in \fref{item:19},
in which case a quadratic order of magnitude is possible,
by \fref{item:19}$\Rightarrow$\fref{item:21}; or, else, we cannot
even exceed $n^{2-\eta}$, by \fref{item:18}$\Rightarrow$\fref{item:19}.
\section{Theorems on curves}
Here we present some results on point sets located on algebraic curves and
satisfying certain requirements.
\par
The first one (\fref{thm:ThreeCurveThm})
is a ``gap version'' of \fref{thm:CubicThm}. It states that there is a
significant difference between cubics and other algebraic curves: on a cubic,
$n$ points can determine as many as $cn^2$ triple lines; otherwise
even as few as $n^{{2-\eta}}$ are impossible for $n$ large enough.
\par
The other result is related to a problem of Erd\H{o}s. He asked if a point set
with $cn^2$ \emph{quadruple\/} lines must also contain a five-in-a-line. In
\fref{thm:FourInALineThm} we settle this in the affirmative, under the
additional assumption that the points lie on an algebraic curve.
\par
Finally, \fref{thm:FewDirThm} concerns point sets which determine few
distinct directions.
\subsection*{Many triple lines force cubics}
Our first main result states that, of all algebraic curves, only cubics can
accommodate $n$ points with $cn^{2-\eta}$ triple lines.
This is probably far from best possible; perhaps
the existence of as few as $cn^{1+\delta}$ such lines already implies
the same statement, for any $\delta>0$ and $n>n_0(c,\delta)$.
\begin{thm}\label{thm:ThreeCurveThm}
For every $c>0$ and positive integer $d$ there exist $\eta=\eta(c,d)$ and
$n_0=n_0(c,d)$ with the following property.
Let $\Gamma_1$, $\Gamma_2$, $\Gamma_3$ be (not necessarily distinct)
irreducible algebraic curves of degree at most $d$ in the plane $\BR^2$.
Assume that $n>n_0$ and
\begin{enumerate}[(i)]
\item
no two $\Gamma_i$ are identical straight lines;
\item
$\CH_i \subset \Gamma_i$ with $\size{\CH_i} \le n$ ($i=1,2,3$);
\item
$\size{\egypt{\CH_1}\egypt{\CH_2}\egypt{\CH_3}} \ge cn^{2-\eta}$.
\end{enumerate}
Then $\Gamma_1\cup \Gamma_2 \cup \Gamma_3$ is a cubic.
\end{thm}
\begin{rem}
If we have an \emph{arbitrary}\/ (i.e., possibly reducible) algebraic curve
$\Gamma$ of degree $d$
and a point set $\CH$ with many triple lines on it, then by the Pigeonhole
Principle, some (at most three) irreducible components of $\Gamma$ will contain a
subset of $\CH$ which still determines at least $\size{\harompt{\CH}}/d^3$
distinct triple lines. Therefore, the union of these components must be a cubic,
according to the aforementioned Theorem.
\end{rem}
\emph{Proof \/}of {\fref{thm:ThreeCurveThm}.}
Let the curves $\Gamma_1$, $\Gamma_2$, $\Gamma_3$ be defined by the polynomial
equations $F_1(x,y)=0$, $F_2(x,y)=0$, $F_3(x,y)=0$, respectively. Three points
$P_i(x_i,y_i)\in\Gamma_i$ ($i=1,2,3$) are collinear iff
\[
F(x_1,y_1,x_2,y_2,x_3,y_3)\buildrel{\scriptstyle\rm def}\over=\;
\begin{vmatrix}
1 & x_1 & y_1 \\
1 & x_2 & y_2 \\
1 & x_3 & y_3 \\
\end{vmatrix}
=0.
\]
Eliminating the $y_i$ from the system of the four equations
\begin{equation}\label{eq:FourEqu}
\bigl\{
F(x_1,y_1,x_2,y_2,x_3,y_3)=0
\bigr\} \cup \bigl\{
F_i(x_i,y_i)=0\ \ (i=1,2,3)
\bigr\},
\end{equation}
we get a polynomial relation $f(x_1,x_2,x_3)=0$.
In other words, the projection to $\BR^3$ (i.e., to the subspace
spanned by the $x_i$ coordinates)
of the two dimensional algebraic variety defined by
\fref{eq:FourEqu} in $\BR^6$, will be contained in the zero-set
of a single polynomial equation $f(x_1,x_2,x_3)=0$.
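As a concrete (and purely illustrative) instance of this elimination, take each
$\Gamma_i$ to be the cubic $y=x^3$; the following short SymPy computation of
successive resultants recovers the plane $x_1+x_2+x_3=0$ as the only
non-degenerate component of $S_f$:
\begin{verbatim}
# Illustrative sketch: eliminate y1, y2, y3 by successive resultants
# in the special case Gamma_1 = Gamma_2 = Gamma_3 = { y = x^3 }.
import sympy as sp

x1, y1, x2, y2, x3, y3 = sp.symbols('x1 y1 x2 y2 x3 y3')

F = sp.Matrix([[1, x1, y1], [1, x2, y2], [1, x3, y3]]).det()
F1, F2, F3 = y1 - x1**3, y2 - x2**3, y3 - x3**3

f = sp.resultant(F, F1, y1)          # eliminate y1
f = sp.resultant(f, F2, y2)          # eliminate y2
f = sp.resultant(f, F3, y3)          # eliminate y3

print(sp.factor(f))
# Up to sign: (x1 - x2)*(x1 - x3)*(x2 - x3)*(x1 + x2 + x3); apart from
# the degenerate components x_i = x_j, the surface S_f is the plane
# x1 + x2 + x3 = 0.
\end{verbatim}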
\par
Let $\eta=\eta(c,d)$ be as in \fref{thm:SpecialSurfaceOrchardThm}.
Denoting the set of the $x$ coordinates of $\CH_i$ by $X_i$ ($i=1,2,3$), we have
that the surface $S_f = \{ f=0 \}$ contains at least $cn^{2-\eta}$ points of
$X_1\times X_2 \times X_3$.
\par
In other words, \fref{item:18} of the
Surface Theorem~\ref{thm:SpecialSurfaceOrchardThm} is
satisfied for $V=S_f$ and the $X_i$. Since $S_f$ cannot contain a cylinder by
assumption (i), there exists an irreducible component $V_0\subset S_f$
for which also \fref{item:19} --- localised as in \fref{item:21} of the same Theorem --- holds.
\par
Pick a generic point $P(a_1,a_2,a_3)\in V_0\subset S_f$.
By the definition of the
surface, there exist $b_1$, $b_2$, $b_3\in\BR$ such that, on the one hand,
$Q_i(a_i,b_i)\in\Gamma_i$ for $i=1,2,3$, while on the other hand, these $Q_i$
are collinear.
We can also assume without loss of generality, that these three
points are distinct, they are regular points of
$\Gamma_1\cup\Gamma_2\cup\Gamma_3$, and
the straight line $l$ which contains them is not tangent to $\Gamma_i$ at
$Q_i$ ($i=1,2,3$).
[Indeed, $V_0$ is two dimensional by
\fref{thm:SpecialSurfaceOrchardThm}\fref{item:19}
while the points to be excluded form a finite number of one dimensional
curves.]
\par
Moreover, by \fref{item:19} and \fref{item:21} there, collinearity between sufficiently small arcs
of the $\Gamma_i$ around the $Q_i$ is described by $\langle\BR,+\rangle$.
Now if we rotate and/or shift the plane so that $l$ becomes the $y$ axis then,
according to \fref{rem:DerivativeRem}, in a sufficiently small neighbourhood
of 0, the (rotated) $\Gamma_i$ coincide with the graphs of
a standard system of continuous functions.
Thus we can use \fref{lem:MainLemma} to conclude that a suitable cubic
$\CC$ contains a non-empty open arc of each $\Gamma_i$. Thus also the union
$\Gamma_1\cup \Gamma_2 \cup \Gamma_3$ of the three \emph{irreducible\/} curves
is contained in $\CC$.
\par
Finally, they cannot all be contained in a curve of degree $<3$ since in that
case they could not define many triple lines. Therefore,
$\Gamma_1\cup \Gamma_2 \cup \Gamma_3=\CC$.
\emph{Proof \/}end
\subsection*{Four-in-a-line}
Erd\H{o}s \cite{ErPu:IV} posed the problem whether a set of $n$ points
which contains $cn^2$ collinear four-tuples must also contain five collinear
points.
To our best knowledge, no progress has been made on this question so far.
In 1995, M.~Simonovits asked the following. \emph{ Is it possible to find $n$
points on an irreducible algebraic curve of degree 4 which determine $cn^2$
four-in-a-line?\/} (Of course, such a set can contain no five-in-a-line.)
We show here that the answer is in the negative, even in a more general
setting.
\begin{thm}\label{thm:FourInALineThm}
If an algebraic curve $\Gamma$ of degree $d$ accommodates a set $\CH$ of $n$
points with $cn^{2-\eta}$ distinct quadruple lines,
where $\eta=\eta(c,d)$ is the same as in \fref{thm:ThreeCurveThm},
then $\Gamma$ contains four
straight lines, each with $\ge c^{\prime}(c,d)\cdot n^{1-\eta}$ points of $\CH$,
provided that $n>n_0(c,d)$.
\end{thm}
\emph{Proof \/}
$\Gamma$ has at most $d$ irreducible components. Classify the $cn^{2-\eta}$
collinear four-tuples (located on distinct straight lines) according to
which point lies on which component. By the Pigeonhole Principle, some
four (not necessarily distinct) components
$\Gamma_1$, $\Gamma_2$, $\Gamma_3$ and
$\Gamma_4$ generate $cn^{2-\eta}/d^4={c}^{\prime}(c,d)n^{2-\eta}$ quadruple lines.
By \fref{thm:ThreeCurveThm}, any three of the $\Gamma_i$ must form a cubic.
However, this is only possible if they are distinct straight lines.
\emph{Proof \/}end
\subsection*{Few directions}
In \cite{EGyRL}, it was shown that if the graph of a polynomial $f\in\BR[x]$
contains $n$ points whose ${n \choose 2}$ connecting lines determine only
a linear number (at most $Cn$) of distinct directions, then the polynomial $f$ is
quadratic.
(Some historic remarks and earlier results concerning sets which determine few
directions can also be found there.)
Here we extend this to general algebraic curves.
\begin{thm}\label{thm:FewDirThm}
For every $C>0$ and positive integer $d$ there is an $n_0=n_0(C,d)$ with the following property.\\
Let $\Gamma_1$ and $\Gamma_2$ be two (not necessarily distinct) irreducible
algebraic curves, $n>n_0$, and $\CH_i\subset\Gamma_i$ with $\size{\CH_i}=n$
($i=1,2$).
Assume that among the directions
of the straight lines $\overline{P_1P_2}$, for $P_i\in\CH_i$ and $P_1\ne
P_2$,
at most $Cn$ are distinct. Then $\Gamma_1\cup\Gamma_2$ is a
(possibly degenerate) conic.
\end{thm}
\emph{Proof \/}
Let $\Gamma_3$ be the line at infinity and $\CH_3$ the set of the
$\le Cn$ directions on it.
(If someone prefers no points at infinity, they can apply a projective mapping
before proceeding further.)
By assumption,
$\size{\egypt{\CH_1}\egypt{\CH_2}\egypt{\CH_3}} \ge {n \choose 2}
> n^{2-\eta} $ if $n$ is large.
Hence, by
\fref{thm:ThreeCurveThm}, $\Gamma_1\cup\Gamma_2\cup\Gamma_3$ is a cubic.
Therefore, $\Gamma_1\cup\Gamma_2$ is a conic.
\emph{Proof \/}end
\section{Straight lines and conics}\label{sec:ConicSection}
\begin{thm}\label{thm:DegenerateConicThm}
Let $n\le|\CH_1|,|\CH_2|,|\CH_3|\le Cn$ and assume that
$\CH_1$ and $\CH_2$ lie on the distinct straight lines
$l_1$ and $l_2$, respectively, while $\CH_3\cap l_1=\CH_3\cap l_2=\emptyset$.
\\
If, moreover, $|\harompt{\CH_1\CH_2\CH_3}|\ge cn^2$,
then some $c^*n$ of the points of $\;\CH_3$, too, must be collinear.
(Here $c^*=c^*(c,C)$ does not depend on $n$.)
\end{thm}
\emph{Proof \/}
Apply a projective transform $\pi$ which maps $l_1$ to the line at infinity.
Then some $cn^2$ pairs of points of $\pi(\CH_2)\times\pi(\CH_3)$
determine at most $\size{\pi(\CH_1)}=\size{\CH_1}\le Cn$ distinct directions,
while $\pi(\CH_2)$ is still collinear.
By a result in \cite{EGy:LinIII} (see Theorem 3 there), also $\pi(\CH_3)$
--- hence $\CH_3$, too --- must contain $c^*n$ collinear points.
~\emph{Proof \/}end
The following \fref{thm:NonDegenerateConicThm} is the ``elder brother'' of
\fref{thm:DegenerateConicThm}
in the sense that now we start from a non-degenerate
conic while the two lines $l_1$, $l_2$ above can be considered as a
degenerate one.
\begin{thm}\label{thm:NonDegenerateConicThm}
Let $C>1$ be arbitrary and $\CH_1$, $\CH_2\subset\BR^2$.
Assume that
\begin{enumerate}[(a)]
\item
$n\le|\CH_1|,|\CH_2|\le Cn$;
\item
$\CH_2$ lies on a non-degenerate conic which contains no point of $\CH_1$;
\item $|\egypt{\CH_1}\ketpt{\CH_2}|\ge n^2$.
\end{enumerate}
Then some $c^*n$ of the points of $\CH_1$ must be collinear
(where $c^*=c^*(C)$ does not depend on $n$.)
\end{thm}
\emph{Proof \/}
First, without loss of generality,
we may assume that every point of $\CH_1$ is incident upon
at least $n$ triple lines. (Otherwise keep deleting the points incident upon fewer than
$n/(2C)$ such lines and, finally, use the new values
$n^\prime=n/(2C)$, $C^\prime=2C^2$.)
Moreover, we may assume that the conic which contains $\CH_2$
is the parabola $y=x^2$. (Otherwise we apply a projective
mapping which maps it to that curve. This can also be done
in such a way that no point of $\CH_1$ is mapped to the
line at infinity and the $x$--coordinates of the points in
$\CH_1\cup\CH_2$ all become distinct.)
Denote the coordinates of the points of $\CH_1$ by $(a_i,b_i)$
and the set of the $x$--coordinates of the points of $\CH_2$
by $X$, i.e.,
$$
\begin{aligned}
\CH_1&=\{ (a_i,b_i)\ |\ i=1,2,\ldots,\size{\CH_1}\};\cr
\CH_2&=\{ (x,x^2)\ |\ x\in X\},
\end{aligned}
$$
where, of course, $|X|=\size{\CH_2}$.
\begin{prop}
Two distinct points $(x,x^2)$, $(y,y^2)$ of $\CH_2$ and a point
$(a_i,b_i)\in\CH_1$ are collinear iff
$$
xy-a_ix-a_iy+b_i=0.
$$
\end{prop}
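\emph{Proof \/}
Expanding the determinant which expresses the collinearity of the three points,
$$
\begin{vmatrix}
1 & x & x^2 \\
1 & y & y^2 \\
1 & a_i & b_i \\
\end{vmatrix}
=(y-x)\bigl(xy-a_ix-a_iy+b_i\bigr),
$$
and the first factor is non-zero because the two points of $\CH_2$ are distinct.
\emph{Proof \/}end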
The above equations can be considered as
functions of type $X\to X$:
$$
y=f_i(x)\buildrel{\scriptstyle\rm def}\over=\;\;\frac{a_ix-b_i}{x-a_i}.
$$
These projective mappings $f_i$ are ``vertical projections'' (to $X$)
of the involutions of the parabola, with centres $(a_i,b_i)$.
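Indeed, a direct calculation gives
$$
f_i\bigl(f_i(x)\bigr)
=\frac{a_i\,\frac{a_ix-b_i}{x-a_i}-b_i}{\frac{a_ix-b_i}{x-a_i}-a_i}
=\frac{(a_i^2-b_i)\,x}{a_i^2-b_i}=x ,
$$
where $a_i^2-b_i\ne0$ because, by assumption (b), the point $(a_i,b_i)\in\CH_1$
does not lie on the parabola; so each $f_i$ is indeed an involution.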
\par
We started with the assumption that every point of $\CH_1$ is incident
upon at least $n$ triple lines. Therefore, each $f_i$ maps at least $n$
elements of $X$ to elements of $X$. According to \cite{EGyKZ} Theorem 29
(the ``Image Set Theorem''), some $c^*n$ of the $f_i$ must be collinear ---
if we represent them as elements of the three dimensional projective space.
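(Concretely, the fractional linear map $f_i(x)=\frac{a_ix-b_i}{x-a_i}$ corresponds
to its coefficient matrix
$\left(\begin{smallmatrix} a_i & -b_i\\ 1 & -a_i\end{smallmatrix}\right)$,
determined up to a non-zero scalar factor, that is, to a point of the three
dimensional projective space with coordinates $(a_i,-b_i,1,-a_i)$.)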
In other words, in that space at least $c^*n$ points of projective coordinates
$(a_i,-b_i,1,-a_i)$ are combinations of as few as two of them, say
$(a_1,-b_1,1,-a_1)$ and $(a_2,-b_2,1,-a_2)$. Considering the (constant)
third coordinates, this is only possible if --- even as four dimensional
vectors ---
$(a_i,-b_i,1,-a_i)=\lambda_i(a_1,-b_1,1,-a_1)+(1-\lambda_i)(a_2,-b_2,1,-a_2)$,
for suitable reals $\lambda_i$. We conclude that the corresponding
$c^*n$ original points
$P_i(a_i,b_i)\in\CH_1\subset\BR^2$ must also be collinear.\emph{Proof \/}end
\section{Concluding remarks }
\par
Beyond \fref{conj:TenConj} the following remain open.
\begin{prob}
Let $\delta>0$ be arbitrary. Does the conclusion ``\/$\Gamma_1 \cup \Gamma_2
\cup \Gamma_3 $ is a cubic'' of
\fref{thm:ThreeCurveThm} hold if, in place of (iii), we only assume
$$
\text{(iii*) }\qquad
\size{\egypt{\CH_1}\egypt{\CH_2}\egypt{\CH_3}} \ge n^{1+\delta}
$$
--- provided that $n>n_0=n_0(\delta,d)$?
\end{prob}
\begin{prob}
Does \fref{thm:FourInALineThm} hold with $n^{1-\eta/2}$ in the statement
(in place of $n^{1-\eta}$)?
\end{prob}
\begin{prob}
Let $\delta>0$ be arbitrary. Does the conclusion ``\/$\Gamma_1 \cup \Gamma_2$
is a conic'' of
\fref{thm:FewDirThm} hold if we only assume that the lines
$\overline{P_1P_2}$ determine $\le n^{2-\delta}$ distinct directions
--- in place of $Cn$ --- provided that $n>n_0=n_0(\delta,d)$?
\end{prob}
\par
{\bf\qquad Acknowledgements}
We are grateful to Endre Makai for his very constructive comments
on (and simplifications to) some earlier versions of the manuscript
and also to Zolt\'an J\'arai for sharing with us his typesetting
\TeX{pertise}.
\end{document}
\begin{document}
\title[Heegaard surfaces and measured laminations II]{Heegaard surfaces and measured laminations, II: non-Haken 3--manifolds}
\author{Tao Li}
\thanks{2000 \emph{Mathematics Subject Classification}. Primary 57N10, 57M50; Secondary 57M25}
\thanks{\emph{Key words and phrases}. Heegaard splitting, measured lamination, non-Haken 3--manifold}
\thanks{Partially supported by an NSF grant}
\address{Department of Mathematics \\
Boston College \\
Chestnut Hill, MA 02467 \\
USA}
\email{[email protected]}
\urladdr{http://www2.bc.edu/\~{}taoli}
\begin{abstract}
A famous example of Casson and Gordon shows that a Haken 3--manifold can have an infinite family of irreducible Heegaard splittings with different genera.
In this paper, we prove that a closed non-Haken 3--manifold has only finitely many irreducible Heegaard splittings, up to isotopy. This is much stronger than the generalized Waldhausen conjecture. Another immediate corollary is that for any irreducible non-Haken 3--manifold $M$, there is a number $N$, such that any two Heegaard splittings of $M$ are equivalent after at most $N$ stabilizations.
\end{abstract}
\maketitle
\setcounter{tocdepth}{1}
\tableofcontents
\section{Introduction}
A Heegaard splitting of a closed orientable 3--manifold is said to be reducible if there is an essential simple closed curve in the Heegaard surface bounding disks in both handlebodies. Haken proved that a Heegaard splitting of a reducible 3--manifold is always reducible \cite{H}.
The classification of irreducible Heegaard splittings has been a long-standing fundamental problem in 3--manifold topology. Such classification has been achieved for certain non-hyperbolic manifolds, such as $S^3$ by Waldhausen \cite{W1}, Lens spaces by Bonahon and Otal \cite{BO}, and Seifert fiber spaces by \cite{BCZ, M, MSch}. The main theorem of this paper is a finiteness result for non-Haken 3--manifolds.
\begin{theorem}\label{main}
A closed orientable non-Haken 3--manifold has only finitely many irreducible Heegaard splittings, up to isotopy.
\end{theorem}
An important question in the study of Heegaard splittings is whether there are ways to construct different Heegaard splittings. By adding trivial handles, one can always construct an infinite family of Heegaard splittings for every 3--manifold. Theorem~\ref{main} says that, for irreducible non-Haken manifolds, adding trivial handles is virtually the only way of obtaining new Heegaard splittings.
The study of Heegaard splittings has changed dramatically since Casson and Gordon introduced the notion of strongly irreducible Heegaard splittings \cite{CG}. They showed \cite{CG} that an irreducible Heegaard splitting of a non-Haken 3--manifold is also strongly irreducible. Using the thin-position argument, Rubinstein established relations between strongly irreducible Heegaard splittings and normal surface theory. The results in \cite{CG} have also been used to attack the virtually Haken conjecture \cite{La, MMZ}.
Casson and Gordon found the first 3--manifolds containing infinitely many different irreducible Heegaard splittings, see \cite{CG2,Sed,Ko}, and Theorem~\ref{main} says that this can only happen in Haken 3--manifolds. In section~\ref{Sexample}, we will show the relation between an incompressible surface and the infinite family of strongly irreducible Heegaard splittings in the Casson-Gordon example. This interpretation of the Casson-Gordon example was independently discovered by \cite{MSS}, where the authors proved a special case of the theorem.
A conjecture of Waldhausen \cite{W2} says that a closed orientable 3--manifold has only finitely many minimal/irreducible Heegaard splittings, up to homeomorphism (or even isotopy). This is known to be false because of the Casson-Gordon example. A modified version of this conjecture is the so-called generalized Waldhausen conjecture, which says that an irreducible and atoroidal 3--manifold has only finitely many Heegaard splittings in each genus, up to isotopy. Johannson \cite{Jo1, Jo2} proved the generalized Waldhausen conjecture for Haken 3--manifolds. Together with Johannson's theorem, Theorem~\ref{main} implies the generalized Waldhausen conjecture. Moreover, Theorem~\ref{main} says that the original version of the Waldhausen conjecture is true for non-Haken 3--manifolds.
Another important question in the study of Heegaard splittings is how different Heegaard splittings are related. This is the so-called stabilization problem, asking the number of stabilizations required to make two Heegaard splittings equivalent. It has been shown that the number of stabilizations is bounded by a linear function of the genera of the two splittings \cite{RS}, but it remains unknown whether there is a universal bound. We hope the techniques used in this paper can shed some light on this question. Corollary~\ref{stable} follows from Theorem~\ref{main} and \cite{RS}.
\begin{corollary}\label{stable}
For any closed, orientable, irreducible and non-Haken 3--manifold $M$, there is a number $N$ such that any two Heegaard splittings of $M$ are equivalent after at most $N$ stabilizations.
\end{corollary}
We briefly describe the main ideas of the proof. The basic idea is similar in spirit to the proof of \cite{L5}. By \cite{H, CG, BCZ, BO, M, MSch}, we may assume $M$ is irreducible, atoroidal and not a small Seifert fiber space, and the Heegaard splittings are strongly irreducible. By a theorem in \cite{L4}, there is a finite collection of branched surfaces in $M$ such that every strongly irreducible Heegaard surface is fully carried by a branched surface in this collection. Moreover, the branched surfaces in this collection have some remarkable properties; for example, they do not carry any normal 2--sphere or normal torus. Each surface carried by a branched surface corresponds to an integer solution to the system of branch equations \cite{FO}. One can also define the projective lamination space for a branched surface, see \cite{L4}. If a branched surface in this collection carries an infinite number of strongly irreducible Heegaard surfaces, then we have an infinite sequence of points in the projective lamination space. By compactness, there must be an accumulation point which corresponds to a measured lamination $\mu$. The main task is to prove that $\mu$ is incompressible and hence yields a closed incompressible surface, contradicting the hypothesis that $M$ is non-Haken. The proof utilizes properties of both strongly irreducible Heegaard splittings and measured laminations.
We organize this paper as follows. In section~\ref{Sbranch}, we briefly review some results from \cite{L4} and show some relations between branched surfaces and strongly irreducible Heegaard splittings. In sections~\ref{Slam} and \ref{Slimit}, we prove some technical lemmas concerning measured laminations. In section~\ref{Shelix}, we explain a key construction. We finish the proof of Theorem~\ref{main} in section~\ref{Smain}. In section~\ref{Sexample}, we show how to interpret the limit of the infinite family of strongly irreducible Heegaard surfaces in the Casson-Gordon example.
\begin{acknowledgments}
I would like to thank Bus Jaco, Saul Schleimer and Ian Agol for useful conversations and Cynthia Chen for technical assistance. I also thank the referee for many corrections and suggestions.
\end{acknowledgments}
\section{Heegaard surfaces and branched surfaces}\label{Sbranch}
\begin{notation}
Throughout this paper, we will denote the interior of $X$ by $int(X)$, the closure (under path metric) of $X$ by $\overline{X}$, and the number of components of $X$ by $|X|$. We will also use $|n|$ to denote the absolute value of $n$ if $n$ is a number. We will use $\eta(X)$ to denote the closure of a regular neighborhood of $X$. We will also use the same notations on branched surfaces and laminations as in sections 2 and 3 of \cite{L4}.
\end{notation}
Let $M$ be a closed orientable and non-Haken 3--manifold. A theorem of Haken \cite{H} says that a reducible 3--manifold cannot have any irreducible Heegaard splitting. By \cite{BCZ, BO, M, MSch}, Theorem~\ref{main} is true for small Seifert fiber spaces. So we may assume $M$ is irreducible and not a small Seifert fiber space. Casson and Gordon \cite{CG} showed that irreducible Heegaard splittings are equivalent to strongly irreducible Heegaard splittings for non-Haken 3--manifolds. Hence we assume the Heegaard splittings in this paper are strongly irreducible. We call the Heegaard surface of a strongly irreducible splitting a strongly irreducible Heegaard surface.
By \cite{R, St}, each strongly irreducible Heegaard surface is isotopic to an almost normal surface with respect to a triangulation. Similar to \cite{FO}, we can use normal disks and almost normal pieces to construct a finite collection of branched surfaces such that each strongly irreducible Heegaard surface is fully carried by a branched surface in this collection. By a theorem of \cite{L4} (Theorem~\ref{Heeg1} below), we can split these branched surfaces into a larger collection of branched surfaces so that each strongly irreducible Heegaard surface is still fully carried by a branched surface in this collection and no branched surface in this collection carries any normal 2--sphere or normal torus.
\begin{theorem}[Theorem 1.3 in \cite{L4}]\label{Heeg1}
Let $M$ be a closed orientable irreducible and atoroidal 3--manifold, and suppose $M$ is not a Seifert fiber space. Then $M$ has a finite collection of branched surfaces, such that
\begin{enumerate}
\item each branched surface in this collection is obtained by gluing together normal disks and at most one almost normal piece with respect to a fixed triangulation, similar to \cite{FO},
\item up to isotopy, each strongly irreducible Heegaard surface is fully carried by a branched surface in this collection,
\item no branched surface in this collection carries any normal 2--sphere or normal torus.
\end{enumerate}
\end{theorem}
Our goal is to prove that each branched surface in Theorem~\ref{Heeg1} only carries a finite number of strongly irreducible Heegaard surfaces. We will use various properties of strongly irreducible Heegaard splittings, branched surfaces and measured laminations, and we refer to sections 2 and 3 of \cite{L4} for an overview of some results and techniques in these areas. In this section, we prove some easy lemmas which establish some connections between branched surfaces and Heegaard surfaces.
Let $B$ be a branched surface, $N(B)$ be a fibered neighborhood of $B$, and $\pi:N(B)\to B$ be the map collapsing each $I$--fiber of $N(B)$ to a point.
We say an annulus $A=S^1\times I\subset N(B)$ is a \emph{vertical annulus} if every $\{x\}\times I\subset A$ ($x\in S^1$) is a subarc of an $I$--fiber of $N(B)$. We say a surface $\Gamma$ is carried by $N(B)$ if $\Gamma\subset N(B)$ is transverse to the $I$--fibers of $N(B)$.
\begin{proposition}\label{Ptorus}
Let $B$ be a branched surface and $A\subset N(B)$ an embedded vertical annulus. Suppose there is an embedded annulus $\Gamma$ carried by $N(B)$ such that $\partial\Gamma\subset A$ and $int(\Gamma)\cap A$ is an essential closed curve in $\Gamma$. Then $B$ carries a torus.
\end{proposition}
\begin{proof}
First note that if $B$ carries a Klein bottle $K$, then the boundary of a twisted $I$--bundle over $K$ is a torus carried by $B$. The idea of the proof is that one can perform some cutting and pasting on $A$ and $\Gamma$ to get a torus (or Klein bottle) carried by $B$. The circle $int(\Gamma)\cap A$ cuts $\Gamma$ into 2 sub-annuli, say $\Gamma_1$ and $\Gamma_2$, with $int(\Gamma_i)\cap A=\emptyset$ ($i=1,2$). Let $A_i$ be the sub-annulus of $A$ bounded by $\partial\Gamma_i$. So $A_i\cup\Gamma_i$ is an embedded torus (or Klein bottle). We have two cases here. The first case is that $\Gamma_i$ connects $A$ from different sides, more precisely, after a small perturbation, the torus (or Klein bottle) $A_i\cup\Gamma_i$ is transverse to the $I$--fibers of $N(B)$, as shown in Figure~\ref{tori} (a). The second case is that both $\Gamma_1$ and $\Gamma_2$ connect $A$ from the same side. Then as shown in Figure~\ref{tori}(b, c), we can always use the annuli $\Gamma_i$ and $A_i$ to assemble a torus (or Klein bottle) carried by $B$.
\end{proof}
\begin{figure}\label{tori}
\end{figure}
The following lemma is a variation of Lemma 2.2 in \cite{S} and the proof is similar.
\begin{lemma}\label{Lnest}
Let $M=H_1\cup_SH_2$ be a strongly irreducible Heegaard splitting, $S$ the Heegaard surface, and $D$ an embedded disk in $M$ with $\partial D\subset S$. Suppose $D$ is transverse to $S$ and $int(D)\cap S$ is a single circle $\gamma$. Let $D_1\subset D$ be the disk bounded by $\gamma$, and suppose $D_1\subset H_1$ is a compressing disk of the handlebody $H_1$. Then the annulus $A=D-int(D_1)$ must be $\partial$--parallel in the handlebody $H_2$.
\end{lemma}
\begin{proof}
Since $S$ is strongly irreducible, $\gamma$ does not bound a disk in $H_2$. So $A$ is incompressible in $H_2$, and hence $A$ is $\partial$--compressible. Let $E\subset H_2$ be a $\partial$--compressing disk for the annulus $A=D-int(D_1)$. We may suppose $\partial E$ consists of two arcs, $\alpha$ and $\beta$, where $\alpha\subset A$ is an essential arc in $A$, $\beta\subset S$ and $\partial\alpha=\partial\beta\subset\partial A$.
Now we compress $A$ along $E$, in other words, we perform a simple surgery, replacing a small neighborhood of $\alpha$ in $A$ by two parallel copies of $E$. The resulting surface is a disk properly embedded in $H_2$. We denote this disk by $D_2$. After a small perturbation, we may assume $\partial D_2$ is disjoint from $\partial D_1$. Since $M=H_1\cup_SH_2$ is a strongly irreducible Heegaard splitting and $D_1$ is a compressing disk in $H_1$, $D_2$ must be a $\partial$--parallel disk in $H_2$. This implies that $A$ is $\partial$--parallel in $H_2$.
\end{proof}
The following lemma follows easily from Proposition~\ref{Ptorus} and Lemma~\ref{Lnest}.
\begin{lemma}\label{Lbound}
Let $S$ be a strongly irreducible Heegaard surface fully carried by a branched surface $B$, and suppose $B$ does not carry any torus. Let $A$ be an embedded vertical annulus in $N(B)$, and suppose $A\cap S=\cup_{i=1}^nc_i$ consists of $n$ non-trivial circles in $S$. If some $c_i$ bounds a compressing disk in one of the two handlebodies, then there is a number $K$ depending only on $B$ such that $n=|A\cap S|<K$.
\end{lemma}
\begin{proof}
Suppose $M=H_1\cup_SH_2$ is the Heegaard splitting. Let $A_i$ be the sub-annulus of $A$ bounded by $c_i\cup c_{i+1}$, and we may assume $A_i$ is properly embedded in $H_1$ if $i$ is odd and in $H_2$ if $i$ is even. Without loss of generality, we may suppose $c_1$ bounds a compressing disk in a handlebody. Note that the argument works fine if one starts with an arbitrary $c_i$ rather than $c_1$.
If $c_1$ bounds a compressing disk in $H_2$, since $c_1\cup c_2$ bounds an annulus $A_1$ in $H_1$, by Lemma~\ref{Lnest}, $A_1$ is $\partial$--parallel in $H_1$. By pushing $A_1$ into $H_2$, we have that $c_2$ bounds a disk in $H_2$. Since $A_2$ lies in $H_2$, the union of $A_2$ and the disk bounded by $c_2$ in $H_2$ is a disk bounded by $c_3$. Since each $c_i$ is non-trivial in $S$, $c_3$ bounds a compressing disk in $H_2$. Again, since $A_3$ lies in $H_1$, by Lemma~\ref{Lnest}, $A_3$ is $\partial$--parallel in $H_1$. Inductively, we conclude that $A_{2k+1}$ is $\partial$--parallel in $H_1$ for each $k$. So for each $k$, there is an annulus $\Gamma_k\subset S$ such that $\partial\Gamma_k=\partial A_{2k+1}$ and $A_{2k+1}\cup\Gamma_k$ bounds a solid torus $T_k$ in $H_1$. It is clear that any two such solid tori $T_i$ and $T_j$ are either disjoint or nested.
Suppose $T_i$ and $T_j$ are nested, say $T_i\subset T_j$. Hence $\Gamma_i\subset\Gamma_j$ and $\partial A_{2i+1}\subset\Gamma_j$. Note that $\Gamma_j\subset S$ is an annulus carried by $N(B)$ and $\partial A_{2i+1}\subset\Gamma_j\cap A$, so a sub-annulus of $\Gamma_j$ satisfies the hypotheses of Proposition~\ref{Ptorus}. Hence $B$ must carry a torus, contradicting our hypotheses. Thus, the solid tori $T_i$'s are pairwise disjoint. Note that $\partial T_i\subset N(B)$ but the solid torus $T_i$ is not contained in $N(B)$, since $A_k\subset A\subset N(B)$ is a vertical annulus. So each solid torus $T_i$ must contain a component of $\partial_hN(B)$, and hence the number of such solid tori is bounded by the number of components of $\partial_hN(B)$. Therefore, there is a number $K$ depending only on $B$ such that $n=|A\cap S|<K$.
If $c_1$ bounds a compressing disk in $H_1$, since $c_1\cup c_2$ bounds the annulus $A_1$ in $H_1$, $c_2$ bounds a compressing disk in $H_1$. As $A_2$ is an annulus in $H_2$, by Lemma~\ref{Lnest}, we have that $A_2$ is $\partial$--parallel in $H_2$. Using the same argument, we can inductively conclude that $A_{2k}$ is $\partial$--parallel in $H_2$ for each $k$, and obtain such a bound $K$ on $n=|A\cap S|$.
\end{proof}
The following proposition about branched surfaces is well known, see also \cite{FO,AL}.
\begin{proposition}\label{Pmon}
Let $B$ be a branched surface in $M$. Suppose $M-B$ is irreducible and $\partial_hN(B)$ is incompressible in $M-int(N(B))$. Let $C$ be a component of $M-int(N(B))$ and suppose $C$ contains a monogon. Then $C$ must be a solid torus of the form $D\times S^1$, where $D$ is a monogon.
\end{proposition}
\begin{proof}
Let $D$ be a monogon in $C$, {\it i.e.}, the disk $D$ is properly embedded in $C$, $\partial D$ consists of two arcs, $\alpha\subset\partial_vN(B)$ and $\beta\subset\partial_hN(B)$, and $\alpha$ is a vertical arc in $\partial_vN(B)$. Let $v$ be the component of $\partial_vN(B)$ containing $\alpha$. Then as shown in Figure~\ref{monogon} (a), the union of two parallel copies of $D$ and a rectangle in $v$ is a disk $E$ properly embedded in $C$, with $\partial E\subset \partial_hN(B)$. Since $\partial_hN(B)$ is incompressible in $M-int(N(B))$, $\partial E$ must bound a disk in $\partial_hN(B)\cap\partial C$. The union of $E$ and this disk is a 2--sphere in $C$; since $C$ is irreducible, this sphere bounds a 3--ball, and it follows that $C$ must be a solid torus of the form $D\times S^1$, where $D$ is the monogon above.
\end{proof}
Before we proceed, we quote two results of Scharlemann that we will use later.
\begin{lemma}[Lemma 2.2 of \cite{S}]\label{Ls22}
Suppose $H_1\cup_SH_2$ is a strongly irreducible Heegaard splitting of a 3--manifold $M$ and $F$ is a disk in $M$ transverse to $S$ with $\partial F\subset S$. Then $\partial F$ bounds a disk in some $H_i$.
\end{lemma}
\begin{theorem}[Theorem 2.1 of \cite{S}]\label{Ts21}
Suppose $H_1\cup_SH_2$ is a strongly irreducible Heegaard splitting of a 3--manifold $M$ and $B$ is a 3--ball in $M$. Let $T_i$ be the planar surface $\partial B\cap H_i$ properly embedded in $H_i$, and suppose $T_i$ is incompressible in $H_i$. Then $S\cap B$ is connected and $\partial$--parallel in $B$.
\end{theorem}
Corollary~\ref{Csch} follows trivially from Scharlemann's theorem.
\begin{corollary}\label{Csch}
Suppose $H_1\cup_SH_2$ is a strongly irreducible Heegaard splitting of a 3--manifold. Let $P$ be a planar surface properly embedded in $H_1$. Suppose $P$ is incompressible in $H_1$, and each boundary component of $P$ bounds a disk in $H_2$. Then $P$ is $\partial$--parallel in $H_1$.
\end{corollary}
\section{Measured laminations}\label{Slam}
The purpose of this section is to prove Lemma~\ref{Lfine}, which is an easy consequence of some properties of laminations and results from \cite{L4}.
The following theorem is one of the fundamental results in the theory of measured laminations and foliations. It also plays an important role in \cite{L4}. An exceptional minimal lamination is a lamination in which every leaf is dense, and the intersection of any transversal with such a lamination is a Cantor set, see section 3 of \cite{L4}.
\begin{theorem}[Theorem 3.2 in Chapter I of \cite{MS}, p.~410]\label{TMS}
Let $\mu$ be a co-dimension one measured lamination in a closed connected 3--manifold $M$, and suppose $\mu\ne M$. Then $\mu$ is the disjoint union of a finite number of sub-laminations. Each of these sub-laminations is of one of the following types:
\begin{enumerate}
\item A family of parallel compact leaves,
\item A twisted family of compact leaves,
\item An exceptional minimal measured lamination.
\end{enumerate}
\end{theorem}
\begin{definition}[Definition 4.2 of \cite{L4}]\label{Dvan}
Let $\mu$ be a lamination in $M$ and $l_0$ a leaf of $\mu$. We call a simple closed curve $f_0:S^1\to l_0$ an \emph{embedded vanishing cycle} in $\mu$ if $f_0$ extends to an embedding $F:[0, 1]\times S^1\to M$ satisfying the following properties.
\begin{enumerate}
\item $F^{-1}(\mu)=C\times S^1$, where $C$ is a closed subset of $[0, 1]$, and for any $t\in C$, the curve $f_t(S^1)$, defined by $f_t(x)=F(t,x)$, is contained in a leaf $l_t$,
\item for any $x\in S^1$, the curve $t\mapsto F(t,x)$ is transverse to $\mu$,
\item $f_0$ is an essential curve in $l_0$, but there is a sequence of points $\{t_n\}$ in $C$ such that $\lim_{n\to\infty}t_n=0$ and $f_{t_n}(S^1)$ bounds a disk in $l_{t_n}$ for all $t_n$.
\end{enumerate}
\end{definition}
The following lemma from \cite{L4} will be useful in our proof of Lemma~\ref{Lfine}.
\begin{lemma}[Lemma 4.3 of \cite{L4}]\label{Lvan}
Let $M$ be a closed orientable and irreducible 3--manifold, and $\mu\subset M$ an exceptional minimal measured lamination. Suppose $\mu$ is fully carried by a branched surface $B$ and $B$ does not carry any $2$--sphere. Then $\mu$ has no embedded vanishing cycle.
\end{lemma}
The proof of the following lemma is similar in spirit to part of the proof of Lemma 4.5 in \cite{L4}.
\begin{lemma}\label{Lleaf}
Let $B$ be a branched surface in a closed, orientable and irreducible 3--manifold $M$, and $M\ne T^3$. Suppose $B$ does not carry any 2--sphere or torus, and suppose $B$ fully carries a measured lamination $\mu$. Then $\mu$ does not contain any plane leaf, infinite annular leaf or infinite M\"{o}bius band leaf.
\end{lemma}
\begin{proof}
By Theorem~\ref{TMS}, we may assume $\mu$ is an exceptional minimal measured lamination; in particular, every leaf is dense in $\mu$.
Suppose every leaf of $\mu$ is a plane. After trivially eliminating all the disks of contact in $N(B)$ that are disjoint from $\mu$, we have that $\partial_hN(B)$ consists of disks. So there is no monogon and $\mu$ is an essential lamination. By a theorem in \cite{G7} (also see Proposition 4.2 of \cite{L1}), $M\cong T^3$, contradicting the hypothesis that $M\ne T^3$.
So at least one leaf of $\mu$ is not a plane. Let $\gamma$ be an essential simple closed curve in a non-plane leaf. Since $\mu$ is a measured lamination, there is no holonomy. So there is an embedded vertical annulus $S^1\times I\subset N(B)$ such that $\gamma\subset S^1\times I$ and $\mu\cap(S^1\times I)$ is a union of parallel circles. Suppose $L$ is a plane leaf of $\mu$. Since every leaf is dense, $L\cap (S^1\times I)$ contains infinitely many circles whose limit is $\gamma$. As $L$ is a plane, these circles bound disks in $L$. By Definition~\ref{Dvan}, $\gamma$ is an embedded vanishing cycle, and this contradicts Lemma~\ref{Lvan}. So $\mu$ does not contain any plane leaf.
Suppose $\mu\subset N(B)$ and $A$ is an infinite annular leaf (or an infinite M\"{o}bius band leaf) of $\mu$. Let $\gamma$ be an essential simple closed curve in $A$. There is an embedded vertical annulus $S^1\times I\subset N(B)$ such that $\gamma\subset S^1\times I$, and $\mu\cap (S^1\times I)$ is a union of parallel circles. Since every leaf is dense in $\mu$, $A\cap (S^1\times I)$ contains infinitely many circles whose limit is $\gamma$. By Lemma~\ref{Lvan}, we may assume that only finitely many circles of $A\cap (S^1\times I)$ are trivial in $A$. So there exist 3 essential simple closed curves in $A\cap (S^1\times I)$, $\gamma_i$ ($i=1,2,3$), such that $\gamma_1\cup\gamma_3$ bounds a compact sub-annulus $A_\gamma$ in $A$ with $int(A_\gamma)\cap (S^1\times I)=\gamma_2$. By Proposition~\ref{Ptorus}, $B$ carries a torus, contradicting our hypotheses.
\end{proof}
\begin{lemma}\label{Linject}
Let $B$ be a branched surface in $M$. Suppose $N(B)$ does not contain any disk of contact and $\partial_hN(B)$ has no disk component. Let $\lambda\subset N(B)$ be a lamination fully carried by $N(B)$. Then every leaf of $\lambda$ is $\pi_1$--injective in the 3--manifold $N(B)$.
\end{lemma}
\begin{proof}
We may use the arguments in \cite{GO} to prove this lemma directly, but it is more convenient to simply use a theorem of \cite{GO}. Since $\partial_hN(B)$ has no disk component, no component of $\partial N(B)$ is a 2--sphere. For each component $S$ of $\partial N(B)$, we may glue to $N(B)$ (along $S$) a compact orientable and irreducible 3--manifold $M_S$, whose boundary $\partial M_S\cong S$ is incompressible in $M_S$. So we can obtain a closed 3--manifold $M'$ this way with $N(B)\subset M'$. Since $S$ is $\pi_1$--injective in $M_S$, the inclusion $i:N(B)\hookrightarrow M'$ induces an injection on $\pi_1$.
If $\partial_hN(B)$ is compressible in $M'-int(N(B))$, then we have a compressing disk $D$ with $\partial D\subset\partial_hN(B)\cap S$, where $S$ is a boundary component of $N(B)$. As $S$ is incompressible in $M_S$, $\partial D$ must bound a disk $E$ in $S$, which implies that $E$ contains a disk component of $\partial_hN(B)$, contradicting our hypotheses. So $\partial_hN(B)$ must be incompressible in $M'-int(N(B))$. There is clearly no monogon by the construction and no disk of contact by our hypotheses. Moreover, since $\partial_hN(B)$ has no disk component and there is no monogon, it is easy to see that there is no Reeb component for $N(B)$. Therefore, by \cite{GO}, $\lambda$ is an essential lamination in the closed manifold $M'$, and every leaf of $\lambda$ is $\pi_1$--injective in $M'$ hence $\pi_1$--injective in $N(B)$.
\end{proof}
The following lemma from \cite{L4} is also useful in the proof of Lemma~\ref{Lfine}.
\begin{lemma}[Lemma 4.1 of \cite{L4}]\label{Lnodoc}
Let $B$ be a branched surface fully carrying a lamination $\mu$. Suppose $\partial_hN(B)$ has no disk component and $N(B)$ does not contain any disk of contact that is disjoint from $\mu$. Then $N(B)$ does not contain any disk of contact.
\end{lemma}
Now, Lemma~\ref{Lfine} follows easily from the previous lemmas.
\begin{lemma}\label{Lfine}
Let $B$ be a branched surface in a closed, orientable and irreducible 3--manifold $M$. Suppose $B$ does not carry any 2--sphere or torus, and $B$ fully carries a measured lamination $\mu$. Then $B$ can be split into a branched surface $B_1$ such that $B_1$ still fully carries $\mu$, no component of $\partial_hN(B_1)$ is a disk, and every leaf of $\mu$ is $\pi_1$--injective in $N(B_1)$.
\end{lemma}
\begin{proof}
By Theorem~\ref{TMS}, we may assume that $\mu$ is an exceptional minimal measured lamination. Since $B$ does not carry any 2--sphere or torus, by Lemma~\ref{Lleaf}, no leaf of $\mu$ is a plane. After some isotopy, we may assume $\partial_hN(B)\subset\mu$. Hence we can split $N(B)$ so that each component of $\partial_hN(B)$ contains an essential curve of the corresponding leaf. So no component of $\partial_hN(B)$ is a disk after the splitting.
By splitting $N(B)$, we may trivially eliminate all the disks of contact that are disjoint from $\mu$. So, by Lemma~\ref{Lnodoc}, $N(B)$ does not contain any disk of contact. Now the lemma follows from Lemma~\ref{Linject}.
\end{proof}
The following proposition is well known. It also plays a fundamental role in \cite{L5}.
\begin{proposition}\label{PHaken}
Let $M$ be a closed irreducible and orientable 3--manifold and $B$ a branched surface in $M$ carrying a measured lamination $\mu$. If $\mu$ is an essential lamination, then $B$ carries an incompressible surface and hence $M$ is Haken.
\end{proposition}
\begin{proof}
By \cite{GO}, if $\mu$ is an essential lamination, then one can split $B$ into an incompressible branched surface $B'$ that fully carries $\mu$. Since $\mu$ is a measured lamination, the system of branch equations for $B'$ must have a positive solution. Since the coefficients of each branch equation are integers, the solution set is defined over $\mathbb{Q}$, so the system also has a positive rational solution, and clearing denominators yields a positive integer solution. Thus $B'$ fully carries a closed orientable surface. By \cite{FO}, every closed surface fully carried by an incompressible branched surface is incompressible.
\end{proof}
\section{Limits of compact surfaces}\label{Slimit}
Let $B$ be a branched surface in a closed 3--manifold $M$, and $F\subset N(B)$ a closed surface carried by $B$. Then $F$ corresponds to a non-negative integer solution to the branch equations of $B$, see section 3 of \cite{L4} for a brief explanation and see \cite{FO,O} for more details. We use $\mathcal{S}(B)\subset\mathbb{R}^N$ to denote the set of non-negative solutions to the branch equations of $B$, where $N$ is the number of branch sectors of $B$. There is a one-to-one correspondence between a closed surface carried by $B$ and an integer point in $\mathcal{S}(B)$. A surface is fully carried by $B$ if and only if every coordinate of the corresponding point in $\mathcal{S}(B)$ is positive.
Every point in $\mathcal{S}(B)$, integer point or non-integer point, corresponds to a measured lamination carried by $B$. Such a measured lamination $\mu$ can be viewed as the inverse limit of a sequence of splittings $\{B_n\}_{n=0}^\infty$, where $B_0=B$ and $B_{i+1}$ is obtained by splitting $B_i$. Note that if $B_{i+1}$ is obtained by splitting $B_i$, one may naturally consider $N(B_{i+1})\subset N(B_i)$. We refer to section 3 of \cite{L4} for a brief description, see \cite{O} and section 3 of \cite{Hat} for more details (also see Definition 4.1 and Lemma 4.2 of \cite{GO}). There is a one-to-one correspondence between each point in $\mathcal{S}(B)$ and a measured lamination constructed in this fashion. This one-to-one correspondence is slightly different from the one above for integer points of $\mathcal{S}(B)$. For an integer point, the sequence of splittings on $B$ above stops after a finite number of steps (i.e., $B_{i+1}=B_i$ is a closed surface if $i$ is large), and the measured lamination constructed this way is the horizontal foliation of an $I$--bundle over a closed surface.
We define the \emph{projective lamination space} of $B$, denoted by $\mathcal{PL}(B)$, to be the set of points in $\mathcal{S}(B)$ satisfying $\sum_{i=1}^Nx_i=1$. Let $p: \mathcal{S}(B)-\{0\}\to\mathcal{PL}(B)$ be the natural projection sending $(x_1,\dots,x_N)$ to $\frac{1}{s}(x_1,\dots,x_N)$, where $s=\sum_{i=1}^Nx_i$. To simplify notation, we do not distinguish a point $x\in\mathcal{S}(B)$ and its image $p(x)\in\mathcal{PL}(B)$ unless necessary. $\mathcal{PL}(B)$ is a compact set. For any infinite sequence of distinct closed surfaces carried by $B$, the images of the corresponding points in $\mathcal{PL}(B)$ (under the map $p$) have an accumulation point, which corresponds to a measured lamination $\mu$. To simplify notation, we simply say that the measured lamination $\mu$ is an accumulation point of this sequence of surfaces in $\mathcal{PL}(B)$. Throughout this paper, when we consider a compact surface carried by $B$, we identify the surface with an integer point in $\mathcal{S}(B)$, but when we consider $\mu$ as a limit point of a sequence of compact surfaces in $\mathcal{PL}(B)$, we identify the point $\mu\in\mathcal{PL}(B)$ with a measured lamination as the inverse limit of the sequence of splittings on $B$ above.
\begin{proposition}\label{Plinear}
Let $B$ be a branched surface with $n$ branch sectors and $\{S_k=(x_1^{(k)}, \dots, x_n^{(k)})\}$ an infinite sequence of integer points in $\mathcal{S}(B)$ whose images in $\mathcal{PL}(B)$ are distinct points. Suppose $\mu=(z_1,\dots,z_n)\in\mathcal{PL}(B)$ is the limit point of $\{S_k\}$ in the projective lamination space. Let $f(x_1,\dots,x_n)$ be a homogeneous linear function with $n$ variables. Then we have the following.
\begin{enumerate}
\item If $z_i=0$ and $z_j\ne 0$, then $\lim_{k\to\infty}x_i^{(k)}/x_j^{(k)}=0$.
\item If $z_i>z_j$, then $x_i^{(k)}>x_j^{(k)}$ if $k$ is sufficiently large.
\item If the sequence $\{f(S_k)\}$ is bounded, then $f(\mu)=0$.
\end{enumerate}
\end{proposition}
\begin{proof}
Let $s_k=\sum_{i=1}^nx_i^{(k)}$. Then the corresponding point of $S_k$ in $\mathcal{PL}(B)$ is $[S_k]=(x_1^{(k)}/s_k,\dots,x_n^{(k)}/s_k)$. By our hypotheses, $\lim_{k\to\infty}x_i^{(k)}/s_k=z_i$ for each $i$. Thus, if $z_i=0$ and $z_j\ne 0$, we have $\lim_{k\to\infty}x_i^{(k)}/x_j^{(k)}=z_i/z_j=0$.
Since $x_i^{(k)}/s_k>x_j^{(k)}/s_k$ is equivalent to $x_i^{(k)}>x_j^{(k)}$, part 2 is obvious.
Since $f(x_1,\dots,x_n)$ is a homogeneous linear function, $f([S_k])=f(S_k)/s_k$ and $\lim_{k\to\infty}f([S_k])=f(\mu)$. Since the sequence $\{S_k=(x_1^{(k)},\dots,x_n^{(k)})\}$ consists of distinct non-negative integer solutions, the integers $\{s_k\}$ are unbounded. So, after passing to a sub-sequence if necessary, we have $\lim_{k\to\infty}s_k=\infty$. Therefore, if the sequence $\{f(S_k)\}$ is bounded, then $\lim_{k\to\infty}f(S_k)/s_k=f(\mu)=0$.
\end{proof}
\begin{corollary}\label{Clinear}
Let $\{S_k\}\subset N(B)$ be a sequence of distinct compact connected surfaces carried by a branched surface $B$. Suppose $\mu\subset N(B)$ is the measured lamination corresponding to the limit of $\{S_k\}$ in $\mathcal{PL}(B)$, and let $K$ be an $I$--fiber of $N(B)$ such that $K\cap\mu\ne\emptyset$. Then, if $k$ is large, $|K\cap S_k|$, the number of intersection points of $K$ and $S_k$, is large.
\end{corollary}
\begin{proof}
The number of intersection points of an $I$--fiber and $S_k$ is equal to the integer value of a coordinate of the corresponding point in $\mathcal{S}(B)$. So the corollary follows immediately from part 3 of Proposition~\ref{Plinear} after setting the linear function to $f(x_1,\dots,x_n)=x_i$, where $x_i$ corresponds to the branch sector of $B$ that contains the point $\pi(K)$ ($x_i=|K\cap S_k|$).
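Concretely (restating the computation behind the citation): writing $s_k$ for the coordinate sum of $S_k$, the proof of Proposition~\ref{Plinear} gives $s_k\to\infty$ (after passing to a sub-sequence if necessary), and $x_i^{(k)}/s_k\to z_i>0$ because $K\cap\mu\ne\emptyset$; hence
\[
|K\cap S_k|\;=\;x_i^{(k)}\;=\;s_k\cdot\frac{x_i^{(k)}}{s_k}\;\longrightarrow\;\infty .
\]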
\end{proof}
We call a lamination $\mu$ a normal lamination with respect to a triangulation if every leaf of $\mu$ is a (possibly non-compact) normal surface.
\begin{corollary}\label{Cnormal}
Let $M$ be a closed 3--manifold with a fixed triangulation, and let $B$ be a branched surface obtained by gluing together a collection of normal disks and at most one almost normal piece, similar to \cite{FO}. Suppose $\{S_n\}$ is an infinite sequence of distinct connected almost normal surfaces fully carried by $B$. Then each accumulation point of $\{S_n\}$ in $\mathcal{PL}(B)$ must correspond to a normal measured lamination.
\end{corollary}
\begin{proof}
If $B$ does not contain an almost normal piece, then every surface carried by $B$ is normal and there is nothing to prove. Suppose $s$ is a branch sector of $B$ containing the almost normal piece. Since $B$ fully carries an almost normal surface, $B-int(s)$ must be a sub-branched surface of $B$ and every lamination carried by $B-int(s)$ is normal ($B-int(s)$ is called the normal part of $B$ in section 2 of \cite{L4}). Suppose $S_n=(x_1,\dots,x_N)\in\mathcal{S}(B)$ and suppose $x_1$ is the coordinate corresponding to the branch sector $s$. Since an almost normal surface has at most one almost normal piece, $x_1=1$ for each $S_n$. Suppose $\mu=(z_1,\dots,z_N)\in\mathcal{PL}(B)$. By Proposition~\ref{Plinear} and Corollary~\ref{Clinear}, $z_1$ must be zero. Hence $\mu$ is carried by $B-int(s)$ and is a normal lamination.
\end{proof}
Now, we will use two examples to illustrate the limit of closed surfaces. Although the two examples involve train tracks, similar results hold for branched surfaces.
\begin{example}\label{Extrain}
Let $\tau$ be a train track in the plane as shown in Figure~\ref{traintrack}(a). There are 8 branch sectors in $\tau$, and the branch equations are $x_1+x_4=x_3=x_2+x_6$ and $x_7+x_4=x_5=x_8+x_6$. Suppose $\{\gamma_n\}$ is an infinite sequence of compact arcs carried by $\tau$ whose limit in $\mathcal{PL}(\tau)$ is the point $\mu=(0,0,1/4,1/4,1/4,1/4,0,0)$. Geometrically $\mu$ is a measured lamination consisting of parallel circles carried by $\tau$. We identify $\gamma_n$ with an integer point in $\mathcal{S}(\tau)$ and suppose the $\gamma_n$'s are different points in $\mathcal{S}(\tau)$. Note that $\gamma_n$ contains a circle if and only if $x_1=x_2$ and $x_7=x_8$. By Proposition~\ref{Plinear} and Corollary~\ref{Clinear}, as $n$ tends to infinity, the values $x_6$ and $x_6/x_2$ of $\gamma_n$ tend to infinity. This implies that, if $n$ is large, $\gamma_n$ contains either many parallel circles or a spiral wrapping around the circle many times.
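For concreteness, one family of integer points in $\mathcal{S}(\tau)$ with this limiting behavior (an illustrative choice of ours, not the only one) is
\[
\gamma_n=(1,\,1,\,n+1,\,n,\,n+1,\,n,\,1,\,1),\qquad n=1,2,\dots,
\]
which satisfies both branch equations. The coordinate sum is $4n+6$, so in $\mathcal{PL}(\tau)$ we have $\frac{1}{4n+6}\gamma_n\to(0,0,1/4,1/4,1/4,1/4,0,0)=\mu$, while $x_6=n$ and $x_6/x_2=n$ tend to infinity, as claimed.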
\end{example}
\begin{figure}\label{traintrack}
\end{figure}
In the proof of the main theorem, we will consider the limit lamination $\mu$ of an infinite sequence of almost normal Heegaard surfaces carried by a branched surface $B$. The measured lamination $\mu$ is fully carried by a sub-branched surface $B^-$ of $B$. In many situations, we would like to split $B^-$ into a nicer branched surface $B^-_1$. In fact, by considering $\mu\subset N(B^-)\subset N(B)$, we can split $N(B^-)$ and $N(B)$ simultaneously and obtain $\mu\subset N(B_1^-)\subset N(B_1)$, such that $B_1^-$ is the sub-branched surface of $B_1$ that fully carries $\mu$, $B_1$ is obtained by splitting $B$, and $B_1$ still carries an infinite sub-sequence of $\{S_n\}$. Next, we will use Example~\ref{Exsplit} to illustrate how the local splittings work. We also formulate this fact in Proposition~\ref{Psplit}. Proposition~\ref{Psplit} is similar in spirit to Lemma 6.1 of \cite{L4}.
\begin{example}\label{Exsplit}
Let $\tau$ be the train track on the top of Figure~\ref{splitting}. As shown in Figure~\ref{splitting}, $\tau$ can be split into 3 different train tracks $\tau_1$, $\tau_2$ and $\tau_3$. Suppose $\mu$ is a lamination fully carried by $\tau$. Let $x_1,\dots,x_5$ be the weights of $\mu$ at the branch sectors of $\tau$. These $x_i$'s satisfy the branch equations $x_1+x_3=x_5=x_2+x_4$. It is easy to see that $x_1<x_2$ (resp. $x_1>x_2$) if and only if $\mu$ is fully carried by $\tau_1$ (resp. $\tau_3$), and $x_1=x_2$ if and only if $\mu$ is fully carried by $\tau_2$. Suppose $\{S_n\}$ is an infinite sequence of compact arcs carried by $\tau$ and suppose each $S_n$ corresponds to a different integer point in $\mathcal{S}(\tau)$. Suppose the limit of $\{S_n\}$ in $\mathcal{PL}(\tau)$ is $\mu$.
By part 2 of Proposition~\ref{Plinear}, if $x_1<x_2$ (resp. $x_1>x_2$) for $\mu$, we can split $\tau$ into $\tau_1$ (resp. $\tau_3$), and $\tau_1$ (resp. $\tau_3$) fully carries $\mu$ and an infinite sub-sequence of $\{S_n\}$. Now, we consider the case $x_1=x_2$ for $\mu$. Although we can split $\tau$ (along $\mu$) into $\tau_2$ which fully carries $\mu$, $\tau_2$ may not carry infinitely many $S_n$'s. Nonetheless, if $\tau_2$ only carries finitely many $S_n$'s, then at least one of $\tau_1$ and $\tau_3$, say $\tau_1$, must carry an infinite sub-sequence of $\{S_n\}$. Moreover, $\tau_1$ can be considered as the train track obtained by adding a branch sector to $\tau_2$, and $\tau_1$ can be obtained by splitting $\tau$.
\end{example}

Now we consider the splittings on branched surfaces. Note that any splitting on a branched surface can be viewed as a sequence of successive local splittings, and the operations of such local splittings on a branched surface are basically the same as the splittings on the train track in Example~\ref{Exsplit}. So we have the following proposition.
\begin{figure}\label{splitting}
\end{figure}
\begin{proposition}\label{Psplit}
Let $B$ be a branched surface and $\{S_n\}\subset\mathcal{S}(B)$ a sequence of distinct compact surfaces carried by $B$. Suppose $\mu\in\mathcal{PL}(B)$ is the limit point of $\{S_n\}$ in $\mathcal{PL}(B)$. Let $B^-$ be the sub-branched surface of $B$ that fully carries $\mu$. Let $B^-_1$ be any branched surface obtained by splitting $B^-$ along $\mu$, and suppose $B^-_1$ still fully carries $\mu$. Then one can add some branch sectors to $B^-_1$ to form a branched surface $B_1$ (i.e. $B_1^-$ is a sub-branched surface of $B_1$), such that $B_1$ can be obtained by splitting $B$, and $B_1$ carries an infinite sub-sequence of $\{S_n\}$.
\end{proposition}
\begin{proof}
This proposition is similar in spirit to Lemma 6.1 of \cite{L4}. The splitting from $B^-$ to $B_1^-$ can be divided into a sequence of successive small local splittings, and each local splitting is similar to the splittings in Example~\ref{Exsplit} and Figure~\ref{splitting}. During each local splitting, we can split $B^-$ and $B$ simultaneously. If $B$ fails to carry infinitely many $S_n$'s after a local splitting, similar to the operation of obtaining $\tau_1$ by adding a branch sector to $\tau_2$ in Example~\ref{Exsplit}, we can always add some branch sectors to get a branched surface satisfying the requirements of the proposition.
\end{proof}
\begin{remark}
In Proposition~\ref{Psplit}, $B_1^-$ is the sub-branched surface of $B_1$ that fully carries $\mu$. Since any lamination carried by $B_1$ is carried by $B$, it is easy to see that $\mu\subset N(B_1)$ is still the limit point in $\mathcal{PL}(B_1)$ of the sub-sequence of $\{S_n\}$ carried by $B_1$.
\end{remark}
\begin{remark}\label{Rsss}
Let $\{S_n\}$ be an infinite sequence of distinct closed surfaces carried by $N(B)$ whose limit in $\mathcal{PL}(B)$ is a measured lamination $\mu$. Let $\gamma$ be a simple closed essential curve in a leaf of $\mu$. If every $I$--fiber of $N(B)$ intersects $\gamma$ in at most one point, then (after a slight enlargement) $\pi^{-1}(\pi(\gamma))$ can be considered as a fibered neighborhood of a train track consisting of a circle $\pi(\gamma)$ and some ``tails" along the circle similar to Figure~\ref{traintrack}, where $\pi:N(B)\to B$ is the map collapsing each $I$--fiber to a point. Since the limit of $\{S_n\}$ is $\mu$, $\pi^{-1}(\pi(\gamma))\cap S_i$ ($i=1,2,\cdots$) is a sequence of curves whose limit is a measured lamination containing the circle $\gamma$. As in Example~\ref{Extrain}, if $n$ is large, $\pi^{-1}(\pi(\gamma))\cap S_n$ must contain either many circles parallel to $\gamma$ or a spiral winding around $\gamma$ many times. However, if there is an $I$--fiber of $N(B)$ intersecting $\gamma$ in more than one point, then $\pi(\gamma)$ is an immersed curve in $B$. Nevertheless, since $\gamma$ is an embedded essential curve in a leaf of $\mu$, by Theorem~\ref{TMS}, after a finite sequence of splittings on $B$, we can get a branched surface $B_1$ such that $B_1$ still carries $\mu$ and $\pi|_\gamma$ is injective, where $\pi:N(B_1)\to B_1$ is the collapsing map, (i.e., every $I$--fiber of $N(B_1)$ intersects $\gamma$ in at most one point). Moreover, by Proposition~\ref{Psplit}, we may assume $B_1$ still carries an infinite sub-sequence of $\{S_n\}$. Now the situation is the same as above after replacing $B$ by $B_1$.
\end{remark}
The next lemma says that, if the branched surface is nice, then the limit of trivial circles in a sequence of closed surfaces cannot be an essential circle in the limit lamination.
\begin{lemma}\label{Lessential}
Let $M$ be a closed 3--manifold with a fixed triangulation, and let $B$ be a branched surface obtained by gluing together a collection of normal disks and at most one almost normal piece, as in Theorem~\ref{Heeg1}. Suppose $N(B)$ does not carry any normal 2--sphere or normal torus. Let $\{S_n\}$ be a sequence of distinct closed almost normal surfaces fully carried by $N(B)$ whose limit in $\mathcal{PL}(B)$ is a measured lamination $\mu$. Let $\gamma$ be an essential simple closed curve in a leaf of $\mu$. Then $B$ can be split into a branched surface $B_1$ that carries both $\mu$ and an infinite sub-sequence $\{S_{n_k}\}$ of $\{S_n\}$, such that, for any embedded vertical annulus $A\supset\gamma$ in $N(B_1)$, $A\cap S_{n_k}$ does not contain any circle that is trivial in the surface $S_{n_k}$, for each $S_{n_k}$.
\end{lemma}
\begin{proof}
Let $A_\gamma$ be an embedded vertical annulus in $N(B)$ containing $\gamma$. Suppose $A_\gamma\cap S_n$ contains a trivial circle in $S_n$ for each $n$. Such a trivial circle bounds a disk $D_n$ in $S_n$. So $D_n$ is transverse to the $I$--fibers of $N(B)$, with $\partial D_n\subset A_\gamma$. Let $s$ be the branch sector containing the almost normal piece, and let $B'=B-int(s)$ be the sub-branched surface of $B$ ($B'$ is called the normal part of $B$, see section 2 of \cite{L4}). By Corollary~\ref{Cnormal}, $\mu$ is carried by $B'$. So we can assume that if $D_n$ contains an almost normal piece, the almost normal piece lies in $int(D_n)$. Since $S_n$ is an almost normal surface, $D_n$ contains at most one almost normal piece.
We call an isotopy of $N(B)$ a $B$--isotopy, if the isotopy is invariant on each $I$--fiber of $N(B)$.
\noindent\textbf{Claim}. Up to $B$--isotopy, there are only finitely many such disks $D_n$.
To prove the claim, we first consider such disks that do not contain almost normal pieces. If $D_n$ does not contain an almost normal piece, then we may assume that $D_n$ lies in $N(B')$ transverse to the $I$--fibers of $N(B')$, and consider $A_\gamma$ as an embedded vertical annulus in $N(B')$. Let $\mathcal{S}_\gamma$ be the set of embedded compact surfaces $F$ in $N(B')$ with the properties that $F$ is transverse to the $I$--fibers of $N(B')$ and $\partial F$ is a single circle in $A_\gamma$. Similar to $\mathcal{S}(B')$, we can describe $\mathcal{S}_\gamma$ as the set of non-negative integer solutions of a system of non-homogeneous linear equations as follows, see \cite{AL} for such a description for disks of contact. Let $L'$ be the branch locus of $B'$ and suppose $\pi(A_\gamma)$ is an immersed curve in $B'$. Suppose $b_1,\dots, b_N$ are the components of $B'-L'-\pi(A_\gamma)$. For each $b_i$ and any $F\in\mathcal{S}_\gamma$, let $x_i=|F\cap\pi^{-1}(b_i)|$. One can describe $F$ using a non-negative integer point $(x_1,\dots,x_N)\in\mathbb{R}^N$, and $(x_1,\dots,x_N)$ is a solution of the system of (non-homogeneous) linear equations in the forms of $x_k=x_i+x_j$ and $x_i=x_j+1$. Equations like $x_i=x_j+1$ occur when two pieces are glued along $\pi(A_\gamma)$, since $\pi(\partial F)=\pi(A_\gamma)$. Up to $B'$--isotopy, there are only finitely many surfaces corresponding to the same integer point in $\mathcal{S}_\gamma$. Moreover, the corresponding homogeneous system is exactly the system of branch equations of $B'$. Suppose there is an infinite sequence of distinct disks $\{D_n\}$ in $\mathcal{S}_\gamma$. Then one can find $D_i=(x_1,\dots,x_N)$ and $D_j=(y_1,\dots,y_N)$ such that $x_k\le y_k$ for each $k$. Thus $D_j-D_i=(y_1-x_1,\dots,y_N-x_N)$ is a non-negative integer solution to the corresponding homogeneous system, i.e., the system of branch equations. So $D_j-D_i$ corresponds to a closed surface carried by $B'$. Since the Euler characteristic is additive, $\chi(D_j-D_i)=\chi(D_j)-\chi(D_i)=0$. This means $B'$ carries a closed surface (which may not be connected) with total Euler characteristic $0$, which implies that $B'$ must carry a connected surface with non-negative Euler characteristic. If $B'$ carries a Klein bottle (or projective plane), $B'$ must carry a torus (or $2$--sphere) because $M$ is orientable. Since $B'=B-int(s)$, every surface carried by $B'$ is normal. This contradicts the hypothesis that $B$ does not carry any normal 2--sphere or normal torus.
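Two routine points are used here. First, any infinite set of points in $\mathbb{Z}_{\ge 0}^N$ contains two points that are comparable coordinate-wise, which is why $D_i$ and $D_j$ as above exist. Second, the difference of two solutions of the non-homogeneous system solves the homogeneous system: if $D_i=(x_1,\dots,x_N)$ and $D_j=(y_1,\dots,y_N)$ both satisfy an equation $x_c=x_a+x_b$ (resp. $x_a=x_b+1$) of the system, then
\[
(y_c-x_c)=(y_a-x_a)+(y_b-x_b)\qquad\text{(resp. } (y_a-x_a)=(y_b-x_b)\text{)},
\]
so the constant terms cancel and $D_j-D_i$ satisfies the branch equations of $B'$; moreover $\chi(D_i)=\chi(D_j)=1$, which is why the closed surface corresponding to $D_j-D_i$ has total Euler characteristic $0$.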
Suppose there is an infinite sequence of disks $\{D_n\}$ from the $S_n$'s, such that each $D_n$ contains an almost normal piece. As above, we can also identify each $D_n$ as an integer solution of a system of non-homogeneous linear equations. Up to $B$--isotopy, there are only finitely many such disks corresponding to the same integer point. If the disks $\{D_n\}$ correspond to different integer points, then one can find $D_i=(x_1,\dots,x_K)$ and $D_j=(y_1,\dots,y_K)$ such that $x_k\le y_k$ for each $k$. Suppose the first coordinate corresponds to the branch sector $s$ that contains the almost normal piece. Since each $S_n$ is an almost normal surface, each $D_n$ contains only one almost normal piece. Hence, $x_1=y_1=1$ and the first coordinate of $D_j-D_i$ is $y_1-x_1=0$. This means that $D_j-D_i$ does not contain an almost normal piece and is carried by $B'$. Now the argument is the same as above. This finishes the proof of the claim.
Let $B^-$ be the sub-branched surface of $B$ fully carrying $\mu$. As described earlier in this section and in section 3 of \cite{L4}, we may consider $\mu$ as the inverse limit of an infinite sequence of splittings on $B^-$. Suppose $\{B_n^-\}_{n=0}^\infty$ ($B_0^-=B^-$) is such a sequence of branched surfaces, with each $B_i^-$ obtained by splitting $B_{i-1}^-$ and $\mu$ being the inverse limit of the sequence $\{N(B_n^-)\}$. Note that if $\mu$ consists of compact leaves, then such splittings are a finite process. By Theorem~\ref{TMS}, we only consider the case that $\mu$ is an exceptional minimal measured lamination, and the proof for the case that $\mu$ consists of compact leaves is similar. By Proposition~\ref{Psplit}, we may assume there is a sequence of branched surfaces $\{B_n\}$ ($B_0=B$) such that, for each $n$, $B_{n+1}$ is obtained by splitting $B_n$, $B_n$ carries $\mu$ and an infinite sub-sequence of $\{S_n\}$, and $B_n^-$ is a sub-branched surface of $B_n$.
Let $A_k\subset N(B_k^-)$ be a vertical annulus containing $\gamma$. By Lemma~\ref{Lfine}, after some splittings, we may assume that if $k$ is sufficiently large, every leaf of $\mu$ is $\pi_1$--injective in $N(B_k^-)$. Since $\gamma$ is an essential curve in a leaf, if $k$ is sufficiently large, there is no disk $D$ in $N(B_k^-)$ transverse to the $I$--fibers and with $\partial D\subset A_k$. Now, suppose $D\subset N(B_k)$ is a disk in $S_n$ with $\partial D\subset A_k$. So $D$ cannot be totally in $N(B_k^-)$. If $\mu\cap D\ne\emptyset$ under any $B_k$--isotopy, since $\mu$ is the inverse limit of the infinite sequence of splittings, these splittings $\{B_k^-\}$ will eventually cut through $D$. By the claim above, there are only finitely many such disks $D$. So, if $m$ is sufficiently large, there is no such disk $D\subset N(B_m)$ with $\mu\cap D\ne\emptyset$. If $D\cap\mu=\emptyset$, since $D$ cannot be totally in $N(B_k^-)$ as above, we can split $B_k$ and $B_k^-$ further so that $D$ is carried by $B_k-B_k^-$ and hence $\partial D\not\subset A_k$ after this splitting. Since there are only finitely many such disks $D$, after a finite sequence of splittings, we get a branched surface $B_k$ satisfying the requirements of the lemma.
We should note that the assumption that $B$ does not carry any normal torus is important. For example, if $\mu$ is a torus, one can easily construct a counter-example using an infinite sequence of disks wrapping around $\mu$ like the Reeb component.
\end{proof}
\begin{lemma}\label{Limmerse}
Let $M$, $B$, $\{S_n\}$ and $\mu$ be as in Lemma~\ref{Lessential}. Let $\gamma$ be an immersed essential closed curve in a leaf of $\mu$. Then $B$ can be split into a branched surface $B_1$ that carries $\mu$ and an infinite sub-sequence $\{S_{n_k}\}$ of $\{S_n\}$, such that, for each $k$, $S_{n_k}$ contains no embedded disk $D$ with the property that $\pi(\partial D)=\pi(\gamma)$, where $\pi: N(B_1)\to B_1$ is the collapsing map.
\end{lemma}
\begin{proof}
This lemma is basically the same as Lemma~\ref{Lessential}. Although the curve $\gamma$ may not be embedded, each $S_n$ is embedded. Hence there are only finitely many different configurations for $\partial D$. So the lemma follows from the same arguments in the proof of Lemma~\ref{Lessential}.
\end{proof}
\section{Helix-turn-helix bands}\label{Shelix}
A technical part in the proof of the main theorem is to construct compressing disks for the two handlebodies of the Heegaard splitting using $N(B)$. Such compressing disks are constructed using a complicated band in $N(B)$ that connects two parallel monogons, as shown in Figure~\ref{monogon} (a). The purpose of this section is to demonstrate how to construct these bands. Such bands are constructed using a local picture of the limit lamination of a sequence of Heegaard surfaces. We will start with an example one dimension lower.
\begin{definition}
Let $A=S^1\times I$ be an annulus and $\alpha$ a compact spiral in $A$ transverse to the $I$--fibers. We define the \emph{winding number} of $\alpha$, denoted by $w(\alpha)$, to be the smallest intersection number of $\alpha$ with an $I$--fiber of $A$.
\end{definition}
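For instance (an illustrative remark, not needed later in this exact form): if $\alpha$ is a single spiral that sweeps monotonically around $A$ through a total angle at least $2\pi w$ but less than $2\pi(w+1)$, then every $I$--fiber meets $\alpha$ in either $w$ or $w+1$ points, so
\[
w(\alpha)\;=\;\min_{x\in S^1}\bigl|\alpha\cap(\{x\}\times I)\bigr|\;=\;w;
\]
taking the smallest intersection number thus counts the number of complete trips $\alpha$ makes around the annulus.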
\begin{example}\label{exspiral}
Let $\tau$ be a train track obtained by attaching two ``tails" to a circle $\gamma$, as shown in Figure~\ref{traintrack}(b). Curves fully carried by $\tau$ must consist of spirals. We use $x_1,\dots,x_4$ to denote the 4 branch sectors of $\tau$, and the branch equations are $x_1+x_3=x_4$ and $x_2+x_3=x_4$. Suppose $\{\gamma_n\}$ is an infinite sequence of positive integer solutions to the branch equations whose limit $\mu$ in $\mathcal{PL}(\tau)$ is a measured lamination consisting of parallel circles carried by $\tau$. So the coordinates of $\mu$ in $\mathcal{PL}(\tau)$ are $(0,0,1/2,1/2)$. Let $\gamma_i=(x_1^{(i)},\dots,x_4^{(i)})\in\mathcal{S}(\tau)$ be the corresponding sequence of integer points. For each $\gamma_i$, we denote the number of components of $\gamma_i$ by $h(\gamma_i)$ and clearly, $h(\gamma_i)=x_1^{(i)}=x_2^{(i)}$. Moreover, the winding number of each component of $\gamma_i$ is $w(\gamma_i)=x_3^{(i)}/h(\gamma_i)$. Because of the branch equations, we have $\gamma_i=(x_1^{(i)},x_1^{(i)},x_3^{(i)}, x_1^{(i)}+x_3^{(i)})$. Since the limit of these points in $\mathcal{PL}(\tau)$ is $(0,0,1/2,1/2)$, by part 1 of Proposition~\ref{Plinear}, we have that $\lim_{i\to\infty}x_1^{(i)}/x_3^{(i)}=0$, in other words $\lim_{i\to\infty}w(\gamma_i)=\infty$.
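For instance (again an illustrative family of ours), the positive integer solutions
\[
\gamma_i=(1,\,1,\,i,\,i+1),\qquad i=1,2,\dots,
\]
satisfy both branch equations and have coordinate sum $2i+3$, so their images in $\mathcal{PL}(\tau)$ converge to $(0,0,1/2,1/2)$; here $h(\gamma_i)=1$ and $w(\gamma_i)=i\to\infty$.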
In general, a train track near a circle can have many ``tails" like Figure~\ref{traintrack}~(a), but the argument above still works (using part 2 of Proposition~\ref{Plinear}). If the limit of a sequence of spiral curves $\{\gamma_i\}$ is a measured lamination by circles, then the winding numbers tend to infinity, $\lim_{i\to\infty}w(\gamma_i)=\infty$.
\end{example}
Let $S^1\times I$ be an annulus, and let $\gamma$ be a collection of disjoint spirals properly embedded in $S^1\times I$ and transverse to the $I$--fibers. Suppose the winding number for each spiral is at least 2. We fix an $I$--fiber $\{x\}\times I$. Let $\beta$ be a subarc of a spiral in $\gamma$ with $\beta\cap(\{x\}\times I)=\partial\beta$. Let $\alpha$ be the subarc of $\{x\}\times I$ between the two endpoints of $\beta$. We define the \emph{discrepancy} of $\gamma$ to be $1+|\gamma\cap int(\alpha)|$. It is very easy to see that the discrepancy is equal to the number of components of $\gamma$ and does not depend on the choice of $\beta$.
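In the picture of $h$ parallel spirals, for example, between two consecutive intersection points of one spiral with the fiber $\{x\}\times I$, each of the other $h-1$ spirals crosses the fiber exactly once, so
\[
|\gamma\cap int(\alpha)|=h-1\qquad\text{and the discrepancy is}\qquad 1+(h-1)=h,
\]
in agreement with the statement above.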
Next, we consider the two-dimensional version of Example~\ref{exspiral}.
\begin{example}\label{exband}
If we take a product of the train track in Example~\ref{exspiral} and an interval, we get a branched surface, see the shaded region in Figure~\ref{cylinder}~(a). As in Figure~\ref{cylinder} (a), we may assume the branched surface is sitting in $A\times I$, where $A$ is a horizontal annulus, and this branched surface is transverse to the $I$--fibers of $A\times I$. For any essential simple closed curve $c$ in $A$, the intersection of the cylinder $c\times I\subset A\times I$ and this branched surface is a train track as in Example~\ref{exspiral}. Suppose there is a sequence of spiraling disks $\{S_n\}$ fully carried by this branched surface and the limit lamination of $\{S_n\}$ is a union of horizontal annuli of the form $A\times\{x\}$, $x\in I$. Then we can define the winding number similarly, and as $n$ tends to infinity, the winding number of $S_n$ tends to infinity as well. To fit this in the bigger picture, we should consider $A\times I$ as a small portion of $N(B)$ and each $S_n$ is the intersection of $A\times I$ with a Heegaard surface. Naturally, $S_n$ may not be connected. Next we assume each $S_n$ lies in $A\times I$, transverse to every $I$--fiber of $A\times I$.
Let $h$ be the number of components of $S_n$ and suppose $h\ge 2$. Let $c$ be an essential simple closed curve in $A$. We consider the vertical cylinder $c\times I\subset A\times I$. $S_n\cap(c\times I)$ consists of $h$ spirals in $c\times I$. These spirals $S_n\cap(c\times I)$ cut $c\times I$ into some bands. We may describe each band as a product $l\times J$, where $l$ is a curve, $J$ is an interval, $l\times\partial J$ is a pair of spirals in $S_n\cap(c\times I)$, and each $\{x\}\times J$ ($x\in l$) is a subarc of an $I$--fiber of $c\times I$. We call such a band $l\times J$ a \emph{helical band}, see the shaded region in Figure~\ref{band} (a) for a picture. We call $\partial l\times J$ the two \emph{ends} of the band and define the winding number of the band to be the winding number of a spiral $l\times\{p\}$. We define the \emph{thickness} of a helical band $l\times J$ to be the number of components of $S_n\cap(l\times J)$. By the construction, the thickness of a helical band is at least 2 (since $l\times\partial J\subset S_n$) and can be as large as $h$. If the thickness of a helical band is less than $h$, then we can find a larger helical band $l'\times J'$ that contains $l\times J$ and with larger thickness. We say $l'\times J'$ is obtained by thickening $l\times J$.
\end{example}
\begin{figure}\label{cylinder}
\end{figure}
\begin{example}\label{exjoint}
Let $A_1$ and $A_2$ be two annuli and $Q$ a quadrilateral. By connecting $A_1$ and $A_2$ using $Q$, we get a pair of pants $P$, as shown in Figure~\ref{cylinder} (b). Now we consider the product $P\times I$. Let $\{S_n\}$ be a sequence of compact surfaces in $P\times I$ transverse to the $I$--fibers, and suppose the limit lamination of $\{S_n\}$ is of the form $P\times C$, where $C$ is a closed set in $I$. Suppose each component of $S_n\cap(Q\times I)$ is of the form $Q\times\{x\}$, $x\in int(I)$, and suppose $S_n\cap (A_i\times I)$ ($i=1,2$) consists of spiraling disks as in Example~\ref{exband}. We will use $h_i$ to denote the number of components of $S_n\cap(A_i\times I)$, and use $w_i$ to denote the winding number of a spiraling disk in $S_n\cap(A_i\times I)$. As in Example~\ref{exspiral}, if $n$ is large, the winding number $w_i$ is large. In this paper, we will also assume each $h_i$ is an even number and $h_i\ge 2$, $i=1,2$.
\end{example}
$S_n\cap(A_i\times I)$ consists of $h_i$ spiraling disks ($i=1,2$). Topologically, each spiraling disk is a meridian disk of the solid torus $A_i\times I$, and the intersection of these spiraling disks with each annulus in $A_i\times\partial I$ is a union of parallel essential arcs in the annulus. We say an arc $K$ is a \emph{proper vertical arc} if $K$ is a subarc of an $I$--fiber of $P\times I$ and $K$ is properly embedded in $\overline{P\times I-S_n}$. Let $\gamma\times J$ be an embedded rectangle in $P\times I$. We call $\gamma\times J$ a \emph{vertical band} if each $\{x\}\times J$ is a subarc of an $I$--fiber and $\gamma\times\partial J$ lies in $S_n$. Note that the helical bands described in Example~\ref{exband} are vertical bands. We define the \emph{thickness} of the vertical band $\gamma\times J$ to be the number of components of $S_n\cap(\gamma\times J)$. So the thickness of a vertical band is at least 2.
By our assumptions, the number of components of $S_n\cap(Q\times I)$ is roughly $w_1h_1=w_2h_2$. Let $J$ be a proper vertical arc in $Q\times I$ and $\alpha_J$ be an arc in $Q\times I$ connecting a point in $int(J)$ to $Q\times\{0\}$. We define the \emph{height} of $J$ to be the minimal number of intersection points in $S_n\cap\alpha_J$. We take a vertical band $\beta\times J$ around $A_i\times I$ and with both vertical arcs $\partial\beta\times J$ in $Q\times I$, as in Figure~\ref{band} (b), and suppose each $\{x\}\times J$ ($x\in\beta$) is a proper vertical arc. Then since $S_n\cap(A_i\times I)$ consists of spiraling disks, the height difference between the two proper vertical arcs $\partial\beta\times J$ is equal to the discrepancy (see the definition of discrepancy before Example~\ref{exband}) of the spirals around $A_i\times I$. Hence the height difference between the two arcs $\partial\beta\times J$ is equal to $h_i$. Moreover, two proper vertical arcs in $(A_i\cap Q)\times I$ belong to the same component of $\overline{A_i\times I-S_n}$ if and only if the height difference between the two arcs is $kh_i$ for some integer $k$.
Now we are in a position to construct a helix-turn-helix band (the term helix-turn-helix comes from biology). Recall that, as in Example~\ref{exjoint}, we assume each $h_i$ is an even number and $h_i\ge 2$.
\begin{example}[Helix-turn-helix bands]\label{HTH}
We assume $h_1=h_2$. First we give an outline of the construction. Let $c$ and $c'$ be a pair of disjoint essential simple closed curves in $A_1$. So $c\times I$ and $c'\times I$ are a pair of disjoint vertical annuli in $A_1\times I$. We take a pair of helical bands in $c\times I$ and $c'\times I$ respectively and connect them using a vertical band going around $A_2\times I$, as depicted in Figure~\ref{band} (b). The resulting vertical band is a helix-turn-helix band. There are some subtleties and additional requirements. The detailed description of the construction is as follows.
Let $J_1$ be a proper vertical arc in $\overline{(A_1\cap Q)\times I}$. We first take a vertical band $\sigma$ around $A_2\times I$, connecting $J_1$ to another proper vertical arc $J_2\subset (A_1\cap Q)\times I$, see the shaded region in Figure~\ref{band} (b). Note that in Figure~\ref{band} (b), the left two cylinders are vertical cylinders in $A_1\times I$ and the right cylinder is a vertical cylinder in $A_2\times I$. Clearly the height difference between $J_1$ and $J_2$ is $h_2$. Since $h_1=h_2$, $J_1$ and $J_2$ lie in the same component of $\overline{A_1\times I-S_n}$. Then we take a helical band $\sigma_i$ ($i=1,2$), as in Figure~\ref{band} (a), connecting $J_i$ to a proper vertical arc $J_i'$, where $J_i'$ has an endpoint in the bottom annulus $A_1\times\{0\}$. We can choose $\sigma_1$ and $\sigma_2$ in different vertical cylinders in $A_1\times I$, see the left part of Figure~\ref{band} (b) for a picture of two disjoint cylinders. So we may assume $\sigma_1\cap\sigma_2=\emptyset$ and $\Sigma=\sigma_1\cup\sigma\cup\sigma_2$ is an embedded vertical band connecting $J_1'$ to $J_2'$. Note that since the height difference between $J_1$ and $J_2$ is $h_2=h_1$, the winding numbers for $\sigma_1$ and $\sigma_2$ differ by one. We may write $\Sigma=\gamma\times J$, where $\gamma$ is an arc and $J$ is a closed interval. $\Sigma$ has the properties that $\Sigma\cap S_n=\gamma\times\partial J$, each $\{x\}\times J$ is a subarc of an $I$--fiber of $P\times I$, and $\partial\gamma\times J=J_1'\cup J_2'$. We call $\Sigma$ a \emph{helix-turn-helix} (or an HTH) band. Note that the thickness of the vertical band $\Sigma$ in the construction above is 2. Similar to Example~\ref{exband}, we can trivially thicken the HTH band $\Sigma$ to an embedded vertical band $\hat{\Sigma}$ so that the thickness of $\hat{\Sigma}$ is $h_1$ ($h_1=h_2$). We call both $\Sigma$ and $\hat{\Sigma}$ HTH bands.
Since $J_1'$ and $J_2'$ lie in the same component of $\overline{A_1\times I-S_n}$ and each $J_i'$ has an endpoint in the bottom annulus $A_1\times\{0\}$, we may glue a small vertical band $\delta$ to $\Sigma$, connecting $J_1'$ to $J_2'$, and get a vertical annulus $A_\Sigma=\Sigma\cup\delta$ properly embedded in $\overline{(P\times I)-S_n}$. Note that $J_1'\cup J_2'$ is a pair of opposite edges of $\delta$ and $\delta$ has an edge entirely in the bottom annulus $A_1\times\{0\}$. Let $x_i$ be the element in the fundamental group $\pi_1(P\times I)$ represented by the core of $A_i\times I$ ($i=1,2$). Then this vertical annulus $A_\Sigma$ represents the element $x_1^{-k}\cdot x_2\cdot x_1^{k+1}$ in $\pi_1(P\times I)$, for some $k$.
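To indicate where this word comes from (a reading of the core curve of $A_\Sigma$, under one choice of orientations; this is only a restatement of the construction above):
\[
\bigl[\mbox{core of }A_\Sigma\bigr]\;=\;\underbrace{x_1^{-k}}_{\sigma_1}\cdot\underbrace{x_2}_{\sigma}\cdot\underbrace{x_1^{k+1}}_{\sigma_2}\;\in\;\pi_1(P\times I),
\]
where $k$ and $k+1$ are, up to sign conventions, the winding numbers of the two helical bands $\sigma_1$ and $\sigma_2$, which differ by one as noted above. Since $\pi_1(P\times I)\cong\pi_1(P)$ is free on $x_1$ and $x_2$, this element is non-trivial.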
\end{example}
\begin{figure}\label{band}
\end{figure}
Note that in an earlier version of this paper, there was a construction of an HTH band for the case $h_1<h_2$; that construction turned out to be unnecessary for the proof of the main theorem.
\begin{example}\label{exmulti}
In Example~\ref{HTH}, if the winding numbers $w_1$ and $w_2$ are large, we can construct many disjoint HTH bands. To see this, we first divide $P\times I$ into $N$ parts, $P\times I_i$ ($i=1,\dots,N$), where $I_i=[\frac{i-1}{N},\frac{i}{N}]$. We may assume the intersection of $S_n$ with each $P\times I_i$ is as described in Example~\ref{exjoint}. Suppose the winding numbers $w_1$ and $w_2$ are large. We can carry out the construction in Example~\ref{HTH} on each $P\times I_i$. Then we glue a pair of long helical bands to the two ends of each vertical band constructed in $P\times I_i$ to spiral down to the bottom annulus $A_1\times\{0\}$. By choosing these helical bands to be in disjoint vertical cylinders of $A_1\times I$ (see the left part of Figure~\ref{band} (b) for a picture of two disjoint cylinders), we may assume these HTH bands are disjoint. Let $\Sigma_i=\gamma_i\times J$ ($i=1,\dots, N$) be the $N$ disjoint HTH bands above. We may assume each component of $\partial\gamma_i\times J$ is a proper vertical arc with an endpoint in $A_1\times\{0\}$. We may also construct the HTH bands so that these $\Sigma_i$'s lie in the same component of $\overline{P\times I-S_n}$. Moreover, we may assume that, for each $i$, the two proper vertical arcs $\partial\gamma_i\times J$ are close to each other. Hence, similar to Example \ref{HTH}, we can glue a small vertical band $\delta_i$ to each $\Sigma_i$ and get a collection of disjoint vertical annuli $A_{\Sigma_i}=\Sigma_i\cup\delta_i$ ($i=1,\dots,N$) properly embedded in the same component of $\overline{P\times I-S_n}$. The elements represented by these $A_{\Sigma_i}$'s in $\pi_1(P\times I)$ are conjugate. In fact, by unwinding the pairs of helical bands, we can isotope these annuli $A_{\Sigma_i}$ in $\overline{P\times I-S_n}$ so that $\pi(A_{\Sigma_i})$ is the same closed curve in $P$ for all $i$, where $\pi:P\times I\to P$ is the projection. Furthermore, similar to Example~\ref{HTH}, we can trivially thicken these $\Sigma_i$'s into a collection of embedded disjoint HTH bands with thickness $h_1$.
\end{example}
Let $\Sigma$ and $A_\Sigma$ be the HTH band and the vertical annulus constructed in the examples above. So, after a small perturbation, we may assume $\pi(A_\Sigma)$ is an immersed essential closed curve in $P$, where $\pi:P\times I\to P$ is the projection. By Example~\ref{exmulti}, if $w_1$ and $w_2$ are large, we can choose $N$ disjoint HTH bands $\Sigma_i$ ($i=1,\dots, N$) and $N$ disjoint vertical annuli $A_{\Sigma_i}$. Moreover, after some isotopy, $\pi(A_{\Sigma_i})$ is the same curve in $P$ for all $i$. Thus, regardless of the configurations of $S_n$, as long as $n$ is large, there is a fixed finite set of immersed essential closed curves in $P$, denoted by $\mathcal{C}_P$, such that $\pi(A_{\Sigma_i})$ above is a curve in $\mathcal{C}_P$, up to isotopy.
The following lemma follows trivially from Lemma~\ref{Limmerse}.
\begin{lemma}\label{Lcor}
Let $M$, $B$, $\{S_n\}$ and $\mu$ be as in Lemma~\ref{Lessential}. Let $P$ be an essential sub-surface of a leaf $l$ of $\mu$. Suppose $P$ is a pair of pants. Let $\mathcal{C}_P$ be the finite set of curves in $P$ as above. Then $B$ can be split into a branched surface $B_1$ that carries $\mu$ and an infinite sub-sequence $\{S_{n_k}\}$ of $\{S_n\}$, such that no $S_{n_k}$ contains any disk $D$ with the property that $\pi(\partial D)=\pi(\gamma)$ for any $\gamma\in\mathcal{C}_P$.
\end{lemma}
\begin{proof}
By the hypotheses, every curve $\gamma\in\mathcal{C}_P$ is essential in the leaf $l$. So the lemma follows from Lemma~\ref{Limmerse}.
\end{proof}
\begin{figure}\label{monogon}
\end{figure}
\begin{definition}\label{Dmonogon}
Let $S_n$ be a closed embedded surface carried by $N(B)$, and let $\nu$ be a subarc of an $I$--fiber of $N(B)$ with $\partial\nu\subset S_n$. We say that $\nu$ bounds a monogon if there is an embedded disk $E$ transverse to $S_n$, such that $\partial E=\nu\cup\alpha$, where $\alpha\subset S_n$ and $\partial\alpha=\partial\nu$. We call the disk $E$ a \emph{monogon}, see Figure~\ref{yinyang}(b) for a picture. We call $E$ an innermost monogon if $E\cap S_n=\alpha$. Since $\nu\subset N(B)$, we may assume that a neighborhood of $\nu$ in $E$ is a sub-disk $\kappa=a\times J$ of $E$ such that each $\{x\}\times J$ is a subarc of an $I$--fiber of $N(B)$, $a\times\partial J\subset\alpha\subset\partial E$, and $\nu$ is a component of $\partial a\times J$. We call $\kappa$ the \emph{tail} of the monogon. We define the \emph{thickness} of the tail to be $|S_n\cap\nu|$ and define the length of the tail to be the length of a component of $a\times\partial J$. So, if $E$ is innermost, the thickness of the tail is 2.

Let $\Sigma=\gamma\times J$ be an HTH band constructed in Example \ref{HTH}, and let $\nu$ be a component of $\partial\gamma\times J$. Suppose $\nu$ bounds a monogon $E$ disjoint from $\Sigma$. Then we can glue $\Sigma$ and two parallel copies of $E$ together, forming an embedded disk $\Delta$ as shown in Figure~\ref{monogon} (a). By our construction, $\partial\Delta$ is a simple closed curve in $S_n$. We call the disk $\Delta$ constructed in this fashion a \emph{pinched disk}. Since $\Delta$ is constructed using parallel copies of $E$, there is a rectangle $R\subset S_n$ between the two monogons, see the shaded regions of Figure~\ref{monogon} (b).

Let $\Sigma'$ be another HTH band constructed in Example~\ref{exmulti}. We can glue $\Sigma'$ and another two parallel copies of $E$ together, forming an embedded disk $\Delta'$. Similarly, there is a rectangle $R'\subset S_n$ between the two monogons, as shown in Figure~\ref{monogon} (b). By our construction in Example~\ref{exmulti}, $\Delta\cap\Delta'=\emptyset$ and $R\cap R'=\emptyset$. Moreover, there is a short arc $\eta\subset S_n$ connecting $R$ to $R'$, as shown in Figure~\ref{monogon} (b). We call an arc $\eta$ constructed in this fashion an \emph{$\eta$--arc}.
\end{definition}
\begin{remark}\label{Rrec}
Let $\Sigma$, $\Delta$ and $R$ be as in Definition~\ref{Dmonogon}. We can denote $R=\alpha\times\beta$, where $\alpha$ and $\beta$ are intervals, and suppose $R\cap\Delta=\alpha\times\partial\beta\subset\partial\Delta$. Moreover, $(\partial\Delta-\alpha\times\partial\beta)\cup(\partial\alpha\times\beta)$ is exactly the boundary of the annulus $A_\Sigma$ constructed in Example~\ref{HTH}.
\end{remark}
\begin{lemma}\label{Ldouble}
Let $M$, $\mu$, $P$, $B_1$ and $\{S_{n_k}\}$ be as in Lemma~\ref{Lcor}. Suppose $P\times I$ is embedded in $N(B_1)$ with each $\{x\}\times I$ a subarc of an $I$--fiber of $N(B_1)$. Suppose $S_{n_k}\cap(P\times I)$ is a surface as described in Example~\ref{exjoint} and assume the two winding numbers $w_1$ and $w_2$ are large enough. Let $\Sigma=\gamma\times J$ be an HTH band constructed in the examples above. Suppose the arcs $\partial\gamma\times J$ bound a pair of parallel embedded monogons $E_1$ and $E_2$ in $M-P\times (\epsilon,1]$, where $\epsilon\in I$ is a small number such that $\partial\gamma\times J\subset P\times[0,\epsilon]$. As in Definition~\ref{Dmonogon}, let $\Delta=E_1\cup\Sigma\cup E_2$ be an embedded pinched disk with $\partial\Delta\subset S_{n_k}$. Then $\partial\Delta$ is essential in $S_{n_k}$.
\end{lemma}
\begin{proof}
As in Example~\ref{HTH}, we can glue a small rectangle $\delta$ to $\Sigma$ and form an embedded annulus $A_\Sigma$ with $\partial A_\Sigma\subset S_{n_k}$. By Lemma~\ref{Lcor}, $\partial A_\Sigma$ is a pair of essential curves in $S_{n_k}$. Note that, since $E_1$ and $E_2$ may not be innermost monogons, $\Delta\cap S_{n_k}$ may contain other circles.
Since $E_1$ and $E_2$ are parallel monogons, there is a thin rectangle $R\subset S_{n_k}$ between $\partial E_1$ and $\partial E_2$, and $E_1\cup E_2\cup\delta\cup R$ is an embedded 2--sphere in $M$, see the shaded region in Figure~\ref{monogon} (b) for a picture of $R$.
Since the two winding numbers $w_1$ and $w_2$ in Example~\ref{exjoint} are large, we may assume the number $\epsilon$ in the lemma is very small. Hence, as in Example~\ref{exmulti}, we can find another disjoint HTH band $\Sigma'=\gamma'\times J$ and construct an annulus $A_{\Sigma'}$ by gluing a small rectangle $\delta'$ to $\Sigma'$. By Lemma~\ref{Lcor}, $\partial A_{\Sigma'}$ is also essential in $S_{n_k}$. Moreover, we can choose $\Sigma'$ so that $\partial\gamma'\times J$ bounds a pair of monogons $E_1'$ and $E_2'$ that are parallel to $E_1$ and $E_2$. Similarly, $\Delta'=E_1'\cup\Sigma'\cup E_2'$ is also an embedded disk with $\partial\Delta'\subset S_{n_k}$ and $\Delta\cap\Delta'=\emptyset$.
Similar to $R$, there is also a thin rectangle $R'\subset S_{n_k}$ between $\partial E_1'$ and $\partial E_2'$. Moreover, by our construction in Example~\ref{exmulti}, $R\cap R'=\emptyset$. Since the 4 monogons $E_1$, $E_2$, $E_1'$, $E_2'$ are parallel to each other, as described in Definition~\ref{Dmonogon}, there is a short arc $\eta\subset S_{n_k}$ outside $P\times I$ connecting $R$ to $R'$, as shown in Figure~\ref{monogon} (b), where the two shaded regions are $R$ and $R'$.
Now, suppose $\partial\Delta$ is a trivial curve in $S_{n_k}$, and we use $D$ to denote the disk in $S_{n_k}$ bounded by $\partial\Delta$. By Remark~\ref{Rrec}, $R\cap\partial\Delta$ is a pair of opposite edges of $R$, and the union of the other pair of opposite edges of $R$ and $\partial\Delta-R$ is $\partial A_\Sigma$. Since $\partial A_{\Sigma}$ is essential in $S_{n_k}$, the rectangle $R$ must lie in $S_{n_k}-int(D)$. Hence the arc $\eta$ must lie in $D$. Since $R\cup\partial\Delta$ is disjoint from $R'\cup\partial\Delta'$, $R'\cup\partial\Delta'$ must lie in $D$. This implies $\partial A_{\Sigma'}$ lies in $D$ and hence is trivial in $S_{n_k}$, contradicting our assumptions.
\end{proof}
Therefore, after some splittings and taking a sub-sequence of $\{S_n\}$, we have the following. For each HTH band $\Sigma$, by Lemma~\ref{Lcor}, the boundary of the annulus $A_\Sigma$ constructed above is a pair of essential curves in $S_n$. Moreover, if the two ends of $\Sigma$ bound a pair of parallel monogons, by Lemma~\ref{Ldouble}, the boundary of the pinched disk $\Delta$ constructed above is also an essential curve in $S_n$.
\section{Proof of the main theorem}\label{Smain}
Suppose $M$ is a closed, orientable, irreducible and non-Haken 3--manifold and $M$ is not a Seifert fiber space. By Theorem~\ref{Heeg1}, $M$ has a finite collection of branched surfaces such that:
\begin{enumerate}
\item each branched surface in this collection is obtained by gluing together normal disks and at most one almost normal piece with respect to a fixed triangulation, similar to \cite{FO};
\item up to isotopy, every strongly irreducible Heegaard surface is fully carried by a branched surface in this collection;
\item no branched surface in this collection carries any normal 2--sphere or normal torus.
\end{enumerate}
The goal of this section is to prove Theorem~\ref{Tfinite}. It is clear that Theorem~\ref{Tfinite} and Theorem~\ref{Heeg1} imply the main theorem.
\begin{theorem}\label{Tfinite}
Suppose $M$ is a closed, orientable, irreducible and non-Haken 3--manifold. Let $B$ be a branched surface in Theorem~\ref{Heeg1}. Then $B$ carries only finitely many irreducible Heegaard surfaces, up to isotopy.
\end{theorem}
\begin{proof}
Each closed surface fully carried by $B$ corresponds to a positive integer solution to the branch equations. Since the projective lamination space $\mathcal{PL}(B)$ is compact, if $B$ fully carries an infinite number of distinct strongly irreducible Heegaard surfaces, then there is an accumulation point in the projective lamination space, which corresponds to a measured lamination $\mu$. We may consider $\mu$ as the limit of these Heegaard surfaces, see section~\ref{Slimit}. Our goal is to show that $\mu$ is also an essential lamination. Then by Proposition~\ref{PHaken}, $M$ is Haken, which contradicts our hypothesis.
Because of Theorem~\ref{TMS}, we divide the proof into two parts. Part A is the case that $\mu$ is an exceptional minimal lamination and part B is the case that $\mu$ is a closed surface. The proofs for the two cases are slightly different.
\noindent\underline{\textbf{Part A}}. $\mu$ is an exceptional minimal measured lamination.
The main task is to prove the following lemma.
\begin{lemma}\label{Lincomp}
$\mu$ is incompressible in $M$.
\end{lemma}
\begin{proof}[Proof of Lemma~\ref{Lincomp}]
Suppose $\{S_n\}$ is an infinite sequence of strongly irreducible Heegaard surfaces fully carried by $B$ and $\mu$ is the limit point of $\{S_n\}$ in $\mathcal{PL}(B)$. The lamination $\mu$ is carried by $B$, but it may not be fully carried by $B$. Let $B^-$ be the sub-branched surface of $B$ that fully carries $\mu$. By Corollary~\ref{Cnormal}, $\mu$ must be a normal lamination. Hence $B^-$ does not contain the almost normal piece and every surface carried by $B^-$ is normal. By our hypotheses, $B^-$ does not carry any 2--sphere or torus.
We may assume $N(B^-)\subset N(B)$ with the induced $I$--fiber structure. By Proposition~\ref{Psplit}, we can arbitrarily split $B^-$ along $\mu$ and then split $B$ accordingly so that the resulting branched surface still carries an infinite sub-sequence of $\{S_n\}$. Therefore, by Proposition~\ref{Psplit} and Lemma~\ref{Lfine}, after splitting $B$ and $B^-$ and taking an infinite sub-sequence of $\{S_n\}$, we may assume no component of $\partial_hN(B^-)$ is a disk and each leaf of $\mu$ is $\pi_1$--injective in $N(B^-)$.
After some isotopy, we may assume $\partial_hN(B^-)\subset\mu$. Suppose $\mu$ is compressible and let $D$ be a compressing disk. After some splittings on $B$ and $B^-$ as in Proposition~\ref{Psplit} and taking a sub-sequence of $\{S_n\}$, we may assume $\partial_hN(B^-)$ is compressible and $D$ is a compressing disk in $M-int(N(B^-))$.
So $\gamma_1=\partial D$ is an essential curve in a leaf $l$ of $\mu$. Since $\mu$ has no holonomy, there is a vertical annulus $V$ in $N(B^-)$ such that $V$ contains $\gamma_1$ and $\mu\cap V$ is a union of parallel circles. Thus, after some splittings on $B^-$, we may assume $\pi(\gamma_1)$ is a simple closed curve in $B^--L^-$ and $V=\pi^{-1}(\pi(\gamma_1))$, where $\pi: N(B^-)\to B^-$ is the collapsing map and $L^-$ is the branch locus of $B^-$. By Proposition~\ref{Psplit} and Remark~\ref{Rsss}, we may split $B$ accordingly and assume $B$ still carries an infinite sequence of Heegaard surfaces $\{S_n\}$ whose limit lamination is $\mu$ and $B^-$ is the sub-branched surface of $B$ that fully carries $\mu$.
By Lemma~\ref{Lessential}, after some splittings and taking a sub-sequence of $\{S_n\}$, we may assume that $S_n\cap V$ does not contain any circle that is trivial in $S_n$, for each $n$. Since $\gamma_1$ bounds an embedded disk in $M$, by Lemma~\ref{Ls22}, if $S_n\cap V$ consists of circles, then each circle bounds a compressing disk in one of the two handlebodies. However, if $S_n\cap V$ consists of circles, by Corollary~\ref{Clinear} and Example~\ref{exspiral}, the number of circles in $S_n\cap V$ tends to infinity as $n$ tends to infinity. This gives a contradiction to Lemma~\ref{Lbound}. Therefore, $S_n\cap V$ cannot be a union of circles if $n$ is large enough. So we may assume $S_n\cap V$ consists of spirals.
Since every leaf is dense, $l\cap V$ contains an infinite number of circles. Since $B^-$ does not carry any torus, by Proposition~\ref{Ptorus} and our assumptions on $N(B^-)$ above, there must be a circle $\gamma_2\subset l\cap V$ such that $\gamma_2$ is non-trivial and not homotopic to $\gamma_1$ in $l$. Let $\gamma_i\times I\subset V$ ($i=1,2$) be a pair of disjoint thin vertical annuli such that $\gamma_i\subset\gamma_i\times I$ and $\mu\cap(\gamma_i\times I)$ is a union of parallel circles. Let $\alpha\subset l$ be a simple arc connecting $\gamma_1$ to $\gamma_2$ and $\Gamma=\gamma_1\cup\alpha\cup\gamma_2$ be a 1--complex in $l$. By choosing $\gamma_i\times I$ to be thin enough, we may assume $\gamma_1\times I$ and $\gamma_2\times I$ are connected by a rectangle $\alpha\times I$, forming an embedded 2--complex $\Gamma\times I$ with each $\{x\}\times I$ ($x\in\Gamma$) a subarc of an $I$--fiber of $N(B^-)$. By our construction, $\mu\cap(\Gamma\times I)$ is a union of 1--complexes parallel to $\Gamma$.
Let $A_i\subset l$ ($i=1,2$) be a small annular neighborhood of $\gamma_i$ in $l$, $Q$ be a small neighborhood of $\alpha$ in $l$, and $P=A_1\cup Q\cup A_2$ be a small neighborhood of $\Gamma$ in $l$. We can extend $\Gamma\times I$ to a product $P\times I\subset N(B^-)$. So $\mu\cap(P\times I)$ is a union of compact surfaces parallel to $P$. Moreover, since $\gamma_1$ and $\gamma_2$ are not homotopic in $l$, $P$ is an essential sub-surface of $l$.
Since every leaf is dense in $\mu$, after some splittings along $\mu$, we may assume $\pi(P)\subset B^--L^-$ and $P\times I=\pi^{-1}(\pi(P\times I))$, where $\pi: N(B^-)\to B^-$ is the collapsing map and $L^-$ is the branch locus of $B^-$. By Proposition~\ref{Psplit}, we may split $B$ accordingly and assume $B$ still carries an infinite sequence of Heegaard surfaces $\{S_n\}$ whose limit lamination is $\mu$ and $B^-$ is the sub-branched surface of $B$ that fully carries $\mu$.
By the construction above, we may consider $\mu\cap(P\times I)$ as the limit lamination of the sequence $\{S_n\cap(P\times I)\}$. Since $S_n\cap V$ consists of spirals and $\gamma_i\times I\subset V$, after some splittings, we may assume $S_n\cap(A_i\times I)$ is a union of spiraling disks and $S_n\cap(P\times I)$ is as described in Example~\ref{exjoint}. We use the same notations as section~\ref{Shelix}, and in particular, let $h_i$ be the number of components of $S_n\cap(A_i\times I)$. Since $\gamma_1\times I$ and $\gamma_2\times I$ are disjoint sub-annuli of $V$ before the splitting, we may assume the spirals in $S_n\cap V$ wind around both $\gamma_i\times I$ many times. So the spirals in $S_n\cap(\gamma_i\times I)$ are part of longer spirals in $S_n\cap V$. Hence, the discrepancies (see section~\ref{Shelix} for the definition of discrepancy) of the spirals in $S_n\cap(\gamma_i\times I)$ ($i=1,2$) are the same. Therefore, we have $h_1=h_2$.
By Lemma~\ref{Lcor}, after some splittings and taking a sub-sequence of $\{S_n\}$, we may assume that no $S_n$ contains a disk $E$ with the property that $\pi(\partial E)=\pi(\gamma)$ for any curve $\gamma\in\mathcal{C}_P$, where $\mathcal{C}_P$ is as in Lemma~\ref{Lcor}.
Recall that $\gamma_1$ bounds a compressing disk $D$ in $M-int(N(B^-))$. Let $\hat{D}=D\cup(\gamma_1\times I)$, where $\gamma_1\times I\subset P\times I$. By our construction above, $\hat{D}$ is an embedded disk in $M$. As $S_n$ is a compact surface, $S_n\cap \hat{D}$ must produce a monogon with a long ``tail'' spiraling around $\gamma_1\times I$, as shown in Figure~\ref{yinyang}~(a). In fact, there are at least two such monogons, such as $\theta_1$ and $\theta_2$ in Figure~\ref{yinyang}~(a). Note that $\hat{D}\cap S_n$ may contain circles.
\begin{figure}\label{yinyang}
\end{figure}
We will first consider the case that $\hat{D}\cap S_n$ does not contain any circle.
\noindent\underline{\textbf{Case 1}}. $\hat{D}\cap S_n$ does not contain any circle.
Since $S_n$ is a separating surface, we may assume each $h_i$ (i.e. the number of components in $S_n\cap(A_i\times I)$) is an even number. Since $h_1=h_2$, we have the following 2 subcases.
\noindent\underline{\textbf{Subcase 1a}}. $h_1=2$.
In this case, $S_n\cap\hat{D}$ is basically a single curve with both ends wrapping around $\gamma_1\times I$, as shown in Figure~\ref{yinyang}(a). So we have two innermost monogons $\theta_1$ and $\theta_2$ in different handlebodies. After a small perturbation in a small neighborhood of $\hat{D}$, we may assume $\partial\theta_1$ and $\partial\theta_2$ are disjoint in $S_n$. As in Example~\ref{exmulti}, we can find two disjoint HTH bands $\Sigma_1$ and $\Sigma_2$, such that $\Sigma_i$ ($i=1,2$) connects two parallel copies of $\theta_i$, forming a pinched disk $\Delta_i$. By Lemma~\ref{Ldouble} and our construction, $\Delta_1$ and $\Delta_2$ are compressing disks in different handlebodies and $\partial\Delta_1\cap\partial\Delta_2=\emptyset$, which contradicts the assumption that $S_n$ is a strongly irreducible Heegaard surface.
\noindent\underline{\textbf{Subcase 1b}}. $h_1\ge 4$.
Since $h_1\ge 4$, $\hat{D}\cap S_n$ contains at least two curves. Note that each curve of $\hat{D}\cap S_n$ cuts $\hat{D}$ into 2 monogons, as the $\theta_1$ and $\theta_2$ in Figure~\ref{yinyang}~(a). Thus, we can find a monogon $E$ which is not innermost, but each monogon in the interior of $E$ is innermost, as shown in Figure~\ref{yinyang}(b). Let $h_E$ be the thickness of the tail of $E$ and clearly $h_E\le h_1$.
Let $E_1$ and $E_2$ be two parallel copies of $E$. Since $h_E\le h_1$, by Example~\ref{HTH}, we can connect the tails of $E_1$ and $E_2$ using an HTH band $\hat{\Sigma}$ (in $P\times I$) with thickness $h_E$. We denote $E_1\cup\hat{\Sigma}\cup E_2$ by $\Delta$. So $\Delta$ is an embedded disk with $\partial\Delta\subset S_n$. Let $c_1,\dots,c_m$ be the components of $S_n\cap int(\Delta)$, and let $\Delta_i$ be the disk in $\Delta$ bounded by $c_i$. Similar to $\Delta$, each $\Delta_i$ is the union of a sub-band of $\hat{\Sigma}$ and two parallel copies of a sub-monogon of $E$. Since we have assumed each sub-monogon in $int(E)$ is innermost, $\Delta_i\cap S_n=\partial\Delta_i$. By Lemma~\ref{Ldouble} and our assumptions above, $\partial\Delta$ and each $\partial\Delta_i$ are essential curves in $S_n$. So each $\Delta_i$ is a compressing disk in a handlebody, say $H_1$. By Lemma~\ref{Ls22}, $\partial\Delta$ must bound a disk in a handlebody. Since the Heegaard splitting is strongly irreducible, $\partial\Delta$ must bound a compressing disk in $H_1$ as well. So $P_\Delta=\Delta-\cup_{i=1}^mint(\Delta_i)$ is a planar surface properly embedded in $H_2$. If $P_\Delta$ is compressible in $H_2$, then we can compress $P_\Delta$ into a collection of disjoint incompressible planar surfaces $P_1,\dots, P_s$. By Corollary~\ref{Csch}, each $P_i$ is $\partial$--parallel in $H_2$. Let $Q_i$ be the sub-surface of $S_n$ that is parallel to $P_i$ in $H_2$ ($\partial P_i=\partial Q_i$). Since the $P_i$'s are disjoint, any two surfaces $Q_i$ and $Q_j$ are either disjoint or nested in $S_n$.
By Example~\ref{exmulti}, we can construct another HTH band $\hat{\Sigma}'$ with thickness $h_E$, connecting two monogons $E_1'$ and $E_2'$, where $E_1'$ and $E_2'$ are also two parallel copies of $E$. We use $\Delta'$ to denote the disk $E_1'\cup\hat{\Sigma}'\cup E_2'$. By our construction $\Delta\cap\Delta'=\emptyset$. Similar to $\Delta$, $int(\Delta')\cap S_n$ is a union of circles $c_1',\dots,c_m'$ and the sub-disk of $\Delta'$ bounded by $c_i'$, denoted by $\Delta_i'$, is a compressing disk for the handlebody $H_1$. Similarly, we can compress the planar surface $\Delta'-\cup_{i=1}^mint(\Delta_i')$ into a collection of incompressible planar surfaces $P_1',\dots,P_t'$. Since $\Delta\cap\Delta'=\emptyset$, we may assume these $P_i$'s and $P_j'$'s are all disjoint in $H_2$. So each $P_i'$ is also $\partial$--parallel in $H_2$ and we use $Q_i'$ to denote the sub-surface of $S_n$ that is parallel to $P_i'$ ($\partial P_i'=\partial Q_i'$). Since these planar surfaces $P_i$'s and $P_j'$'s are disjoint and $\partial$--parallel, any two surfaces $Q_i$ and $Q_j'$ are either disjoint or nested in $S_n$.
To unify notations, we also denote $\Delta$, $\partial\Delta$, $\Delta'$, $\partial\Delta'$ by $\Delta_0$, $c_0$, $\Delta_0'$, $c_0'$ respectively.
As in Definition~\ref{Dmonogon} and Remark~\ref{Rrec}, for each $c_i=\partial\Delta_i$ (resp. $c_i'$), $0\le i\le m$, there is a rectangle $R_i=\alpha_i\times\beta_i$ (resp. $R_i'=\alpha_i'\times\beta_i'$) in $S_n$, see the shaded region in Figure~\ref{monogon}(b), such that $R_i\cap c_i$ (resp. $R_i'\cap c_i'$) is a pair of opposite edges $\alpha_i\times\partial\beta_i$ (resp. $\alpha_i'\times\partial\beta_i'$). Moreover, these $R_i$ and $R_j'$ are pairwise disjoint. By our construction in section~\ref{Shelix}, $(c_i-\alpha_i\times\partial\beta_i)\cup(\partial\alpha_i\times\beta_i)$ (resp. $(c_i'-\alpha_i'\times\partial\beta_i')\cup(\partial\alpha_i'\times\beta_i')$) is the boundary of an embedded vertical annulus $A_{\Sigma_i}$ (resp. $A_{\Sigma_i'}$) in $P\times I$, and by our assumptions and Lemma~\ref{Lcor}, $\partial A_{\Sigma_i}$ (resp. $\partial A_{\Sigma_i'}$) is a pair of essential curves in $S_n$.
Let $W_i$ (resp. $W_i'$) be the closure of a small neighborhood of $c_i\cup R_i$ (resp. $c_i'\cup R_i'$) in $S_n$. So two boundary circles of $W_i$ (resp. $W_i'$) are parallel to the two components of $\partial A_{\Sigma_i}$ (resp. $\partial A_{\Sigma_i'}$) above, and the other boundary component of $W_i$ (resp. $W_i'$) is parallel to $c_i$ (resp. $c_i'$). By our assumptions above, each boundary circle of $W_i$ (resp. $W_i'$) is an essential curve in $S_n$. Moreover, there is an $\eta$--arc (see Definition~\ref{Dmonogon}) $\eta_i\subset S_n$ connecting $R_i$ to $R_i'$, as shown in Figure~\ref{monogon} (b).
Let $Q_i$ be a planar surface above, and suppose $c_0,\dots,c_q$ are the boundary components of $Q_i$. Next, we will show that at least one $R_j$ ($0\le j\le q$) lies in $S_n-int(Q_i)$. Otherwise, suppose $R_j\subset Q_i$ for every $j$. Then for each $j$, $\partial\alpha_j\times\beta_j$ is a pair of arcs properly embedded in $Q_i$. Since $Q_i$ is a planar surface and since there is a rectangle $R_j$ attached to each $c_j$, by an innermost-surface argument, it is easy to see that, for some $j$, $\partial\alpha_j\times\beta_j$ is a pair of $\partial$--parallel arcs in $Q_i$. This implies that a boundary component of $W_j$ bounds a disk in $Q_i$ and hence is trivial in $S_n$, contradicting our assumptions. This argument also holds for each $Q_i'$. Therefore, for each $Q_i$ (resp. $Q_i'$), there is always such a rectangle $R_j$ (resp. $R_k'$), lying outside $int(Q_i)$ (resp. $int(Q_i')$) and with two opposite edges in $\partial Q_i$ (resp. $\partial Q_i'$).
Let $Q_i$ be any planar surface above. Suppose $c_k$ is a boundary circle of $Q_i$ and suppose $R_k=\alpha_k\times\beta_k$ is a rectangle outside $int(Q_i)$. So $R_k\cap Q_i=R_k\cap c_k=\alpha_k\times\partial\beta_k$. By our construction before, there is an arc $\eta_k$ connecting $R_k$ to $R_k'$, and $int(\eta_k)$ is disjoint from any $c_j$ or $c_j'$. Moreover, the two endpoints of $\eta_k$ lie in $\alpha_k\times\partial\beta_k\subset c_k$ and $\alpha_k'\times\partial\beta_k'\subset c_k'$. Suppose $c_k'$ is a boundary component of $Q_j'$. Since $R_k$ lies outside $int(Q_i)$, $\eta_k$ must lie in $Q_i$. Hence $c_k'\subset Q_i$. Since the planar surfaces $Q_i$ and $Q_j'$ are either disjoint or nested, $c_k'\subset Q_i$ implies that $Q_j'\subset Q_i$. This means that for each $Q_i$, there is some $Q_j'$ such that $Q_j'\subset Q_i$.
However, we can apply the same argument to $Q_i'$ and conclude that, for each $Q_i'$, there is some $Q_k$ such that $Q_k\subset Q_i'$. This is impossible because there is always an innermost planar surface among these $Q_i$'s and $Q_j'$'s.
\noindent\underline{\textbf{Case 2}}. $\hat{D}\cap S_n$ contains circles.
Similar to Case 1, each non-circular curve cuts $\hat{D}$ into a pair of monogons, though there may be circles in the monogons. We say a monogon $E$ is innermost if $E$ does not contain any other monogon, but $E$ may contain circles of $\hat{D}\cap S_n$. We first consider innermost monogons. Let $E$ be an innermost monogon and $c_1,\dots, c_K$ the outermost circles of $E\cap S_n$. Since the surfaces $\{S_n\}$ are carried by $B$, by assuming $\hat{D}$ to be transverse to $B$, it is easy to see that $K$, the number of such outermost circles in $E$, is bounded from above by a number independent of $S_n$. Since we assume $n$ is large, the winding number $w_i$ of the spiraling disks in $A_i\times I$ is large. So, by Example~\ref{exmulti}, we can find a large number of disjoint HTH bands $\Sigma_1,\dots,\Sigma_N$. Moreover, we can take $2N$ parallel copies of $E$, denoted by $E_1, E_1',\dots, E_N, E_N'$, so that the disks $\Delta_i=E_i\cup\Sigma_i\cup E_i'$ are disjoint and embedded in $M$. By Lemma~\ref{Ldouble}, we may assume each $\partial\Delta_i$ is an essential curve in $S_n$. Since $K$ is bounded by a number independent of $S_n$, we may assume $N$ is much larger than $K$, and this is a key point in the proof.
Between each pair $E_i$ and $E_i'$, there is a rectangle $R_i\subset S_n$ with two opposite edges in $\partial E_i$ and $\partial E_i'$, see the shaded region in Figure~\ref{monogon} (b). By the construction in section~\ref{Shelix}, we may assume there is an $\eta$--arc (see Definition~\ref{Dmonogon}) $\eta_i$ connecting $R_i$ to $R_{i+1}$ for each $i=1,\dots,N-1$, as shown in Figure~\ref{monogon} (c). The interior of each $\eta_i$ is disjoint from these disks $\Delta_j$'s.
If $c_i$ ($i=1,\dots, K$) is a trivial curve in $S_n$, since $M$ is irreducible, we can perform some isotopy on $E$ (fixing $\partial E$) and get a monogon disk with fewer outermost circles in $E\cap S_n$. So we may assume each $c_i$ is essential in $S_n$. Let $d_i$ be the disk in the monogon $E$ bounded by $c_i$ ($i=1,\dots,K$), and suppose $E-\cup_{i=1}^Kd_i$ lies in $H_1$. By Lemma~\ref{Ls22}, each circle $c_i$ bounds a compressing disk in a handlebody. If some $c_i$ bounds a disk in $H_1$, then we can replace $d_i$ by a disk in $H_1$ and obtain a disk with the same boundary $\partial\Delta_i$ but fewer outermost circles. If we can eliminate all the outermost circles $c_i$'s in this fashion, then we can conclude that each $\partial\Delta_i$ bounds a compressing disk in $H_1$. Suppose we cannot eliminate these circles $c_i$ ($i=1,\dots,K$) via these isotopies and surgeries. Then by the arguments above, we may assume each $c_i$ bounds a compressing disk in $H_2$.
The arguments that follow involve compression bodies and strongly irreducible Heegaard splittings for manifolds with boundary. We refer to \cite{CG} for definitions and fundamental results.
Let $W$ be the 3--manifold obtained by adding $K$ 2--handles to $H_1$ along these $c_i$'s, and let $\hat{W}$ be the manifold obtained by capping off the 2--sphere components of $\partial W$ by 3--balls. Since each $\Delta_i$ is constructed using parallel copies of $E$, after some isotopies, we may assume each $\Delta_i$ is a properly embedded disk in $W$. Note that after pushing $S_n$ into $int(\hat{W})$, $S_n$ becomes a Heegaard surface for $\hat{W}$, bounding the handlebody $H_1$ on one side and a compression body $W_2$ on the other side. Since each $c_i$ bounds a compressing disk in $H_2$ and $M=H_1\cup_{S_n} H_2$ is strongly irreducible, by \cite{CG}, the Heegaard splitting $\hat{W}=H_1\cup_{S_n} W_2$ is also strongly irreducible. By our assumption on $E$ above, at least one 2--handle is added to $H_1$ and hence $W_2$ is not a trivial compression body. Thus, by a theorem of Casson and Gordon (Theorem 2.1 of \cite{CG}), $\hat{W}$ is irreducible, and if $\partial\hat{W}\ne\emptyset$, $\partial\hat{W}$ is incompressible in $\hat{W}$. Therefore, each $\partial\Delta_i$ bounds a disk $D_i$ in $\partial W$ ($i=1,\dots,N$).
Since $W$ is obtained by attaching $K$ 2--handles to $H_1$, there are $2K$ disjoint disks $m_1,\dots, m_{2K}$ in $\partial W-S_n$ parallel to the cores of these 2--handles. Note that one can obtain the handlebody $H_2$ by attaching 1--handles to $M-int(W)$ along these disks $m_i$'s. Since each $\partial\Delta_i=\partial D_i$ is essential in $S_n$, each disk $D_i\subset\partial W$ ($i=1,\dots,N$) must contain some $m_j$ ($1\le j\le 2K$). Recall that $K$ is bounded by a number independent of $S_n$ and we have assumed that $N$ is very large compared with $K$. Since each $D_i$ contains some $m_j$, for any integer $p$, if $N$ is large enough, there exists a sequence of $p$ nested disks $D_{a_1}\subset D_{a_2}\subset\cdots\subset D_{a_p}$ ($1\le a_i\le N$): indeed, the disks $D_i$ are pairwise disjoint or nested since their boundaries are disjoint, the innermost ones are disjoint and each contains some $m_j$, so the $D_i$'s form at most $2K$ maximal nested chains and one of these chains contains at least $N/(2K)$ disks. Note that if $p>2K$, at least one annulus $D_{a_{i+1}}-D_{a_i}$ does not contain any disk $m_j$. So, by assuming $N$ is large enough, one can find 3 nested disks, say $D_1\subset D_2\subset D_3$, such that the two annuli $D_3-int(D_2)$ and $D_2-int(D_1)$ do not contain any disk $m_i$.
Recall that $\partial D_2=\partial\Delta_2$ and there is a rectangle $R_2\subset S_n$ with two opposite edges attached to $\partial\Delta_2$. By the construction of $W$, we also have $R_2\subset\partial W$. Moreover, $int(R_2)$ is disjoint from the circles $\partial\Delta_j$'s. So $R_2$ lies in one of the two annuli, $D_3-int(D_2)$ or $D_2-int(D_1)$. Let $W(\partial\Delta_2\cup R_2)$ be the closure of a small neighborhood of $\partial\Delta_2\cup R_2$ in $S_n$. By our assumptions before, each boundary circle of $W(\partial\Delta_2\cup R_2)$ is essential in $S_n$. Since the two annuli $D_3-int(D_2)$ and $D_2-int(D_1)$ do not contain any disk $m_i$, one boundary circle of $W(\partial\Delta_2\cup R_2)$ must be a trivial circle in both $\partial W$ and $S_n$, which contradicts our constructions and assumptions on the $R_i$'s before.
The arguments above show that, for any innermost monogon $E$ and pinched disk $\Delta_i$ above, after some isotopies and surgeries, we can eliminate these outermost circles $c_i$'s so that $\Delta_i$ becomes a compressing disk in $H_1$, where $E-\cup_{i=1}^Kd_i\subset H_1$ as above. Now, similar to Case 1, we have two subcases.
\noindent\underline{\textbf{Subcase 2a}}. $h_1=2$.
In this subcase, $\hat{D}\cap S_n$ contains exactly one non-circular curve and this curve cuts $\hat{D}$ into a pair of innermost monogons. So, by the arguments above on innermost monogons, we can eliminate the outermost circles in $\hat{D}\cap S_n$, and construct two disjoint compressing disks in the two handlebodies as in Subcase 1a.
\noindent\underline{\textbf{Subcase 2b}}. $h_1\ge 4$.
The proof for this subcase is a combination of the proof of Subcase 1b and the arguments on innermost monogons above. Similar to Subcase 1b, we can find a monogon $E$ which is not innermost, but each monogon in the interior of $E$ is innermost. As in Subcase 1b, by connecting two parallel copies of $E$ and a thick HTH band $\Sigma$, we get a pinched disk $\Delta$ (see Definition~\ref{Dmonogon}), with $\partial\Delta\subset S_n$. Let $\epsilon_1,\dots,\epsilon_k$ be the monogons in $int(E)$. Then the corresponding parallel copies of $\epsilon_i$ and a sub-band of $\Sigma$ form a pinched disk $\Delta_i\subset\Delta$ ($i=1,\dots,k$). By Lemma~\ref{Ldouble} and our assumptions before, $\partial\Delta$ and each $\partial\Delta_i$ are essential in $S_n$. By the arguments on innermost monogons, after some isotopies and surgeries, we may assume $S_n\cap int(\Delta_i)=\emptyset$ and each $\Delta_i$ is a compressing disk in a handlebody. Since $S_n$ is strongly irreducible, these $\Delta_i$'s are compressing disks in the same handlebody, say $H_2$.
Let $c_1,\dots, c_{K}$ be the outermost circles in $E\cap S_n$. As before, $K$ is bounded by a number independent of $S_n$. By our assumption on innermost monogons, these $c_i$'s lie in $E-\cup_{i=1}^k\epsilon_i$. By the construction of the pinched disks, $S_n\cap(\Delta-\cup_{i=1}^k\Delta_i)$ has $2K$ outermost circles $c_1,\dots, c_K$ and $c_1',\dots, c_K'$, where each $c_i'$ is parallel to $c_i$ in $S_n$. As before, we may assume each $c_i$ is an essential curve in $S_n$. Let $d_i$ (resp. $d_i'$) be the disk in $\Delta$ bounded by $c_i$ (resp. $c_i'$). We use $P_\Delta$ to denote the closure of $\Delta-\cup_{i=1}^k\Delta_i-\cup_{i=1}^Kd_i-\cup_{i=1}^Kd_i'$. So $P_\Delta$ is a properly embedded planar surface in the handlebody $H_1$, and by our previous assumptions, each component of $\partial P_\Delta$ is essential in $S_n$. By Lemma~\ref{Ls22}, each circle in $\partial P_\Delta$ bounds a compressing disk in a handlebody. Since each $\partial\Delta_i$ bounds a disk in $H_2$ and the Heegaard surface $S_n$ is strongly irreducible, each component of $\partial P_\Delta$ bounds a compressing disk in $H_2$. By Corollary~\ref{Csch}, if $P_\Delta$ is incompressible in $H_1$, then $P_\Delta$ is $\partial$--parallel in $H_1$.
Similar to the arguments for the innermost monogons, we can take $2N$ parallel copies of $E$ and use $N$ disjoint HTH bands to construct $N$ pinched disks, $\tilde{\Delta}_1, \dots, \tilde{\Delta}_N$. Since these pinched disks are constructed using parallel copies of the same monogon $E$, we may apply the arguments for $\Delta$ and $P_\Delta$ above to each of the $N$ pinched disks $\tilde{\Delta}_1, \dots, \tilde{\Delta}_N$. Let $P_1,\dots,P_N$ be the planar sub-surfaces of these $N$ pinched disks constructed in the same way as the $P_\Delta$ above. In particular, each $P_i$ is properly embedded in $H_1$ and each circle in $\partial P_i$ bounds a compressing disk in $H_2$. Each boundary circle of $P_i$ is either the boundary of a pinched disk or a circle parallel to some $c_j$ in $S_n$. To simplify notation, we assume each $P_i$ is incompressible. The proof for the compressible case is the same after we compress the $P_i$'s into incompressible pieces, as in Subcase 1b. So, by Corollary~\ref{Csch}, each $P_i$ is $\partial$--parallel in $H_1$.
Let $W$ be the 3--manifold obtained by adding $K$ 2--handles to $H_1$ along these $c_1,\dots, c_K$. Since the $N$ pinched disks are constructed using parallel copies of the same monogon $E$, each $P_i$ can be extended to a properly embedded planar surface $\hat{P}_i$ in $W$. $\hat{P}_i$ can be considered as the planar surface obtained by capping off the $c_i$'s and $c_i'$'s by disks. So, by our assumption on $\partial P_i$, each boundary circle of $\hat{P}_i$ is the boundary of a pinched disk which is either some $\tilde{\Delta}_j$ or a pinched disk in $int(\tilde{\Delta}_j)$ formed by innermost monogons.
By the construction in section~\ref{Shelix}, there is a rectangle in $S_n$ with two opposite edges glued to the boundary of each pinched disk, as shown in the shaded regions in Figure~\ref{monogon} (b). Since the $c_i$'s are circles in $E$, these rectangles are in $\partial W$. Hence there is such a rectangle in $\partial W$ attached to each boundary circle of $\hat{P}_i$. Moreover, for any two adjacent pinched disks, there is also a short $\eta$--arc connecting the two rectangles, as shown in Figure~\ref{monogon} (b). Similar to the argument on innermost monogons, we may assume these $\tilde{\Delta}_i$'s have a natural order in the following sense: If $R_i$ is a rectangle attached to a circle in $\partial\hat{P}_i$ with $2\le i\le N-1$, then as shown in Figure~\ref{monogon} (c), there are two arcs $\eta_{i-1}$ and $\eta_i$ connecting $R_i$ to two rectangles $R_{i-1}$ and $R_{i+1}$, where $R_{i-1}$ (resp. $R_{i+1}$) is a rectangle attached to a circle in $\partial\hat{P}_{i-1}$ (resp. $\partial\hat{P}_{i+1}$). Therefore, we may assume that, if $i\ne 1$ and $i\ne N$, there are two $\eta$--arcs for each component of $\partial\hat{P}_i$, connecting the attached rectangle to $\partial\hat{P}_{i-1}$ and $\partial\hat{P}_{i+1}$, as shown in Figure~\ref{monogon} (c), where $\hat{P}_{i-1}$ and $\hat{P}_{i+1}$ are different planar surfaces. The fact that $\hat{P}_{i-1}$ and $\hat{P}_{i+1}$ are different surfaces is important to our proof.
Since each $P_i$ is $\partial$--parallel in $H_1$, each $\hat{P}_i$ must be $\partial$--parallel in $W$. Let $Q_i\subset\partial W$ be the sub-surface of $\partial W$ that is parallel to $\hat{P}_{i}$ and with $\partial Q_i=\partial\hat{P}_{i}$. Since these $\hat{P}_i$'s are disjoint, any two planar surfaces $Q_i$ and $Q_j$ are either disjoint or nested.
Similar to the arguments on the innermost monogons, let $m_1,\dots,m_{2K}$ be the $2K$ disks in $\partial W-S_n$ parallel to the cores of the 2--handles added to $H_1$. We first suppose some $Q_k$ ($1\le k\le N$) does not contain any disk $m_i$. Since any planar surface inside $Q_k$ does not contain any disk $m_i$ either, we may assume $Q_k$ is innermost. Then by the arguments in Subcase 1b on the $Q_i$'s, there must be a rectangle $R$ attached to $\partial Q_k$ and lying in $S_n-int(Q_k)$. So the $\eta$--arc attached to $R$ must lie in $Q_k$ and hence $Q_k$ must contain another planar surface $Q_j$ ($j\ne k$), which contradicts the assumption that $Q_k$ is innermost. Thus, we may assume each $Q_k$ contains some disk $m_i$.
Since $K$ is bounded by a number independent of $S_n$, similar to the arguments on the innermost monogons above, if $N$ is large enough, we can find 3 nested planar surfaces, say $Q_{n_1}\subset Q_{n_2}\subset Q_{n_3}$, such that $Q_{n_3}-Q_{n_2}$ and $Q_{n_2}-Q_{n_1}$ do not contain any disk $m_i$. Moreover, if $N$ is large, we can find many such nested planar surfaces so that $n_2\ne 1$ and $n_2\ne N$. Since each $Q_k$ contains some disk $m_i$, $Q_{n_3}-Q_{n_2}$ and $Q_{n_2}-Q_{n_1}$ do not contain any other planar surface $Q_k$. Moreover, we can choose the $Q_{n_1}$, $Q_{n_2}$ and $Q_{n_3}$ so that there is no $Q_k$ satisfying $Q_{n_1}\subset Q_{k}\subset Q_{n_2}$ or $Q_{n_2}\subset Q_{k}\subset Q_{n_3}$.
Let $\alpha$ be a boundary circle of $Q_{n_2}$. Since $Q_{n_3}$ is a planar surface and $Q_{n_2}\subset Q_{n_3}$, $\alpha$ is separating in $Q_{n_3}$ and bounds a sub-surface $Q_\alpha$ in $Q_{n_3}$. We can choose $\alpha$ so that $Q_{n_2}\subset Q_\alpha$. Let $R$ be the rectangle attached to this boundary circle $\alpha$ of $Q_{n_2}$. By our assumption on $n_2$, there is a pair of $\eta$--arcs connecting the rectangle $R$ to two different planar surfaces. However, by our assumptions on $Q_{n_1}$, $Q_{n_2}$, $Q_{n_3}$ and $\alpha$, if $R\subset Q_{n_2}$, both $\eta$--arcs must connect $R$ to $\partial Q_{n_3}$; if $R\subset S_n-int(Q_{n_2})$, both $\eta$--arcs must connect $R$ to $\partial Q_{n_1}$, which contradicts the previous assumption that the pair of $\eta$--arcs connect $R$ to different $\hat{P}_i$'s, see Figure~\ref{monogon} (c). This finishes the proof of Lemma~\ref{Lincomp}.
\end{proof}
\begin{lemma}\label{Lendincomp}
$\mu$ is end-incompressible.
\end{lemma}
\begin{proof}[Proof of Lemma~\ref{Lendincomp}]
As before, by Proposition~\ref{Psplit}, we can split $B^-$ and $B$ so that $B^-$ has no disk of contact and fully carries $\mu$. We may also split $B^-$ so that the number of components of $M-B^-$ is the smallest among all the branched surfaces fully carrying $\mu$. After some isotopy, we may assume that $\partial_hN(B^-)\subset\mu$. Since $\mu$ is incompressible by Lemma~\ref{Lincomp}, $\partial_hN(B^-)$ is incompressible in $M-int(N(B^-))$. Suppose $\mu$ is not end-incompressible and let $E$ be a monogon in $M-int(N(B^-))$. Let $\hat{E}$ be the component of $M-int(N(B^-))$ containing $E$. By Proposition~\ref{Pmon}, $\hat{E}$ must be a solid torus of the form $E\times S^1$. Let $L$ be the leaf that contains the horizontal boundary component of $\hat{E}$. Since $|M-B^-|$ is the smallest, we cannot split $N(B^-)$ along $L$ connecting $\hat{E}$ to other components of $M-int(N(B^-))$.
We may assume $L$ is an orientable surface. We claim that $L$ must be an infinite annulus. If $L$ is not an infinite annulus, we can construct a compressing disk for $L$ by connecting two parallel copies of the monogon $E$ and a long vertical band, as shown in Figure~\ref{monogon} (a), similar to the construction of a pinched disk before, which is impossible since $\mu$ is incompressible by Lemma~\ref{Lincomp}. Thus $L$ is an infinite annulus. Since $B^-$ does not carry any 2--sphere or torus, this contradicts Lemma~\ref{Lleaf}.
\end{proof}
Since $B^-$ does not carry any 2--sphere, Lemmas~\ref{Lincomp} and \ref{Lendincomp} imply that $\mu$ is an essential lamination. By Proposition~\ref{PHaken}, $M$ is Haken, which contradicts the hypothesis. This finishes the proof for part A.
\noindent\underline{\textbf{Part B}}. $\mu$ consists of compact leaves.
The only difference between the proofs for Part A and Part B is the construction of $P\times I$. By Theorem~\ref{TMS}, we may assume $\mu$ is either a family of parallel orientable closed surfaces or a twisted family of parallel closed surfaces. In both cases, $\mu$ corresponds to a rational point in $\mathcal{PL}(B)$. For any non-orientable surface $S$ carried by $B$, the boundary of a twisted $I$--bundle over $S$ is an orientable closed surface carried by $B$ and corresponding to the same point in $\mathcal{PL}(B)$ as $S$. Thus, by using the boundary of a twisted $I$--bundle if necessary, we may assume $\mu$ consists of orientable closed surfaces. Let $B^-$ be the sub-branched surface of $B$ fully carrying $\mu$. By Proposition~\ref{Psplit}, after some splittings, we may assume $B^-$ is an orientable closed surface and $N(B^-)$ is a product of an interval and the closed surface $B^-$. Moreover, by Corollary~\ref{Cnormal} and our assumptions on $B$ before, $B^-$ is a normal surface in $M$ with genus at least 2.
We first prove that there must be a non-separating simple closed curve in $B^-$ that bounds an embedded disk $D$ in $M$ (note that $int(D)\cap B^-$ may not be empty). Since $M$ is non-Haken, $B^-$ is compressible and we can perform a compression on $B^-$ and get a new surface which must also be compressible. So we can successively perform compressions on the resulting surfaces until we get a collection of $2$--spheres. If the boundary circle of every compressing disk is separating, then after some compressions, we get an embedded torus. As every essential simple closed curve in a torus is non-separating, we get a non-separating simple closed curve $\gamma$ in $B^-$ such that $\gamma$ bounds an embedded disk $D$ in $M$. Moreover, we may assume that $D$ is transverse to $B^-$ and every component of $int(D)\cap B^-$ is a separating curve in $B^-$.
Let $\gamma_1$ and $\gamma_2$ be two parallel copies of $\gamma$ in $B^-$. Each $\gamma_i$ bounds a disk $D_i$ in $M$ ($i=1,2$), and each $D_i$ is parallel to $D$. We may assume $D_1\cap D_2=\emptyset$. Since $\gamma$ is non-separating, there is an arc $\alpha\subset B^-$ connecting $\gamma_1$ to $\gamma_2$, forming a graph $\Gamma=\gamma_1\cup\alpha\cup\gamma_2$, such that $B^--\Gamma$ contains no disk component. Moreover, since every component of $int(D)\cap B^-$ is a separating curve in $B^-$, we may choose $\alpha$ so that $\alpha\cap int(D_i)=\emptyset$. Let $A_i$ ($i=1,2$) be an annular neighborhood of $\gamma_i$ in $B^-$ and $Q$ a small neighborhood of $\alpha$ in $B^-$. Then $P=A_1\cup Q\cup A_2$ is a sub-surface of $B^-$ and no boundary circle of $P$ bounds a disk in $B^-$. Let $P\times I=\pi^{-1}(P)$ and $A_i\times I=\pi^{-1}(A_i)$ ($i=1,2$), where $\pi: N(B^-)\to B^-$ is the projection. We may consider $P$ as the limit of $\{S_n\cap(P\times I)\}$ in the corresponding projective lamination space. We will use this $P\times I$ to construct our HTH bands, as in section~\ref{Shelix}.
As before, we may assume the sequence of surfaces $\{S_n\}$ satisfy the hypotheses of Lemma~\ref{Lcor}. By Lemma~\ref{Lessential}, we may assume $S_n\cap (\gamma_i\times I)$ ($i=1,2)$ does not contain any circle that is trivial in $S_n$, for each $n$. If $S_n\cap(\gamma_i\times I)$ consists of circles, then each circle is essential in $S_n$ and hence bounds a compressing disk in one of the two handlebodies by Lemma~\ref{Ls22}. However, if $S_n\cap(\gamma_i\times I)$ consists of circles, by Corollary~\ref{Clinear} and Example~\ref{exspiral}, the number of circles in $S_n\cap(\gamma_i\times I)$ tends to infinity as $n$ goes to infinity. This gives a contradiction to Lemma~\ref{Lbound}. So $S_n\cap(\gamma_i\times I)$ cannot be a union of circles if $n$ is large enough. Hence we may assume $S_n\cap(\gamma_i\times I)$ consists of spirals for each $n$.
Therefore, after splitting $B$, we may assume $S_n\cap(A_i\times I)$ ($i=1,2$) consists of spiraling disks and $S_n\cap (P\times I)$ satisfies the conditions in Example~\ref{exjoint}. We use the same notation as in section~\ref{Shelix}. Let $h_i$ be the number of components of $S_n\cap(A_i\times I)$ ($i=1,2$), and we may assume $n$ is sufficiently large. Since $\gamma_1$ and $\gamma_2$ are parallel in $B^-$, we may assume $h_1=h_2$. Then we can use Example~\ref{HTH} to construct our HTH bands, and the remainder of the proof is the same as in Part A. This finishes the proof of Theorem~\ref{Tfinite} and Theorem~\ref{main}.
\end{proof}
\section{The Casson-Gordon example}\label{Sexample}
Casson and Gordon gave an example of a 3--manifold that has an infinite family of strongly irreducible Heegaard splittings with different genera \cite{CG2}, see \cite{Ko, Sed}. By Theorem~\ref{main}, such a 3--manifold must be Haken. In fact, it is easy to directly show that the 3--manifolds in the Casson-Gordon example are Haken. The proof of Theorem~\ref{main} indicates that there should be an incompressible surface as the limit of the infinite family of Heegaard surfaces. In this section, we construct such an incompressible surface.
Before carrying out the construction, we give a brief overview of the Casson-Gordon example and we refer to \cite{Ko, Sed} for more details. We first take a pretzel knot $K=(p_1,p_2,p_3,1,p_4)$ in $S^3$, where $|p_i|\ge 5$. The standard Seifert surface $F_1$ from the Seifert algorithm is a free Seifert surface. Let $S$ be a 2--sphere in $S^3$ that cuts the knot into 2 tangles, as shown in Figure~\ref{pretzel} (a). If we flip a tangle bounded by $S$ along a horizontal axis by $180^\circ$, we get the same knot with a different projection $(p_1,-1,p_2,p_3,1,1,p_4)$. By a theorem of Parris \cite{P}, the standard Seifert surface $F_2$ from the Seifert algorithm is also a free Seifert surface with $genus(F_2)=genus(F_1)+1$. By flipping the tangle $k$ times, we get an infinite family of free Seifert surfaces $\{F_k\}$ with increasing genus.
\begin{figure}\label{pretzel}
\end{figure}
Let $\eta(K)$ be a tubular neighborhood of the knot $K$ and let $M_0=S^3-\eta(K)$ be the knot exterior. Let $H_k$ be the closure of a small neighborhood of $F_k$ in $M_0$. So $H_k$ is a handlebody. Since $F_k$ is a free Seifert surface, $\overline{M_0-H_k}$ is also a handlebody. Let $K(p/q)$ be the closed manifold obtained by Dehn filling $M_0$ along the slope $p/q$. We may regard $H_k$ as a handlebody in $K(p/q)$. In fact, if $p=1$, $K(1/q)-int(H_k)$ is also a handlebody and $S_k=\partial H_k$ is a Heegaard surface for $K(1/q)$. Casson and Gordon showed \cite{CG2, Ko, MSch} that, if $|q|\ge 6$, then this Heegaard splitting of $K(1/q)$ by $S_k=\partial H_k$ is strongly irreducible. So we get an infinite family of strongly irreducible Heegaard surfaces $\{S_k\}$ for $M=K(1/q)$ ($|q|\ge 6$).
In \cite{Ko}, Kobayashi gave an interpretation of the sequence of free Seifert surfaces $\{F_k\}$ through branched surfaces. Let $F_1$ be the free Seifert surface of $M_0=S^3-\eta(K)$ above, and $S$ the punctured 2--sphere as shown in Figure~\ref{pretzel}~(a). By fixing a normal direction for $F_1$ and $S$, we can deform $F_1\cup S$ into a branched surface $B_0$, as shown in Figure~\ref{pretzel} (b). Both $F_1$ and $S$ are carried by $B_0$, so we can assume $F_1$ and $S$ lie in $N(B_0)$, a fibered neighborhood of $B_0$. Then the canonical cutting and pasting on $F_1$ and $S$ produce another Seifert surface $F_1+S$. Kobayashi showed that $F_2=F_1+S$ is the same free Seifert surface described above. Moreover, $F_k=F_1+(k-1)S$.
\begin{figure}\label{deform}
\end{figure}
As we mentioned before, the closed manifold $M=K(1/q)$ is Haken. The 2--sphere $S\subset S^3$ in Figure~\ref{pretzel} (a) cuts $(S^3, K)$ into a pair of non-trivial tangles $(E_1,K_1)$ and $(E_2, K_2)$, where $E_1$ and $E_2$ are the pair of 3--balls in $S^3$ bounded by $S$ and $K_i\subset E_i$ is a pair of strings. Let $\eta(K_1)$ be a small neighborhood of $K_1$ in $E_1$. Then $\Gamma=\partial(E_1-\eta(K_1))$ is a closed surface of genus 2 in $S^3-K$. It is not hard to see that $\Gamma$ is incompressible in $S^3-K$ (for instance see \cite{Wu}). By a theorem of Menasco \cite{Me}, $\Gamma$ remains incompressible after any non-trivial Dehn surgery on $K$.
Next we will show that $\Gamma$ can be considered as the limit of the sequence of Heegaard surfaces $\{S_k\}$.
We start with the Seifert surface $F_1$ and consider $K=\partial F_1$ ($F_1\cap\Gamma\ne\emptyset$). Let $\eta(F_1)$ be a small neighborhood of $F_1$ in $S^3$. After moving $K$ slightly off $\eta(F_1)$, we can regard the Heegaard surface $S_1$ of $M=K(1/q)$ as the boundary surface of the closure of $\eta(F_1)$. $S_1\cap\Gamma$ consists of closed curves.
Similar to the construction of the branched surface $B_0$ above, we can deform $S_1\cup\Gamma$ into a branched surface $B$ as shown in Figure~\ref{deform} (a). $B$ carries both $S_1$ and $\Gamma$, so we can assume $\Gamma$ and $S_1$ lie in $N(B)$ and are transverse to the $I$--fibers. Then we perform the canonical cutting and pasting on $S_1$ and 2 parallel copies of $\Gamma$, as shown in Figure~\ref{deform} (b). It is not hard to see that the resulting surface $S_1+2\Gamma$ is isotopic to $S_2$. Similarly, $S_3=S_2+2\Gamma$ and $S_k=S_1+2(k-1)\Gamma$. By our discussion on projective lamination spaces, $\Gamma$ is indeed the limit of the sequence of Heegaard surfaces $\{S_k\}$.
\end{document}
\begin{document}
{
\begin{center}
{\Large\bf
On orthogonal Laurent polynomials related to the partial sums of power series.}
\end{center}
\begin{center}
{\bf S.M. Zagorodnyuk}
\end{center}
\noindent
\textbf{Abstract.}
Let $f(z) = \sum_{k=0}^\infty d_k z^k$, $d_k\in\mathbb{C}\backslash\{ 0 \}$, $d_0=1$, be a power series with a non-zero
radius of convergence $\rho$: $0 <\rho \leq +\infty$. Denote by $f_n(z)$ the $n$-th partial sum of $f$, and
$R_{2n}(z) = \frac{ f_{2n}(z) }{ z^n }$, $R_{2n+1}(z) = \frac{ f_{2n+1}(z) }{ z^{n+1} }$, $n=0,1,2,...$.
By the result of Hendriksen and Van Rossum there exists a linear functional $\mathbf{L}$ on Laurent polynomials, such that
$\mathbf{L}(R_n R_m) = 0$, when $n\not= m$, while $\mathbf{L}(R_n^2)\not= 0$. We present an explicit integral representation for $\mathbf{L}$
in the above case of the partial sums. We use methods from the theory of generating functions. The case of finite systems
of such Laurent polynomials is studied as well.
\noindent
\textbf{MSC 2010:} 42C05.
\noindent
\textbf{Keywords.} Laurent polynomials, Maclaurin series, partial sums, orthogonal rational functions.
\section{Introduction.}
The theories of orthogonal polynomials on the real line (OPRL) and on the unit circle (OPUC) have seen a great number of contributions
and applications~\cite{cit_50000_Gabor_Szego}, \cite{cit_5000_Ismail}, \cite{cit_48000_Simon_1}, \cite{cit_48000_Simon_2}.
One of their possible generalizations is the relatively new theory of biorthogonal rational functions,
see~\cite{cit_5500_Jones_Thron_N__1984}, \cite{cit_1000_H_v_R__1986}, \cite{cit_5100_Ismail_Masson_1995},
\cite{cit_100000_Zhedanov_1999}, \cite{cit_500_Book_Bultheel___1999}, \cite{cit_400_Beckermann_Dereviagin_Zhedanov___2010} and
references therein.
Denote by $\mathcal{A}$ the set of all (formal) Laurent polynomials of the following form:
\begin{equation}
\label{l1_4}
\lambda_p x^p + \lambda_{p+1} x^{p+1} + \ldots + \lambda_{p+q} x^{p+q},\qquad p\in\mathbb{Z};\ q\in\mathbb{Z}_+,
\end{equation}
where $\lambda_j$ are complex coefficients and $x$ is an indeterminate.
Let $\{ Q_k \}_{k=0}^\infty$ be a sequence of Laurent polynomials of the following form:
\begin{equation}
\label{l1_7}
Q_{2n}(x) = \sum_{j=-n}^n \alpha_j^{(2n)} x^j,\quad \alpha_n^{(2n)}\not= 0,
\end{equation}
\begin{equation}
\label{l1_8}
Q_{2n+1}(x) = \sum_{j=-n-1}^n \alpha_j^{(2n+1)} x^j,\quad \alpha_{-n-1}^{(2n+1)}\not= 0,\qquad n\in\mathbb{Z}_+.
\end{equation}
By Proposition~1 in~\cite{cit_1000_H_v_R__1986}, if the sequence $\{ Q_k \}_{k=0}^\infty$ satisfies
\begin{equation}
\label{l1_9}
Q_{2n+1}(x) = (x^{-1} + g_{2n+1}) Q_{2n}(x) + f_{2n+1} Q_{2n-1}(x),
\end{equation}
\begin{equation}
\label{l1_10}
Q_{2n+2}(x) = (1 + g_{2n+2} x) Q_{2n+1}(x) + f_{2n+2} Q_{2n}(x),
\end{equation}
with $f_{2n+1}\not= 0$, $f_{2n+2}\not= 0$, ($n\in\mathbb{Z}_+$), and
\begin{equation}
\label{l1_12}
Q_{-1}(x) = 0,\quad Q_0(x) = \alpha_0^{(0)},
\end{equation}
then there exists a linear functional $L\colon \mathcal{A} \to \mathbb{C}$, with
$L(1) = 1$, such that
\begin{equation}
\label{l1_14}
L(Q_k(x) Q_n(x))
\left\{
\begin{array}{cc} =0, & k\not=n\\
\not=0, & k=n,
\end{array}
\right.
\qquad k,n\in\mathbb{Z}_+.
\end{equation}
Recall that an $R_I$-type continued fraction is associated with a system of monic polynomials $\{ P_n(z) \}_{n=0}^\infty$, generated by
(\cite[p. 5]{cit_5100_Ismail_Masson_1995})
\begin{equation}
\label{f1_9}
P_n(z) = (z - \mathbf{c}_n) P_{n-1}(z) - \lambda_n (z - \mathbf{a}_n) P_{n-2}(z),\qquad n=1,2,...,
\end{equation}
where $P_{-1}(z) := 0$, $P_0(z) := 1$, and
\begin{equation}
\label{f1_10}
\lambda_{n+1}\not= 0,\quad P_n(\mathbf{a}_{n+1})\not= 0.
\end{equation}
Polynomials $\{ P_n(z) \}_{n=0}^\infty$ are related to biorthogonal rational functions~\cite[Theorem 2.1]{cit_5100_Ismail_Masson_1995}.
The case $\mathbf{a}_n = 0$, $n\geq 2$, is related to general $T$-fractions and to the above orthogonal
Laurent polynomials $R_n$~\cite{cit_1000_H_v_R__1986}.
In fact, given a system of monic polynomials $\{ P_n(z) \}_{n=0}^\infty$, generated by~(\ref{f1_9}),(\ref{f1_10}) with $\mathbf{a}_n \equiv 0$,
and $\mathbf{c}_n \in\mathbb{C}\backslash\{ 0 \}$, $n\in\mathbb{N}$,
one can define
\begin{equation}
\label{f1_20}
\widetilde Q_{2n}(x) = \frac{1}{ \xi_{2n} } \frac{ P_{2n}(x) }{ x^n },\
\widetilde Q_{2n+1}(x) = \frac{1}{ \xi_{2n+1} } \frac{ P_{2n+1}(x) }{ x^{n+1} },\ n\in\mathbb{Z}_+,
\end{equation}
where
\begin{equation}
\label{f1_22}
\xi_k = (-1)^k \prod_{j=0}^k \mathbf{c}_j,\ \mathbf{c}_0:=1,\qquad k\in\mathbb{Z}_+.
\end{equation}
Then $\{ \widetilde Q_k \}_{k=0}^\infty$ satisfy recurrence relations~(\ref{l1_9}),(\ref{l1_10}) with
\begin{equation}
\label{f1_24}
g_k = -\frac{1}{ \mathbf{c}_k },\ f_k = -\frac{\lambda_k \xi_{k-2}}{ \xi_k },\qquad k\in\mathbb{N}.
\end{equation}
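For instance, the odd-index relation~(\ref{l1_9}) can be verified directly (the even-index relation~(\ref{l1_10}) is verified in the same way). Dividing~(\ref{f1_9}) (with $\mathbf{a}_{2n+1}=0$) by $\xi_{2n+1} x^{n+1}$ and using $\xi_{2n+1} = -\mathbf{c}_{2n+1} \xi_{2n}$, we get
$$ \widetilde Q_{2n+1}(x) = \frac{ x - \mathbf{c}_{2n+1} }{ -\mathbf{c}_{2n+1} x } \frac{ P_{2n}(x) }{ \xi_{2n} x^n }
- \frac{ \lambda_{2n+1} \xi_{2n-1} }{ \xi_{2n+1} } \frac{ P_{2n-1}(x) }{ \xi_{2n-1} x^n } =
\left( x^{-1} - \frac{1}{ \mathbf{c}_{2n+1} } \right) \widetilde Q_{2n}(x) + f_{2n+1} \widetilde Q_{2n-1}(x), $$
which is relation~(\ref{l1_9}) with $g_{2n+1}$ and $f_{2n+1}$ as in~(\ref{f1_24}).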
Let
\begin{equation}
\label{f1_29}
f(z) = \sum_{k=0}^\infty d_k z^k,\qquad d_k\in\mathbb{C}\backslash\{ 0 \},\quad d_0=1,
\end{equation}
be a power series with a non-zero radius of convergence $\rho$: $0 <\rho \leq +\infty$.
Denote by $f_n(z)$ the $n$-th partial sum of $f$, and
\begin{equation}
\label{f1_30}
F_n(z) = \frac{1}{d_n} f_n(z),\qquad n\in\mathbb{Z}_+.
\end{equation}
In~\cite{cit_90000_Z} it was shown that polynomials $\{ F_n(z) \}_{n=0}^\infty$ satisfy relations~(\ref{f1_9}),(\ref{f1_10})
with $\mathbf{a}_n \equiv 0$, and
\begin{equation}
\label{f1_31}
\mathbf{c}_n = -\frac{d_{n-1}}{d_n},\quad n\in\mathbb{N};\quad \lambda_n = \frac{ d_{n-2} }{ d_{n-1} },\quad n\geq 2.
\end{equation}
The associated orthogonal Laurent polynomials are given as follows:
\begin{equation}
\label{f1_32}
R_{2n}(z) = \frac{ f_{2n}(z) }{ z^n },\quad R_{2n+1}(z) = \frac{ f_{2n+1}(z) }{ z^{n+1} },\qquad n\in\mathbb{Z}_+.
\end{equation}
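In the case of the partial sums, the Laurent polynomials~(\ref{f1_32}) are exactly the $\widetilde Q_n$ from~(\ref{f1_20}). Indeed, by~(\ref{f1_22}) and~(\ref{f1_31}), with $d_0=1$,
$$ \xi_k = (-1)^k \prod_{j=1}^k \left( -\frac{ d_{j-1} }{ d_j } \right) = \prod_{j=1}^k \frac{ d_{j-1} }{ d_j } = \frac{1}{ d_k },\qquad k\in\mathbb{Z}_+, $$
and therefore (with $P_n = F_n$)
$$ \widetilde Q_{2n}(x) = d_{2n} \frac{ F_{2n}(x) }{ x^n } = \frac{ f_{2n}(x) }{ x^n } = R_{2n}(x),\qquad
\widetilde Q_{2n+1}(x) = d_{2n+1} \frac{ F_{2n+1}(x) }{ x^{n+1} } = R_{2n+1}(x),\qquad n\in\mathbb{Z}_+. $$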
By the above mentioned result of Hendriksen and Van Rossum there exists a linear functional $\mathbf{L}$ on Laurent polynomials, such that
$\mathbf{L}(R_n R_m) = 0$, when $n\not= m$, while $\mathbf{L}(R_n^2)\not= 0$.
Our main purpose here is to obtain an explicit integral representation for $\mathbf{L}$.
We shall derive a generating function for $\{ R_k \}_{k=0}^\infty$, and use some methods from the theory of generating functions.
The case of finite systems of Laurent polynomials $\{ Q_k \}_{k=0}^{2\mathbf{n}}$ ($\mathbf{n}\in\mathbb{N}$),
satisfying relations~(\ref{l1_9}),(\ref{l1_10}),
is treated in another way.
We use results on moment problems to obtain an explicit integral representation for the corresponding linear functional $L$.
The latter is done not only for the case of the partial sums, but for arbitrary such finite systems
satisfying an additional condition (see~(\ref{f2_51})).
\noindent
{\bf Notations. }
Besides the notations given above, we shall use the following definitions.
As usual, we denote by $\mathbb{R}, \mathbb{C}, \mathbb{N}, \mathbb{Z}, \mathbb{Z}_+$,
the sets of real numbers, complex numbers, positive integers, integers and non-negative integers,
respectively.
For $k,l\in\mathbb{Z}$, we set $\mathbb{Z}_{k,l} := \{ j\in\mathbb{Z}: k\leq j\leq l\}$.
Set $\mathbb{T} := \{ z\in\mathbb{C}:\ |z|=1 \}$,
$\mathbb{D} := \{ z\in\mathbb{C}:\ |z|<1 \}$.
By $\mathfrak{B}(\mathbb{C})$ we mean the set of all Borel subsets of $\mathbb{C}$.
By $\mathbb{P}$ we denote the set of all polynomials with complex coefficients.
\section{The partial sums of a power series and orthogonal Laurent polynomials.}
Consider a power series $f(z)$, as in~(\ref{f1_29}), its partial sums $f_n(z)$, and the associated Laurent polynomials
$\{ R_k \}_{k=0}^\infty$ from~(\ref{f1_32}).
There are various methods for deriving candidates for generating functions, as described in detail in the book of McBride~\cite{cit_5700_McBride__1971};
see also the book of Rainville~\cite{cit_5150_Rainville}.
A powerful tool is formal series manipulation, see~\cite[p. 11]{cit_5700_McBride__1971} for examples.
Interchanging the order of summation
and using Lemma 10 on pages 56, 57 in~\cite{cit_5150_Rainville},
we can formally write:
$$ \sum_{n=0}^\infty f_n(x) t^n = \sum_{n=0}^\infty \sum_{k=0}^n d_k x^k t^n =
\sum_{n,k=0}^\infty d_k x^k t^{n+k} = $$
$$ = \sum_{n=0}^\infty t^n \sum_{k=0}^\infty d_k (xt)^k = \frac{1}{(1-t)} f(xt),\qquad |t|<1. $$
It remains to verify that the obtained candidate is valid. In the case of a generating function for $R_n$ we need some additional work, since
the correspondence between $R_n$ and $f_n$ depends on the parity of $n$.
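As a quick illustration of the candidate identity above, one may take $f(z) = \frac{1}{1-z}$, so that $d_k \equiv 1$ and $\rho = 1$. Then $f_n(x) = \frac{ 1 - x^{n+1} }{ 1 - x }$ ($x\ne 1$), and for $|t|<1$, $|x|<1$:
$$ \sum_{n=0}^\infty f_n(x) t^n = \frac{1}{1-x} \left( \frac{1}{1-t} - \frac{x}{1-xt} \right) = \frac{1}{ (1-t)(1-xt) } = \frac{1}{1-t} f(xt). $$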
\begin{theorem}
\label{t2_1}
Let $f(z)$ be a power series from~(\ref{f1_29}) with a non-zero radius of convergence $\rho$ ($\leq +\infty$), and $f_n(z)$ be its $n$-th partial sum.
Let $\{ R_k(z) \}_{k=0}^\infty$ be defined by~(\ref{f1_32}).
Then the following relations hold:
\begin{equation}
\label{f2_5}
\frac{1}{(1-t)} f(xt) = \sum_{n=0}^\infty f_n(x) t^n,\qquad t,x\in\mathbb{C}:\ |t|<1,\ |x|<\rho.
\end{equation}
$$ \left( \frac{ \sqrt{x} + 1 }{ \sqrt{x} - z } \right) f(\sqrt{x} z) +
\left( \frac{ \sqrt{x} - 1 }{ \sqrt{x} + z } \right) f(-\sqrt{x} z) = $$
\begin{equation}
\label{f2_7}
= 2 \sum_{n=0}^\infty R_n(x) z^n,\qquad x,z\in\mathbb{C}:\ 0<|x|<\rho,\ |z|<|\sqrt{x}|.
\end{equation}
Here by $\sqrt{x}$ we mean an arbitrarily chosen and fixed value of the square root for each $x$ (and the corresponding
values need not form an analytic branch).
\end{theorem}
\noindent
\textbf{Proof.}
Choose an arbitrary $x$: $|x|<\rho$.
The left-hand side of~(\ref{f2_5}) is an analytic function of $t$ in $\mathbb{D}$.
Calculating its Taylor coefficients and using the Leibniz rule for derivatives, we derive relation~(\ref{f2_5}).
Let us check relation~(\ref{f2_7}).
Choose an arbitrary $x$: $0<|x|<\rho$, $t\in\mathbb{D}$, and fix an arbitrary value of $\sqrt{x}$. Denote $z = t\sqrt{x}$.
By the already established relation~(\ref{f2_5}) we may write:
$$ \left( \frac{ \sqrt{x} }{ \sqrt{x} - z } \right) f(\sqrt{x} z) = \sum_{n=0}^\infty f_n(x) (z / \sqrt{x})^n = $$
\begin{equation}
\label{f2_9}
= \sum_{k=0}^\infty R_{2k}(x) z^{2k} + \sqrt{x} \sum_{k=0}^\infty R_{2k+1}(x) z^{2k+1}.
\end{equation}
Denote
\begin{equation}
\label{f2_15}
\varphi(z;x) := \left( \frac{ \sqrt{x} }{ \sqrt{x} - z } \right) f(\sqrt{x} z).
\end{equation}
Then
$$ \varphi(z;x) + \varphi(-z;x) + (\varphi(z;x) - \varphi(-z;x))/\sqrt{x} = 2\sum_{n=0}^\infty R_n(x) z^n. $$
Collecting terms with $f(\sqrt{x} z)$ and $f(-\sqrt{x} z)$ we obtain relation~(\ref{f2_7}).
$\Box$
The obtained generating functions can find various applications. For example, by the integral formula for the Taylor coefficients
one can write:
$$ R_n(x) = $$
$$ = \frac{1}{4\pi i} \oint_{|z| = |\sqrt x|/2}
\left(
\left( \frac{ \sqrt{x} + 1 }{ \sqrt{x} - z } \right) f(\sqrt{x} z) +
\left( \frac{ \sqrt{x} - 1 }{ \sqrt{x} + z } \right) f(-\sqrt{x} z)
\right) z^{-n-1} dz, $$
\begin{equation}
\label{f2_17}
x \in\mathbb{C}:\ 0<|x|<\rho.
\end{equation}
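For instance, for $n=0$ the only singularity of the integrand inside the contour $|z| = |\sqrt{x}|/2$ is a simple pole at $z=0$ (the points $z = \pm\sqrt{x}$ lie outside the contour, and $f(\pm\sqrt{x} z)$ is analytic on the closed disk $|z|\leq |\sqrt{x}|/2$, since $|x|<\rho$), so the residue at $z=0$ gives
$$ \frac{1}{2} \left( \frac{ \sqrt{x} + 1 }{ \sqrt{x} } + \frac{ \sqrt{x} - 1 }{ \sqrt{x} } \right) f(0) = 1 = R_0(x), $$
as expected.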
Our next purpose is to obtain an explicit integral representation for the functional $\mathbf{L}$ which was discussed in
the Introduction.
In order to find a suitable candidate for a measure of integration we shall use generating functions.
It is well known that they help to establish orthogonality relations, see Section~19.3 in~\cite{cit_700_Book_Bateman___v_3}.
Observe that the linear functional $\mathbf{L}$ is uniquely determined by the following relations:
\begin{equation}
\label{f2_19}
\mathbf{L}(R_n) = \delta_{n,0},\qquad n\in\mathbb{Z}_+.
\end{equation}
In fact, the span of functions $\{ R_n \}_{n=0}^\infty$ coincides with $\mathcal{A}$. If we formally apply $\mathbf{L}$ to both
sides of relation~(\ref{f2_7}), then we obtain
\begin{equation}
\label{f2_22}
\mathbf{L}_x(I_4(x;z)) = 2,
\end{equation}
where the subscript $x$ means that $\mathbf{L}$ acts in the variable $x$, and
\begin{equation}
\label{f2_24}
I_4(x;z) := \left( \frac{ \sqrt{x} + 1 }{ \sqrt{x} - z } \right) f(\sqrt{x} z) +
\left( \frac{ \sqrt{x} - 1 }{ \sqrt{x} + z } \right) f(-\sqrt{x} z).
\end{equation}
It is convenient to introduce a new variable $y=\sqrt{x}$, and write
\begin{equation}
\label{f2_27}
I_4(y^2;z) := \left( \frac{ y + 1 }{ y - z } \right) f(y z) +
\left( \frac{ y - 1 }{ y + z } \right) f(-y z).
\end{equation}
We can multiply the right-hand side of~(\ref{f2_27}) by an arbitrarily chosen function $a(y)$, and then calculate
some contour integrals $\oint I_4(y^2;z) a(y) dy$, trying to obtain the value $2$.
In this manner we obtain a candidate which is described in the next theorem.
\begin{theorem}
\label{t2_2}
Let $f(z)$ be a power series as in~(\ref{f1_29}) with a non-zero radius of convergence $\rho$, and $f_n(z)$ be its $n$-th partial sum.
Define $\{ R_k(z) \}_{k=0}^\infty$ by relations~(\ref{f1_32}). Let $\mathbf{L}$ be a linear functional on $\mathcal{A}$,
such that $\mathbf{L}(R_n R_m) = 0$, when $n\not= m$, while $\mathbf{L}(R_n^2)\not= 0$, $n,m\in\mathbb{Z}_+$.
Then the following integral representation holds:
\begin{equation}
\label{f2_29}
\mathbf{L}(R) = \frac{1}{2\pi i} \oint_{ |y|=c } R(y^2) \frac{dy}{ yf(y^2) },\qquad R\in\mathcal{A}.
\end{equation}
Here $c$ is an arbitrary positive number which is less than $\sqrt{\rho}$, and less than $\widehat\rho$, where $\widehat\rho$ is the radius of convergence
for the Maclaurin series of $1/f(y^2)$.~\footnote{Since $f(0)=1$, it follows by continuity that $\widehat\rho > 0$.}
\end{theorem}
\noindent
\textbf{Proof.}
Denote the right-hand side of~(\ref{f2_29}) by $\mathcal{L}(R)$.
Let us check that $\mathcal{L}$ has property~(\ref{f2_19}).
Choose an arbitrary $n\in\mathbb{Z}_+$.
By~(\ref{f1_32}) we may write:
$$ \mathcal{L}(R_{2n}) = \frac{1}{2\pi i} \oint_{ |y|=c } f_{2n}(y^2) \frac{dy}{ y^{2n+1} f(y^2) } = $$
$$ = \frac{1}{2\pi i} \oint_{ |y|=c } \left( f(y^2) - \sum_{j=2n+1}^\infty d_j y^{2j} \right)
\frac{dy}{ y^{2n+1} f(y^2) } = $$
$$ = \frac{1}{2\pi i} \oint_{ |y|=c } \left( 1 - \left(\sum_{j=2n+1}^\infty d_j y^{2j} \right) ( f(y^2) )^{-1} \right)
\frac{dy}{ y^{2n+1} } = $$
$$ = \frac{1}{ (2n)! } \left[ 1 - \left(\sum_{j=2n+1}^\infty d_j y^{2j} \right) ( f(y^2) )^{-1} \right]^{(2n)} (0) =
\left\{ \begin{array}{cc} 1, & n=0\\
0, & n>0 \end{array}
\right. . $$
The last equality follows from the Leibniz rule for derivatives: the series $\sum_{j=2n+1}^\infty d_j y^{2j}$ begins with the power $y^{4n+2}$, so all of its derivatives of order at most $2n$ vanish at $y=0$; hence for $n>0$ the $(2n)$-th derivative of the bracketed expression vanishes at the origin, while for $n=0$ the bracket equals $1$ at $y=0$.
In a similar way, we may write
$$ \mathcal{L}(R_{2n+1}) = \frac{1}{2\pi i} \oint_{ |y|=c } f_{2n+1}(y^2) \frac{dy}{ y^{2n+3} f(y^2) } = $$
$$ = \frac{1}{2\pi i} \oint_{ |y|=c } \left( f(y^2) - \sum_{j=2n+2}^\infty d_j y^{2j} \right)
\frac{dy}{ y^{2n+3} f(y^2) } = $$
$$ = \frac{1}{ (2n+2)! } \left[ 1 - \left(\sum_{j=2n+2}^\infty d_j y^{2j} \right) ( f(y^2) )^{-1} \right]^{(2n+2)} (0) = 0. $$
This completes the proof. $\Box$
For example, consider the following function:
\begin{equation}
\label{f2_35}
f(z) = e^{bz} \prod_{j=0}^m (1 - a_j z)^{-\lambda_j},\qquad b\geq 0,\ 0<a_j<1,\ \lambda_j>0;\ m\in\mathbb{N}.
\end{equation}
The corresponding Maclaurin series converges in $K := \{ z\in\mathbb{C}:\ |z| < \min(1/a_0,...,1/a_m) \}$, which contains $\mathbb{D}$.
Observe that this series has positive coefficients and the constant term $f(0)=1$.
Denote $\rho := \min(1/a_0,...,1/a_m)$.
The function
$$ 1/ f(y^2) = e^{-b y^2} \prod_{j=0}^m (1 - a_j y^2)^{\lambda_j} $$
is analytic in $K_1 := \{ z\in\mathbb{C}:\ |z| < \sqrt{ \rho } \}$. Therefore, in this case we may
apply Theorem~\ref{t2_2} and write
\begin{equation}
\label{f2_39}
\mathbf{L}(R) = \frac{1}{2\pi i} \oint_{ \mathbb{T} } R(y^2) e^{-b y^2} \prod_{j=0}^m (1 - a_j y^2)^{\lambda_j}
\frac{dy}{ y },\qquad R\in\mathcal{A}.
\end{equation}
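As a quick consistency check, the right-hand side of~(\ref{f2_39}) applied to $R_0 = 1$ recovers $\mathbf{L}(R_0)=1$: by the residue theorem,
$$ \frac{1}{2\pi i} \oint_{ \mathbb{T} } e^{-b y^2} \prod_{j=0}^m (1 - a_j y^2)^{\lambda_j}
\frac{dy}{ y } = \left. e^{-b y^2} \prod_{j=0}^m (1 - a_j y^2)^{\lambda_j} \right|_{y=0} = 1. $$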
Notice that one may also use various cases of the generalized hypergeometric function as $f(z)$, and investigate the corresponding
partial sums and orthogonal Laurent polynomials.
\noindent
\textbf{Finite systems of Laurent polynomials.}
Fix an arbitrary $\textbf{n}\in\mathbb{N}$. Denote by $\mathcal{A}_{\textbf{n}}$
the set of all (formal) Laurent polynomials of the following form:
\begin{equation}
\label{f2_48}
\sum_{j=-\textbf{n}}^{\textbf{n}} \lambda_j x^j,\qquad \lambda_j\in\mathbb{C},
\end{equation}
where $x$ is an indeterminate.
Let $\{ Q_k \}_{k=0}^{2\mathbf{n}}$ be a set of Laurent polynomials which have forms as in~(\ref{l1_7}),(\ref{l1_8}).
Of course, these polynomials belong to $\mathcal{A}_{\textbf{n}}$ and span it.
Suppose that $\{ Q_k \}_{k=0}^{2\mathbf{n}}$ satisfy relations~(\ref{l1_9}),(\ref{l1_10}) with
some $f_{2n+1}\not= 0$, $f_{2n+2}\not= 0$, ($n\in\mathbb{Z}_{0,\mathbf{n}-1}$), and $Q_{-1}(x) := 0$.
We can extend the sequences of complex coefficients $\{ f_k \}_{k=0}^{2\mathbf{n}}$, $\{ g_k \}_{k=0}^{2\mathbf{n}}$,
to infinite complex sequences $\{ f_k \}_{k=0}^\infty$, $\{ g_k \}_{k=0}^\infty$, $f_k\not=0$, in an arbitrary way.
Then we extend the sequence of $Q_k$ by relations~(\ref{l1_9}),(\ref{l1_10}) to a sequence $\{ Q_k \}_{k=0}^\infty$.
By the result of Hendriksen and Van Rossum there exists a linear functional $L$ on Laurent polynomials, such that
$L(Q_n Q_m) = 0$, when $n\not= m$, while $L(Q_n^2)\not= 0$, $n,m\in\mathbb{Z}_+$.
Denote by $L_{\textbf{n}}$ the restriction of $L$ to the set $\mathcal{A}_{\textbf{n}}$. We have
\begin{equation}
\label{f2_50}
L_{\textbf{n}} (Q_k Q_n)
\left\{
\begin{array}{cc} =0, & k\not=n\\
\not=0, & k=n,
\end{array}
\right.
\qquad k,n\in\mathbb{Z}_{0,2\mathbf{n}}.
\end{equation}
\begin{theorem}
\label{t2_3}
Let $\{ Q_k \}_{k=0}^{2\mathbf{n}}$ be a finite set of Laurent polynomials, having forms as in~(\ref{l1_7}),(\ref{l1_8});
$\textbf{n}\in\mathbb{N}$.
Suppose that $\{ Q_k \}_{k=0}^{2\mathbf{n}}$ satisfy relations~(\ref{l1_9}),(\ref{l1_10}) with
some $f_{2n+1}\not= 0$, $f_{2n+2}\not= 0$, ($n\in\mathbb{Z}_{0,\mathbf{n}-1}$), and $Q_{-1}(x) := 0$.
Suppose that the corresponding linear functional $L_{\textbf{n}}$, having property~(\ref{f2_50}),
satisfies the following condition:
\begin{equation}
\label{f2_51}
a := L_{\textbf{n}} (x^{-\mathbf{n}}) \not= 0.
\end{equation}
Then $L_{\textbf{n}}$ admits the following integral representation:
\begin{equation}
\label{f2_52}
L_{\textbf{n}} (Q) =
\int_{\mathbb{C}} Q(z)\, a z^{\mathbf{n}}\, d\mu(z),\qquad Q\in\mathcal{A}_{\textbf{n}},
\end{equation}
where $\mu$ is a finitely atomic positive measure on $\mathfrak{B}(\mathbb{C})$.
\end{theorem}
\noindent
\textbf{Proof.}
Denote
$$ s_k = \frac{1}{a} L_{\textbf{n}} (x^{k-\mathbf{n}}),\qquad k=0,1,...,2\mathbf{n}. $$
Consider the following moment problem (see~\cite{cit_3700_Z}, \cite{cit_95000_Zagorodnyuk}):
find a (non-negative) measure $\mu$ on $\mathfrak{B}(\mathbb{C})$
such that
\begin{equation}
\label{f2_54}
\int_{\mathbb{C}} z^k d\mu(z) = s_k,\qquad k\in\mathbb{Z}_{0,2\mathbf{n}}.
\end{equation}
Since $s_0=1$, it is solvable and according to Algorithm~1 in~\cite{cit_95000_Zagorodnyuk}
it has a finitely atomic solution.
For an arbitrary $Q\in\mathcal{A}_{\mathbf{n}}$ we may write:
$$ Q(z) = \sum_{k=0}^{2\mathbf{n}} a_k z^{k-\mathbf{n}},\qquad a_k\in\mathbb{C}. $$
Substituting this expression into both sides of relation~(\ref{f2_52}), we shall obtain the same value.
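In detail, both sides of~(\ref{f2_52}) reduce to the same sum:
$$ L_{\textbf{n}} (Q) = \sum_{k=0}^{2\mathbf{n}} a_k L_{\textbf{n}} (x^{k-\mathbf{n}}) = a \sum_{k=0}^{2\mathbf{n}} a_k s_k
= a \sum_{k=0}^{2\mathbf{n}} a_k \int_{\mathbb{C}} z^k d\mu(z) = \int_{\mathbb{C}} Q(z)\, a z^{\mathbf{n}}\, d\mu(z). $$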
The proof is complete.
$\Box$
We remark that Algorithm~1 in~\cite{cit_95000_Zagorodnyuk} provides an explicit procedure for the construction
of the corresponding atomic solution (see Example~1 in~\cite{cit_95000_Zagorodnyuk}).
Thus, for large classes of orthogonal Laurent polynomials we have constructed explicit measures and integral representations. Generating functions and moment problems have proved to be effective tools in this investigation.
V. N. Karazin Kharkiv National University \newline\indent
School of Mathematics and Computer Sciences \newline\indent
Department of Higher Mathematics and Informatics \newline\indent
Svobody Square 4, 61022, Kharkiv, Ukraine
[email protected]; [email protected]
}
\end{document}
|
\begin{document}
\title{The reflexive closure of the adjointable operators}
\author{E. G. Katsoulis}
\address{Department of Mathematics, East Carolina University, Greenville, NC 27858, USA}
\email{[email protected]}
\thanks{2010 {\it Mathematics Subject Classification.}
46L08, 47L10}
\thanks{{\it Key words and phrases:} Hilbert $C^*$-module, adjointable operator, reflexive operator algebra, reflexive closure, invariant subspace, left centralizer, left multiplier.}
\begin{abstract} Given a Hilbert $C^*$-module $E$ over a C*-algebra $\cl A$, we give an explicit description for the invariant subspace lattice $\Lat \cl L (E)$ of all adjointable operators on $E$. We then show that the collection $\End_{\cl A}(E)$ of all bounded $\cl A$-module operators acting on $E$ forms the reflexive closure for $ \cl L (E) $, i.e., $\End _{\cl A} (E) = \Alg \Lat \cl L (E) $. Finally we make an observation regarding the representation theory of the left centralizer algebra of a $C^*$-algebra and use it to give an intuitive proof of a related result of H.~Lin. \end{abstract}
\maketitle
\section{Introduction}
In this note, $\cl A$ denotes a C*-algebra and $E$ a Hilbert C*-module over $\cl A$, i.e., a right $\cl A$-module equipped with an $\cl A$-valued inner product $\sca{\, , \, }$ so that the norm $\| \xi \|\equiv \| \sca{\xi , \xi }^{1/2} \|$ makes $E$ into a Banach space. The collection of all bounded $\cl A$-module operators acting on $E$ is denoted as $\End_{\cl A} (E)$. A linear operator $S$ acting on $E$ is said to be adjointable iff for every $y \in E$ there exists $ y' \in E$ so that $\sca{S x , y} = \sca{x , y'}$ for all $x \in E$. Elementary examples of adjointable operators are the ``rank one" operators $\theta_{\eta, \xi}$, defined by $\theta_{\eta, \xi}(x)\equiv \eta \sca{\xi, x}$, where $\eta, \xi , x \in E$. The collection of all adjointable operators acting on $E$ will be denoted as $\cl L (E)$ while the norm closed subalgebra generated by the rank one operators will be denoted as $\cl K (E)$.
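For orientation, we recall the standard computation showing that each $\theta_{\eta, \xi}$ is indeed adjointable, with adjoint $\theta_{\xi, \eta}$: for all $x, y \in E$,
\[
\sca{\theta_{\eta, \xi}(x) , y} = \sca{\eta \sca{\xi , x} , y} = \sca{\xi , x}^* \sca{\eta , y} = \sca{x , \xi} \sca{\eta , y} = \sca{x , \xi \sca{\eta , y}} = \sca{x , \theta_{\xi , \eta}(y)},
\]
using the usual conventions that the inner product is conjugate linear in its first variable and $\cl A$-linear in its second.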
It is a well known fact that $\cl L (E) \subseteq \End_{\cl A} (E)$. However, the reverse inclusion is known to fail in general; this is perhaps the first obstacle one encounters when extending the theory of operators on a Hilbert space to that of operators on a Hilbert $C^*$-module. This problem has been addressed since the beginning of the theory \cite[page 447]{Pasc} and has influenced its subsequent development. The first few chapters of the monograph of Manuilov and Troitsky \cite{ManT} and the references therein provide the basics of the theory and give a good account of what is known regarding that issue. (See also \cite{Ble, lance1995hilbert}.) The purpose of this note is to demonstrate that the inequality between $\cl L (E) $ and $\End_{\cl A} (E)$ is intimately related to another area of continuing mathematical interest, the reflexivity of operator algebras.
If ${\mathfrak{A}}$ is a unital operator algebra acting on a Banach space ${\mathfrak{X}}$, then $\Lat {\mathfrak{A}}$ will denote the collection of all closed subspaces $M\subseteq {\mathfrak{X}}$ which are left invariant by ${\mathfrak{A}}$, i.e., $A(m)\in M$, for all $A \in {\mathfrak{A}}$ and $m \in M$. Dually, for a collection ${\mathfrak{L}}$ of closed subspaces of ${\mathfrak{X}}$, we write $\Alg {\mathfrak{L}}$ to denote the collection of all bounded operators on ${\mathfrak{X}}$ that leave invariant each element of ${\mathfrak{L}}$. The reflexive cover of an algebra ${\mathfrak{A}}$ of operators acting on ${\mathfrak{X}}$ is the algebra $\Alg \Lat {\mathfrak{A}}$; we say that ${\mathfrak{A}}$ is \textit{reflexive} iff
\[
{\mathfrak{A}} = \Alg \Lat {\mathfrak{A}}.
\]
Similarly, the reflexive cover of a subspace lattice ${\mathfrak{L}}$ is the lattice $\Lat \Alg {\mathfrak{L}}$ and ${\mathfrak{L}}$ is said to be reflexive if ${\mathfrak{L}} = \Lat \Alg {\mathfrak{L}}$. A formal study of reflexivity for operator algebras and subspace lattices began with the work of Halmos \cite{Hal}, after Ringrose's proof \cite{Rin} that all nests on Hilbert space are reflexive. Since then, the concept of reflexivity for operator algebras and subspace lattices has been addressed by various authors on both Hilbert space \cite{An, ArP, a, DKP, Had2, Kak, KatP, Ol, Sar, ShuT} and Banach space \cite{BMZ, Erd, Had}, including in particular investigations on a Hilbert $C^*$-module.
The main results of this short note provide a link between the two areas of inquiry discussed above. In Theorem \ref{main} we show that the presence of bounded but not adjointable module operators on a $C^*$-module $E$ is equivalent to the failure of reflexivity for $\cl L (E)$. (Here we think of $\cl L (E)$ simply as an operator algebra acting on $E$.) Actually, we do more: we explicitly describe $\Lat \cl L (E)$ and we show that as a complete lattice, $\Lat \cl L (E)$ is isomorphic to the lattice of closed left ideals of $\overline{\sca{E,E}} $ (Theorem~\ref{lat}). A key step in the proof of Theorem \ref{main} is a classical result of Barry Johnson \cite[Theorem 1]{Jo}. Actually, our Theorem \ref{main} can also be thought of as a generalization of Johnson's result, since its statement reduces to the statement of \cite[Theorem 1]{Jo}, when applied to the case of the trivial (unital) Hilbert $C^*$-module.
Another interpretation for the inequality between $\cl L (E)$ and $\End_{\cl A} (E)$ comes from the work of H. Lin. Lin shows in \cite[Theorem 1.5]{Lin} that $\End_{\cl A} (E)$ is isometrically isomorphic as a Banach algebra to the left centralizer algebra of $\cl K (E)$. Furthermore, the isomorphism Lin constructs extends the familiar $*$-isomorphism between $\cl L (E)$ and the double centralizer algebra of $\cl K (E)$. This shows that the gap between $\cl L (E)$ and $\End_{\cl A} (E)$ is solely due to the presence of left centralizers for $\cl K (E)$ which fail to be double centralizers. In Proposition~\ref{repn} we observe that the representation theory of the left centralizer algebra of a $C^*$-algebra is flexible enough to allow the use of representations on a Banach space. This leads to yet another short proof of Lin's Theorem, which we present in Theorem~\ref{Linthm}. Our proof makes no reference to Cohen's Factorization Theorem and its only prerequisite is the existence of a contractive approximate identity for a $C^*$-algebra. (Compare also with \cite[Proposition 8.1.16 (ii)]{Ble}.)
A final remark. Johnson's Theorem \cite[Theorem 1]{Jo}, which plays a central role in this paper, may no longer be true for Banach algebras which are not semisimple. Nevertheless there are specific classes of (non-semisimple) operator algebras for which this theorem is actually valid. This is being explored in a subsequent work \cite{Katsnew}.
\section{The main result}
We begin by identifying a useful class of subspaces of $E$.
\begin{definition} \label{defn:E}
Let $E$ be a Hilbert C*-module over a C*-algebra $\cl A$.
If ${\mathcal{J}} \subseteq \cl A$, then we define
$$E({\mathcal{J}}):=\overline{\sspp}\{\xi a \mid \xi\in E, a\in {\mathcal{J}}\}.$$
\end{definition}
The correspondence ${\mathcal{J}} \mapsto E({\mathcal{J}})$ of Definition~\ref{defn:E} is not bijective. Indeed, if $l({\mathcal{J}})$ is the closed left ideal generated by ${\mathcal{J}}\subseteq \cl A$, then it is easy to see that $E(l({\mathcal{J}}))=E({\mathcal{J}})$. Therefore we restrict our attention to closed left ideals of $\cl A$. It turns out that an extra step is still required to ensure bijectivity. First we need the following.
\begin{lemma}\label{descr}
Let $E$ be a Hilbert C*-module over a C*-algebra $\cl A$ and let ${\mathcal{J}}\subseteq \cl A$ be a closed left ideal.
Then
\[
E({\mathcal{J}})=\{ \xi \in E \mid \sca{\eta , \xi} \in {\mathcal{J}} \mbox{ for all } \eta \in E\}.
\]
\end{lemma}
\begin{proof} The inclusion
\[
E({\mathcal{J}}) \subseteq \{ \xi \in E \mid \sca{\eta , \xi} \in {\mathcal{J}} \mbox{ for all } \eta \in E\}
\]
is obvious. The reverse inclusion follows from the well known fact \cite[Lemma 1.3.9]{ManT} that
\[
\xi = \lim_{\epsilon \rightarrow 0} \xi \sca{\xi , \xi}[ \sca{\xi , \xi } + \epsilon ]^{-1}
\]
for any $\xi \in E$. \end{proof}
The following gives now a complete description for the lattice of invariant subspaces of the adjointable operators.
\begin{theorem} \label{lat}
Let $E$ be a Hilbert C*-module over a C*-algebra $\cl A$. Then
\[
\Lat \cl L (E)= \{ E({\mathcal{J}}) \mid {\mathcal{J}} \subseteq \overline{\sca{E,E}} \mbox{ closed left ideal } \}
\]
and the association ${\mathcal{J}} \mapsto E({\mathcal{J}})$ establishes a complete lattice isomorphism between the closed left ideals of $\overline{\sca{E,E}} $ and $\Lat \cl L (E)$.
In addition,
\[
\Lat \cl K (E) = \Lat \cl L (E) = \Lat \End _{\cl A} (E).
\]
\end{theorem}
\begin{proof} First observe that if $ {\mathcal{J}} \subseteq \cl A$ is a closed left ideal, then the subspace $E({\mathcal{J}})$ is invariant under $\cl L (E)$, because $\cl L (E)$ consists of $\cl A$-module operators.
Conversely assume that $M \in \Lat \cl L (E)$ and let
\[
J(M) \equiv \overline{\sspp}\{ \sca{ \eta , m } \mid \eta \in E \mbox{ and }m \in M \}.
\]
Clearly, $J(M)\subseteq \overline{\sca{E,E}}$ and the identity
\[
a\sca{ \eta , m} = \sca{ \eta a^* , m}, \, a \in \cl A, \eta \in E, m \in M,
\]
implies that $J(M)$ is a left ideal. We claim that $M=E(J(M))$. Indeed, if $m \in M$, then by the definition of $J(M)$ we have $\sca{ \eta , m } \in J(M)$, for all $\eta \in E$, and so Lemma \ref{descr} implies that $m \in E(J(M))$. On the other hand, any $\xi a$, with $\xi \in E$ and $a \in J(M)$ is the limit of finite sums of elements of the form $\xi \sca{ \eta , m}$, where $\eta \in E$ and $m \in M$. However
\[
\xi \sca{ \eta , m}= \theta_{\xi , \eta}(m) \in M
\]
and so $M=E(J(M))$. This shows that ${\mathcal{J}} \mapsto E({\mathcal{J}})$ is surjective.
In order to prove that ${\mathcal{J}} \mapsto E({\mathcal{J}})$ is also injective we need to verify that ${\mathcal{J}} = J(E({\mathcal{J}}))$, for any closed left ideal ${\mathcal{J}}\subseteq \overline{\sca{E,E}}$. Since ${\mathcal{J}} \subseteq \overline{\sca{E,E}} $ is a left ideal, $J(E({\mathcal{J}})) \subseteq {\mathcal{J}}$. On the other hand, if $(e_i)_i$ is a right approximate identity for ${\mathcal{J}}$, then any element of ${\mathcal{J}}\subseteq \overline{\sca{E,E}}$ can be approximated by elements of the form
\[
\sum_{k} \, \sca{\eta_k , \xi_k} e_k = \sum_{k} \, \sca{\eta_k , \xi_k e_k},\quad \eta_k , \xi_k \in E.
\]
However, $\xi_k e_k \in E({\mathcal{J}})$, by Definition \ref{defn:E}, and so sums of the above form belong to $J(E({\mathcal{J}})) $. Hence ${\mathcal{J}} \subseteq J(E({\mathcal{J}}))$ and so ${\mathcal{J}} \mapsto E({\mathcal{J}})$ is also injective with inverse $M \mapsto J(M)$.
The proof that ${\mathcal{J}} \mapsto E({\mathcal{J}})$ respects the lattice operations follows from two successive applications of Lemma \ref{descr}. Indeed, if $({\mathcal{J}}_i)_i$ is a collection of closed ideals of $\overline{\sca{E,E}}$, then $\xi \in \cap_i E({\mathcal{J}}_i)$ is equivalent by Lemma~\ref{descr} to $\sca{\eta , \xi} \in \cap_i {\mathcal{J}}_i$ which, once again by Lemma~\ref{descr}, is equivalent to $\xi \in E(\cap_i {\mathcal{J}}_i)$. Therefore $\cap_i E({\mathcal{J}}_i) = E(\cap_i {\mathcal{J}}_i)$. The proof of $\vee_i E({\mathcal{J}}_i) = E(\vee_i {\mathcal{J}}_i)$ is immediate.
For the final assertion of the theorem, first note that
\[
\Lat \cl K (E) \supseteq \Lat \cl L (E) \supseteq \Lat \End _{\cl A} (E).
\]
On the other hand, if $M \in \Lat \cl K (E)$, then an argument identical to that of the second paragraph of the proof shows that $M=E(J(M))$. Hence $M \in \Lat \End _{\cl A} (E)$ and the conclusion follows.
\end{proof}
The following result was proved by B. Johnson \cite[Theorem 1]{Jo} for arbitrary semisimple Banach algebras by making essential use of their representation theory. One can adapt Johnson's original proof to the C*-algebraic context by using the GNS construction and Kadison's Transitivity Theorem wherever representation theory is required in the original proof.
\begin{theorem} \label{Johnson}
Let $\cl A$ be a $C^*$-algebra and let $\Phi$ be a linear operator acting on $\cl A$ that leaves invariant all closed left ideals of $\cl A$. Then $\Phi (ba)=\Phi(b)a$, $\forall\, a,b \in \cl A$. In particular, if $1 \in \cl A$ is a unit then $\Phi$ is the left multiplication operator by $\Phi (1)$.
\end{theorem}
Note that the proof of Theorem~\ref{lat} shows that any bounded $\cl A$-module map leaves invariant $\Lat \cl L (E)$. This establishes one direction in the following, which is the main result of the paper.
\begin{theorem} \label{main}
Let $E$ be a Hilbert module over a C*-algebra $\cl A$. Then
\[
\Alg \Lat \cl L (E) = \End _{\cl A} (E).
\]
In particular, $\End_{\cl A} (E)$ is a reflexive algebra of operators acting on $E$.
\end{theorem}
\begin{proof}
Let $S \in \Alg \Lat \cl L (E)$ and $\xi , \eta \in E$. Consider the linear operator
\[
\Phi_{\eta, \xi}: \cl A \ni a \longmapsto \sca{\eta, S(\xi a) } \in \cl A
\]
We claim that $\Phi_{\eta, \xi}$ leaves invariant any of the closed left ideals of $\cl A$. Indeed, if ${\mathcal{J}} \subseteq \cl A$ is such an ideal and $j \in {\mathcal{J}}$, then $\xi j \in E({\mathcal{J}})$ and since $S \in \Alg \Lat \cl L (E)$, $S(\xi j) \in E({\mathcal{J}})$. By Theorem~\ref{lat}, we have
$$ \Phi_{\eta, \xi}(j)=\sca{\eta, S(\xi j) } \in {\mathcal{J}}$$
and so $ \Phi_{\eta, \xi}$ leaves ${\mathcal{J}}$ invariant, which proves the claim. Hence Theorem~\ref{Johnson} now implies that $\Phi_{\eta, \xi}(ba)= \Phi_{\eta, \xi}(b)a$, $\forall\, a,b \in \cl A$.
Let $(e_i)$ be an approximate unit for $\cl A$. By the above $\Phi_{\eta, \xi}(e_ia) =\Phi_{\eta, \xi }(e_i)a$, $\forall i$, and so
\begin{align*}
\sca{\eta, S(\xi a) } &=\lim_i \sca{\eta, S(\xi e_i a) } = \lim_i \Phi_{\eta, \xi}(e_ia) \\
&=\lim_i \Phi_{\eta, \xi }(e_i)a =\lim_i \sca{\eta, S(\xi e_i) } a \\
&= \sca{\eta, S(\xi ) } a
\end{align*}
Hence
\[
\sca{\eta, S(\xi a) } = \sca{\eta, S(\xi)a}, \quad \forall a \in \cl A,
\]
which establishes that $S$ is an $\cl A$-module map.
\end{proof}
The above Theorem can also be thought of as a generalization of Theorem~\ref{Johnson} (Johnson's Theorem) since its statement reduces to the statement of Theorem \ref{Johnson} when applied to the case of the trivial unital Hilbert $C^*$-module.
\begin{corollary}
If $E$ is a selfdual Hilbert $C^*$-module, then $\cl L (E)$ is reflexive as an algebra of operators acting on $E$.
\end{corollary}
In particular, the above Corollary shows that if $\cl A$ is a unital $C^*$-algebra, then $\cl L ( \cl A^{(n)})$, $1\leq n<\infty$, is a reflexive operator algebra. This is not necessarily true for $\cl L ( \cl A^{(\infty)})$. Indeed in \cite[Example 2.1.2]{ManT} the authors give an example of a unital commutative $C^*$-algebra $\cl A$ for which $\cl L (\cl A^{(\infty)}) \neq \End_{\cl A}(\cl A^{(\infty)})$. By Theorem \ref{main}, $\cl L ( \cl A^{(\infty)})$ is not reflexive.
\section{Left Centralizers and a theorem of H. Lin}
An alternative description for the inclusion $\cl L (E) \subseteq \End_{\cl A}(E)$ has been given by H. Lin in \cite{Lin}.
\begin{definition} \label{centraldefn} If ${\mathfrak{A}}$ is a Banach algebra then a linear and bounded map $\Phi: {\mathfrak{A}} \rightarrow {\mathfrak{A}}$ is called a left centralizer if $\Phi(ab)=\Phi(a)b$, for all $a,b \in {\mathfrak{A}}$. If in addition there exists a map $\Psi: {\mathfrak{A}} \rightarrow {\mathfrak{A}}$ so that $\Psi(a)b=a\Phi(b)$, for all $a,b \in {\mathfrak{A}}$, then $\Phi$ is called a double centralizer.
\end{definition}
The collection of all left (resp. double) centralizers equipped with the supremum norm will be denoted as $\LC ({\mathfrak{A}})$ (resp. $\DC({\mathfrak{A}})$). Note that in the case where ${\mathfrak{A}}$ has an approximate unit, the linearity and boundedness of centralizers do not have to be assumed \textit{a priori} but instead follow from the condition $\Phi(ab)=\Phi(a)b$, for all $a,b \in {\mathfrak{A}}$. (See \cite{Jo2} for a proof; the unital case is of course trivial.)
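For example, for any $b \in {\mathfrak{A}}$ the left multiplication map $L_b(a) = ba$ is a left centralizer; it is in fact a double centralizer, with $\Psi$ the right multiplication map $a \mapsto ab$, since $\Psi(a)c = abc = aL_b(c)$ for all $a, c \in {\mathfrak{A}}$.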
In \cite[Theorem 1.5]{Lin} Lin shows that $\End_{\cl A}(E)$ is isometrically isomorphic as a Banach algebra to $\LC\left(\cl K (E) \right)$. Furthermore, the isomorphism Lin constructs extends the familiar $*$-isomorphism of Kasparov \cite{Kas} between $\cl L (E)$ and $\DC(\cl K(E))$. Lin's proof is similar in nature to that of Kasparov \cite{Kas} for the double centralizers of $\cl K (E)$. However it is more elaborate and also requires some additional results of Paschke \cite{Pasc}. In what follows we give an elementary proof of Lin's Theorem. Our argument depends on the observation that the representation theory for the left centralizers of a $C^*$-algebra $\cl A$ is flexible enough to allow the use of representations on a Banach space.
\begin{definition}
Let ${\mathfrak{X}}$ be a Banach space and let ${\mathfrak{A}}$ be a norm closed subalgebra of $B({\mathfrak{X}})$, the bounded operators on ${\mathfrak{X}}$. The left multiplier algebra of ${\mathfrak{A}}$ is the collection
\[
\LM_{{\mathfrak{X}}}({\mathfrak{A}}) \equiv \{ b \in B({\mathfrak{X}})\mid ba \in {\mathfrak{A}}, \mbox{ for all } a \in {\mathfrak{A}} \}.
\]
If $b \in \LM_{{\mathfrak{X}}}({\mathfrak{A}})$, then $L_b \in B({\mathfrak{A}})$ denotes the left multiplication operator by~$b$.
\end{definition}
The following has also a companion statement for double centralizers, which we plan to state and explore elsewhere.
\begin{proposition} \label{repn}
Let $\cl A$ be a $C^*$-algebra and assume that $\cl A$ is acting isometrically and non-degenerately on a Banach space ${\mathfrak{X}}$. Then the mapping
\begin{equation} \label{Linmap}
\LM_{{\mathfrak{X}}}(\cl A) \longrightarrow \LC (\cl A)\colon b \longmapsto L_b
\end{equation}
establishes an isometric Banach algebra isomorphism between $\LM_{{\mathfrak{X}}}(\cl A)$ and $\LC (\cl A)$.
\end{proposition}
\begin{proof} The statement of this Proposition is a well-known fact, provided that ${\mathfrak{X}}$ is a Hilbert space. In that case, in order to establish the surjectivity of (\ref{Linmap}) one starts with a contractive approximate unit $(e_i)_i$ for $\cl A$. If $B \in \LC\left(\cl A \right)$, then the net $( B(e_i))_i$ is bounded and therefore has at least one weak limit point $b \in B({\mathfrak{X}})$. The conclusion then follows by showing that $b \in \LM_{{\mathfrak{X}}}(\cl A)$. (See \cite[Proposition 3.12.3]{Ped} for a detailed argument.)
Bounded nets of operators on a Banach space need not have weak limits. However, the non-degeneracy of the action and the identity
\[
B(e_i)ax=B(e_ia)x, \,\, a \in \cl A , x \in {\mathfrak{X}},
\]
guarantees that the net $(B(e_i)x)_i $ is convergent when $x$ ranges over a dense subset of ${\mathfrak{X}}$. Since $( B(e_i))_i$ is bounded, we obtain that $(B(e_i)x)_i $ is Cauchy (and thus convergent) for any $x \in {\mathfrak{X}}$. This establishes that $(B(e_i))_i $ converges pointwise to some bounded operator $b \in B({\mathfrak{X}})$, even when ${\mathfrak{X}}$ is assumed to be a Banach space. With this observation at hand, the rest of the proof now goes as in the Hilbert space case.
\end{proof}
We are in position now to give the promised proof for Lin's Theorem.
\begin{theorem} \label{Linthm}
Let $E$ be a Hilbert $C^*$-module over a $C^*$-algebra $\cl A$. Then there exists an isometric isomorphism of Banach algebras
\[
\phi : \End_{\cl A}(E) \longrightarrow \LC\left(\cl K (E) \right),
\]
whose restriction $\phi_{\mid \cl L(E)}$ establishes a $*$-isomorphism between $\cl L(E)$ and \break $\DC(\cl K (E))$.
\end{theorem}
\begin{proof} In light of Proposition \ref{repn}, it suffices to verify that
$$\LM_{E}(\cl K (E)) = \End_{\cl A}(E).$$ Clearly $\End_{\cl A}(E)\subseteq \LM_{E}(\cl K (E))$. Conversely, let $S \in \LM_{E}(\cl K (E))$. If $a \in \cl A$ and $\eta , \xi, \zeta \in E$, then
\begin{align*}
S(\eta \sca{ \xi, \zeta} a)&=S\theta_{\eta , \xi}(\zeta a) = S\theta_{\eta , \xi}(\zeta ) a \\
&=S(\eta \sca{ \xi, \zeta}) a.
\end{align*}
However vectors of the form $\eta \sca{ \xi, \zeta}$, $\eta , \xi, \zeta \in E$, are dense in $E$ by \cite[Lemma 1.3.9]{ManT} and so $S$ is an $\cl A$-module map, as desired.
Specializing now the mapping of~(\ref{Linmap}) to our setting, we obtain an isometric isomorphism
\begin{equation} \label{trueLinmap}
\phi \colon \End_{\cl A}(E) \longrightarrow \LC (\cl K (E))\colon S \longmapsto L_S.
\end{equation}
Furthermore, the restriction $\phi_{\mid \cl L(E)}$ coincides with Kasparov's map and the conclusion follows.
\end{proof}
{\noindent}{\it Acknowledgements.} The present paper grew out of discussions between the author and Aristides Katavolos during the International Conference on Operator Algebras, which was held at Nanjing University, China, June 20-23, 2013. The author would like to thank Aristides for the stimulating conversations and is grateful to the organizers of the conference for the invitation to participate and their hospitality.
\end{document}
|
\begin{document}
\begin{abstract}
We compute the divisor class group and the Picard group of projective varieties with Hibi rings as homogeneous coordinate rings. These varieties are precisely the toric varieties associated to order polytopes. We use tools from the theory of toric varieties to get a description of the two groups which only depends on combinatorial properties of the underlying poset.
\end{abstract}
\title{Divisors on Projective Hibi Varieties}
\section{Introduction}
\newcommand{\mathcal{P}}{\mathcal{P}}
\newcommand{\mathcal{I}(\mathcal{P})}{\mathcal{I}(\mathcal{P})}
Let $(\mathcal{P},\le)$ be a finite partially ordered set (poset). A subset $I\subseteq\mathcal{P}$ is called an \emph{order ideal} if it is down-closed, i.e. $p\in I$ and $q\le p$ implies $q\in I$. Denote by $\mathcal{I}(\mathcal{P})$ the set of all order ideals of $\mathcal{P}$. The poset $(\mathcal{I}(\mathcal{P}),\subseteq)$ is a distributive lattice with join $I\vee J = I\cup J$ and meet $I\wedge J = I\cap J$ for $I,J\in\mathcal{I}(\mathcal{P})$.
\emph{Hibi rings} \cite{Hi87} are graded algebras with straightening laws associated to finite posets. More precisely, for a poset $\mathcal{P}=\{p_1,\ldots,p_n\}$ the Hibi ring $\mathbb{C}[\mathcal{P}]$ is the subalgebra of $\mathbb{C}[x_1,...,x_n,t]$ generated by the set of monomials $\{t\prod_{p_i\in I}{x_i}:I\in\mathcal{I}(\mathcal{P})\}$. Hibi rings are normal Cohen-Macaulay domains and we have $\mathbb{C}[\mathcal{P}]\cong\mathbb{C}[y_I:I\in\mathcal{I}(\mathcal{P})]/\mathfrak{I}_{\mathcal{I}(\mathcal{P})}$, where $\mathfrak{I}_{\mathcal{I}(\mathcal{P})}$ is the ideal generated by the so-called \emph{Hibi relations} $y_Iy_J-y_{I\wedge J}y_{I\vee J}$ for all $I,J\in\mathcal{I}(\mathcal{P})$ (see \cite{Hi87}).
Since the Hibi relations are homogeneous there is a natural grading on $\mathbb{C}[\mathcal{P}]$ coming from the standard grading on $\mathbb{C}[y_I:I\in\mathcal{I}(\mathcal{P})]$. In the following our central objects of study are the projective varieties $X_\mathcal{P}$ with the graded ring $\mathbb{C}[\mathcal{P}]$ as homogeneous coordinate ring, which we will call \emph{(projective) Hibi varieties}. Hibi varieties appear for example as flat degenerations of Grassmannians and flag varieties (\cite{MS05},\cite{EH12}). Moreover, they generalize several well-studied classes of varieties, such as certain determinantal and ladder determinantal varieties (\cite{BC03},\cite{Co95}).
Hibi varieties are toric varieties, hence geometric questions can be reduced to discrete-geometric questions about polytopes and fans. In the case of Hibi varieties one can hope to go even one step further and describe the geometry of $X_\mathcal{P}$ in terms of the combinatorics of $\mathcal{P}$.
A first step was done by Wagner in \cite{Wa96}, where the orbits of the torus action and the singular locus of $X_\mathcal{P}$ are described in terms of properties of $\mathcal{P}$.
In the present paper we compute the divisor class group and the Picard group of Hibi varieties. In Section 2 we describe the polytope of $X_\mathcal{P}$. This was already used without proof in \cite{Wa96}. In Section 3 we use general results on toric varieties to compute the divisor class group of $X_\mathcal{P}$. Finally, in Section 4 we use the description of the divisor class group to compute the Picard group of $X_\mathcal{P}$.
\section{Hibi Varieties and Order Polytopes}
\newcommand{\mathcal{P}p}{\mathcal{P}^{op}}
Let $\mathcal{P}$ be a finite poset. The projective variety $X_\mathcal{P}=\textnormal{Proj}(\mathbb{C}[\mathcal{P}])$ is called the \emph{(projective) Hibi variety} associated to $\mathcal{P}$.
Hibi varieties appear in various contexts and generalize some well-studied classes of varieties, as the following examples show.
\begin{example}
Let $\mathcal{P}_n$ denote the chain consisting of $n$ elements. The Hibi variety $X_{\mathcal{P}_n}$ is the complex projective space $\mathbb{P}^n$. More generally, if $\mathcal{P}$ is the disjoint union of chains $\mathcal{P}_{n_1},\ldots,\mathcal{P}_{n_l}$ the associated Hibi variety $X_\mathcal{P}$ is the Segre embedding of $\mathbb{P}^{n_1}\times\cdots\times\mathbb{P}^{n_l}$.
\end{example}
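As a minimal illustration, take the chain $\mathcal{P}_2=\{p_1<p_2\}$. Its order ideals are $\emptyset$, $\{p_1\}$ and $\{p_1,p_2\}$, so $\mathbb{C}[\mathcal{P}_2]$ is generated by the monomials $t$, $tx_1$ and $tx_1x_2$. Since any two order ideals of a chain are comparable, all Hibi relations vanish identically, hence $\mathbb{C}[\mathcal{P}_2]$ is a polynomial ring in three variables and $X_{\mathcal{P}_2}=\mathbb{P}^2$.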
\begin{example}
For $1\le d\le n$ there exists a flat degeneration taking the \emph{Grassmannian} $G_{d,n}$ of $d$-dimensional subspaces of an $n$-dimensional complex vector space to the Hibi variety $X_{\mathcal{P}_d\times\mathcal{P}_{n-d}}$. For details see \cite{EH12}, \cite{Fr13} or \cite{St96}. More generally, also \emph{flag varieties} degenerate to Hibi varieties (see \cite{MS05}).
\end{example}
\begin{example}
\emph{Projective determinantal varieties} are determined by the vanishing of all minors of a fixed size of a matrix of indeterminates. In the case of $2$-minors of an $(n\times m)$-matrix $A$ the determinantal variety is the Hibi variety associated to $\mathcal{P}_{n-1}\cupdot\mathcal{P}_{m-1}$. Indeed, the lattice $\mathcal{I}(\mathcal{P}_{n-1}\cupdot\mathcal{P}_{m-1})$ is isomorphic to $\mathcal{P}_n\times\mathcal{P}_m$ and Hibi relations in $\mathcal{P}_n\times\mathcal{P}_m$ correspond precisely to the $2$-minors of $A$.
\end{example}
\begin{example}
\emph{Ladder determinantal varieties} are a generalization of determinantal varieties, where instead of matrices so-called \emph{ladders} of indeterminates are considered (see e.g. \cite{Co95}). In the case of $2$-minors, these are again Hibi varieties.
\end{example}
In the following we will describe the polytope associated to the toric variety $X_\mathcal{P}$. For a poset $\mathcal{P}$ a subset $J\subseteq \mathcal{P}$ is called an \emph{order filter} if it is up-closed, i.e. if $b\ge a$ and $a\in J$ implies $b\in J$. Note that $J$ is an order filter if and only if its complement $\mathcal{P}\backslash J$ is an order ideal. The set $\mathcal{J}(\mathcal{P})$ of all order filters is a distributive lattice with union and intersection as join and meet operation, respectively. We have $\mathcal{J}(\mathcal{P})\cong\mathcal{I}(\mathcal{P}p)$, where $\mathcal{P}p$ is the \emph{opposite poset} of $\mathcal{P}$, the poset with the same underlying set as $\mathcal{P}$ but with the order reversed.
For a subset $S\subset\mathcal{P}$ we denote by $\mathbf{a}_S\in\mathbb{R}^\mathcal{P}$ the characteristic vector of $S$, i.e. $a_p=1$ if $p\in S$ and $a_p=0$ otherwise. The convex hull of the set $\{\mathbf{a}_J:J\in\mathcal{J}(\mathcal{P})\}$ is called the \emph{order polytope} of $\mathcal{P}$ and denoted by $\mathcal{O}(\mathcal{P})$. It can be shown that $\mathcal{O}(\mathcal{P})$ consists of all order-preserving functions $f:\mathcal{P}\to[0,1]\subseteq\mathbb{R}$ (see \cite{St86}).
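For instance, if $\mathcal{P}$ is a two-element antichain $\{p_1,p_2\}$, then every map $f:\mathcal{P}\to[0,1]$ is order-preserving and $\mathcal{O}(\mathcal{P})=[0,1]^2$, with vertices the characteristic vectors of the four order filters $\emptyset$, $\{p_1\}$, $\{p_2\}$ and $\{p_1,p_2\}$.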
There is the following close connection between Hibi varieties and order polytopes.
\begin{prop}
The Hibi variety $X_\mathcal{P}$ is isomorphic to the projective toric variety associated to the order polytope $\mathcal{O}(\mathcal{P}p)$.
\end{prop}
\begin{proof}
We will only sketch the proof, using results and notation from \cite{CLS11}. As in Chapter 1 and 2 of \cite{CLS11} for a finite set of lattice points $\mathcal{A}=\{\mathbf{a}_1,\ldots,\mathbf{a}_m\}\subseteq\mathbb{Z}^k$ we denote by $Y_\mathcal{A}$ the associated affine toric variety defined to be the Zariski closure of the image of the map
\begin{equation*}
\Phi_\mathcal{A}:(\mathbb{C}^*)^k\to \mathbb{C}^m,
\mathbf{t}\mapsto (\mathbf{t}^{\mathbf{a}_1},...,\mathbf{t}^{\mathbf{a}_m}).
\end{equation*}
Moreover, let $X_\mathcal{A}$ be the Zariski closure of the image of $\pi\circ\Phi_\mathcal{A}$, where $\pi:(\mathbb{C}^*)^m\to\mathbb{P}^{m-1}$ denotes the canonical projection.
The order polytope $\mathcal{O}(\mathcal{P}p)$ is normal, since it has a unimodular triangulation (\cite{St86}). Using that the integral points of $\mathcal{O}(\mathcal{P}p)$ are precisely its vertices, it now follows that the associated toric variety is isomorphic to $X_\mathcal{A}$, where $\mathcal{A}=\{\mathbf{a}_I:I\in\mathcal{I}(\mathcal{P})\}\subseteq\mathbb{Z}^\mathcal{P}$ and $\mathbf{a}_I$ denotes the characteristic vector of the ideal $I$. For the set $\mathcal{A}'=\{(1,\mathbf{a}_I):I\in\mathcal{I}(\mathcal{P})\}\subseteq\mathbb{Z}^{|\mathcal{P}|+1}$ of lattice points of the homogenization of $\mathcal{O}(\mathcal{P}p)$ we clearly have $X_\mathcal{A}=X_{\mathcal{A}'}$.
On the other hand, $\mathcal{A}'$ forms a set of generators of the affine semigroup of the Hibi ring $\mathbb{C}[\mathcal{P}]$. Hence it follows from Proposition 2.1.4 in \cite{CLS11} and the quotient description of $\mathbb{C}[\mathcal{P}]$ that $X_{\mathcal{A}'}\cong X_\mathcal{P}$.
\end{proof}
Since $\mathcal{O}(\mathcal{P}p)$ is full-dimensional and Hibi rings are normal (see \cite{Hi87}), we have the following immediate corollary.
\begin{cor}
$X_\mathcal P$ is a projectively normal toric variety of dimension $|\mathcal{P}|$.
\end{cor}
\section{Divisor Class Group}
A relation $p<q$ with $p,q\in\mathcal{P}$ is called a \emph{covering relation} if there is no $r\in\mathcal{P}$ with $p<r<q$. We write $\mathcal{C}(\mathcal P)$ for the set of covering relations in $\mathcal P$. The \emph{Hasse diagram} of $\mathcal{P}$ is the directed graph on the elements of $\mathcal{P}$ with an edge from $p$ to $q$ if and only if $p<q\in\mathcal{C}(\mathcal P)$.
For a finite poset $\mathcal{P}$ denote by $\hat{\mathcal{P}}$ the poset obtained from $\mathcal{P}$ by attaching a minimal element $\hat 0$ and a maximal element $\hat{1}$. For a covering relation $p<q\in\mathcal{C}(\hat{\mathcal P})$ define $\mathbf{u}_{p<q}\in\mathbb{Z}^{\mathcal{P}}$ by
\begin{equation}\label{FacetNormals}
\mathbf{u}_{p<q}=
\begin{cases}
\mathbf{e}_p & \textnormal{ if }q=\hat 1\\
-\mathbf{e}_q & \textnormal{ if }p=\hat 0\\
\mathbf{e}_p-\mathbf{e}_q & \textnormal{ otherwise},
\end{cases}
\end{equation}
where $\mathbf{e}_p$ is the standard basis vector corresponding to an element $p\in\mathcal{P}$.
Note that these vectors are precisely the facet normals of the order polytope $\mathcal{O}(\mathcal{P}p)$ (see \cite{St86}). For each such facet normal we can associate a torus-invariant divisor $D_{p<q}$ on $X_\mathcal{P}$. Moreover, the set $\{D_{p<q}:p<q\in\mathcal{C}(\hat{\mathcal{P}})\}$ of all such divisors forms a basis of $\textnormal{Div}_T(X_\mathcal{P})$, the group of torus-invariant divisors on $X_\mathcal{P}$ (see \cite{CLS11}, Chapter 4).
\begin{rem}
The facet of $\mathcal{O}(\mathcal{P}p)$ with normal vector $\mathbf{u}_{p<q}$ is linearly equivalent to the order polytope $\mathcal{O}((\tilde\mathcal{P})^{op})$, where $\tilde\mathcal{P}$ is the poset obtained by first contracting the edge $p<q$ in the Hasse diagram of $\hat\mathcal{P}$ and then removing $\hat 0$ and $\hat 1$ (see \cite{St86}). Therefore it follows from \cite[Prop. 3.2.9]{CLS11} that $D_{p<q}$ is isomorphic to the Hibi variety $X_{\tilde\mathcal{P}}$. More explicitly, we have $D_{p<q}=X_\mathcal{P}\cap V(y_I:|(I\cup\{\hat{0}\})\cap\{p,q\}|=1)\subseteq\mathbb{P}^{|\mathcal{I}(\mathcal{P})|-1}$.
\end{rem}
Let $\textnormal{Cl}(X_\mathcal{P})$ denote the divisor class group of $X_\mathcal{P}$. The main result of this section is the following.
\begin{thm}\label{Cl}
Let $\mathcal{P}$ be a finite poset with $n$ elements and $X_\mathcal{P}$ the associated projective Hibi variety. Then we have
\begin{equation*}
\textnormal{Cl}(X_\mathcal{P})\cong\mathbb{Z}^{|\mathcal{C}(\hat{\mathcal{P}})|-n}.
\end{equation*}
\end{thm}
\begin{proof}
We have the well-known exact sequence (see e.g. \cite[Thm. 4.1.3]{CLS11})
\begin{equation*}
0\longrightarrow\mathbb{Z}^\mathcal{P}\xlongrightarrow{\phi}\textnormal{Div}_T(X_\mathcal{P})\longrightarrow\textnormal{Cl}(X_\mathcal{P})\longrightarrow 0
\end{equation*}
where the second map sends a divisor $D$ to its divisor class $[D]$ and $\phi$ is defined by
\begin{equation*}
\phi(\mathbf{m})=\sum\limits_{p<q\in\mathcal{C}(\hat{\mathcal{P}})}{\langle \mathbf{m},\mathbf{u}_{p<q}\rangle D_{p<q}}.
\end{equation*}
More explicitly, we have
\begin{equation}\label{phi}
\phi(\mathbf{e}_p)=\sum\limits_{p<q\in\mathcal{C}(\hat{\mathcal{P}})}{D_{p<q}}-\sum\limits_{r<p\in\mathcal{C}(\hat{\mathcal{P}})}{D_{r<p}}.
\end{equation}
To prove the theorem we will define a map $\psi:\textnormal{Div}_T(X_\mathcal{P})\to\mathbb{Z}^{|\mathcal{C}(\hat{\mathcal{P}})|-n}$ such that the sequence
\begin{equation*}
0\longrightarrow\mathbb{Z}^{|\mathcal{P}|}\xlongrightarrow{\phi}\textnormal{Div}_T(X_\mathcal{P})
\xlongrightarrow{\psi}\mathbb{Z}^{|\mathcal{C}(\hat{\mathcal{P}})|-n}\longrightarrow 0
\end{equation*}
is exact. From this it follows that $\textnormal{Cl}(X_{\mathcal{P}})\cong\mathbb{Z}^{|\mathcal{C}(\hat{\mathcal{P}})|-n}$.\\
To define $\psi$ we do the following. For every $p\in\mathcal P$ we choose an element $r_p\in\mathcal{P}\cup\{\hat 0\}$ such that $r_p<p$ is a covering relation. Let $T$ be the connected subgraph of the Hasse diagram of $\mathcal{P}\cup\{\hat 0\}$ whose edges are the covering relations $r_p<p$ for all $p\in\mathcal P$. Since $T$ has $n$ edges we can define a basis of $\mathbb{Z}^{|\mathcal{C}(\hat{\mathcal{P}})|-n}$ of the form $\{\mathbf{e}_{p<q}:p<q\in\mathcal{C}(\hat{\mathcal{P}})\backslash T\}$. Now define $\psi(D_{p<q})=\mathbf{e}_{p<q}$ for $p<q\in\mathcal{C}(\hat{\mathcal{P}})\backslash T$. We want to define the image of all other divisors in a way such that $\textnormal{im}(\phi)\subseteq \textnormal{ker}(\psi)$. From \eqref{phi} we get that for $p<q\in\mathcal{C}(\hat{\mathcal{P}})$ we must have
\begin{equation}\label{psi}
\psi(D_{p<q})=\sum\limits_{q<r\in\mathcal{C}(\hat{\mathcal{P}})}{\psi(D_{q<r})}-
\sum\limits_{p'<q\in\mathcal{C}(\hat{\mathcal{P}}):p'\neq p}{\psi(D_{p'<q})}.
\end{equation}
If $q$ is a leaf of $T$ equation \eqref{psi} uniquely defines $\psi(D_{p<q})$. But in fact, as we see by inductively removing leaves, the condition in \eqref{psi} already determines the value of $\psi$ on all edges of $T$.\\
It remains to show that $\textnormal{ker}(\psi)\subseteq\textnormal{im}(\phi)$. Let $D=\sum\limits_{p<q\in\mathcal{C}(\hat{\mathcal{P}})}{\alpha_{p<q}D_{p<q}}$ be a divisor in $\textnormal{ker}(\psi)$. We claim that it suffices to find $\mathbf{m}\in\mathbb{Z}^{\mathcal{P}}$ such that for $D'=D+\phi(\mathbf{m})=\sum\limits_{p<q\in\mathcal{C}(\hat{\mathcal{P}})}{\alpha'_{p<q}D_{p<q}}$ we have $\alpha'_{p<q}=0$ whenever $p<q\in T$. Indeed, by the first part of the proof, $D'$ must lie in $\textnormal{ker}(\psi)$. But since $\alpha'_{p<q}=0$ for all $p<q\in T$ this implies $D'=0$ and therefore $D=-\phi(\mathbf{m})\in\textnormal{im}(\phi)$.
Any such $\mathbf{m}$ has to satisfy
\begin{equation*}
0=\alpha'_{r_p<p}=
\begin{cases}
\alpha_{r_p<p}-m_p & \textnormal{ if }r_p=\hat 0\textnormal{ and}\\
\alpha_{r_p<p}+m_{r_p}-m_p & \textnormal{ otherwise}.
\end{cases}
\end{equation*}
Hence we define $\mathbf{m}=(m_p)_{p\in\mathcal P}$ inductively by
\begin{equation*}
m_p=
\begin{cases}
\alpha_{\hat 0<p} & \textnormal{ for }p\textnormal{ a minimal element of }\mathcal{P}\textnormal{ and}\\
\alpha_{r_p<p}+m_{r_p} & \textnormal{ otherwise}.
\end{cases}
\end{equation*}
It is easy to see that this $\mathbf{m}$ has the desired properties.
\end{proof}
From the proof of Theorem \ref{Cl} we immediately get the following description of generators of $\textnormal{Cl}(X_\mathcal{P})$.
\begin{cor}\label{gens}
Let $T$ be an arborescence in the Hasse diagram of $\mathcal{P}\cup\{\hat 0\}$, i.e. a subgraph which for every $p\in\mathcal{P}$ contains a unique directed path from $\hat 0$ to $p$. Then the divisor class group $\textnormal{Cl}(X_\mathcal{P})$ is the free abelian group generated by the divisor classes $\{[D_{p<q}]:p<q\in\mathcal{C}(\hat{\mathcal{P}})\backslash T\}$.
\end{cor}
\begin{rem}
The above proof is similar to the one in \cite{HHN92}, where the divisor class group of \emph{affine} Hibi varieties is computed.
\end{rem}
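To illustrate Theorem \ref{Cl}, let $\mathcal{P}$ be the disjoint union of chains $\mathcal{P}_{n_1},\ldots,\mathcal{P}_{n_l}$ with $n=n_1+\cdots+n_l$ elements. Then $\mathcal{C}(\hat{\mathcal{P}})$ consists of $\sum_i(n_i-1)+2l=n+l$ covering relations, so $\textnormal{Cl}(X_\mathcal{P})\cong\mathbb{Z}^{l}$, which agrees with the classical description of the divisor class group of $\mathbb{P}^{n_1}\times\cdots\times\mathbb{P}^{n_l}$. In particular, for a single chain we recover $\textnormal{Cl}(\mathbb{P}^n)\cong\mathbb{Z}$.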
\section{Picard Group}
Let $\textnormal{Pic}(X_\mathcal{P})$ denote the Picard group of $X_{\mathcal{P}}$. The main result of this section is the following.
\begin{thm}\label{Pic}
We have $\textnormal{Pic}(X_\mathcal{P})\cong\mathbb{Z}^l$ where $l$ denotes the number of connected components of the Hasse diagram of $\mathcal{P}$.
\end{thm}
The Picard group $\textnormal{Pic}(X_\mathcal{P})$ is isomorphic to the subgroup of $\textnormal{Cl}(X_\mathcal{P})$ which consists of divisor classes of locally principal divisors. Hence, we want to understand when a divisor $D_{p<q}$ is locally principal.
For an ideal $I\in\mathcal{I}(\mathcal{P})$ let $\mathcal{C}_I(\hat{\mathcal{P}})=\{p<q\in\mathcal{C}(\hat{\mathcal{P}}):|\{p,q\}\cap(I\cup\{\hat 0\})|\neq 1\}$. Note that $\mathcal{C}_I(\hat{\mathcal{P}})$ corresponds to the set of all facets of $\mathcal{O}(\mathcal{P}p)$ which contain the vertex $\mathbf{a}_I$. Recall the description of the facet normals given in equation \eqref{FacetNormals}. With this notation we have the following criterion, which is a consequence of Thm. 4.2.8. in \cite{CLS11}.
\begin{lem}\label{LocPrinc}
Let $D=\sum\limits_{p<q\in\mathcal{C}(\hat{\mathcal{P}})}{\alpha_{p<q}D_{p<q}}$. Then $D$ is locally principal if and only if for every $I\in\mathcal{I}(\mathcal{P})$ there is $\mathbf{m}\in\mathbb{Z}^{\mathcal{P}}$ such that
\begin{equation*}
\langle \mathbf{m},\mathbf{u}_{p<q}\rangle=\alpha_{p<q}\textnormal{ for all }p<q\in\mathcal{C}_I(\hat{\mathcal{P}}).
\end{equation*}
\end{lem}
We will now use this to prove the main theorem.
\begin{proof}[Proof of Theorem \ref{Pic}]
We want to describe the subgroup of $\textnormal{Cl}(X_\mathcal{P})$ which consists of divisor classes of locally principal divisors. Let $[D]$ be a divisor class such that $D$ is locally principal. By Corollary \ref{gens} we may assume that $D$ is of the form
\begin{equation*}
D=\sum\limits_{p<q\in\mathcal{C}(\hat{\mathcal{P}})\backslash T}{\alpha_{p<q}D_{p<q}}.
\end{equation*}
We will first apply Lemma \ref{LocPrinc} for the ideals $\mathcal{P}$ and $\emptyset$ to get some conditions on the coefficients $\alpha_{p<q}$. Then we will show that these conditions are in fact sufficient.\\
Let $I=\mathcal{P}\in\mathcal{I}(\mathcal{P})$. Then $\mathcal{C}_I(\hat{\mathcal{P}})=\{p<q\in\mathcal{C}(\hat{\mathcal{P}}): q\neq\hat 1\}$. We claim that for all $p<q\in\mathcal{C}_I(\hat{\mathcal{P}})$ we must have $\alpha_{p<q}=0$. First note that for any chain $\hat 0< p_1<\cdots< p_k< q$ in the Hasse diagram of $\hat{\mathcal{P}}$ we have by the above lemma
\begin{equation*}
\alpha_{\hat 0<p_1}+\sum\limits_{1\le i\le k-1}{\alpha_{p_i<p_{i+1}}}+\alpha_{p_k<q}=-m_q=0,
\end{equation*}
where the last equality follows from choosing a chain in $T$. Now consider a chain of the form $\hat 0< p_1'<\cdots< p_l'< p< q$ such that $\hat 0< p_1'<\cdots< p_l'< p$ lies in $T$. This yields $\alpha_{p<q}=0$.\\
So far we have shown that $D$ must be of the form $D=\sum_{p\in M}{\alpha_{p<\hat 1}D_{p<\hat 1}}$, where $M$ denotes the set of maximal elements of $\mathcal{P}$. Now choose $I=\emptyset\in\mathcal{I}(\mathcal{P})$. We have $\mathcal{C}_I(\hat{\mathcal{P}})=\{p<q\in\mathcal{C}(\hat{\mathcal{P}}):p\neq\hat 0\}$. We claim that if $p_1,p_2\in M$ are in the same connected component of $\mathcal{P}$ then we must have $\alpha_{p_1<\hat 1}=\alpha_{p_2<\hat 1}$. We call $p_1,p_2\in M$ adjacent if there exists a $q\in\mathcal{P}$ such that $q<p_1$ and $q<p_2$. Since $\mathcal{P}$ is finite it suffices to prove the claim for adjacent $p_1,p_2$. Let $q\in\mathcal{P}$ such that $q<p_1,p_2$. As above we get $0=m_q-m_{p_1}=m_q-m_{p_2}$, which in particular implies $m_{p_1}=m_{p_2}$. But $m_{p_i}=\alpha_{p_i<\hat 1}$ by Lemma \ref{LocPrinc}, which proves the claim.\\
Let $\mathfrak{C}(\mathcal{P})$ be the set of connected components of $\mathcal{P}$. We have shown that $D$ must be of the form
\begin{equation*}
D=\sum\limits_{C\in\mathfrak{C}(\mathcal{P})}{\alpha_C D_C}\textnormal{ where }D_C=\sum\limits_{p\in M\cap C}{D_{p<\hat 1}}.
\end{equation*}
The only thing left to show is that every such $D$ is locally principal by again using Lemma \ref{LocPrinc}. Let $I\in\mathcal{I}(\mathcal{P})$. Define $\mathbf{m}=(m_p)_{p\in\mathcal{P}}$ as follows. For all $p\in I$ set $m_{p}=0$. For all $p\in\mathcal{P}\backslash I$, let $C$ be the connected component that $p$ lies in and set $m_{p}=\alpha_C$. It is easy to check that $\mathbf{m}$ has all the desired properties.
\end{proof}
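Continuing the example at the end of Section 3, if $\mathcal{P}$ is a disjoint union of $l$ chains, then the Hasse diagram of $\mathcal{P}$ has $l$ connected components and Theorem \ref{Pic} gives $\textnormal{Pic}(X_\mathcal{P})\cong\mathbb{Z}^l=\textnormal{Cl}(X_\mathcal{P})$, as expected for the smooth variety $\mathbb{P}^{n_1}\times\cdots\times\mathbb{P}^{n_l}$.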
\textbf{Acknowledgements.} This work generalizes results from the author's master thesis supervised by Gunnar Fl\o ystad, whom the author would like to thank for fruitful discussions and constant support. The author would also like to thank Raman Sanyal for helpful comments on a previous version of this paper.
\linespread{1.0}
\setlength{\parskip}{0cm}
\small
\end{document}
|
\begin{document}
\title{\bf Local data structures}
\author{{\bf J.F. Jardine}\thanks{Supported by NSERC.}}
\affil{[email protected]}
\maketitle
\begin{abstract}
Local data structures are systems of neighbourhoods within data sets. Specifications of neighbourhoods can arise in multiple ways, for example, from global geometric structure (stellar charts), combinatorial structure (weighted graphs), desired computational outcomes (natural language processing), or sampling. These examples are discussed, in the context of a theory of neighbourhoods.
This theory is a step towards understanding clustering for large data sets. These clusters can only be approximated in practice, but approximations can be constructed from neighbourhoods via patching arguments that are derived from the Healy-McInnes UMAP construction. The patching arguments are enabled by changing the theoretical basis for data set structure, from metric spaces to extended pseudo metric spaces.
\end{abstract}
\section*{Introduction}
This paper is a preliminary discussion of local structures for
large data sets.
Potential objects of study include subsets $\mathcal{U} \subset \mathbb{R}^{N}$, where the data set $\mathcal{U}$ (or ``universe'') is essentially infinite, meaning that $\mathcal{U}$ is too large to analyze with available computational devices.
Alternatively, there may not be a metric space structure on the data set $\mathcal{U}$. Such objects $\mathcal{U}$ can arise as vertices of large weighted graphs $\Gamma$, which could describe data transfers that occur during a time interval. Other examples arise in the ``bag of words'' model of natural language processing, which model has a combinatorial structure that is not graph theoretic.
There could, finally, be no apparent geometric or combinatorial structure for $\mathcal{U}$, and its structure near a point may have to be approximated (or learned) by iterated sampling.
In general, one wants to break up a data set $\mathcal{U}$ into smaller computable pieces $N$ that cover $\mathcal{U}$ in the sense that every $x \in \mathcal{U}$ is in some neighbourhood $N$, in the hope/expectation that analyses of the neighbourhoods $N$ can be assembled to a full or at least useful partial analysis of the universal data set $\mathcal{U}$.
This is essentially the approach taken by the mapper algorithm \cite{CMS} (see Remark \ref{rem 12} below), and it can make perfect sense for clustering at relatively small distance scales.
The elements of a neighbourhood $N$ should be close to $x$ in some sense, but one has to address the question of how to find such neighbourhoods in a sea of data $\mathcal{U}$.
If there is no prior information about the structure or genesis of $\mathcal{U}$, the phrase ``close to $x$'' may not have much meaning. In good cases, there is information about local geometric or combinatorial structures that allows one to get started.
Most generally, a neighbourhood $N$ of a point $x$ in a data set $\mathcal{U}$ is a suitably sized subset of $\mathcal{U}$ which contains $x$. If $\mathcal{U}$ is a metric space (or an extended pseudo metric space) then $N$ has a radius $s(N)$, which is the maximum distance $d(x,y)$ for $y \in N$.
The inclusion $N \subset \mathcal{U}$ determines an inclusion of Vietoris-Rips complexes $V(N) \subset V(\mathcal{U})$.
If every $x \in \mathcal{U}$ has a specific choice of neighbourhood $N_{x}$, as in Section 3, then the collection of all such neighbourhoods determines an inclusion of filtered complexes
\begin{equation*}
N(\mathcal{U}):=\cup_{x \in \mathcal{U}}\ V(N_{x}) \subset V(\mathcal{U}),
\end{equation*}
which complexes are filtered by distance in the usual way.
I say that $N(\mathcal{U})$ is the {\it neighbourhood complex} that is defined by the family of neighbourhoods $N=\{ N_{x} \}$.
The neighbourhood complex $V(N)$ is the mapper complex for the covering $V(N_{x}) \subset V(\mathcal{U})$ of the global Vietoris-Rips complex $V(\mathcal{U})$, as in \cite{CMS}.
Every element $y \ne x$ in a neighbourhood $N_{x}$ determines a ray
\begin{equation*}
\{x,y\} \subset N_{x} \subset \mathcal{U},
\end{equation*}
and the collection of such rays determines
a filtered subcomplex
\begin{equation*}
R(N_{x}) = \vee_{y \ne x}\ V(\{x,y\}) \subset V(N_{x}).
\end{equation*}
Taking the union
\begin{equation*}
R(\mathcal{U}) = \cup_{x \in \mathcal{U}}\ R(N_{x}) \subset V(\mathcal{U})
\end{equation*}
defines the {\it ray subcomplex} $R(\mathcal{U})$, which is a subcomplex of both $V(\mathcal{U})$ and $N(\mathcal{U})$.
The ray subcomplex $R(\mathcal{U})$ is a filtered (or weighted) graph.
If the neighbourhoods $N_{x}$ consist of sets of $k$-nearest neighbours for the points of $\mathcal{U}$, then the ray subcomplex $R(\mathcal{U})$ is the $k$-nearest neighbours graph, which is a well-studied object. The $k$-nearest neighbours graph is used to construct the UMAP graph of \cite{HMc-2020}, \cite{UMAP-stab}, \cite{github}.
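To fix ideas, here is a minimal computational sketch (an illustration only, not code from \cite{HMc-2020} or \cite{github}) of this weighted $k$-nearest neighbours graph, that is, of the ray subcomplex $R(\mathcal{U})$ for $k$-nearest neighbour neighbourhoods of a finite point cloud with the Euclidean metric:
\begin{verbatim}
import numpy as np

def knn_ray_graph(points, k):
    # Edges (i, j, d(i, j)) with j among the k nearest neighbours of i.
    pts = np.asarray(points, dtype=float)
    dists = np.linalg.norm(pts[:, None, :] - pts[None, :, :], axis=-1)
    edges = []
    for i in range(len(pts)):
        for j in np.argsort(dists[i])[1:k + 1]:  # skip the point itself
            edges.append((i, int(j), float(dists[i, j])))
    return edges

# toy usage: four points in the plane, k = 2
print(knn_ray_graph([[0, 0], [1, 0], [0, 1], [5, 5]], k=2))
\end{verbatim}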
The inclusions
\begin{equation*}
R(\mathcal{U}) \subset N(\mathcal{U}) \subset V(\mathcal{U})
\end{equation*}
of filtered complexes induce surjections
\begin{equation*}
\pi_{0}R_{s}(\mathcal{U}) \to \pi_{0}N_{s}(\mathcal{U}) \to \pi_{0}V_{s}(\mathcal{U})
\end{equation*}
for distance parameters $s$, which are analyzed in special cases in Sections 3 and 4.
There are good comparison results for finite $s$ for bounded neighbourhoods, which is the subject of Section 4. See Lemma \ref{lem 14},
Lemma \ref{lem 15}, Lemma \ref{lem 16} and Lemma \ref{lem 20}.
In that setting, the neighbourhood complex $N_{s}(\mathcal{U})$ for bounded neighbourhoods has the same $1$-skeleton as the global Vietoris-Rips complex $V_{s}(\mathcal{U})$ at small distance scales $s$, which makes the neighbourhood complex $N_{s}(\mathcal{U})$ a good approximation of $V_{s}(\mathcal{U})$ for clustering for such $s$.
At higher distance scales, the clusters of the ray complex $R_{s}(\mathcal{U})$ coincide with those of the neighbourhood complex $N_{s}(\mathcal{U})$. The outcome is that, for clustering, the neighbourhood complex $N(\mathcal{U})$ is a bridge between the ray complex $R(\mathcal{U})$ (a UMAP-like object) and the full Vietoris-Rips complex $V(\mathcal{U})$.
The basic ideas and constructions of this paper appear in Sections 2 and 3, along with a discussion of the relationship between neighbourhoods and sequences of nearest neighbours.
With a view to potential applications (as in Section 5), we generally assume that $\mathcal{U}$ is an extended pseudo metric space, or an ep-metric space. The basic ideas around ep-metric spaces are summarized in Section 1.
Subsequent results and calculations are determined by choices of neighbourhoods, which choices vary with the geometric or combinatorial structures of specific examples.
The definitions and results of Sections 4, 6 and 7 are based on naive examples (or thought experiments) that motivate and illustrate these ideas:
\noindent
1)\
The Gaia Archive $\mathcal{U}$ is a database of roughly a billion stars in the Milky Way. The raw data for the Archive is a set of scans that has been collected by the Gaia Space Observatory spacecraft, starting in 2014.
The scans return high resolution photometric and spectral data for stars within small apertures, and so the archive is constructed from an assembly of local data. The positions of the stars in the archive relative to the Sun are determined, after repeated observations and much computation.
These positions can be expressed as a function $p: \mathcal{U} \to \mathbb{R}^{3}$ that determines the members of the Archive $\mathcal{U}$ uniquely.
The position function $p$ is a type of dimension reduction. In the language of the mapper construction, it is a filter function \cite{CMS}.
From observation, if $x$ is a star in the archive $\mathcal{U}$, then there is a neighbourhood $N_{x} \subset \mathcal{U}$ of stars close to $x$ such that $N_{x}$ has a computable number of elements. We could insist that $N_{x}$ is a bounded neighbourhood, in that it has a bounded radius $s(N_{x})$ and it contains at most $k$ elements for some choice of integer $k$.
This is an explicitly geometric example, which is closely aligned with methods that are presented in Section 4.
\noindent
2)\
For some data sets, there is a graph structure $\Gamma$ with no apparent ambient metric space.
For example, a collection of data transfers between computer accounts within a (short enough) time interval can be given the structure of a sparse directed weighted graph, as in Example \ref{ex 30} below. The number of bytes transmitted by a transfer is its weight.
The vertices of these graphs have low valence. One knows all of the transfers $e: x \leftrightarrow y$ for each account $x$, and from this one builds a computable neighbourhood $N_{k}(x)$ of accounts which are separated from $x$ by at most $k$ transfer steps (or hops).
One needs a way of assigning weights $d(x,y)$ to the various $y \in N_{k}(x)$.
Starting with an account $x$, one could expect that the accounts $y$ with which it does the most ``business'' are the closest to $x$.
The elements $y$ of $N_{k}(x)$ which are closest to $x$ are defined ``inversely'' by the sum $\Sigma(x,y)$ of all weights of directed edge paths between $x$ and $y$. Then the distance $d(x,y)$ can be defined by
\begin{equation*}
d(x,y)= e^{-\Sigma(x,y)}
\end{equation*}
between $x$ and $y$ for each $y \in N_{k}(x)$.
From the data of neighbourhoods and weights, the Healy-McInnes UMAP machine generates a global ep-metric $D$ on the set $Z$ of vertices of the graph $\Gamma$, with clusters given by the directed set $\pi_{0}V(Z,D)$, or equivalently (Theorem \ref{th 27}) by the directed set $\pi_{0}R(N)$ arising from the rays of the various neighbourhoods $N_{k}(x)$.
The point, ultimately, is that one uses the graph structure to find computable weighted neighbourhoods $N_{k}(x) $ for all vertices $x$ of a sparse weighted directed graph $\Gamma$. These local structures then patch together to define a global ep-metric on the full set of vertices of $\Gamma$, along with cluster constructions.
These ideas appear in Section 6. In broad outline, they apply
equally well to all sparse weighted graphs.
There is a fundamental idea in play here: the UMAP construction creates global space-level structure and cluster computations from local information given by weighted neighbourhoods, with or without the existence of an ambient metric.
This observation is applied repeatedly in examples that are displayed here. We specify neighbourhoods with weights, and then feed these neighbourhoods to general machinery.
The relevant theoretical features of the UMAP construction are summarized in Section 5. That section contains an alternate presentation of the UMAP graph, which is constructed by patching together rays without invoking most of the standard methods of UMAP --- see Theorem \ref{th 27}.
\noindent
3)\ Section 7 is a discussion of neighbourhoods of words in the ``continuous bag of words'' model from natural language processing (NLP). With such neighbourhoods in hand (and with appropriate definitions of weights), one again uses UMAP methods to construct an ep-metric space structure on the set of words $\mathcal{L}$ of a corpus.
The methods of Section 7 extend to any finite set of strings of data elements, in which a local metric can be defined by proximity within strings.
In the examples displayed so far, the local nature of a data set varies within a given geometric or combinatorial structure. These structures are in part determined by desired computational outcomes, and they are the starting points for calculations.
One could, finally, be presented with a very large cloud of points $\mathcal{U}$ with an ep-metric space structure, but with no other information, from which one wants to approximate (or discover) a neighbourhood $N_{x}$ for a given point $x \in \mathcal{U}$.
There seems to be no choice in such a case but to apply brute force methods that are based on repeated random sampling, with the goal of learning a description of a neighbourhood, or ``$k$-complete'' neighbourhood $N_{x}$ for $x$. A potential method for doing so is described in Section 8.
The $k$-complete neighbourhoods of this paper (see Sections 2 and 4) are strongly related to sets of $k$-nearest neighbours for a point $x$, but have the benefit of being uniquely defined, and are therefore easier to manipulate theoretically. Of course, the positive integer $k$ must be specified up front.
\tableofcontents
\section{Extended pseudo metric spaces}
An {\it extended pseudo-metric space} $(X,d)$, here called an {\it ep-metric space}, is a set $X$ together with a function $d:X \times X \to [0,\infty]$ such that the following conditions hold:
\begin{itemize}
\item[1)] $d(x,x)=0$,
\item[2)] $d(x,y) = d(y,x)$,
\item[3)] $d(x,z) \leq d(x,y) + d(y,z)$.
\end{itemize}
There is no condition that $d(x,y)=0$ implies $x$ and $y$ coincide --- this is where the adjective ``pseudo'' comes from, and the gadget is ``extended'' because we allow infinite distance.
A metric space $(X,d)$ is an ep-metric space for which $d(x,y)=0$ implies $x=y$, and all distances $d(x,y)$ are finite.
There is a category $\mathbf{ep-met}$ of ep-metric spaces, with morphisms $f:(X,d) \to (Y,d')$ given by functions $f:X \to Y$ which are non-expanding in the sense that $d'(f(x),f(y)) \leq d(x,y)$ for all $x,y \in X$.
The category $\mathbf{ep-met}$ is cocomplete, in the sense that it has all small colimits.
In effect, the coproduct $\sqcup_{i}\ (X_{i},d_{i})$ is the disjoint union set $\sqcup_{i}\ X_{i}$, equipped with the ep-metric $d$ defined by
\begin{equation*}
d(x,y) = \begin{cases}
d_{i}(x,y) & \text{if $x,y \in X_{i}$ for some $i$, and} \\
\infty & \text{otherwise.}
\end{cases}
\end{equation*}
Coequalizers are constructed from a quotient function. Suppose that $(X,d)$ is an ep-metric space and that $p: X \to Y$ is a surjective function. Then $Y$ has an ep-metric $D$ such that for any pair $z,w \in Y$,
\begin{equation*}
D(z,w) = \inf_{P}\ \sum d(x_{i},y_{i}),
\end{equation*}
where each ``path'' $P$ consists of pairs of points $\{x_{i},y_{i}\},\ i \leq n$ in $X$ such that $z=p(x_{0})$, $w = p(y_{n})$ and $p(y_{i}) = p(x_{i+1})$ for $i \leq n-1$. The function $p$ defines a map $p: (X,d) \to (Y,D)$ of ep-metric spaces that has the universal property of quotients.
\begin{example}
Suppose that $(X,d)$ and $(X,d')$ are ep-metric spaces having the same set of elements $X$. Then the amalgamation (wedge) $(X,d) \vee (X,d')$ in the ep-metric space category is an ep-metric space structure on $X$ with
\begin{equation*}
D(z,w) = \inf_{P}\ \sum D(x_{i},x_{i+1}),
\end{equation*}
where each path $P$ is a string of elements $z=x_{0},x_{1}, \dots ,x_{n}=w$ of $X$ and
\begin{equation*}
D(x_{i},x_{i+1}) = \min\ \{d(x_{i},x_{i+1}),d'(x_{i},x_{i+1})\}.
\end{equation*}
\end{example}
Each finite ep-metric space $\mathcal{U}$ has a family of Vietoris-Rips complexes $V_{s}(\mathcal{U})$, which are parameterized by distance $s$. Explicitly, $V_{s}(\mathcal{U})$ is the abstract simplicial complex (or poset) whose simplices are the finite subsets $\sigma=\{x_{0}, \dots ,x_{k}\}$ of $\mathcal{U}$ such that $d(x_{i},x_{j}) \leq s$. The simplex $\sigma$ is a $k$-simplex, and it has cardinality $k+1$.
As in the standard case, there is an ascending family of complexes
\begin{equation*}
V_{s}(\mathcal{U}) \subset V_{t}(\mathcal{U}),\ s \leq t,
\end{equation*}
with $\mathcal{U} = V_{0}(\mathcal{U})$ (discrete complex on the set $\mathcal{U}$).
The limiting object $V_{\infty}(\mathcal{U})$ is a simplex $\Delta^{\mathcal{U}}$ with vertices $\mathcal{U}$, but it is not the case that $V_{\infty}(\mathcal{U})$ is a union of the subobjects $V_{s}(\mathcal{U})$ with $s$ finite. Write
\begin{equation*}
V_{<\infty}(\mathcal{U}) = \cup_{s < \infty}\ V_{s}(\mathcal{U}).
\end{equation*}
The simplicial set $V_{< \infty}(\mathcal{U})$ is a finite disjoint union of contractible components.
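Since the path components of $V_{s}(\mathcal{U})$ depend only on its $1$-skeleton, the clusters $\pi_{0}V_{s}(\mathcal{U})$ can be computed by merging points at distance at most $s$, for example with a union-find structure. The following Python sketch is included only as an illustration of this computation and is not part of the formal development; the dictionary-of-dictionaries distance format and the function name \texttt{clusters} are assumptions made for the example.
\begin{verbatim}
# Clusters (path components) of the Vietoris-Rips complex V_s(U),
# computed from its 1-skeleton with a union-find structure.
# "d" is assumed to be a dict of dicts of pairwise distances, with
# float('inf') allowed for the extended ("ep") case.

def clusters(points, d, s):
    parent = {x: x for x in points}

    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]   # path compression
            x = parent[x]
        return x

    def union(x, y):
        rx, ry = find(x), find(y)
        if rx != ry:
            parent[rx] = ry

    pts = list(points)
    for i, x in enumerate(pts):
        for y in pts[i + 1:]:
            if d[x][y] <= s:                # the 1-simplex {x, y} of V_s(U)
                union(x, y)

    components = {}
    for x in pts:
        components.setdefault(find(x), []).append(x)
    return list(components.values())
\end{verbatim}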
\section{Neighbourhoods}
Suppose that $(Z,d)$ is a finite ep-metric space and that $x \in Z$.
In all of the following,
\begin{equation*}
Z(x,s) = \{y \in Z\ \vert\ d(x,y) \leq s\}
\end{equation*}
is the closed ball of radius $s$ in $Z$ that is centred at $x$.
A {\bf neighbourhood} $N$ of $x$ is a subset
$N$ of $Z$ with $x \in N$ and $d(x,y) < \infty$ for all $y \in N$.
A neighbourhood $N$ acquires an ep-metric space structure from $Z$, and defines a filtered subcomplex $V(N) \subset V(Z)$ of the Vietoris-Rips complex $V(Z)$.
The {\bf radius} $s(N)$ of the neighbourhood $N$ is defined by
\begin{equation*}
s(N) = \operatorname{max}_{y \in N}\ d(x,y).
\end{equation*}
Then $s(N) < \infty$ by assumption.
The neighbourhood $N$ is said to be {\bf complete} if $N=Z(x,s_{N})$.
A neighbourhood $N$ of $x$ is a set of {\bf nearest neighbours} if $d(x,z) \geq s(N)$ for all $z \in Z-N$. If $N = \{x,x_{1}, \dots ,x_{k}\}$ is a set of nearest neighbours (i.e. with cardinality $k+1$), then $N$ is a set of {\bf $k$-nearest neighbours}.
A nearest neighbour $y$ for $x$ with $d(x,y) < \infty$ can be identified with a neighbourhood $N = \{x,y\}$ of nearest neighbours. This means that $d(x,y) \leq d(x,z)$ for all $z \in Z-\{x\}$. The distance $d(x,y)$ could be $0$ in general.
Every complete neighbourhood $N = Z(x,s_{N})$ is a set of nearest neighbours for $x$, and is a set of $n$-nearest neighbours, where $n = \vert N \vert - 1$.
\begin{lemma}\label{lem 2}
Suppose that $N = \{x,x_{1}, \dots ,x_{k}\}$ is a set of nearest neighbours for $x$, and that the $x_{i}$ are ordered such that
\begin{equation*}
d(x,x_{1}) \leq d(x,x_{2}) \leq \dots \leq d(x,x_{k}).
\end{equation*}
Then $x_{i}$ is a nearest neighbour of $x$ in the subset $Z-\{x_{1}, \dots ,x_{i-1}\}$.
\end{lemma}
\begin{proof}
We have
\begin{equation*}
d(x,x_{i}) \leq d(x,x_{i+1}) \leq \dots \leq d(x,x_{k}) \leq d(x,z)
\end{equation*}
for all $z$ outside of $N$. It follows that $d(x,x_{i}) \leq d(x,w)$ for all $w \in Z-\{x_{1}, \dots ,x_{i-1}\}$.
\end{proof}
\begin{lemma}\label{lem 3}
Suppose that the neighbourhood $N$ is a set of nearest neighbours for $x$ and $z \in Z-N$ is chosen such that $d(x,z) < \infty$ and $d(x,z) \leq d(x,v)$ for all $v \in Z-N$. Then the set $N \cup \{z\}$ is a set of nearest neighbours for $x$.
\end{lemma}
\begin{proof}
The radius $s_{z}$ of $N \cup \{z\}$ is $d(x,z)$. Choose $v \in Z - (N \cup \{z\})$. Then $s(N) \leq d(x,v)$, and $d(x,z) \leq d(x,v)$ by the minimality of $d(x,z)$. It follows that $s(N \cup \{z\}) \leq d(x,v)$.
\end{proof}
\begin{remark}
Applying Lemma \ref{lem 3} inductively gives sets of nearest neighbours $N$ for $x$ of all possible finite cardinalities $\vert N \vert$ with $\vert N \vert \leq \vert Z \vert$.
\end{remark}
There is a function $d_{x}: Z \to [0,\infty]$ with $d_{x}(y) = d(x,y)$. A nearest neighbour for $x$ is an element $z \in Z -\{x\}$ such that $d_{x}(z) < \infty$ and $d_{x}(z)$ is minimal.
For such an element $z$, write $s = d_{x}(z)$. Then $s$ is the minimum finite value of $d_{x}(Z-\{x\})$, and $z \in Z_{x}(s)$, where
\begin{equation*}
Z_{x}(s) = d_{x}^{-1}(s)
\end{equation*}
is the fibre (pre-image) of $d_{x}$ over $s$.
\begin{lemma}\label{lem 5}
Suppose that $N$ is a set of nearest neighbours for $x$, and suppose that $\{s_{1}, \dots ,s_{p}\}$ is the set of elements of the image $d_{x}(N)$, with $s_{1} < \dots < s_{p}$. Then $\{s_{1}, \dots ,s_{p}\}$ consists of the $p$ smallest finite elements of $d_{x}(Z)$, and
\begin{equation*}
N = Z_{x}(s_{1}) \sqcup \dots \sqcup Z_{x}(s_{p-1}) \sqcup F
\end{equation*}
where $F \subset Z_{x}(s_{p})$.
\end{lemma}
\begin{proof}
This is proved by induction on $\vert N\vert$, using Lemma \ref{lem 2} and Lemma \ref{lem 3}.
\end{proof}
If the neighbourhood $N=\{x,x_{1}, \dots ,x_{k}\}$ is a set of nearest neighbours of $x$ with
\begin{equation*}
d(x,x_{1}) \leq \dots \leq d(x,x_{k}),
\end{equation*}
one says that $(x_{1},x_{2}, \dots ,x_{k})$ is a {\bf sequence of $k$-nearest neighbours} for $x$.
\begin{lemma}\label{lem 6}
Suppose that $\{ y_{1}, \dots ,y_{k}\}$ is a set of distinct elements of $Z-\{x\}$ with
\begin{equation*}
d(x,y_{1}) \leq d(x,y_{2}) \leq \dots \leq d(x,y_{k}) < \infty.
\end{equation*}
If $(x_{1}, \dots ,x_{k})$ is a sequence of $k$-nearest neighbours for $x$, then $d(x,x_{i}) \leq d(x,y_{i})$ for $1 \leq i \leq k$.
\end{lemma}
\begin{proof}
$d(x,x_{1}) \leq d(x,y_{1})$, since $x_{1}$ is a nearest neighbour.
Suppose that $d(x,x_{i}) \leq d(x,y_{i})$ for $i \leq r$. Then
\noindent
1)\ If $d(x,x_{r}) < d(x,y_{r+1})$ then $d(x,x_{r+1}) \leq d(x,y_{r+1})$ by minimality.
\noindent
2)\ If $d(x,x_{r}) = d(x,y_{r+1})$ then $y_{r+1}$ is a nearest neighbour of $x$ in $Z-\{x_{1},\dots,x_{r}\}$, and so $d(x,x_{r+1}) = d(x,y_{r+1})$.
\end{proof}
\begin{corollary}
Suppose that $(x_{1}, \dots ,x_{k})$ and $(y_{1}, \dots ,y_{k})$ are sequences of $k$-nearest neighbours for $x$. Then $d(x,x_{i}) = d(x,y_{i})$ for all $i$.
\end{corollary}
\begin{corollary}
Suppose that $W$ is a finite ep-metric space, and the inclusion $Z \subset W$ induces an ep-metric structure on the subset $Z$. Suppose that $x \in Z$. Suppose that $(w_{1}, \dots ,w_{k})$ and $(z_{1}, \dots ,z_{k})$ are sequences of $k$-nearest neighbours for $x$ in $W$ and $Z$, respectively. Then $d(x,w_{i}) \leq d(x,z_{i})$ for $1 \leq i \leq k$.
\end{corollary}
\begin{lemma}\label{lem 9}
Suppose that $(x_{1}, \dots ,x_{k})$ is a sequence of nearest neighbours for $x$ in $Z$, and that $(y_{1}, \dots ,y_{k})$ is a sequence of distinct elements of $Z$ with $d(x,y_{1}) \leq \dots \leq d(x,y_{k}) < \infty$.
If $d(x,x_{i}) = d(x,y_{i})$ for all $i$, then $(y_{1}, \dots ,y_{k})$ is a sequence of nearest neighbours for $x$.
\end{lemma}
\begin{proof}
$d(x,y_{1}) = d(x,x_{1}) \leq d(x,z)$ for all $z$, so that $y_{1}$ is a nearest neighbour for $x$ in $Z$.
Inductively, suppose that $\{y_{1}, \dots ,y_{i}\}$ is a set of nearest neighbours for $x$.
Suppose that $d(x,x_{i}) = d(x,x_{i+1})$. Then $d(x,y_{i})=d(x,y_{i+1})$, and so $\{y_{1},\dots,y_{i+1}\}$ is a set of nearest neighbours.
If $d(x,x_{i}) < d(x,x_{i+1})$, then $\{y_{1}, \dots ,y_{i}\} = \{x_{1}, \dots ,x_{i}\}$ by comparing fibres $Z_{x}(s)$, so that $y_{i+1}$ is the nearest neighbour of $x$ in $Z - \{y_{1}, \dots ,y_{i}\}$.
\end{proof}
We close this section with a discussion of $k$-complete neighbourhoods.
The image of the distance function $d_{x}: Z \to [0,\infty]$ has the form
\begin{equation*}
\operatorname{Im}(d_{x}) = \{s_{1},s_{2}, \dots \},
\end{equation*}
where there are strict inequalities $s_{i} < s_{i+1}$ for all $i$. The data set
$Z$ is a disjoint union of non-empty fibres of $d_{x}$:
\begin{equation*}
Z = d_{x}^{-1}(s_{1}) \sqcup d_{x}^{-1}(s_{2}) \sqcup \dots =
Z_{x}(s_{1}) \sqcup Z_{x}(s_{2}) \sqcup \dots.
\end{equation*}
For each $s_{i}$, there is a unique complete neighbourhood $Z(x,s_{i})$ of $x$,
with
\begin{equation*}
Z(x,s_{i}) = d_{x}^{-1}(s_{1}) \sqcup \dots \sqcup d_{x}^{-1}(s_{i}).
\end{equation*}
The complete neighbourhoods of $x$ form a finite ascending tower
\begin{equation*}
Z(x,s_{1}) \subset Z(x,s_{2}) \subset Z(x,s_{3}) \subset \dots
\end{equation*}
Any complete neighbourhood $N$ with $Z(x,s_{i}) \subsetneqq N$ must have strictly greater radius $s_{N} > s_{i}$.
Suppose that $k$ is a positive integer and that $\vert Z \vert \geq k$. Then there is a smallest number $i$ such that $\vert Z(x,s_{i}) \vert \geq k$. In this case, the neighbourhood $Z(x,s_{i})$ is $k$-complete.
Alternatively, the $k$-complete neighbourhood $N$ of $x$ is the smallest complete neighbourhood such that $\vert N \vert \geq k$.
The element $x \in Z$ has a unique $k$-complete neighbourhood $N$ in $Z$, provided that $\vert Z \vert \geq k$. The $k$-complete neighbourhood $N$ is a well defined object, while there may be multiple sets of $k$-nearest neighbours of $x$.
\begin{lemma}\label{lem 10}
Suppose that $Z_{1},Z_{2} \subset \mathcal{U}$, and that $x \in Z_{i}$. Suppose that $N_{i} \subset Z_{i}$ is the $k$-complete neighbourhood of $x$ in $Z_{i}$, and suppose that $N$ is the $k$-complete neighbourhood of $x$ in $Z = Z_{1} \cup Z_{2}$. Then $N \subset N_{1} \cup N_{2}$.
\end{lemma}
\begin{proof}
The set $N$ is a set of nearest neighbours for $x$ in $Z_{1} \cup Z_{2}$, and $Z_{i} \cap N$ is a set of nearest neighbours for $x$ in $Z_{i}$.
In effect, if $z \in Z_{i}$ is not in $Z_{i} \cap N$ then $z$ is not in $N$, so that $d(x,z) \geq s_{N}$, while $s_{N} \geq s_{Z_{i} \cap N}$.
If there is an $s < s_{N}$ such that $\vert Z_{i}(x,s) \vert \geq k$, then $\vert Z(x,s) \vert \geq k$ for $s < s_{N}$, and so $N=Z(x,s_{N})$ is not $k$-complete. It follows that $\vert Z_{i}(x,s) \vert < k$ for $s < s_{N}$, and so $Z_{i}(x,s_{N}) \subset N_{i}$.
Thus, $N \subset N_{1} \cup N_{2}$, as claimed.
\end{proof}
Lemma \ref{lem 10} leads to a method of approximating $k$-complete neighbourhoods for a point $x$ in a very large data set $\mathcal{U}$.
In effect, if $Z_{i} \subset \mathcal{U}, 1 \leq i \leq p$ is a collection of subsets of $\mathcal{U}$ with $x \in Z_{i}$, and if $N_{i} \subset Z_{i}$ is a $k$-complete neighbourhood of $x$ in $Z_{i}$, then the $k$-complete neighbourhood $N$ of $x$ in $Z_{1} \cup \dots \cup Z_{p}$ is the $k$-complete neighbourhood of $x$ in the much smaller object $N_{1} \cup \dots \cup N_{p}$.
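The following Python sketch illustrates this approximation step; it is not part of the formal development, and the callable distance \texttt{d(x, z)} and the function names are assumptions made for the example. The function \texttt{k\_complete} adds whole fibres $Z_{x}(s)$ in order of increasing radius until at least $k$ elements are collected, and \texttt{combine} restricts the search to the union of previously computed neighbourhoods, as in Lemma \ref{lem 10}.
\begin{verbatim}
# The k-complete neighbourhood of x in a finite set Z: the smallest
# complete neighbourhood Z(x, s_i) with at least k elements (Section 2).
# "d" is assumed to be a callable returning d(x, z), possibly float('inf').

def k_complete(x, Z, d, k):
    radii = sorted(set(d(x, z) for z in Z if d(x, z) < float('inf')))
    N = set()
    for s in radii:
        N |= {z for z in Z if d(x, z) == s}   # add the whole fibre Z_x(s)
        if len(N) >= k:
            return N
    return N   # fewer than k points of Z lie at finite distance from x

# Lemma 10: the k-complete neighbourhood of x in the union of the Z_i
# is its k-complete neighbourhood in the union of the local ones.
def combine(x, samples, d, k):
    local = [k_complete(x, Z, d, k) for Z in samples]
    return k_complete(x, set().union(*local), d, k)
\end{verbatim}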
\section{Topological constructions}
Suppose that $(Z,d)$ is a finite ep-metric space.
Suppose given a neighbourhood $N_{x}$ for each $x \in Z$. Recall that the neighbourhood $N_{x}$ has a radius $s(N_{x}) < \infty$.
Each neighbourhood $N_{x}$ determines a filtered subcomplex $V(N_{x}) \subset V(Z)$ of the Vietoris-Rips complex $V(Z)$.
The inclusions $\{x,y\} \subset V(N_{x})$, $y \in N_{x} -\{x\}$, induce filtered simplicial complex maps
\begin{equation}\label{eq 1}
R(N_{x}):= \vee_{y}\ \Delta^{1}_{\geq s} \subset V(N_{x}) \subset V(Z).
\end{equation}
The copies of $\Delta^{1}_{\geq s}$ are indexed by the rays $\{x,y\}$, $y \in N_{x}-\{x\}$, of weight $s=d(x,y)$.
\begin{remark}\label{rem 11}
More properly, if the ray $\{x,y\}$ has weight $t=d(x,y)$, then the corresponding $1$-simplex of $R(N_{x})$ is the filtered simplex $\Delta^{1}_{\geq t}$ such that
\begin{equation*}
(\Delta^{1}_{\geq t})_{s} =
\begin{cases}
\emptyset & \text{if $s < t$, and} \\
\Delta^{1} & \text{if $s \geq t$.}
\end{cases}
\end{equation*}
It is better, sometimes, to say that $R(N_{x})$ is {\bf covered} by simplices $\Delta^{1}_{\geq s}$ corresponding to rays $\{x,y\}$ of weight $s$. This simply reflects the fact that the obvious map
\begin{equation*}
\sqcup_{y}\ \Delta^{1}_{\geq s} \to \vee_{y}\ \Delta^{1}_{\geq s} = R(N_{x})
\end{equation*}
is an epimorphism of filtered complexes.
\end{remark}
The full union
\begin{equation*}
R(N) = \cup_{x}\ R(N_{x}) \subset V(Z)
\end{equation*}
is the {\bf ray subcomplex} of $V(Z)$, for the collection of neighbourhoods $N = \{N_{x}\}$.
The ray subcomplex $R(N)$ is a filtered (or weighted) graph. If the neighbourhoods $N_{x}$ consist of $k$-nearest neighbours, then $R(N)$ is the $k$-nearest neighbours (kNN) graph.
The neighbourhoods $N_{x}$ generate an abstract simplicial complex $V(N) \subset V(Z)$ whose simplices are the subsets $\sigma \subset N_{x}$ of the various neighbourhoods $N_{x}$. The resulting filtered simplicial complex can be written
\begin{equation*}
V(N) = \cup_{x} \ V(N_{x}) \subset V(Z).
\end{equation*}
The subcomplex $V(N)$ of $V(Z)$ is called the {\bf neighbourhood complex}.
The inclusions $R(N_{x}) \subset V(N_{x})$ induce an inclusion $R(N) \subset V(N)$, so we have inclusions
\begin{equation}\label{eq 2}
R(N) \subset V(N) \subset V(Z)
\end{equation}
of filtered complexes, with corresponding inclusions
\begin{equation}\label{eq 3}
R_{s}(N) \subset V_{s}(N) \subset V_{s}(Z)
\end{equation}
of the various filtration stages.
The induced functions
\begin{equation*}
\pi_{0}R_{s}(N) \to \pi_{0}V_{s}(N) \to \pi_{0}V_{s}(Z)
\end{equation*}
in path components (or clusters)
are surjective for all parameters $s$, since all complexes have the same vertex set, namely $Z$.
\begin{remark}\label{rem 12}
The neighbourhood complex $V(N) = \cup_{x}\ V(N_{x})$ is covered by the subcomplexes $V(N_{x})$, in the sense that there is a surjection
\begin{equation*}
\bigsqcup_{x}\ V(N_{x}) \to V(N).
\end{equation*}
This covering has an associated \v{C}ech resolution, and there is a natural coequalizer
\begin{equation*}
\bigsqcup_{x,y}\ V_{s}(N_{x}) \cap V_{s}(N_{y}) \rightrightarrows \bigsqcup_{x}\ V_{s}(N_{x}) \to V_{s}(N)
\end{equation*}
in simplicial sets, where $s$ is the distance parameter. The path component functor preserves colimits, so there is a coequalizer
\begin{equation*}
\bigsqcup_{x,y}\ \pi_{0}(V_{s}(N_{x}) \cap V_{s}(N_{y})) \rightrightarrows \bigsqcup_{x}\ \pi_{0}V_{s}(N_{x}) \to \pi_{0}V_{s}(N)
\end{equation*}
in diagrams of sets, or clusters.
The directed set $\pi_{0}V_{s}(N)$ is the cluster object given by the mapper construction for the covering of $Z$ by the family of neighbourhoods $\{N_{x}\}$ \cite{CMS}.
\end{remark}
\begin{lemma}\label{lem 13}
Suppose that $x,y \in Z$. There is a path from $x$ to $y$ in $R(N)$ if and only if there is a sequence of elements
\begin{equation*}
x=x_{0}, x_{1}, \dots ,x_{r}=y
\end{equation*}
and neighbourhoods $N_{x_{i}}$ of $x_{i}$, such that $N_{x_{i}} \cap N_{x_{i+1}} \ne \emptyset$ for all $i$.
\end{lemma}
\begin{proof}
Suppose that
\begin{equation*}
x=x_{0}, \dots ,x_{p}=y
\end{equation*}
is a sequence of points such that $x_{i+1} \in N_{x_{i}}$ or $x_{i} \in N_{x_{i+1}}$ for neighbourhoods $N_{x_{i}}$ and $N_{x_{i+1}}$ of $x_{i}$ and $x_{i+1}$, respectively.
If $x_{i+1} \in N_{x_{i}}$ then $N_{x_{i}} \cap N_{x_{i+1}} \ne \emptyset$. Similarly, if $x_{i} \in N_{x_{i+1}}$ then $N_{x_{i}} \cap N_{x_{i+1}} \ne \emptyset$.
Suppose, conversely, that $v \in N_{x_{i}} \cap N_{x_{i+1}}$. Then there is an edge $x_{i} \to v$ in $N_{x_{i}}$ and an edge $x_{i+1} \to v$ in $N_{x_{i+1}}$, so that there is a path
\begin{equation*}
x_{i} \to v \leftarrow x_{i+1}
\end{equation*}
through neighbourhoods.
\end{proof}
By definition, the ray complex $R(N)$ is a filtered subcomplex of $V(Z)$. The subcomplex $R_{s}(N) \subset V_{s}(Z)$ is generated by rays $\{x,y\}$ with $d(x,y) \leq s$.
We have the following analog of Lemma \ref{lem 13}:
\begin{lemma}\label{lem 14}
Suppose that $x,y \in Z$. For each parameter value $s$, there is a path from $x$ to $y$ in $R_{s}(N)$ if and only if there is a sequence of elements
\begin{equation*}
x=x_{0}, x_{1}, \dots ,x_{r}=y
\end{equation*}
and neighbourhoods $N_{x_{i}}$ of $x_{i}$, such that $(N_{x_{i}})_{s} \cap (N_{x_{i+1}})_{s} \ne \emptyset$ for all $i$.
\end{lemma}
\section{Bounded neighbourhoods}
\subsection{$k$-bounded neighbourhoods}
In some examples (such as stellar charts), it is natural that neighbourhoods $N$ of $x$ have bounded cardinality and radius: $\vert N \vert \leq k+1$ for some $k$ and $s(N) \leq S$, with both $k$ and $S$ fixed.
From this point of view, for a fixed $x$, the {\bf $k$-bounded neighbourhoods} $N$ of $x$ are the subsets of $Z(x,S)$ which contain $x$ and have at most $k+1$ elements. Again, $Z(x,S)$ is the ball of radius $S$ in $Z$, which is centred on $x$.
We assume that $k \geq 1$ henceforth.
A point $x$ can have more than one $k$-bounded neighbourhood. The
$k$-bounded neighbourhoods of $x$ are ordered by inclusion, and the family has maximal elements. We have the following:
\begin{itemize}
\item[1)] The maximal $k$-bounded neighbourhoods $N \subset Z(x,S)$ either have cardinality $k+1$ or satisfy $N = Z(x,S)$.
\item[2)] All sets $N$ of $k$-nearest neighbours with $s(N) \leq S$ are maximal.
\item[3)] If $N = \{x\}$ is maximal, then $x$ is an isolated point for the parameter $S$.
\end{itemize}
The corresponding neighbourhood complex $V(k-N)$ is the filtered subcomplex of $V(Z)$ that is generated by the subobjects $V(N)$ for all $k$-bounded neighbourhoods $N$ of all $x$, and $R(k-N)$ is the associated ray subcomplex. As in (\ref{eq 2}), we have a sequence of inclusions
\begin{equation*}
R(k-N) \subset V(k-N) \subset V(Z).
\end{equation*}
If $t \leq S$ and $\sigma = \{x_{0}, \dots ,x_{n}\}$ is an $n$-simplex of $V(Z)_{t}$ with $n \leq k$, then $\sigma$ is a $k$-bounded neighbourhood of $x_{0}$. In effect, $\sigma$ has at most $k+1$ elements, each at distance at most $t \leq S$ from $x_{0}$. It follows that $V_{t}(k-N)_{n} = V_{t}(Z)_{n}$ for $n \leq k$, or that $\operatorname{sk}_{k}V_{t}(k-N) = \operatorname{sk}_{k}V_{t}(Z)$. In particular, $\operatorname{sk}_{1}V_{t}(k-N) = \operatorname{sk}_{1}V_{t}(Z)$ since $k \geq 1$, and so the simplicial sets $V_{t}(k-N)$ and $V_{t}(Z)$ have the same path components.
We have shown the following:
\begin{lemma}\label{lem 15}
Suppose that $t \leq S$, and construct the neighbourhood complex $V(k-N)$ from $k$-bounded neighbourhoods as above. Then the function
\begin{equation*}
\pi_{0}V_{t}(k-N) \to \pi_{0}V_{t}(Z)
\end{equation*}
is a bijection.
\end{lemma}
Suppose that $t \geq S$, and that $\{x,y\}$ is a $1$-simplex of $V_{t}(k-N)$. Then $\{x,y\} \subset N$ for a $k$-bounded neighbourhood $N$ of some $z$. Further, $d(z,x) \leq s(N)$ and $d(z,y) \leq s(N)$, so that $d(z,x),d(z,y) \leq s(N) \leq S \leq t$. It follows that there is a path
\begin{equation*}
x \leftarrow z \to y
\end{equation*}
in $R_{t}(k-N)$, and so the function
\begin{equation*}
\pi_{0}R_{t}(k-N) \to \pi_{0}V_{t}(k-N)
\end{equation*}
is a bijection.
We have proved
\begin{lemma}\label{lem 16}
Suppose that $t \geq S$. Then the induced function
\begin{equation*}
\pi_{0}R_{t}(k-N) \to \pi_{0}V_{t}(k-N)
\end{equation*}
is a bijection.
\end{lemma}
Write
\begin{equation*}
R(k-N) = \cup_{t}\ R_{t}(k-N).
\end{equation*}
Then the map
\begin{equation*}
\pi_{0}R_{t}(k-N) \to \pi_{0}R(k-N)
\end{equation*}
is a bijection for $t \geq S$, because $R_{t}(k-N) = R(k-N)$ in that range.
We therefore have the following:
\begin{corollary}
The functions
\begin{equation*}
\pi_{0}R(k-N) \leftarrow \pi_{0}R_{t}(k-N) \to \pi_{0}V_{t}(k-N)
\end{equation*}
are bijections for all $t \geq S$.
\end{corollary}
Write
\begin{equation*}
N(x) = \cup_{N}\ V(N)
\end{equation*}
in $V(Z)$, where the union is indexed over all $k$-bounded neighbourhoods $N$ of $x$. Let $R(x) \subset N(x)$ be the associated ray subcomplex.
We have the inclusions
\begin{equation*}
R(x) \subset N(x) \subset V(Z(x,S)).
\end{equation*}
Suppose that $t \leq S$ and
$n +1\leq k$. Suppose that $\sigma = \{x_{0}, \dots ,x_{n}\}$ is a non-degenerate $n$-simplex of $V(Z(x,S))_{t}$.
Write
\begin{equation*}
\sigma_{x} = \{x,x_{0}, \dots ,x_{n}\}.
\end{equation*}
Then $\vert \sigma_{x} \vert \leq k+1$, so that $\sigma_{x}$ is a $k$-bounded neighbourhood of $x$, and so $\sigma = d_{0}\sigma_{x}$ is in the image of the composite
\begin{equation*}
V(\sigma_{x})_{t} \to N(x)_{t} \to V(Z(x,S))_{t}.
\end{equation*}
It follows that $\operatorname{sk}_{k-1}N(x)_{t} = \operatorname{sk}_{k-1}V_{t}(Z(x,S))$ for $t \leq S$.
In particular, the map
\begin{equation*}
\pi_{0}N(x)_{t} \to \pi_{0}V_{t}(Z(x,S))
\end{equation*}
is a bijection if $k \geq 2$ and $t \leq S$.
Suppose that $t \geq S$, and that $y$ and $z$ are vertices of $N(x)$. Then $d(x,y), d(x,z) \leq S \leq t$, and it follows that the map
\begin{equation*}
\ast = \pi_{0}R(x)_{t} \to \pi_{0} N(x)_{t}
\end{equation*}
is a bijection.
Every $y \in Z(x,S)$ is a member of a $k$-bounded neighbourhood $\{x,y\}$ since $k \geq 1$. It follows that the maps
\begin{equation*}
\pi_{0}R(x)_{t} \to \pi_{0}N(x)_{t} \to \pi_{0}V_{t}(Z(x,S))
\end{equation*}
are surjective.
We have proved:
\begin{lemma}\label{lem 18}
Suppose that the complexes $N(x)$ and $R(x)$ are defined as above. Suppose that $k \geq 2$. Then we have the following:
\begin{itemize}
\item[1)] If $t \leq S$ then the map $\pi_{0}N(x)_{t} \to \pi_{0}V_{t}(Z(x,S))$ is a bijection.
\item[2)] If $t \geq S$ then the maps
\begin{equation*}
\ast =\pi_{0}R(x)_{t} \to \pi_{0}N(x)_{t} \to \pi_{0}V(Z(x,S))_{t}
\end{equation*}
are bijections.
\end{itemize}
\end{lemma}
\subsection{Complete neighbourhoods}
Suppose that $Z$ is a finite ep-metric space, and that each $x \in Z$ has a fixed complete neighbourhood $N_{x}=Z(x,r_{x})$.
Form the associated filtered complexes
\begin{equation*}
R(N) \subset V(N) \subset V(Z),
\end{equation*}
for $Z$ and the system of neighbourhoods $N = \{N_{x}\}$.
\begin{example}\label{ex 19}
Suppose that $S>0$ is a fixed distance parameter and $k > 1$ is a fixed integer.
Say that a neighbourhood $N_{x}$ of $x \in Z$ is {\bf complete $k$-bounded} if $N_{x}$ has the form
\begin{equation*}
N_{x} = Z(x,s_{x}) \cap Z(x,S),
\end{equation*}
where $Z(x,s_{x})$ is the unique $k$-complete neighbourhood of $x$ (see Section 2).
There are two possibilities:\ $N_{x} = Z(x,s_{x})$, in which case $N_{x}$ is $k$-complete, or $N_{x}=Z(x,S)$ and $\vert Z(x,S) \vert < k$. In either case, the neighbourhood $N_{x}$ is uniquely determined and is complete.
The use of {\it complete} $k$-bounded neighbourhoods gives a different perspective for the stellar chart example. For a fixed (and appropriate) distance $S$ and positive integer $k$, the complete $k$-bounded neighbourhoods $N_{x}$ of stars $x$ in a globular cluster would be $k$-complete neighbourhoods of small radius, while stars in an outer spiral arm are more likely to have neighbourhoods $N_{x}$ of smaller cardinality.
\end{example}
\begin{lemma}\label{lem 20}
Suppose that $Z$ is a finite ep-metric space, and that each $x \in Z$ has a fixed complete neighbourhood $N_{x}=Z(x,r_{x})$.
\begin{itemize}
\item[1)] Suppose that $t \leq r_{x}$ for all $x$. Then the functions
\begin{equation*}
\pi_{0}R_{t}(N) \to \pi_{0}V_{t}(N) \to \pi_{0}V_{t}(Z)
\end{equation*}
are bijections.
\item[2)] Suppose that $t \geq r_{x}$ for all $x$. Then the map
\begin{equation*}
\pi_{0}R_{t}(N) \to \pi_{0}V_{t}(N)
\end{equation*}
is a bijection.
\item[3)] Suppose that $t \geq S \geq r_{x}$ for all $x$. Then the map
\begin{equation*}
\pi_{0}R_{S}(N) \to \pi_{0}R_{t}(N)
\end{equation*}
is a bijection.
\end{itemize}
\end{lemma}
\begin{proof}
For 1), suppose that $\{x,y\}$ is a $1$-simplex of $V_{t}(Z)$, so that $d(x,y) \leq t$. Then $y \in N_{x}$ since $t \leq r_{x}$, and $\{x,y\}$ is a ray of $N_{x}$. It follows that there are equalities of $1$-skeleta
\begin{equation*}
\operatorname{sk}_{1}R_{t}(N) = \operatorname{sk}_{1}V_{t}(N) = \operatorname{sk}_{1}V_{t}(Z),
\end{equation*}
and the statement follows.
For statement 2), suppose that $\{x,y\}$ is a $1$-simplex of $V_{t}(N_{z}) \subset V_{t}(N)$. Then there are $1$-simplices $x \leftarrow z \rightarrow y$ in $V_{t}(N_{z})$ since $t \geq r_{z}$. This is true for all $z$, and it follows that the function
\begin{equation*}
\pi_{0}R_{t}(N) \to \pi_{0}V_{t}(N)
\end{equation*}
is a bijection.
For statement 3), observe that every ray ($1$-simplex) $\{x,y\}$ of $R(N)$ has length at most $S$, so that $R_{S}(N) = R_{t}(N)$.
\end{proof}
\begin{corollary}\label{cor 21}
Suppose that $t \geq S \geq r_{x}$ for all $x \in Z$. Then the inclusion $V_{S}(N) \subset V_{t}(N)$ of neighbourhood complexes induces a bijection
\begin{equation*}
\pi_{0}V_{S}(N) \to \pi_{0}V_{t}(N).
\end{equation*}
\end{corollary}
\begin{proof}
The Corollary follows from statements 2) and 3) of Lemma \ref{lem 20}.
\end{proof}
\begin{remark}\label{rem 22}
Suppose that $Z'$ is the subset of elements $x \in Z$ such that $Z(x,r_{x}) = \{x\}$, and let $Z''=Z-Z'$. Then
\begin{itemize}
\item[1)]
$V_{t}(Z) = Z' \sqcup V_{t}(Z'')$,
\item[2)] $V(N)_{t} = Z' \sqcup V(N'')_{t}$,
\item[3)]
$R(N)_{t}= Z' \sqcup R(N'')_{t}$,
\end{itemize}
for $t \leq r_{z}$, all $z$, where $Z'$ is a discrete set. Here,
\begin{equation*}
Z'' = \cup_{y \in Z''}\ Z(y,r_{y}),
\end{equation*}
and $N''$ is the system of neighbourhoods $Z(y,r_{y})$ for $y \in Z''$.
\end{remark}
\section{The UMAP construction}
One starts with a neighbourhood $N_{x}$ for each element $x$ of a data set $Z$, with positive weights $d(x,y)$ for each $y \in N_{x}-\{x\}$. The subset $\{x,y\}$ for such a $y$ is said to be a ray.
The weight $d(x,y)$ defines an ep-metric space structure on the set $\{x,y\}$.
Form the ep-metric space
\begin{equation*}
Z_{x}= \vee_{y \in N_{x}-\{x\}}\ \{x,y\},
\end{equation*}
from the rays $\{x,y\}$, for each $x \in Z$. This structure is extended to an ep-metric space structure $(Z,D_{x})$ on the full data set $Z$, by setting
\begin{equation*}
(Z,D_{x}) = (\sqcup_{z \in Z-Z_{x}}\ \{z\}) \sqcup Z_{x}
\end{equation*}
in ep-metric spaces.
The ep-metric space
\begin{equation*}
(Z,D) = \vee_{x \in Z}\ (Z,D_{x})
\end{equation*}
and the UMAP complex
\begin{equation*}
V(Z,N) = \vee_{x \in Z}\ V(Z,D_{x})
\end{equation*}
are formed by amalgamating along vertices (elements of $Z$), in ep-metric spaces and filtered complexes, respectively.
It is crucial, for these ep-metric space constructions, to know that the category of ep-metric spaces is cocomplete --- see Section 1.
The following excision statement for path components is Lemma 2 of \cite{UMAP-stab}:
\begin{theorem}\label{th 23}
The canonical map $V(Z,N) \to V(Z,D)$ induces isomorphisms
\begin{equation*}
\pi_{0}V(Z,N)_{s} \xrightarrow{\cong} \pi_{0}V(Z,D)_{s}
\end{equation*}
for $s$ finite.
\end{theorem}
Theorem \ref{th 23} is proved by observing that distances in $(Z,D)$ are computed from paths through neighbourhoods $N_{x}$.
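As a computational illustration of this observation (a sketch only, not part of the formal development), the ep-metric $D$ can be evaluated as a shortest-path distance in the weighted graph whose edges are the rays, where a ray that receives weights from more than one neighbourhood is given the minimum of those weights, as in the amalgamation example of Section 1. The adjacency-list input format and the function names below are assumptions made for the example.
\begin{verbatim}
# D(z, w) in (Z, D): a shortest-path distance through rays, where the
# weight of a ray {x, y} is the minimum of the weights assigned to it
# by the neighbourhoods that contain it.
# "nbhd_weights" is assumed to satisfy nbhd_weights[x][y] = d(x, y)
# for y in N_x - {x}.

import heapq

def ray_graph(nbhd_weights):
    G = {}
    for x, ws in nbhd_weights.items():
        for y, w in ws.items():
            G.setdefault(x, {})[y] = min(w, G.get(x, {}).get(y, float('inf')))
            G.setdefault(y, {})[x] = min(w, G.get(y, {}).get(x, float('inf')))
    return G

def D(G, z, w):
    dist = {z: 0.0}
    heap = [(0.0, z)]
    while heap:
        t, u = heapq.heappop(heap)
        if u == w:
            return t
        if t > dist.get(u, float('inf')):
            continue
        for v, weight in G.get(u, {}).items():
            nt = t + weight
            if nt < dist.get(v, float('inf')):
                dist[v] = nt
                heapq.heappush(heap, (nt, v))
    return float('inf')   # z and w lie in different path components
\end{verbatim}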
We shall need the following local computation:
\begin{lemma}\label{lem 24}
Suppose that $y \in N_{x}-\{x\}$ defines the ray $\{x,y\}$. Then
\begin{equation*}
d(x,y)=D_{x}(x,y).
\end{equation*}
in $Z_{x}$.
\end{lemma}
\begin{proof}
The number $D_{x}(x,y)$ is the minimum of all sums $\sum_{j}\ d(x_{j},x_{j+1})$, for paths
\begin{equation*}
P:\ x=x_{0},x_{1},\dots, x_{p}=y
\end{equation*}
through rays in $Z_{x}$. The ray $\{x_{p-1},y\}$ must be the ray $\{x,y\}$, so that
\begin{equation*}
D_{x}(x,y) \geq \sum_{j}\ d(x_{j},x_{j+1}) \geq d(x,y).
\end{equation*}
The subobject $\{x,y\}$ is a ray, so that $\{x,y\}$ is a path, and $D_{x}(x,y) \leq d(x,y)$.
\end{proof}
Each $y \in N_{x} - \{x\}$ determines an inclusion of filtered complexes
\begin{equation*}
V(\{x,y\}) \subset V(Z_{x},D_{x}) \subset V(Z,D).
\end{equation*}
The simplicial set $V_{t}(\{x,y\})$ consists of the vertices $x$ and $y$ for $t < d_{x}(x,y)$, and has $1$-simplices
\begin{equation*}
\{x\} \subset \{x,y\} \supset \{y\}
\end{equation*}
for $t \geq d_{x}(x,y)$.
\begin{remark}
Recall that $V(\{x,y\})$ is the barycentric subdivision of a filtered $1$-simplex that would be defined by imposing a total order on the set $\{ x,y \}$.
\end{remark}
Suppose that $\{x,u\}$ is a ray in $N_{x}$ and that $\{y,v\}$ is a ray of $N_{y}$, and consider the composite monomorphisms
\begin{equation*}
V(\{x,u\}) \subset V(Z_{x}) \subset V(Z),\quad V(\{y,v\}) \subset V(Z_{y}) \subset V(Z).
\end{equation*}
Suppose that $d_{x}(x,u) \leq d_{y}(y,v)$.
Generally, $V(X) = BP(X)$, where $P(X)$ is a poset of generating simplices. In the case at hand, there is a pullback diagram
\begin{equation*}
\xymatrix{
BP(\{x,u\} \cap \{y,v\}) \ar[r] \ar[d] & BP(\{y,v\}) \ar[d] \\
BP(\{x,u\}) \ar[r] & BP(Z)
}
\end{equation*}
The intersection $\{x,u\} \cap \{y,v\}$ is at most a $2$-element set.
If $\{x,u\} \cap \{y,v\} = \emptyset$ the pullback is empty, and if
$\{x,u\} \cap \{y,v\}$ is a point the pullback is a point.
If $\{x,u\} \cap \{y,v\}$ is a $2$-element set, then $\{x,u\} = \{y,v\}$,
and there is a commutative diagram
\begin{equation*}
\xymatrix{
BP(\{x,u\}) \ar[dr] && BP(\{y,v\}) \ar[ll]_{\theta} \ar[dl]\\
& BP(Z)
}
\end{equation*}
where $\theta$ ``reduces weight''. It follows, in this case, that
there is a pullback
\begin{equation}\label{eq 4}
\xymatrix{
BP(\{y,v\}) \ar[r]^{1} \ar[d]_{\theta} & BP(\{y,v\}) \ar[d] \\
BP(\{x,u\}) \ar[r] & BP(Z)
}
\end{equation}
The ray complex $R(N_{x}) \subset V(Z_{x})$ is the wedge of rays
\begin{equation*}
R(N_{x}) = \vee_{y \in N_{x}-\{x\}}\ V(\{x,y\}).
\end{equation*}
The filtered complex monomorphisms
\begin{equation*}
\phi_{x}: R(N_{x}) \to V(Z_{x},D_{x}) \to V(Z,D),
\end{equation*}
together define a monomorphism
\begin{equation*}
\phi: R(N) = \cup_{x\in Z}\ R(N_{x}) \subset V(Z),
\end{equation*}
and we say that the union $R(N)$ is the ray subcomplex of $V(Z)$. The ray complex $R(N)$ is a weighted graph.
The ray complex $R(N)$ is a union of (or is covered by) filtered subcomplexes $V(\{x,y\})$, which are defined by rays $\{x,y\}$ and their weights $d(x,y)$. The intersections (pullbacks)
\begin{equation*}
\xymatrix{
BP(\{x,u\} \cap \{y,v\}) \ar[r] \ar[d] & BP(\{y,v\}) \ar[d] \\
BP(\{x,u\}) \ar[r] & R(N)
}
\end{equation*}
are constructed in $V(Z)$ as above, since $R(N) \subset V(Z)$ is a monomorphism. It follows that the ray complex $R(N)$ is a union of rays, with possible adjustments of weights in intersections, as in the pullback diagram (\ref{eq 4}).
\begin{remark}
The present description of the ray complex $R(N)$ is independent of distances in the space $(Z,D)$. It generalizes the description of the ray complex that appears in Section 3, which uses a fixed ambient ep-metric.
\end{remark}
There is, finally, an excision result that makes $R(N)$ a candidate for the UMAP graph, as follows:
\begin{theorem}\label{th 27}
The filtered complex map $\phi: R(N) \subset V(Z,D)$ induces isomorphisms
\begin{equation*}
\phi_{\ast}: \pi_{0}R_{s}(N) \xrightarrow{\cong} \pi_{0}V_{s}(Z,D)
\end{equation*}
for all $s \geq 0$.
\end{theorem}
\begin{proof}
The proof is similar to that of Theorem \ref{th 23}.
The map $\phi$ is the identity on vertices, so the functions $\phi_{\ast}$ are surjective.
Suppose that there is a $1$-simplex $\{z,w\}$ of $V(Z,D)_{s}$. Then there is a path
\begin{equation*}
z=x_{0},x_{1}, \dots ,x_{p}=w
\end{equation*}
through rays $\{x_{i},x_{i+1}\}$ such that
\begin{equation*}
\sum_{i=0}^{p-1}\ d(x_{i},x_{i+1}) \leq s
\end{equation*}
by Lemma \ref{lem 24}.
But then $d(x_{i},x_{i+1}) \leq s$ for all $i$, so that $z$ and $w$ are in the same path component of the simplicial set $R(N)_{s}$.
It follows that the functions $\phi_{\ast}$ are injective.
\end{proof}
\section{Weighted directed graphs}
A weighted directed graph $\Gamma$ consists of a set of edges $e:x \to y$, such that each edge $e$ has a weight $w(e) > 0$. For the present discussion, the vertices of $\Gamma$ are faces of the edges. We write $Z$ for the set of vertices of $\Gamma$.
Trivial examples are given by $1$-skeleta $\operatorname{sk}_{1}K$ of oriented simplicial complexes $K$, with weights $w(e) = 1$ for each $1$-simplex $e: x \to y$, and such that every vertex is a face of some non-degenerate $1$-simplex $e$.
A weighted directed graph $\Gamma$ is said to be {\bf sparse} if all vertices $x$ of $\Gamma$ have low valence. This means that each vertex of $\Gamma$ is in the boundary of a small (i.e. computable) number of edges.
Suppose that $x$ is a vertex of a weighted directed graph $\Gamma$.
A {\bf path} $P: x \dashrightarrow y$ from $x$ to another vertex $y$ in $\Gamma$ is a string of edges
\begin{equation}\label{eq 5}
P:\ x=y_{0} \xrightarrow{e_{1}} y_{1} \xrightarrow{e_{2}} \dots \xrightarrow{e_{p}} y_{p} = y.
\end{equation}
Say that the integer $p$ is the length $\ell(P)$ of the path $P$.
\begin{remark}
The collection of all paths $P: x \dashrightarrow y$ in the graph $\Gamma$ forms a weighted graph $P(\Gamma)$ having the same vertices as the graph $\Gamma$.
The paths $P: x \dashrightarrow y$ and $Q: y \dashrightarrow z$ are composable: the concatenation of $P$ with $Q$ defines a path $P \circ Q: x \dashrightarrow z$. Thus, $P(\Gamma)$ has more structure: $P(\Gamma)$ is the free category on the graph $\Gamma$.
The path graph $P(\Gamma)$ is not sparse in general.
\end{remark}
The weight $w(P)$ of the path $P$ can be defined by
\begin{equation}\label{eq 6}
w(P) = \min_{i}\ \{w(e_{i})\}.
\end{equation}
\begin{remark}
The definition of the weight of a path is somewhat arbitrary, and depends on applications. The assignment of (\ref{eq 6}) is motivated by graphs of data transfers, which are discussed below. One could, alternatively, set
\begin{equation*}
w(P) = \sum_{i}\ w(e_{i}).
\end{equation*}
\end{remark}
Fix a positive integer $k$.
The {\bf neighbourhood} $N_{k}(x)$ is the collection of all vertices $y$ which appear in paths
\begin{equation*}
Q:\ z_{0} \to z_{1} \to \dots \to z_{p},
\end{equation*}
having length $p=\ell(Q) \leq k$, such that $x=z_{i}$ for some $i$.
We assign a weight (or distance) $d(x,y)$ for all $y$ in the neighbourhood $N_{k}(x)$.
For $y \in N_{k}(x)$, define the {\bf weight sum} $\Sigma(x,y)$ by
\begin{equation*}
\Sigma(x,y) = (\sum_{P: x \dashrightarrow y,\ \ell(P) \leq k}\ w(P)) + (\sum_{Q: y \dashrightarrow x,\ \ell(Q) \leq k}\ w(Q)).
\end{equation*}
In a graph of transactions, the weight sum $\Sigma(x,y)$ represents the total value of all transactions between $x$ and $y$. If $\Sigma(x,y)$ has a large value, then there is more business between $x$ and $y$, and these objects should be closer in some sense. To express this relationship, use the Shannon information function to define a distance
\begin{equation}\label{eq 7}
d(x,y) = e^{-\Sigma(x,y)}
\end{equation}
for $y \in N_{k}(x)$.
Other approaches to defining a distance $d(x,y)$ for the vertices $y$ of $N_{k}(x)$ are certainly possible.
We end up with a computable neighbourhood $N_{k}(x)$ of vertices in a sparse directed graph $\Gamma$ for each of its vertices $x$, with distances (weights) $d(x,y)$ for $y \in N_{k}(x) - \{x\}$.
These are the inputs for the UMAP construction, which is described in Section 5.
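A hedged Python sketch of this local construction follows; it is illustrative only. The edge-list format \texttt{(u, v, w)} and the function name \texttt{neighbourhood} are assumptions made for the example. The sketch enumerates all directed paths of length at most $k$ out of $x$ and into $x$ (which is feasible for a sparse graph and small $k$), accumulates the weight sums $\Sigma(x,y)$, and returns the distances $d(x,y)=e^{-\Sigma(x,y)}$ of (\ref{eq 7}).
\begin{verbatim}
# Weighted neighbourhood N_k(x) of a vertex x in a sparse directed
# graph, with the distances d(x, y) = exp(-Sigma(x, y)) of (7).
# "edges" is assumed to be a list of triples (u, v, w), w > 0.

import math
from collections import defaultdict

def neighbourhood(edges, x, k):
    out = defaultdict(list)           # u -> [(v, w)]: outgoing edges
    inc = defaultdict(list)           # v -> [(u, w)]: incoming edges
    for u, v, w in edges:
        out[u].append((v, w))
        inc[v].append((u, w))

    def walks_from(start, adj):
        # endpoints of all directed paths of length <= k from "start",
        # each paired with the minimum edge weight along the path
        found, stack = [], [(start, 0, math.inf)]
        while stack:
            u, length, wmin = stack.pop()
            if length == k:
                continue
            for v, w in adj[u]:
                m = min(wmin, w)
                found.append((v, m))
                stack.append((v, length + 1, m))
        return found

    sigma = defaultdict(float)        # Sigma(x, y): sum of path weights
    for y, w in walks_from(x, out) + walks_from(x, inc):
        sigma[y] += w

    return {y: math.exp(-s) for y, s in sigma.items() if y != x}
\end{verbatim}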
\begin{example}[{\bf Data transfers}]\label{ex 30}
A data transfer $e: x \to y$ from a computer account $x$ to a different account $y$ has a weight $w(e)$, which is the number of bytes transferred. The transfer $e$ also has source and target time stamps, $s(e)$ and $t(e)$, respectively, with $s(e) < t(e)$. Thus (provisionally), a graph $\Gamma$ of data transfers has edges $e: (x,s(e)) \to (y,t(e))$ with $x \ne y$, and its vertices consist of pairs $(x,t)$, where $x$ is a computer account and $t$ is either a source or a target timestamp for some edge.
There may be multiple vertices $(x,t)$ for a fixed account $x$. Suppose that $t_{0} < t_{1} < \dots < t_{p}$ are the timestamps for a fixed account $x$. Say that the list $\{t_{0}, \dots ,t_{p}\}$ is the simplex of timestamps for the account $x$.
For $x \ne y$, an edge $E: (x,s) \to (y,t)$ of the transfer graph $\Gamma$ consists of a transfer $e: (x,s(e)) \to (y,t(e))$, together with relations $s \leq s(e)$ and $t(e) \leq t$ in the simplices of timestamps for the accounts $x$ and $y$, respectively. The weight $w(E)$ is the weight $w(e)$ of the transfer $e$. The set of vertices $Z$ of $\Gamma$ consists of all pairs $(x,s)$ of accounts $x$ and timestamps $s$ of transfers.
If all timestamps lie within a small enough interval, then the transfer graph $\Gamma$ is sparse.
This example motivates the definitions of weights of paths and distances within neighbourhoods that are seen above.
Explicitly, a path
\begin{equation*}
P:\ (x,s) = x_{0} \xrightarrow{E_{1}} x_{1} \xrightarrow{E_{2}} \dots \xrightarrow{E_{p}} x_{p} = (y,t)
\end{equation*}
in $\Gamma$ consists of edges
\begin{equation*}
E_{i}: x_{i} = (x_{i},s_{i}) \to (x_{i+1},s_{i+1}) = x_{i+1}
\end{equation*}
with $x_{i} \ne x_{i+1}$, and each such edge has weight $w(E_{i})$.
The weight $w(P)$ of the path $P$ is defined by
\begin{equation*}
w(P) = \min_{i} \{w(E_{i})\},
\end{equation*}
as in (\ref{eq 6}).
The weight $w(P)$ represents the maximum amount of data that could be transferred from $(x,s)$ to $(y,t)$ along the path $P$.
Fix a positive integer $k$ and an element $x=(x,s)$ in the transfer graph $\Gamma$, and define the neighbourhood $N_{k}(x)$ to be the set of vertices of paths of length at most $k$ that pass through $x$.
The weight sum $\Sigma(x,y)$ for $y \in N_{k}(x)$ is defined by
\begin{equation*}
\Sigma(x,y) = (\sum_{P: x \dashrightarrow y,\ \ell(P) \leq k}\ w(P)) + (\sum_{Q: y \dashrightarrow x,\ \ell(Q) \leq k}\ w(Q)),
\end{equation*}
and the weight $d(x,y)$ of the ray $\{x,y\}$ has the form
\begin{equation*}
d(x,y) = e^{-\Sigma(x,y)}.
\end{equation*}
\end{example}
\begin{remark}[{\bf Undirected graphs}]\label{rem 31}
The directed structure for the graph $\Gamma$ is a central feature of the examples discussed above. Analogous local to global methods apply equally well to construct ep-metric spaces and UMAP complexes for undirected graphs.
Suppose that $\Omega$ is a sparse weighted graph, with weights $w(e)$ for the edges $e$ of $\Omega$. One assumes that the vertices of $\Omega$ are faces of its edges.
Suppose that $x$ is a vertex of $\Omega$. Say that $y \in N_{k}(x)$ if there is a path, or string of edges
\begin{equation*}
P:\ x=x_{0} \overset{e_{1}}{\leftrightarrow} x_{1} \overset{e_{2}}{\leftrightarrow} \dots \overset{e_{p}}{\leftrightarrow} x_{p}=y
\end{equation*}
with $p \leq k$.
Again there are choices, but define the weight $w(P)$ of the path $P: x \leftrightarrow y$ by
\begin{equation*}
w(P) = \min_{i}\ \{w(e_{i})\}.
\end{equation*}
Fix a vertex $x$ and a positive integer $k$. Equivalently, $N_{k}(x)$ is the set of all vertices of $\Omega$ which lie on paths of length at most $k$ that pass through $x$.
Write
\begin{equation*}
\Sigma(x,y) = \sum_{P: x \dashrightarrow y, \ell(P) \leq k}\ w(P),
\end{equation*}
and set
\begin{equation*}
d(x,y) = e^{-\Sigma(x,y)}
\end{equation*}
for $y \in N_{k}(x)$.
One uses the weights $d(x,y)$ to construct an ep-metric on the neighbourhood $N_{k}(x)$. These ep-metrics patch together, to give an ep-metric on the full set $Z$ of vertices of $\Omega$.
\end{remark}
\section{Bags of words}
In the ``bag of words'' model for natural language processing (see, for example \cite{bleiLDA}), one starts with a collection $C=\{C_{1}, \dots ,C_{N}\}$ of {\bf documents} $C_{i}$, where each $C_{i} = (t_{i,1}, \dots ,t_{i,M_{i}})$ is a sequence of {\bf tokens} (i.e.\ words, phrases, etc.), with possible repetitions. The collection $C$ is the {\bf corpus}.
The sequence $C_{i}$ is a function
\begin{equation*}
C_{i}: \underline{M}_{i} \to \mathcal{T},
\end{equation*}
where $\mathcal{T}$ is the set of distinct tokens in all $C_{i}$, and $\underline{M}_{i} = \{1,2, \dots ,M_{i}\}$. The sequence $C_{i}$ may have repeats, so the function $C_{i}$ is not injective in general.
The usual thing is to amalgamate some tokens (by root words, or whatever), to form a surjective map $\ell: \mathcal{T} \to \mathcal{L}$. The set $\mathcal{L}$ is the {\bf vocabulary} and its elements are called {\bf words}.
Write $p$ for the composite function
\begin{equation*}
p: \sqcup_{i}\ \underline{M}_{i} \xrightarrow{C} \mathcal{T} \xrightarrow{\ell} \mathcal{L},
\end{equation*}
and let $p_{i}: \underline{M}_{i} \to \mathcal{L}$ be the restriction of $p$ to the summand $\underline{M}_{i}$.
We assume that there are no common tokens (``stop words'') or rare tokens in the set $\mathcal{T}$, however these are determined. This means that the fibres $p^{-1}(w)$ of the function $p$ are neither too large nor too small, and in particular are computationally manageable. The function $p$ and its fibres are the objects
of interest for this discussion.
The fibres $p^{-1}(w)$ are the instances of the word $w \in \mathcal{L}$ in the corpus $C$.
\begin{remark}
In more generality, we could have functions $p_{i}: \underline{M}_{i} \to Z$ which cover a set $Z$, in the sense that the amalgamated function
\begin{equation*}
p: \sqcup_{i}\ \underline{M}_{i} \to Z
\end{equation*}
is surjective. Here, the restriction of $p$ to the summand $\underline{M}_{i}$ is $p_{i}$. One assumes that the fibres $p_{i}^{-1}(z)$ for $z \in Z$ are computationally manageable (or tractable in the sense of the next section), as is the collection of functions $\{p_{i}\}$.
Subject to size assumptions on the cardinals $\underline{M}_{i}$ and the collection of functions $p_{i}$, the following discussion can be applied in such a setting.
One could even replace the sets $\underline{M}_{i}$ with metric spaces in the discussion that follows.
\end{remark}
Write $\mathcal{L}_{i}$ for the image of the restricted function
\begin{equation*}
p_{i} = \ell_{i}: \underline{M}_{i} \xrightarrow{C_{i}} \mathcal{T} \xrightarrow{\ell} \mathcal{L}.
\end{equation*}
The composite $p_{i}$ restricts to a surjective function $p_{i}: \underline{M}_{i} \to \mathcal{L}_{i}$, and there is a commutative diagram of functions
\begin{equation*}
\xymatrix{
\underline{M}_{i} \ar[r]^{p_{i}} \ar[d] & \mathcal{L}_{i} \ar[d] \\
\sqcup_{i}\ \underline{M}_{i} \ar[r]_{p} & \mathcal{L}
}
\end{equation*}
in which the vertical maps are inclusions.
Each set $\underline{M}_{i}$ has a metric $d$ with $d(x,y) = \vert y-x \vert$.
Suppose that $r$ is a positive integer. Fix a word $v \in \mathcal{L}$. For a word $w \in \mathcal{L}$, let
$p^{-1}_{i}(w)_{\leq r}$ denote the set of all elements $y \in p_{i}^{-1}(w)$ such that $d(x,y) \leq r$ for some $x \in p_{i}^{-1}(v)$. Then we have
\begin{equation*}
p_{i}^{-1}(w)_{\leq r} = p_{i}^{-1}(w) \cap (\cup_{x \in p^{-1}(v)}\ [x-r,x+r])
\end{equation*}
in the set $\underline{M}_{i}$.
The subsets $p^{-1}_{i}(w)_{\leq r}$ filter the fibre $p_{i}^{-1}(w)$. Observe that $p_{i}^{-1}(v)_{\leq r} = p_{i}^{-1}(v)$.
Set
\begin{equation}\label{eq 8}
d_{i}[r](v,w) = \sum_{x \in p_{i}^{-1}(v),y \in p_{i}^{-1}(w), d(x,y) \leq r}\ d(x,y),
\end{equation}
and define
\begin{equation*}
d[r](v,w) = \sum_{i}\ d_{i}[r](v,w)
\end{equation*}
for all $v,w \in \mathcal{L}$.
The number $d_{i}[r](v,w)$ is non-zero if and only if there are elements $x \in p_{i}^{-1}(v)$ and $y \in p_{i}^{-1}(w)$ such that $d(x,y) \leq r$, and $d[r](v,w) \ne 0$ if and only if $d_{i}[r](v,w) \ne 0$ for some $i$.
In particular, $d_{i}[r](v,v)$ is the sum of the distances $d(x,y)$ between $x,y \in p_{i}^{-1}(v)$ such that $d(x,y) \leq r$, and $d_{i}[0](v,v)=0$.
It follows that $d[r](v,v)$ can be non-trivial for $r > 0$, and $d[0](v,v) = 0$.
Take all elements $x$ of the fibres $p_{i}^{-1}(v)$ and form all intervals $[x-r,x+r]$ in $\underline{M}_{i}$. The union
\begin{equation}\label{eq 9}
N_{v}[r] = \cup_{i}\ (\cup_{x \in p_{i}^{-1}(v)}\ p_{i}([x-r,x+r])) \subset \mathcal{L}
\end{equation}
is a neighbourhood of $v$ in $\mathcal{L}$.
Observe that $N_{v}[0] = \{v\}$. Also, $N_{v}[r] \subset N_{v}[s]$ for $r \leq s$, and $\cup_{r}\ N_{v}[r] = \mathcal{L}$, so that the subsets $N_{v}[r]$ filter the set of words $\mathcal{L}$.
Subject to fixing a positive integer $r$, the set $N_{v}[r]$ is a neighbourhood for $v \in \mathcal{L}$, and the number $d[r](v,w)$ is the weight of $w \in N_{v}[r]$.
As in Section 5, the UMAP construction assembles the weighted neighbourhoods $(N_{v}[r],d[r])$, $v \in \mathcal{L}$, to form the UMAP complex $V(\mathcal{L},N[r])$, an ep-metric space $(\mathcal{L},D[r])$, and a ray complex $R(N[r]) \subset V(\mathcal{L},D[r])$, all of which compute the same clusters.
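The following Python sketch illustrates the construction of the neighbourhood $N_{v}[r]$ and the weights $d[r](v,w)$; it is not part of the formal development. It assumes that the corpus is presented as a list of documents, each already mapped to the vocabulary $\mathcal{L}$ by the function $p$, and the function name \texttt{word\_weights} is an assumption made for the example.
\begin{verbatim}
# Neighbourhood N_v[r] and weights d[r](v, w) for a word v (Section 7).
# "docs" is assumed to be a list of documents, each a list of words,
# i.e. tokens already mapped to the vocabulary L by the function p.

from collections import defaultdict

def word_weights(docs, v, r):
    weights = defaultdict(float)         # w -> d[r](v, w)
    for doc in docs:
        for i, word in enumerate(doc):
            if word != v:
                continue
            lo, hi = max(0, i - r), min(len(doc) - 1, i + r)
            for j in range(lo, hi + 1):  # the interval [i - r, i + r]
                weights[doc[j]] += abs(j - i)   # contributes d(x, y) <= r
    return dict(weights)   # the keys form the neighbourhood N_v[r]
\end{verbatim}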
\section{Sampling}
Suppose that the universal data set $\mathcal{U}$ has an ep-metric space structure, but with no other information.
In this case, one approximates (or discovers) a neighbourhood $N_{x}$ for a given point $x \in \mathcal{U}$ with a brute force method that is based on sampling techniques and construction of $k$-complete neighbourhoods within samples.
Suppose that $Z$ is a randomly chosen subset of $\mathcal{U}$ (a sample), and that $Z$ is tractable in the sense that there is a cardinality bound
$\vert Z \vert \leq M$, where data sets of size at most $M$ can be analyzed by available computational devices.
We assume that $x \in Z$.
For such a subset $Z$ the distance function $d_{x}: Z \to [0,\infty)$, with $d_{x}(z) = d(x,z)$, can be computed, and the image $d_{x}(Z)$ of $d_{x}$ defines a tractable subset of the interval $[0,\infty)$. The set $Z$ is a disjoint union of fibres
\begin{equation*}
Z = \sqcup_{s \in d_{x}(Z)}\ d_{x}^{-1}(s)
\end{equation*}
of the distance function $d_{x}$.
Suppose that $k$ is a fixed choice of positive integer with $k \leq \vert Z \vert$.
The element $x$ has a uniquely defined $k$-complete neighbourhood $N$ in $Z$, as in Section 2, which is the smallest complete neighbourhood $Z(x,s)$ such that $\vert Z(x,s) \vert \geq k$. The neighbourhood $N$ is a union of fibres $d_{x}^{-1}(t)$ for the smallest values of $t$.
This construction can be repeated, in parallel, for an appropriately sized collection of samples $Z_{1}, \dots ,Z_{p}$ that contain $x$, with distance functions $d_{x}: Z_{i} \to [0,\infty)$. Each sample $Z_{i}$ has a uniquely defined (and computable) $k$-complete neighbourhood $N_{i}$ of $x$, and the $k$-complete neighbourhood $N$ of $x$ in the union $\cup_{i}\ Z_{i}$ is the $k$-complete neighbourhood of $x$ in the smaller object $\cup_{i}\ N_{i}$, by Lemma \ref{lem 10}.
There are various ways to invoke the samples $Z_{i}$:
\noindent
1)\ Starting with a $k$-complete neighbourhood $N_{x}$ of $x$ in a sample $Z_{x}$, choose samples $Z_{y}$ for each $y \in N_{x}$, with associated $k$-complete neighbourhoods $N_{y} \subset Z_{y}$. The union $\cup_{y \in N_{x}}\ N_{y}$ contains a $k$-complete neighbourhood $N'_{x}$, which is potentially a better approximation of a $k$-complete neighbourhood of $x$ in the universe $\mathcal{U}$.
This sequence of steps is an analogue of the $k$-nearest neighbour algorithm of \cite{DML}.
\noindent
2)\
The determination of a $k$-complete neighbourhood $N$ of $x$ in $V$ for some $V \subset \mathcal{U}$ can be extended to larger subsets of $\mathcal{U}$, subject to computational constraints, by adding more tractable samples to $V$. This is again a simple application of Lemma \ref{lem 10}.
\noindent
3)\ If $Z_{1}, \dots ,Z_{p}$ is a tractable collection of tractable samples in $\mathcal{U}$, then we can find a $k$-complete neighbourhood $N_{y}$ in $Z = \cup_{i}\ Z_{i}$ for any $y \in Z$. The corresponding subcomplexes $V(N_{y}) \subset V(Z)$ and $R(N_{y}) \subset V(N_{y})$ determine filtered subcomplexes
\begin{equation*}
\cup_{y \in Z}\ R(N_{y}) \subset \cup_{y \in Z}\ V(N_{y}),
\end{equation*}
which lead to a UMAP-style analysis that computes the clusters of $V(Z)$, and approximates the clusters of $V(\mathcal{U})$.
The sampling technique displayed here is completely brute force. It only approximates clusters and neighbourhoods of points, and does not speak to the entire data set $\mathcal{U}$.
The method can be refined in the presence of global constraints, such as the local uniformity assumption of \cite{DML} that produces sets of $k$-nearest neighbours up to a probability estimate.
\nocite{github}
\end{document}
\begin{document}
\title{Inverse limits of finite rank free groups}
\begin{abstract}
We will show that an inverse limit of finite rank free groups with surjective connecting homomorphisms is isomorphic either to a finite rank free group or to a fixed universal group. In other words, any inverse system of finite rank free groups which is not equivalent to an eventually constant system has the universal group as its limit. This universal inverse limit is naturally isomorphic
to the first shape group of the Hawaiian earring. We also give an example
of a homomorphic image of a Hawaiian earring group which lies in the inverse limit of free groups but is neither a free group
nor a Hawaiian earring group.\end{abstract}
One of the first to consider the inverse limits of finite rank free groups
was Higman. In \cite{hig}, he studies the inverse limit of finite rank
free groups which he calls the unrestricted free product of countably
many copies of $\mb Z$. There he proves that this group is not a free
group and that each of its free quotients has finite rank. He considers a
subgroup $P$ of the unrestricted product which turns out to be a Hawaiian
earring group but does not prove it there. In \cite{smit}, de Smit gives a
proof that the Hawaiian earring group embeds in an inverse limit of free
groups and gives a characterization of the elements of the image.
Daverman and Venema in \cite{DV} showed that a one-dimensional Peano
continuum either has the shape of a finite bouquet of circles or of a
Hawaiian earring. Hence any inverse limit of finite rank free groups
that arises from the inverse system of a one-dimensional Peano continuum
is either a finite rank free group or the standard Hawaiian inverse limit.
In Section \ref{seclimit}, we will show that the result of Daverman and
Venema can be generalized in the following way. Every inverse limit of
finite rank free groups with surjective connecting homomorphisms is isomorphic to a free group or the standard Hawaiian inverse limit. Hence being the shape group of a one-dimensional Peano continuum is not necessary.
In \cite{ce}, Conner and Eda show that the fundamental group of a
one-dimensional Peano continuum which is not semilocally simply connected
at any point determines the homotopy type of the space. This was done
using uncountable homomorphic images of Hawaiian earring groups. It was
believed that any uncountable homomorphic image of a Hawaiian earring
group which embedded in an inverse limit of free groups was itself a
Hawaiian earring group. In Section \ref{secimage}, we will show that
there exists an uncountable homomorphic image of a Hawaiian earring group
which embeds in an inverse limit of free groups but is not a Hawaiian
earring group or a free group. This is done by using two propositions
which were originally proved by Higman in \cite{hig} to construct a
homomorphism with our desired image.
\section{Definitions}
A Hawaiian Earring group, which we will denote by $\mathbb H $,
is the fundamental group of the one-point compactification of a
sequence of disjoint open arcs. The Hawaiian earring group is uncountable and locally free. Cannon and Conner in \cite{cc1} and \cite{cc3} showed that the Hawaiian earring group is generated in the sense of infinite products by a countable sequence of loops corresponding to the disjoint arcs, where an infinite product is legal if each loop is traversed only finitely many times. (For more information on infinite products, see \cite{cc3}.) The Hawaiian earring can be realized in the plane as the union of circles centered at $(0,\frac1n)$ with radius $\frac1n$. We will use $\textbf{E}$ to denote this subspace of the plane and $a_n$ to denote the circle centered at $(0,\frac1n)$ with radius $\frac1n$.
The group $\mb H$ is generated, in the sense of infinite products, by an infinite set of loops which correspond to the circles $\{a_n\}$. When there is no chance of confusion, we will refer to this infinite generating set for the fundamental group of $\textbf{E}$ as $\{a_n\}$, i.e. $a_n$ represents the loop which traverses the circle of radius $\frac 1n$ centered at $(0,\frac1n)$ counterclockwise one time. We will frequently denote the base point $(0,0)$ of $\textbf{E}$ by just $0$.
An \emph{inverse system of groups} is a collection of groups $F_\alpha$ indexed by a partially ordered set $J$ along with a collection of homomorphisms $\{\varphi_{\alpha,\beta}:F_\alpha\to F_\beta \ |\ \text{if } \alpha\geq\beta\}$, which are called \emph{connecting homomorphisms}. The connecting homomorphisms must satisfy the following condition. For every triple $\{\alpha, \beta, \gamma\}$ such that $\alpha\geq\beta\geq\gamma$, the connecting homomorphisms satisfy $\varphi_{\beta,\gamma}\circ\varphi_{\alpha,\beta} = \varphi_{\alpha,\gamma}$. The inverse limit of an inverse system is the subgroup of the direct product which consists of functions $f: J\to \bigcup\limits_{\alpha\in J} F_\alpha$ such that $f(\beta)\in F_\beta$ and $f(\beta) =\varphi_{\alpha,\beta}(f(\alpha))$, for every $\alpha,\beta$ such that $\beta \leq \alpha$.
\section{Inverse Limits of Finite Rank Free groups}\label{seclimit}
We will now describe the inverse system of finite rank free groups constructed by Higman. Let $A_i=\langle \mathbf{a}_1, \cdots \mathbf{a}_i\rangle$ be the free group on $i$ generators with the natural inclusions $A_1\subset \cdots \subset A_i\subset A_{i+1}\subset\cdots$. For $i\geq j$, the connecting homomorphism $P_{i,j}: A_i \to A_j$ sends $\mathbf a_k$ to $\mathbf a_k$ for $k\leq j$ and $\mathbf a_k$ to $1$ for $k>j$. We will denote the inverse limit of this system by $\mb G$. We will use $P_i: \mb G \to A_i$ to denote the standard projection homomorphism.
Eda \cite{edaprivate} pointed out to the authors that Proposition \ref{a} doesn't hold in the case that the connecting homomorphisms are not surjective. However, it turns out that the proof of Proposition \ref{a} is still sufficient to enumerate all possible isomorphism types of inverse limits of countable rank free groups (see Remark \ref{remark2}).
\begin{prop}\label{a}
Let $G$ be an inverse limit of finite rank free groups, $F_i$, with surjective connecting homomorphisms indexed over the natural numbers. Then $G$ is isomorphic to $\mb G$ or a finite rank free group.
\end{prop}
The following two lemmas are well known.
\begin{lem}\label{2}
If $G$ is an inverse limit of groups $\{F_\alpha\}$ and $G'$ is the
inverse limit of some cofinal sequence of $\{F_\alpha\}$, then $G$ is
isomorphic to $G'$.
\end{lem}
\begin{lem}\label{remark}
Any morphism of inverse systems which consists of isomorphisms induces an isomorphism of limits.
\end{lem}
\begin{proof}[Proof of Proposition \ref{a}]
Let $\pi_{i,j}$ be the connecting homomorphisms of $G$. If the rank of $F_n$ is eventually constant then the connecting homomorphisms must eventually be isomorphisms (see Proposition 2.12 in \cite{LS}). Hence $G$ is a finite rank free group. Otherwise by passing to a cofinal subsequence, we may assume that the rank of $F_n$ is a strictly increasing sequence.
Let $B_1$ be a basis for $F_1$. By induction, suppose that for all $m<n$, $B_m\cup K_m$ is a basis for $F_m$ such that $\langle K_m\rangle\subset \ker(\pi_{m,m-1})$ and $\pi_{m,m-1}$ maps $B_m$ bijectively onto $B_{m-1}\cup K_{m-1}$.
There exists a free basis $B_n'\cup K_n$ of $F_n$ with the property that $\langle K_n\rangle\subset \ker(\pi_{n,n-1})$ and $\pi_{n,n-1}$ restricted to $\langle B_n'\rangle$ is an isomorphism (again by Proposition 2.12 in \cite{LS}). Now restricting $\pi_{n,n-1}$ to this set we may define $B_n = \bigl(\pi_{n,n-1}|_{\langle B_n'\rangle}\bigr)^{-1}(B_{n-1})$. Since $\pi_{n,n-1}$ restricted to $\langle B_n'\rangle$ is an isomorphism, $B_n$ is a free basis
for $\langle B_n'\rangle$. It is a simple exercise to show that $B_n\cup K_n$
is still a free basis for $F_n$.
It is now trivial to find isomorphisms between the groups $F_n$ and $A_{|B_n\cup K_n|}$ which commute with the connecting homomorphisms. The result follows from Lemma \ref{remark} and Lemma \ref{2}.
\end{proof}
\begin{rmk}\label{remark2}Given a system of countable rank free groups $(F_n, \pi_{n,n-1})$ with inverse limit $G$, one can pass to the inverse system $(\pi_n(G), \pi_{n,n-1})$ without changing the isomorphism type of the inverse limit, where $\pi_n$ is the canonical projection of $G$ to $F_n$. The new system then has surjective connecting homomorphisms. However, $\pi_n(G)$ is a free group of possibly infinite rank. Then, as in the proof of Proposition \ref{a}, we can find a basis $B_n\cup K_n$ of $\pi_n(G)$ with the same properties as before. After passing to a cofinal inverse system, we may assume that $B_n$, $K_n$ are eventually trivial, finite, or countably infinite. Then the isomorphism type of the inverse limit is determined by the cardinalities of $B_n$, $K_n$. This gives five possible isomorphism types. Eda \cite{edaprivate} shows that these five types can all be realized and are distinct.
\end{rmk}
\section{Images of the Hawaiian earring group}\label{secimage}
Note that $A_i$ embeds in $\mb G$ by sending $\mathbf{a}_i $ to the element which is $1$ in the first $i-1$ coordinates and $\mathbf{a}_i$ in all other coordinates. The proof of de Smit in \cite{smit} shows the embedding of $\mb H$ into $\mb G$ sends $a_i$ to $\mathbf a_i$.
We will give $\mb G$ a metric such that $\mb G$ under the induced
topology is a topological group. In \cite{hig}, Higman defines a topology which is equivalent to our metric topology. Let $d_i$ be the
$(0,1)$-metric on $A_i$, i.e. $d_i(x,y)=0$ if $x=y$ and $1$
otherwise. For $(g_n),(h_n)\in\mb G$, let $d((g_n),(h_n)) =
\sum\limits_{n} \frac{1}{2^n}d_n(g_n,h_n)$.
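To make the metric concrete, here is a small Python sketch (ours, not from \cite{hig}); the helper names \texttt{reduce\_word}, \texttt{project} and \texttt{dist} are our own. Elements of $A_n$ are modelled as freely reduced words of signed integers, \texttt{project} plays the role of Higman's $P_{i,j}$, and \texttt{dist} evaluates the metric $d$ on finitely many coordinates.
\begin{verbatim}
# Elements of A_n are modeled as freely reduced tuples of nonzero integers,
# where k stands for the generator a_k and -k for its inverse.

def reduce_word(word):
    """Freely reduce a word by cancelling adjacent inverse pairs."""
    out = []
    for g in word:
        if out and out[-1] == -g:
            out.pop()
        else:
            out.append(g)
    return tuple(out)

def project(word, j):
    """Higman's connecting map P_{i,j}: kill every generator a_k with k > j."""
    return reduce_word(g for g in word if abs(g) <= j)

def dist(g, h, coords=20):
    """Approximate d((g_n),(h_n)) = sum_n 2^{-n} d_n(g_n,h_n), truncated to
    the first `coords` coordinates; d_n is the discrete (0,1)-metric."""
    return sum(2.0 ** (-n) for n in range(1, coords + 1)
               if reduce_word(g(n)) != reduce_word(h(n)))

# A thread of the inverse system: the coordinates of the element of G
# determined by the single letter a_3, compared with the identity.
g = lambda n: project((3,), n)
h = lambda n: ()
print(dist(g, h))   # about 2^{-2} = 0.25: the coordinates differ exactly for n >= 3
\end{verbatim}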
\begin{rmk}\ulabel{converges}{Remark}
Under this topology, $g_i=(g_n^i)$ converges to $h=(h_n)$ if and only if for each $n$ there exists an $M(n)$ such that $g_n^i h_n^{-1}=1$ for all $i\geq M(n)$. It follows that if $g_i$ converges, then $g_ig_{i+1}^{-1}$ must converge to $1$. Conversely, suppose that $g_ig_{i+1}^{-1}$ converges to $1$. Then $g_n^i$ considered as a sequence in $i$ is eventually constant for every $n$. Hence $g_i$ converges.
\end{rmk}
We will leave it to the reader to verify that this topology makes $\mb G$ into a topological group. An interesting note is that under this metric $\mb G$ is complete and all three of the sets $\mb H$, $\mb G -\mb H$, and the free group generated by $\{\mathbf a_i \ | \ i\in \mb N\}$, which we will denote by $\langle\mathbf a_1,\mathbf{a}_2, \cdots\rangle$, are dense.
The following two propositions of Higman demonstrate the elegance of this topology. For completeness and to make the proof readily accessible to the reader, we will include their proofs here.
\begin{prop}Any endomorphism of $\mb G$ is continuous. \end{prop}
\begin{proof}
Let $\varphi: \mb G\to\mb G$ be an endomorphism. Then $d((g_n),(h_n))\leq \frac{1}{2^i}$ if and only if $g_n= h_n$ for all $n\leq i$. Higman in \cite{hig} showed that $P_i\circ\varphi$ factors through some $P_{n(i)}$ (see Theorem 1 there). Cannon and Conner have shown that the same holds true for any $\varphi: \mb H \to F$ where $F$ is a free group (see Theorem 4.4 in \cite{cc3}). Hence, $P_i\circ\varphi = \varphi_i\circ P_{n(i)}$ for some homomorphism $\varphi_i: A_{n(i)}\to A_i$ and some $n(i)$ which depends on $i$. Thus $d((g_n),(h_n)) \leq \frac{1}{2^{n(i)}}$ implies that $d(\varphi((g_n)),\varphi((h_n))) \leq \frac{1}{2^{i}}$.
\end{proof}
\begin{prop}
Any set function $\varphi$ from $\{\mathbf {a}_1,\mathbf{a}_2,\cdots \}$ to $\mb G$ such that $d(\varphi(\mathbf a_i),1)$ converges to $0$ extends to an endomorphism of $\mb G$.
\end{prop}
\begin{proof}
Let $\varphi:\{\mathbf {a}_1,\mathbf{a}_2,\cdots \}\to \mb G$ be a set function such that $d(\varphi(\mathbf a_i),1)$ converges to $0$. Then $\varphi$ will extend to the free group $\langle\mathbf a_1,\mathbf{a}_2, \cdots\rangle\leq \mb G$. We want to be able to extend $\varphi$ to all of $\mb G$.
Suppose that $g_i\in \langle\mathbf a_1,\mathbf{a}_2, \cdots\rangle$ converges in $ \mb G$. We must show that $\varphi(g_i)$ also converges. By \ref{converges}, it is enough to show that $\varphi(g_i)\varphi(g_{i+1})^{-1}$ converges. Since $\varphi$ is a homomorphism on $\langle\mathbf a_1,\mathbf{a}_2, \cdots\rangle$, $\varphi(g_i)\varphi(g_{i+1})^{-1}= \varphi(g_ig_{i+1}^{-1})$. Hence it is sufficient to show that if $g_i\to1$ then $\varphi(g_i)\to 1$.
Let $\varphi(\mathbf a_i) = (\varphi(\mathbf a_i)_n)$. Suppose that $g_i = (g^i_n)$ converges to $1$. Fix $n$. We will show that $\varphi(g_i)_n$ when considered as a sequence in $i$ is eventually trivial. Fix $M$ such that $\varphi(\mathbf a_i)_n = 1$ for all $i\geq M$. Fix $M'$ such that $g^i_n = 1$ for all $i\geq M'$. By construction, $g_i$ is a word $w_i(\{\mathbf a_j\})$ in $\langle\mathbf a_1,\mathbf{a}_2, \cdots\rangle$. Then $\varphi(g_i) = w_i(\{\varphi(\mathbf a_j)\})$; hence, $\varphi(g_i)_n = w_i(\{\varphi(\mathbf a_j)_n\})$. Then for all $i\geq \max\{M,M'\}$, $\varphi(g_i)_n = w_i(\{\varphi(\mathbf a_j)_n\})= 1$.
Suppose that $g_i, g_i'\in \langle\mathbf a_1,\mathbf{a}_2, \cdots\rangle$ converge to the same element of $ \mb G$. Then $g_i'g_i^{-1}$ converges to $1$. Then $\varphi(g_i')\varphi(g_i^{-1}) = \varphi(g_i'g_i^{-1})\to 1$. Thus $\varphi$ extends to a well defined continuous function $\overline\varphi:\mb G\to\mb G$ which is independent of the chosen sequence.
Suppose that $g,h\in\mb G$. Then there exists $g_i,h_i \in \langle\mathbf a_1,\mathbf{a}_2, \cdots\rangle$ such that $g_i\to g$ and $h_i\to h$. Since $\overline\varphi$ is independent of the chosen sequence, $\varphi(g_i)\varphi(h_i) \to \overline\varphi(g)\overline\varphi(h)$ and $\varphi(g_i)\varphi(h_i) = \varphi(g_ih_i) \to \overline\varphi(gh)$. Hence $\varphi$ extends to a homomorphism $\overline\varphi$.
\end{proof}
For a path $\alpha: [0,t]\to X$, we will use $\overline\alpha$ to denote the reverse path, $\overline\alpha(s) = \alpha(t-s)$. We will also use the following theorem.
\begin{thm}\ulabel{cont}{Theorem}[Eda \cite{eda}]
Let $\psi:\mathbb H \to\pi_1(X,x_0)$ be a homomorphism into the fundamental group of a
one-dimensional Peano continuum $X$. Then there exists a continuous
function $f:(\textbf{E},0) \to (X,x)$ and a path $\alpha:(I,0,1)\to (X,x_0,x)$, with the property that $f_* =\widehat\alpha\circ\psi$. Additionally, if the image of $\psi$ is uncountable then $\alpha$ is unique up to homotopy rel endpoints.
\end{thm}
Another proof, as well as a proof for a planar version, of this theorem can be found in the master's thesis of the second author (see \cite{ck}). Cannon and Conner in \cite {cc3} showed that in one-dimensional spaces there exists a unique (up to reparametrization) reduced representative for each path class. We will use $[\cdot]_r$ (or $\varphi(\cdot)_r$) to represent the unique reduced representative for the path class $[\cdot]$ (or $\varphi(\cdot)$).
We are now ready to give our counter example. We will begin by defining a set function $\varphi:\{\mathbf {a}_1,\mathbf{a}_2,\cdots \}\to \mb G$ by
$$\varphi(\mathbf{a}_i)= \begin{cases} \mathbf{a}_{i} & \text{if $i$ is odd}
\\ \mathbf{a}_1\mathbf{a}_{i}{\mathbf{a}_1}^{-1} & \text{if $i\equiv 2\pmod 4$} \\ \mathbf{a}_{i-2} &
\text{if $i\equiv 0\pmod 4$} \end{cases}$$
Then $\varphi$ extends to an endomorphism of $\mb G$ which we may then restrict to the naturally embedded $\mb H$. Thus after extending and restricting, $\varphi: \mb H \to \mb G$ is a homomorphism with uncountable image. Suppose there existed an isomorphism $\psi: \varphi(\mb H) \to \pi_1(\textbf{E},0)$. Then $\psi\circ\varphi: \mb H\to \pi_1(\textbf{E},0)$ is a homomorphism from $\mb H$ to the fundamental group of a one-dimensional Peano continuum which by \ref{cont} must be conjugate to a homomorphism induced by a continuous function. Let $T$ be the path such that $\widehat{T}\circ \psi\circ\varphi$ is induced by a continuous function. Then by construction $\psi\circ\varphi(a_{4i-2})= \psi(a_1)^{-1}\psi(\mathbf{a}_{4i-2})\psi(a_1)$ and $\psi\circ\varphi(a_{4i})= \psi(\mathbf{a}_{4i-2})$ which imply that
\begin{align*}[\overline T* \psi\circ\varphi(a_{4i-2})_r*T] &= [\overline T*\bigl(\psi(a_1)^{-1}\psi(\mathbf{a}_{4i-2})\psi(a_1)\bigr)_r*T] \\ &= [\overline{T}*\bigl(\psi(a_1)^{-1}\psi\circ\varphi(a_{4i})\psi(a_1)\bigr)_r*T]\\ &= [\overline{T}*\overline{\psi(a_1)_r}*T*\overline T* \psi\circ\varphi(a_{4i})_r *T * \overline T *\psi(a_1)_r*T] \\ &= [\bigl(\overline{\overline T*\psi(a_1)_r*T}\bigr)*\overline T* \psi\circ\varphi(a_{4i})_r *T * \bigl(\overline {T} *\psi(a_1)_r*T\bigr)] \\ &= [\overline{\overline T*\psi(a_1)_r*T}]\cdot[\overline T* \psi\circ\varphi(a_{4i})_r *T ]\cdot[\overline {T} *\psi(a_1)_r*T]\end{align*}
By our choice of $T$, $\{[\overline T* \psi\circ\varphi(a_{4i-2})_r*T]_r\}$ and $\{[\overline T* \psi\circ\varphi(a_{4i})_r*T]_r\}$ are null sequences of loops in $\textbf{E}$. However, the second sequence of loops is conjugate to the first by a non-trivial loop $[\overline {T} *\psi(a_1)_r*T]_r$ which is a contradiction.
\end{document}
\begin{document}
\title{Surfaces and hypersurfaces as the joint spectrum of matrices}
\author{Patrick H. DeBonis}
\address{Department of Mathematics and Statistics, University of New Mexico,
Albuquerque, New Mexico 87131, USA}
\curraddr{Department of Mathematics, Purdue University
150 N. University Street, West Lafayette, Indiana 47907, USA}
\author{Terry A. Loring}
\address{Department of Mathematics and Statistics, University of New Mexico,
Albuquerque, New Mexico 87131, USA}
\author{Roman Sverdlov}
\address{Department of Mathematics and Statistics, University of New Mexico,
Albuquerque, New Mexico 87131, USA}
\subjclass{47A13,46L85, 15A18}
\keywords{Clifford spectrum, joint spectrum, emergent topology, Hermitian matrices}
\begin{abstract}
The Clifford spectrum is an elegant way to define the joint spectrum of several
Hermitian operators. While it has been known that for examples as small as three
$2$-by-$2$ matrices the Clifford spectrum can be a two-dimensional manifold,
few concrete examples have been investigated.
Our main goal is to generate examples of the Clifford spectrum of three or four
matrices where, with the assistance of a computer algebra package, we can
calculate the Clifford spectrum.
\end{abstract}
\maketitle
\tableofcontents{}
\section{Introduction}
The Clifford spectrum is one way to extend the concept of joint spectrum
of commuting matrices to work for noncommuting operators. We are only
interested in Hermitian matrices as in the back of our minds we envision
applications to quantum physics and string theory. Given $(X_{1},\dots,X_{d})$,
where the $X_{j}$ are all $n$-by-$n$ Hermitian matrices, we define
a Dirac-type operator
\[
L(X_{1},\dots,X_{d})=\sum X_{j}\otimes\gamma_{j}
\]
where the $\gamma_{j}$ are $d$ matrices that satisfy the Clifford
relations
\begin{equation}
\begin{aligned}
\gamma_{j}^{*} & =\gamma_{j}\quad(\forall j) \\
\gamma_{j}^{2} & =I\quad(\forall j)\\
\gamma_{j}\gamma_{k} & =-\gamma_{k}\gamma_{j}\quad(j\neq k)
\label{eq:gamma_rep}
\end{aligned} .
\end{equation}
We can use $L(X_{1},\dots,X_{d})$ to determine only whether $\boldsymbol{0}$
is in the Clifford spectrum. To find the full spectrum, we shift the
matrices by scalars, and define
\begin{align*}
L_{\boldsymbol{\lambda}}(X_{1},\dots,X_{d}) &=L(X_{1}-\lambda_{1},\dots,X_{d}-\lambda_{d}) \\
&=\sum\left(X_{j}-\lambda_{j}\right)\otimes\gamma_{j}.
\end{align*}
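For concreteness, the following Python sketch (ours, not part of the original text) assembles the spectral localizer for $d=3$ using the Pauli matrices as the Clifford representation; the function name \texttt{localizer} is our own, and \texttt{np.kron} is ordered so as to match the tensor convention described at the end of this introduction.
\begin{verbatim}
import numpy as np

# Pauli matrices: a Clifford representation for d = 3.
sx = np.array([[0, 1], [1, 0]], dtype=complex)
sy = np.array([[0, -1j], [1j, 0]], dtype=complex)
sz = np.array([[1, 0], [0, -1]], dtype=complex)
gammas = [sx, sy, sz]

def localizer(Xs, lam):
    """L_lambda(X_1,...,X_d) = sum_j (X_j - lambda_j) (x) gamma_j.
    Our tensor convention A (x) M corresponds to np.kron(M, A)."""
    n = Xs[0].shape[0]
    return sum(np.kron(g, X - l * np.eye(n)) for X, l, g in zip(Xs, lam, gammas))

# The localizer of the Pauli triple is invertible at the origin,
# and singular at (1,0,0), a point of its Clifford spectrum.
print(np.linalg.eigvalsh(localizer([sx, sy, sz], (0.0, 0.0, 0.0))))
print(min(abs(np.linalg.eigvalsh(localizer([sx, sy, sz], (1.0, 0.0, 0.0))))))
\end{verbatim}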
Due to many clashes of terminology between mathematics and physics,
it seems now prudent, as discussed in \cite{LoringSchuBa_even_AIII}, to
call $L_{\boldsymbol{\lambda}}$
the \emph{spectral localizer} of the $d$-tuple $(X_{1},\dots,X_{d})$.
\begin{defn}
The \emph{Clifford spectrum} of $d$-tuple $(X_{1},\dots,X_{d})$ of Hermitian
matrices is the set of $\lambda$ in $\mathbb{R}^{d}$ such that $L_{\boldsymbol{\lambda}}(X_{1},\dots,X_{d})$
is singular. This is denoted $\Lambda(X_{1},\dots,X_{d})$.
\end{defn}
\begin{rem}
This definition works for Hermitian operators, even when unbounded.
We will focus on the matrix case, except in a few comments and examples.
\end{rem}
It was Kisil \cite{KisilCliffordSpectrum} who noticed that the Clifford spectrum equals
the Taylor spectrum in the case where the $X_{j}$ all commute with
each other. In the case of finite matrices, a singular localizer at
$\boldsymbol{\lambda}$ implies there is a joint eigenvector with
eigenvalues the components of $\boldsymbol{\lambda}$, and this is
exactly what any form of joint spectrum should mean for commuting
finite matrices. We will see a more general result in \S \ref{sec:Bound_variance},
where it is shown that for almost commuting matrices we can associate
to points in the Clifford spectrum vectors with small variance with
respect to each $X_j$.
Kisil also used the theory of monogenic functions to prove that
the Clifford spectrum is always nonempty, and indeed compact.
However, it does not have to be a
finite set when computed for finite matrices that don't commute.
In string theory, the Clifford spectrum is used, but tends to be called
the ``emergent geometry'' \cite{berenstein2012matrix},
or the ``set of probe points''
\cite{SchneiderbauerMeasuringFiniteGeom} etc. In that context, the Clifford spectrum
consists of all the locations
where a fermionic probe of a D brane can lead to low energy resonance.
For some calculations we will look at the square of the localizer. It is important to note that the square of this Dirac-type matrix is not exactly the corresponding Laplace-type matrix.
Indeed, one can calculate \cite{LoringPseudospectra} that
\begin{equation}
\left(L_{\boldsymbol{\lambda}}(X_{1},\dots,X_{d})\right)^{2}
=\sum_{j=1}^{d}\left(X_{j}-\lambda_{j}\right)^{2}\otimes I+\sum_{j<k}[X_{j},X_{k}]\otimes\gamma_{j}\gamma_{k}.
\label{eq:square_of_Localizer}
\end{equation}
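The identity (\ref{eq:square_of_Localizer}) is easy to check numerically. The following sketch (ours) does so for three random Hermitian matrices, again with the Pauli matrices as the Clifford representation.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
n, d = 4, 3
sx = np.array([[0, 1], [1, 0]], dtype=complex)
sy = np.array([[0, -1j], [1j, 0]], dtype=complex)
sz = np.array([[1, 0], [0, -1]], dtype=complex)
gammas = [sx, sy, sz]

def rand_herm(n):
    A = rng.standard_normal((n, n)) + 1j * rng.standard_normal((n, n))
    return (A + A.conj().T) / 2

Xs = [rand_herm(n) for _ in range(d)]
# L(X_1,...,X_d); our convention A (x) M corresponds to np.kron(M, A).
L = sum(np.kron(g, X) for X, g in zip(Xs, gammas))

# Right-hand side of the identity, at lambda = 0.
rhs = sum(np.kron(np.eye(2), X @ X) for X in Xs)
for j in range(d):
    for k in range(j + 1, d):
        rhs += np.kron(gammas[j] @ gammas[k], Xs[j] @ Xs[k] - Xs[k] @ Xs[j])

print(np.linalg.norm(L @ L - rhs))   # zero up to rounding error
\end{verbatim}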
Why not directly use a Laplace-type operator
to define a joint spectrum? It would be correct in the commuting case.
\begin{defn}
The \emph{Laplace spectrum }of Hermitian $d$-tuple $(X_{1},\dots,X_{d})$
is the set of $\lambda$ in $\mathbb{R}^{d}$ such that
\[
\sum_{j=1}^{d}\left(X_{j}-\lambda_{j}\right)^{2}
\]
is singular.
\end{defn}
The Laplace spectrum is used in string theory \cite{SchneiderbauerMeasuringFiniteGeom}.
We will see it has a flaw that keeps it out of general use. In some cases,
when the commutators are small, one might be able to prove that the Laplace spectrum is
a decent approximation of the Clifford spectrum.
An issue with the Clifford spectrum is that it is very hard to
work examples by hand. Looking hard at the math and string theory
literature, we find only a handful of explicit examples where the
Clifford spectrum is known. Indeed, Schneiderbauer and
Steinacker \cite{SchneiderbauerMeasuringFiniteGeom}, and also Sykora \cite{sykora2016fuzzy},
use a computer algebra package for many fuzzy geometry calculations. We are taking
on a similar challenge, using a computer algebra package to find more
examples.
We will primarily use a generalized characteristic polynomial to calculate
the Clifford spectrum of various examples. The generalized characteristic
polynomial probably first appeared in work by Berenstein, Dzienkowski
and Lashof-Regas \cite{berenstein2015spinning}.
\begin{defn}
The \emph{characteristic polynomial} of the $d$-tuple $(X_{1},\dots,X_{d})$
is the polynomial, in real variables $\lambda_{1}\dots,\lambda_{d}$,
\[
\boldsymbol{\lambda}\mapsto\det(L_{\boldsymbol{\lambda}}(X_{1},\dots,X_{d}))
\]
which we denote $\mathrm{char}(X_{1},\dots,X_{d})$.
\end{defn}
The equation $\mathrm{char}(X_{1},\dots,X_{d})=0$ determines
the Clifford spectrum. This can become
a polynomial with many monomials in many variables even in rather modest
examples. Hence the need for a computer assist and an experimental
approach.
Some of the complexity from increasing $d$, the number of matrices, comes from
the fact that the $\gamma_j$ get bigger. It is best to use an irreducible representation
of (\ref{eq:gamma_rep}), which means that each $\gamma$ is $g$-by-$g$ for
\begin{equation*}
g= 2^{\lfloor d/2\rfloor}
\end{equation*}
as one can see from \cite{OkuboCliffordReps}, for example.
The wrong value for
$g$ was used in \cite[\S 1]{LoringPseudospectra} and so the estimates there were not correct as stated. See Section~\ref{sec:Bound_variance}.
Section~\ref{sec:Bound_variance} discusses the variance of joint approximate eigenvalues.
Section~\ref{sec:One-or-two} discusses the cases of one or two matrices (or operators)
where the Clifford spectrum agrees with the ordinary single-operator spectrum.
Section~\ref{sec:Three-Hermitian-matrices} looks at the case of three matrices,
where the Clifford spectrum
can be a surface. This is where we have the most examples, as surfaces
in three space are easy to display.
Section~\ref{sec:Four-Hermitian-matrices} looks at the case of
four matrices, where the calculations and visualization become harder.
Section~\ref{SymmetryClasses} looks at variations
on the localizer and index that assist with plotting and proving the
stability of the Clifford spectrum.
Many of these examples in Section~\ref{sec:Three-Hermitian-matrices}
and the discussion of the archetypal polynomial are from
the thesis of DeBonis \cite{Debonis2019}.
We will use \emph{mathematical notation} throughout. Most importantly, Hermitian matrices are those for which $X^* = X$, so the conjugate transpose is indicated by $X^*$. In several places we will focus on unit vectors, and so have in mind states of a quantum system. Since the word state means something different in operator algebras, we stick to the neutral terminology of unit vectors.
The convention we prefer for identifying a tensor product of matrices with a
larger matrix is the one such that
\begin{equation*}
A\otimes \begin{pmatrix} a& b\\ c & d \end{pmatrix}
=
\begin{pmatrix} aA& bA\\ cA & dA \end{pmatrix}
\end{equation*}
and this is \emph{opposite} of the convention used by the \texttt{KroneckerProduct} operation in
Mathematica.
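Concretely, NumPy's \texttt{np.kron} uses the same convention as \texttt{KroneckerProduct}, so our $A\otimes M$ corresponds to \texttt{np.kron(M, A)}; the following short check (ours) illustrates this.
\begin{verbatim}
import numpy as np

A = np.array([[1, 2], [3, 4]])
M = np.array([[10, 20], [30, 40]])   # plays the role of [[a, b], [c, d]]

# np.kron(A, M) is the standard Kronecker product, with blocks a_ij * M,
# the same convention as Mathematica's KroneckerProduct.
# Our A (x) M = [[a*A, b*A], [c*A, d*A]] is therefore np.kron(M, A).
print(np.array_equal(np.kron(M, A),
                     np.block([[10 * A, 20 * A], [30 * A, 40 * A]])))   # True
\end{verbatim}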
\section{Bounds on variance} \label{sec:Bound_variance}
Suppose $\mathbf{v}$ is a unit vector and $X$ is a Hermitian matrix. Two
important quantities when considering quantum measurement are the expectation
value of $X$ with respect to $\mathbf{v}$
\begin{eqnarray*}
\textnormal{E}(X)_\mathbf{v} = \langle X\mathbf{v}, \mathbf{v} \rangle
\end{eqnarray*}
and the variance of $X$ with respect to $\mathbf{v}$
\begin{eqnarray*}
\textnormal{Var}(X)_\mathbf{v} =
\langle X^2\mathbf{v}, \mathbf{v} \rangle - \langle X\mathbf{v}, \mathbf{v} \rangle ^2 .
\end{eqnarray*}
For any scalar $\lambda$ we have
\begin{equation*}
\left\langle (X-\lambda)^{2}\mathbf{v},\mathbf{v}\right\rangle =\left\langle X^{2}\mathbf{v},\mathbf{v}\right\rangle -2\lambda\left\langle X\mathbf{v},\mathbf{v}\right\rangle +\lambda^{2}
\end{equation*}
and
\begin{equation*}
\left\langle (X-\lambda)\mathbf{v},\mathbf{v}\right\rangle ^{2}
=\left\langle X\mathbf{v},\mathbf{v}\right\rangle ^{2}-2\lambda\left\langle X\mathbf{v},\mathbf{v}\right\rangle +\lambda^{2}
\end{equation*}
so we see that
\begin{equation}
\textnormal{Var}(X - \lambda)_\mathbf{v} = \textnormal{Var}(X)_\mathbf{v}.
\label{eqn:how_Var_shifts}
\end{equation}
On the other hand,
\begin{equation}
\textnormal{E}(X - \lambda)_\mathbf{v} = \textnormal{E}(X)_\mathbf{v} - \lambda .
\label{eqn:how_E_shifts}
\end{equation}
If $\textnormal{Var}(X)_\mathbf{v}=0$ then $\mathbf{v}$ is an eigenvector for $X$ with eigenvalue $\textnormal{E}(X)_\mathbf{v}$.
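These two quantities are straightforward to compute; a short NumPy sketch (ours):
\begin{verbatim}
import numpy as np

def expectation(X, v):
    """E(X)_v = <Xv, v> for a unit vector v."""
    return np.real(np.vdot(v, X @ v))

def variance(X, v):
    """Var(X)_v = <X^2 v, v> - <Xv, v>^2."""
    return np.real(np.vdot(v, X @ (X @ v))) - expectation(X, v) ** 2

X = np.array([[0, 1], [1, 0]], dtype=complex)        # sigma_x
v = np.array([1, 0], dtype=complex)                  # not an eigenvector
w = np.array([1, 1], dtype=complex) / np.sqrt(2)     # eigenvector, eigenvalue 1
print(expectation(X, v), variance(X, v))             # 0.0 1.0
print(expectation(X, w), variance(X, w))             # 1.0 0.0 (Var = 0 at an eigenvector)
\end{verbatim}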
When attempting joint measurement, for observables $X_{1},\dots,X_{d}$, one confronts
often the impossibility of finding any unit vector $\mathbf{v}$ that is
simultaneously an eigenvector for all the observables. There are many
lower bounds on the variances that make this more precise, such as the
Robertson--Schr\"odinger relation bounding the product of the variance of two observables.
A more recent example of such a lower
bound, due to Chen and Fei \cite{Chen2015sumUncertainty}, gives lower bounds on the
sum of $d$ variances.
We look here at upper bounds on the sum of variances. Specifically, we
will derive an estimate on how small we can make the variances if
we choose certain unit vectors that are related to points in the Clifford
spectrum.
\begin{lem}
\label{lem:basic_estimate_on_Xj_and_w}
Suppose $X_{1},\dots,X_{d}$ are Hermitian, $n$-by-$n$ matrices
and $\boldsymbol{\lambda}$ is in $\Lambda(X_{1},\dots,X_{d})$. Then
there is a unit vector $\mathbf{w}$ in $\mathbb{R}^{n}$ such
that
\begin{equation*}
\sum \left\langle \left(X_{j}-\lambda_{j}\right)^{2}\mathbf{w},\mathbf{w}\right\rangle \leq g \sum_{j<k}\left\Vert \left[X_{j},X_{k}\right]\right\Vert
\end{equation*}
for $g=2^{\lfloor d/2\rfloor}$.
\end{lem}
\begin{proof}
Since shifting the $X_{j}$ by $\lambda_{j}$ has no effect on the
commutators, we can reduce to the case of $\boldsymbol{\lambda}=\boldsymbol{0}$.
Assume then that $\bf{0}$ is in the Clifford spectrum of
$X_{1},\dots,X_{d}$. Then there is a unit vector $\mathbf{z}$ in $\mathbb{R}^{gn}$
such that
\begin{equation}
L_{\bf{0}} (X_1, \cdots, X_d)\mathbf{z} = 0
\label{eqn:Lz_assumed_zero}
\end{equation}
One might be tempted to diagonalize $L_{\bf{0}} (X_1, \cdots, X_d)$ so that $\boldsymbol{z}$ can be written down as a column vector with only one single non-zero entry. This, however, would not be the best move: if we change coordinate system, then $X_1 \otimes \gamma_1 + \cdots + X_d \otimes \gamma_d$ would no longer be written in a block form and, therefore, we would no longer be able to isolate $X_j$ and use some of its properties. Therefore, we refrain from diagonalizing and write $\mathbf{z}$ as
\begin{equation}
\mathbf{z}=\begin{bmatrix}
\mathbf{z}_1 \\
\vdots \\
\mathbf{z}_g \end{bmatrix}
\label{z}
\end{equation}
where $\mathbf{z}_k \in \mathbb{R}^n$ for all $k \in \{1, \cdots, g \}$.
From (\ref{eqn:Lz_assumed_zero}) we obtain
$\left(L_{\bf{0}} (X_1, \cdots, X_d)\right)^2\mathbf{z} = 0$. Now (\ref{eq:square_of_Localizer}) tells us
\begin{equation}
\sum_j (X_j^2 \otimes I_g) \mathbf{z} = - \sum_{j<k} ([X_j, X_k] \otimes (\gamma_j \gamma_k)) \mathbf{z} \nonumber
\end{equation}
and therefore
\begin{equation}
\left\| \sum_j X_j^2 \mathbf{z}_r \right\| \leq \sum_{j<k} \|[X_j, X_k]\| \nonumber
\end{equation}
for every $r$.
Now we select $r$ in such a way that it maximizes $\| \mathbf{z}_r \|$ and set
\begin{equation*}
\mathbf{w} = \frac{1}{\|\mathbf{z}_r\|} \mathbf{z}_r .
\end{equation*}
Thus,
\begin{equation}
1 = \| \mathbf{z} \|^2 = \sum_{j=1}^g \| \boldsymbol{z}_j \|^2 \leq g \| \mathbf{z}_r \|^2
\nonumber
\end{equation}
and, therefore, $\| \mathbf{z}_r \| \geq 1 / \sqrt{g}$.
We can now perform the following calculation, using the bound $\| \mathbf{z}_r \|^{2}\geq 1/g$, the Cauchy--Schwarz inequality, and $\| \mathbf{z}_r \| \leq 1$:
\begin{align*}
\sum \left \langle X_j^2 \mathbf{w}, \mathbf{w} \right\rangle
& = \frac{1}{\| \mathbf{z}_r \|^{2}}\left \langle \sum X_j^2 \mathbf{z}_r, \mathbf{z}_r \right\rangle \\
& \leq g \left \langle \sum X_j^2 \mathbf{z}_r, \mathbf{z}_r \right\rangle \\
& \leq g \left\| \sum X_j^2 \mathbf{z}_r \right\| \left\| \mathbf{z}_r \right\| \\
& \leq g \sum_{j<k} \|[X_j, X_k]\|.
\end{align*}
\end{proof}
\begin{thm}
Suppose $X_{1},\dots,X_{d}$ are Hermitian, $n$-by-$n$ matrices
and $\boldsymbol{\lambda}$ is in $\Lambda(X_{1},\dots,X_{d})$. Then
there is a unit vector $\mathbf{w}$ in $\mathbb{R}^{n}$ such
that
\begin{equation*}
\sum_{j=1}^{d}\textnormal{Var}(X_{j})_{\mathbf{w}}
+ \left|\textnormal{E}(X_{j})_{\mathbf{w}}-\lambda_{j}\right|^{2}
\leq g\sum_{j<k}\left\Vert \left[X_{j},X_{k}\right]\right\Vert
\end{equation*}
for $g=2^{\lfloor d/2\rfloor}$.
\end{thm}
\begin{proof}
By (\ref{eqn:how_Var_shifts}) and (\ref{eqn:how_E_shifts}) we can again assume, without loss of generality, that $\boldsymbol{\lambda} = \mathbf{0}$.
By Lemma~\ref{lem:basic_estimate_on_Xj_and_w} there exists a unit
vector $\mathbf{w}$ such that
\begin{equation*}
\sum \left\langle X_{j} ^{2}\mathbf{w},\mathbf{w}\right\rangle \leq g \sum_{j<k}\left\Vert \left[X_{j},X_{k}\right]\right\Vert .
\end{equation*}
For any Hermitian matrix $X$ and unit vector $\mathbf{v}$ we have
\begin{equation*}
\left\langle X^2 \mathbf{v},\mathbf{v}\right\rangle
= \textnormal{Var}(X)_\mathbf{v} + \left(\textnormal{E}(X)_\mathbf{v}\right)^2
\end{equation*}
so in this special case we have
\begin{equation*}
\sum \left(\textnormal{Var}(X_j)_\mathbf{w} + \left(\textnormal{E}(X_j)_\mathbf{w}\right)^2\right) \leq g \sum_{j<k} \left\Vert \left[X_{j},X_{k}\right]\right\Vert .
\end{equation*}
\end{proof}
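The construction used in these proofs is easy to carry out numerically. The sketch below (ours; all helper names are our own) takes the Pauli triple at the spectral point $\boldsymbol{\lambda}=(1,0,0)$, extracts a null vector of the localizer, keeps the largest of its $g$ blocks, and checks the bound of the theorem.
\begin{verbatim}
import numpy as np

sx = np.array([[0, 1], [1, 0]], dtype=complex)
sy = np.array([[0, -1j], [1j, 0]], dtype=complex)
sz = np.array([[1, 0], [0, -1]], dtype=complex)
Xs, gammas, lam = [sx, sy, sz], [sx, sy, sz], (1.0, 0.0, 0.0)
n, g, d = 2, 2, 3                              # g = 2^{floor(d/2)}

# Localizer at lambda; (1,0,0) lies on the unit sphere, the Clifford
# spectrum of the Pauli triple, so L is singular there.
L = sum(np.kron(gam, X - l * np.eye(n)) for X, l, gam in zip(Xs, lam, gammas))
vals, vecs = np.linalg.eigh(L)
z = vecs[:, np.argmin(np.abs(vals))]           # a (numerical) null vector of L

blocks = z.reshape(g, n)                       # z = (z_1, ..., z_g)
r = np.argmax(np.linalg.norm(blocks, axis=1))  # keep the largest block
w = blocks[r] / np.linalg.norm(blocks[r])

lhs = sum(np.real(np.vdot(w, (X - l * np.eye(n)) @ ((X - l * np.eye(n)) @ w)))
          for X, l in zip(Xs, lam))
rhs = g * sum(np.linalg.norm(Xs[j] @ Xs[k] - Xs[k] @ Xs[j], 2)
              for j in range(d) for k in range(j + 1, d))
print(lhs <= rhs, lhs, rhs)
\end{verbatim}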
For larger matrices, it will be difficult to determine the exact location of the
Clifford spectrum. A more practical approach is to find $\boldsymbol{\lambda}$ that are
in the (Clifford) $\epsilon$-pseudospectrum of $X_{1},\dots,X_{d}$, denoted
$\Lambda_\epsilon(X_{1},\dots,X_{d})$ as defined in \cite{LoringPseudospectra}.
By definition, $\boldsymbol{\lambda}$ is in $\Lambda_\epsilon(X_{1},\dots,X_{d})$ whenever
\begin{equation}
\label{eqn:pseusospectrum}
\left\| \left( L_{\boldsymbol{\lambda}} (X_1, \dots, X_d)\right)^{-1} \right\|^{-1} \leq \epsilon.
\end{equation}
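For small examples this quantity, which is the smallest singular value of $L_{\boldsymbol{\lambda}}$, can simply be tabulated on a grid; here is a minimal sketch (ours) for the Pauli triple.
\begin{verbatim}
import numpy as np

sx = np.array([[0, 1], [1, 0]], dtype=complex)
sy = np.array([[0, -1j], [1j, 0]], dtype=complex)
sz = np.array([[1, 0], [0, -1]], dtype=complex)
Xs, gammas = [sx, sy, sz], [sx, sy, sz]

def smallest_singular_value(lam):
    """|| L_lambda^{-1} ||^{-1}, i.e. the smallest singular value of L_lambda."""
    L = sum(np.kron(g, X - l * np.eye(2)) for X, l, g in zip(Xs, lam, gammas))
    return np.linalg.svd(L, compute_uv=False)[-1]

eps = 0.1
grid = np.linspace(-1.5, 1.5, 31)
points = [(a, b, 0.0) for a in grid for b in grid
          if smallest_singular_value((a, b, 0.0)) <= eps]
# For the Pauli triple these points cluster near the circle a^2 + b^2 = 1,
# the z = 0 slice of the unit sphere.
print(len(points))
\end{verbatim}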
In this paper, we will not use the function
\begin{equation*}
\boldsymbol{\lambda} \mapsto \left\| \left( L_{\boldsymbol{\lambda}} (X_1, \dots, X_d)\right)^{-1} \right\|^{-1}
\end{equation*}
to estimate the Clifford spectrum. Notice, however, that (\ref{eqn:pseusospectrum}) is
equivalent to the existence of a unit vector $\mathbf{z}$ such that
\begin{equation}
\label{eqn:Lz_approx_zero}
\left\| L_{\boldsymbol{\lambda}} (X_1, \dots, X_d)\, \mathbf{z} \right\| \leq \epsilon.
\end{equation}
This can be proven easily if one considers a unitary diagonalization of the localizer, which is itself Hermitian.
It is rather easy to compute a unit vector that satisfies (\ref{eqn:Lz_approx_zero})
and such vectors can be interesting, as we now show.
\begin{lem}
\label{lem:second_estimate_on_Xj_and_w}
Suppose $X_{1},\dots,X_{d}$ are Hermitian, $n$-by-$n$ matrices
and there is a unit vector $\mathbf{z}$ in $\mathbb{R}^{gn}$ such that
\rm{(\ref{eqn:Lz_approx_zero})} holds for some $\epsilon \geq 0$. Then
there is a unit vector $\mathbf{w}$ in $\mathbb{R}^{n}$ such
that
\begin{equation*}
\sum \left\langle \left(X_{j}-\lambda_{j}\right)^{2}\mathbf{w},\mathbf{w}\right\rangle \leq \epsilon^2 + g \sum_{j<k}\left\Vert \left[X_{j},X_{k}\right]\right\Vert
\end{equation*}
for $g=2^{\lfloor d/2\rfloor}$.
\end{lem}
\begin{proof}
The proof proceeds essentially the same as the proof of Lemma~\ref{lem:basic_estimate_on_Xj_and_w}.
The first difference is we find that
\begin{equation}
\left\| \sum_j X_j^2 \mathbf{z}_r \right\| \leq \epsilon^2 + \sum_{j<k} \|[X_j, X_k]\| \nonumber
\end{equation}
for every $r$, again with the $ \mathbf{z}_r$ the $g$ components of $ \mathbf{z}$.
\end{proof}
The following now follows from Lemma~\ref{lem:second_estimate_on_Xj_and_w} by the same
argument as above. Notice that the method to produce $\mathbf{w}$ from $\mathbf{z}$ is to select the block of $\mathbf{z}$ that is largest and normalize it.
\begin{thm}
Suppose $X_{1},\dots,X_{d}$ are Hermitian, $n$-by-$n$ matrices
and there is a unit vector $\mathbf{z}$ in $\mathbb{R}^{gn}$ such that
\rm{(\ref{eqn:Lz_approx_zero})} holds for some $\epsilon \geq 0$. Then
there is a unit vector $\mathbf{w}$ in $\mathbb{R}^{n}$ such
that
\begin{equation*}
\sum_{j=1}^{d}\textnormal{Var}(X_{j})_{\mathbf{w}}
+ \left|\textnormal{E}(X_{j})_{\mathbf{w}}-\lambda_{j}\right|^{2}
\leq \epsilon + g\sum_{j<k}\left\Vert \left[X_{j},X_{k}\right]\right\Vert
\end{equation*}
for $g=2^{\lfloor d/2\rfloor}$.
\end{thm}
\section{One or two Hermitian matrices} \label{sec:One-or-two}
For one or two Hermitian matrices, the concept of Clifford spectrum overlaps with the usual concept of spectrum of a matrix.
In the case of a single matrix $X$, we can take as Clifford representation
\begin{equation}
\gamma_1 = 1 \label{eqn:CliffordRepOne}
\end{equation}
which means the localizer is just
\begin{equation*}
L_\lambda = X - \lambda
\end{equation*}
with $\lambda$ a real variable.
Since all the eigenvalues of $X$ are real, this makes no real difference
and so the new characteristic polynomial
$\det(L_\lambda)$
is the usual characteristic polynomial.
Thus $\Lambda(X)$ is just the ordinary spectrum of $X$.
The case of two Hermitian matrices $(X,Y)$ also deviates only in
technical ways from an ordinary spectrum. We will see right away that
it is essentially the spectrum of $X+iY$.
We can take here for Clifford representation
\begin{equation}
\gamma_1=\begin{bmatrix}
0 & 1\\
1 & 0
\end{bmatrix},
\
\gamma_2=\begin{bmatrix}
0 & -i\\
i & 0
\end{bmatrix} .
\label{eq:CliffordRepTwo}
\end{equation}
The localizer then becomes
\begin{equation*}
L_{(r,s)}(X,Y)
=\begin{bmatrix}
0 & \left((X-r)+i(Y-s)\right)^{*}\\
\left((X-r)+i(Y-s)\right) & 0
\end{bmatrix}
\end{equation*}
and so
\[
\left|\det\left(L_{(r,s)}(X,Y)\right)\right| =\left|\det\left((X+iY)-(r+is)\right)\right|^{2}.
\]
If we use a complex variable $z=r+is$ on the right that becomes the
square of the absolute value of the usual characteristic polynomial
of $X+iY$. Therefore
\begin{equation}
\label{eqn:twofoldCliffordVSspectrumXplusiY}
(r,s)\in\Lambda(X,Y) \iff r+is\in\sigma(X+iY).
\end{equation}
\begin{example}
Consider the two matrices
\[
X=\begin{bmatrix}
0 & 1\\
1 & 0
\end{bmatrix} ,\quad Y=\begin{bmatrix}
0 & -i\\
i & 0
\end{bmatrix} .
\]
Then
\[
X+iY=\begin{bmatrix}
0 & 2\\
0 & 0
\end{bmatrix}
\]
which has spectrum $\{0\}$. Thus the Clifford spectrum of $(X,Y)$
is just the set $\{(0,0)\}$. On the other hand, the Laplace spectrum
is the zero set of
\begin{align*}
\det\left(\begin{bmatrix}
-r & 1\\
1 & -r
\end{bmatrix} ^{2}
+\begin{bmatrix}
-s & -i\\
i & -s
\end{bmatrix} ^{2}\right)
& =\det\begin{bmatrix}
2+r^{2}+s^{2} & -2r+2is\\
-2r-2is & 2+r^{2}+s^{2}
\end{bmatrix} \\
& =4+r^{4}+2r^{2}s^{2}+s^{4}.
\end{align*}
The \emph{Laplace spectrum is the empty set} in this simple example.
Here endeth our interest in the Laplace spectrum.
\end{example}
\begin{prop}
For two Hermitian matrices of size $n$, the Clifford spectrum is
a finite set, with between $1$ and $n$ points as elements.
\end{prop}
\begin{proof}
This follows easily by the equivalence of the Clifford spectrum of
two Hermitian matrices with the ordinary spectrum of a single matrix.
\end{proof}
\begin{prop}
For $d$ commuting Hermitian matrices of size $n$, the Clifford spectrum
is a finite set, with between $1$ and $n$ points as elements.
\end{prop}
\begin{proof}
Now we use the equivalence of the Clifford spectrum of commuting Hermitian
matrices with the ordinary joint spectrum. The appropriate version
of the spectral theorem tells us the joint spectrum is a nonempty
finite set of at most $n$ points.
\end{proof}
The argument leading to the equivalence (\ref{eqn:twofoldCliffordVSspectrumXplusiY})
is valid for Hermitian operators as well. One example is worth examining.
\begin{example}
Let $P$ and $Q$ be the classical position and momentum
operators on $L^{2}(\mathbb{R})$, so
\[
Qf(x)=xf(x),\quad Pf(x)=-if'(x).
\]
We will see that the joint Clifford spectrum $\Lambda(P,Q)$ is all of
$\mathbb{R}^{2}$.
This is because of its relation with the spectrum of $P+iQ$. Looking
more closely, let us look for eigenvectors, so $f$ with
\[
(Q+iP)f=(r+is)f.
\]
(If we look at the whole localizer, we need to solve
\[
\begin{bmatrix}
0 & \left(Q-r\right)-i\left(P-s\right)\\
\left(Q-r\right)+i\left(P-s\right) & 0
\end{bmatrix}
\begin{bmatrix}
g\\
f
\end{bmatrix}
=
\begin{bmatrix}
0\\
0
\end{bmatrix}
\]
which is essentially the same.)
This translates to
\[
f'(x)=\left(\alpha-x\right)f(x)
\]
where $\alpha=r+is$. Then
\[
f(x)=e^{-\frac{1}{2}(x-r)^{2} + isx}
\]
is a (non-normalized) square-integrable solution to this differential equation for
$\alpha = r+is$. Such a Gaussian is well known to have limited deviation in
position and momentum, so the spectral localizer method captures what we would
expect in this example.
\end{example}
The previous example is in some sense the limit as $n \rightarrow \infty$
of an example we consider in Section~\ref{sec:Four-Hermitian-matrices}. There
the four Hermitian matrices are the Hermitian and anti-Hermitian parts of the usual clock and shift unitary matrices.
What physicists call the clock and shift, mathematicians often call Voiculescu's unitaries. We
want $U$ to be the cyclic shift and $V$ to be a diagonal unitary with eigenvalues winding around the unit circle, specifically as follows.
For each $n \geq 2$ we define these two $n$-by-$n$ unitary matrices as
\begin{equation}
\label{eqn:define_U}
U_n= \begin{bmatrix} 0 & & & & 1 \\ 1 & 0 & & & \\ & \ddots & \ddots & & \\ & & 1 & 0 \\ & & & 1 & 0 \end{bmatrix}
\end{equation}
and
\begin{equation}
\label{eqn:define_V}
V = \begin{bmatrix} e^{2 \pi i/n} & & & & \\ & e^{4 \pi i/n} & & & \\ & & \ddots & & \\ & & & e^{2 \pi i (n-1)/n} \\ & & & & 1 \end{bmatrix}.
\end{equation}
Now arguing heuristically, and from a physics perspective, suppose that space is compactified. Suppose space has diameter $L$, and further suppose that it is discretized, with lattice spacing $\epsilon$. If $k$ is the row number, $k \in \{1, \cdots, n \}$, we have
\begin{equation*} k = \frac{x}{\epsilon} \; , \; n = \frac{L}{\epsilon} \end{equation*}
Therefore,
\begin{equation*} U \approx 1 - \epsilon \frac{\partial}{\partial x} = 1 -i \epsilon p \end{equation*}
and
\begin{equation*} V_{kk} = e^{2 \pi i k/n} = e^{2\pi i x/L} \approx 1+ \frac{2\pi i x}{L} .
\end{equation*}
This suggests that the joint spectrum of $U$ and $V$ roughly corresponds to the joint spectrum of $p$ and $x$, provided we look for eigenvalues that are very large rather than very small. If the size of $U$ and $V$ gets larger and larger, the number of eigenvalues increases as well, which intuitively explains why in the limit we will get a continuous spectrum.
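For later use, here is a short NumPy sketch (ours) that builds $U_n$ and $V_n$ as in (\ref{eqn:define_U}) and (\ref{eqn:define_V}), forms their Hermitian and anti-Hermitian parts, and records how far those parts are from commuting.
\begin{verbatim}
import numpy as np

def clock_shift(n):
    """The shift U_n and the clock V_n of (eqn:define_U), (eqn:define_V)."""
    U = np.roll(np.eye(n), 1, axis=0)   # ones on the subdiagonal, one in the corner
    V = np.diag(np.exp(2j * np.pi * np.arange(1, n + 1) / n))
    return U, V

n = 5
U, V = clock_shift(n)
X1, X2 = (U.conj().T + U) / 2, 1j * (U.conj().T - U) / 2   # parts of U
X3, X4 = (V.conj().T + V) / 2, 1j * (V.conj().T - V) / 2   # parts of V
Xs = [X1, X2, X3, X4]

comms = [np.linalg.norm(A @ B - B @ A, 2)
         for i, A in enumerate(Xs) for B in Xs[i + 1:]]
# The commutators tend to zero as n grows, so the four Hermitian matrices
# almost commute for large n.
print(max(comms))
\end{verbatim}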
\section{Three Hermitian matrices } \label{sec:Three-Hermitian-matrices}
In the case of three matrices, there is a range of interesting examples for which we can plot the Clifford spectrum using a computer algebra package. We use the Pauli spin matrices for the Clifford representation, so that
\begin{equation}
\gamma_1=\begin{bmatrix}
0 & 1\\
1 & 0
\end{bmatrix},
\
\gamma_2=\begin{bmatrix}
0 & -i\\
i & 0
\end{bmatrix},
\
\gamma_3=\begin{bmatrix}
1 & 0\\
0 & -1
\end{bmatrix} .\label{eq:CliffordRepThree}
\end{equation}
The localizer now becomes,
\begin{equation*}
L_{(x, y, z)}(A,B,C)
= \begin{bmatrix}
(C -zI) & (A -xI) -i(B-yI) \\
(A -xI) + i(B-yI) & -(C -zI)
\end{bmatrix} .
\end{equation*}
\begin{figure}
\caption{The rotated lemniscate arising as the Clifford spectrum of the rescaled Pauli matrices in Example~\ref{exa:lemniscate}.\label{fig:lem}}
\end{figure}
\begin{example}
\label{exa:Pauli_sphere}
The first example with Clifford spectrum a surface was due to Kisil \cite{KisilCliffordSpectrum}, and we repeat that here. The Pauli spin matrices themselves are the three Hermitian matrices we consider. The following can be computed
by hand, but using symbolic algebra is preferred. We find
\begin{equation*}
\text{char} \left(\sigma_x, \sigma_y, \sigma_z \right)
= (x^2 + y^2 + z^2 - 1)(x^2 + y^2 + z^2 +3)
\end{equation*}
and that here the Clifford spectrum is the unit sphere.
\end{example}
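The same computation can be reproduced symbolically with SymPy rather than Mathematica; a sketch (ours, with the helper \texttt{kron\_paper} implementing our tensor convention) follows.
\begin{verbatim}
import sympy as sp

x, y, z = sp.symbols('x y z', real=True)
sx = sp.Matrix([[0, 1], [1, 0]])
sy = sp.Matrix([[0, -sp.I], [sp.I, 0]])
sz = sp.Matrix([[1, 0], [0, -1]])
I2 = sp.eye(2)

def kron_paper(A, M):
    """Our tensor convention: the (i, j) block of A (x) M is M[i, j] * A."""
    out = sp.zeros(A.rows * M.rows, A.cols * M.cols)
    for i in range(M.rows):
        for j in range(M.cols):
            out[i * A.rows:(i + 1) * A.rows, j * A.cols:(j + 1) * A.cols] = M[i, j] * A
    return out

L = (kron_paper(sx - x * I2, sx) + kron_paper(sy - y * I2, sy)
     + kron_paper(sz - z * I2, sz))
print(sp.factor(sp.simplify(sp.expand(L.det()))))
# expected: (x**2 + y**2 + z**2 - 1)*(x**2 + y**2 + z**2 + 3)
\end{verbatim}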
\begin{example}
\label{exa:lemniscate}
A slight modification of the previous example leads to the Clifford spectrum
being a surface but not a manifold. We simply rescale some of the Pauli Spin matrices and consider $\tfrac{1}{2} \sigma_x$, $\sigma_y$ and $\tfrac{1}{2} \sigma_z$. The characteristic polynomial is now
\begin{equation*}
\textnormal{char} \left(\tfrac{1}{2} \sigma_x, \sigma_y, \tfrac{1}{2} \sigma_z \right)
= (x^2 + y^2 + z^2 )^2 + 2z^2 +2x^2 - y^2
\end{equation*}
Since $(x^2 + y^2)^2 + 2x^2 - y^2 = 0$ describes a lemniscate of Bernoulli, the surface here is a rotated lemniscate as illustrated by Figure \ref{fig:lem}.
\end{example}
Mathematica and other computer algebra programs can produce accurate and compelling pictures of the Clifford spectrum in many examples, but there are limitations.
Some rather simple examples can lead to the plot being incomplete, as we
will demonstrate. We are asking a computer to verify that a certain set is infinite, which is too big of a request. Two methods are available to verify the results of some
examples. The first is to factor the characteristic polynomial and identify the zero-sets of the factors, which might be impossible. The second is to employ the information we get from the $K$-theory indices associated to almost commuting matrices \cite{LoringPseudospectra}. These generally must be zero when the Clifford spectrum is a finite set, so
calculating a single index can tell us that a certain spectrum is an infinite set.
The index we start with is the most basic of those introduced in \cite{LoringPseudospectra}. It is defined in terms of the signature. For an invertible Hermitian matrix, the signature is the number of positive eigenvalues, minus the number of negative eigenvalues, of that matrix.
\begin{defn}
The \emph{index at} $\boldsymbol{\lambda}$ for an ordered triple of non-commuting Hermitian matrices $X_1, X_2, X_3$ is defined only when $\boldsymbol{\lambda}$ is not in $\Lambda(X_1, X_2, X_3),$ and is given by,
\[
\textnormal{Ind}_{\boldsymbol{\lambda}} (X_1, X_2, X_3) = \dfrac{1}{2} \textnormal{Sig} \left(L_{\boldsymbol{\lambda}} (X_1, X_2, X_3) \right).
\]
\end{defn}
\begin{figure}
\caption{The Clifford spectrum of the matrices from Example~\ref{exa:higher_lemniscate}.
\label{fig:FS5}}
\end{figure}
The index at the origin is $1$ for the Pauli spin matrices, as in
Example~\ref{exa:Pauli_sphere}.
Inside either lobe of the lemniscate example this index is also $1$.
These facts can be calculated by hand, or one can
see the supplemental files \texttt{PauliSpinTwoSphere.*} and \texttt{Lemniscate.*} for the calculations.
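A numerical sketch (ours) of this index computation, using the eigenvalue signs of the localizer:
\begin{verbatim}
import numpy as np

sx = np.array([[0, 1], [1, 0]], dtype=complex)
sy = np.array([[0, -1j], [1j, 0]], dtype=complex)
sz = np.array([[1, 0], [0, -1]], dtype=complex)
gammas = [sx, sy, sz]

def clifford_index(Xs, lam):
    """Ind_lambda(X_1,X_2,X_3) = (1/2) Sig(L_lambda); lam must avoid the spectrum."""
    n = Xs[0].shape[0]
    L = sum(np.kron(g, X - l * np.eye(n)) for X, l, g in zip(Xs, lam, gammas))
    vals = np.linalg.eigvalsh(L)
    return int(np.sum(vals > 0) - np.sum(vals < 0)) // 2

print(clifford_index([sx, sy, sz], (0, 0, 0)))   # 1 at the origin
print(clifford_index([sx, sy, sz], (2, 0, 0)))   # 0 outside the unit sphere
\end{verbatim}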
Consider a path $\boldsymbol{\lambda}_t$ in $\mathbb{R}^3$ with fixed $X_1, X_2, X_3$, and
assume that
\begin{equation*}
\textnormal{Ind}_{\boldsymbol{\lambda}_{t_0}} (X_1, X_2, X_3)
\neq
\textnormal{Ind}_{\boldsymbol{\lambda}_{t_1}} (X_1, X_2, X_3).
\end{equation*}
Since the localizer is Hermitian, the only way for this change to occur is if
the localizer becomes singular at some intermediate $t$. Thus any path between two
points with differing index must cross the Clifford spectrum.
It is easy to prove that if $\|\boldsymbol{\lambda}\|$ is larger than
$\|L_{\mathbf{0}}{(X_1, X_2, X_3)}\|$ then the index at $\boldsymbol{\lambda}$
equals zero.
Thus proving the index to be nonzero at a single point shows
that the Clifford spectrum separates that point from infinity.
This proves that in that instance the Clifford spectrum is
not a finite set.
Already with $2$-by-$2$ matrices, we start to see interesting topology emerge. Moving up to $5$-by-$5$ and $6$-by-$6$ and taking paths of Hermitian matrices, we see the suggestions of interesting patterns. Here we present some of what we found. We encourage the reader to use our Mathematica supplemental files, or the SageMath code listing in \cite{sykora2016fuzzy}, as a basis to explore more examples.
\begin{figure}
\caption{The Clifford spectrum of the matrices in Example~\ref{exa:torus2sphere}, for four values of the inner radius $r$.\label{fig:TS5}}
\end{figure}
\begin{example}
\label{exa:higher_lemniscate}
Berenstein, Dzienkowski, and Lashof-Regas \cite{berenstein2012matrix,berenstein2015spinning} looked at
the matrices generating a fuzzy sphere. We consider here similar matrices,
\[
A = \begin{bmatrix} 2 & 0 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & -1 & 0 \\ 0 & 0 & 0 & 0 & -2 \end{bmatrix},
\
B = \begin{bmatrix} 0 & \tfrac{1}{4} & 0 & 0 & 0 \\ \tfrac{1}{4} & 0 & \tfrac{1}{4} & 0 & 0 \\ 0 & \tfrac{1}{4} & 0 & \tfrac{1}{4} & 0 \\ 0 & 0 & \tfrac{1}{4} & 0 & \tfrac{1}{4} \\ 0 & 0 & 0 & \tfrac{1}{4} & 0 \end{bmatrix},
\
C = \begin{bmatrix} 0 & -\tfrac{i}{4} & 0 & 0 & 0 \\ \tfrac{i}{4} & 0 & -\tfrac{i}{4} & 0 & 0 \\ 0 & \tfrac{i}{4} & 0 & -\tfrac{i}{4} & 0 \\ 0 & 0 & \tfrac{i}{4} & 0 &-\tfrac{i}{4} \\ 0 & 0 & 0 & \tfrac{i}{4} & 0 \end{bmatrix}.
\]
By rescaling one of these matrices, we were able to see a higher iteration of the lemniscate surface. Specifically we looked along the path $(tA,B,C).$ We show in Figure~\ref{fig:FS5} the Clifford spectrum at some points along this path.
\end{example}
\begin{figure}
\caption{A two-holed torus, and a deformation of that, arising as the Clifford
spectrum of the three matrices in Example~\ref{exa:changing_genus}.
\label{fig:two_holes_to_one}}
\end{figure}
\begin{example}
\label{exa:torus2sphere}
This example is similar to one in \cite{berenstein2015spinning}, illustrating a transition in the Clifford spectrum between a torus and a sphere. As we want a torus, it is not surprising we start with the clock $V=V_n$ and shift $U=U_n$ unitaries from (\ref{eqn:define_U}) and (\ref{eqn:define_V}). In Section~\ref{sec:Four-Hermitian-matrices} we will consider the Clifford spectrum of four Hermitian matrices and see again a torus. Here we want three matrices, so inspired by the usual parameterization of a torus embedded in three-space we define
\begin{eqnarray*}
\begin{aligned}
A &= \tfrac{1}{2} \left(R + \tfrac{r}{2} U^* + \tfrac{r}{2} U \right) V^*
+ \tfrac{1}{2} V \left(R+ \tfrac{r}{2} U^* +\tfrac{r}{2} U \right) \\
B &=\tfrac{i}{2} \left(R + \tfrac{r}{2} U^* + \tfrac{r}{2} U \right) V^* - \tfrac{i}{2} V \left(R+ \tfrac{r}{2} U^* +\tfrac{r}{2} U\right) \\
C &= \tfrac{ri}{2} U^* - \tfrac{ri}{2}U.
\end{aligned}
\end{eqnarray*}
We compute this specifically with $n=5$, outer radius $R=0.9$ and variable inner radius $r$.
Four values of $r$ lead to the Clifford spectra shown in Figure~\ref{fig:TS5}.
\end{example}
\begin{example}
\label{exa:changing_genus}
Taking a hint from \cite{sykora2016fuzzy} we consider
\begin{eqnarray*}
\begin{aligned}
X&=\begin{bmatrix}
\frac{4}{5} & \frac{1}{2} & \frac{1}{2}\\
\frac{1}{2} & 0 & & \frac{1}{2}\\
\frac{1}{2} & & \frac{8}{5} & \frac{r}{2} & \frac{1}{2}\\
& \frac{1}{2} & \frac{r}{2} & \frac{4}{5} & & \frac{1}{2}\\
& & \frac{1}{2} & & \frac{12}{5} & \frac{1}{2}\\
& & & \frac{1}{2} & \frac{1}{2} & \frac{8}{5}
\end{bmatrix},
\quad
Y=\begin{bmatrix}
0 & -\frac{i}{2} & -\frac{i}{2}\\
\frac{i}{2} & 0 & & -\frac{i}{2}\\
\frac{i}{2} & & 0 & -\frac{ir}{2} & -\frac{i}{2}\\
& \frac{i}{2} & \frac{ir}{2} & 0 & & -\frac{i}{2}\\
& & \frac{i}{2} & & 0 & -\frac{i}{2}\\
& & & \frac{i}{2} & \frac{i}{2} & 0
\end{bmatrix}\\
Z& =\begin{bmatrix}
0\\
& \frac{13}{10}\\
& & \frac{13}{10}\\
& & & \frac{13}{5}\\
& & & & \frac{13}{5}\\
& & & & & \frac{39}{10}
\end{bmatrix}
\end{aligned}
\end{eqnarray*}
which, for $r=1$, is the smallest triple of matrices Sykora found that had
Clifford spectrum a two-holed torus.
We computed numerically that, for $r=1$, the index at the point $(2, 0, 0.25)$ inside the two-holed torus is nonzero, confirming that we actually have a surface and not a cloud of points.
The plots of the Clifford spectrum for several values of $r$ are shown in
Figure~\ref{fig:two_holes_to_one}.
\end{example}
\section{Four Hermitian matrices } \label{sec:Four-Hermitian-matrices}
We need to make a choice of $\gamma_1, \dots, \gamma_4$, and warn the reader that these are related to but not equal to the Dirac matrices. The Dirac matrices square sometimes to $1$ and sometimes to $-1$. Here we need the relations (\ref{eq:gamma_rep}) which
dictate that the matrices are all Hermitian and square to $1$. Moreover, we have no use
for a $\gamma_0$ as we just want a linearly independent set. We use the Pauli spin matrices for convenience, but there is no connection here with the spin of a particle.
Our choice here is as follows.
\begin{equation}
\label{eqn:four_gammas}
\begin{aligned}
\gamma_1 &= \sigma_x \otimes (-\sigma_y)
= \begin{bmatrix}
0 & 0 & 0 & i \\
0 & 0 & i & 0 \\
0 & -i & 0 & 0 \\
-i & 0 & 0 & 0 \\
\end{bmatrix}
, \quad
\gamma_2 = \sigma_y \otimes (-\sigma_y)
= \begin{bmatrix}
0 & 0 & 0 & 1 \\
0 & 0 & -1 & 0 \\
0 & -1 & 0 & 0 \\
1 & 0 & 0 & 0 \\
\end{bmatrix}
\\
\gamma_3 &= \sigma_z \otimes (-\sigma_y)
= \begin{bmatrix}
0 & 0 & i & 0 \\
0 & 0 & 0 & -i \\
-i & 0 & 0 & 0 \\
0 & i & 0 & 0 \\
\end{bmatrix}
, \quad
\gamma_4 = I_2 \otimes (\sigma_x)
= \begin{bmatrix}
0 & 0 & 1 & 0 \\
0 & 0 & 0 & 1 \\
1 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 \\
\end{bmatrix}
\end{aligned}
\end{equation}
The advantage these have is that each $\gamma_j$ is block off-diagonal. We can thus define
the \emph{reduced localizer}
\begin{equation}
\label{eqn:reduced_localizer}
\widetilde{L}_{\boldsymbol{\lambda}}(X_1,X_2,X_3,X_4)
= \sum_{k=1}^4 (X_k - \lambda_k) \otimes \widetilde{\gamma}_k
\end{equation}
in terms of the upper-right blocks of the $\gamma_j$. Thus
\begin{equation*}
\widetilde{\gamma}_1 = i\sigma_x, \
\widetilde{\gamma}_2 = i\sigma_y, \
\widetilde{\gamma}_3 = i\sigma_z, \
\widetilde{\gamma}_4 = I_2.
\end{equation*}
With this notation, the localizer becomes
\begin{equation*}
L_{\boldsymbol{\lambda}}(X_1,X_2,X_3,X_4)
=
\begin{bmatrix}
0 & \widetilde{L}_{\boldsymbol{\lambda}}(X_1,X_2,X_3,X_4) \\
(\widetilde{L}_{\boldsymbol{\lambda}}(X_1,X_2,X_3,X_4))^* & 0
\end{bmatrix}
\end{equation*}
and the characteristic polynomial can be computed via the formula
\begin{equation*}
\left|\textnormal{char}_{\boldsymbol{\lambda}}(X_1,X_2,X_3,X_4) \right|
= \left| \det\left( \widetilde{L}_{\boldsymbol{\lambda}}(X_1,X_2,X_3,X_4) \right) \right|^2
\end{equation*}
Thus we have what we call the \emph{reduced characteristic polynomial}
\begin{equation*}
\det\left( \widetilde{L}_{\boldsymbol{\lambda}}(X_1,X_2,X_3,X_4) \right)
\end{equation*}
and we can compute the Clifford spectrum by setting that to zero.
In computer calculations, especially, we use $(w,x,y,z)$ in place of $(\lambda_1,\lambda_2,\lambda_3,\lambda_4)$.
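As an illustration, the following SymPy sketch (ours; the helper \texttt{kron\_paper} is our own) computes the reduced characteristic polynomial for the $n=3$ clock-and-shift matrices that appear in Theorem~\ref{thm:torus_in_4_space} below, and separates its imaginary part as in Table~\ref{tab:The-imaginary-parts}.
\begin{verbatim}
import sympy as sp

w, x, y, z = sp.symbols('w x y z', real=True)
n = 3
U = sp.Matrix(n, n, lambda i, j: 1 if (i - j) % n == 1 else 0)            # shift
V = sp.diag(*[sp.exp(2 * sp.pi * sp.I * (k + 1) / n) for k in range(n)])  # clock
X = [(U.H + U) / 2, sp.I * (U.H - U) / 2, (V.H + V) / 2, sp.I * (V.H - V) / 2]
lam = [w, x, y, z]

sx = sp.Matrix([[0, 1], [1, 0]])
sy = sp.Matrix([[0, -sp.I], [sp.I, 0]])
sz = sp.Matrix([[1, 0], [0, -1]])
gt = [sp.I * sx, sp.I * sy, sp.I * sz, sp.eye(2)]    # the reduced gammas

def kron_paper(A, M):
    """Our tensor convention: the (i, j) block of A (x) M is M[i, j] * A."""
    out = sp.zeros(A.rows * M.rows, A.cols * M.cols)
    for i in range(M.rows):
        for j in range(M.cols):
            out[i * A.rows:(i + 1) * A.rows, j * A.cols:(j + 1) * A.cols] = M[i, j] * A
    return out

Lred = sum((kron_paper(X[k] - lam[k] * sp.eye(n), gt[k]) for k in range(4)),
           sp.zeros(2 * n, 2 * n))
poly = sp.expand(Lred.det())
re_part, im_part = poly.as_real_imag()
print(sp.factor(sp.simplify(im_part)))
# expected: a multiple of w**2 + x**2 - y**2 - z**2, as in the first table
\end{verbatim}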
We have three examples, with Clifford spectrum zero-dimensional, two-dimensional, and three-dimensional. The case of two-dimensional Clifford spectrum in four-space is the most difficult, as such a spectrum will not separate a point from infinity. This means there will be no possible $K$-theory argument, and we are stuck with examining a complicated characteristic polynomial. The significance of the reduced characteristic polynomial is that it cuts down by half the degree of the polynomial we must study.
To get a torus in four space, we are able to use the Hermitian and anti-Hermitian parts of
the clock and shift unitaries. These are all symmetric matrices (equal under the transpose
$(\mbox{--})^\mathrm{T}$) except the imaginary part of the shift, which is anti-symmetric.
The following lemma helps simplify things with that symmetry.
\begin{lem}
\label{lem:poly_symmetry_from_symmetry}
Suppose that $X_1,X_2,X_3,X_4$ are Hermitian matrices, that $X_1$, $X_3$ and $X_4$ are symmetric and $X_2$ is anti-symmetric. Then
\begin{equation*}
\det\left( \widetilde{L}_{(\lambda_1,-\lambda_2,\lambda_3,\lambda_4)}(X_1,X_2,X_3,X_4) \right)
=
\det\left( \widetilde{L}_{(\lambda_1,\lambda_2,\lambda_3,\lambda_4)}(X_1,X_2,X_3,X_4) \right).
\end{equation*}
\end{lem}
\begin{proof}
We observe that
\begin{equation*}
\widetilde{\gamma}_k ^\mathrm{T} =
\begin{cases}
\widetilde{\gamma}_k & \mbox{if } k\neq 2 \\
- \widetilde{\gamma}_k & \mbox{if } k=2
\end{cases}
\end{equation*}
and similarly we have the assumption
\begin{equation*}
X_k ^\mathrm{T} =
\begin{cases}
X_k & \mbox{if } k\neq 2 \\
- X_k & \mbox{if } k=2.
\end{cases}
\end{equation*}
so we get that every term $X_k \otimes \widetilde{\gamma}_k $ is symmetric. On
the other hand, every term $\lambda_k I_n \otimes \widetilde{\gamma}_k$ is
symmetric except for $k=2$, where that term is anti-symmetric.
Let $\epsilon_k = 1$ for $k \neq 2$ and $\epsilon_2 = -1$.
Then we have
\begin{align*}
\left(\sum_{k=1}^4 (X_k - \lambda_k) \otimes \widetilde{\gamma}_k \right)^\mathrm{T}
&=
\left(\sum_{k=1}^4 X_k \otimes \widetilde{\gamma}_k \right)^\mathrm{T}
-
\left(\sum_{k=1}^4 \lambda_k I_n \otimes \widetilde{\gamma}_k \right)^\mathrm{T}\\
&= \sum_{k=1}^4 X_k \otimes \widetilde{\gamma}_k
- \sum_{k=1}^4 \epsilon_k \lambda_k I_n \otimes \widetilde{\gamma}_k \\
&= \sum_{k=1}^4 (X_k - \epsilon_k \lambda_k) \otimes \widetilde{\gamma}_k
\end{align*}
Since the transpose does not affect the determinant, the result follows.
\end{proof}
\begin{table}
\begin{tabular}{|c||c|}
\hline
$n$ & Imaginary part of reduced characteristic polynomial\tabularnewline
\hline
\hline
$3$ & $\left(w^{2}+x^{2}-y^{2}-z^{2}\right)\left(\frac{3}{2}\sqrt{3}\right)$\tabularnewline
\hline
\hline
$4$ & $\left(w^{2}+x^{2}-y^{2}-z^{2}\right)\left(4w^{2}+4x^{2}+4y^{2}+4z^{2}+8\right)$\tabularnewline
\hline
\hline
$5$ & $\left(w^{2}+x^{2}-y^{2}-z^{2}\right)\left(\frac{5}{2}\sqrt{\frac{1}{2}\left(65+29\sqrt{5}\right)}+[\cdots]+\frac{5}{2}\sqrt{\frac{1}{2}\left(5+\sqrt{5}\right)}z^{4}\right)$\tabularnewline
\hline
\hline
$6$ & $\left(w^{2}+x^{2}-y^{2}-z^{2}\right)\left(\frac{3}{2}\sqrt{3}\left(w^{2}+x^{2}+y^{2}+z^{2}+2\right)\left([\cdots]\right)\right)$\tabularnewline
\hline
\end{tabular}
\caption{The imaginary parts of the reduced characteristic polynomials used
in the proof of Theorem~\ref{thm:torus_in_4_space}. For the full polynomials
and how they are calculated, see the supplementary files \texttt{torus\_4\_n*.*}, in particular the variable \texttt{impoly}.
\label{tab:The-imaginary-parts}}
\end{table}
\begin{table}
\begin{tabular}{|c||c|}
\hline
$n$ & Effective real part of reduced characteristic polynomial\tabularnewline
\hline
\hline
$3$ & $(-2\cos(3\phi)-2\cos(3\theta))r^{3}+8r^{6}+12r^{4}+3r^{2}-1$\tabularnewline
\hline
\hline
$4$ & $r^{4}(-2\cos(4\phi)-2\cos(4\theta)+20)+16r^{8}+32r^{6}-4$\tabularnewline
\hline
\hline
$5$ & $32r^{10}+80r^{8}+\left(65+5\sqrt{5}\right)r^{6}+(-2\cos(5\phi)-2\cos(5\theta))r^{5}+[\cdots]$\tabularnewline
\hline
\hline
$6$ & $64r^{12}+192r^{10}+240r^{8}+(-2\cos(6\phi)-2\cos(6\theta)+148)r^{6}+9r^{4}-54r^{2}-27$\tabularnewline
\hline
\end{tabular}\caption{Real parts of the reduced characteristic polynomials used
in the proof of Theorem~\ref{thm:torus_in_4_space}. See the supplementary files \texttt{torus\_4\_n*.*}, in particular the variable \texttt{altpoly}.
\label{tab:Real-parts}}
\end{table}
\begin{table}
\begin{tabular}{|c||c|}
\hline
$n$ & Derivatives in $r$ of the Effective real parts\tabularnewline
\hline
\hline
$3$ & $(-6\cos(3\phi)-6\cos(3\theta))r^{2}+48r^{5}+48r^{3}+6r$\tabularnewline
\hline
\hline
$4$ & $r^{3}(-8\cos(4\phi)-8\cos(4\theta)+80)+128r^{7}+192r^{5}$\tabularnewline
\hline
\hline
$5$ & $320r^{9}+640r^{7}+\left(390+30\sqrt{5}\right)r^{5}+(-10\cos(5\phi)-10\cos(5\theta))r^{4}+[\cdots]$\tabularnewline
\hline
\hline
$6$ & $768r^{11}+1920r^{9}+1920r^{7}+(-12\cos(6\phi)-12\cos(6\theta)+888)r^{5}+36r^{3}-108r$\tabularnewline
\hline
\end{tabular}\caption{Derivatives in $r$ of the function in Table~\ref{tab:Real-parts}.
\label{tab:Real-parts-Deriv}}
\end{table}
\begin{thm}
\label{thm:torus_in_4_space}
Suppose $n$ equals $3$, $4$, $5$ or $6$, and define
\begin{equation*}
\begin{aligned}
X_1 &= \tfrac{1}{2}U_n^* + \tfrac{1}{2}U_n
, \quad
X_2 &= \tfrac{i}{2}U_n^* - \tfrac{i}{2}U_n\\
X_3 &= \tfrac{1}{2}V_n^* + \tfrac{1}{2}V_n
, \quad
X_4 &=\tfrac{i}{2}V_n^* - \tfrac{i}{2}V_n\\
\end{aligned}
\end{equation*}
where $U_n$ and $V_n$ are the clock and shift unitaries as in {\rm(\ref{eqn:define_U})} and {\rm (\ref{eqn:define_V})}. Then the Clifford spectrum of $(X_1,X_2,X_3,X_4)$ is
homeomorphic to a two-torus.
\end{thm}
\begin{proof}
We would like to solve for where the determinant of the reduced localizer is zero,
\begin{equation}
\det\left( \widetilde{L}_{\boldsymbol{\lambda}}(X_1,X_2,X_3,X_4) \right) =0.
\end{equation}
We will do that in the following way. First, we will find the condition for the imaginary part of this determinant to be zero. Then, after setting the imaginary part to zero, we will show that the real part takes both positive and negative values, which implies that it crosses zero at some point. At such a point both the real and imaginary parts vanish, so the determinant is zero.
We used computer algebra to calculate and simplify the reduced characteristic
polynomial, with results as shown in Table~\ref{tab:The-imaginary-parts}.
In all cases, the condition $\Im \det \widetilde{L}_{(w,x,y,z)}= 0$ becomes
\begin{equation}
w^2+x^2=y^2+z^2.
\label{eqn:one_radius}
\end{equation}
We now apply Lemma~\ref{lem:poly_symmetry_from_symmetry} and deduce we have $(w,x,y,z)$ in the Clifford spectrum if, and only if, $(w,-x,y,z)$ is in the Clifford spectrum. Thus we are justified in assuming $x \geq 0$. With this assumption, the condition $\Im \det \widetilde{L}_{(w,x,y,z)}= 0$ becomes
\begin{equation*}
x = \sqrt{-w^2 + y^2+z^2}.
\end{equation*}
This means we can eliminate $x$ in the polynomial $\Re \det \widetilde{L}_{(w,x,y,z)}$ via the substitution
\begin{equation*}x \mapsto \sqrt{-w^2 + y^2+z^2} .
\end{equation*}
With this substitution, we get a somewhat more reasonable polynomial. In the
case of $n=3$ it is
\begin{equation*}
\begin{gathered}
-8 w^3+3 z^2 \left(2 w+8 y \left(y^3+y\right)+2 y+1\right) \\
+6 w y^2+8 y^6+12 y^4-2 y^3 \\
+12 \left(2 y^2+1\right) z^4+3 y^2+8z^6-1
\end{gathered}
\end{equation*}
and for $n=4,5,6$ this polynomial has too many terms to easily display, but it
can be seen as \texttt{realpoly} in the supplementary files
\texttt{torus\_4\_n*.*}.
Inspired by (\ref{eqn:one_radius}) we switch to polar coordinates in the first two and also the last two variables, as we know the radius will be the same. That is, we
make the substitution
\begin{equation}
\begin{aligned}
w &= r \cos \theta,
\quad
x &= r \sin \theta \\
y &= r \cos \phi ,
\quad
z &= r \sin \phi \\
\end{aligned}
\label{eqn:polar_coord}
\end{equation}
and find the computer does a much better job simplifying.
The Clifford spectrum will be the zero set of the functions
shown in Table~\ref{tab:Real-parts},
interpreted via (\ref{eqn:polar_coord}).
The function in the $n=5$ case was too long for the table, but can be
seen as \texttt{altpoly} in the supplementary files
\texttt{torus\_4\_n5.*}.
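As an independent cross-check of the $n=3$ entry (this is not the supplementary code), one can verify symbolically that the polynomial displayed above, after the substitution (\ref{eqn:polar_coord}), agrees with the corresponding entry of Table~\ref{tab:Real-parts}. A minimal SymPy sketch follows.
\begin{verbatim}
import sympy as sp

r, th, ph = sp.symbols('r theta phi', real=True)
w, y, z = r*sp.cos(th), r*sp.cos(ph), r*sp.sin(ph)

# real part of the determinant for n = 3, after eliminating x
p = (-8*w**3 + 3*z**2*(2*w + 8*y*(y**3 + y) + 2*y + 1)
     + 6*w*y**2 + 8*y**6 + 12*y**4 - 2*y**3
     + 12*(2*y**2 + 1)*z**4 + 3*y**2 + 8*z**6 - 1)

# the n = 3 entry of Table tab:Real-parts
f = (-2*sp.cos(3*ph) - 2*sp.cos(3*th))*r**3 + 8*r**6 + 12*r**4 + 3*r**2 - 1

# the difference vanishes identically
diff = sp.expand(sp.expand_trig(p - f))
diff = diff.subs(sp.sin(ph)**2, 1 - sp.cos(ph)**2)
assert sp.simplify(diff) == 0
\end{verbatim}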
Now we finish the proof for the case $n=4$, which is the easiest case.
Let's denote the relevant function from
Table~\ref{tab:Real-parts} by $f(r, \theta, \phi)$, so
\begin{equation*}
f(r, \theta, \phi) = -4+32r^6+16r^8+(20-2 \cos (4 \phi) - 2 \cos (4 \theta))r^4 \end{equation*}
and its $r$ derivative is
\begin{equation*}
\frac{\partial f}{\partial r}
= 192 r^5 +128 r^7 + (80 - 8 \cos (4 \phi) - 8 \cos (4 \theta)) r^3 .
\end{equation*}
Since cosine is bounded between $-1$ and $1$, we see that, for any
angles $\phi$ and $\theta$,
$\frac{\partial f}{\partial r} > 0 $
for all $r>0$, and so $f(r, \theta, \phi)$ is strictly increasing in $r$ for $r\geq 0$.
By observing that
\begin{equation*}
f (0, \theta, \phi) = -4
\end{equation*}
and
\begin{equation*}
\lim_{r \rightarrow \infty} f (r, \theta, \phi) = \infty
\end{equation*}
we know that, for any fixed $(\theta, \phi)$, there exists at least one value of $r$ for which $f (r, \theta, \phi) =0$, and the fact that $\partial f/ \partial r>0$ implies that this value of $r$ is unique.
Call this value $\rho(\theta, \phi)$, so
\begin{equation*}
f(\rho(\theta, \phi), \theta, \phi) = 0 .
\end{equation*}
Thus, the surface we are looking for is precisely the surface $r = \rho(\theta, \phi)$, which is
indeed topologically equivalent to a torus: $\rho(\theta, \phi)$ varies
continuously in $\theta$ and $\phi$, since the roots of a polynomial
vary continuously with respect to the coefficients \cite{harrisRootAreContinuous}.
The resulting surface is illustrated in Figure~\ref{fig:torus_R4_n3}.
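For illustration, and independently of the supplementary files, the root $\rho(\theta, \phi)$ can be computed numerically by bisection; the following minimal sketch samples the resulting surface as points of $\mathbb{R}^{4}$.
\begin{verbatim}
import numpy as np

def f_n4(r, theta, phi):
    # the n = 4 function from Table tab:Real-parts, via (eqn:polar_coord)
    return -4 + 32*r**6 + 16*r**8 + (20 - 2*np.cos(4*phi) - 2*np.cos(4*theta))*r**4

def rho(theta, phi, tol=1e-12):
    # unique root in r, by bisection: f_n4(0,.,.) = -4 and f_n4(1,.,.) >= 60
    lo, hi = 0.0, 1.0
    while hi - lo > tol:
        mid = 0.5 * (lo + hi)
        lo, hi = (mid, hi) if f_n4(mid, theta, phi) < 0 else (lo, mid)
    return 0.5 * (lo + hi)

angles = np.linspace(0, 2*np.pi, 60)
torus = np.array([[r*np.cos(t), r*np.sin(t), r*np.cos(p), r*np.sin(p)]
                  for t in angles for p in angles for r in [rho(t, p)]])
\end{verbatim}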
Now we look at the case $n=3$.
The relevant function from
Table~\ref{tab:Real-parts} is
\begin{equation*}
f(r, \theta, \phi) = (-2\cos(3\phi)-2\cos(3\theta))r^{3}+8r^{6}+12r^{4}+3r^{2}-1
\end{equation*}
with derivative in $r$ being
\begin{equation*}
\frac{\partial f}{\partial r} = (-6\cos(3\phi)-6\cos(3\theta))r^{2}+48r^{5}+48r^{3}+6r
\end{equation*}
For $0 < r \leq \tfrac{1}{2} $ we have the estimate
\begin{align*}
\frac{\partial f}{\partial r} & > (-6\cos(3\phi)-6\cos(3\theta))r^{2} + 6r \\
& \geq (-12r + 6)r \geq 0
\end{align*}
and for $r \geq \tfrac{1}{2}$ we have the estimate
\begin{align*}
\frac{\partial f}{\partial r} & > (-6\cos(3\phi)-6\cos(3\theta))r^{2}+48r^{3} \\
& \geq (-12 +48r)r^{2} \geq 0
\end{align*}
so the derivative is positive for all $r>0$ and vanishes only at $r=0$. The rest of the
proof follows as in the case $n=4$.
The resulting surface is illustrated in Figure~\ref{fig:torus_R4_n4}.
For the case $n=5$ one can prove that for $0\leq r\leq \tfrac{3}{5}$,
\begin{equation*}
f(r, \theta, \phi) \leq -2
\end{equation*}
and, for $\tfrac{3}{5} \leq r \leq 1 $,
\begin{equation*}
\frac{\partial f}{\partial r} \geq 33
\end{equation*}
so again we see that for each pair of angles there is only one radius
to make this function zero.
The work to create these two estimates is shown in the
supplementary files
\texttt{torus\_4\_n5.*}.
For the case $n=6$ one can prove that for $0\leq r\leq \tfrac{3}{5}$,
\begin{equation*}
f(r, \theta, \phi) \leq -20
\end{equation*}
and, for $\tfrac{3}{5} \leq r \leq 1 $,
\begin{equation*}
\frac{\partial f}{\partial r} \geq 42
\end{equation*}
so again we see that for each pair of angles there is only one radius
to make this function zero.
The work to create these two estimates is shown in the
supplementary files
\texttt{torus\_4\_n6.*}.
\end{proof}
\begin{figure}
\caption{The Clifford spectrum as a surface in four space. The top and bottom
represent half the surface, with color indicating the value in the
fourth dimension.
\label{fig:torus_R4_n3}}
\end{figure}
\begin{figure}
\caption{The Clifford spectrum as a surface in four space, for the Hermitian matrices extracted from the clock and shift matrices,
with $n=4$.
\label{fig:torus_R4_n4}}
\end{figure}
\begin{example}
In Example~\ref{exa:Pauli_sphere} we saw that the Clifford spectrum of the gamma matrices is a sphere. Taking the Clifford spectrum of the four gamma matrices (\ref{eqn:four_gammas}) gives a somewhat different answer.
The supplementary file \texttt{GammaMatrices\_4B.*} contains the symbolic calculations
showing that for these four matrices the reduced characteristic polynomial is
\begin{equation*}
\left(w^2+x^2+y^2+z^2\right)^3 \left(w^2+x^2+y^2+z^2+8\right)
\end{equation*}
and so the Clifford spectrum is a single point.
\end{example}
\begin{figure}
\caption{The Clifford spectrum in Example~\ref{exa:rescaledGamma}.
\label{fig:almostThreeSphere}}
\end{figure}
\begin{example}
\label{exa:rescaledGamma}
Now we look at a rescaling of the four gamma matrices (\ref{eqn:four_gammas}),
\begin{equation*}
X_1 = 2 \gamma_1 ,\quad
X_2 = \gamma_2 ,\quad
X_3 = \gamma_3 ,\quad
X_4 = \gamma_4 ,
\end{equation*}
and find, in supplementary file \texttt{GammaMatrices\_4A.*}, that the
reduced characteristic polynomial is
\begin{equation*}
(9 + 6 R^2 + R^4 - 6 w^2 + 2 R^2 w^2 + w^4) (-15 + 14 R^2 + R^4 +
2 w^2 + 2 R^2 w^2 + w^4)
\end{equation*}
where $R = \sqrt{x^2 + y^2 + z^2}$. For this example, the Clifford spectrum
is homeomorphic to the three-sphere. See Figure~\ref{fig:almostThreeSphere}.
\end{example}
\section{Symmetry classes and $K$-theory charges}
\label{SymmetryClasses}
\subsection{Where the index and plotting fail}
\begin{figure}
\caption{An example where we cannot trust the plot via the characteristic
polynomial. This is using matrices as in Example~\ref{exa:bad_plot_example}.
\label{fig:Two_spheres_bad_plotting}}
\end{figure}
The index gives us critical information about the surfaces
we have plotted. Sometimes, however, the Clifford spectrum is a surface
while the index is zero everywhere it is defined, and in those situations
the computer plotting can fail.
\begin{example}
\label{exa:bad_plot_example}
The three matrices we consider are as follows:
\begin{equation}
\label{eq:null_plot_example}
X=\begin{bmatrix}
0 & 1 & 0 & 0\\
1 & 0 & 0 & 0\\
0 & 0 & 0 & 1\\
0 & 0 & 1 & 0
\end{bmatrix},\,Y=\begin{bmatrix}
0 & -i & 0 & 0\\
i & 0 & 0 & 0\\
0 & 0 & 0 & i\\
0 & 0 & -i & 0
\end{bmatrix},\,Z=\begin{bmatrix}
1 & 0 & 0 & 0\\
0 & -1 & 0 & 0\\
0 & 0 & 1 & 0\\
0 & 0 & 0 & -1
\end{bmatrix} .
\end{equation}
Since the characteristic polynomial respects direct sums, it is easy to see
from Example \ref{exa:Pauli_sphere} that the characteristic polynomial
is
\begin{equation*}
\text{char} \left(X, Y, Z \right)
= (x^2 + y^2 + z^2 - 1)^2(x^2 + y^2 + z^2 +3)^2
\end{equation*}
so the Clifford spectrum is the unit sphere. Also, by looking at the
direct sum structure, one can check that the index is zero at the origin.
Thus the index is zero everywhere it is defined.
Figure~\ref{fig:Two_spheres_bad_plotting}
looks at the plot Mathematica makes using the characteristic polynomial
for
\begin{equation}
X_{r}=\begin{bmatrix}
0 & 1 & 0 & 0\\
1 & 0 & 0 & 0\\
0 & 0 & r & 1\\
0 & 0 & 1 & r
\end{bmatrix},\,Y_{r}=\begin{bmatrix}
0 & -i & 0 & 0\\
i & 0 & 0 & 0\\
0 & 0 & 0 & i\\
0 & 0 & -i & 0
\end{bmatrix},\,Z_{r}=\begin{bmatrix}
1 & 0 & 0 & 0\\
0 & -1 & 0 & 0\\
0 & 0 & 1 & 0\\
0 & 0 & 0 & -1
\end{bmatrix}\label{eq:bad_plot_matrices}
\end{equation}
for various small values of $r$, and also at $r=0$. At $r=0$ the output
is the null plot, which is wrong.
\end{example}
\subsection{A refined index in the case of self-dual symmetry}
\begin{figure}
\caption{The self-dual matrices from Example~\ref{exa:path_of_self_dual}.
\label{fig:Sphere_to_lemniscate_Pfaffian}}
\end{figure}
The matrices in (\ref{eq:null_plot_example})
have an extra symmetry that went unused: they are all self-dual, a mathematical
interpretation of having fermionic time-reversal symmetry.
Recall that the dual operation is defined as,
\begin{equation*}
X^{\#} = {\begin{bmatrix} A & B \\ C & D \end{bmatrix}}^{\#} = \begin{bmatrix} D^T & -B^T \\ -C^T & A^T \end{bmatrix},
\end{equation*}
where $A, B, C,$ and $D$ are square complex matrices. When a matrix $X$ is self-dual and Hermitian, we have both $X^{\#}=X$ and $X^*=X$.
If we have three matrices that are Hermitian and self-dual, we find that the
localizer has an extra symmetry. In this case, there is a matrix $Q$ that conjugates the spectral localizer nicely, given by
\begin{equation*}
Q = \begin{bmatrix} I_{2n} & -iZ_{2n} \\ iZ_{2n} & I_{2n} \end{bmatrix}
\end{equation*}
where
\begin{equation*}
Z_{2n} = \begin{bmatrix} 0 & I_{n} \\ -I_{n} & 0 \end{bmatrix}.
\end{equation*}
Conjugating the spectral localizer by the unitary matrix $\tfrac{1}{\sqrt{2}}Q$ leaves the determinant unchanged. That is,
\begin{equation*}
\Big( \tfrac{1}{\sqrt{2}}Q \Big)^* L_{\boldsymbol{\lambda}} ( A, B, C) \Big( \tfrac{1}{\sqrt{2}}Q \Big) = \tfrac{1}{2}Q^*L_{\boldsymbol{\lambda}} ( A, B, C)Q
\end{equation*}
and
\begin{equation*}
\text{det} \left( \tfrac{1}{2}Q^*L_{\boldsymbol{\lambda}} ( A, B, C)Q \right) = \text{det}( L_{\boldsymbol{\lambda}}(A,B,C))=\text{char}_{\boldsymbol{\lambda}}(A,B,C).
\end{equation*}
Using Lemma~8.1 of \cite{loring2012factorization} we confirm that the conjugation produces a skew-symmetric representation of the localizer, and therefore
\begin{equation*}
\left( \tfrac{1}{2}Q^*L_{\boldsymbol{\lambda}} ( A, B, C)Q \right)^\mathrm{T} = -\tfrac{1}{2}Q^*L_{\boldsymbol{\lambda}} ( A, B, C)Q .
\end{equation*}
We can now use the Pfaffian instead of the determinant to
detect where the localizer is singular.
\begin{defn}
The \textit{archetypal polynomial} of a self-dual Hermitian triple $( X, Y, Z)$ is defined as
\begin{equation*}
\textnormal{arch}_{\boldsymbol{\lambda}}(X, Y, Z) = \textnormal{Pf}\left( \tfrac{1}{2}Q^*L_{\boldsymbol{\lambda}} (X, Y, Z)Q \right).
\end{equation*}
\end{defn}
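In practice the archetypal polynomial can be evaluated numerically once the skew-symmetric matrix $\tfrac{1}{2}Q^*L_{\boldsymbol{\lambda}} (X, Y, Z)Q$ has been assembled. As a minimal sketch (this is not the supplementary code), the Pfaffian of a small even-dimensional skew-symmetric matrix can be computed by the standard expansion along the first row.
\begin{verbatim}
import numpy as np

def pfaffian(A):
    """Pfaffian of a small even-dimensional skew-symmetric matrix,
    by expansion along the first row."""
    A = np.asarray(A)
    n = A.shape[0]
    if n == 0:
        return 1.0
    if n % 2 == 1:
        return 0.0
    total = 0.0
    for j in range(1, n):
        keep = [k for k in range(n) if k not in (0, j)]
        total += (-1) ** (j + 1) * A[0, j] * pfaffian(A[np.ix_(keep, keep)])
    return total

# sanity check: Pf(A)^2 = det(A) for a skew-symmetric matrix A
rng = np.random.default_rng(0)
B = rng.standard_normal((8, 8))
A = B - B.T
assert np.isclose(pfaffian(A) ** 2, np.linalg.det(A))
\end{verbatim}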
\begin{example}
\label{exa:path_of_self_dual}
We look at a different path that starts with
the troublesome matrices of (\ref{eq:null_plot_example}). For $0\leq s\leq\tfrac{1}{2}$ we define matrices
\begin{equation}
\begin{aligned}
X_{s}= & \begin{bmatrix}
0 & 1-2s & 0 & s\\
1-2s & 0 & -s & 0\\
0 & s & 0 & 1-2s\\
-s & 0 & 1-2s & 0
\end{bmatrix}
,\quad
Y_{s}= \begin{bmatrix}
0 & -i & 0 & 0\\
i & 0 & 0 & 0\\
0 & 0 & 0 & i\\
0 & 0 & -i & 0
\end{bmatrix}, \\
Z_{s}= &\begin{bmatrix}
1-s & 0 & 0 & 0\\
0 & -1+s & 0 & 0\\
0 & 0 & 1-s & 0\\
0 & 0 & 0 & -1+s
\end{bmatrix}
\end{aligned}
\label{eq:SelfDualPath}
\end{equation}
which are self-dual and Hermitian.
Here the plotting looks a lot better, shown in Figure~\ref{fig:Sphere_to_lemniscate_Pfaffian}.
Also, we can calculate a $\mathbb{Z}_{2}$ invariant, the sign of
the archetypal polynomial. Again, this is known to be trivial ($+1$)
far from the origin, and so a value of $-1$ of the invariant disallows
finite cardinality of the Clifford spectrum.
\end{example}
\subsection{An index for even and odd matrices}
Moving up a dimension, consider
\begin{equation}
\label{eqn:even_odd}
\begin{aligned}
X & =\begin{bmatrix}
0 & 2 & 0 & 0\\
2 & 0 & 0 & 0\\
0 & 0 & 0 & -2\\
0 & 0 & -2 & 0
\end{bmatrix}
, \quad
Y =\begin{bmatrix}
0 & -i & 0 & 0\\
i & 0 & 0 & 0\\
0 & 0 & 0 & -i\\
0 & 0 & -i & 0
\end{bmatrix},\\
Z & =\begin{bmatrix}
1 & 0 & 0 & 0\\
0 & -1 & 0 & 0\\
0 & 0 & -1 & 0\\
0 & 0 & 0 & 1
\end{bmatrix}
, \quad
H =\begin{bmatrix}
0 & 0 & 1 & 0\\
0 & 0 & 0 & 1\\
1 & 0 & 0 & 0\\
0 & 1 & 0 & 0
\end{bmatrix}.
\end{aligned}
\end{equation}
The characteristic polynomial of these four matrices, computed by
the code in the supplementary file \texttt{Even\_odd\_4CMathematica.nb},
is
\[
\left(R^{4}+2R^{2}w^{2}+6R^{2}+w^{4}-6w^{2}+9\right)\left(R^{4}+2R^{2}w^{2}+14R^{2}+w^{4}+2w^{2}-15\right)
\]
where
$ R^{2}=x^{2}+y^{2}+z^{2}$.
Again we have a surface homeomorphic to a three-sphere.
We introduce a grading via the matrix
\begin{equation*}
\Gamma=\begin{bmatrix}
1 & 0 & 0 & 0\\
0 & 1 & 0 & 0\\
0 & 0 & -1 & 0\\
0 & 0 & 0 & -1
\end{bmatrix}
\end{equation*}
so we consider a matrix $M$ \emph{even} if $M\Gamma=\Gamma M$ and
\emph{odd} if $M\Gamma=-\Gamma M$.
In the example under discussion, the first three matrices are
even and the last is odd.
With these symmetries, we get an index for points
$(w,x,y,z)$ not in the Clifford spectrum \emph{and with the restriction
that $z=0$}. This restriction is needed as translating $H$ will
ruin the symmetry $H\Gamma=-\Gamma H$. The index is based on the fact
that
\begin{equation*}
i\widetilde{L}_{\boldsymbol{\lambda}}(X,Y,Z,H)\left(\Gamma\otimes I_{2}\right)
\end{equation*}
is Hermitian, and the index is
\begin{equation*}
\frac{1}{2}\mathrm{Sig}\left(i\widetilde{L}_{\boldsymbol{\lambda}}(X,Y,Z,H)\left(\Gamma\otimes I_{2}\right)\right).
\end{equation*}
Here we are referring to the reduced localizer of (\ref{eqn:reduced_localizer}).
This is explained in \cite{LoringSB_odd_dim}.
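For completeness we note that, once $i\widetilde{L}_{\boldsymbol{\lambda}}(X,Y,Z,H)\left(\Gamma\otimes I_{2}\right)$ has been assembled as a Hermitian matrix, the half-signature can be read off from the signs of its eigenvalues; a minimal numerical sketch is the following.
\begin{verbatim}
import numpy as np

def half_signature(M, tol=1e-9):
    """Half the signature of a Hermitian matrix M."""
    assert np.allclose(M, M.conj().T)
    eig = np.linalg.eigvalsh(M)
    assert np.all(np.abs(eig) > tol)   # undefined on the Clifford spectrum
    return 0.5 * (np.sum(eig > 0) - np.sum(eig < 0))
\end{verbatim}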
\begin{figure}
\caption{This figure shows parts of the Clifford spectrum for
the four matrices in (\ref{eq:deformed_3_sphere_matrices}).
\label{fig:Holes_in_hypersurface}}
\end{figure}
For the matrices in (\ref{eqn:even_odd}),
the index at the origin is $-1$. As always, for $\boldsymbol{\lambda}$ large compared to the
norm of the matrices the index is $0$. Thus the part of the Clifford spectrum that intersects the hyperplane
$z=0$ is protected. Small perturbations of the matrices will not
change much the part of the Clifford spectrum intersected with $z=0$.
A little exploration of matrices near these leads
to the following. Consider the four matrices
\begin{equation}
\label{eq:deformed_3_sphere_matrices}
\begin{aligned}
X & =\begin{bmatrix}
\tfrac{3}{2} & 2 & 0 & 0\\
2 & 0 & 0 & 0\\
0 & 0 & 0 & -2\\
0 & 0 & -2 & \tfrac{3}{2}
\end{bmatrix}
,\quad
Y =\begin{bmatrix}
0 & -i & 0 & 0\\
i & 0 & 0 & 0\\
0 & 0 & 0 & -i\\
0 & 0 & -i & 0
\end{bmatrix}, \\
Z & =\begin{bmatrix}
1 & 0 & 0 & 0\\
0 & -1 & 0 & 0\\
0 & 0 & -1 & 0\\
0 & 0 & 0 & 1
\end{bmatrix}
, \quad
H =\begin{bmatrix}
0 & 0 & 1 & 0\\
0 & 0 & 0 & 1\\
1 & 0 & 0 & 0\\
0 & 1 & 0 & 0
\end{bmatrix},
\end{aligned}
\end{equation}
so replacing the $\tfrac{3}{2}$ entries by $0$ recreates the previous example. Figure~\ref{fig:Holes_in_hypersurface}
looks at slices of the Clifford spectrum for this example.
\section*{Supplementary files}
The supplementary files are available for download from
\begin{quote}
\url{math.unm.edu/~loring/CliffordExperiments/}
\end{quote}
and are all Mathematica files, videos created by Mathematica files, or
PDF copies of Mathematica files.
\section*{Acknowledgments}
The research of all authors for this project was supported in part by the National Science Foundation (DMS \#1700102).
\end{document}
\begin{document}
\title{
\TitleFont Assessing the Economics of Customer-Sited Multi-Use Energy Storage
}
\author{Wuhua Hu, Ping Wang, and Hoay Beng Gooi
\thanks{This work was supported in part by the Energy Innovation Programme Office (EIPO) through the National Research Foundation and Singapore Economic Development Board. W. Hu is with the Signal Processing Department, Institute for Infocomm Research, A*STAR, Singapore. P. Wang is with the School of Computer Engineering and H. B. Gooi is with the School of Electrical and Electronic Engineering, Nanyang Technological University, Singapore. E-mails: {\tt\small [email protected], \{wangping, ehbgooi\}@ntu.edu.sg}.}
}
\maketitle
\thispagestyle{empty}
\pagestyle{plain}
\begin{abstract}
This paper presents an approach to assess the economics of
customer-sited energy storage systems (ESSs) which are owned and operated by a customer.
The ESSs can participate in frequency regulation and spinning reserve
markets, and are used to help the customer consume available renewable energy and reduce the electricity bill. A rolling-horizon approach is developed to optimize the service schedule, and the resulting costs and revenues are used to assess the economics of the ESSs. The economic assessment approach is illustrated with case studies,
from which we obtain some new observations on profitability of
the customer-sited multi-use ESSs.
\end{abstract}
\section{Introduction}
Energy storage systems (ESSs) are a promising ingredient
for reliable integration of renewable energies into future
power grids. ESSs are, however, costly at the present stage, and recent studies
showed that they are unlikely to generate a net profit if they are
used to provide only a single service. This motivates the use of ESSs for
multiple service provision \cite{Fitzgerald2015}.
When ESSs are scheduled for multiple services concurrently,
potential conflicts occur due to the limited power and energy capacities
available. An ideal scheduling approach needs to address the conflicts
in an optimal way, such that the net profit is maximized subject to operational and service constraints. So far, only a few studies have
been conducted, partially addressing the encountered challenges.
Among them, \cite{you2010economic} presents a coarse framework to
investigate the net profit, but leaves the nontrivial modeling of the optimization objective and
constraints to readers for specific applications. Reference \cite{moreno2015milp}
develops a mixed-integer linear programming model focusing
on maximizing the revenue without
considering the costs of ESSs. More recently, \cite{wu2015energy}
presents a concrete optimization formulation in a rolling-horizon framework. However, the formulation does not appropriately capture the operating costs of ESSs which are dependent on
their varying charge and discharge rates \cite{trippe2014charging}.
This work considers customer-sited ESSs which provide multiple services.
The ESSs are used to participate in
regulation and spinning reserve markets, and help the customer consume available renewable energy and reduce the time-of-use (TOU) electricity bill. We develop a comprehensive
scheduling model which captures the dynamics of ESSs and associated aging costs, the supported services and associated revenues, and all major service and operational constraints. By optimizing the schedule using a rolling-horizon approach, we are able to assess profitability of the ESSs. Different from aforementioned literature \cite{you2010economic,moreno2015milp,wu2015energy},
we include the support of self-consumption of renewable
energy and embed a more realistic aging model for the ESSs. When the storage is made of Li-ion batteries, the aging
model characterizes the battery aging cost in terms of its instant
charge/discharge rate and the duration, which was experimentally established
in \cite{trippe2014charging}.
\section{Modeling the ESSs and Their Services} \label{sec: Modeling-the-ESSs}
We use $\mathcal{N}$ to denote the set of ESSs.
The time is discretized into slots, each with a duration of $T_{{\rm s}}$. The charge and discharge of ESSs are scheduled periodically to support self-consumption of renewable energy, frequency regulation, spinning reserve, and TOU electric bill reduction. The mathematical
models of the ESSs and the four supported services are developed in
this section.
\subsection{Modeling the ESSs}
We assume that a customer owns and operates multiple ESSs, each of which
follows a generic model used in \cite{hu2016towards}. Let the charge
and discharge rates of ESS $i$ be scheduled as $p_{i,t}^{{\rm c}}$
and $p_{i,t}^{{\rm d}}$ for time slot $t$, respectively. Let
$v_{i,t}^{{\rm c}}$ indicate the working mode of ESS $i$: it equals 1 if the ESS is
\textit{not} discharged (charge mode) and 0 if it is \textit{not} charged (discharge mode).
These variables satisfy
\begin{equation}
0\le p_{i,t}^{{\rm c}}\le v_{i,t}^{{\rm c}}p_{i,\max}^{{\rm c}},\thinspace\thinspace0\le p_{i,t}^{{\rm d}}\le(1-v_{i,t}^{{\rm c}})p_{i,\max}^{{\rm d}},\label{cons: ES charge-discharge limits}
\end{equation}
where $p_{i,\max}^{{\rm c}}$ and $p_{i,\max}^{{\rm d}}$ are the
corresponding upper bounds. The two constraints ensure that charge
and discharge comply with the rate limits and do not happen in the same time slot.
After charge/discharge, the state of charge (SOC) of the ESS $i$,
denoted by $s_{i,t}$, renews into
\begin{equation}
s_{i,t}=s_{i,t-1}+T_{s}(\eta_{i}^{{\rm c}}p_{i,t}^{{\rm c}}-p_{i,t}^{{\rm d}}/\eta_{i}^{{\rm d}})/E_{i}^{\text{cap}},\label{cons: SOC update}
\end{equation}
where $\eta_{i}^{{\rm c}},\eta_{i}^{{\rm d}}\in(0,1)$ are the energy
conversion coefficients, and $E_{i}^{{\rm cap}}$ is the energy capacity
of ESS $i$. The SOC must be maintained within certain limits in order to
protect the ESSs, and this will be discussed later in Section \ref{sub: Service-support-constraint}.
Both charge and discharge incur an aging cost, which represents the depreciation
of the initial investment. Let the cost be estimated as $C_{i}^{{\rm c}}(p_{i,t}^{{\rm c}})$
and $C_{i}^{{\rm d}}(p_{i,t}^{{\rm d}})$ for charging and discharging ESS $i$ at the rates of $p_{i,t}^{{\rm c}}$ and $p_{i,t}^{{\rm d}}$
for one hour, respectively. The cost of operating ESS $i$ in
time slot $t$ is then given by
\[
C_{i}(p_{i,t}^{{\rm c}},p_{i,t}^{{\rm d}})=T_{{\rm s}}C_{i}^{{\rm c}}(p_{i,t}^{{\rm c}})+T_{{\rm s}}C_{i}^{{\rm d}}(p_{i,t}^{{\rm d}}).
\]
If the ESSs use Li-ion batteries, the cost can be approximated by
a piece-wise linear function which is further obtained by solving
the following linear program \cite{trippe2014charging,hu2016towards}:{
\par
{\small{}
\begin{equation}
\begin{aligned} & C_{i}(p_{i,t}^{{\rm c}},p_{i,t}^{{\rm d}})\approx\frac{\alpha_{i}T_{{\rm s}}}{0.8E_{i}^{\text{cap}}}\min_{\zeta_{i,t}^{{\rm ESS}}}\zeta_{i,t}^{{\rm ESS}},\\
&\text{s.t., }\gamma_{i}\eta_{i}^{{\rm c}}[1000\times a_{k}^{{\rm ESS}}(p_{i,t}^{{\rm c}})^{2}+n_{i}b_{k}^{{\rm ESS}}p_{i,t}^{{\rm c}}]+\frac{1-\gamma_{i}}{\eta_{i}^{{\rm d}}}\\
& \times [1000\times a_{k}^{{\rm ESS}}(p_{i,t}^{{\rm d}})^{2}+n_{i}b_{k}^{{\rm ESS}}p_{i,t}^{{\rm d}}] \le\zeta_{i,t}^{{\rm ESS}},\thinspace\forall\, k\in\mathcal{K}_{{\rm ESS}},
\end{aligned}
\label{cons: C_ESS}
\end{equation}
}}where $\alpha_{i}$ is the unit capital cost (\$/Wh) to purchase ESS
$i$; $\zeta_{i,t}^{\rm ESS}$ is an auxiliary variable; $\gamma_{i}$
is the fraction of a single cyclic aging cost incurred by fully charging
the battery from empty; $n_{i}\triangleq E_{i}^{\text{cap}}/0.0081$,
which is the number of battery modules that form the ESS $i$, each
with a capacity of 0.0081 kWh; and $\{a_{k}^{{\rm ESS}},b_{k}^{{\rm ESS}}\}_{k\in\mathcal{K}_{{\rm ESS}}}$
are the coefficients associated with the linear segments as indicated
by a certain set $\mathcal{K}_{{\rm ESS}}$.
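Since the linear program above minimizes $\zeta_{i,t}^{{\rm ESS}}$ subject to it dominating each member of a finite family of expressions, its optimal value is simply the pointwise maximum of those expressions. The following minimal sketch evaluates the resulting cost; the segment coefficients used in the call are placeholders for illustration only, not the values of \cite{trippe2014charging}.
\begin{verbatim}
def ess_aging_cost(p_c, p_d, alpha, E_cap, T_s, gamma, eta_c, eta_d, segments):
    """Aging cost C_i(p_c, p_d); segments is a list of (a_k, b_k) pairs
    and n = E_cap / 0.0081 is the number of battery modules."""
    n = E_cap / 0.0081
    zeta = max(gamma * eta_c * (1000 * a * p_c**2 + n * b * p_c)
               + (1 - gamma) / eta_d * (1000 * a * p_d**2 + n * b * p_d)
               for a, b in segments)
    return alpha * T_s / (0.8 * E_cap) * zeta

# illustrative call with placeholder coefficients
cost = ess_aging_cost(50.0, 0.0, alpha=0.3, E_cap=480.0, T_s=1.0, gamma=0.5,
                      eta_c=0.82, eta_d=0.88, segments=[(1e-6, 2e-4), (2e-6, 1e-4)])
\end{verbatim}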
\subsection{Service for self-consumption of renewable energy}
The customer has installed renewable energy generators. The aggregate generation power for time slot $t$ is denoted as $p_{t}^{{\rm re}}$. For time slot $t$, let the customer be scheduled to consume the renewable energy at a
rate of $p_{t}^{{\rm re,sc}}$, and the surplus
renewable energy be charged to ESS $i$ at a rate of $p_{i,t}^{{\rm re,c}}$,
and the remaining renewable energy be exported to the market at a
rate of $p_{t}^{{\rm re,s}}$. These power variables satisfy
{\par
\begin{equation} {\small
\begin{gathered}0\le p_{t}^{{\rm re,sc}}\le d_{t},\,\,0\le p_{i,t}^{{\rm re,c}}\le p_{i,\max}^{{\rm c}},\,\,0\le p_{t}^{{\rm re,s}}\le p_{\max}^{{\rm s}},\\
p_{t}^{{\rm re,sc}}+p_{t}^{{\rm re,s}}+\sum_{i\in\mathcal{N}}p_{i,t}^{{\rm re,c}}\le p_{t}^{{\rm re}},
\end{gathered} }
\label{cons: sc of renewable energy}
\end{equation}
}
where $d_{t}$ is the load demand of the customer, and
$p_{\max}^{{\rm s}}$ is the maximum power that can be injected to the grid.
The last inequality admits curtailment of surplus renewable generation,
if any.
Given the electricity purchase price $c_{t}^{{\rm p}}$ and sale price $c_{t}^{{\rm s}}$, we can compute
the revenue of consuming renewable energy with the
help of ESSs as{
\par
{\footnotesize{}
\begin{align*}
R_{{\rm sc}}(p_{t}^{{\rm re,sc}},p_{t}^{{\rm re,s}},\{p_{i,t}^{{\rm re,c}}\}_{i\in\mathcal{N}}) & =T_{{\rm s}}c_{t}^{{\rm p}}(p_{t}^{{\rm re,sc}}+\sum_{i\in\mathcal{N}}p_{i,t}^{{\rm re,c}})+T_{{\rm s}}c_{t}^{{\rm s}}p_{t}^{{\rm re,s}},
\end{align*}
}}
of which the first part is due to the avoided purchase of energy
from the market, and the second part to the surplus renewable
energy exported to the market.
\subsection{Service for frequency regulation}
Frequency regulation aims at stabilizing the grid frequency at a desired
value. Let $u_{t}^{{\rm fr},{\rm up}}$ be an indicator
which is 1 for ramp up regulation and 0 otherwise. The minimum power
to participate in the regulation market is required to be $p_{\min}^{{\rm fr}}$.
The ESSs may participate in the market or
not, as indicated by $v_{t}^{{\rm fr}}$ equal to 1 and 0, respectively.
Let ESS $i$ charge at a rate of $p_{i,t}^{{\rm fr,c}}$
if $u_{t}^{{\rm fr},{\rm up}}=0$ and discharge at a rate of $p_{i,t}^{{\rm fr,d}}$
if $u_{t}^{{\rm fr},{\rm up}}=1$. The power variables satisfy
\begin{equation}
\begin{gathered}0\le p_{i,t}^{{\rm fr,d}}\le v_{t}^{{\rm fr}} u_{t}^{{\rm fr},{\rm up}}p_{i,\max}^{{\rm d}},\thinspace\thinspace\forall\thinspace i\in\mathcal{N},\\
0\le p_{i,t}^{{\rm fr,c}}\le v_{t}^{{\rm fr}} (1-u_{t}^{{\rm fr},{\rm up}})p_{i,\max}^{{\rm c}}\thinspace\thinspace\forall\thinspace i\in\mathcal{N},\\
p_{t}^{{\rm fr}}\triangleq\sum_{i\in\mathcal{N}}(1-u_{t}^{{\rm fr},{\rm up}})p_{i,t}^{{\rm fr,c}}+u_{t}^{{\rm fr},{\rm up}}p_{i,t}^{{\rm fr,d}}\ge v_{t}^{{\rm fr}}p_{\min}^{{\rm fr}},
\end{gathered}
\label{cons: frequency regulation}
\end{equation}
where the first two inequalities ensure that charge and discharge for the regulation service do not happen concurrently.
Consider the payment scheme implemented by PJM in
USA \cite{avendano2015financial,manual2015energy}. The regulation service is paid by the committed
power capacity (denoted by $p_{t}^{{\rm fr}}$) and the regulation
performance (dictated by the performance score $\rho_{t}^{{\rm fr}}$
and the regulation mileage ratio $\mu_{t}^{{\rm fr}}$). The performance
score ($\rho_{t}^{{\rm fr}}$) is computed based on the regulation performance in the past period; and the
mileage ratio ($\mu_{t}^{{\rm fr}}$) is the mileage of the fast regulation
signal divided by the mileage of the slow (or conventional) regulation
signal, both in the past service period. The Regulation Market Capacity Clearing
Price (RMCCP) is denoted by $c_{t}^{{\rm RMCCP}}$, and the Regulation
Market Performance Clearing Price (RMPCP) is denoted by $c_{t}^{{\rm RMPCP}}$. Both prices
are updated at a period of $T_{{\rm s}}$.
The revenue of the regulation service is then computed
as{
\par
{\footnotesize{}
\begin{align*}
R_{{\rm fr}}(p_{t}^{{\rm fr}}) & =T_{{\rm s}}\rho_{t}^{{\rm fr}}p_{t}^{{\rm fr}}(c_{t}^{{\rm RMCCP}}+c_{t}^{{\rm RMPCP}}\mu_{t}^{{\rm fr}})+T_{{\rm s}}c_{t}^{{\rm p}}\sum_{i\in\mathcal{N}}(p_{i,t}^{{\rm fr,d}}-p_{i,t}^{{\rm fr,c}}),
\end{align*}
}}
where the first term is due to the service provided, and the second
term accounts for the revenue obtained from the energy charged/discharged to/from
the ESSs.
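For reference, the revenue expression above translates directly into code; a minimal sketch, with all inputs supplied by the user, is the following.
\begin{verbatim}
def regulation_revenue(T_s, rho_fr, p_fr, c_rmccp, c_rmpcp, mu_fr,
                       c_p, p_fr_d, p_fr_c):
    """R_fr for one slot: capacity/performance payment plus the value of
    the net energy discharged by the ESSs (p_fr_d, p_fr_c are lists over i)."""
    capacity = T_s * rho_fr * p_fr * (c_rmccp + c_rmpcp * mu_fr)
    energy = T_s * c_p * sum(d - c for d, c in zip(p_fr_d, p_fr_c))
    return capacity + energy
\end{verbatim}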
\subsection{Service as spinning reserve}
Consider a spinning reserve market which periodically publishes a
reserve availability price, denoted by $c_{t}^{{\rm sr}}$. The minimum participation power is required to be $p_{\min}^{{\rm sr}}$,
and the minimum commission time is $T_{\min}^{{\rm sr}}$. The ESSs may be scheduled to support this service, which
is dictated by a binary variable $v_{t}^{{\rm sr}}$, 1 for participation
and 0 otherwise. Let $p_{i,t}^{{\rm sr},{\rm d}}$ be the power reserved by ESS $i$, which
is the commissioned maximum discharge rate under contingencies. The reserved
power satisfies
\begin{equation}
0\le p_{i,t}^{{\rm sr,d}}\le v_{t}^{{\rm sr}}p_{i,\max}^{{\rm d}},\quad\sum_{i\in\mathcal{N}}p_{i,t}^{{\rm sr,d}}\ge v_{t}^{{\rm sr}}p_{\min}^{{\rm sr}}.\label{cons: operating reserve}
\end{equation}
The minimum support time will be enforced via
constraint (\ref{cons: service linkage c}) ahead. The revenue
for this service is calculated by
\[
R_{{\rm sr}}(\{p_{i,t}^{{\rm sr},{\rm d}}\}_{i\in\mathcal{N}})=c_{t}^{{\rm sr}}T_{{\rm s}}\sum_{i\in\mathcal{N}}p_{i,t}^{{\rm sr},{\rm d}}.
\]
\subsection{Service for TOU electricity bill reduction and preparation for future services}
With TOU electricity pricing information, the ESSs
may be used to reduce the electricity bill by charging and discharging the storage appropriately. At the same time, the ESSs may be charged/discharged
to prepare for future services. Let
the aggregate charge and discharge rates of ESS $i$ for such purposes be scheduled
as $p_{i,t}^{{\rm fs,c}}$ and $p_{i,t}^{{\rm br+fs,d}}$ for time slot $t$, respectively. The revenue obtained from
the charged and discharged energy is computed as{
\par
{\small{}
\[
R_{{\rm br}}(\{p_{i,t}^{{\rm br+fs,d}},p_{i,t}^{{\rm fs,c}}\}_{i\in\mathcal{N}})=T_{{\rm s}}c_{t}^{{\rm p}}\sum_{i\in\mathcal{N}}(p_{i,t}^{{\rm br+fs,d}}-p_{i,t}^{{\rm fs,c}}),
\]
}}
which is due to the avoided or additional purchase of energy from the
market. The charge and discharge rates satisfy
\begin{equation}
0\le p_{i,t}^{{\rm br+fs,d}}\le p_{i,\max}^{{\rm d}},\quad0\le p_{i,t}^{{\rm fs,c}}\le p_{i,\max}^{{\rm c}}.\label{cons: bill reduction}
\end{equation}
\subsection{Feasibility constraints to support the multiple services \label{sub: Service-support-constraint}}
To support the four services above, we must have
\begin{gather}
p_{i,t}^{{\rm re,c}}+p_{i,t}^{{\rm fr,c}}+p_{i,t}^{{\rm fs,c}}=p_{i,t}^{{\rm c}}\le v_{i,t}^{{\rm c}}p_{i,\max}^{{\rm c}},\label{cons: service linkage a}\\
p_{i,t}^{{\rm fr,d}}+p_{i,t}^{{\rm br+fs,d}}=p_{i,t}^{{\rm d}}\le(1-v_{i,t}^{{\rm c}})(p_{i,\max}^{{\rm d}}-p_{i,t}^{{\rm sr,d}}),\label{cons: service linkage b}\\
s_{i,\min}+p_{i,t}^{{\rm sr,d}}T_{\min}^{{\rm sr}}/E_{i}^{{\rm cap}}\le s_{i,t}\le s_{i,\max},\label{cons: service linkage c}
\end{gather}
for each $i\in\mathcal{N}$. Constraints (\ref{cons: service linkage a}) and (\ref{cons: service linkage b}) are related to the aggregate charge rate and discharge rate for multiple services, respectively. Constraint (\ref{cons: service linkage c}) imposes SOC limits to protect the ESSs from being over charged
or discharged, in which $s_{i,\min},s_{i,\max}\in(0,1)$ are the required
limits and $p_{i,t}^{{\rm sr,d}}T_{\min}^{{\rm sr}}$ is the energy committed as spinning reserve. The three constraints link
up the four services provided by the ESSs, through which
the conflicts in between will be resolved via optimization.
The right hand side of the inequality in (\ref{cons: service linkage b})
contains a term $v_{i,t}^{{\rm c}}p_{i,t}^{{\rm sr,d}}$, which is
bilinear in the two decision variables. It is desirable to reformulate
this term into a linear equivalent form. Introduce an auxiliary
variable $z_{i,t}$. Then, $z_{i,t}$ is equal to $v_{i,t}^{{\rm c}}p_{i,t}^{{\rm sr,d}}$
if it satisfies the following constraints:
\begin{gather}
\begin{gathered}0\le z_{i,t}\le p_{i,\max}^{{\rm d}}v_{i,t}^{{\rm c}},\\
p_{i,t}^{{\rm sr,d}}+p_{i,\max}^{{\rm d}}(v_{i,t}^{{\rm c}}-1)\le z_{i,t}\le p_{i,t}^{{\rm sr,d}},
\end{gathered}
\label{cons: auxiliary z}
\end{gather}
The equivalence is easy to verify with the McCormick linearization
method \cite{mccormick1976computability}. Therefore, we can replace $v_{i,t}^{{\rm c}}p_{i,t}^{{\rm sr,d}}$
with $z_{i,t}$ subjected to the above constraints, which
makes constraint (\ref{cons: service linkage b}) completely linear
in the variables.
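The equivalence can also be checked directly: for binary $v_{i,t}^{{\rm c}}$ and $p_{i,t}^{{\rm sr,d}}\in[0,p_{i,\max}^{{\rm d}}]$, the feasible interval for $z_{i,t}$ collapses to the single point $v_{i,t}^{{\rm c}}p_{i,t}^{{\rm sr,d}}$. A minimal numerical sketch, with an arbitrary illustrative bound, follows.
\begin{verbatim}
import numpy as np

def z_interval(v, p_sr, p_max):
    # feasible interval for z implied by (cons: auxiliary z)
    lo = max(0.0, p_sr + p_max * (v - 1))
    hi = min(p_max * v, p_sr)
    return lo, hi

p_max = 1.0
for v in (0, 1):
    for p_sr in np.linspace(0.0, p_max, 11):
        lo, hi = z_interval(v, p_sr, p_max)
        assert np.isclose(lo, v * p_sr) and np.isclose(hi, v * p_sr)
\end{verbatim}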
\section{The Storage Management Optimization Problem and Its Solution} \label{sec: The-Storage-Management-problem}
The storage management optimization problem is defined to maximize
the total net profit (namely, minimize the total net loss) over a rolling
horizon subject to service requirements and operational constraints. Given a decision time point $t$, the time slots
within a look-ahead horizon of size $H$ are denoted by the set $\mathcal{H}_{t}$. The optimization will be performed using the forecast data over the horizon.
The total net profit (TNP) over the horizon sums up the revenues earned
from the four services minus the operating cost incurred
to the ESSs. With the storage aging cost $C_{i}(p_{i,t}^{{\rm c}},p_{i,t}^{{\rm d}})$
computed from the linear program (\ref{cons: C_ESS}), the TNP can be shown to have
the following specific form:{
\par
{\scriptsize{}
\begin{align*}
& \text{TNP}(t)=\sum_{\tau\in\mathcal{H}_{t}}\left(\begin{array}{c}
R_{{\rm sc}}(p_{\tau}^{{\rm re,sc}},p_{t}^{{\rm re,s}},\{p_{i,\tau}^{{\rm re,c}}\}_{i\in\mathcal{N}})\\
+R_{{\rm fr}}(\{p_{i,\tau}^{{\rm fr,c}},p_{i,\tau}^{{\rm fr,d}}\}_{i\in\mathcal{N}})+R_{{\rm sr}}(\{p_{i,\tau}^{{\rm sr},{\rm d}}\}_{i\in\mathcal{N}})\\
+R_{{\rm br}}(\{p_{i,\tau}^{{\rm br+fs,d}},p_{i,\tau}^{{\rm fs,c}}\}_{i\in\mathcal{N}})\\
-\sum_{i\in\mathcal{N}}C_{i}(p_{i,\tau}^{{\rm c}},p_{i,\tau}^{{\rm d}})
\end{array}\right)\\
& =T_{{\rm s}}\sum_{\tau\in\mathcal{H}_{t}}\left(\begin{array}{c}
c_{\tau}^{{\rm p}}p_{\tau}^{{\rm re,sc}}+c_{\tau}^{{\rm s}}p_{\tau}^{{\rm re,s}}+\rho_{\tau}^{{\rm fr}}p_{\tau}^{{\rm fr}}(c_{\tau}^{{\rm RMCCP}}+c_{\tau}^{{\rm RMPCP}}\mu_{\tau}^{{\rm fr}})\\
+\sum_{i\in\mathcal{N}}\left(\begin{array}{c}
c_{\tau}^{{\rm p}}(p_{i,\tau}^{{\rm d}}-p_{i,\tau}^{{\rm c}}+2p_{i,\tau}^{{\rm re,c}})\\
+c_{\tau}^{{\rm sr}}p_{i,\tau}^{{\rm sr},{\rm d}}-\min_{\zeta_{i,\tau}^{{\rm ESS}}}\frac{\alpha_{i}\zeta_{i,\tau}^{{\rm ESS}}}{0.8E_{i}^{\text{cap}}}
\end{array}\right)
\end{array}\right)
\end{align*}
}}
which is subject to the constraints in (\ref{cons: C_ESS}). We have used
the revenue and cost expressions introduced in the
previous subsections and also the equalities in constraints (\ref{cons: service linkage a})
and (\ref{cons: service linkage b}) to deduce the TNP.
Since the decision variables $\{p_{i,\tau}^{{\rm br+fs,d}},p_{i,\tau}^{{\rm fs,c}}\}_{i\in\mathcal{N},\tau\in\mathcal{H}_{t}}$
do not appear in ${\rm TNP}(t)$, we can eliminate these redundant variables and simplify constraints (\ref{cons: service linkage a}) and
(\ref{cons: service linkage b}) into the following:
\begin{gather}
p_{i,t}^{{\rm re,c}}+p_{i,t}^{{\rm fr,c}}\le p_{i,t}^{{\rm c}}\le v_{i,t}^{{\rm c}}p_{i,\max}^{{\rm c}},\label{cons: service linkage a-1}\\
p_{i,t}^{{\rm fr,d}}\le p_{i,t}^{{\rm d}}\le(1-v_{i,t}^{{\rm c}})p_{i,\max}^{{\rm d}}-p_{i,t}^{{\rm sr,d}}+z_{i,t},\label{cons: service linkage b-1}
\end{gather}
for each $i\in\mathcal{N}$. Here the variable $z_{i,t}$ is an equivalent of $v_{i,t}^{{\rm c}}p_{i,t}^{{\rm sr,d}}$, satisfying constraint (\ref{cons: auxiliary z}). By minimizing
${\rm TNP}(t)$ with the new constraints, the solution of $(p_{i,\tau}^{{\rm br+fs,d}},p_{i,\tau}^{{\rm fs,c}})$
can then be recovered from (\ref{cons: service linkage a}) and (\ref{cons: service linkage b}).
Consequently, the ESS management
problem is defined as
\begin{align*}
\textbf{P0: } & \min-\text{TNP}(t)\\
\text{subject to, } & \eqref{cons: ES charge-discharge limits}-\eqref{cons: bill reduction},\thinspace\thinspace\eqref{cons: service linkage c}-\eqref{cons: service linkage b-1},\thinspace\thinspace\forall\thinspace i\in\mathcal{N},\tau\in\mathcal{H}_{t}
\end{align*}
where the subscript $\tau$ instead of $t$ is used in all
constraints, and constraint (\ref{cons: C_ESS}) refers only to the
inequalities. In $\textbf{P0}$, the power variables are
$p_{i,\tau}^{{\rm c}}$, $p_{i,\tau}^{{\rm re,c}}$ and $p_{i,\tau}^{{\rm fr,c}}$
for charge and $p_{i,\tau}^{{\rm d}}$, $p_{i,\tau}^{{\rm fr,d}}$
and $p_{i,\tau}^{{\rm sr,d}}$ for discharge of each ESS $i\in\mathcal{N}$
in each time slot $\tau\in\mathcal{H}_{t}$, and $p_{\tau}^{{\rm re,sc}}$
and $p_{\tau}^{{\rm re,s}}$ for the customer to self-consume and
sell available renewable energy in each time slot $\tau\in\mathcal{H}_{t}$.
The auxiliary variables are binary variables $\{v_{i,\tau}^{{\rm c}}\}_{i\in\mathcal{N}}$,
$v_{\tau}^{{\rm fr}}$ and $v_{\tau}^{{\rm sr}}$
for all $\tau\in\mathcal{H}_{t}$, and real variables $\{\zeta_{i,\tau}^{{\rm ESS}}\}_{i\in\mathcal{N}}$
and $\{z_{i,\tau}\}_{i\in\mathcal{N}}$ for all $\tau\in\mathcal{H}_{t}$.
The objective and constraints of $\textbf{P0}$ are linear in the variables, except constraint $\eqref{cons: C_ESS}$
which is quadratic in the decision variables $\{p_{i,\tau}^{{\rm c}},p_{i,\tau}^{{\rm d}}\}$ for each $i\in\mathcal{N}$ and $\tau\in\mathcal{H}_{t}$.
Therefore, the problem is a mixed-integer quadratic program
(MIQP) and can be solved by standard MIQP solvers. Once an optimal solution
is obtained, only the part for the first time slot $t$ will be
implemented to dispatch the ESSs. The schedule for the future
time slot $(t+1)$ will be determined in a similar way by shifting the
time horizon forward by one slot and then solving the new optimization
problem.
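The rolling-horizon procedure just described can be summarized by the skeleton below. It is only a sketch: \texttt{solve\_P0} stands for a call to an off-the-shelf MIQP solver for $\textbf{P0}$ and \texttt{forecast} for the forecasting module; both are hypothetical placeholders rather than code from this paper.
\begin{verbatim}
def rolling_horizon_dispatch(T, H, forecast, solve_P0, soc_init):
    """Rolling-horizon scheduling loop for problem P0 (sketch only)."""
    soc = dict(soc_init)                # current SOC of each ESS
    dispatched = []
    for t in range(T):
        data = forecast(t, H)           # forecasts over slots t, ..., t+H-1
        schedule = solve_P0(data, soc)  # optimal schedule over the horizon
        first = schedule[0]             # implement only the first time slot
        dispatched.append(first)
        soc = first["soc_after"]        # then shift the horizon forward by one slot
    return dispatched
\end{verbatim}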
\section{Case Study \label{sec: Case-studies}}
This section assesses the economics of multi-use ESSs based on the scheduling approach developed above.
\subsection{Simulation setup and input data}
The demand is scaled historical
hourly electricity demand of a college in California for a summer
week \cite{consumptiontrace2002}. The solar-PV generation, which has a peak equal to 60\% of the peak demand, is
scaled historical hourly generation power for a summer week in Brussels \cite{Solar-PV-ELIA2013}. The customer deploys two ESSs with specifications given in Table \ref{tb: ESS parameters}.
Their charge and discharge aging costs are estimated
by (\ref{cons: C_ESS}), in which the model parameters are set the same as those in \cite{hu2016towards}.
\begin{table}
\caption{\textsc{Parameters of the ESSs. The Power is in Unit of kW and the Energy is in Unit of kWh.}}
\label{tb: ESS parameters}
\centering{}
\begin{tabular}{cccccccc}
\hline
Type & $E_{i}^{\text{cap}}$ & $s_{i,\min}$ & $s_{i,\max}$ & $p_{i,\max}^{{\rm c}}$ & $p_{i,\max}^{{\rm d}}$ & $\eta_{i}^{{\rm c}}$ & $\eta_{i}^{{\rm d}}$\tabularnewline
\hline
1 & 480 & 0.2 & 0.9 & 102 & 74 & 0.82 & 0.88\tabularnewline
2 & 720 & 0.2 & 0.9 & 148 & 113 & 0.85 & 0.90\tabularnewline
\hline
\end{tabular}
\end{table}
The hourly regulation signal and associated market
clearing price are from the real operational records of PJM \cite{ancillary_src_data2015}. So are the hourly spinning
reserve prices. The price of purchasing electricity from the market is obtained
from PG \& E \cite{hu2016towards}, which consists of peak, mid-peak and off-peak prices for different periods of a day. The price
of selling electricity to the market is 60\%
of the purchase price. The purchase and sale powers are unrestricted in our study.
\subsection{Profitability of the multi-use ESSs}
When the input data within the horizon is perfectly known, the total net profit obtained is shown
in Fig. \ref{fig: profit-vs-horizon-size}(a). The profit decreases
with the storage purchase price, and vanishes once the purchase price
is higher than 300 \$/kWh. On the other
hand, the profit increases with the horizon size $H$, but the marginal benefit decreases and becomes small once the horizon
size is larger than 4.
\begin{figure}
\caption{{\small{}Net profits versus the storage purchase price and the horizon size $H$: (a) total net profit; (b) net profit with the contribution of the free solar energy removed.}
\label{fig: profit-vs-horizon-size}}
\end{figure}
The above profit contains the contribution of solar energy, which is
assumed free here. To remove this contribution and obtain the value attributable solely to the ESSs, we subtract from the above profit
the one obtained without any ESSs. This yields the reduced profits
shown in Fig. \ref{fig: profit-vs-horizon-size}(b).
When the storage purchase price is fixed at 100 \$/kWh, the charge/discharge schedules for $H$ equal to 2 and 4 are shown in Fig. \ref{fig: detailed-schedules}. The associated revenues and storage operating costs are given in Table \ref{tb: revenues-vs-costs}. As can be seen, the higher profit for the case with $H=4$ is due to appropriate use of the ESSs for regulation and reserve services and for reducing the electricity
bill. The results in Table \ref{tb: revenues-vs-costs} also indicate
that, in the absence of free solar energy, using ESSs to support
only one or two of the services may not cover the associated
cost and hence would be unable to yield a positive profit.
\begin{figure}
\caption{{\small{}Charge/discharge schedules of the ESSs for horizon sizes $H=2$ and $H=4$, with the storage purchase price fixed at 100 \$/kWh.}
\label{fig: detailed-schedules}}
\end{figure}
\begin{table}
\caption{\textsc{Revenues and Costs in Unit of \$.}}
\label{tb: revenues-vs-costs}
\centering{}
\begin{tabular}{ccccccc}
\hline
$H$ & $R_{\text{sc}}$ & $R_{\text{fr}}$ & $R_{\text{sr}}$ & $R_{\text{br}}$ & Storage aging cost & Net profit\tabularnewline
\hline
2 & 4126 & 0 & 0 & 0 & 0 & 4126\tabularnewline
4 & 4126 & 506 & 81 & 159 & 569 & 4303\tabularnewline
\hline
\end{tabular}
\end{table}
\subsection{Impact of forecast errors}
It is of interest to see how forecast errors affect the economic results. Let the load demand, the renewable generation power, the regulation market clearing price and the spinning reserve availability price be forecasted with zero-mean and uniformly distributed errors. The maximum errors are proportional to the magnitude changes in the true data for two sequential time slots, and the proportion coefficients increase with the forecast horizon. Further, the forecast is capped between 80\% of the minimum and 120\% of the maximum true values. The differences of the resulting net profits relative to those in Fig. \ref{fig: profit-vs-horizon-size}(a)-(b) are shown in Fig. \ref{fig: sensitivity-to-forecast-errors}. As observed, the differences are mostly negative (they can be positive because rolling-horizon optimization may be sub-optimal in the long run), indicating losses of profit caused by the forecast errors. Nevertheless, the magnitudes are small relative to the reference profits. This indicates that the economic assessment approach is reasonably robust to forecast errors.
\begin{figure}
\caption{{\small{}Differences of the net profits under forecast errors relative to those in Fig.~\ref{fig: profit-vs-horizon-size}(a)-(b).}
\label{fig: sensitivity-to-forecast-errors}}
\end{figure}
\section{Conclusions} \label{sec: Conclusions}
This paper developed a rolling-horizon optimization approach to schedule customer-sited ESSs for multi-service provision. The operating costs and resulting revenues were used
to assess the economics of the ESSs. The effectiveness of the proposed approach was illustrated with case studies. Future research will investigate the impact of storage energy and power capacities on the economics of multi-use ESSs.
\end{document}
\begin{document}
\begin{abstract}
We prove that the Lawson surface $\xi_{g,1}$
in Lawson's original notation,
which has genus $g$ and can be viewed as a desingularization of two orthogonal great two-spheres in the round three-sphere $\mathbb{S}^3$,
has index $2g+3$ and nullity $6$ for any genus $g\ge2$.
In particular $\xi_{g,1}$ has no exceptional Jacobi fields,
which means that it cannot ``flap its wings'' at the linearized level
and is $C^1$-isolated.
\end{abstract}
\title{The index and nullity of the Lawson surfaces $\xi_{g,1}$}
\section{Introduction}
\subsection*{The general framework and brief discussion of the results}
$\phantom{ab}$
\nopagebreak
Determining the index and nullity of complete or closed minimal surfaces is a difficult problem
which has been fully solved only in a few cases;
see for example \cites{nayatani1992,nayatani1993, morabito}.
The index plays an important role in min-max theory \cite{neves2014};
this provides partial motivation for our result.
In this article we prove Theorem \ref{Mtheorem},
which determines (for the first time) the index and the nullity of the Lawson surfaces $\xi_{g,1}$ \cite{Lawson} with $g\ge2$.
These are the Lawson surfaces which have genus $g$ and can be viewed as desingularizations of two orthogonal great two-spheres in the round three-sphere $\mathbb{S}^3$
in the sense of \cite{alm20}*{Definition 1.3}.
The index determined is consistent with (but larger than) a lower bound established by Choe \cite{choe1990}.
We prove that the nullity is $6$ and so there are no exceptional Jacobi fields,
which means by Corollary \ref{C:flapping} that these surfaces cannot ``flap their wings'' at the linearized level
and are $C^1$-isolated.
This provides a partial answer to questions asked in \cite{alm20}*{Section 4.2}.
The ideas of our proof originate with work of NK on the approximate kernel for Scherk surfaces \cites{compact, alm20}.
Our approach requires a detailed understanding of the elementary geometry of $\mathbb{S}^3$ and of the surfaces involved,
especially their symmetries.
The proof makes heavy use also of Alexandrov reflection in the style of Schoen's \cite{schoen1983}.
The Courant nodal theorem \cite{courant} and an argument of Montiel-Ros \cite{Ros} play essential roles as well.
In ongoing work we hope to extend this result to determine the index and nullity
of all Lawson surfaces
$\xi_{m-1,k-1}$ in Lawson's original notation, with $m\ge k\ge 3$.
Another interesting problem, which could not be posed until the determination of the index of the Lawson surfaces,
is motivated by the characterization of the Clifford torus by Fischer-Colbrie (unpublished) and (independently) by Urbano \cite{urbano}
as the only closed minimal surface in $\mathbb{S}^3$, besides the great sphere, which has index $\le5$,
and also by some recent results for minimal surfaces in $\mathbb{R}^3$ \cites{davi,davi2}:
the problem is to classify all closed minimal surfaces in $\mathbb{S}^3$ which have index $\le7$
(the index of the Lawson surface of genus two),
or more generally $\le2g+3$ for small $g$
(the index of the Lawson surface $\xi_{g,1}$).
\subsection*{Notation and conventions}
$\phantom{ab}$
\nopagebreak
We denote by $\mathbb{S}^3 \subset \mathbb{R}^{4}$ the unit $3$-dimensional sphere.
\begin{notation}
\label{span}
For any
$A\subset \mathbb{S}^3 \subset \mathbb{R}^{4}$
we denote by $\operatorname{Span}(A)$ the span of $A$ as a subspace of $\mathbb{R}^{4}$
and by $\mathbb{S}(A):=\operatorname{Span}(A)\cap\mathbb{S}^3$.
\qed
\end{notation}
Given now a vector subspace $V$ of the Euclidean space $\mathbb{R}^{4}$,
we denote by $V^\perp$ its orthogonal complement in $\mathbb{R}^{4 }$,
and we define the reflection in $\mathbb{R}^{4}$ with respect to $V$,
${\underline{\mathsf{R}}}_V: \mathbb{R}^{4} \to \mathbb{R}^{4} $, by
\begin{equation}
\label{reflV}
{\underline{\mathsf{R}}}_V:= \Pi_V - \Pi_{V^\perp},
\end{equation}
where $\Pi_V$ and $\Pi_{V^\perp}$ are the orthogonal projections of $\mathbb{R}^{4}$ onto $V$ and $V^\perp$ respectively.
Alternatively
${\underline{\mathsf{R}}}_V: \mathbb{R}^{4 } \to \mathbb{R}^{4 } $ is the linear map which restricts to the identity on $V$
and minus the identity on $V^\perp$.
Clearly the fixed point set of ${\underline{\mathsf{R}}}_V$ is $V$.
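As a simple illustration, if $V$ is the span of the first two standard coordinate vectors of $\mathbb{R}^{4}$, then
\begin{equation*}
{\underline{\mathsf{R}}}_V(x^1,x^2,x^3,x^4)=(x^1,x^2,-x^3,-x^4),
\end{equation*}
whose fixed point set is indeed $V$.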
\begin{definition}[Reflections ${\underline{\mathsf{R}}}_A$]
\label{D:refl}
Given any
$A\subset \mathbb{S}^3 \subset \mathbb{R}^{4 }$,
we define $A^\perp:=\left(\,Span(A) \, \right)^\perp \cap \mathbb{S}^3 $
and
${\underline{\mathsf{R}}}_A : \mathbb{S}^3 \to \mathbb{S}^3 $ to be the restriction to $\mathbb{S}^3$ of ${\underline{\mathsf{R}}}_{\operatorname{Span}(A)}$.
Occasionally we will use simplified notation:
for example for $A$ as before and $p\in\mathbb{S}^3$ we may write $\mathbb{S}(A,p)$ and ${\underline{\mathsf{R}}}_{A,p}$ instead of $\mathbb{S}(A\cup\{p\})$ and ${\underline{\mathsf{R}}}_{A\cup\{p\}}$
respectively.
\qed
\end{definition}
Note that the set of fixed points of ${\underline{\mathsf{R}}}_A$ above is $\mathbb{S}(A)$ as in notation \ref{span},
which is $\mathbb{S}^3$,
or a great two-sphere,
or a great circle,
or the set of two antipodal points,
or the empty set,
depending on the dimension of $\operatorname{Span}(A)$.
Following now the notation in \cite{choe:hoppe}, we have the following.
\begin{definition}[The cone construction]
\label{D:cone}
For $p,q\in\mathbb{S}^3$ which are not antipodal we denote
the minimizing geodesic segment joining them by $\overline{pq}$.
For $A,B\subset\mathbb{S}^3$ such that no point of $A$ is antipodal to a point of $B$
we define the cone of $A$ and $B$ in $\mathbb{S}^3$ by
$$
A\mbox{$\times \hspace*{-0.244cm} \times$} B := \bigcup_{p\in A, \, q\in B} \overline{pq}.
$$
If $A$ or $B$ contains only one point we write the point instead of $A$ or $B$ respectively;
we have then $p\mbox{$\times \hspace*{-0.244cm} \times$} q = \overline{pq} $
for any
$p,q\in\mathbb{S}^3$ which are not antipodal.
More generally,
given linearly independent $p_1,\cdots,p_k\in \mathbb{S}^3$,
we define inductively for $k\ge3$
$
\overline{p_1\cdots p_k} := p_k \mbox{$\times \hspace*{-0.244cm} \times$} \overline{p_1\cdots p_{k-1} }.
$
\qed
\end{definition}
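As a simple illustration, if $e_1,e_2,e_3$ denote the first three standard coordinate vectors of $\mathbb{R}^{4}$, then
$\overline{e_1e_2e_3}=e_3\mbox{$\times \hspace*{-0.244cm} \times$}\overline{e_1e_2}$
is the closed geodesic triangle in $\mathbb{S}(\{e_1,e_2,e_3\})$ with vertices $e_1$, $e_2$, and $e_3$,
that is, the set of points of $\mathbb{S}(\{e_1,e_2,e_3\})$ having nonnegative inner product with each of $e_1$, $e_2$, and $e_3$.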
If $\mathscr{G}$ is a group acting on a set $B$ and if $A$ is a subset of $B$,
then we refer to the subgroup
\begin{equation}
\label{stab}
\operatorname{Stab}_{\mathscr{G}}(A):=\{ \mathbf{g} \in \mathscr{G} \; | \; \mathbf{g}A = A \}
\end{equation}
as the \emph{stabilizer} of $A$ in $\mathscr{G}$.
When $A$ is a subset of the round $3$-sphere, we will set
\begin{equation}
\label{Gsym}
\mathscr{G}_{sym}^A:=\operatorname{Stab}_{{O(4)}} A.
\end{equation}
In the next definition we find it convenient to work with piecewise-smooth functions on a domain in a surface.
By this we mean that each such function is continuous on the domain,
the domain can be subdivided into domains by a finite union of piecewise-smooth embedded curves,
and on the closure of each of these domains the function is smooth.
We use $C_{pw}^\infty({\underline{U}})$ to denote the space of piecewise-smooth functions on a domain ${\underline{U}}$.
\begin{definition}[Eigenvalues]
\label{D:eigen}
We assume given a compact domain ${\underline{U}}$ in a smooth surface equipped with a Riemannian metric ${\underline{g}}$,
a smooth function $f$ on ${\underline{U}}$,
and a linear space of piecewise-smooth functions $V'\subset C_{pw}^\infty({\underline{U}})$ which is invariant under the Schr\"{o}dinger operator
$\mathcal{L}'=\Delta_{{\underline{g}}}+f$
defined on ${\underline{U}}$.
We define
$\lambda_i(V',\mathcal{L}')$ to be the $i^{th}$ eigenvalue,
where we are counting in non-decreasing order and with multiplicity.
(Note also that we follow the conventions which make the eigenvalues of the Laplacian on a closed surface nonnegative.)
Moreover for $\lambda\in\mathbb{R}$ we denote by
$\#_{<\lambda}(V',\mathcal{L}' )$,
$\#_{=\lambda}(V',\mathcal{L}' )$,
and
$\#_{\le\lambda}(V',\mathcal{L}' )$,
the number of eigenvalues $\lambda_i(V',\mathcal{L}')$ which are $<\lambda$, or $=\lambda$, or $\le\lambda$, respectively.
We also define the \emph{index of $\mathcal{L}'$ on $V'$},
$\operatorname{Ind}(V',\mathcal{L}' ):= \#_{<0}(V',\mathcal{L}')$,
and
the \emph{nullity of $\mathcal{L}'$ on $V'$},
$\operatorname{Null}(V',\mathcal{L}'):= \#_{=0}(V',\mathcal{L}')$.
Finally note that we may omit $\mathcal{L}'$ from the notation when it can be inferred from the context.
\qed
\end{definition}
\begin{definition}[Eigenvalue equivalence]
\label{D:eigen-eq}
Suppose $\mathcal{L}'$, ${\underline{U}}$, and $V'$ are as in \ref{D:eigen}
and $\mathcal{L}''$, ${\underline{U}}''$, and $V''$ satisfy correspondingly the same conditions.
We define $V'\sim_{\mathcal{L}', \mathcal{L}''} V''$---or $V'\sim V''$ if the operators are understood from the context---to mean
that there is a linear isomorphism $\mathcal{F}:V'\to V''$ such that the following holds:
$\forall f'\in V'$, $f'$ is an eigenfunction with respect to $\mathcal{L}'$ if and only if
$\mathcal{F}(f')$ is an eigenfunction with respect to $\mathcal{L}''$ with the same eigenvalue as $f'$.
We say then that $\mathcal{L}'$ on $V'$ and $\mathcal{L}''$ on $V''$ are \emph{eigenvalue equivalent}.
\qed
\end{definition}
Note that clearly if \ref{D:eigen-eq} holds,
then $\forall i \in \mathbb{N}$ we have $\lambda_i(V' , \mathcal{L}' ) = \lambda_i(V'' , \mathcal{L}'' ) $.
In this article we will say that a function satisfies the \emph{Dirichlet condition} on a curve if it vanishes there and the \emph{Neumann condition} if
its derivative along the normal to the curve vanishes.
\begin{definition}[Eigenvalues for mixed Dirichlet and Neumann boundary conditions]
\label{D:mixed}
Suppose $\mathcal{L}'$ and ${\underline{U}}$ are as in \ref{D:eigen} and moreover
the boundary $\partial{\underline{U}}$ is piecewise-smooth and can be decomposed as $\partial {\underline{U}}= \partial_D{\underline{U}} \cup \partial_N{\underline{U}}$---note that
$\partial_D{\underline{U}}$, $\partial_N{\underline{U}}$ can be empty.
We define then the following for $i\in\mathbb{N}$ and $\lambda\in\mathbb{R}$:
\\
(i)
$C_{pw}^\infty[{\underline{U}};\partial_D{\underline{U}},\partial_N{\underline{U}}]$ to be the space of piecewise-smooth functions on ${\underline{U}}$ which satisfy
the Dirichlet condition on $\partial_D{\underline{U}}$ and the Neumann condition on $\partial_N{\underline{U}}$;
\\
(ii)
$\lambda_i[\mathcal{L}',{\underline{U}};\partial_D{\underline{U}},\partial_N{\underline{U}}] := \lambda_i( \, \mathcal{L}', C_{pw}^\infty[{\underline{U}};\partial_D{\underline{U}},\partial_N{\underline{U}}] \,)$;
\\
(iii)
$\#_{<\lambda}[\mathcal{L}',{\underline{U}};\partial_D{\underline{U}},\partial_N{\underline{U}}] := \#_{<\lambda}( \, \mathcal{L}', C_{pw}^\infty[{\underline{U}};\partial_D{\underline{U}},\partial_N{\underline{U}}] \,)$
and similarly for ``$=\lambda$'' and ``$\le\lambda$''.
\qed
\end{definition}
\subsection*{Acknowledgments}
$\phantom{ab}$
\nopagebreak
The authors would like to thank Richard Schoen for his continuous support and interest in the results of this article
and Otis Chodosh for bringing this problem to their attention.
NK was partially supported by NSF grant DMS-1405537.
NK would also like to thank, for their hospitality and support, the Institute for Advanced Study at Princeton during the 2018 Fall term,
and the University of California, Irvine, during Spring 2019.
Finally we would like to thank Robert Kusner and Richard Schoen for pointing out that our theorem
implies isolatedness for the surfaces involved.
\section{Basic spherical geometry}
\subsection*{Rotations along or about great circles}
$\phantom{ab}$
\nopagebreak
Note that by \ref{D:refl},
$C^\perp$ is the great circle furthest from a given great circle $C$ in $\mathbb{S}^3$.
(Note that the points of $C^\perp$ are at distance $\pi/2$ in $\mathbb{S}^3$ from $C$ and any point of $\mathbb{S}^3\setminus C^\perp$
is at distance $<\pi/2$ from $C$).
Equivalently $C^\perp$ is the set
of poles of great hemispheres with equator $C$;
therefore $C$ and $C^\perp$ are linked.
The group
$\mathscr{G}_{sym}^{C \cup C^\perp } $
contains
$\mathscr{G}_{sym}^{C } = \mathscr{G}_{sym}^{C^\perp } $
(which includes arbitrary rotation or reflection in the two circles)
and includes also
orthogonal transformations exchanging
$C$ with $C^\perp$.
\begin{definition}[Rotations $\mathsf{R}_C^\phi$, $\mathsf{R}^C_\phi$ and Killing fields $K_{C}$, $K^{C}$]
\label{l:D:rot}
Given a great circle $C\subset\mathbb{S}^3$, $\phi \in \mathbb{R}$,
and an orientation chosen on the totally orthogonal circle $C^\perp$,
we define the following:
\newline
(i)
the rotation about $C$ by angle $\phi$
is the element $\mathsf{R}_C^\phi$ of $SO(4)$ preserving $C$ pointwise
and rotating the totally orthogonal circle $C^\perp$ along itself by angle $\phi$
(in accordance with its chosen orientation);
\newline
(ii)
the Killing field $K_{C}$ on $\mathbb{S}^3$
and the normalized Killing field $\widetilde{K}_{C}$ on $\mathbb{S}^3 \setminus C $
are given by
$\left.\phantom{\frac12}K_{C}\right|_p := \left. \frac{\partial}{\partial\phi} \right|_{\phi=0} \mathsf{R}_{C}^\phi(p)$
$\forall p\in\mathbb{S}^3$
and
$\left.\widetilde{K}_{C}\right|_p \, := \,
\frac{ \left.K_{C}\right|_p }{ \left| \left.K_{C}\right|_p \right| }
$
$\forall p\in\mathbb{S}^3\setminus C$.
Assuming now an orientation chosen on $C$ we define the following:
\newline
(iii)
the rotation along $C$ by angle $\phi$ is $\mathsf{R}^C_\phi := \mathsf{R}_{C^\perp}^\phi$;
\newline
(iv)
the Killing field $K^{C}:=K_{C^\perp}$ on $\mathbb{S}^3$
and the normalized Killing field $\widetilde{K}^{C}:=\widetilde{K}_{C^\perp}$ on $\mathbb{S}^3\setminus C^\perp$.
\qed
\end{definition}
Note that $\mathsf{R}^C_\phi = \mathsf{R}_{C^\perp}^\phi$ resembles a translation along $C$,
while in the vicinity of $C^\perp$ it is a rotation.
Note also that $K_{C}$ is defined to be a rotational Killing field around $C$,
vanishing on $C$ and restricting on ${C}^\perp$ to the unit velocity field of ${C}^\perp$.
\begin{lemma}[Orbits]
\label{orbits}
For $K^C$ as in \ref{l:D:rot}, the orbits of $K^C$ (that is its flowlines) are planar circles
and $\forall\mathsf{p}\in C $ each orbit intersects the closed hemisphere $C^\perp \mbox{$\times \hspace*{-0.244cm} \times$} \mathsf{p}$ exactly once.
Moreover the intersection (when nontrivial) is orthogonal.
\end{lemma}
\begin{proof}
This is straightforward to check already in $\mathbb{R}^4$ with the
hemisphere $C^\perp \mbox{$\times \hspace*{-0.244cm} \times$} \mathsf{p}$ replaced by the half-three-plane containing $\mathsf{p}$ and with boundary $\operatorname{Span}(C^\perp)$.
By restricting then to $\mathbb{S}^3$ the result follows.
\end{proof}
This lemma allows us to define a projection which effectively identifies the space of orbits under discussion
with a closed hemisphere:
\begin{definition}[Projections by rotations]
\label{Pi}
For $C$ and $\mathsf{p}$ as in \ref{orbits}
we define the smooth map
$\Pi^C_\mathsf{p} :\mathbb{S}^3\to C^\perp \mbox{$\times \hspace*{-0.244cm} \times$} \mathsf{p}$ by requiring $\Pi^C_\mathsf{p} x$ to be the intersection of $C^\perp \mbox{$\times \hspace*{-0.244cm} \times$} \mathsf{p}$ with the orbit of $K^C$ containing $x$,
for any $x \in\mathbb{S}^3$.
\end{definition}
\begin{definition}[Graphical sets]
\label{graphical}
A set $A\subset \mathbb{S}^3$ is called \emph{graphical with respect to $K^C$} (with $C$ as above)
if each orbit of $K^C$ intersects $A$ at most once.
If moreover $A$ is a submanifold and there are no orbits of $K^C$ which are tangent to $A$,
then $A$ is called
\emph{strongly graphical with respect to $K^C$}.
\end{definition}
\subsection*{The geometry of totally orthogonal circles}
$\phantom{ab}$
\nopagebreak
We fix now some $C$ and $C^\perp$ as above, and orientations on both.
(Of course, after choosing an orientation on $C$,
choosing an orientation on $C^\perp$ is equivalent to choosing an orientation on $\mathbb{S}^3$.)
We define $\forall \phi\in\mathbb{R}$ the points
\begin{equation}
\label{points}
\mathsf{p}_{\phi}=\mathsf{p}_{\phi}[C]:= \mathsf{R}_{C^\perp}^{\phi}\,\mathsf{p}_0\,\in \, C,
\qquad
\mathsf{p}^{\phi}=\mathsf{p}^{\phi}[C]:= \mathsf{R}_{C}^{\phi}\,\mathsf{p}^0\,\in \, C^{\perp},
\end{equation}
where $\mathsf{p}_0,\mathsf{p}^0$ are arbitrarily fixed points on $C$ and $C^{\perp}$ respectively.
Note that we will routinely omit $[C]$ when understood from the context.
Using \ref{span} we further define $\forall \phi\in\mathbb{R}$ the great spheres
\begin{equation}
\label{hemispheres}
\Sigma^\phi = \Sigma^\phi [C] := \,\mathbb{S}( C , \mathsf{p}^\phi ) ,
\qquad
\Sigma_\phi = \Sigma_\phi [C] := \, \mathbb{S}( C^\perp , \mathsf{p}_\phi),
\end{equation}
and $\forall\phi,\phi'\in\mathbb{R}$
the great circles
\begin{equation}
\label{circles}
C_\phi^{\phi'} = C_\phi^{\phi'} [C] := \mathbb{S}( \, \mathsf{p}_\phi, \mathsf{p}^{\phi'} \, ).
\end{equation}
\begin{definition}[Coordinates on $\mathbb{R}^4$]
\label{D:coordinates}
Given $C$ as above and points as in \ref{points},
we define coordinates $(x^1,x^2,x^3,x^4)$ on $\mathbb{R}^4\supset\mathbb{S}^3$ by requiring that
$$
\mathsf{p}_0=(1,0,0,0), \qquad \mathsf{p}_{\pi/2}=(0,1,0,0), \qquad \mathsf{p}^0=(0,0,1,0), \qquad \mathsf{p}^{\pi/2}=(0,0,0,1).
$$
\end{definition}
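In these coordinates one checks directly from \eqref{points} and \ref{l:D:rot} that
$$
\mathsf{p}_{\phi}=(\cos\phi,\sin\phi,0,0),
\qquad
\mathsf{p}^{\phi}=(0,0,\cos\phi,\sin\phi),
\qquad
\forall\phi\in\mathbb{R},
$$
and that $\mathsf{R}_{C^\perp}^\phi$ (respectively $\mathsf{R}_{C}^\phi$) is the rotation by angle $\phi$ of the $(x^1,x^2)$-plane (respectively of the $(x^3,x^4)$-plane) taking $\mathsf{p}_0$ to $\mathsf{p}_\phi$ (respectively $\mathsf{p}^0$ to $\mathsf{p}^\phi$), while fixing the complementary coordinate $2$-plane pointwise.
We record these formulas only as a convenient reference for the routine coordinate verifications below.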
\begin{lemma}[Basic geometry related to $C$ and $C^\perp$]
\label{L:obs}
The following hold $\forall\phi,\phi', \phi_1,\phi'_1,\phi_2,\phi'_2 \in\mathbb{R}$.
\\
(i) $ \mathsf{p}_{\phi+\pi} = - \mathsf{p}_\phi $ and
$ \mathsf{p}^{\phi+\pi} = - \mathsf{p}^\phi $.
Similarly
$ \Sigma_{\phi+\pi} = \Sigma_\phi $ and
$ \Sigma^{\phi+\pi} = \Sigma^\phi $.
\\
(ii)
$C_\phi^{\phi'} \cap C = \{ \mathsf{p}_\phi , \mathsf{p}_{\phi+\pi} \} $
and
$C_\phi^{\phi'} \cap C^\perp = \{ \mathsf{p}^{\phi'} , \mathsf{p}^{\phi'+\pi} \} $
with orthogonal intersections.
Moreover
$
C_\phi^{\phi'}
=
\overline{ \mathsf{p}_\phi \mathsf{p}^{\phi'} } \, \cup \,
\overline{ \mathsf{p}^{\phi'} \mathsf{p}_{\phi+\pi} } \, \cup \,
\overline{ \mathsf{p}_{\phi+\pi} \mathsf{p}^{\phi'+\pi} } \, \cup \,
\overline{ \mathsf{p}^{\phi'+\pi} \mathsf{p}_\phi }
.
$
\\
(iii)
$C \mbox{$\times \hspace*{-0.244cm} \times$} \mathsf{p}^\phi$ and $C^\perp \mbox{$\times \hspace*{-0.244cm} \times$} \mathsf{p}_\phi$
are closed great hemispheres
with boundary $C$ and $C^\perp$ and poles $\mathsf{p}^\phi$ and $\mathsf{p}_\phi$ respectively.
\\
(iv)
$\Sigma_\phi = ( C^\perp \mbox{$\times \hspace*{-0.244cm} \times$} \mathsf{p}_\phi ) \cup ( C^\perp \mbox{$\times \hspace*{-0.244cm} \times$} \mathsf{p}_{\phi+\pi} )$
and
$\Sigma^\phi = ( C \mbox{$\times \hspace*{-0.244cm} \times$} \mathsf{p}^\phi ) \cup ( C \mbox{$\times \hspace*{-0.244cm} \times$} \mathsf{p}^{\phi+\pi} )$.
\\
(v)
$\Sigma^\phi \cap C^\perp = \{ \mathsf{p}^\phi , \mathsf{p}^{\phi+\pi} \}$
and
$\Sigma_\phi \cap C = \{ \mathsf{p}_\phi, \mathsf{p}_{\phi+\pi}\}$
with orthogonal intersections.
\\
(vi)
$C_\phi^{\phi'} = \Sigma_\phi \cap \Sigma^{\phi'}$
with orthogonal intersection.
\\
(vii)
$\left( { C_\phi^{\phi'} } \right)^\perp = C_{\phi+\pi/2}^{\phi'+\pi/2}$.
\\
(viii)
$\Sigma^\phi \cap \Sigma^{\phi'} = C$ unless $\phi=\phi' \pmod \pi$
in which case
$\Sigma^\phi = \Sigma^{\phi'}$.
Similarly
$\Sigma_\phi \cap \Sigma_{\phi'} = C^\perp$ unless $\phi=\phi' \pmod \pi$
in which case
$\Sigma_\phi = \Sigma_{\phi'}$.
In both cases the intersection angle is $\phi'-\phi \pmod \pi$.
\\
(ix)
$C_{\phi_1}^{\phi_1'} \cap C_{\phi_2}^{\phi_2'} = \emptyset$
unless
$\phi_1=\phi_2 \pmod \pi$
or
$\phi'_1=\phi'_2 \pmod \pi$.
If both conditions hold then
$C_{\phi_1}^{\phi_1'} = C_{\phi_2}^{\phi_2'}$.
If only the first condition holds then
$C_{\phi_1}^{\phi_1'} \cap C_{\phi_2}^{\phi_2'} =
\{ \mathsf{p}_{\phi_1} , \mathsf{p}_{\phi_1+\pi} \}$
with intersection angle equal to $\phi'_2-\phi'_1 \pmod \pi$.
If only the second condition holds then
$C_{\phi_1}^{\phi_1'} \cap C_{\phi_2}^{\phi_2'} =
\{ \mathsf{p}^{\phi_2} , \mathsf{p}^{\phi_2+\pi} \}$
with intersection angle equal to $\phi_2-\phi_1 \pmod \pi$.
\end{lemma}
\begin{proof}
It is straightforward to verify all these statements by using the coordinates defined in
\ref{D:coordinates}.
\end{proof}
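As a sample of such a verification, consider (vii): with $\mathsf{p}_\phi=(\cos\phi,\sin\phi,0,0)$ and $\mathsf{p}^{\phi'}=(0,0,\cos\phi',\sin\phi')$ we have
$$
\mathsf{p}_\phi\cdot\mathsf{p}_{\phi+\pi/2}=\cos\tfrac\pi2=0,
\qquad
\mathsf{p}^{\phi'}\cdot\mathsf{p}^{\phi'+\pi/2}=\cos\tfrac\pi2=0,
$$
while $\mathsf{p}_\phi\cdot\mathsf{p}^{\phi'+\pi/2}=\mathsf{p}^{\phi'}\cdot\mathsf{p}_{\phi+\pi/2}=0$ trivially,
so $\operatorname{Span}(C_{\phi}^{\phi'})$ and $\operatorname{Span}(C_{\phi+\pi/2}^{\phi'+\pi/2})$ are orthogonal complements in $\mathbb{R}^4$
and hence $\left( { C_\phi^{\phi'} } \right)^\perp = C_{\phi+\pi/2}^{\phi'+\pi/2}$.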
\begin{definition}[Symmetries of Killing fields]
\label{DKsymm}
We call a Killing field $K$ \emph{even (odd) under an isometry ${\underline{\mathsf{R}}}$} if it satisfies
${{\underline{\mathsf{R}}}}_* \circ K = K \circ {\underline{\mathsf{R}}}$
(${{\underline{\mathsf{R}}}}_* \circ K = - K \circ {\underline{\mathsf{R}}}$).
\qed
\end{definition}
\begin{lemma}[Some symmetries of Killing fields]
\label{Ksymm}
The following hold $\forall \phi,\phi'\in\mathbb{R}$.
\\
(i)
$K_C$ is
odd under ${\underline{\mathsf{R}}}_{\Sigma^{\phi}}$
and
${\underline{\mathsf{R}}}_{ C_{ \phi }^{ \phi' } }$
and even under ${\underline{\mathsf{R}}}_{\Sigma_{\phi}}$.
\\
(ii)
$K_{C^\perp}$ is
odd under ${\underline{\mathsf{R}}}_{\Sigma_{\phi}}$
and
${\underline{\mathsf{R}}}_{ C_{ \phi }^{ \phi' } }$
and even under ${\underline{\mathsf{R}}}_{\Sigma^{\phi}}$.
\\
(iii)
$K_{ C_{ \phi }^{ \phi' } }$ is odd under
${\underline{\mathsf{R}}}_{\Sigma_\phi}$ and ${\underline{\mathsf{R}}}_{\Sigma^{\phi'}}$
and even under
${\underline{\mathsf{R}}}_{\Sigma_{\phi+\pi/2}}$ and ${\underline{\mathsf{R}}}_{\Sigma^{\phi'+\pi/2}}$.
Moreover
${\Sigma_{\phi+\pi/2}}$ and ${\Sigma^{\phi'+\pi/2}}$
are preserved under the flow of
$K_{ C_{ \phi }^{ \phi' } }$ and contain the fixed points $\pm\mathsf{p}^{\phi'}\in {\Sigma_{\phi+\pi/2}}$ and $\pm\mathsf{p}_{\phi}\in {\Sigma^{\phi'+\pi/2}}$
and the geodesic orbit
${ C_{ \phi +\pi/2 }^{ \phi' +\pi/2 } } = {\Sigma_{\phi+\pi/2}}\cap {\Sigma^{\phi'+\pi/2}}$.
\end{lemma}
\begin{proof}
For any great circle $C'$ we have that $K_{C'}$ is even (odd) with respect to a reflection ${\underline{\mathsf{R}}}$
if and only if ${\underline{\mathsf{R}}}(C'^\perp) = C'^\perp$ and ${\underline{\mathsf{R}}}$ respects (reverses) the orientation of $C'^\perp$.
Applying this it is straightforward to confirm the lemma.
\end{proof}
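For instance, for the first assertion of (i): ${\underline{\mathsf{R}}}_{\Sigma^{\phi}}$ fixes $C$ pointwise, hence maps $C^\perp$ to itself,
and it fixes $\mathsf{p}^{\phi}$ while exchanging $\mathsf{p}^{\phi+\pi/2}$ and $\mathsf{p}^{\phi-\pi/2}$,
so it reverses the orientation of $C^\perp$;
by the criterion just stated, $K_C$ is therefore odd under ${\underline{\mathsf{R}}}_{\Sigma^{\phi}}$.
The remaining assertions can be checked in the same way.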
\section{Tessellations of $\mathbb{S}^3$}
\subsection*{Lawson tessellations}
$\phantom{ab}$
\nopagebreak
Our purpose is to study the Lawson surfaces $\xi_{m-1,1}$ \cite{Lawson},
which have genus $g=m-1$ and can be viewed as desingularizations of $\Sigma^{\pi/4}\cup \Sigma^{-\pi/4}$,
where $m\ge3$, $m\in\mathbb{N}$.
With this goal it is helpful to introduce the notation
\begin{equation}
\label{qpoints}
\begin{aligned}
t_{i'} :=& {(2i'-1)\frac\pi{2m}} \in \mathbb{R} ,
\qquad &
t^{j'} :=& {(2j'-1)\frac\pi{4}} \in \mathbb{R} ,
\\
\mathsf{q}_{i'} :=& \mathsf{p}_{ t_{i'} } \in C ,
\qquad &
\mathsf{q}^{j'} :=& \mathsf{p}^{ t^{j'} } \in C^\perp ,
\end{aligned}
\qquad
\forall i',j'\in\frac12\mathbb{Z}.
\end{equation}
Note that we have then $2m$ points $\mathsf{q}_{i}$ for $i\in\mathbb{Z}$ subdividing $C$ into $2m$ equal arcs of length $\pi/m$ each,
and $4$ points $\mathsf{q}^j$ for $j\in\mathbb{Z}$ subdividing $C^\perp$ into $4$ arcs of length $\pi/2$ each.
$\mathsf{q}_{i+\frac12}$ is the midpoint of $\overline{\, \mathsf{q}_i \mathsf{q}_{i+1} \,}$ for each $i\in\mathbb{Z}$ and
$\mathsf{q}^{j+\frac12}$ is the midpoint of $\overline{\, \mathsf{q}^j \mathsf{q}^{j+1} \,}$ for each $j\in\mathbb{Z}$.
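For instance, in the smallest admissible case $m=3$ (genus $2$) the points $\mathsf{q}_1,\dots,\mathsf{q}_6$ are
$\mathsf{p}_{\pi/6}$, $\mathsf{p}_{\pi/2}$, $\mathsf{p}_{5\pi/6}$, $\mathsf{p}_{7\pi/6}$, $\mathsf{p}_{3\pi/2}$, and $\mathsf{p}_{11\pi/6}$ on $C$,
while $\mathsf{q}^1,\dots,\mathsf{q}^4$ are
$\mathsf{p}^{\pi/4}$, $\mathsf{p}^{3\pi/4}$, $\mathsf{p}^{5\pi/4}$, and $\mathsf{p}^{7\pi/4}$ on $C^\perp$
(the latter four points being independent of $m$).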
We define now $\forall i,j\in\mathbb{Z}$ compact domains
$\Omega_i,\Omega^j,\Omega_i^j$ by
\begin{equation}
\label{Om}
\begin{gathered}
\Omega_i :=
C^\perp \mbox{$\times \hspace*{-0.244cm} \times$} \overline{\, \mathsf{q}_i \mathsf{q}_{i+1} \, } ,
\qquad
\Omega^j :=
C \mbox{$\times \hspace*{-0.244cm} \times$} \overline{\, \mathsf{q}^{j} \mathsf{q}^{j+1} \, },
\qquad
\Omega_i^j := \Omega_i \cap \Omega^j =
\overline{\, \mathsf{q}_i \mathsf{q}_{i+1} \mathsf{q}^{j} \mathsf{q}^{j+1} \, } \, .
\end{gathered}
\end{equation}
Clearly we have then the decompositions with disjoint interiors
\begin{equation}
\label{Omdec}
\mathbb{S}^3=\bigcup_{i=0}^{2m-1} \Omega_i
=\bigcup_{j=0}^3 \Omega^j
=\bigcup_{i=0}^{2m-1} \bigcup_{j=0}^3 \Omega_i^j.
\end{equation}
Note that
\begin{equation}
\label{Omrot}
\Omega_i= \mathsf{R}_{C^\perp}^{(i-i')\frac\pi{m} } \, \Omega_{i'},
\qquad
\Omega^j= \mathsf{R}_{C}^{(j-j')\frac\pi{2} } \, \Omega^{j'},
\qquad
\Omega_i^j=\mathsf{R}_{C^\perp}^{(i-i')\frac\pi{m} } \, \mathsf{R}_{C}^{(j-j')\frac\pi{2} } \, \Omega_{i'}^{j'} .
\end{equation}
Moreover we have
\begin{equation}
\label{Omboundary}
\begin{gathered}
\partial \Omega_i =
C^\perp \mbox{$\times \hspace*{-0.244cm} \times$} \{ \mathsf{q}_i, \mathsf{q}_{i+1} \} ,
\\
\partial \Omega^j
= C \mbox{$\times \hspace*{-0.244cm} \times$} \{ \mathsf{q}^j, \mathsf{q}^{j+1} \} .
\end{gathered}
\end{equation}
\begin{lemma}[Properties of $\Omega_i^j$]
\label{Om:p}
$\forall i,j\in\mathbb{Z}$, $\Omega_i^j$ is a spherical tetrahedron and satisfies the following.
\\
(i)
Its faces are the spherical triangles
$\overline{ \, \mathsf{q}_{ i } \mathsf{q}^{ j } \mathsf{q}^{ j+1 } \, } $,
$\overline{ \, \mathsf{q}_{ i+1 } \mathsf{q}^{ j } \mathsf{q}^{ j+1 } \, } $,
$\overline{ \, \mathsf{q}_{ i } \mathsf{q}_{ i+1 } \mathsf{q}^{ j } \, } $,
and
$\overline{ \, \mathsf{q}_{ i } \mathsf{q}_{ i+1 } \mathsf{q}^{ j+1 } \, } $.
\\
(ii)
Its dihedral angles are all $\pi/2$ except for the one along
$\overline{ \, \mathsf{q}^{ j } \mathsf{q}^{ j+1 } \, } $
which is $\pi/m$.
\\
(iii)
It is bisected by the spherical triangles
$\overline{ \, \mathsf{q}_{ i+\frac12 } \mathsf{q}^{ j } \mathsf{q}^{ j+1 } \, } $
and
$\overline{ \, \mathsf{q}_{ i } \mathsf{q}_{ i+1 } \mathsf{q}^{ j+\frac12 } \, } $
and its symmetries are given by (${\underline{\mathsf{R}}}_{\mathbb{S}^3}$ is the identity map on $\mathbb{S}^3$)
\begin{equation}
\label{Omsym}
\mathscr{G}_{sym}^{\Omega_i^j}= \{ \, {\underline{\mathsf{R}}}_{\mathbb{S}^3},
{\underline{\mathsf{R}}}_{\Sigma_{i\pi/m}} ,
{\underline{\mathsf{R}}}_{\Sigma^{j\pi/2}} ,
{\underline{\mathsf{R}}}_{C_{i\pi/m}^{j\pi/2} } \, \}
\simeq \mathbb{Z}_2\times \mathbb{Z}_2
.
\end{equation}
(iv) It is convex in the sense that $\overline{xy}\subset\Omega_i^j$ $\forall x,y\in \Omega_i^j$.
\end{lemma}
\begin{proof}
It is straightforward to check all these statements by using the definitions, together with the assumption $m>2$ for (iii).
\end{proof}
\ref{Om:p}.iii
motivates us to define $\forall i,j\in\mathbb{Z}$,
by modifying \ref{Om},
compact domains
$\Omega_{i\pm},\Omega^{j\pm},\Omega_{i\pm}^{j\pm}$ by
\begin{equation}
\label{Ompm}
\begin{gathered}
\Omega_{i\pm} := \,
C^\perp \mbox{$\times \hspace*{-0.244cm} \times$} \overline{\, \mathsf{q}_{i+\frac12 } \mathsf{q}_{i+\frac12\pm\frac12} \, } ,
\qquad
\Omega^{j\pm} := \,
C \mbox{$\times \hspace*{-0.244cm} \times$} \overline{\, \mathsf{q}^{j+\frac12 } \mathsf{q}^{j+\frac12\pm\frac12} \, },
\\
\Omega_{i\pm}^{j\pm} := \, \Omega_{i\pm} \cap \Omega^{j\pm}=
\overline{\, \mathsf{q}_{i+\frac12} \mathsf{q}_{i+\frac12\pm\frac12} \mathsf{q}^{j+\frac12} \mathsf{q}^{j+\frac12\pm\frac12} \, } \, ,
\\
\Omega_{i}^{j\pm} := \, \Omega_{i} \cap \Omega^{j\pm}=
\overline{\, \mathsf{q}_i \mathsf{q}_{i+1} \mathsf{q}^{j+\frac12} \mathsf{q}^{j+\frac12\pm\frac12} \, } \, ,
\\
\Omega_{i\pm}^{j} := \, \Omega_{i\pm} \cap \Omega^{j}=
\overline{\, \mathsf{q}_{i+\frac12} \mathsf{q}_{i+\frac12\pm\frac12} \mathsf{q}^{j} \mathsf{q}^{j+1} \, } \, .
\end{gathered}
\end{equation}
We have then various decompositions with disjoint interiors, for example
\begin{equation}
\label{Ompmdec}
\Omega_i^j= \Omega_{i-}^{j-} \cup \Omega_{i+}^{j-} \cup \Omega_{i-}^{j+} \cup \Omega_{i+}^{j+},
\qquad
\Omega_{i+}^j=\Omega_{i+}^{j-} \cup \Omega_{i+}^{j+}.
\end{equation}
Note also that
\begin{equation}
\label{Omrefl}
\Omega_{i+}^{j-} = {\underline{\mathsf{R}}}_{\Sigma^{j\pi/2}} \Omega_{i+}^{j+} ,
\qquad
\Omega_{i-}^{j+} = {\underline{\mathsf{R}}}_{\Sigma_{i\pi/m}} \Omega_{i+}^{j+} ,
\qquad
\Omega_{i-}^{j-} = {\underline{\mathsf{R}}}_{C_{i\pi/m}^{j\pi/2} } \Omega_{i+}^{j+} .
\end{equation}
Moreover
all four tetrahedra $\Omega_{i\pm}^{j\pm}$ have
$\overline{ \mathsf{p}_{ i\frac\pi m } \mathsf{p}^{ j\frac\pi 2 } } = \overline{ \mathsf{q}_{i+\frac12} \mathsf{q}^{ j+\frac12 } } $
as a common edge and adjacent ones have
common faces given by
$\Omega_{i-}^{j+} \cap \Omega_{i-}^{j-} = \overline{\, \mathsf{q}_{i+\frac12} \mathsf{q}_{i} \mathsf{q}^{j+\frac12} \, } $,
$\Omega_{i+}^{j+} \cap \Omega_{i+}^{j-} = \overline{\, \mathsf{q}_{i+\frac12} \mathsf{q}_{i+1} \mathsf{q}^{j+\frac12} \, } $,
$\Omega_{i-}^{j-} \cap \Omega_{i+}^{j-} = \overline{\, \mathsf{q}_{i+\frac12} \mathsf{q}^{j+\frac12} \mathsf{q}^{j} \, } $,
and
$\Omega_{i-}^{j+} \cap \Omega_{i+}^{j+} = \overline{\, \mathsf{q}_{i+\frac12} \mathsf{q}^{j+\frac12} \mathsf{q}^{j+1} \, } $.
\subsection*{Subdividing $\mathbb{S}^3$ with mutually orthogonal two-spheres}
$\phantom{ab}$
\nopagebreak
Note that by \ref{L:obs}.vi,viii
$\Sigma^0$, $\Sigma^{\pi/2}$, $\Sigma_0$, and $\Sigma_{\pi/2}$
form a system of four mutually orthogonal two-spheres in $\mathbb{S}^3$.
We will later study the subdivisions these two-spheres effect on $\mathbb{S}^3$ and the Lawson surfaces.
To this end we define
$\Omegau_{**}^{\pm*}$,
$\Omegau_{**}^{*\pm}$,
$\Omegau^{**}_{\pm*}$,
and
$\Omegau^{**}_{*\pm}$,
to be the closures of the connected components into which $\mathbb{S}^3$ is subdivided by the removal of
$\Sigma^0$, $\Sigma^{\pi/2}$, $\Sigma_0$, or $\Sigma_{\pi/2}$ respectively,
chosen so that
\begin{equation}
\label{Omu+}
\mathsf{p}^{\pm\pi/2}\in \Omegau_{**}^{\pm*}, \quad
\mathsf{p}^{\frac{\pi}2\mp\frac{\pi}2}\in \Omegau_{**}^{*\pm}, \quad
\mathsf{p}_{\pm\pi/2}\in \Omegau^{**}_{\pm*}, \quad
\mathsf{p}_{\frac{\pi}2\mp\frac{\pi}2}\in \Omegau^{**}_{*\pm}.
\end{equation}
To further subdivide we replace $*$'s by $\pm$ signs to denote the corresponding intersections
of these domains; for example we have
\begin{equation}
\label{Omu+-}
\Omegau_{+-}^{-*} := \Omegau_{+*}^{**} \cap \Omegau_{*-}^{**} \cap \Omegau_{**}^{-*}.
\end{equation}
Clearly we have
\begin{equation}
\label{partialOmu+}
\partial \Omegau_{**}^{\pm*} = \Sigma^0 , \quad
\partial \Omegau_{**}^{*\pm} = \Sigma^{\pi/2} , \quad
\partial \Omegau^{**}_{\pm*} = \Sigma_0 , \quad
\partial \Omegau^{**}_{*\pm} = \Sigma_{\pi/2} .
\end{equation}
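Note that in the coordinates of \ref{D:coordinates} we have
$\Sigma^0=\mathbb{S}^3\cap\{x^4=0\}$, $\Sigma^{\pi/2}=\mathbb{S}^3\cap\{x^3=0\}$, $\Sigma_0=\mathbb{S}^3\cap\{x^2=0\}$, and $\Sigma_{\pi/2}=\mathbb{S}^3\cap\{x^1=0\}$,
so by \eqref{Omu+}
$$
\Omegau_{**}^{+*}=\mathbb{S}^3\cap\{x^4\ge0\},
\quad
\Omegau_{**}^{*+}=\mathbb{S}^3\cap\{x^3\ge0\},
\quad
\Omegau^{**}_{+*}=\mathbb{S}^3\cap\{x^2\ge0\},
\quad
\Omegau^{**}_{*+}=\mathbb{S}^3\cap\{x^1\ge0\},
$$
and in particular $\Omegau^{++}_{++}=\mathbb{S}^3\cap\{x^1,x^2,x^3,x^4\ge0\}$;
this makes the elementary properties collected in the next lemma transparent.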
\begin{lemma}[Elementary geometry of $\Omegau^{++}_{++}$]
\label{Omu:p}
$\Omegau^{++}_{++}$ is the spherical tetrahedron
$\overline{\mathsf{p}^0 \mathsf{p}^{\pi/2} \mathsf{p}_0 \mathsf{p}_{\pi/2} }$
and satisfies the following.
\\
(i)
Its faces are the spherical triangles
$\overline{\mathsf{p}_{\pi/2} \mathsf{p}^{\pi/2} \mathsf{p}^0} \subset \Sigma_{\pi/2}$,
$\overline{\mathsf{p}_{\pi/2} \mathsf{p}^{\pi/2} \mathsf{p}_0} \subset \Sigma^{\pi/2}$,
$\overline{\mathsf{p}_{0} \mathsf{p}^{\pi/2} \mathsf{p}^0} \subset \Sigma_0$,
$\overline{\mathsf{p}_{0} \mathsf{p}_{\pi/2} \mathsf{p}^0} \subset \Sigma^0$.
All angles of all faces are $\pi/2$.
\\
(ii)
All its edges have length $\pi/2$ and its dihedral angles are all $\pi/2$.
\\
(iii)
Its symmetry group is isomorphic to the symmetric group on its vertices.
$\Omegau^{++}_{++}$ is bisected by six spherical triangles including
$\overline{ \, \mathsf{p}_{ \pi/4 } \mathsf{p}^{ 0 } \mathsf{p}^{ \pi/2 } \, } $
and
$\overline{ \, \mathsf{p}_{ 0 } \mathsf{p}_{ \pi/2 } \mathsf{p}^{ \pi/4 } \, } $
and its symmetries include
${\underline{\mathsf{R}}}_{\Sigma_{\pi/4}}$,
${\underline{\mathsf{R}}}_{\Sigma^{\pi/4}}$,
and
${\underline{\mathsf{R}}}_{C_{\pi/4}^{\pi/4} }$.
\end{lemma}
\begin{proof}
It is straightforward to verify all these statements by using the definitions.
\end{proof}
\begin{lemma}[Some decompositions]
\label{OmuOm}
We have the following.
\\
(i)
$\Omegau^{++}_{**} = \cup_{i=0}^{2m-1} ( \Omega^{0+}_{i} \cup \Omega^{1-}_{i} )$.
\\
(ii)
$\Omegau^{++}_{+*} =
\Omegau^{++}_{++} \cup {\underline{\mathsf{R}}}_{\Sigma_{\pi/2}} \Omegau^{++}_{++} =
\Omega^{0+}_{0+} \cup \Omega^{1-}_{0+} \cup
\left( \cup_{i=1}^{{m}-1} ( \Omega^{0+}_{i} \cup \Omega^{1-}_{i} ) \right)
\cup \Omega^{0+}_{{m} - } \cup \Omega^{1-}_{{m} - }
$.
\\
(iii)
$\Omegau^{++}_{++} = \Omega^{0+}_{0+} \cup \Omega^{1-}_{0+} \cup
\left\{
\begin{aligned}
& \left( \cup_{i=1}^{\frac{m}2-1} ( \Omega^{0+}_{i} \cup \Omega^{1-}_{i} ) \right) \cup \Omega^{0+}_{\frac{m}2 - } \cup \Omega^{1-}_{\frac{m}2 - },
&& \text{ if } m\in 2\mathbb{Z},
\\
& \left( \cup_{i=1}^{\frac{m-1}2} ( \Omega^{0+}_{i} \cup \Omega^{1-}_{i} ) \right), && \text{ if } m\in 2\mathbb{Z}+1.
\end{aligned}
\right.
$
\end{lemma}
\begin{proof}
It is straightforward to verify all these statements by using the definitions.
\end{proof}
\subsection*{The coordinate Killing fields}
$\phantom{ab}$
\nopagebreak
Using the coordinates defined in \ref{D:coordinates},
we endow $\mathbb{R}^4$ with its standard orientation $dx^1 \wedge dx^2 \wedge dx^3 \wedge dx^4$,
and we endow the six coordinate $2$-planes with the orientations
\begin{equation}
\begin{aligned}
&dx^1 \wedge dx^2, \quad dx^3 \wedge dx^4, \quad dx^1 \wedge dx^4, \\
&dx^2 \wedge dx^3, \quad dx^1 \wedge dx^3, \quad \mbox{and} \quad dx^4 \wedge dx^2.
\end{aligned}
\end{equation}
Note that these orientations have been chosen so that one obtains the orientation
of $\mathbb{R}^4$ upon taking the wedge product (in either order)
of the orientation forms of a pair of orthogonally complementary $2$-planes.
In turn we orient each coordinate unit circle by taking the interior product
of its outward unit normal with the orientation form of the $2$-plane it spans.
These choices are consistent with the convention that for any oriented great circle $C'$
we orient ${C'}^\perp$ so that the wedge product of
the two corresponding $2$-plane orientations will yield the standard orientation on $\mathbb{R}^4$.
Thus
\begin{equation}
\label{E:killing}
\begin{aligned}
K_{C^\perp}(x)=K^{C}(x)&=x^1 \, \mathsf{p}_{\pi/2} - x^2 \, \mathsf{p}_0 \\
K_C(x)=K^{C^\perp}(x)&=x^3 \, \mathsf{p}^{\pi/2} - x^4 \, \mathsf{p}^0 , \\
K_{C_{\pi/2}^{\pi/2}}(x)=K^{C_0^0}(x)&=x^1 \, \mathsf{p}^0 - x^3 \, \mathsf{p}_0 , \\
K_{C_0^0}(x)=K^{C_{\pi/2}^{\pi/2}}(x)&=x^4 \, \mathsf{p}_{\pi/2} - x^2 \, \mathsf{p}^{\pi/2} , \\
K_{C_0^{\pi/2}}(x)=K^{C_{\pi/2}^0}(x)&=x^2 \, \mathsf{p}^0 - x^3 \, \mathsf{p}_{\pi/2} , \mbox{ and} \\
K_{C_{\pi/2}^0}(x)=K^{C_0^{\pi/2}}(x)&=x^1 \, \mathsf{p}^{\pi/2} - x^4 \, \mathsf{p}_0 .
\end{aligned}
\end{equation}
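As a quick consistency check of \eqref{E:killing} against \ref{l:D:rot}: on $C_0^0$ we have $x^2=x^4=0$, so the formula for $K_{C_0^0}$ vanishes there,
while at a point $(0,\cos t,0,\sin t)$ of $(C_0^0)^\perp=C_{\pi/2}^{\pi/2}$ it gives
$$
K_{C_0^0}\bigl((0,\cos t,0,\sin t)\bigr)=\sin t\,\mathsf{p}_{\pi/2}-\cos t\,\mathsf{p}^{\pi/2},
$$
a unit vector, as required by \ref{l:D:rot}; the remaining formulas can be checked in the same way.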
\begin{lemma}[$K_{C_{\phi}^{\phi'}}$ on $\Omegau^{++}_{+\pm}$ for $\phi,\phi'\in\{0,\pi/2\}$]
\label{Omu:K}
We have the following (recall \ref{l:D:rot} and \ref{E:killing}).
\\
(i)
$\mathsf{R}_{C_0^0} \mathsf{p}^{\pi/2} = \mathsf{p}_{\pi/2} $,
$\widetilde{K}_{C_0^0} ( \Omegau^{++}_{+*} ) = \overline{ \, \mathsf{p}_{\pi/2} \mathsf{p}^{-\pi/2} \,}$.
\\
(ii)
$\mathsf{R}_{C_0^{\pi/2}} \mathsf{p}_{\pi/2} = \mathsf{p}^{0} $,
$\widetilde{K}_{C_0^{\pi/2}} ( \Omegau^{++}_{+*} ) = \overline{ \, \mathsf{p}^{0} \mathsf{p}_{-\pi/2} \,}$.
\\
(iii)
$\mathsf{R}_{C^0_{\pi/2}} \mathsf{p}_{0} = \mathsf{p}^{\pi/2} $,
$\mathsf{R}_{C^0_{\pi/2}} \mathsf{p}^{\pi/2} = \mathsf{p}_{\pi} $,
$\widetilde{K}_{C^0_{\pi/2}} ( \Omegau^{++}_{++} ) = \overline{ \, \mathsf{p}^{\pi/2} \mathsf{p}_{\pi} \,}$,
$\widetilde{K}_{C^0_{\pi/2}} ( \Omegau^{++}_{+-} ) = \overline{ \, \mathsf{p}_{\pi} \mathsf{p}^{-\pi/2} \,}$.
\\
(iv)
$\mathsf{R}_{C^{\pi/2}_{\pi/2}} \mathsf{p}_{0} = \mathsf{p}^{0} $,
$\mathsf{R}_{C^{\pi/2}_{\pi/2}} \mathsf{p}^{0} = \mathsf{p}_{\pi} $,
$\widetilde{K}_{C^{\pi/2}_{\pi/2}} ( \Omegau^{++}_{++} ) = \overline{ \, \mathsf{p}^{0} \mathsf{p}_{\pi} \,}$,
$\widetilde{K}_{C^{\pi/2}_{\pi/2}} ( \Omegau^{++}_{+-} ) = \overline{ \, \mathsf{p}_{\pi} \mathsf{p}^{\pi} \,}$.
\end{lemma}
\begin{proof}
All claims follow easily from \eqref{E:killing} and Definition \ref{l:D:rot}.
\end{proof}
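For the reader's convenience we spell out one of these computations.
For $x\in\Omegau^{++}_{+*}\setminus C_0^0$ we have $x^2,x^4\ge0$ and $(x^2,x^4)\ne(0,0)$, so by \eqref{E:killing}
$$
\widetilde{K}_{C_0^0}(x)
=\frac{x^4\,\mathsf{p}_{\pi/2}-x^2\,\mathsf{p}^{\pi/2}}{\sqrt{(x^2)^2+(x^4)^2}}
\in\overline{\,\mathsf{p}_{\pi/2}\,\mathsf{p}^{-\pi/2}\,},
$$
and every point of this quarter circle is attained (for example at the points $(0,\sin t,0,\cos t)$ with $t\in[0,\pi/2]$),
which gives the second equality in (i).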
\subsection*{Some quadrilaterals in $\mathbb{S}^3$}
$\phantom{ab}$
\nopagebreak
We consider now $\forall i,j \in \mathbb{Z}$
the spherical quadrilateral $Q_i^j\subset \partial \Omega_i^j$
consisting of the four edges of the spherical tetrahedron $\Omega_i^j$
not contained in $C$ or $C^\perp$;
that is
\begin{equation}
\label{D:Q}
Q_i^j :=
\overline{ \mathsf{q}_{ i } \mathsf{q}^{ j } }
\cup
\overline{ \mathsf{q}^{ j } \mathsf{q}_{ i+1 } }
\cup
\overline{ \mathsf{q}_{ i+1 } \mathsf{q}^{ j+1 } }
\cup
\overline{ \mathsf{q}^{ j+1 } \mathsf{q}_{ i } }
.
\end{equation}
For ease of reference we define the set of vertices of $Q_i^j$
(the same as the set of vertices of $\Omega_i^j$)
\begin{equation}
\label{D:Qv}
{{Q\mspace{.8mu}\!\!\!\!{/}\,}}_i^j :=
\{
\mathsf{q}_{ i } ,
\mathsf{q}_{ i+1 } ,
\mathsf{q}^{ j } ,
\mathsf{q}^{ j+1 }
\}
.
\end{equation}
Recall that by \ref{Omsym} $\forall i,j\in\mathbb{Z}$ the circle
$\mathbb{S}( \mathsf{q}_{i+\frac12},\mathsf{q}^{j+\frac12} ) = {C_{i\pi/m}^{j\pi/2} } $ is an axis of symmetry of $\Omega_i^j$.
It is natural then to call this circle the ``axis'' of $\Omega_i^j$
and study rotations along it as in the following lemma.
We also define
\begin{equation}
\label{E:partialpm}
\partial_+ \Omega_i^j := \overline{ \, \mathsf{q}_{ i } \mathsf{q}^{ j } \mathsf{q}^{ j+1 } \, } \cup \overline{ \, \mathsf{q}_{ i+1 } \mathsf{q}^{ j } \mathsf{q}^{ j+1 } \, }
\qquad \text{ and } \qquad
\partial_- \Omega_i^j := \overline{ \, \mathsf{q}_{ i } \mathsf{q}_{ i+1 } \mathsf{q}^{ j } \, } \cup \overline{ \, \mathsf{q}_{ i } \mathsf{q}_{ i+1 } \mathsf{q}^{ j+1 } \, },
\end{equation}
and by \ref{Om:p} we have then
\begin{equation}
\label{E:partialpm2}
\partial\Omega_i^j= \partial_+ \Omega_i^j \cup \partial_- \Omega_i^j
\qquad \text{ and } \qquad
Q_i^j= \partial_+ \Omega_i^j \cap \partial_- \Omega_i^j.
\end{equation}
\begin{lemma}[{$\Omega_i^j$} and rotations along its axis]
\label{L-alex}
The following are true $\forall i,j\in\mathbb{Z}$ and any orbit $O$ of
$K_{\widetilde{C}}$,
where
$\widetilde{C} := ({C_{i\pi/m}^{j\pi/2} })^\perp = C_{i\pi/m+\pi/2}^{j\pi/2+\pi/2} $.
\\ (i) $( \mathsf{R}^t_{\widetilde{C}} \Omega_i^j ) \cap \Omega_i^j= \emptyset$ for $t\in (-3\pi/2, -\pi/2)\cup (\pi/2,3\pi/2)$.
Moreover either
$( \mathsf{R}^{\pm\pi/2}_{\widetilde{C}} \Omega_i^j ) \cap \Omega_i^j =\{ \mathsf{q}_{i+\frac12} \}$ or
$( \mathsf{R}^{\pm\pi/2}_{\widetilde{C}} \Omega_i^j ) \cap \Omega_i^j =\{ \mathsf{q}^{j+\frac12} \}$
(depending on the orientation of ${C_{i\pi/m}^{j\pi/2} } $ and on the sign).
\\ (ii)
For each $\Omega_{i\pm}^{j\pm}$ either $O \cap \Omega_{i\pm}^{j\pm} = O \cap \Omega_{i}^{j} $ or $O \cap \Omega_{i\pm}^{j\pm} = \emptyset$.
\\ (iii)
If $O\cap \Omega_i^j \ne \emptyset$,
then (recall \ref{E:partialpm})
$O\cap \partial_{\pm} \Omega_i^j = \{x_{\pm}\} $ for some $x_{\pm} \in \partial_{\pm} \Omega_i^j$.
Moreover $O\cap\Omega_i^j$ is
a connected arc (possibly a single point) whose endpoints are $x_+$ and $x_-$.
\\ (iv)
If $O\cap Q_i^j\ne \emptyset$, then $x_+=x_-\in Q_i^j$
and
$O\cap\Omega_i^j=\{x_+\}$.
\\ (v)
If $O\cap ( \Omega_i^j \setminus {{Q\mspace{.8mu}\!\!\!\!{/}\,}}_i^j ) \ne \emptyset$,
then $O$ intersects each face of $\Omega_i^j$ containing $x_+$ ($x_-$) transversely.
\\ (vi)
$\Pi_i^j(\Omega_i^j) \subset C_{i\pi/m+\pi/2}^{j\pi/2+\pi/2} \mbox{$\times \hspace*{-0.244cm} \times$} \mathsf{p}_{i\pi/m} $
is homeomorphic to a closed disc with boundary
$\Pi_i^j(Q_i^j) $,
where
$\Pi_i^j:= \Pi^{C_{i\pi/m}^{j\pi/2}}_{\mathsf{p}_{i\pi/m} } $
is defined as in \ref{Pi}
(recall also $\mathsf{p}_{i\pi/m} = \mathsf{q}_{i+\frac12}$).
\end{lemma}
\begin{proof}
We can clearly assume without loss of generality that $i=j=0$.
To prove (i) note that $H:=\mathsf{p}^0\mbox{$\times \hspace*{-0.244cm} \times$} C^{\pi/2}_{\pi/2}$ and $H':=\mathsf{p}_0\mbox{$\times \hspace*{-0.244cm} \times$} C^{\pi/2}_{\pi/2}$ are orthogonal
closed hemispheres with common boundary $C^{\pi/2}_{\pi/2}$,
intersecting $C^0_0$ orthogonally at $\mathsf{p}^0$ and $\mathsf{p}_0$ respectively,
and satisfying
$\mathsf{R}^{\pi/2}_{\widetilde{C}} (H')=H$.
Moreover $\overline{\mathsf{q}^0\mathsf{q}^1}\subset H$ and $\overline{\mathsf{q}_0\mathsf{q}_1}\subset H'$ with both geodesic segments avoiding the boundary $C^{\pi/2}_{\pi/2}$.
Since two orthogonal hyperplanes separate $\mathbb{R}^4$ into four convex connected components,
(i) follows easily.
Because each of the bisecting spheres $\Sigma_0$ and $\Sigma^0$
is preserved by the family $\mathsf{R}^{C_0^0}_t$,
the orbits of $K^{C_0^0}$ cannot cross either sphere, proving (ii).
Before turning to the remaining items
we first show that no orbit of $K^{C_0^0}$ intersects any face of $\Omega_0^0$
tangentially, except at a vertex.
By the symmetries it suffices to prove that orbits
intersect $\overline{\mathsf{p}^0\mathsf{q}_1\mathsf{q}^1} \subset \Sigma_{\pi/2m}$
and $\overline{\mathsf{p}_0\mathsf{q}_1\mathsf{q}^1} \subset \Sigma^{\pi/4}$ transversely (if at all)
except at $\mathsf{q}_1$ (the orbit through which is tangential to $\Sigma_{\pi/2m}$)
and $\mathsf{q}^1$ (the orbit through which is tangential to $\Sigma^{\pi/4}$).
Of course the spheres $\Sigma_{\pi/2m}$ and $\Sigma^{\pi/4}$ are minimal surfaces
and neither contains $C_0^0$,
so the Killing field $K^{C_0^0}$
induces a nontrivial Jacobi field on each of them.
A point where an orbit meets one of these spheres tangentially is a zero
of the corresponding Jacobi field,
but we know these nontrivial Jacobi fields are simply first harmonics,
each of whose nodal sets consists of a single great circle.
Clearly the reflection ${\underline{\mathsf{R}}}_{\Sigma^{\pi/2}}$ (${\underline{\mathsf{R}}}_{\Sigma_{\pi/2}}$) preserves
the sides of $\Sigma_{\pi/2m}$ ($\Sigma^{\pi/4}$)
and reverses each orbit of $K^{C_0^0}$.
Thus orbits can meet $\Sigma_{\pi/2m}$ ($\Sigma^{\pi/4}$) tangentially
only along $C_{\pi/2m}^{\pi/2}$ ($C_{\pi/2}^{\pi/4}$),
which intersects $\overline{\mathsf{q}_1\mathsf{q}^1}$ only at $\mathsf{q}_1$ ($\mathsf{q}^1$),
establishing the asserted transversality.
Next we argue that no orbit of $K^{C_0^0}$ intersects
any face of $\Omega_0^0$ at more than one point.
Again (by the symmetries) it suffices to
show that every orbit intersects each of the faces
$\overline{\mathsf{p}^0\mathsf{q}_1\mathsf{q}^1} \subset \Sigma_{\pi/2m}$
and $\overline{\mathsf{p}_0\mathsf{q}_1\mathsf{q}^1} \subset \Sigma^{\pi/4}$
at most once.
To see this first note that the orbits of $K^{C_0^0}$ in $\mathbb{R}^4 \supset \mathbb{S}^3$
are planar circles, so if one intersects a great $2$-sphere at more than one point,
then the intersection must be either a great circle (the entire orbit) or a pair of points.
In the first case the $2$-sphere so intersected must contain $C_0^0$,
but neither the sphere $\Sigma^{\pi/4} \supset \overline{\mathsf{p}_0\mathsf{q}_1\mathsf{q}^1}$
nor the sphere $\Sigma_{\pi/2m} \supset \overline{\mathsf{p}^0\mathsf{q}_1\mathsf{q}^1}$
contains $C_0^0$, and so the orbits of $K^{C_0^0}$ must meet these spheres at most twice.
However, the reflection ${\underline{\mathsf{R}}}_{\Sigma^{\pi/2}}$ (${\underline{\mathsf{R}}}_{\Sigma_{\pi/2}}$)
preserves both $\Sigma_{\pi/2m}$ ($\Sigma^{\pi/4}$)
and each orbit (as a set) of $K^{C_0^0}$,
so that if an orbit intersects $\Sigma_{\pi/2m}$ ($\Sigma^{\pi/4}$) in two points,
these points must lie on opposite sides of $\Sigma^{\pi/2}$ ($\Sigma_{\pi/2}$).
Since in fact $\Omega_0^0$ crosses neither $\Sigma^{\pi/2}$ nor $\Sigma_{\pi/2}$,
we see that any orbit meets each face at most once, as claimed.
Now we are ready to prove (iii), (iv), and (v).
By the symmetries
it suffices to consider an orbit $O$ intersecting $\Omega_{0+}^{0+}$.
Of course by (i) $O$ is not contained in $\Omega_{0+}^{0+}$
and obviously by (ii) $O$ can enter (or exit) $\Omega_{0+}^{0+}$ only through
$\overline{\mathsf{p}_0\mathsf{q}_1\mathsf{q}^1}$ or $\overline{\mathsf{p}^0\mathsf{q}_1\mathsf{q}^1}$,
but by the preceding paragraph it intersects each at most once.
Since $\overline{\mathsf{q}_1 \mathsf{q}^1}$ lies on both these triangles,
it follows that any orbit $O$ meeting $\overline{\mathsf{q}_1 \mathsf{q}^1}$
intersects $\Omega_0^0$ at only one point.
If on the other hand $O$ misses $\overline{\mathsf{q}_1 \mathsf{q}^1}$,
then, by the transversality above,
it must intersect the interior of $\Omega_0^0$,
so in this case it must cross $\overline{\mathsf{p}_0\mathsf{q}_1\mathsf{q}^1} \cup \overline{\mathsf{p}^0\mathsf{q}_1\mathsf{q}^1}$ at least twice,
meaning, by the above, that in fact $O$ must intersect each of these triangles exactly once.
This completes the proof of (iii), (iv), and (v).
For (vi) set $\Pi:=\Pi_0^0$.
Since the quadrilateral $Q_0^0$ is itself a closed curve missing
$C_{\pi/2}^{\pi/2}=\Pi^{-1}\left(C_{\pi/2}^{\pi/2}\right)$,
its image $Q':=\Pi\left(Q_0^0\right)$ under $\Pi$
is likewise a closed curve missing $C_{\pi/2}^{\pi/2}$.
By item (iv) (and the embeddedness of $Q_0^0$)
it follows that $Q'$ is an embedded closed curve in the interior of $C_{\pi/2}^{\pi/2} \mbox{$\times \hspace*{-0.244cm} \times$} \mathsf{p}_0$,
so that $\left(C_{\pi/2}^{\pi/2} \mbox{$\times \hspace*{-0.244cm} \times$} \mathsf{p}_0\right) \backslash Q'$
has two connected components, one the disc bounded by $Q'$
and the other the annulus bounded by $Q'$ and $C_{\pi/2}^{\pi/2}$.
Call the closure of the disc $D'$.
Since the hemisphere
$\Pi^{-1}\left(\overline{\mathsf{p}_0 \mathsf{p}_{\pi/2}}\right)=C_0^0 \mbox{$\times \hspace*{-0.244cm} \times$} \mathsf{p}_{\pi/2} \subset \Sigma^0$
intersects $Q_0^0$ only at $\mathsf{q}_1$,
we see that the geodesic arc $\overline{\mathsf{p}_0 \mathsf{p}_{\pi/2}}$
intersects $Q'$ exactly once (at $\mathsf{q}_1$),
and so we conclude that $\mathsf{p}_0 \in D'$.
A second application of item (iv)
ensures that $\Pi\left(\Omega_0^0 \backslash Q_0^0\right)$
misses $Q'$,
but $\Omega_0^0 \backslash Q_0^0$ is connected and includes $\mathsf{p}_0$,
so we have $\Pi\left(\Omega_0^0\right) \subset D'$.
Last, note that $D'':=\overline{\mathsf{q}_0\mathsf{q}_1\mathsf{q}^0} \cup \overline{\mathsf{q}_0\mathsf{q}_1\mathsf{q}^1}$
is a disc in $\Omega_0^0$ whose boundary is $Q_0^0$, which is therefore mapped by $\Pi$
homeomorphically onto $Q'=\partial D'$.
It follows (by degree theory) that $\Pi(D'')=D'$,
and so of course $\Pi(\Omega_0^0)=D'$ as well.
\end{proof}
\section{The Lawson surfaces}
\subsection*{Definition, uniqueness, and symmetries}
$\phantom{ab}$
\nopagebreak
Note that the surfaces we define below are the surfaces called $\xi_{m-1,1}$ in \cite{Lawson}.
Recall that these surfaces can be viewed as desingularizations of two orthogonal great two-spheres.
In this article we do not consider any other Lawson surfaces and when we refer to Lawson surfaces we mean these surfaces only.
The surfaces defined in the next theorem are positioned so that they can be viewed as desingularizations of $\Sigma^{\pi/4}\cup \Sigma^{ - \pi/4}$ along $C$.
Note also that we restrict our attention to the case $m\ge3$ because the surfaces produced
otherwise are the great sphere ($m=1$) and the Clifford torus ($m=2$).
\begin{theorem}[Lawson 1970 \cite{Lawson}]
\label{T:lawson}
Given an integer $m\ge3$ and any $i,j\in\mathbb{Z}$,
there is a unique compact connected
minimal surface $D_i^j \subset \Omega_i^j$ with
$\partial D_i^j = Q_i^j$ (recall \eqref{Om} and \eqref{D:Q}).
Moreover $D_i^j$ is a disc, minimizing area among such discs,
and
$$
M=M[C,m] :=\bigcup_{i+j\in2\mathbb{Z}} D_i^j
$$
is an embedded connected closed (so two-sided) smooth minimal surface of genus $m-1$.
\end{theorem}
\begin{proof}
The theorem, except for the uniqueness part but including
the existence of a minimizing disc $D_i^j$, is proved in \cite{Lawson}.
Although the uniqueness is also claimed in \cite{Lawson},
the subsequent literature
(for example \cite{choe:soret:2009}) does not take uniqueness as known.
We now provide a simple proof of uniqueness.
Suppose ${D'}_i^j$ is another connected minimal surface in $\Omega_i^j$ with boundary $Q_i^j$.
By \ref{L-alex}, $\mathsf{R}^t_{C_{i\pi/m+\pi/2}^{j\pi/2+\pi/2} } D_i^j$ cannot intersect
${D'}_i^j$ for any $t\in(-\pi,0)\cup(0,\pi)$:
otherwise we could consider the $\sup$ or $\inf$ of such $t$'s, which we call $t'$.
For $t'$ we would then have one-sided tangential contact at an interior point.
By the maximum principle
\cite{schoen1983}*{Lemma 1}
this would imply
equality of the surfaces and of their boundaries, a contradiction.
By \ref{L-alex}
the orbits which are close enough to $Q_i^j\setminus{{Q\mspace{.8mu}\!\!\!\!{/}\,}}_i^j$ and intersect $\Omega_i^j$
also intersect $D_i^j$ and ${D'}_i^j$.
Since there are no intersections for $t\ne0$ above, we conclude that
$D_i^j$ and ${D'}_i^j$ agree on a neighborhood of $Q_i^j\setminus{{Q\mspace{.8mu}\!\!\!\!{/}\,}}_i^j$ and therefore by analytic continuation they are identical.
\end{proof}
\begin{corollary}[Symmetries of the Lawson discs]
\label{Dsym0}
$\forall i,j\in\mathbb{Z}$ $D_i^j$ inherits the symmetries of $\Omega_i^j$: it is preserved as a set by
${\underline{\mathsf{R}}}_{\Sigma_{i\pi/m}} = {\underline{\mathsf{R}}}_{\Sigma_{t_{i+\frac12}} } ={\underline{\mathsf{R}}}_{C^\perp,\mathsf{q}_{i+\frac12} }$,
${\underline{\mathsf{R}}}_{\Sigma^{j\pi/2}} = {\underline{\mathsf{R}}}_{\Sigma^{t^{j+\frac12}} } ={\underline{\mathsf{R}}}_{C,\mathsf{q}^{j+\frac12} }$,
and
their composition
${\underline{\mathsf{R}}}_{C_{i\pi/m}^{j\pi/2} }$.
Moreover it has no other symmetries.
\end{corollary}
\begin{proof}
That the symmetries of $\Omega_i^j$ are symmetries of $D_i^j$ follows from the uniqueness of $D_i^j$ discussed in \ref{T:lawson}.
Any symmetry of $D_i^j$ has to be a symmetry of its boundary and then of its vertices, and hence of $\Omega_i^j$ as well.
By \ref{Om:p}.iii this completes the proof.
\end{proof}
\begin{lemma}[Generating symmetries of the Lawson surfaces]
\label{Msym}
For $M=M[C,m]$ as in \ref{T:lawson} we have the following symmetries,
which generate $\mathscr{G}_{sym}^M$.
\\
(i)
$\forall i,j\in\mathbb{Z}$ we have
${\underline{\mathsf{R}}}_{\Sigma^{j\pi/2}}, {\underline{\mathsf{R}}}_{\Sigma_{i\pi/m}} \in \mathscr{G}_{sym}^M$.
Moreover
the collection of the great two-spheres of symmetry of $M$ is
$\{\Sigma^{ j\pi /2 }\}_{j\in\mathbb{Z}}
\cup
\{\Sigma_{ i\pi /m }\}_{i\in\mathbb{Z}} $ and contains $m+2$ spheres.
\\
(ii)
$\forall i,j\in\mathbb{Z}$ we have
${\underline{\mathsf{R}}}_{C_{ (2i- 1)\frac\pi{2m} }^{ (2j- 1)\frac\pi4 } }
= {\underline{\mathsf{R}}}_{\mathsf{q}_{i}, \mathsf{q}^{j} }
\in \mathscr{G}_{sym}^M$.
Moreover
the collection of great circles contained in $M$ is
$\left\{ C_{ (2i- 1)\frac\pi{2m} }^{ (2j- 1)\frac\pi4 \,}
= \mathbb{S} (\mathsf{q}_{i}, \mathsf{q}^{j} )
\right\}_{i,j\in\mathbb{Z}}$
and contains $2m$ great circles.
Furthermore if $\nu:M\to\mathbb{S}^3$ is a unit normal smoothly chosen on $M$, then
$\nu$ is even under the symmetries in (i)
(that is for such a symmetry ${\underline{\mathsf{R}}}$ we have $\nu\circ{\underline{\mathsf{R}}} = {\underline{\mathsf{R}}}_* \circ \nu$)
and odd under the symmetries in (ii)
(that is for such a symmetry ${\underline{\mathsf{R}}}$ we have $\nu\circ{\underline{\mathsf{R}}} = - {\underline{\mathsf{R}}}_* \circ \nu$).
\end{lemma}
\begin{proof}
Set $\mathbf{Q}:=\left\{Q_i^j\right\}_{i+j \in 2\mathbb{Z}}$
and $\mathbf{\Omega}:=\left\{\Omega_i^j\right\}_{i+j \in 2\mathbb{Z}}$.
It is easy to see (keeping in mind that $m>2$)
that an element of $O(4)$ permutes $\mathbf{Q}$
if and only if it permutes $\mathbf{\Omega}$.
By the uniqueness assertion of Theorem \ref{T:lawson}
any element of $O(4)$ permuting $\mathbf{Q}$
is then a symmetry of $M$.
Conversely,
since $M$ is disjoint from the interior of every $\Omega_i^j$ with $i+j \in 2\mathbb{Z}+1$,
every element of $\mathscr{G}_{sym}^M$
must permute $\mathbf{Q}$.
Now write $\mathscr{G}$ for the subgroup of $O(4)$
generated by all the orthogonal transformations named in the statement of the lemma.
It is immediately verified from definitions
\ref{D:refl}, \ref{l:D:rot}, \ref{hemispheres}, \ref{circles}, \ref{Om}, and \ref{D:Q}
that every element of $\mathscr{G}$ indeed permutes
$\mathbf{Q}$,
confirming that $\mathscr{G} \subseteq \mathscr{G}_{sym}^M$.
In fact it is clear that $\mathscr{G}$ acts transitively on $\mathbf{\Omega}$,
so in order to show that $\mathscr{G}_{sym}^M \subseteq \mathscr{G}$
it suffices to show that any orthogonal transformation
preserving $\Omega_0^0$ as a set belongs to $\mathscr{G}$,
but this is evident from \ref{Omsym}.
Thus $\mathscr{G}_{sym}^M=\mathscr{G}$.
The counts of the spheres and circles named in (i) and (ii)
are obvious from \eqref{hemispheres} and \eqref{circles} alone.
It is also obvious from the definitions that every circle
in item (ii) of the lemma
indeed lies on $M$,
and furthermore for this very reason
reflection through such a circle
must reverse $\nu$.
Note that for each $j \in \mathbb{Z}$ the symmetry ${\underline{\mathsf{R}}}_{\Sigma^{j\pi/2}}$
fixes $C$ pointwise.
In particular ${\underline{\mathsf{R}}}_{\Sigma^{j\pi/2}}$
fixes the point $\mathsf{q}_1 \in C \cap M$,
but $\mathsf{q}_1$
lies on each of the circles of symmetry
$C_{\pi/2m}^{\pi/4}$ and $C_{\pi/2m}^{-\pi/4}$
orthogonally intersecting $C$ there,
and so $\nu(\mathsf{q}_1)$ points along $C$
and is thereby preserved by ${\underline{\mathsf{R}}}_{\Sigma^{j\pi/2}}$.
A similar argument shows that $\nu$ is likewise preserved by every ${\underline{\mathsf{R}}}_{\Sigma_{i\pi/m}}$
with $i \in \mathbb{Z}$.
The only assertions left to prove are
that $M$ is invariant under no spheres of symmetry
other than those enumerated in (i)
and that $M$ contains no circles of symmetry other than those enumerated in (ii)
(since the reflection principle \cite{Lawson}*{Proposition 3.1}
then ensures that $M$ contains no other great circles at all).
Accordingly suppose that $S$ is such a sphere or circle of symmetry,
so that ${\underline{\mathsf{R}}}_S \in \mathscr{G}_{sym}^M$.
As explained above,
${\underline{\mathsf{R}}}_S$ therefore permutes $\mathbf{Q}$,
but because $m>2$, this requires in particular that ${\underline{\mathsf{R}}}_S$ preserve $C$ (and so $C^\perp$ too) as a set.
If $S$ is a great sphere, it must consequently intersect either $C$ or $C^\perp$ orthogonally
(containing the other), but to permute $\mathbf{Q}$ it can then be only one of the spheres listed in (i)
(a sphere bisecting some $\Omega_i^j$, since reflection through a sphere containing a face of an $\Omega_i^j$
takes the corresponding $Q_i^j$ to a quadrilateral outside of $\mathbf{Q}$).
If instead $S$ is a great circle, in order to preserve $C$ as a set it
must (a) coincide with $C$, (b) coincide with $C^\perp$, or (c) intersect $C$ (and so also $C^\perp$) orthogonally.
Clearly neither $C$ nor $C^\perp$ is contained in $M$, since, for example, neither
$\overline{\mathsf{q}_0 \mathsf{q}_1}$ nor $\overline{\mathsf{q}^0 \mathsf{q}^1}$ is contained in $\partial D_0^0$.
In case (c), in order to permute $\mathbf{Q}$, $S$ can be only one of the circles
listed in (ii) (a circle containing an edge of a quadrilateral in $\mathbf{Q}$)
or one of the circles of intersection of a pair of spheres of symmetry
(a circle bisecting the edges on $C$ and $C^\perp$ of some $Q_i^j$, not necessarily having $i+j$ even),
but none of these latter circles is contained in $M$,
since, for example, for all $i,j \in \mathbb{Z}$ $\mathsf{q}_{i+\frac12} \not \in \partial D_i^j$.
\end{proof}
\begin{cor}[Umbilics on the Lawson surfaces]
\label{umb}
For $M=M[C,m]$ as in \ref{T:lawson} there are exactly four umbilics, namely $\mathsf{q}^1$, $\mathsf{q}^2$, $\mathsf{q}^3$, and $\mathsf{q}^4$,
each of degree (as in \cite{Lawson}) $m-2$.
\end{cor}
\begin{proof}
By the symmetries it is clear that each of these points is an umbilic of degree $m-2$.
By a result of Lawson \cite{Lawson}*{Proposition 1.5}
the total degree of the umbilics is $4g-4=4m-8$, and so there can be no others.
\end{proof}
\begin{cor}[The unit normal on the geodesic segments $\overline{ \mathsf{q}_i\mathsf{q}^1 } $]
\label{Lnui}
By appropriate choice of the unit normal
$\nu:M\to\mathbb{S}^3$ smoothly defined on $M$ we have $\forall i\in\mathbb{Z}$
$$
\nu\left( \, \overline{\mathsf{q}_{i}\mathsf{q}^1} \, \right) =
\overline{ \, \mathsf{q}_{i - (-1)^i\frac{m}2} \, \mathsf{q}^0 \, } =
\overline{ \, \mathsf{p}_{\frac{i}{m}\pi -(-1)^i\frac\pi2 - \frac{\pi}{2m} } \, \mathsf{p}^{-\pi/4} \, }.
$$
\end{cor}
\begin{proof}
A unit vector normal to a great circle $C' \subset \mathbb{S}^3$
lies on the circle $C'^\perp$.
By the symmetries (Lemma \ref{Msym})
the unit normal $\nu$ on $M \cap C$ must point along $C$,
while on $M \cap C^\perp$ it must point along $C^\perp$.
Thus $\nu(\mathsf{q}_1)=\pm \mathsf{q}_{1+m/2}$ and $\nu(\mathsf{q}^1)=\pm \mathsf{q}^0$.
We choose $\nu$ so that
$\nu(\mathsf{q}_1)=\mathsf{q}_{1+m/2}$.
Using Lemma \ref{Msym} again,
it suffices to complete the proof for $i=1$.
Since $M$ is disjoint from the interior of $\Omega_1^0$ (and $\Omega_0^1$),
we conclude that along all of $\overline{\mathsf{q}_1\mathsf{q}^1}$
the normal $\nu$ cannot cross either $\Sigma_{\pi/2m}$ or $\Sigma^{\pi/4}$
and more specifically, by our choice of $\nu(\mathsf{q}_1)$, must point into $\Omega_1^0$.
It follows that $\nu(\mathsf{q}^1)=\mathsf{q}^0$
and $\nu(x) \cdot \nu(\mathsf{q}_1) \geq 0$ and $\nu(x) \cdot \nu(\mathsf{q}^1) \geq 0$
for all $x \in \overline{\mathsf{q}_1\mathsf{q}^1}$,
completing the proof.
\end{proof}
Although it is not needed in this article,
we include the following lemma to offer a fuller picture of the symmetry group.
\begin{lemma}[Further symmetries of the Lawson surfaces]
\label{Msym2}
For $M=M[C,m]$ as in \ref{T:lawson} we have the following symmetries.
\\
(i) A great circle $C' \not \subset M$ is a circle of symmetry for $M$
if and only if (a) $C'=C$, (b) $C'$ is one of the $2m$ circles
$C_{i\pi/m}^{j\pi/2}$ having $i,j \in \mathbb{Z}$,
or (c) $m$ is even and $C'=C^\perp$.
Each such ${\underline{\mathsf{R}}}_{C'}$ preserves $\nu$.
\\
(ii) The antipodal map ${\underline{\mathsf{R}}}_{\emptyset}$ belongs to $\mathscr{G}_{sym}^M$ if and only if $m$ is even,
in which case ${\underline{\mathsf{R}}}_{\emptyset}$ preserves $\nu$.
\\
(iii) A point $x \in \mathbb{S}^3$ is a point of symmetry for $M$
(${\underline{\mathsf{R}}}_x \in \mathscr{G}_{sym}^M$) if and only if
(a) $x$ is one of the $2m$ points $\mathsf{p}_{i\pi/m}$ with $i \in \mathbb{Z}$
or (b) $x$ is one of the $4$ points $\mathsf{p}^{j\pi/4}$ with $j \in 2\mathbb{Z}+m$.
Each such ${\underline{\mathsf{R}}}_x$ reverses $\nu$.
\\
(iv) For every $i \in \mathbb{Z}$ the map
${\underline{\mathsf{R}}}_{\mathsf{q}_i} \circ \mathsf{R}_C^{\pi/2}$
belongs to $\mathscr{G}_{sym}^M$ and reverses $\nu$.
\end{lemma}
\begin{proof}
It is easy to see that for any $i \in \mathbb{Z}$
both ${\underline{\mathsf{R}}}_{\mathsf{q}_i}$ and $\mathsf{R}_C^{\pi/2}$
exchange the sets $\left\{\Omega_i^j\right\}_{i+j \in 2\mathbb{Z}}$
and $\left\{\Omega_i^j\right\}_{i+j \in 2\mathbb{Z}+1}$,
so that the composite acts as a permutation on each of these sets
and therefore (as explained in the proof of Lemma \ref{Msym})
belongs to $\mathscr{G}_{sym}^M$;
it is also easy to see that the composite reverses the normal at $\mathsf{q}_i$,
completing the proof of (iv).
Item (ii) follows from (i),
since ${\underline{\mathsf{R}}}_{\emptyset}={\underline{\mathsf{R}}}_C{\underline{\mathsf{R}}}_{C^\perp}$.
The fact that the circles listed in (i)
exhaust all circles of symmetry not lying on $M$
follows from the final paragraph of the proof of Lemma \ref{Msym}.
The rest of (i) is easily proven using Lemma \ref{Msym} itself
(and the group structure of $O(4)$) as follows.
Clearly ${\underline{\mathsf{R}}}_{\Sigma'} \circ {\underline{\mathsf{R}}}_{\Sigma''}={\underline{\mathsf{R}}}_{\Sigma' \cap \Sigma''}$
for any two great spheres $\Sigma'$ and $\Sigma''$ intersecting orthogonally.
On the other hand, according to Lemma \ref{Msym},
the great $2$-spheres of symmetry of $M$ are precisely the spheres $\Sigma_{i\pi/m}$ and $\Sigma^{j\pi/2}$
for $i,j \in \mathbb{Z}$, so in particular $\Sigma_{\pi/2}$ is a sphere of symmetry precisely when $m$ is even.
Together, the preceding two sentences complete the proof of (i).
To prove (iii)
first note that for any point $x \in \mathbb{S}^3$
the set $x^\perp$
is the round $2$-sphere centered at $\pm x$,
and
moreover
${\underline{\mathsf{R}}}_x={\underline{\mathsf{R}}}_{-x}=-{\underline{\mathsf{R}}}_{x^\perp}={\underline{\mathsf{R}}}_{\Sigma_x} \circ {\underline{\mathsf{R}}}_{C_x}$,
where $\Sigma_x$ is any great sphere through $\pm x$ and $C_x$ is any
great circle orthogonally intersecting $\Sigma_x$ at $\pm x$.
In particular ${\underline{\mathsf{R}}}_x \in \mathscr{G}_{sym}^M$ precisely when ${\underline{\mathsf{R}}}_{x^\perp}$
takes $M$ to $-M$.
Since $-D_i^j=D_{i+m}^{j+2}$,
we have $-M=\bigcup_{i+j \equiv m \bmod 2} D_i^j$.
It is clear from Lemma \ref{Msym} that $\mathscr{G}_{sym}^M$ preserves $C$ as a set
(since each generator obviously does so). Thus in order for $x$ to be a point
of symmetry of $M$ (whatever the parity of $m$) $x$ must lie on either $C$ or $C^\perp$.
Since $C$ is itself a great circle of symmetry,
any point of symmetry lying on $C$ must also lie on a sphere of symmetry intersecting $C$ orthogonally.
Thus the set of points of symmetry lying on $C$
is simply $\left\{\mathsf{p}_{i\pi/m}\right\}_{i \in \mathbb{Z}}$.
To identify the points of symmetry on $C^\perp$
we observe by Lemma \ref{Msym}
that $\mathscr{G}_{sym}^M$ preserves the set $\{\mathsf{q}^j\}_{j \in \mathbb{Z}}$,
which means that a point of symmetry on $C^\perp$ must lie in
$\{\mathsf{q}^j\}_{j \in \frac12 \mathbb{Z}}=\{\mathsf{p}^{j\pi/4}\}_{j \in \mathbb{Z}}$.
It is easy to see that ${\underline{\mathsf{R}}}_{\Sigma^{j \pi/4+\pi/2}}$
takes $M$ to $-M$ precisely when $j-m \in 2\mathbb{Z}$,
which completes the proof.
\end{proof}
\subsection*{Graphical properties}
$\phantom{ab}$
\nopagebreak
\begin{lemma}[Graphical property and subdivisions of $D_i^j$]
\label{Dsym}
$\forall i,j\in\mathbb{Z}$ the following hold.
\\
(i)
$D_i^j$ is graphical---with its interior strongly graphical---with respect to
$K^{C_{i\pi/m}^{j\pi/2} } = K_{C_{i\pi/m+\pi/2}^{j\pi/2+\pi/2} }$ (recall \ref{graphical})
and each orbit which intersects $\Omega_i^j$ intersects $D_i^j$ as well.
\\
(ii)
Each of $D_{i\pm}^{j} := D_i^j \cap \Omega_{i\pm}^{j}$, $D_{i}^{j\pm} := D_i^j \cap \Omega_{i}^{j\pm}$, and $D_{i\pm}^{j\pm} := D_i^j \cap \Omega_{i\pm}^{j\pm}$,
is homeomorphic to a closed disc.
\end{lemma}
\begin{proof}
To prove (i) we first prove that $D_i^j$ is graphical.
This follows by the same argument
as in the second paragraph of the proof of \ref{T:lawson} but with ${D'}_i^j$ replaced by $D_i^j$.
Consider now the Jacobi field $\nu\cdot K_{C_{i\pi/m+\pi/2}^{j\pi/2+\pi/2} }$,
which, by the graphical property and an appropriate choice of $\nu$, is $\ge0$ on $D_i^j$ and hence, by the maximum principle, is $>0$ on the interior of $D_i^j$.
This implies that the interior of $D_i^j$ is strongly graphical.
Next we
recall the projection map
\begin{equation}
\label{Piij}
\Pi_i^j:= \Pi^{C_{i\pi/m}^{j\pi/2}}_{\mathsf{p}_{i\pi/m} }
: \Omega_i^j \to C_{i\pi/m+\pi/2}^{j\pi/2+\pi/2} \mbox{$\times \hspace*{-0.244cm} \times$} \mathsf{p}_{i\pi/m}
\end{equation}
defined in \ref{L-alex}.vi.
Let $D':=\Pi_i^j(\Omega_i^j)$,
which by \ref{L-alex}.vi is homeomorphic to a closed disc with $\partial D'= \Pi_i^j(Q_i^j)$.
Clearly then $\Pi_i^j( D_i^j )\subset D'$.
Since $\partial D_i^j=Q_i^j$ we have also
$\Pi_i^j( \partial D_i^j ) = \partial D'$,
and therefore
$\Pi_i^j( D_i^j ) = D'$,
which completes the proof of (i).
Furthermore, as shown above, $D_i^j$ is graphical with respect to $K^{C_{i\pi/m}^{j\pi/2}}$,
so the restriction $\Pi_i^j|_{D_i^j}$ is one-to-one.
We conclude that $\Pi_i^j$ takes $D_i^j$ homeomorphically onto
$D'$.
The proof of (ii) is then completed by the fact that $\Pi_i^j$ clearly respects the symmetries of $\Omega_i^j$.
\end{proof}
By the definitions, when $i+j\in 2\mathbb{Z}$ we have
$M \cap \Omega_{i\pm}^{j\pm} = D_{i\pm}^{j\pm}$,
while for $i+j\in 2\mathbb{Z}+1$
the interior of $\Omega_{i\pm}^{j\pm}$ is disjoint from $M$.
By \ref{Dsym}
each $D_{i\pm}^{j\pm}$ is an embedded minimal disc.
To study $\partial D_{i\pm}^{j\pm}$ and the intersections with two-spheres of symmetry we define
the intersections of $D_i^j$ and $D_{i\pm}^{j\pm}$ with the bisecting two-spheres as follows.
\begin{equation}
\label{ab}
\begin{aligned}
\alpha_i^{j\pm} :=& \,
D_i^j \cap \overline{ \, \mathsf{q}_{ i+\frac12 } \mathsf{q}^{j+\frac12} \mathsf{q}^{j+\frac12\pm\frac12} \, } =
D_i^{j\pm} \cap \Sigma_{ i \frac\pi{m} } ,
\\
\alpha_i^{j} :=& \,
D_i^j \cap \overline{ \, \mathsf{q}_{ i+\frac12 } \mathsf{q}^{ j } \mathsf{q}^{ j+1 } \, } =
D_i^{j} \cap \Sigma_{ i \frac\pi{m} } =
\alpha_i^{j - } \cup \alpha_i^{j + } ,
\\
\beta_{i\pm}^j :=& \,
D_i^j \cap \overline{ \, \mathsf{q}_{i+\frac12} \mathsf{q}_{i+\frac12\pm\frac12} \mathsf{q}^{ j+\frac12 } \, } =
D_{i\pm}^j \cap \Sigma^{ j \frac\pi{2} } ,
\\
\beta_{i}^j :=& \,
D_i^j \cap \overline{ \, \mathsf{q}_{ i } \mathsf{q}_{ i+1 } \mathsf{q}^{ j+\frac12 } \, } =
D_{i}^j \cap \Sigma^{ j \frac\pi{2} } =
\beta_{i - }^j \cup \beta_{i + }^j .
\end{aligned}
\end{equation}
\begin{lemma}[The $\alpha$ and $\beta$ curves]
\label{Dpm}
$\forall i,j\in\mathbb{Z}$ the following hold.
\\
(i)
$D_i^j$ intersects $\overline{ \mathsf{p}_{ i\frac\pi m } \mathsf{p}^{ j\frac\pi 2 } } =
\overline{ \mathsf{q}_{ i+\frac12 } \mathsf{q}^{ j+\frac12 } }
$
at a single point which we will call $\mathsf{x}_i^j$.
\\
(ii)
The sets $\alpha_i^{j - }$, $\alpha_i^{j + }$, $\beta_{i - }^j$, $\beta_{i + }^j$, $\alpha_i^{j}$, and $\beta_{i}^j$
are connected curves with
$\partial \alpha_i^{j - } = \{ \mathsf{q}^{ j } , \mathsf{x}_i^j \}$,
$\partial \alpha_i^{j + } = \{ \mathsf{q}^{ j+ 1} , \mathsf{x}_i^j \}$,
$\partial \beta_{i - }^j = \{ \mathsf{q}_{ i } , \mathsf{x}_i^j \}$,
$\partial \beta_{i + }^j = \{ \mathsf{q}_{ i+ 1 } , \mathsf{x}_i^j \}$,
$\partial \alpha_i^{j} = \{ \mathsf{q}^j, \mathsf{q}^{j+1 } \}$,
and
$\partial \beta_{i}^j = \{ \mathsf{q}_i, \mathsf{q}_{i+1 } \}$.
\\
(iii)
$\partial D_{i\pm}^{j\pm}
\, = \, \overline{ \mathsf{p}_{ (2i \pm 1)\frac\pi{2m} } \, \mathsf{p}^{ (2j \pm 1)\frac\pi4 } }
\cup
\alpha_i^{j\pm}
\cup
\beta_{i\pm}^j
\, = \, \overline{ \mathsf{q}_{ i + \frac12 \pm \frac12 } \, \mathsf{q}^{ j + \frac12 \pm \frac12 } }
\cup
\alpha_i^{j\pm}
\cup
\beta_{i\pm}^j.$
\end{lemma}
\begin{proof}
As in the previous proof we consider $\Pi_i^j$,
which is a homeomorphism from $D_i^j$ onto $D'$ and moreover respects the symmetries of $\Omega_i^j$.
Using the various definitions it is then straightforward to complete the proof.
\end{proof}
\begin{lemma}[Graphical properties of $D_{i\pm}^{j\pm}$]
\label{Dsym2}
$\forall i,j\in\mathbb{Z}$ the following hold.
\\
(i)
The interior of $D_i^j$ is contained in the interior of $\Omega_i^j$ and the conormal of $D_i^j$ at a point of $Q_i^j\setminus {{Q\mspace{.8mu}\!\!\!\!{/}\,}}_i^j$
is transverse to each face of $\Omega_i^j$ containing the point.
\\
(ii)
$D_{i\pm}^{j}$ (as in \ref{Dsym}(ii)) is graphical with respect to $K_{C^\perp}$ and strongly graphical in its interior.
\\
(iii)
$D_{i}^{j\pm}$ (as in \ref{Dsym}(ii)) is graphical with respect to $K_{C}$ and strongly graphical in its interior.
\end{lemma}
\begin{proof}
(i) follows easily by the maximum principle
\cite{schoen1983}*{Lemma 1}.
The proofs of (ii) and (iii) are based on Alexandrov reflection in the style of \cite{schoen1983}.
Clearly $ \Pi^C_{\mathsf{q}_i } \Omega_i^j = \overline{ \, \mathsf{q}_{ i } \mathsf{q}^{ j } \mathsf{q}^{ j+1 } \, } $
by \ref{Pi} and \ref{Om:p}.
For $t\in[0,\pi/m]$ we define (recall \ref{qpoints})
$$
\mathsf{q}_{i,t} := \mathsf{p}_{t_i+t},
\qquad
D_{i,t}^j :=D_i^j \cap \overline{ \, \mathsf{q}_{ i } \mathsf{q}_{i,t} \mathsf{q}^{ j } \mathsf{q}^{ j+1 } \, } ,
\qquad \text{ and } \qquad
D_{i:t}^j :=D_i^j \cap \overline{ \, \mathsf{q}_{ i+1 } \mathsf{q}_{i,t} \mathsf{q}^{ j } \mathsf{q}^{ j+1 } \, }.
$$
We clearly have then $D_i^j = D_{i,t}^j \cup D_{i:t}^j$ and $D_{i,t}^j \cap D_{i:t}^j = D_i^j \cap \overline{ \, \mathsf{q}_{ i,t } \mathsf{q}^{ j } \mathsf{q}^{ j+1 } \, } $.
Clearly
$\Omega_i^j\setminus D_i^j$ consists of two connected components,
which in this proof we call $U_1$ and $U_2$,
where
$U_1$ is chosen to be the component which contains the interior of
$ \overline{ \, \mathsf{q}_{ i } \mathsf{q}^{ j } \mathsf{q}^{ j+1 } \, } $.
We define
$$
T:= \{ t\in (0,\pi/m) : U_1 \cap {\underline{\mathsf{R}}}_{ C^\perp , \mathsf{q}_{i,t} } D_{i,t}^j \ne \emptyset \} .
$$
(i) implies that $D_{i,t}^j$ is graphical for $t$ small enough,
and therefore
$t\notin T$ for $t$ small enough.
We conclude that $t':=\inf T>0$.
If $t'<\frac\pi{2m}$, then by the definition of $t'$,
${\underline{\mathsf{R}}}_{ C^\perp , \mathsf{q}_{i,t'} } D_{i,t'}^j$
and
$D_{i:t'}^j$ have a point of one-sided interior or boundary tangential contact.
By the maximum principle
\cite{schoen1983}*{Lemma 1} and analytic continuation this implies that
${\underline{\mathsf{R}}}_{ C^\perp , \mathsf{q}_{i,t'} } D_{i,t'}^j$
and
$D_{i:t'}^j$ are identical, contradicting the symmetries of $Q_i^j$ (alternatively \ref{Msym}).
We conclude that $t'\ge\frac\pi{2m}$ and hence
$$
U_1 \cap {\underline{\mathsf{R}}}_{ C^\perp , \mathsf{q}_{i,t} } D_{i,t}^j = \emptyset
\qquad \text{ for }
t\in (0, \pi/2m ).
$$
Using this we prove now that
$D_{i-}^{j}$
is graphical with respect to $K_{C^\perp}$:
Otherwise there would be an orbit containing two points $\mathsf{y}_1\ne\mathsf{y}_2$ with
$\mathsf{y}_k\in D_i^j \cap \overline{ \, \mathsf{q}_{ i,t_k } \mathsf{q}^{ j } \mathsf{q}^{ j+1 } \, } $
for $k=1,2$, where $0< t_1<t_2\le \frac\pi{2m} $.
$\mathsf{y}_2$ is then a point of interior one-sided tangential contact of
${\underline{\mathsf{R}}}_{ C^\perp , \mathsf{q}_{i,t_*} } D_{i,t_*}^j$
and $D_i^j$, where $t_*=\frac{t_1+t_2}2\in (0,\frac\pi{2m} )$.
This implies that
${\underline{\mathsf{R}}}_{ C^\perp , \mathsf{q}_{i,t_*} } $
is a symmetry of $D_i^j$, and hence of $\partial D_i^j = Q_i^j$, which is a contradiction.
To prove that it is strongly graphical in the interior we argue as in the proof of \ref{Dsym}.
By symmetry we conclude the statement for
$D_{i+}^{j}$.
This completes the proof of (ii).
The proof of (iii) is similar with the roles of $C$ and $C^\perp$ exchanged.
\end{proof}
We define $[m:2]:= 0$ if $m\in2\mathbb{Z}$ and $[m:2]:= 1$ otherwise; that is, $[m:2]$ is the parity of $m$.
\begin{lemma}[Some intersections of $M$ with great two-spheres]
\label{SigmaM}
We have the following.
\newline
(i)
$
M\cap \mathbb{S} (C^\perp,\mathsf{q}_i)
=
M\cap \Sigma_{(2i- 1)\frac\pi{2m} \,}
=
\bigcup_{j\in\mathbb{Z}}
\mathbb{S}(\mathsf{q}_i,\mathsf{q}^j)
=
\bigcup_{j\in\mathbb{Z}}
C_{ (2i- 1)\frac\pi{2m} }^{ (2j- 1)\frac\pi4 \,}$
$\forall i \in \mathbb{Z}$.
\\
(ii)
$
M\cap \mathbb{S} (C^\perp,\mathsf{q}_{i+\frac12} )
=
M\cap \Sigma_{i\frac\pi{m} \,} = \bigcup_{j\in2\mathbb{Z}-i} \alpha_i^j \cup \alpha_{i+m}^{j+[m:2]} $
$\forall i \in \mathbb{Z}$.
\newline
(iii)
$
M\cap \mathbb{S} (C,\mathsf{q}^j)
=
M\cap \Sigma^{(2j- 1)\frac\pi{4} \,}
=
\bigcup_{i\in\mathbb{Z}}
\mathbb{S}(\mathsf{q}_i,\mathsf{q}^j)
=
\bigcup_{i\in\mathbb{Z}}
C_{ (2i- 1)\frac\pi{2m} }^{ (2j- 1)\frac\pi4 \phantom{{\frac12}^A}}$
$\forall j \in \mathbb{Z}$.
\\
(iv)
$
M\cap \mathbb{S} (C,\mathsf{q}^{j+\frac12})
=
M\cap \Sigma^{j\frac\pi{2} \,} = \bigcup_{i\in2\mathbb{Z}-j} \beta_i^j \cup \beta_{i}^{j+2}$
$\forall j \in \mathbb{Z}$.
\end{lemma}
\begin{proof}
That the circles are contained in the intersections in (i) and (iii) follows from the definition of $M$ in \ref{T:lawson},
and the reverse inclusions follow from \ref{Dsym2}.i,
completing the proof of (i) and (iii).
By \ref{L:obs}.iv we have
$ \Sigma_{i\frac\pi{m} \,} = ( C^\perp \mbox{$\times \hspace*{-0.244cm} \times$} \mathsf{p}_{i\frac\pi{m}\,} ) \cup ( C^\perp \mbox{$\times \hspace*{-0.244cm} \times$} \mathsf{p}_{i\frac\pi{m}+\pi\,} )$
and
$\Sigma^{j\frac\pi{2} \,} = ( C \mbox{$\times \hspace*{-0.244cm} \times$} \mathsf{p}^{j\frac\pi{2} \,} ) \cup ( C \mbox{$\times \hspace*{-0.244cm} \times$} \mathsf{p}^{j\frac\pi{2}+\pi \,} )$.
By \ref{ab} and \ref{T:lawson} we have
$M\cap ( C^\perp \mbox{$\times \hspace*{-0.244cm} \times$} \mathsf{p}_{i\frac\pi{m}\,} )
=
\bigcup_{j\in2\mathbb{Z}-i}
\alpha_i^j $
and
$M\cap ( C \mbox{$\times \hspace*{-0.244cm} \times$} \mathsf{p}^{j\frac\pi{2} \,} )
=
\bigcup_{i\in2\mathbb{Z}-j}
\beta_i^j $ .
Using \ref{qpoints} we complete the proof of (ii) and (iv).
\end{proof}
\subsection*{Subdividing the Lawson surfaces with mutually orthogonal two-spheres}
$\phantom{ab}$
\nopagebreak
\begin{definition}
\label{Mpm}
For $M=M[C,m]$ as in \ref{T:lawson}
we define $M^{\pm\pm}_{\pm\pm}:= M \cap \Omegau^{\pm\pm}_{\pm\pm}$,
where instead of $\pm$ we could also have $*$
(recall \ref{Omu+-}).
For example
$M_{+-}^{-*} := M \cap \Omegau_{+-}^{-*}$.
\qed
\end{definition}
\begin{lemma}[Description of $M^{++}_{**}$]
\label{M++}
The following hold.
\\
(i)
$M^{\pm\pm}_{**}$ is homeomorphic to a closed disc and
$M^{++}_{**} = \cup_{i=0}^{m-1} ( D^{0+}_{2i} \cup D^{1-}_{2i+1} )$.
\\
(ii)
$\partial M^{++}_{**} = ( \Sigma^0\cap M^{++}_{**} ) \cup ( \Sigma^{\pi/2} \cap M^{++}_{**} ) $
and is homeomorphic to a circle.
\\
(iii)
$\Sigma^0\cap M^{++}_{**} = \cup_{i=0}^{m-1} \beta^{0}_{2i} $,
and so consists of $m$ connected components,
each homeomorphic to a closed interval.
\\
(iv)
$\Sigma^{\pi/2} \cap M^{++}_{**} = \cup_{i=0}^{m-1} \beta^{1}_{2i+1} $,
and so consists of $m$ connected components,
each homeomorphic to a closed interval.
\\
(v)
$\Sigma_0\cap M^{++}_{**}$ is homeomorphic to a closed interval and
$\Sigma_0\cap M^{++}_{**} = \left\{
\begin{aligned}
& \alpha_0^{0+} \cup \alpha_m^{0+} \text{ if } m\in 2\mathbb{Z},
\\
& \alpha_0^{0+} \cup \alpha_m^{1-} \text{ if } m\in 2\mathbb{Z}+1.
\end{aligned}
\right.
\qquad
$
\\
(vi)
$\Sigma_{\pi/2} \cap M^{++}_{**}$ is homeomorphic to a closed interval and
\\ $\phantom{kk}$
$\Sigma_{\pi/2} \cap M^{++}_{**} = \left\{
\begin{aligned}
& \alpha_{m/2}^{0+} \cup \alpha_{3m/2}^{0+} && \text{ if } m\in 4\mathbb{Z},
\\
& \overline{\mathsf{q}^1\mathsf{q}_{\frac{m+1}2}} \cup \overline{\mathsf{q}^1\mathsf{q}_{\frac{3m+1}2}} && \text{ if } m\in 2\mathbb{Z}+1,
\\
& \alpha_{m/2}^{1-} \cup \alpha_{3m/2}^{1-} && \text{ if } m\in 4\mathbb{Z}+2.
\end{aligned}
\right.
\qquad
$
\end{lemma}
\begin{proof}
All items follow easily from \ref{OmuOm}, \ref{T:lawson}, \ref{ab}, \ref{Dpm}, and \ref{Mpm}.
\end{proof}
\begin{lemma}[Description of $M^{++}_{++}$]
\label{M++++}
The following hold.
\\
(i)
$M^{++}_{++} = D_{0+}^{0+} \cup
\left\{
\begin{aligned}
& \cup_{i=1}^{\frac{m}{2}-1} D^{[i:2]\pm}_i \cup D_{\frac{m}2-}^{0+} && \qquad \text{ if } m\in 4\mathbb{Z}, \qquad
\\
& \cup_{i=1}^{\frac{m-1}{2}} D^{[i:2]\pm}_i && \qquad \text{ if } m\in 2\mathbb{Z}+1, \qquad
\\
& \cup_{i=1}^{\frac{m}{2}-1} D^{[i:2]\pm}_i \cup D_{\frac{m}{2}-}^{1-} && \qquad \text{ if } m\in 4\mathbb{Z}+2, \qquad
\end{aligned}
\right.
$
\\
where the $\pm$ signs are $+$ for $i$ even and $-$ for $i$ odd.
Therefore $M^{++}_{++}$ is homeomorphic to a closed disc.
\\
(ii)
$\partial M^{++}_{++}$ is homeomorphic to a circle.
Moreover we can write
$\partial M^{++}_{++} =
\gamma_1\cup\gamma_2\cup\gamma_3$, where
$\gamma_1:= \gamma_{1-} \cup \gamma_{1+}$,
$\gamma_{1-}:= \Sigma^{0} \cap M^{++}_{++} $,
$\gamma_{1+}:= \Sigma^{\pi/2} \cap M^{++}_{++} $,
$\gamma_2:= \Sigma_0\cap M^{++}_{++}$, and
$\gamma_3:= \Sigma_{\pi/2} \cap M^{++}_{++} $,
and each of $\gamma_1, \gamma_2,\gamma_3$ is homeomorphic to a closed interval.
\\
(iii)
$\gamma_1 = \gamma_{1-} \cup \gamma_{1+} =
\beta_{0+}^{0} \cup
\left\{
\begin{aligned}
& \cup_{i=1}^{\frac{m}{2}-1} \beta^{[i:2]}_i \cup \beta_{\frac{m}2-}^{0} && \text{ if } m\in 4\mathbb{Z},
\\
& \cup_{i=1}^{\frac{m-1}{2}} \beta^{[i:2]}_i && \text{ if } m\in 2\mathbb{Z}+1,
\\
& \cup_{i=1}^{\frac{m}{2}-1} \beta^{[i:2]}_i \cup \beta_{\frac{m}{2}-}^1 && \text{ if } m\in 4\mathbb{Z}+2,
\end{aligned}
\right.
$
\\
(iv)
$\gamma_{1-} = \Sigma^0 \cap M^{++}_{++} =
\overline{\mathsf{p}_{0} \mathsf{p}_{\pi/2} \mathsf{p}^0} \cap M^{++}_{++} =
\beta_{0+}^{0} \cup
\left\{
\begin{aligned}
& \cup_{i=1}^{\frac{m-4}{4}} \beta_{2i}^0 \cup \beta_{\frac{m}2-}^{0} && \text{ if } m\in 4\mathbb{Z},
\\
& \cup_{i=1}^{\frac{m-1}{4}} \beta_{2i}^0 && \text{ if } m\in 4\mathbb{Z}+1,
\\
& \cup_{i=1}^{\frac{m-2}{4}} \beta_{2i}^0 && \text{ if } m\in 4\mathbb{Z}+2,
\\
& \cup_{i=1}^{\frac{m-3}{4}} \beta_{2i}^0 && \text{ if } m\in 4\mathbb{Z}+3.
\end{aligned}
\right.
$
\\
(v)
$\gamma_{1+} = \Sigma^{\pi/2} \cap M^{++}_{++} =
\overline{\mathsf{p}_{\pi/2} \mathsf{p}^{\pi/2} \mathsf{p}_0} \cap M^{++}_{++} =
\left\{
\begin{aligned}
& \cup_{i=1}^{m/4} \beta_{2i-1}^1 && \text{ if } m\in 4\mathbb{Z},
\\
& \cup_{i=1}^{\frac{m-1}{4}} \beta_{2i-1}^1 && \text{ if } m\in 4\mathbb{Z}+1,
\\
& \cup_{i=1}^{\frac{m-2}{4}} \beta_{2i-1}^1 \cup \beta_{\frac{m}{2}-}^1 && \text{ if } m\in 4\mathbb{Z}+2,
\\
& \cup_{i=1}^{\frac{m+1}{4}} \beta_{2i-1}^1 && \text{ if } m\in 4\mathbb{Z}+3.
\end{aligned}
\right.
$
\\
(vi)
$\gamma_2 = \Sigma_0 \cap M^{++}_{++} =
\overline{\mathsf{p}_{0} \mathsf{p}^{\pi/2} \mathsf{p}^0} \cap M^{++}_{++} =
\alpha_0^{{{0+}}} $.
\\
(vii)
$\gamma_3 = \Sigma_{\pi/2} \cap M^{++}_{++} =
\overline{\mathsf{p}_{\pi/2} \mathsf{p}^{\pi/2} \mathsf{p}^0} \cap M^{++}_{++} =
\left\{
\begin{aligned}
& \alpha_{m/2}^{0+} && \text{ if } m\in 4\mathbb{Z},
\\
& \overline{\mathsf{q}^1\mathsf{q}_{\frac{m+1}2}} = \overline{\mathsf{p}^{\pi/4} \mathsf{p}_{\pi/2} } && \text{ if } m\in 2\mathbb{Z}+1,
\\
& \alpha_{m/2}^{1-} && \text{ if } m\in 4\mathbb{Z}+2,
\end{aligned}
\right.
$
\end{lemma}
\begin{proof}
All items follow easily from \ref{OmuOm}, \ref{T:lawson}, \ref{ab}, \ref{Dpm}, and \ref{Mpm}.
\end{proof}
\begin{lemma}[Description of $M^{++}_{+*}$]
\label{M+++}
The following hold.
\\
(i)
$M^{++}_{+*} = D_{0+}^{0+} \cup
\left\{
\begin{aligned}
& \cup_{i=1}^{m-1} D^{[i:2]\pm}_i \cup D_{m-}^{0+} && \qquad \text{ if } m\in 2\mathbb{Z}, \qquad
\\
& \cup_{i=1}^{m-1} D^{[i:2]\pm}_i \cup D_{m-}^{1-} && \qquad \text{ if } m\in 2\mathbb{Z}+1, \qquad
\end{aligned}
\right.
$
\\
where the $\pm$ signs are $+$ for $i$ even and $-$ for $i$ odd.
Therefore $M^{++}_{+*}$ is homeomorphic to a closed disc.
\\
(ii)
$\partial M^{++}_{+*}$ is homeomorphic to a circle.
Moreover we can write
$\partial M^{++}_{+*} =
\gamma_4\cup\gamma_5$, where
$\gamma_4:= \gamma_{4-} \cup \gamma_{4+}$,
$\gamma_{4-}:= \Sigma^{0} \cap M^{++}_{+*} $,
$\gamma_{4+}:= \Sigma^{\pi/2} \cap M^{++}_{+*} $,
$\gamma_5:= \Sigma_0\cap M^{++}_{+*}$,
and each of $\gamma_4, \gamma_5$ is homeomorphic to a closed interval.
\\
(iii)
$\gamma_4 = \gamma_{4-} \cup \gamma_{4+} =
\beta_{0+}^{0} \cup
\left\{
\begin{aligned}
& \cup_{i=1}^{m-1} \beta^{[i:2]}_i \cup \beta_{m-}^{0} && \text{ if } m\in 2\mathbb{Z},
\\
& \cup_{i=1}^{m-1} \beta^{[i:2]}_i \cup \beta_{m-}^1 && \text{ if } m\in 2\mathbb{Z}+1,
\end{aligned}
\right.
$
\\
(iv)
$\gamma_{4-} = \Sigma^0 \cap M^{++}_{+*} =
\beta_{0+}^{0} \cup
\left\{
\begin{aligned}
& \cup_{i=1}^{\frac{m-2}{2}} \beta_{2i}^0 \cup \beta_{{m}-}^{0} && \text{ if } m\in 2\mathbb{Z},
\\
& \cup_{i=1}^{\frac{m-1}{2}} \beta_{2i}^0 && \text{ if } m\in 2\mathbb{Z}+1.
\end{aligned}
\right.
$
\\
(v)
$\gamma_{4+} = \Sigma^{\pi/2} \cap M^{++}_{+*} =
\left\{
\begin{aligned}
& \cup_{i=1}^{m/2} \beta_{2i-1}^1 && \text{ if } m\in 2\mathbb{Z},
\\
& \cup_{i=1}^{\frac{m-1}{2}} \beta_{2i-1}^1 \cup \beta_{m-}^1 && \text{ if } m\in 2\mathbb{Z}+1.
\end{aligned}
\right.
$
\\
(vi)
$\gamma_5 = \Sigma_0 \cap M^{++}_{+*} =
\Sigma_0\cap M^{++}_{**} = \left\{
\begin{aligned}
& \alpha_0^{0+} \cup \alpha_m^{0+} \text{ if } m\in 2\mathbb{Z},
\\
& \alpha_0^{0+} \cup \alpha_m^{1-} \text{ if } m\in 2\mathbb{Z}+1.
\end{aligned}
\right.
\qquad
$
\\
(vii)
$\Sigma_{\pi/2} \cap M^{++}_{+*} =
\Sigma_{\pi/2} \cap M^{++}_{++} =
\left\{
\begin{aligned}
& \alpha_{m/2}^{0+} && \text{ if } m\in 4\mathbb{Z},
\\
& \overline{\mathsf{q}^1\mathsf{q}_{\frac{m+1}2}} = \overline{\mathsf{p}^{\pi/4} \mathsf{p}_{\pi/2} } && \text{ if } m\in 2\mathbb{Z}+1,
\\
& \alpha_{m/2}^{1-} && \text{ if } m\in 4\mathbb{Z}+2,
\end{aligned}
\right.
$
\end{lemma}
\begin{proof}
All items follow easily from \ref{OmuOm}, \ref{T:lawson}, \ref{ab}, \ref{Dpm}, and \ref{Mpm}.
\end{proof}
\begin{lemma}[Symmetries of $M^{++}_{++}$ and $M^{++}_{+*}$]
\label{symM++++}
$\mathsf{p}_{\frac\pi2} = \mathsf{q}_{\frac{m+1}2 }$ and for $m$ even $\mathsf{p}_{\frac\pi4} = \mathsf{q}_{\frac{m}4 + \frac12}$.
Moreover the following hold.
\\
(i)
If $m\in 4\mathbb{Z}$ then ${\underline{\mathsf{R}}}_{\Sigma_{\pi/4}} = {\underline{\mathsf{R}}}_{\mathsf{q}_{\frac{m}4 + \frac12} , C^\perp }$ is a symmetry of $M^{++}_{++}$ preserving the unit normal.
\\
(ii)
If $m\in 4\mathbb{Z}+2$ then ${\underline{\mathsf{R}}}_{ C_{\pi/4}^{\pi/4} } = {\underline{\mathsf{R}}}_{\mathsf{q}_{ \frac{m+2}4 } , \mathsf{q}^1} $ is a symmetry of $M^{++}_{++}$ reversing the unit normal.
\\
(iii)
If $m\in 2\mathbb{Z}+1$ then ${\underline{\mathsf{R}}}_{ C_{\pi/2}^{\pi/4} } = {\underline{\mathsf{R}}}_{ \mathsf{q}_{ \frac{m+1}2 } , \mathsf{q}^1 } $ is a symmetry of $M^{++}_{+*}$
reversing the unit normal and exchanging $M^{++}_{++}$ with $M^{++}_{+-}$.
\end{lemma}
\begin{proof}
All items follow easily from \ref{Omu:p}.iii, \ref{Msym}, \ref{Mpm}, and \ref{M++++}.
\end{proof}
\section{Jacobi fields}
As is well known, the linearized operator for the mean curvature on
$M=M[C,m]$ (recall \ref{T:lawson})
is given by
\begin{equation}
\label{Lcal}
\mathcal{L} := \Delta + |A|^2 + 2 ,
\end{equation}
where $|A|^2$ is the square of the length of the second fundamental form of the surface.
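For instance, on a totally geodesic great two-sphere in $\mathbb{S}^3$ we have $|A|^2\equiv0$, so the corresponding operator reduces to
\begin{equation*}
\mathcal{L} = \Delta + 2 ,
\end{equation*}
whose kernel consists exactly of the first spherical harmonics, that is the restrictions to the sphere of the linear functions of the ambient $\mathbb{R}^4$; these are precisely the Jacobi fields induced on the sphere by the Killing fields of $\mathbb{S}^3$ (compare the proof of \ref{L:grad} below).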
We recall next the following standard definition.
\begin{definition}[Jacobi fields on {{$M=M[C,m]$}}]
\label{jacobiM}
We call a function $J\in C^\infty(M)$ a \emph{Jacobi field on $M=M[C,m]$} if it satisfies $\mathcal{L} J = 0$.
\qed
\end{definition}
It is well known that Killing fields induce Jacobi fields as in the following definition.
\begin{definition}[Jacobi fields $J_{C'}$]
\label{jacobi}
We adopt now for the rest of this article the same choice for the unit normal $\nu$
on the Lawson surface $M=M[C,m]$ as in \ref{Lnui}.
Given then a great circle $C'$ in $\mathbb{S}^3$ and assuming an orientation on ${C'}^\perp$,
we define the \emph{Jacobi field} $J^{{C'}^\perp}= J^{{C'}^\perp}[C,m] = J_{C'}= J_{C'}[C,m] \in C^\infty (\, M[C,m]\,)$ by
$
J^{{C'}^\perp}= J_{C'} := K_{C'} \cdot \nu
$
(recall \ref{l:D:rot}).
\qed
\end{definition}
Note that multiplying a Jacobi field by $-1$
changes neither its nodal set nor any other significant properties,
and so the orientation of ${C'}^\perp$ and direction of $\nu$
do not play a fundamental role.
\begin{definition}[Exceptional and non-exceptional Jacobi fields]
\label{exceptional}
We call a Jacobi field on $M$ \emph{non-exceptional} if it is induced by a Killing field;
otherwise we call it \emph{exceptional}.
\qed
\end{definition}
We proceed to study some non-exceptional Jacobi fields which, as we will see in \ref{basis-killing}, form a basis.
It is useful to introduce first the following notation.
\begin{notation}
\label{N:perp}
We define $0_\perp:= \pi/2$ and $(\pi/2)_\perp:=0$.
\qed
\end{notation}
\begin{lemma}[Symmetries of Jacobi fields]
\label{Jsym}
$\forall i,j\in\mathbb{Z}$ we have the following.
\\
(i)
$J_C$ is odd under ${\underline{\mathsf{R}}}_{\Sigma^{j\pi/2}} = {\underline{\mathsf{R}}}_{\, C , \mathsf{q}^{j+\frac12} } $
and even under ${\underline{\mathsf{R}}}_{\Sigma_{i\pi/m}} = {\underline{\mathsf{R}}}_{\, C^\perp, \mathsf{q}_{i+\frac12} } $
and
${\underline{\mathsf{R}}}_{ C_{ (2i- 1)\frac\pi{2m} }^{ (2j- 1)\frac\pi4 \,} }
= {\underline{\mathsf{R}}}_{\, \mathsf{q}^{j} , \mathsf{q}_{i} } $.
\\
(ii)
$J_{C^\perp}$ is odd under ${\underline{\mathsf{R}}}_{\Sigma_{i\pi/m}} = {\underline{\mathsf{R}}}_{\, C^\perp, \mathsf{q}_{i+\frac12} } $
and even under ${\underline{\mathsf{R}}}_{\Sigma^{j\pi/2}} = {\underline{\mathsf{R}}}_{\, C , \mathsf{q}^{j+\frac12} } $
and
${\underline{\mathsf{R}}}_{ C_{ (2i- 1)\frac\pi{2m} }^{ (2j- 1)\frac\pi4 \,} }
= {\underline{\mathsf{R}}}_{\, \mathsf{q}^{j} , \mathsf{q}_{i} } $.
\\
(iii)
If $m\in2\mathbb{Z}$ and $\phi,\phi'\in\{0,\pi/2\}$,
then $J_{C_\phi^{\phi'} }$ is odd under ${\underline{\mathsf{R}}}_{\Sigma_\phi}$ and ${\underline{\mathsf{R}}}_{\Sigma^{\phi'}}$
and even under ${\underline{\mathsf{R}}}_{\Sigma_{\phi_\perp}}$ and ${\underline{\mathsf{R}}}_{\Sigma^{\phi'_\perp}}$.
\\
(iv)
If $m\in2\mathbb{Z}+1$, then the symmetries in (iii) hold except for the ones associated with ${\underline{\mathsf{R}}}_{\Sigma_{\pi/2}}$.
\end{lemma}
\begin{proof}
All items follow from \ref{Ksymm} and \ref{Msym}.
Note that ${\underline{\mathsf{R}}}_{\Sigma_{\pi/2}}$ preserves $M$ only when $m$ is even.
\end{proof}
\begin{lemma}[Action of some symmetries on some Jacobi fields]
\label{AsymJ}
The following hold.
\\
(i)
If $m\in 4\mathbb{Z}$, then
$J_{C^0_0} \circ {\underline{\mathsf{R}}}_{\Sigma_{\pi/4}} = - J_{C^0_{\pi/2}}$ and
$J_{C^{\pi/2}_{\pi/2}} \circ {\underline{\mathsf{R}}}_{\Sigma_{\pi/4}} = J_{C_0^{\pi/2}}$.
\\
(ii)
If $m\in 4\mathbb{Z}+2$, then
$J_{C^0_0} \circ {\underline{\mathsf{R}}}_{ C_{\pi/4}^{\pi/4} } = J_{C^{\pi/2}_{\pi/2}}$ and
$J_{C^0_{\pi/2}} \circ {\underline{\mathsf{R}}}_{ C_{\pi/4}^{\pi/4} } = - J_{C_0^{\pi/2}}$.
\\
(iii)
If $m\in 2\mathbb{Z}+1$, then
$J_{C^0_0} \circ {\underline{\mathsf{R}}}_{ C_{\pi/2}^{\pi/4} } = J_{C_0^{\pi/2}}$
and
$J_{C^{\pi/2}_{\pi/2}} \circ {\underline{\mathsf{R}}}_{ C_{\pi/2}^{\pi/4} } = J_{C^0_{\pi/2}} $.
\end{lemma}
\begin{proof}
All items follow easily from \ref{symM++++} and the definitions, using in particular the orientation conventions specified in \ref{E:killing}.
\end{proof}
\begin{lemma}[Gradient of Jacobi fields at $\mathsf{q}^1$]
\label{L:grad}
If $\phi,\phi'\in\{0,\pi/2\}$,
then
$J_{C_\phi^{\phi'} }(\mathsf{q}^1)=0$
and the gradient
$\nabla_{\mathsf{q}^1}J_{C_\phi^{\phi'} }$ at $\mathsf{q}^1$
is nonzero and tangential to $\Sigma_{\phi_\perp}$.
\end{lemma}
\begin{proof}
By \ref{umb} $M$ has high-order contact with $\Sigma^{\pi/4}$ at $\mathsf{q}^1$,
so we can consider the corresponding Jacobi field on $\Sigma^{\pi/4}$ instead.
That Jacobi field is clearly a first harmonic of $\Sigma^{\pi/4}$
and the result follows without calculation by the symmetries.
\end{proof}
\begin{lemma}[Non-exceptional Jacobi fields]
\label{basis-killing}
$J_{C}$,
$J_{C^\perp}$,
and
$J_{C_\phi^{\phi'} }$
for
$\phi,\phi'\in\{0,\pi/2\}$
form a basis of the space of non-exceptional Jacobi fields on $M=M[C,m]$.
\end{lemma}
\begin{proof}
Since the space of Killing fields has dimension six,
it is enough to prove that the Jacobi fields under consideration are linearly independent.
By symmetrizing and antisymmetrizing with respect to
${\underline{\mathsf{R}}}_{\Sigma^0}$, ${\underline{\mathsf{R}}}_{\Sigma^{\pi/2}}$, and (for the last four) ${\underline{\mathsf{R}}}_{\Sigma_0}$,
we can kill all of them by \ref{Jsym} except for a single Jacobi field arbitrarily chosen in advance.
This reduces the proof to showing that none of the six Jacobi fields vanishes identically.
Clearly $J_C(\mathsf{q}^1)\ne0$, $J_{C^\perp}(\mathsf{q}_1)\ne0$, and for the rest we consider the gradient
at $\mathsf{q}^1$ and we appeal to \ref{L:grad}. This completes the proof.
Alternatively, one can argue that the map $K \mapsto K \cdot \nu$ from the space of Killing fields
to the space of Jacobi fields is injective:
if it were not,
there would exist a nontrivial Killing field everywhere tangential to $M$,
and hence $M$ would be invariant under a one-parameter family of isometries.
By Lemma \ref{Msym}, however, $\mathscr{G}_{sym}^M$ is discrete, completing the proof.
\end{proof}
\begin{lemma}[Some Jacobi fields on geodesic segments]
\label{rays}
We have the following.
\\
(i)
For $i\in (2\mathbb{Z}+1) \cap [1,(m+1)/2]$
and
$i\in (2\mathbb{Z}) \cap [(m+1)/2, m ]$
we have
$J_{C_0^0} \ge0$ on
$\overline{ \mathsf{q}_i \mathsf{q}^1 } \subset M^{++}_{+*}$.
\\
(ii)
For $i\in (2\mathbb{Z}) \cap [1,(m+1)/2]$
and
$i\in (2\mathbb{Z}+1) \cap [(m+1)/2, m ]$
we have
$J_{C_0^{\pi/2}} \ge0$ on
$\overline{ \mathsf{q}_i \mathsf{q}^1 } \subset M^{++}_{+*}$.
\\
(iii)
For $i\in (2\mathbb{Z}) \cap [1,(m+1)/2]$
we have
$J_{C^0_{\pi/2}} \le0$ on
$\overline{ \mathsf{q}_i \mathsf{q}^1 } \subset M^{++}_{++}$
and
\\
for $i\in (2\mathbb{Z}+1) \cap [(m+1)/2, m ]$
we have
$J_{C^0_{\pi/2}} \ge0$ on
$\overline{ \mathsf{q}_i \mathsf{q}^1 } \subset M^{++}_{+-}$.
\\
(iv)
For $i\in (2\mathbb{Z}+1) \cap [1,(m+1)/2]$
we have
$J_{C^{\pi/2}_{\pi/2}} \ge0$ on
$\overline{ \mathsf{q}_i \mathsf{q}^1 } \subset M^{++}_{++}$
and
\\
for $i\in (2\mathbb{Z}) \cap [(m+1)/2, m ]$
we have
$J_{C^{\pi/2}_{\pi/2}} \le0$ on
$\overline{ \mathsf{q}_i \mathsf{q}^1 } \subset M^{++}_{+-}$.
\end{lemma}
\begin{proof}
By \ref{Omu:K} and \ref{Mpm}
\begin{equation}
\begin{aligned}
&\left.K_{C_0^0}\right|_{M_{+\pm}^{++}}
\mbox{ is a convex combination of }
\mathsf{p}_{\pi/2} \mbox{ and } -\mathsf{p}^{\pi/2}, \\
&\left.K_{C_0^{\pi/2}}\right|_{M_{+\pm}^{++}}
\mbox{ is a convex combination of }
-\mathsf{p}_{\pi/2} \mbox{ and } \mathsf{p}^0, \\
&\left.K_{C_{\pi/2}^0}\right|_{M_{+\pm}^{++}}
\mbox{ is a convex combination of }
-\mathsf{p}_0 \mbox{ and } \pm \mathsf{p}^{\pi/2}, \mbox{ and} \\
&\left.K_{C_{\pi/2}^{\pi/2}}\right|_{M_{+\pm}^{++}}
\mbox{ is a convex combination of }
-\mathsf{p}_0 \mbox{ and } \pm \mathsf{p}^0.
\end{aligned}
\end{equation}
Meanwhile, according to Lemma \ref{Lnui},
at each point on $\overline{\mathsf{q}_i\mathsf{q}^1}$ the unit normal $\nu$
is a convex combination of
$\nu(\mathsf{q}_i)=\mathsf{p}_{\frac{2i-1}{2m}\pi+(-1)^{i+1}\frac{\pi}{2}}$
and $\nu(\mathsf{q}^1)=\mathsf{p}^{-\pi/4}$.
Note that
\begin{equation}
\nu(\mathsf{q}_i) \in
\begin{cases}
\overline{\mathsf{p}_{\pi/2}\mathsf{p}_\pi} &\mbox{for } i \in (2\mathbb{Z}+1) \cap [1,(m+1)/2] \\
\overline{\mathsf{p}_{-\pi/2}\mathsf{p}_0} &\mbox{for } i \in (2\mathbb{Z}) \cap [1,(m+1)/2] \\
\overline{\mathsf{p}_\pi \mathsf{p}_{-\pi/2}} &\mbox{for } i \in (2\mathbb{Z}+1) \cap [(m+1)/2,m] \\
\overline{\mathsf{p}_0 \mathsf{p}_{\pi/2}} &\mbox{for } i \in (2\mathbb{Z}) \cap [(m+1)/2,m],
\end{cases}
\end{equation}
so on $M^{++}_{+*}$
\begin{equation}
\mathsf{p}_0 \cdot \nu(\mathsf{q}_i) \in
\begin{cases}
[0,1] &\mbox{for } i \in 2\mathbb{Z} \\
[-1,0] &\mbox{for } i \in 2\mathbb{Z}+1
\end{cases}
\end{equation}
and
\begin{equation}
\mathsf{p}_{\pi/2} \cdot \nu(\mathsf{q}_i) \in
\begin{cases}
[0,1] &\mbox{for } i \in \left((2\mathbb{Z}+1) \cap [1,(m+1)/2]\right) \cup \left((2\mathbb{Z}) \cap [(m+1)/2,m]\right) \\
[-1,0] &\mbox{for } i \in \left((2\mathbb{Z}) \cap [1,(m+1)/2]\right) \cup \left((2\mathbb{Z}+1) \cap [(m+1)/2,m]\right).
\end{cases}
\end{equation}
On the other hand, obviously
\begin{equation}
\mathsf{p}^0 \cdot \nu(\mathsf{q}^1) \in [0,1]
\quad \mbox{and} \quad
\mathsf{p}^{\pi/2} \cdot \nu(\mathsf{q}^1) \in [-1,0],
\end{equation}
while of course
\begin{equation}
\mathsf{p}^0 \cdot \nu(\mathsf{q}_i)=\mathsf{p}^{\pi/2} \cdot \nu(\mathsf{q}_i)=\mathsf{p}_0 \cdot \nu(\mathsf{q}^1)=\mathsf{p}_{\pi/2} \cdot \nu(\mathsf{q}^1)=0.
\end{equation}
All items now follow from the convexity and the signs of the inner products recorded above.
\end{proof}
We define now a kind of discrete derivative $\dd$ for functions on $M$ by appropriately adapting to the current
situation the discrete derivative defined in \cite{alm20}*{(8.13), page 319}:
\begin{definition}[$\mathbb{T}r$ and the discrete derivative $\dd$]
\label{D:TD}
We define an isometry $\mathbb{T}r:\mathbb{S}^3\to\mathbb{S}^3$ by $\mathbb{T}r:= {\underline{\mathsf{R}}}_{\mathsf{q}_1,\mathsf{q}^1} \circ {\underline{\mathsf{R}}}_{\Sigma_0}$ and
a linear map $\dd:C^\infty(M)\to C^\infty(M)$ by $\dd f := \frac1{2 \sin(\pi/m)} ( f\circ \mathbb{T}r - f \circ \mathbb{T}r^{-1} ) $
$\forall f\in C^\infty(M)$.
\qed
\end{definition}
\begin{lemma}[Elementary properties of $\mathbb{T}r$ and $\dd$]
\label{L:T}
$\dd$ as in \ref{D:TD} is well defined and
$\mathbb{T}r$ preserves $C$, $C^\perp$, and $M=M[C,m]$,
and on $M$ satisfies $\mathbb{T}r^{-1}_*\circ \nu \circ \mathbb{T}r = -\nu$.
Moreover $\mathbb{T}r =\mathsf{R}^C_{\pi/m} \circ {\underline{\mathsf{R}}}_{\Sigma^{\pi/4}}$ and so $\mathbb{T}r$ rotates $C$ along itself by angle $\pi/m$ and reflects $C^\perp$ to itself
while fixing $\mathsf{q}^1=\mathsf{p}^{\pi/4}$ and $\mathsf{q}^3=-\mathsf{p}^{\pi/4}$.
\end{lemma}
\begin{proof}
The first statement about $\mathbb{T}r$ follows from \ref{Msym}.
It follows then that $\dd$ is well defined.
Using the definitions it is easy to check that $\mathbb{T}r$ maps $\mathsf{p}_0, \mathsf{p}_{\pi/2}, \mathsf{p}^0, \mathsf{p}^{\pi/2}$ to
$\mathsf{p}_{\pi/m} , \mathsf{p}_{\pi/2 + \pi/m}, \mathsf{p}^{\pi/2}, \mathsf{p}^0$
respectively.
This implies the last statement and completes the proof.
\end{proof}
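We remark, for motivation only, that the normalization in \ref{D:TD} makes $\dd$ a symmetric difference quotient along the rotation of $C$:
since by \ref{L:T} $\mathbb{T}r$ rotates $C$ along itself by angle $\pi/m$, we have $\mathbb{T}r^{\pm1}(\mathsf{q}_i)=\mathsf{q}_{i\pm1}$ (recall \ref{qpoints}),
and since $\mathsf{q}_{i+1}$ and $\mathsf{q}_{i-1}$ lie on the unit circle $C$ at angular distance $2\pi/m$, their Euclidean distance in $\mathbb{R}^4$ is $2\sin(\pi/m)$;
therefore at the points $\mathsf{q}_i\in M\cap C$
\begin{equation*}
\dd f (\mathsf{q}_i) \, = \, \frac{ f(\mathsf{q}_{i+1}) - f(\mathsf{q}_{i-1}) }{ \, |\, \mathsf{q}_{i+1} - \mathsf{q}_{i-1} \,| \, }
\qquad \forall f\in C^\infty(M).
\end{equation*}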
\begin{lemma}[Discrete derivatives of some Jacobi fields]
\label{L:D}
The following hold.
\\
(i) $\dd J_{C_0^0} = \, J_{C_{\pi/2}^{\pi/2} }$ and $\dd J_{C_{\pi/2}^{\pi/2} } = \, - J_{C_0^0} $.
\\
(ii) $\dd J_{C_0^{\pi/2} } = \, - J_{C_{\pi/2}^{0} }$ and $\dd J_{C_{\pi/2}^0 } = \, J_{C_{0}^{\pi/2} }$.
\end{lemma}
\begin{proof}
Note that
if $J=K\cdot\nu$ is a Jacobi field induced by a Killing field $K$,
then
$J\circ\mathbb{T}r= ( K\circ\mathbb{T}r ) \cdot ( \nu\circ\mathbb{T}r ) = ( \mathbb{T}r^{-1}_*\circ K \circ\mathbb{T}r ) \cdot ( \mathbb{T}r^{-1}_*\circ \nu \circ \mathbb{T}r )
= - ( \mathbb{T}r^{-1}_*\circ K \circ\mathbb{T}r ) \cdot \nu$
and similarly
$J\circ\mathbb{T}r^{-1} = - ( \mathbb{T}r_*\circ K \circ\mathbb{T}r^{-1} ) \cdot \nu$,
so we have
\begin{equation}
\label{E:Tr}
\dd J = \textstyle{ \frac1{2 \sin(\pi/m)} } \, ( - \mathbb{T}r^{-1}_*\circ K \circ\mathbb{T}r + \mathbb{T}r_*\circ K \circ\mathbb{T}r^{-1} ) \cdot \nu.
\end{equation}
By \ref{L:T} we have
\begin{equation}
\begin{aligned}
\mathbb{T}r ( \, y^1 \mathsf{p}_0 + y^2 \mathsf{p}_{\pi/2} + y^3 \mathsf{p}^0 + y^4 \mathsf{p}^{\pi/2} \, ) \, =& \, (cy^1-sy^2) \mathsf{p}_0 + (sy^1+cy^2)\mathsf{p}_{\pi/2} + y^4\mathsf{p}^0 + y^3 \mathsf{p}^{\pi/2},
\\
\mathbb{T}r^{-1} ( \, y^1 \mathsf{p}_0 + y^2 \mathsf{p}_{\pi/2} + y^3 \mathsf{p}^0 + y^4 \mathsf{p}^{\pi/2} \, ) \, =& \, (cy^1+sy^2) \mathsf{p}_0 + (-sy^1+cy^2)\mathsf{p}_{\pi/2} + y^4\mathsf{p}^0 + y^3 \mathsf{p}^{\pi/2},
\end{aligned}
\end{equation}
where in this proof we simplify the notation by taking $c:=\cos\frac\pi{m}$ and $s:=\sin\frac\pi{m}$.
It is easy to calculate then by referring to \ref{E:killing} that
\begin{equation*}
\begin{aligned}
\mathbb{T}r_*^{-1} \circ K_{C_0^0} \circ \mathbb{T}r \, ( \, x^1 \mathsf{p}_0 + x^2 \mathsf{p}_{\pi/2} + x^3 \mathsf{p}^0 + x^4 \mathsf{p}^{\pi/2} \, ) \, =&
\, \phantom{-} s x^3 \mathsf{p}_0 + cx^3 \mathsf{p}_{\pi/2} - (sx^1+cx^2) \mathsf{p}^0,
\\
\mathbb{T}r_*^{-1} \circ K_{C_{\pi/2}^{\pi/2} } \circ \mathbb{T}r \, ( \, x^1 \mathsf{p}_0 + x^2 \mathsf{p}_{\pi/2} + x^3 \mathsf{p}^0 + x^4 \mathsf{p}^{\pi/2} \, ) \, =&
\, -c x^4 \mathsf{p}_0 + sx^4 \mathsf{p}_{\pi/2} + (cx^1-sx^2) \mathsf{p}^{\pi/2},
\\
\mathbb{T}r_*^{-1} \circ K_{C_{0}^{\pi/2} } \circ \mathbb{T}r \, ( \, x^1 \mathsf{p}_0 + x^2 \mathsf{p}_{\pi/2} + x^3 \mathsf{p}^0 + x^4 \mathsf{p}^{\pi/2} \, ) \, =&
\, -s x^4 \mathsf{p}_0 - cx^4 \mathsf{p}_{\pi/2} + (sx^1+cx^2) \mathsf{p}^{\pi/2},
\\
\mathbb{T}r_*^{-1} \circ K_{C^{0}_{\pi/2} } \circ \mathbb{T}r \, ( \, x^1 \mathsf{p}_0 + x^2 \mathsf{p}_{\pi/2} + x^3 \mathsf{p}^0 + x^4 \mathsf{p}^{\pi/2} \, ) \, =&
\, -c x^3 \mathsf{p}_0 + sx^3 \mathsf{p}_{\pi/2} + (cx^1-sx^2) \mathsf{p}^{0}.
\end{aligned}
\end{equation*}
If we exchange $\mathbb{T}r$ and $\mathbb{T}r^{-1}$ on the left-hand sides, we obtain the same expressions but with ``$s$'' replaced by ``$-s$''.
Subtracting, applying \ref{E:Tr}, and referring to \ref{E:killing} again, we conclude the proof.
\end{proof}
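We note in passing that combining the identities in \ref{L:D} gives
\begin{equation*}
\dd^2 J_{C_0^0} = \dd J_{C_{\pi/2}^{\pi/2}} = -J_{C_0^0}
\qquad \text{and} \qquad
\dd^2 J_{C_0^{\pi/2}} = -\dd J_{C_{\pi/2}^{0}} = -J_{C_0^{\pi/2}},
\end{equation*}
so on the span of these four Jacobi fields $\dd$ acts like differentiation of trigonometric functions with respect to the rotation angle along $C$.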
\section{Eigenfunctions on the Lawson surfaces}
In this section we study the index and nullity of the linear operator $\mathcal{L}$ on $M=M[C,m]$ defined in \ref{Lcal}.
$\mathcal{L}$ is the only operator we consider in this section and so we often omit it in order to simplify the notation,
especially in the notation of \ref{D:mixed}.
We start by defining
\begin{equation}
\label{Vpm}
V^{\pm\pm}:= \{u\in C_{pw}^\infty(M) \, : \, u\circ {\underline{\mathsf{R}}}_{\Sigma^0}=\pm u \text{ and } u\circ{\underline{\mathsf{R}}}_{\Sigma^{\pi/2}}=\pm u\,\},
\end{equation}
where the $\pm$ signs are taken correspondingly, the first one referring to ${\underline{\mathsf{R}}}_{\Sigma^0}$ and the second one to ${\underline{\mathsf{R}}}_{\Sigma^{\pi/2}}$.
We clearly have
\begin{equation}
\label{V++}
V = V^{++} {\,{\oplus}_{\Lcal}\,} V^{+-} {\,{\oplus}_{\Lcal}\,} V^{-+} {\,{\oplus}_{\Lcal}\,} V^{--},
\end{equation}
where we use ${\,{\oplus}_{\Lcal}\,}$ to mean ``direct sum'' not only in the sense of linear spaces,
but also to mean that the summands are invariant under $\mathcal{L}$,
and therefore the same decomposition holds for the corresponding eigenspaces.
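Explicitly, since ${\underline{\mathsf{R}}}_{\Sigma^0}$ and ${\underline{\mathsf{R}}}_{\Sigma^{\pi/2}}$ are commuting involutions, the decomposition \ref{V++} is realized by the standard symmetrization:
writing (in this remark only), for $u\in V$ and $\epsilon_1,\epsilon_2\in\{+,-\}$,
\begin{equation*}
u^{\epsilon_1\epsilon_2} := \tfrac14 \left( \, u
\, + \, \epsilon_1 \, u\circ{\underline{\mathsf{R}}}_{\Sigma^0}
\, + \, \epsilon_2 \, u\circ{\underline{\mathsf{R}}}_{\Sigma^{\pi/2}}
\, + \, \epsilon_1\epsilon_2 \, u\circ{\underline{\mathsf{R}}}_{\Sigma^0}\circ{\underline{\mathsf{R}}}_{\Sigma^{\pi/2}} \, \right),
\end{equation*}
we have $u^{\epsilon_1\epsilon_2}\in V^{\epsilon_1\epsilon_2}$ and $u=u^{++}+u^{+-}+u^{-+}+u^{--}$.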
\begin{prop}
\label{P:V--}
We have the following (recall \ref{D:eigen} and \ref{D:eigen-eq}).
\\ (i)
$J_C\in V^{--}$ and
$V^{--}\sim C_{pw}^\infty[M^{++}_{**}; \partial M^{++}_{**} ,\emptyset ]$.
\\ (ii)
$\operatorname{Ind}(V^{--}) = 0$
and
$\operatorname{Null}(V^{--}) = 1$.
\end{prop}
\begin{proof}
(i) follows from the definitions,
\ref{M++}, and \ref{Jsym}, where the linear isomorphism is given by restriction to $M^{++}_{**}$ in one direction
and its inverse by extension by appropriate reflections.
For (ii) note first that $J_{C}$ is nonnegative on $M^{++}_{**}$ by \ref{Dsym2}.iii and the symmetries (recall \ref{Jsym}).
Moreover $J_{C}$ is nontrivial by \ref{basis-killing}, and therefore,
as a consequence of Courant's nodal theorem \ref{Ctheorem},
there is no eigenfunction in $V^{--}$, other than the multiples of $J_{C}$, with eigenvalue less than or equal to that of $J_{C}$,
which is zero.
The result follows.
\end{proof}
To study
$V^{++}$ now we define
\begin{equation}
\label{V++pm}
\begin{aligned}
V^{++}_{\pm} :=& \{u\in V^{++} \, : \, \forall i\in\mathbb{Z} \,\, \,\, \, \, u\circ {\underline{\mathsf{R}}}_{\Sigma_{i\pi/m}} = \pm u \,\},
\\
V^{++}_{\pm\pm} :=& \{u\in V^{++}_{\pm} \, : \, \forall i,j\in\mathbb{Z} \,\, \,\, \, u\circ {\underline{\mathsf{R}}}_{\, \mathsf{q}^{j} , \mathsf{q}_{i} } = \pm u\,\},
\end{aligned}
\end{equation}
where in the second equation the $\pm$ signs are taken correspondingly.
Note that
\begin{equation}
\label{V+++o}
V^{++}_{+} = V^{++}_{++} {\,{\oplus}_{\Lcal}\,} V^{++}_{+-}
\qquad \text{and} \qquad
V^{++}_{-} = V^{++}_{-+} {\,{\oplus}_{\Lcal}\,} V^{++}_{--} .
\end{equation}
On the other hand $V^{++}$ is not the direct sum of
$V^{++}_{+}$
and
$V^{++}_{-}$.
\begin{lemma}
\label{V++-}
The following hold (recall \ref{Dsym}.ii and \ref{ab}).
\\ (i)
$J_{C^\perp}\in V_{-+}^{++}$ and
$
V^{++}_{-} \sim
C_{pw}^\infty[ D_{0+}^{0+}\cup D_{1-}^{1-} ; \alpha_1^{1-} \cup \alpha_0^{0+} , \beta_{0+}^0\cup \beta_{1-}^1 ]
$.
\\ (ii)
$\lambda_1(V^{++}_{-})=0<\lambda_2(V^{++}_{-})$.
\end{lemma}
\begin{proof}
$J_{C^\perp}\in V_{-+}^{++}$ follows from \ref{Jsym}.ii and the definitions.
Recall now
that
$D_{0+}^{0+}\cup D_{1-}^{1-}$ is homeomorphic to a closed disc and its boundary is $\beta_{0+}^0\cup \beta_{1-}^1\cup \alpha_1^{1-} \cup \alpha_0^{0+}$.
We clearly have (i) then,
where the linear isomorphism is given by restriction in one direction and its inverse by extending using reflections.
On $D_{0+}^{0+}$ $J_{C^\perp}$ is nonnegative by \ref{Dsym2}.ii and nontrivial by \ref{basis-killing}.
By \ref{Jsym}.ii it is then nonnegative on $D_{1-}^{1-}$ as well.
As a consequence of
Courant's nodal theorem \ref{Ctheorem}
$J_{C^\perp}$ corresponds then to the lowest eigenvalue and the proof is complete.
\end{proof}
\begin{lemma}
\label{V+++}
The following hold.
\\ (i)
$V^{++}_{+-} \sim C_{pw}^\infty [ D_{0+}^{0+}; \overline{\mathsf{q}^1\mathsf{q}_1} , \beta_{0+}^0\cup \alpha_0^{0+} ] \sim
\{\, u\in C_{pw}^\infty[ D_0^0 ; \partial D_0^0 , \emptyset ] \, : \,
u \circ {\underline{\mathsf{R}}}_{\Sigma_0} = u \circ {\underline{\mathsf{R}}}_{\Sigma^0} = u \, \}$.
\\ (ii)
$\lambda_1(V^{++}_{+-})>0$ and
$\lambda_1(V^{++}_{+})<0<\lambda_2(V^{++}_{+})$.
\end{lemma}
\begin{proof}
(i) follows easily by the symmetries with linear isomorphisms being restrictions in one direction and inverses given by extensions using
even or odd reflections appropriately.
By \ref{V+++o} to prove (ii) it is enough to prove
\begin{equation}
\label{l3}
\lambda_1(V^{++}_{+-})>0, \qquad
\lambda_1(V^{++}_{++})<0, \qquad
\lambda_2(V^{++}_{++})>0.
\end{equation}
Let $\phi_1$ be an eigenfunction corresponding to the eigenvalue $\lambda_1(V^{++}_{+-})$.
Since $D_0^0$ is a minimizing disc, it is weakly stable, and so $\lambda_1(V^{++}_{+-})\ge0$.
The first inequality in \ref{l3} will follow if we prove $\lambda_1(V^{++}_{+-})\ne0$.
By Courant's nodal theorem \ref{Ctheorem} $\phi_1$ cannot change sign on $D_0^0$, and so by the maximum principle
(without loss of generality) $\phi_1>0$ on the interior of $D_0^0$ and $\eta \phi_1<0$ on
$\partial D_0^0\setminus {{Q\mspace{.8mu}\!\!\!\!{/}\,}}_0^0= Q_0^0\setminus {{Q\mspace{.8mu}\!\!\!\!{/}\,}}_0^0$,
where $\eta$ denotes differentiation with respect to the outward unit conormal of $D_0^0$ along $\partial D_0^0\setminus {{Q\mspace{.8mu}\!\!\!\!{/}\,}}_0^0$.
By applying Green's second identity to $\phi_1$ and $J_{C_{\pi/2}^{\pi/2}}$ we conclude
$$
\int_{D_0^0} ( J_{C_{\pi/2}^{\pi/2}} \mathcal{L}\phi_1 - \phi_1 \mathcal{L} J_{C_{\pi/2}^{\pi/2}} ) =
\int_{\partial D_0^0} ( J_{C_{\pi/2}^{\pi/2}} \eta \phi_1 - \phi_1 \eta J_{C_{\pi/2}^{\pi/2}} ) .
$$
If $\lambda_1(V^{++}_{+-})=0$, then the left hand side vanishes.
Since $\phi_1$ satisfies the Dirichlet condition on $\partial D_0^0$,
we conclude
$$
\int_{\partial D_0^0} J_{C_{\pi/2}^{\pi/2}} \,\, \eta \phi_1 \, = \, 0.
$$
By \ref{Dsym}.i $J_{C_{\pi/2}^{\pi/2}}$ does not change sign on $D_0^0$.
Since $\eta \phi_1<0$ on $\partial D_0^0\setminus {{Q\mspace{.8mu}\!\!\!\!{/}\,}}_0^0$, we conclude that $J_{C_{\pi/2}^{\pi/2}}=0$ on $\partial D_0^0$.
This contradicts \ref{L:grad} (alternatively it implies odd symmetries which together with \ref{Jsym}
contradict the nontriviality of $J_{C_{\pi/2}^{\pi/2}}$) and therefore we conclude the first inequality in \ref{l3}.
The positivity of the zeroth-order term of $\mathcal{L}$
implies the second inequality in \ref{l3}.
Suppose now that $\lambda_2(V^{++}_{++})\le0$ and let $\phi_2$ be a corresponding eigenfunction.
Then $\phi_2$ satisfies Neumann conditions on $\partial D_{0+}^{0+}$
and moreover by Courant's nodal theorem \ref{Ctheorem} has exactly two nodal domains on $D_{0+}^{0+}$.
It follows from \cite{Cheng}*{Theorem 2.5} that the nodal set $\phi_2^{-1}(\{0\})$
contains (at least) a piecewise $C^2$ embedded circle or segment
whose endpoints (if it has any) lie on $\partial D_{0+}^{0+}$
but which is otherwise disjoint from $\partial D_{0+}^{0+}$.
In particular this nodal curve separates $D_{0+}^{0+}$
into two components and misses the interior of at least one of the three sides---$\beta_{0+}^0$,
$\alpha_0^{0+}$, and $\overline{\mathsf{q}^1\mathsf{q}_1}$---of $\partial D_{0+}^{0+}$.
We call the missed side $\gamma$.
By domain monotonicity we conclude that
$\lambda_1[D_{0+}^{0+}; \gamma, \partial D_{0+}^{0+} \setminus \gamma \, ] <0 $.
If $\gamma= \overline{\mathsf{q}^1\mathsf{q}_1}$, this would contradict the first inequality in \ref{l3}, which already has been proved.
If $\gamma= \alpha_0^{0+}$, this would contradict \ref{V++-}.
Finally if $\gamma= \beta_{0+}^0$, this would contradict \ref{P:V--}, and the proof is complete.
\end{proof}
\begin{prop}
\label{P:V++}
We have $\operatorname{Ind}(V^{++}) = 2m- 1$
and
$\operatorname{Null}(V^{++}) = 1$.
\end{prop}
\begin{proof}
Subdividing $M^{++}_{**}$ along its curves of intersection with the $\Sigma_{i\pi/m}$
and imposing the Dirichlet or the Neumann conditions appropriately,
we obtain the result from \ref{P:eigen} by using \ref{V++-} and \ref{V+++}.
\end{proof}
To study
$V^{+-}$ and $V^{-+}$ now we define
\begin{equation}
\label{V+-pm}
V^{+-}_{\pm} := \{u\in V^{+-} \, : \, u\circ {\underline{\mathsf{R}}}_{\Sigma_{0}} = \pm u \,\},
\qquad
V^{-+}_{\pm} := \{u\in V^{-+} \, : \, u\circ {\underline{\mathsf{R}}}_{\Sigma_{0}} = \pm u \,\}.
\end{equation}
Note that
\begin{equation}
\label{V+-+o}
V^{+-} = V^{+-}_{+} {\,{\oplus}_{\Lcal}\,} V^{+-}_{-},
\qquad
V^{-+} = V^{-+}_{+} {\,{\oplus}_{\Lcal}\,} V^{-+}_{-}.
\end{equation}
\begin{lemma}[The sign of some Jacobi fields]
\label{MsJ}
The following hold.
\\ (i)
If $m\ge3$, then
$J_{C^0_0}\ge0$ and $J_{C^{\pi/2}_0}\ge0$ on $M^{++}_{+*}$.
\\ (ii)
If in addition $m$ is even, then
$J_{C^{\pi/2}_{\pi/2}}\ge0$ and $J_{C_{\pi/2}^0}\le0$
on $M^{++}_{++}$.
\end{lemma}
\begin{proof}
\emph{Step 1:
We prove that
$\forall i\in \mathbb{Z}\cap [1,(m+1)/2]$
we have $J_{C^0_0}\ge0$ on $\overline{ \mathsf{q}_i \mathsf{q}^1 } $---equivalently
$J_{C^0_0}\ge0$ on all geodesic segments contained in $M^{++}_{++}$. }
If $i$ is odd or $i=\frac{m+1}2$,
we already know this by \ref{rays}.i.
We can assume then that $m\ge4$ because for $m=3$ step 1 is proved.
By \ref{rays}.iv and \ref{L:D}.i we have for
$i\in (2\mathbb{Z}+1) \cap [1,(m+1)/2]$ that
$J_{C^0_0} \circ \mathbb{T}r \ge J_{C^0_0} \circ \mathbb{T}r^{-1} $ on
$\overline{ \mathsf{q}_{i} \mathsf{q}^1 } $.
Since $\mathbb{T}r^{\pm 1}(\overline{\mathsf{q}_i\mathsf{q}^1})=\overline{\mathsf{q}_{i\pm 1}\mathsf{q}^1}$ by \ref{L:T},
this means that the values of $J_{C^0_0}$ on $\overline{ \mathsf{q}_{i} \mathsf{q}^1 } \subset M^{++}_{++}$, compared at points corresponding under $\mathbb{T}r^2$, are nondecreasing as the even index $i$ increases.
Arguing inductively on even $i$ it is enough to prove then that
$J_{C^0_0}\ge0$ on
$\overline{ \mathsf{q}_{2} \mathsf{q}^1 } $.
Taking $i=1$ in the inequality in the previous paragraph we establish that
$J_{C^0_0} \circ \mathbb{T}r \ge J_{C^0_0} \circ \mathbb{T}r^{-1} $ on
$\overline{ \mathsf{q}_{1} \mathsf{q}^1 } $.
By \ref{D:TD} we have
$\mathbb{T}r^{-1} = {\underline{\mathsf{R}}}_{\Sigma_0}$ on ${\overline{\mathsf{q}_1\mathsf{q}^1}}$,
so by \ref{Jsym}.iii-iv we know that
$J_{C^0_0} \circ \mathbb{T}r^{-1} = - J_{C^0_0} $ on
$\overline{ \mathsf{q}_{1} \mathsf{q}^1 } $.
Combining we obtain
$J_{C^0_0} \circ \mathbb{T}r + J_{C^0_0} \ge 0 $ on $\overline{ \mathsf{q}_{1} \mathsf{q}^1 } $.
We consider now the symmetrization $\varphi:= J_{C^0_0} \circ {\underline{\mathsf{R}}}_{\Sigma_{\pi/m} } + J_{C^0_0}$ of $J_{C^0_0}$ on $D^{1-}_1 \subset M^{++}_{++}$.
Recall that
$\partial D^{1-}_1 = \overline{ \mathsf{q}_{1} \mathsf{q}^1 } \cup \overline{ \mathsf{q}_{2} \mathsf{q}^1 } \cup \beta_1^1$.
Since by \ref{L:T} $\mathbb{T}r= {\underline{\mathsf{R}}}_{\Sigma_{\pi/m}}$ on $\overline{ \mathsf{q}_{1} \mathsf{q}^1 } $,
by the last inequality above we have $\varphi\ge0$ on
$\overline{ \mathsf{q}_{1} \mathsf{q}^1 } \cup \overline{ \mathsf{q}_{2} \mathsf{q}^1 } \subset \partial D^{1-}_1 $.
By \ref{Jsym}.iii-iv $\varphi$ satisfies the Neumann condition on the remaining boundary $\beta_1^1$.
If we assume that $\varphi$ attains negative values on $D^{1-}_1$, then by domain monotonicity,
and since $\mathcal{L} \varphi =0$,
we obtain a contradiction to $\lambda_1(V_{+-}^{++})>0$ in \ref{V+++}.ii by using \ref{V+++}.i.
Hence $\varphi\ge0$ on $D^{1-}_1$ and since $\varphi=2 J_{C^0_0}$ on $ \alpha^{1-}_1 = D^{1-}_1 \cap \Sigma_{\pi/m} $ (by \ref{ab}),
we conclude that $ J_{C^0_0} \ge 0$ on $ \alpha^{1-}_1 \subset D^{1-}_1 $.
We consider now the domain $\Phi := D^{1-}_{1+} \cup D^{0+}_2$.
Clearly $\Phi$ is homeomorphic to a disc and has $\partial \Phi = \beta^1_{1+} \cup \beta^0_2 \cup \overline{ \mathsf{q}_{3} \mathsf{q}^1 } \cup \alpha^{1-}_1$
and $\overline{ \mathsf{q}_{2} \mathsf{q}^1 } \subset \Phi$.
We postpone the case $m=4$ for later and we assume that $m\ge5$ so that
$ \overline{ \mathsf{q}_{3} \mathsf{q}^1 } \subset \Phi \subset M^{++}_{++}$.
We already know then
(by the preceding paragraphs and \ref{Jsym})
that
$ J_{C^0_0} \ge 0$ on $ \overline{ \mathsf{q}_{3} \mathsf{q}^1 } \cup \alpha^{1-}_1$ and satisfies the Dirichlet condition on $\beta^0_2$ and the Neumann condition on $\beta^1_{1+}$.
In order to apply now \ref{P:eigen}
we subdivide $\Phi$ along
$\overline{ \mathsf{q}_{2} \mathsf{q}^1 }$ into
$D^{1-}_{1+}$ and $D^{0+}_2$.
We clearly have
$C_{pw}^\infty[\mathcal{L}, D^{1-}_{1+} ; \alpha^{1-}_1 , \overline{ \mathsf{q}_{2} \mathsf{q}^1 } \cup \beta^1_{1+} ] \sim V^{++}_{-+} $
and
$\lambda_1[\mathcal{L}, D^{0+}_2 ; \beta^0_2 \cup \overline{ \mathsf{q}_{3} \mathsf{q}^1 } , \overline{ \mathsf{q}_{2} \mathsf{q}^1 } ]
>
\lambda_1[\mathcal{L}, D^{0+}_2 ; \beta^0_2 , \overline{ \mathsf{q}_{2} \mathsf{q}^1 } \cup \overline{ \mathsf{q}_{3} \mathsf{q}^1 } ] \ge \lambda_1(V^{--}) $.
By referring to \ref{V++-} and \ref{P:V--} we conclude that
$\#_{<0} [\mathcal{L}, D^{1-}_{1+} ; \alpha^{1-}_1 , \overline{ \mathsf{q}_{2} \mathsf{q}^1 } \cup \beta^1_{1+} ] =0$
and
$\#_{\le0} [\mathcal{L}, D^{0+}_2 ; \beta^0_2 \cup \overline{ \mathsf{q}_{3} \mathsf{q}^1 } , \overline{ \mathsf{q}_{2} \mathsf{q}^1 } ] =0$.
By \ref{P:eigen} then we conclude that
$\lambda_1[\mathcal{L}, \Phi; \beta^0_2 \cup \overline{ \mathsf{q}_{3} \mathsf{q}^1 } \cup \alpha^{1-}_1, \beta^1_{1+} ] >0 $,
which by domain monotonicity would contradict the assumption that $J_{C^0_0}$ takes negative values on $\Phi$.
We conclude that
$ J_{C^0_0} \ge 0$ on $\overline{ \mathsf{q}_{2} \mathsf{q}^1 } \subset \Phi$
which completes step 1 under the assumption $m\ge5$.
We consider now the case $m=4$.
Note that
$D^{1-}_{1+} \cup D^{0+}_{2-}$ is homeomorphic to a disc and can be subdivided into
$D^{1-}_{1+}$ and $D^{0+}_{2-}$ by $\overline{ \mathsf{q}_2 \mathsf{q}^1 }$.
Recall that
$\partial D^{1-}_{1+} = \alpha^{1-}_1 \cup \beta_{1+}^1 \cup \overline{ \mathsf{q}_2 \mathsf{q}^1 } $ and
$\partial D^{0+}_{2-} = \alpha^{0+}_2 \cup \beta_{2-}^0 \cup \overline{ \mathsf{q}_2 \mathsf{q}^1 } $.
We have $\lambda_1[ D^{1-}_{1+} ; \alpha^{1-}_1 , \beta_{1+}^1 \cup \overline{ \mathsf{q}_2 \mathsf{q}^1 } ] = 0 $ by \ref{V++-} and
$\lambda_1[ D^{0+}_{2-} ; \beta_{2-}^0 , \alpha^{0+}_2 \cup \overline{ \mathsf{q}_2 \mathsf{q}^1 } ] =0$ by \ref{P:V--}.
Applying \ref{P:eigen} we conclude that
$\lambda_1[ D^{1-}_{1+} \cup D^{0+}_{2-} ; \alpha^{1-}_1 \cup \beta_{2-}^0 , \beta_{1+}^1 \cup \alpha^{0+}_2 ] \ge 0 $.
Since for $m=4$ we have $\alpha_2^{0+} \subset \Sigma_{\pi/2}$,
it follows by \ref{Jsym} that
$ J_{C^0_0} $ satisfies the same boundary conditions except on $\alpha_1^{1-}$,
where we proved above that it is $\ge0$.
Moreover
$ J_{C^0_0} $ cannot vanish identically on $\alpha_1^{1-}$ by \ref{L:grad}.
By domain monotonicity we obtain a contradiction to the assumption that
$ J_{C^0_0} $ takes negative values on
$D^{1-}_{1+} \cup D^{0+}_{2-}$.
We conclude
$ J_{C^0_0} \ge 0$ on $\overline{ \mathsf{q}_{2} \mathsf{q}^1 } \subset D^{1-}_{1+} \cup D^{0+}_{2-}$ and step 1 is complete in all cases.
\emph{Step 2:
We prove that
$\forall i\in \mathbb{Z}\cap [1,(m+1)/2]$
we have $J_{C^{\pi/2}_0}\ge0$ on $\overline{ \mathsf{q}_i \mathsf{q}^1 } $---equivalently
$J_{C^{\pi/2}_0}\ge0$ on all geodesic segments contained in $M^{++}_{++}$. }
Unlike the case of
$J_{C^0_0}$ we now know this by \ref{rays}.ii when $i$ is even.
Using the discrete derivative as before,
we have
by \ref{rays}.iii and \ref{L:D}.ii
for
$i\in (2\mathbb{Z}) \cap [1,(m+1)/2]$ that
$J_{C^{\pi/2}_0} \circ \mathbb{T}r \ge J_{C^{\pi/2}_0} \circ \mathbb{T}r^{-1} $ on
$\overline{ \mathsf{q}_{i} \mathsf{q}^1 } $.
Arguing inductively on odd $i$,
it is enough to prove then that
$J_{C^{\pi/2}_0}\ge0$ on
$\overline{ \mathsf{q}_{1} \mathsf{q}^1 } $.
For this we consider the domain $\Phi' := D^{0+}_{0+} \cup D^{1-}_1$.
Clearly $\Phi'$ is isometric to $\Phi$ in the previous step (in fact $\Phi=\mathbb{T}r(\Phi')$) and
has $\partial \Phi' = \beta^0_{0+} \cup \beta^1_1 \cup \overline{ \mathsf{q}_{2} \mathsf{q}^1 } \cup \alpha^{0+}_0$
and $\overline{ \mathsf{q}_{1} \mathsf{q}^1 } \subset \Phi'$.
Similarly to the previous step,
we already know that
$J_{C^{\pi/2}_0}\ge0$ on $\overline{ \mathsf{q}_{2} \mathsf{q}^1 }$ and satisfies the Dirichlet condition on $\beta^1_1 \cup \alpha^{0+}_0$ and the Neumann condition on $\beta^0_{0+}$.
Arguing then as in the previous step,
we conclude that $J_{C^{\pi/2}_0}\ge0$ on $\Phi'$ and hence on
$\overline{ \mathsf{q}_{1} \mathsf{q}^1 } \subset \Phi'$,
which completes step 2.
\emph{Step 3:
We prove that $J_{C^0_0}\ge0$ and $J_{C^{\pi/2}_0}\ge0$ on $M^{++}_{++}$. }
Recall from \ref{M++++} that $M^{++}_{++}$ can be subdivided along the geodesic segments it contains
into $D^{[i:2]\pm}_i$'s,
$D_{0+}^{0+}$, and $D_{\frac{m}2-}^{0+}$ (for $m\in 4\mathbb{Z}$) or
$D_{\frac{m}{2}-}^{1-}$ (for $m\in 4\mathbb{Z}+2$).
We already know that on the geodesic segments in the boundaries of these regions
we have $J_{C_0^0},J_{C_0^{\pi/2}} \geq 0$,
while on the rest of each boundary $J_{C_0^0}$ and $J_{C_0^{\pi/2}}$
satisfy Dirichlet or Neumann conditions.
We also know, using \ref{P:V--} and
\ref{V+++},
that if we impose the Dirichlet condition on each geodesic segment
and leave the remaining boundary conditions unchanged,
then the corresponding lowest eigenvalue on each region
(obtained by subdividing along the geodesic segments)
is strictly positive.
If we assume then that
$J_{C^0_0}$ or $J_{C^{\pi/2}_0}$ attains negative values,
we will have a contradiction by domain monotonicity.
This completes step 3.
\emph{Step 4:
We complete the proof of the lemma.}
For $m$ even (i) follows from step 3 and the even symmetry with respect to ${\underline{\mathsf{R}}}_{\Sigma_{\pi/2} }$,
as asserted in \ref{Jsym}.iii,
which exchanges $M^{++}_{++}$ with $M^{++}_{+-}$.
For $m$ odd (i) follows from step 3 and
by using \ref{symM++++}.iii and the identity
$J_{C^0_0} \circ {\underline{\mathsf{R}}}_{ C_{\pi/2}^{\pi/4} } = J_{C_0^{\pi/2}}$
from \ref{AsymJ}.iii.
Finally (ii) follows from step 3 and (for $m\in 4\mathbb{Z}$)
\ref{symM++++}.i and \ref{AsymJ}.i
or (for $m\in 4\mathbb{Z}+2$)
\ref{symM++++}.ii and \ref{AsymJ}.ii.
\end{proof}
\begin{prop}
\label{P:V+--}
We have the following (recall \ref{M+++}).
\\ (i)
$J_{C_0^0}\in V_{-}^{-+}$
and
$V_{-}^{-+} \sim C_{pw}^\infty[M^{++}_{+*}; \gamma_{4-} \cup \gamma_5 , \gamma_{4+} ]$.
\\ (ii)
$J_{C_{0}^{\pi/2}}\in V_{-}^{+-}$
and
$V_{-}^{+-} \sim C_{pw}^\infty[M^{++}_{+*}; \gamma_{4+} \cup \gamma_5 , \gamma_{4-} ]$.
\\ (iii)
$\operatorname{Ind}(V_{-}^{-+} ) = \operatorname{Ind}( V_{-}^{+-} ) = 0$
and
$\operatorname{Null}(V_{-}^{-+} ) = \operatorname{Null}( V_{-}^{+-} ) = 1$.
\end{prop}
\begin{proof}
(i) and (ii) follow easily from the definitions and the symmetries in \ref{Jsym},
with the linear isomorphisms between the spaces given by restriction to $M^{++}_{+*}$
and their inverses by extending using the appropriate reflections.
(iii) follows then from \ref{MsJ}.i and \ref{Ctheorem}.
\end{proof}
We proceed to study now
$V_{+}^{-+}$ and $V_{+}^{+-}$.
One would like to decompose these spaces further,
but unfortunately it is clear how to do this only when $m$ is even.
If $m$ is even, we define
\begin{equation}
\label{Vpm4}
V^{\circ\circ}_{\circ\pm} := \{u\in V^{\circ\circ}_{\circ} \, : \, u\circ {\underline{\mathsf{R}}}_{\Sigma_{\pi/2} } = \pm u\,\} \quad \text{ for $m$ even},
\end{equation}
where the upper circles can be $+-$ or $-+$ (on both sides) and the lower circle $+$ or $-$ (on both sides).
We have then for $m$ even that
\begin{equation}
\label{V+-+oo}
V^{+-}_{\pm} = V^{+-}_{\pm-} {\,{\oplus}_{\Lcal}\,} V^{+-}_{\pm+},
\qquad
V^{-+}_{\pm} = V^{-+}_{\pm-} {\,{\oplus}_{\Lcal}\,} V^{-+}_{\pm+}.
\end{equation}
Although we will not use the following lemma, we state it for completeness of exposition---compare also with \ref{AsymJ}.
\begin{lemma}[Some eigenvalue equivalences]
\label{symV}
The following hold.
\\
(i)
If $m\in 4\mathbb{Z}$, then
$V_{-+}^{-+}\sim V^{-+}_{+-}$
and
$V_{-+}^{+-}\sim V^{+-}_{+-}$.
\\
(ii)
If $m\in 4\mathbb{Z}+2$, then
$V_{-+}^{-+}\sim V^{+-}_{+-}$,
and
$V_{-+}^{+-}\sim V^{-+}_{+-}$.
\\
(iii)
If $m\in 2\mathbb{Z}+1$, then
$V^{-+}_{-}\sim V^{+-}_{-}$
and
$V^{-+}_{+}\sim V^{+-}_{+}$.
\end{lemma}
\begin{proof}
All items follow easily from \ref{symM++++} and the definitions.
\end{proof}
\begin{prop}
\label{P:V+-+}
We have the following (recall \ref{M+++} and \ref{M++++}).
\\ (i)
$J_{C_{\pi/2}^{0}}\in V_{+}^{-+}$
and
$V_{+}^{-+} \sim C_{pw}^\infty[M^{++}_{+*}; \gamma_{4-} , \gamma_{4+} \cup \gamma_5 ]$.
Moreover if $m$ is even, we have
$J_{C_{\pi/2}^{0}}\in V_{+-}^{-+}$
and
$V_{+-}^{-+}\sim C_{pw}^{\infty}[ \, M^{++}_{++} ; \gamma_{1-}\cup\gamma_3 , \gamma_{1+}\cup \gamma_2 \, ]$.
\\ (ii)
$J_{C_{\pi/2}^{\pi/2}}\in V_{+}^{+-}$
and
$V_{+}^{+-} \sim C_{pw}^\infty[M^{++}_{+*}; \gamma_{4+} , \gamma_{4-} \cup \gamma_5 ]$.
Moreover if $m$ is even, we have
$J_{C_{\pi/2}^{\pi/2}}\in V_{+-}^{+-}$
and
$V_{+-}^{+-}\sim C_{pw}^{\infty}[ \, M^{++}_{++} ; \gamma_{1+}\cup\gamma_3 , \gamma_{1-}\cup \gamma_2 \, ]$.
\\ (iii)
$\operatorname{Ind}(V_{+}^{-+} ) = \operatorname{Ind}( V_{+}^{+-} ) = 1$
and
$\operatorname{Null}(V_{+}^{-+} ) = \operatorname{Null}( V_{+}^{+-} ) = 1$.
\end{prop}
\begin{proof}
As in the proof of \ref{P:V+--},
(i) and (ii) follow easily from the definitions and the symmetries in \ref{Jsym},
with the linear isomorphisms between the spaces given by restriction to $M^{++}_{+*}$
and their inverses by extending using the appropriate reflections.
To prove (iii) now we provide different arguments depending on whether $m$ is even or odd,
the even case being easier because of the extra symmetry we can employ.
We assume first that $m$ is even.
By (i), (ii), \ref{MsJ}.ii, and \ref{Ctheorem} we conclude that $\lambda_1( V^{+-}_{+-} ) = \lambda_1( V^{-+}_{+-} ) =0 $,
$\lambda_2( V^{+-}_{+-} ) >0$, and $\lambda_2( V^{-+}_{+-} ) >0 $.
Replacing the Dirichlet condition with the Neumann condition reduces the eigenvalues and therefore
$\lambda_1 ( V^{+-}_{++} ) < \lambda_1 ( V^{+-}_{+-} ) = 0$
and
$\lambda_1 ( V^{-+}_{++} ) < \lambda_1 ( V^{-+}_{+-} ) = 0$.
By Courant's nodal theorem \ref{Ctheorem} and arguing as in the proof of \ref{V+++} using
\cite{Cheng}*{Theorem 2.5}, we conclude that
the nodal set of an eigenfunction corresponding to
$\lambda_2 ( V^{+-}_{++} ) $
must contain a separating nodal curve in $M^{++}_{++}$
which does not intersect at least one of $\gamma_1$, $\gamma_2$, or $\gamma_3$ defined as in \ref{M++++}.ii.
There is a nodal domain then in $M^{++}_{++}$ which does not intersect
at least one of $\gamma_1$, $\gamma_2$, or $\gamma_3$.
If it does not intersect $\gamma_1$, by extending to $M^{++}_{**}$
and by using domain monotonicity we conclude that
$\lambda_2(V^{+-}_{++}) > \lambda_1(V^{--}) $.
If it does not intersect $\gamma_2$, using domain monotonicity we conclude that
$\lambda_2(V^{+-}_{++}) > \lambda_1(V^{+-}_{-+})$.
If it does not intersect $\gamma_3$, using domain monotonicity we conclude that
$\lambda_2(V^{+-}_{++}) > \lambda_1(V^{+-}_{+-})$.
Since
$\lambda_1(V^{--}) =0 $,
$\lambda_1(V^{+-}_{-+})=0 $,
and
$\lambda_1(V^{+-}_{+-})=0 $
by \ref{P:V--}, \ref{P:V+--}, and the above,
we conclude that
$0 < \lambda_2 ( V^{+-}_{++} ) $.
Arguing similarly we conclude that
$0 < \lambda_2 ( V^{-+}_{++} ) $.
The above together with the decompositions (by \ref{V+-+oo})
\begin{equation*}
V^{+-}_{+} = V^{+-}_{+-} {\,{\oplus}_{\Lcal}\,} V^{+-}_{++},
\qquad
V^{-+}_{+} = V^{-+}_{+-} {\,{\oplus}_{\Lcal}\,} V^{-+}_{++}
\end{equation*}
imply (iii) in the case that $m$ is even.
Suppose now that $m$ is odd.
Recall \ref{M+++}.
By ``cutting through'' $\overline{ \mathsf{q}_1\mathsf{q}^1 } $ and $\alpha^{1-}_1$
we obtain the decomposition
$M^{++}_{+*} = D_{0+}^{0+} \cup D_{1-}^{1-} \cup M'$,
where
$$
M':= D_{1+}^{1-}\cup \cup_{i=2}^{m-1} D^{[i:2]\pm}_i \cup D_{m-}^{1-},
$$
with the signs as in \ref{M+++}.i.
By \ref{Dsym}.ii $D_{0+}^{0+}$, $D_{1-}^{1-}$, and $M'$ are each homeomorphic to a disc and
we have
$\partial D_{0+}^{0+} = \beta^0_{0+} \cup \overline{ \mathsf{q}_1\mathsf{q}^1 } \cup \alpha^{0+}_0$,
$\partial D_{1-}^{1-} = \beta^1_{1-} \cup \overline{ \mathsf{q}_1\mathsf{q}^1 } \cup \alpha^{1-}_1$,
and
$\partial M' = \gamma'_4\cup \alpha^{1-}_1 \cup \alpha_m^{1-}$,
where
$\gamma'_4:= \gamma'_{4-} \cup \gamma'_{4+}$,
$\gamma'_{4-} = \gamma_{4-} \cap M' = \gamma_{4-} \setminus \beta^0_{0+} $,
and
$\gamma'_{4+} = \gamma_{4+} \cap M' = \gamma_{4+} \setminus \beta^1_{1-} $.
The advantage of $M'$ over $M^{++}_{+*}$ is that $M'$ has an extra symmetry,
${\underline{\mathsf{R}}}_{ \mathsf{q}_{\frac{m}2+1}, C^\perp} = {\underline{\mathsf{R}}}_{\Sigma_{\frac\pi2 + \frac\pi{2m} } }$,
which preserves each of $\gamma'_{4-}$ and $\gamma'_{4+}$.
To exploit this we define
$$
W:= C_{pw}^\infty [M'; \gamma'_{4-}, \gamma'_{4+} \cup \alpha^{1-}_1 \cup \alpha_m^{1-} ],
\qquad
W_\pm:= \{u\in W : u=\pm u \circ {\underline{\mathsf{R}}}_{\Sigma_{\frac\pi2 + \frac\pi{2m} } } \} .
$$
We clearly have then the decomposition $W=W_+{\,{\oplus}_{\Lcal}\,} W_-$.
We claim now that
\begin{equation}
\label{claim}
\lambda_2(W) =
\lambda_2[M'; \gamma'_{4-}, \gamma'_{4+} \cup \alpha^{1-}_1 \cup \alpha_m^{1-} ]
>0.
\end{equation}
To prove the claim it is enough to prove that $\lambda_1(W_-) >0$ and $\lambda_2(W_+)>0$.
By ``cutting through'' with
${\Sigma_{\frac\pi2 + \frac\pi{2m} } }$
we have the decomposition
$M'=M'_+\cup{\underline{\mathsf{R}}}_{\Sigma_{\frac\pi2 + \frac\pi{2m} } } M'_+ $,
where
$$
M'_+:= D^{[\frac{m+1}2:2]\pm}_{\frac{m+1}2+} \cup \cup_{i=\frac{m+3}2}^{m-1} D^{[i:2]\pm}_i \cup D_{m-}^{1-}.
$$
We have then
$M'_+\cap{\underline{\mathsf{R}}}_{\Sigma_{\frac\pi2 + \frac\pi{2m} } } M'_+ = M'\cap \Sigma_{\frac\pi2 + \frac\pi{2m} } = \alpha^{[\frac{m+1}2:2]\pm}_{\frac{m+1}2}$,
$\gamma'_{4\pm} \cap M'_+ = \gamma_{4\pm} \cap M'_+ $,
$$
\begin{aligned}
W_-\sim & \, C_{pw}^\infty \left[ M'_+ ; (\gamma_{4-}\cap M'_+ ) \cup \alpha^{[\frac{m+1}2:2]\pm}_{\frac{m+1}2} , ( \gamma_{4+} \cap M'_+ ) \cup \alpha_m^{1-} \right],
\\
W_+\sim & \, C_{pw}^\infty \left[ M'_+ ; \gamma_{4-}\cap M'_+ , \alpha^{[\frac{m+1}2:2]\pm}_{\frac{m+1}2} \cup ( \gamma_{4+} \cap M'_+ ) \cup \alpha_m^{1-} \right].
\end{aligned}
$$
Next we reposition $M'_+$ by using $\mathbb{T}r^{-\frac{m+1}2}$ (recall \ref{L:T}) to obtain
$$
M'' := \mathbb{T}r^{-\frac{m+1}2} M'_+ = D_{0+}^{0+}\cup \cup_{i=1}^{\frac{m-3}2} D^{[i:2]\pm}_i \cup D_{\frac{m-1}2-}^{[\frac{m-1}2:2]\pm},
$$
and we use
${\underline{\mathsf{R}}}_{ \mathsf{q}_{m/2} , C^\perp \, } = {\underline{\mathsf{R}}}_{\Sigma_{\frac\pi2 - \frac\pi{2m} } }$ to ``double'' $M''$,
producing
$$
M''':=
M'' \cup ({\underline{\mathsf{R}}}_{ \mathsf{q}_{m/2} , C^\perp \, } M'' )
= D_{0+}^{0+}\cup \cup_{i=1}^{m-2} D^{[i:2]\pm}_i \cup D_{(m-1)-}^{0+},
$$
where $\alpha^{[\frac{m+1}2:2]\pm}_{\frac{m+1}2}$,
which was used to subdivide $M'$,
has been moved and ``doubled'' to $\alpha^{0+}_0 \cup \alpha_{m-1}^{0+} \subset \partial M'''$.
We have then that a first eigenfunction in $W_-$ (corresponding to $\lambda_1(W_-)$)
corresponds to an eigenfunction
in $C_{pw}^\infty [M'''; \alpha^{0+}_0 \cup \alpha_{m-1}^{0+} \cup (\gamma_{4\pm}\cap M''') , (\gamma_{4\mp}\cap M''') \, ]$
which moreover is even under reflection with respect to
${\underline{\mathsf{R}}}_{ \mathsf{q}_{m/2} , C^\perp \, } = {\underline{\mathsf{R}}}_{\Sigma_{\frac\pi2 - \frac\pi{2m} } }$,
and where the $\pm$ and $\mp$ signs are opposite and depend on whether $m\in 4\mathbb{Z}+1$ or $m\in 4\mathbb{Z}+3$.
Either way by \ref{P:V+--} and by domain monotonicity
(since $M'''\subsetneq M^{++}_{+*}$)
we conclude
that $\lambda_1(W_-) >0$.
We have also $W_+\sim C_{pw}^\infty [ M''; (\gamma_{4\pm}\cap M'') , \alpha^{0+}_0 \cup \alpha_{\frac{m-1}2}^{[\frac{m-1}2:2]\pm} \cup (\gamma_{4\mp}\cap M'') \, ]$.
Suppose $\varphi$ is an eigenfunction corresponding to
$\lambda_2(W_+)$.
By Courant's nodal theorem \ref{Ctheorem} and arguing as in the proof of \ref{V+++} using
\cite{Cheng}*{Theorem 2.5}, we conclude that
there is a
separating nodal curve $\gamma$ which has to avoid at least one of
$\gamma_4\cap M''$, $\alpha^{0+}_0 $, or $ \alpha_{\frac{m-1}2}^{[\frac{m-1}2:2]\pm} $.
In the first case by domain monotonicity we conclude that
$$
\lambda_2(W_+) >
\lambda_1 [ M''; (\gamma_{4}\cap M'') , \alpha^{0+}_0 \cup \alpha_{\frac{m-1}2}^{[\frac{m-1}2:2]\pm} \, ] =0,
$$
where the last equality follows from \ref{P:V--}.
In the second case we again use domain monotonicity,
but the comparison is with $\lambda_1(W_-) $,
which we proved positive above.
In the third case we reposition $M''$ and we argue as for the second case.
This completes the proof that
$\lambda_2(W_+) > 0$ and hence of our claim
\ref{claim}.
Clearly by \ref{P:V--} we have
\begin{equation}
\label{E:D1}
\lambda_1 [ D_{0+}^{0+} ; \beta^0_{0+} , \overline{ \mathsf{q}_1\mathsf{q}^1 } \cup \alpha^{0+}_0 ] = 0.
\end{equation}
We consider now an eigenfunction corresponding to
$\lambda_2 [ D_{1-}^{1-} ; \emptyset, \beta^1_{1-} \cup \overline{ \mathsf{q}_1\mathsf{q}^1 } \cup \alpha^{1-}_1] $.
By Courant's nodal theorem \ref{Ctheorem} and arguing as in the proof of \ref{V+++} using
\cite{Cheng}*{Theorem 2.5} again, we conclude that
there is a separating nodal curve which avoids at least one of
$\beta^1_{1-}$, $\overline{ \mathsf{q}_1\mathsf{q}^1 }$, or $\alpha^{1-}_1$.
We can use domain monotonicity then to assert that
$\lambda_2 [ D_{1-}^{1-} ; \emptyset, \beta^1_{1-} \cup \overline{ \mathsf{q}_1\mathsf{q}^1 } \cup \alpha^{1-}_1] $
is strictly greater than at least one of
$\lambda_1 [ D_{1-}^{1-} ; \beta^1_{1-} , \overline{ \mathsf{q}_1\mathsf{q}^1 } \cup \alpha^{1-}_1] $,
$\lambda_1 [ D_{1-}^{1-} ; \overline{ \mathsf{q}_1\mathsf{q}^1 } , \beta^1_{1-} \cup \alpha^{1-}_1] $,
or
$\lambda_1 [ D_{1-}^{1-} ; \alpha^{1-}_1 , \beta^1_{1-} \cup \overline{ \mathsf{q}_1\mathsf{q}^1 } ] $.
By appealing to \ref{P:V--}, \ref{V+++}, or \ref{V++-} correspondingly we conclude that
\begin{equation}
\label{E:D2}
\lambda_2 [ D_{1-}^{1-} ; \emptyset, \beta^1_{1-} \cup \overline{ \mathsf{q}_1\mathsf{q}^1 } \cup \alpha^{1-}_1] >0.
\end{equation}
We can apply \ref{P:eigen} now to the decomposition
$M^{++}_{+*} = D_{0+}^{0+} \cup D_{1-}^{1-} \cup M'$ to conclude
by referring to \ref{claim}, \ref{E:D1}, and \ref{E:D2}, that
$\#_{\le0}(V_{+}^{-+}) = \#_{\le0}[M^{++}_{+*}; \gamma_{4-} , \gamma_{4+} \cup \gamma_5 ] \le2$.
Since
$J_{C_{\pi/2}^{0}}\in V_{+}^{-+}$ changes sign on $M^{++}_{+*}$ by \ref{L:grad},
it cannot be a first eigenfunction.
Since it has eigenvalue $0$, we conclude by the last inequality that
it is a second eigenfunction, which completes the proof of (iii) for $V_+^{-+}$.
The proof for $V_{+}^{+-}$ is similar.
\end{proof}
The main theorem follows.
Recall that $\xi_{g,1}$ in the notation of \cite{Lawson} denotes the genus-$g$ Lawson surface
which can be viewed as a desingularization of two orthogonal great two-spheres in the round three-sphere $\mathbb{S}^3$.
\begin{theorem}
\label{Mtheorem}
If $g\in\mathbb{N}$ and $g\ge2$, then the index of $\xi_{g,1}$ is $2g+3$.
Moreover $\xi_{g,1}$ has nullity $6$ and no exceptional Jacobi fields.
\end{theorem}
\begin{proof}
Recall that $m=g+1$.
Combining then \ref{V++}, \ref{V+-+o},
Propositions \ref{P:V--}, \ref{P:V++}, \ref{P:V+--} and \ref{P:V+-+},
we conclude the proof.
\end{proof}
\begin{remark}[Alternative proof for high genus]
\label{R2}
The Lawson surfaces of high genus can be constructed by gluing, and this construction yields detailed knowledge of their geometry.
The gluing construction is a straightforward desingularization construction for $\Sigma^{\pi/4}\cup \Sigma^{-\pi/4}= \cup_{j=1}^4 (\mathsf{q}^j\mbox{$\times \hspace*{-0.244cm} \times$} C)$,
that is two orthogonal great two-spheres,
in the fashion of those constructions in \cite{kapouleas:wiygul} which are for two orthogonally intersecting Clifford tori.
The surfaces constructed are modeled in the vicinity of $C$ after the classical Scherk surface \cite{Scherk} desingularizing two orthogonal planes in $\mathbb{R}^3$
and
given in appropriate Cartesian coordinates
by the equation $\sinh x^1 \sinh x^2 = \sin x^3$.
For each large $m$ we can impose on the construction all the symmetries of $M=M[C,m]$.
By the uniqueness in \ref{T:lawson} we can then infer that the surface constructed is actually $M=M[C,m]=\xi_{m-1,1}$.
By the control the construction provides we can conclude then that for large $m$ (equivalently large genus)
the region of the Lawson surface $M=\xi_{m-1,1}$ in the vicinity of $C$ can be approximated by an appropriately scaled singly periodic Scherk surface,
which has been transplanted to $\mathbb{S}^3$ so that its axis covers $C$.
The rest of the Lawson surface approximates $\cup_{j=1}^4 (\mathsf{q}^j\mbox{$\times \hspace*{-0.244cm} \times$} C)$ (that is the two great two-spheres being desingularized)
with a small neighborhood of $C$ removed.
This information can be used to simplify the proofs of many intermediate results on which the proof of the main theorem is based,
thus avoiding the need for many of the arguments we have used in this article.
\qed
\end{remark}
Note now that there is a smooth family of singly periodic Scherk surfaces which can be parametrized by the angle $\theta\in(0,\pi)$ between
two adjacent asymptotic half planes.
The Scherk surface in \ref{R2} corresponds then to $\theta=\pi/2$.
Since $\theta$ can be prescribed in this family, we say that the Scherk surfaces can \emph{``flap their wings''}.
In \cite{alm20}*{Section 4.2} a heuristic argument was provided indicating that this is not the case for the Lawson surfaces
and further questions motivated by this were asked.
The non-existence of exceptional Jacobi fields as in \ref{Mtheorem} provides partial answers to some of those questions.
In particular it implies that each $\xi_{g,1}$ is isolated as in the following corollary.
Isolatedness can be proved by adapting the proof of \cite{KMP}*{Proposition 3.1} or by a more direct argument suggested to us by R. Schoen as follows.
\begin{cor}[No flapping and isolatedness]
\label{C:flapping}
$\xi_{g,1}$ as in \ref{Mtheorem}
cannot ``flap its wings'' at the linearized level and moreover it is isolated in the sense that there is an $\epsilon>0$ such that
any minimal surface
within a $C^1$ $\epsilon$-neighborhood of $\xi_{g,1}$
is congruent to $\xi_{g,1}$.
\end{cor}
\begin{proof}
By ``no flapping at the linearized level'' we mean that there are no Jacobi fields
which are infinitesimal deformations consistent with varying the angle of intersection of the
two spheres of which the surfaces can be viewed as desingularizations.
Since there are no exceptional Jacobi fields by Theorem \ref{Mtheorem},
the result follows.
To prove now isolatedness
suppose $M=\xi_{g,1}$ is not isolated modulo congruence.
Then there exists a sequence $\{M_n\}$
of embedded
minimal surfaces none of which is congruent to $M$
but which $C^1$-converge to $M$.
Each $M_n$ is then the graph (via the exponential map in the normal direction)
of some function $u_n$ on $M$, so that $\{u_n\}$ converges to $0$ in $C^1(M)$.
By appropriately rotating each $M_n$ we may assume that eventually $u_n$
is $L^2(M)$-orthogonal to the space of nonexceptional Jacobi fields,
at least to first order in $\norm{u_n}_{C^0}$.
Since each $M_n$ is minimal, by elliptic regularity
the sequence $\left\{u_n/\norm{u_n}_{C^1}\right\}$
is bounded in $C^{2,\alpha}(M)$, so has a subsequence converging in $C^2(M)$,
thereby producing a nontrivial exceptional Jacobi field, a contradiction.
\end{proof}
\appendix
\section{Eigenvalues and subdivisions}
\label{A:eigen}
\nopagebreak
In this appendix following \cite{Ros} we state two bounds on the number of eigenvalues on a domain
in terms of the number of eigenvalues on appropriate subdivisions of the domain.
More precisely suppose that we are given $\mathcal{L}u$, ${\underline{U}}$, ${\underline{g}}$, and $\partial {\underline{U}}= \partial_D{\underline{U}} \cup \partial_N{\underline{U}}$ as in \ref{D:mixed}.
We assume further that by removing a finite union of smooth embedded one-dimensional submanifolds,
${\underline{\gamma}}\subset {\underline{U}}$,
we subdivide ${\underline{U}}$ into $n\in\mathbb{N}$ connected components
whose (compact) closures we denote by ${\underline{U}}_i$ for $i=1,\dots,n$.
We define $\partial_D {\underline{U}}_i:= \partial {\underline{U}}_i \cap \partial_D{\underline{U}}$,
$\partial_N {\underline{U}}_i:= \partial {\underline{U}}_i \cap \partial_N{\underline{U}}$,
and
${\underline{\gamma}}_i:= \partial {\underline{U}}_i \cap {\underline{\gamma}}$.
Clearly then we have the decomposition
$$
\partial {\underline{U}}_i = {\underline{\gamma}}_i \cup \partial_D {\underline{U}}_i \cup \partial_N {\underline{U}}_i.
$$
We have then the following.
\begin{prop}[Montiel-Ros \cite{Ros}]
\label{P:eigen}
Assuming the above and in the notation of \ref{D:mixed} we have the following $\forall\lambda\in\mathbb{R}$.
\\
(i)
$\#_{<\lambda}[\mathcal{L}u,{\underline{U}};\partial_D{\underline{U}},\partial_N{\underline{U}}]
\ge
\#_{<\lambda}[\mathcal{L}u,{\underline{U}}_1;{\underline{\gamma}}_1\cup \partial_D{\underline{U}}_1,\partial_N{\underline{U}}_1]
+
\sum_{i=2}^n
\#_{\le\lambda}[\mathcal{L}u,{\underline{U}}_i;{\underline{\gamma}}_i\cup \partial_D{\underline{U}}_i,\partial_N{\underline{U}}_i]$.
\\
(ii)
$\#_{\le\lambda}[\mathcal{L}u,{\underline{U}};\partial_D{\underline{U}},\partial_N{\underline{U}}]
\le
\#_{\le\lambda}[\mathcal{L}u,{\underline{U}}_1;\partial_D{\underline{U}}_1,{\underline{\gamma}}_1\cup \partial_N{\underline{U}}_1]
+
\sum_{i=2}^n
\#_{<\lambda}[\mathcal{L}u,{\underline{U}}_i;\partial_D{\underline{U}}_i,{\underline{\gamma}}_i\cup \partial_N{\underline{U}}_i]$.
\end{prop}
\begin{proof}
Parts (i) and (ii) generalize Lemma 12 and Lemma 13 respectively of \cite{Ros},
whose proofs carry over here with only minor modification.
Nevertheless we sketch a proof for ease of reference.
First we introduce some general notation:
for $\mathcal{L}u$, ${\underline{U}}$, $g$, and $\partial {\underline{U}}= \partial_D{\underline{U}} \cup \partial_N{\underline{U}}$ as in \ref{D:mixed}
and $\lambda \in \mathbb{R}$
we will write $E_\lambda[\underline{U};\partial_D\underline{U},\partial_N\underline{U}]$
for the $\lambda$-eigenspace of $\mathcal{L}u$ on $\underline{U}$
with Dirichlet condition on $\partial_D \underline{U}$ and Neumann condition on $\partial_N\underline{U}$.
We will understand $E_\lambda[{\underline{U}};\partial_D{\underline{U}},\partial_N{\underline{U}}]=\{0\}$
when $\lambda$ is not an eigenvalue.
Now we make the same assumptions on ${\underline{U}}$ and its boundary as above
and fix $\lambda \in \mathbb{R}$.
To prove (i) we define the $n$ spaces of test functions
\begin{equation*}
\begin{aligned}
V_1
&:=
\{
u \in C^\infty_{pw}[{\underline{U}}] \, : \,
u|_{{\underline{U}}_1} \in \bigoplus_{\lambda'<\lambda} E_{\lambda'}[{\underline{U}}_1;{\underline{\gamma}}_1 \cup \partial_D{\underline{U}}_1,\partial_N{\underline{U}}_1]
\mbox{ and } u|_{{\underline{U}}_j}=0 \mbox{ if } j \neq 1
\} \mbox{ and} \\
V_i
&:=
\{
u \in C^\infty_{pw}[{\underline{U}}] \, : \,
u|_{{\underline{U}}_i} \in \bigoplus_{\lambda' \leq \lambda} E_{\lambda'}[{\underline{U}}_i;{\underline{\gamma}}_i \cup \partial_D{\underline{U}}_i,\partial_N{\underline{U}}_i]
\mbox{ and } u|_{{\underline{U}}_j}=0 \mbox{ if } j \neq i
\} \mbox{ for } 2 \leq i \leq n.
\end{aligned}
\end{equation*}
Clearly for any $u_i \in V_i$ and $u_j \in V_j$ with $i \neq j$
we have $\nabla u_i \perp_{L^2({\underline{U}})} \nabla u_j$
and $u_i \perp_{L^2({\underline{U}})} hu_j$ for any $h \in C^\infty_{pw}(\underline{U})$.
Define also
\begin{equation*}
V_{<\lambda}
:=
\bigoplus_{\lambda'<\lambda} E_{\lambda'}[{\underline{U}};\partial_D{\underline{U}},\partial_N{\underline{U}}].
\end{equation*}
We claim that the $L^2({\underline{U}})$-orthogonal projection
$\bigoplus_{i=1}^n V_i \to V_{<\lambda}$ is injective,
which will establish (i).
To check the injectivity suppose $u \in \bigoplus_{i=1}^n V_i$, so that
$u=\sum_{i=1}^n u_i$
for some $u_1 \in V_1$, $u_2 \in V_2$, \ldots, $u_n \in V_n$.
Then
\begin{equation*}
\langle \nabla u, \nabla u \rangle_{L^2} - \langle u,fu\rangle_{L^2}
=
\sum_{i=1}^n \left(\langle \nabla u_i, \nabla u_i \rangle_{L^2} - \langle u_i, fu_i \rangle_{L^2}\right)
\leq
\lambda \langle u, u \rangle_{L^2},
\end{equation*}
but the additional assumption $u \perp_{L^2} V_{<\lambda}$
forces the equality case,
which then implies
$u|_{\underline{U}_1}=0$ and $\mathcal{L}u=-\lambda u$ everywhere.
We conclude by the unique-continuation principle \cite{Aronszajn} that in fact $u=0$.
To prove
now (ii) we define the $2+n$ vector spaces
\begin{equation*}
\begin{aligned}
&W:=\prod_{i=1}^n C^\infty_{pw}[{\underline{U}}_i], \quad
V_{\leq \lambda}
:=
\bigoplus_{\lambda' \leq \lambda} E_{\lambda'}[{\underline{U}};\partial_D{\underline{U}},\partial_N{\underline{U}}],
\quad
W_1
:=
\bigoplus_{\lambda' \leq \lambda} E_{\lambda'}[{\underline{U}}_1;\partial_D{\underline{U}}_1,{\underline{\gamma}}_1 \cup \partial_N{\underline{U}}_1], \\
&\mbox{and for } 2 \leq i \leq n \qquad
W_i
:=
\bigoplus_{\lambda'<\lambda} E_{\lambda'}[{\underline{U}}_i;\partial_D{\underline{U}}_i,{\underline{\gamma}}_i \cup \partial_N{\underline{U}}_i].
\end{aligned}
\end{equation*}
Clearly $\prod_{i=1}^n W_i$ is a subspace of $W$
and the map
$\iota: u \in V_{\leq \lambda} \mapsto \left(u|_{{\underline{U}}_1},\ldots,u|_{{\underline{U}}_n}\right) \in W$
is injective.
Endowing $W$ with the obvious $L^2$ inner product and writing
$\pi: W \to \prod_{i=1}^n W_i$
for the corresponding projection onto $\prod_{i=1}^n W_i$,
we claim that $\pi \circ \iota$ is injective,
implying (ii).
To check the injectivity suppose $u \in V_{\leq \lambda}$.
Then
\begin{equation*}
\sum_{i=1}^n \langle \nabla u|_{\underline{U}_i}, \nabla u|_{\underline{U}_i} \rangle_{L^2}
-\sum_{i=1}^n \langle fu|_{\underline{U}_i},u|_{\underline{U}_i} \rangle_{L^2}
=
\langle \nabla u, \nabla u \rangle_{L^2} - \langle u, fu \rangle_{L^2}
\leq \lambda \langle u, u \rangle_{L^2}.
\end{equation*}
On the other hand, if $\iota(u) \perp \prod_{i=1}^n W_i$,
then for each $i$
\begin{equation*}
\langle \nabla u|_{\underline{U}_i}, \nabla u|_{\underline{U}_i} \rangle_{L^2}
-\langle fu|_{\underline{U}_i},u|_{\underline{U}_i} \rangle_{L^2}
\geq
\lambda \langle u|_{\underline{U}_i}, u|_{\underline{U}_i} \rangle_{L^2},
\end{equation*}
with strict inequality when $i=1$, unless $u|_{{\underline{U}}_1}=0$.
Since $\sum_{i=1}^n \langle u|_{\underline{U}_i}, u|_{\underline{U}_i} \rangle_{L^2}=\langle u,u\rangle_{L^2}$,
we conclude that if $u \in \ker \pi \circ \iota$, then $u$ is a solution to $\underline{\mathcal{L}}u=-\lambda u$
and vanishes identically on $\underline{U}_1$,
forcing $u=0$ by unique continuation.
\end{proof}
\section{The Courant nodal theorem}
\label{B:courant}
\nopagebreak
In this appendix we recall Courant's nodal theorem in the form we use it.
Suppose that we are given $\mathcal{L}u$, ${\underline{U}}$, ${\underline{g}}$, and $\partial {\underline{U}}= \partial_D{\underline{U}} \cup \partial_N{\underline{U}}$ as in \ref{D:mixed}.
Suppose moreover ${\underline{U}}$ is connected.
We define the number of nodal domains of an eigenfunction $u$ of $\mathcal{L}u$ to be the number of connected components of
${\underline{U}}\setminus u^{-1}(0)$.
We have then the following,
where for ease of reference we include in the theorem its corollary on the simplicity of the first eigenvalue.
\begin{theorem}[Courant's nodal theorem \cite{courant}]
\label{Ctheorem}
Given $\mathcal{L}u$, ${\underline{U}}$, $g$, and $\partial {\underline{U}}= \partial_D{\underline{U}} \cup \partial_N{\underline{U}}$ as above,
let $N_n$ for each $n\in\mathbb{N}$
be the number of nodal domains of an eigenfunction corresponding to the $n^{th}$ eigenvalue
$\lambda_n[\mathcal{L}u,{\underline{U}};\partial_D{\underline{U}},\partial_N{\underline{U}}]$
in the notation of \ref{D:mixed}.
We have then for $n=1$: $N_1=1$ and
$\lambda_1[\mathcal{L}u,{\underline{U}};\partial_D{\underline{U}},\partial_N{\underline{U}}] <
\lambda_2[\mathcal{L}u,{\underline{U}};\partial_D{\underline{U}},\partial_N{\underline{U}}] $;
and for $n>1$: $2\le N_n \le n$.
\end{theorem}
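As a simple illustration of the statement (a one-dimensional example, included only for orientation and not used elsewhere), take ${\underline{U}}=[0,1]$ with the flat metric, $\partial_D{\underline{U}}=\{0,1\}$, $\partial_N{\underline{U}}=\emptyset$, and the operator $u\mapsto u''$ (zero potential).
The eigenvalue problem $u''=-\lambda u$ with $u(0)=u(1)=0$ has eigenvalues $\lambda_n=n^2\pi^2$ and eigenfunctions $u_n(t)=\sin(n\pi t)$, and $u_n$ has exactly $n$ nodal domains on $(0,1)$;
thus $N_1=1$, $\lambda_1<\lambda_2$, and $N_n=n$ for every $n>1$, so the upper bound $N_n\le n$ is attained.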
\end{document}
\begin{document}
\title[Large deviations for the coefficients]
{Large deviation expansions for the coefficients of random walks on \\ the general linear group}
\author{Hui Xiao}
\author{Ion Grama}
\author{Quansheng Liu}
\curraddr[Xiao, H.]{Universit\'{e} de Bretagne-Sud, LMBA UMR CNRS 6205, Vannes, France}
\email{[email protected]}
\curraddr[Grama, I.]{Universit\'{e} de Bretagne-Sud, LMBA UMR CNRS 6205, Vannes, France}
\email{[email protected]}
\curraddr[Liu, Q.]{Universit\'{e} de Bretagne-Sud, LMBA UMR CNRS 6205, Vannes, France}
\email{[email protected]}
\begin{abstract}
Let $(g_n)_{n\geqslant 1}$ be a sequence of independent and identically distributed
elements of the general linear group $GL(d, \bb R)$.
Consider the random walk $G_n : = g_n \ldots g_1$.
Under suitable conditions, we establish a Bahadur-Rao-Petrov type large deviation expansion for
the coefficients $\langle f, G_n v \rangle$, where $f \in (\bb R^d)^*$ and $v \in \bb R^d$.
In particular, our result implies the large deviation principle with an explicit rate function,
thus improving significantly the large deviation bounds established earlier.
Moreover, we establish a Bahadur-Rao-Petrov type large deviation expansion for
the coefficients $\langle f, G_n v \rangle$ under the changed measure.
Toward this end we prove the H\"{o}lder regularity of the stationary measure corresponding to
the Markov chain $G_n v /|G_n v|$ under the changed measure, which is of independent interest.
In addition, we prove local limit theorems with large deviations for the coefficients of $G_n$.
\end{abstract}
\date{\today}
\subjclass[2010]{Primary 60F10, 60B15, 37A30; Secondary 60B20, 60J05}
\keywords{Bahadur-Rao-Petrov large deviations; Products of random matrices;
Coefficients; Regularity of stationary measure; Spectral gap}
\maketitle
\section{Introduction}
\subsection{Background and objectives}
Let $d\geqslant 2$ be an integer.
Assume that on the probability space $(\Omega,\mathcal{F},\mathbb{P})$ we are given
a sequence of real random $d\times d$ matrices $(g_{n})_{n\geqslant 1}$
which are independent and identically distributed (i.i.d.) with common law $\mu$.
A great deal of research has been devoted to studying the random matrix product $G_n: = g_n \ldots g_1$.
Many fundamental results related to $G_n$,
such as the strong law of large numbers, the central limit theorem, the law of iterated logarithm
and large deviations have been established by
Furstenberg and Kesten \cite{FK60}, Kingman \cite{Kin73},
Le Page \cite{LeP82}, Guivarc'h and Raugi \cite{GR85}, Bougerol and Lacroix \cite{BL85},
Gol'dsheid and Margulis \cite{GM89}, Hennion \cite{Hen97}, Furman \cite{Fur02},
Guivarc'h and Le Page \cite{GL16},
Benoist and Quint \cite{BQ16, BQ17}, to name only a few.
These limit theorems turn out to be very useful in various areas,
such as the spectral theory of random Schr\"{o}dinger operators \cite{BL85, CL90},
disordered systems and chaotic dynamics coming from statistical physics \cite{CPV93},
the multidimensional stochastic recursion \cite{Kes73,GL16},
the dynamics of group actions \cite{BFLM11, BQ13},
and the survival probabilities and conditioned limit theorems
of branching processes in random environment \cite{GLP17, LPP18, GLL19}.
Denote by $\langle f, G_n v \rangle$ the coefficients of the matrix $G_n$,
where $f \in (\bb R^d)^*$ and $v \in \bb R^d$, and $\langle \cdot, \cdot \rangle$ is the corresponding dual bracket.
There has been growing interest in the study of the asymptotic behavior of $\langle f, G_n v \rangle$,
since the seminal work of Furstenberg and Kesten \cite{FK60}, where
the following strong law of large numbers has been established for positive matrices:
\begin{align*}
\lim_{n \to \infty } \frac{1}{n} \log | \langle f, G_n v \rangle | = \lambda, \quad \mbox{a.s.},
\end{align*}
with $\lambda$ a constant called the first Lyapunov exponent of the sequence $(g_{n})_{n\geqslant 1}$.
In \cite{FK60} the central limit theorem has also been proved,
thus giving an affirmative answer to Bellman's conjecture in \cite{Bel54}.
In the case of invertible matrices, Guivarc'h and Raugi \cite{GR85} have established
the strong law of large numbers and the central limit theorem
for the coefficients $\langle f, G_n v \rangle$,
where the proof turns out to be more involved than that in \cite{FK60}, and
is based on the regularity of the stationary measure of
the Markov chain $G_n x: = G_n v/|G_n v|$ with $x = \bb R v$ a starting point on the projective space $\bb P^{d-1}$.
Recently, Benoist and Quint \cite{BQ17} have proved the following large deviation bound:
for any $q > \lambda$, there exists a constant $c>0$ such that
\begin{align}\label{IntroLDBQ}
\mathbb{P} \big( \log | \langle f, G_n v \rangle | > nq \big) \leqslant e^{-cn}.
\end{align}
However, the precise decay rate of the large deviation probability in \eqref{IntroLDBQ} is not known.
The goal of this paper is to establish an exact
large deviation asymptotic for the coefficients $\langle f, G_n v \rangle$,
called Bahadur-Rao-Petrov type large deviations,
following the groundwork of Bahadur and Rao \cite{BR60} and Petrov \cite{Pet65}
for sums of i.i.d.\ real-valued random variables.
Our result will imply the large deviation principle with an explicit rate function,
which
improves \eqref{IntroLDBQ}.
Moreover, we shall also establish Bahadur-Rao-Petrov type upper tail large deviation asymptotics
for the couple $(G_n x, \log |\langle f, G_n v \rangle|)$ with target functions,
which is of independent interest; in particular it implies a new result
on the local limit theorem with large deviations for coefficients $\langle f, G_n v \rangle$.
Similar results for lower tail large deviations are also obtained; their proofs turn out to be more delicate.
\subsection{Brief overview of the main results}
Let $I_{\mu}= \{ s \geqslant 0: \mathbb{E}(\|g_1\|^{s})< \infty\} $, where $\| g \|$ is the operator norm of a matrix $g$.
For any $s\in I_\mu$, define $ \kappa(s)=\lim_{n\to\infty}\left(\mathbb{E}\| G_n \|^{s}\right)^{\frac{1}{n}}$.
Set $\Lambda = \log\kappa$ and consider its Fenchel-Legendre transform
$\Lambda^*$, which satisfies $\Lambda^*(q) = s q - \Lambda(s) >0$
for $q = \Lambda'(s)$ and $s \in I_{\mu}^{\circ}$ (the interior of the interval $I_{\mu}$).
In the sequel $\langle \cdot, \cdot \rangle$ and $| \cdot |$
denote respectively the dual bracket and the Euclidean norm.
Denote by $\mathbb{P}^{d-1}: = \{ x = \mathbb R v: v \in \mathbb{R}^d \setminus \{0\} \}$
the projective space of $\bb R^d$; the projective space $(\mathbb P^{d-1})^*$ of $(\bb R^d)^*$ is defined similarly.
For any $x=\mathbb Rv \in \mathbb P^{d-1}$ and $y=\mathbb Rf \in (\mathbb P^{d-1})^*$ we define
$\delta(y,x)= \frac{| \langle f,v \rangle |}{|f| |v|}$.
For any $g\in GL(d, \bb R)$ and $x = \mathbb R v \in\mathbb P^{d-1}$,
let $gx = \mathbb R gv \in \mathbb P^{d-1}$,
and denote by $gv\in \mathbb R^d$ the image of $v$ under the automorphism of $\mathbb R^d$ defined by $g$.
Consider the transfer operator $P_s$ defined by
$P_s \varphi (x) = \mathbb{E} [ e^{ s \sigma(g_1, x)} \varphi(g_1 x) ]$, $x = \bb R v \in \mathbb{P}^{d-1}$,
where $\sigma(g, x) = \log \frac{|gv|}{|v|}$, and $\varphi$ is a continuous function on $\mathbb{P}^{d-1}$;
the conjugate transfer operator $P_s^*$ is defined similarly: see \eqref{transfoper001}.
The operators $P_{s}$ and $P_s^*$ have continuous strictly positive eigenfunctions $r_s$ and $r_s^*$
on $\mathbb{P}^{d-1}$ which are unique up to a scaling constant,
and unique probability eigenmeasures $\nu_s$ and $\nu_s^*$,
satisfying $P_s r_s = \kappa(s) r_s$, $P_s \nu_s = \kappa(s) \nu_s$,
$P_s^* r_s^* = \kappa(s) r_s^*$ and $P_s^* \nu_s^* = \kappa(s) \nu_s^*$.
Denote $\sigma_s:= \sqrt{ \Lambda''(s) }>0$.
For details see Section \ref{subsec a change of measure}.
Our first objective is to establish a Bahadur-Rao type large deviation asymptotic
for the coefficients $\langle f, G_n v \rangle$;
we refer to Bahadur and Rao \cite{BR60} for the case of i.i.d.\ real-valued random variables.
More precisely, we prove that, for any $s \in I_{\mu}^{\circ}$, $v \in \bb R^d $ and $f \in (\bb R^d)^*$
with $|v|=|f| = 1$, $q = \Lambda'(s)$, $x = \bb R v$ and $y = \bb R f$, as $n \to \infty$,
\begin{align}\label{IntroEntryInver02}
\mathbb{P} \Big( \log | \langle f, G_n v \rangle | \geqslant nq \Big) =
\frac{r_{s}(x) r^*_{s}(y)}{ \varrho_s }
\frac{ \exp \big( -n \Lambda^*(q) \big) }{ s \sigma_s \sqrt{2\pi n} } \big[ 1 + o(1) \big],
\end{align}
where
$\varrho_s = \nu_s (r_s) = \nu_s^* (r_s^*) >0$.
The asymptotic \eqref{IntroEntryInver02} clearly implies the
large deviation principle for $\langle f, G_n v \rangle$ with the rate function $\Lambda^*$,
which obviously improves the large deviation bound \eqref{IntroLDBQ}.
In fact, we shall extend \eqref{IntroEntryInver02}
to the couple $(G_n x, \log | \langle f, G_n v \rangle |)$ with target functions.
Precisely, for any $s \in I_{\mu}^{\circ}$, any H\"{o}lder continuous function $\varphi$ on $\mathbb{P}^{d-1}$
and any measurable function $\psi$ on $\mathbb{R}$
such that $u \mapsto e^{-s_1u} \psi(u)$ is directly Riemann integrable for some $s_1 \in (0,s)$,
we prove that as $n \to \infty$,
\begin{align}
& \mathbb{E} \Big[ \varphi(G_n x) \psi \big( \log |\langle f, G_n v \rangle| - nq \big) \Big] \label{IntroTargSPosi} \\
& = \frac{r_{s}(x)}{ \varrho_s }
\frac{ \exp (-n \Lambda^*(q)) }{ \sigma_{s}\sqrt{2\pi n}}
\left[ \int_{\mathbb{P}^{d-1}} \varphi(x) \delta(y, x)^s \nu_s(dx)
\int_{\mathbb{R}} e^{-su} \psi(u) du + o(1) \right]. \nonumber
\end{align}
Our second objective is to establish a Bahadur-Rao type result for the lower large deviation probabilities
$\mathbb{P} \big( \log |\langle f, G_n v \rangle| \leqslant nq \big)$,
where $q = \Lambda'(s) < \lambda$ with $s<0$ sufficiently close to $0$.
Specifically, for $s<0$ small enough, we prove that, as $n \to \infty$,
\begin{align} \label{IntroEntrySNeg}
\mathbb{P} \left( \log | \langle f, G_n v \rangle | \leqslant nq \right)
= \frac{r_{s}(x) r^*_{s}(y)}{ \varrho_s }
\frac{ \exp \left( -n \Lambda^*(q) \right) } { - s \sigma_{s}\sqrt{2\pi n}} \big[ 1 + o(1) \big],
\end{align}
where $r_s$, $r_s^*$, $\varrho_s$, $\Lambda^*$ and $\sigma_s$
are defined in Section \ref{subsec a change of measure} and are strictly positive,
as in the case $s>0$.
The asymptotic \eqref{IntroEntrySNeg} is of course much sharper than
the corresponding lower tail large deviation principle for $\langle f, G_n v \rangle$.
More generally, we extend the lower tail large deviation expansion \eqref{IntroEntrySNeg} to the couple
$(G_n x, \log | \langle f, G_n v \rangle |)$ with target functions,
in the same line as \eqref{IntroTargSPosi}.
For a brief description of the main ideas of the approach see Section \ref{proof strategy}.
The assertions \eqref{IntroEntryInver02}, \eqref{IntroTargSPosi} and \eqref{IntroEntrySNeg} stated above
concern Bahadur-Rao type large deviation asymptotics.
Actually we shall establish an extended version of these results with an additional vanishing perturbation of $q$,
which in the literature is known as a Bahadur-Rao-Petrov type large deviation expansion.
Such extensions have important and interesting implications,
for instance, to local limit theorems with large deviations for the coefficients $\langle f, G_n v \rangle$:
see Theorem \ref{Theorem local LD002}.
Recently, Buraczewski, Collamore, Damek and Zienkiewicz \cite{BCDZ16}
have established a law of large numbers, a central limit theorem and large deviation results
for perpetuities using the Bahadur-Rao-Petrov large deviation asymptotic for sums of i.i.d.\ real valued random variables.
With the help of our large deviation results for products of random matrices
it is possible to extend these results to multivariate perpetuity sequences arising in financial mathematics.
Another potential application of our results is in the study of multitype branching processes
and branching random walks governed by products of random matrices;
we refer to Mentemeier \cite{Men16}, Bui, Grama and Liu \cite{BGL2020a, BGL2020b} for details.
It is worth mentioning that using the approach developed in this paper,
it is possible to establish new limit theorems
for the Gromov product of random walks on hyperbolic groups;
we refer to Gou\"{e}zel \cite{Gou09, Gou14} on this topic.
We also mention that our approach opens a way to
study invariance principles for the coefficients $\langle f, G_n v \rangle$;
recent progress in the study of invariance principles can be found
in Cuny, Dedecker and Jan \cite{CDJ17}
and Cuny, Dedecker and Merlev\`ede \cite{CDM19},
where the vector norm $|G_n v|$ and the operator norm $\|G_n\|$
have been studied via the martingale approximation approach.
\section{Main results}\label{sec.prelim}
In this section we present our main results and the strategy of the proofs.
\subsection{Notation and conditions}\label{subsec.notations}
Denote by $c$, $C$ absolute constants whose values may change from line to line.
By $c_\alpha$, $C_{\alpha}$ we mean constants depending only on the parameter $\alpha.$
For any integrable function $\rho: \mathbb{R} \to \mathbb{C}$, denote its Fourier transform by
$\widehat{\rho} (t) = \int_{\mathbb{R}} e^{-ity} \rho(y) dy$, $t \in \mathbb{R}$.
For a measure $\nu$ and a function $\varphi$ we write $\nu(\varphi)=\int \varphi d\nu.$
Let $\mathbb N = \{1,2,\ldots\}$. By convention $\log 0 =-\infty$.
The space $\mathbb{R}^d$ is equipped with the standard scalar product $\langle \cdot, \cdot\rangle$
and the associated norm $|\cdot|$.
For any integer $d \geqslant 2$,
denote by $\bb G: = GL(d,\mathbb R)$ the general linear group of invertible $d \times d$ matrices
with coefficients in $\mathbb R$.
The projective space $\mathbb P^{d-1}$ of $\mathbb R^d$
is the set of elements $x = \bb R v$, where $v \in \bb R^d \setminus \{0\}$.
The projective space of $(\mathbb R^d)^*$ is denoted by $(\mathbb P^{d-1})^*$.
We equip $\bb P^{d-1}$ with the angular distance $\mathbf d$ (see \cite{GL16}), i.e.,
for any $x, x' \in \mathbb{P}^{d-1}$ with $x = \bb R v$ and $x' = \bb R v'$,
$\mathbf{d}(x,x')= (1 - \frac{|\langle v, v' \rangle|}{|v| |v'|})^{1/2}$.
Let $\mathcal{C}(\bb P^{d-1})$ be the space of complex-valued continuous functions on $\bb P^{d-1}$.
We write $\mathbf{1}$ for the constant function equal to $1$ on $\bb P^{d-1}$.
Throughout this paper, $\gamma>0$ is a fixed sufficiently small constant.
For any $\varphi\in \mathcal{C}(\bb P^{d-1})$, set
\begin{align*}
\|\varphi\|_{\infty}:= \sup_{x\in \bb P^{d-1}}|\varphi(x)| \quad \mbox{and} \quad
\|\varphi\|_{\gamma}:= \|\varphi\|_{\infty}
+ \sup_{x \neq y} \frac{ |\varphi(x)-\varphi(y)| }{ \mathbf{d}(x,y)^{\gamma} },
\end{align*}
and consider the Banach space
$\mathcal{B}_{\gamma}:=\{\varphi\in \mathcal{C}(\bb P^{d-1}): \|\varphi\|_{\gamma}< +\infty\}.$
Throughout the paper, $(g_{n})_{n\geqslant 1}$ is a sequence of i.i.d.\ elements of $\bb G$
with common probability law $\mu$.
Denote by $\Gamma_{\mu}$ the smallest closed semigroup generated by the support of $\mu$.
For any $g \in \bb G$,
denote $\|g\| = \sup_{ v \in \bb R^d \setminus \{0\} } \frac{|g v|}{|v|}$.
Let
\begin{align*}
I_{\mu} = \big\{ s \geqslant 0: \mathbb{E}(\| g_1 \|^s) < + \infty \big\},
\end{align*}
and $I_{\mu}^{\circ}$ be its interior.
In the sequel we always assume that there exists $s>0$ such that
$\mathbb{E} ( \| g_1 \|^s ) < + \infty,$
so that $I_{\mu}^{\circ}$ is a non-empty open interval of $\mathbb{R}$.
For any $g \in \bb G$,
set $\iota(g) = \inf_{ v \in \bb R^d \setminus \{0\} } \frac{|g v|}{|v|}$;
note that $\iota(g) = \| g^{-1} \|^{-1}$.
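For the reader's convenience, this identity can be checked directly: substituting $w = gv$ (so that $w$ ranges over $\bb R^d \setminus \{0\}$ as $v$ does),
\begin{align*}
\iota(g) = \inf_{ v \in \bb R^d \setminus \{0\} } \frac{|g v|}{|v|}
= \inf_{ w \in \bb R^d \setminus \{0\} } \frac{|w|}{|g^{-1} w|}
= \Big( \sup_{ w \in \bb R^d \setminus \{0\} } \frac{|g^{-1} w|}{|w|} \Big)^{-1}
= \| g^{-1} \|^{-1}.
\end{align*}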
We will need the following exponential moment condition:
\begin{conditionA}\label{Condi_Exp}
There exist $s\in I_\mu^\circ$ and $\beta \in(0,1)$ such that
$$
\int_{\bb G} \| g \|^{s + \beta} \iota(g)^{-\beta} \mu (dg) < + \infty.
$$
\end{conditionA}
Moreover, we shall use the following two-sided moment condition.
Denote $N(g) = \max\{ \|g\|, \| g^{-1} \| \}$ for any $g \in \bb G$.
\begin{conditionA}\label{Condi-TwoExp}
There exists a constant $\eta > 0$ such that $\mathbb{E} \big( N(g_1)^{\eta} \big) < +\infty$.
\end{conditionA}
A matrix $g \in \bb G$ is called \emph{proximal} if it has an algebraically simple dominant eigenvalue,
namely, $g$ has an eigenvalue $\lambda_{g}$ satisfying $|\lambda_{g}| > |\lambda_{g}'|$
for all other eigenvalues $\lambda_{g}'$ of $g$.
It is easy to verify that $\lambda_{g} \in \mathbb{R}$.
The eigenvector $v_g$ with unit norm $|v_g| = 1$, corresponding to the eigenvalue $\lambda_{g}$,
is called the dominant eigenvector.
We will need the following strong irreducibility and proximality conditions:
\begin{conditionA}\label{Condi-IP}
{\rm (i)(Strong irreducibility)}
No finite union of proper subspaces of $\mathbb{R}^d$ is $\Gamma_{\mu}$-invariant.
{\rm (ii)(Proximality)} $\Gamma_{\mu}$ contains at least one proximal matrix.
\end{conditionA}
For any $g \in \bb G$ and $x = \bb R v \in \bb P^{d-1}$,
let $gx = \bb R gv \in \mathbb P^{d-1}$ and
\begin{align}\label{Def_MarkovChain01}
G_0 x: = x, \quad
G_n x: = \bb R G_n v, \quad n \geqslant 1.
\end{align}
Then $(G_n x)_{n \geqslant 0}$ forms a Markov chain on the projective space $\bb P^{d-1}$.
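For orientation we note that the Markov property is immediate from the identity
\begin{align*}
G_{n+1} x = \bb R \, g_{n+1} G_n v = g_{n+1} (G_n x), \qquad n \geqslant 0,
\end{align*}
since $g_{n+1}$ is independent of $(g_1, \ldots, g_n)$ and hence of $(G_0 x, \ldots, G_n x)$.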
Moreover, under condition \ref{Condi-IP},
$(G_n x)_{n \geqslant 0}$ has a unique stationary probability measure $\nu$ on $\bb P^{d-1}$
such that for any $\varphi \in \mathcal{C}(\bb P^{d-1})$,
\begin{align} \label{mu station meas}
\int_{\bb P^{d-1}} \int_{\bb G} \varphi(gx) \mu(dg) \nu(dx)
= \int_{\bb P^{d-1}} \varphi(x) \nu(dx).
\end{align}
Furthermore, the support of $\nu$ is given by
\begin{align}\label{Def_supp_nu}
\supp \nu = \overline{\{ v_{g}\in
\mathbb P^{d-1}: g\in\Gamma_{\mu}, \ g \mbox{ is proximal} \}}.
\end{align}
For any $s\in (-s_0, 0) \cup I_{\mu}$ with small enough $s_0>0$,
define the transfer operator $P_s$ and the conjugate transfer operator $P_{s}^{*}$
as follows: for any $\varphi \in \mathcal{C}(\bb P^{d-1})$,
\begin{align}\label{transfoper001}
P_{s}\varphi(x) = \int_{\bb G} e^{s \sigma (g,x)} \varphi( g x ) \mu(dg),
\quad x\in \bb P^{d-1},
\end{align}
where $\sigma(g, x) = \log \frac{|gv|}{|v|}$,
and for any $\varphi \in \mathcal{C}((\bb P^{d-1})^*)$,
\begin{align}\label{transfoper002}
P_{s}^{*}\varphi(y) = \int_{\bb G} e^{s \sigma (g^*, y)} \varphi(g^* y) \mu(dg),
\quad y \in (\bb P^{d-1})^*,
\end{align}
where $g^*$ denotes the adjoint automorphism of the matrix $g$.
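For orientation we record an elementary consequence of these definitions (a routine verification included only for the reader's convenience).
The function $\sigma$ satisfies the cocycle identity: for $g, h \in \bb G$ and $x = \bb R v \in \bb P^{d-1}$,
\begin{align*}
\sigma(h g, x) = \log \frac{|h g v|}{|g v|} + \log \frac{|g v|}{|v|} = \sigma(h, g x) + \sigma(g, x).
\end{align*}
Together with the independence of the $g_i$'s, this gives the formula for the iterates of the transfer operator:
\begin{align*}
P_s^n \varphi(x) = \mathbb{E} \big[ e^{s \sigma(G_n, x)} \varphi(G_n x) \big]
= \mathbb{E} \Big[ \frac{|G_n v|^s}{|v|^s} \, \varphi(G_n x) \Big],
\quad x = \bb R v \in \bb P^{d-1}, \ n \geqslant 1.
\end{align*}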
Under suitable conditions,
the transfer operator $P_s$ has a unique probability eigenmeasure $\nu_s$ on $\bb P^{d-1}$
corresponding to the eigenvalue $\kappa(s)$:
$P_s \nu_s = \kappa(s)\nu_s.$
Similarly, the conjugate transfer operator $P_{s}^{*}$
has a unique probability eigenmeasure $\nu^*_s$
corresponding to the eigenvalue $\kappa(s)$:
$P_{s}^{*} \nu^*_s = \kappa(s)\nu^*_s.$
For any $x = \bb R v \in \bb P^{d-1}$ and $y = \bb R f \in (\bb P^{d-1})^*$
with $v \in \bb R^d \setminus \{0\}$ and $f \in (\bb R^d)^* \setminus \{0\}$,
denote $\delta(y, x) = \frac{|\langle f, v \rangle|}{|f||v|}$ and set
\begin{align*}
r_{s}(x) = \int_{(\bb P^{d-1})^*} \delta(y, x)^s \nu^*_{s}(dy), \quad
r_{s}^*(y) = \int_{\bb P^{d-1}} \delta(y, x)^s \nu_{s}(dx).
\end{align*}
Then, $r_s$ is the unique, up to a scaling constant,
strictly positive eigenfunction of $P_s$:
$P_s r_s = \kappa(s)r_s$;
similarly
$r^*_s$ is the unique, up to a scaling constant,
strictly positive eigenfunction of $P_{s}^{*}$: $P_{s}^{*} r^*_s = \kappa(s)r^*_s$.
It is easy to see that
$$\nu_s(r_s) = \nu^*_s(r^*_s)= : \varrho_s.$$
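Indeed, by the formulas above and Fubini's theorem,
\begin{align*}
\nu_s(r_s) = \int_{\bb P^{d-1}} \int_{(\bb P^{d-1})^*} \delta(y, x)^s \, \nu_s^*(dy) \, \nu_s(dx)
= \int_{(\bb P^{d-1})^*} \int_{\bb P^{d-1}} \delta(y, x)^s \, \nu_s(dx) \, \nu_s^*(dy)
= \nu_s^*(r_s^*).
\end{align*}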
The stationary measure $\pi_s$ is defined by
$ \pi_{s}(\varphi)=\frac{\nu_{s}(\varphi r_{s})}{\varrho_{s}}$, for any $\varphi\in \mathcal{C}(\bb P^{d-1})$.
We refer to Section \ref{subsec a change of measure} for details.
Define $\Lambda = \log\kappa: (-s_0, 0) \cup I_{\mu} \to \mathbb R$; the function $\Lambda$ is convex and analytic.
Condition \ref{Condi-IP} implies
that $\Lambda''(s)$ is strictly positive for any $s\in (-s_0, 0) \cup I_{\mu}$, so that $\sigma_s := \sqrt{\Lambda''(s)} > 0$.
Denote by $\Lambda^{\ast}$ the Fenchel-Legendre transform of $\Lambda$;
then it holds that $\Lambda^*(q)=s q - \Lambda(s)>0$
if $q=\Lambda'(s)$ for $s\in (-s_0, 0) \cup I_{\mu}^{\circ}$.
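For the reader's convenience we recall the computation behind the last identity.
By definition, $\Lambda^*(q) = \sup_{t} \{ t q - \Lambda(t) \}$, the supremum being taken over the domain of $\Lambda$.
Since $\Lambda$ is strictly convex ($\Lambda''>0$), the concave function $t \mapsto t q - \Lambda(t)$ attains its supremum at the unique point $t = s$ where the first-order condition $q = \Lambda'(s)$ holds, whence
\begin{align*}
\Lambda^*(q) = s q - \Lambda(s), \qquad q = \Lambda'(s).
\end{align*}
Moreover, $\kappa(0) = 1$, so $\Lambda(0) = 0$, and comparing the value of $t q - \Lambda(t)$ at $t = s$ with its value at $t = 0$ gives $\Lambda^*(q) > 0$ whenever $s \neq 0$ (recall also that $\Lambda'(0) = \lambda$, so $q \neq \lambda$ in this case).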
\subsection{Precise large deviations for coefficients}\label{sec scalar prod}
The goal of this section is to state exact large deviation asymptotics
for the coefficients $\langle f, G_n v \rangle$, where $f \in (\bb R^d)^*$ and $v \in \bb R^d$.
To the best of our knowledge, the precise large deviations, and even the large deviation principle,
for $\langle f, G_n v \rangle$
have not been studied so far in the literature.
Our first result is a large deviation asymptotic of the Bahadur-Rao type (see \cite{BR60})
for the upper tails of $\langle f, G_n v \rangle$.
Recall the notation $x = \bb R v$ and $y = \bb R f$
for any $v \in \bb R^d \setminus \{0\}$ and $f \in (\bb R^d)^* \setminus \{0\}$.
\begin{theorem} \label{thrmBR001}
Assume conditions \ref{Condi_Exp} and \ref{Condi-IP}.
Let $s \in I_{\mu}^{\circ}$ and $q = \Lambda'(s)$.
Then, as $n \to \infty$,
uniformly in $f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
\begin{align} \label{SCALREZ01BR}
\mathbb{P} \Big( \log | \langle f, G_n v \rangle | \geqslant nq \Big)
= \frac{ r_{s}(x) r^*_{s}(y)}{\varrho_s}
\frac{ \exp \left( -n \Lambda^*(q) \right) } {s \sigma_{s}\sqrt{2\pi n}} \big[ 1 + o(1) \big].
\end{align}
\end{theorem}
In particular, if we fix a basis $(e_i^*)_{1 \leqslant i \leqslant d}$ in $(\bb R^d)^*$
and a basis $(e_j)_{1 \leqslant j \leqslant d}$ in $\bb R^d$,
then taking $f=e_i^*$ and $v =e_j$ in \eqref{SCALREZ01BR}, we get the
Bahadur-Rao type large deviation asymptotic for the $(i,j)$-th entry $G_n^{i,j}$ of the matrix product $G_n$.
It is easy to verify that the large deviation asymptotic \eqref{SCALREZ01BR} implies a large deviation principle,
as stated below: under the assumptions of Theorem \ref{thrmBR001},
we have, uniformly in $f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
\begin{align}\label{LDP-001}
\lim_{n \to \infty} \frac{1}{n} \log
\mathbb{P} \Big( \log |\langle f, G_n v \rangle| \geqslant n q \Big)
= - \Lambda^*(q).
\end{align}
In turn, the asymptotic \eqref{LDP-001} significantly improves the bound \eqref{IntroLDBQ}.
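To see how \eqref{LDP-001} follows from \eqref{SCALREZ01BR}, note that the prefactor in \eqref{SCALREZ01BR} is subexponential in $n$: taking logarithms and dividing by $n$,
\begin{align*}
\frac{1}{n} \log \mathbb{P} \Big( \log |\langle f, G_n v \rangle| \geqslant n q \Big)
= - \Lambda^*(q) + \frac{1}{n} \log \frac{ r_{s}(x) r^*_{s}(y)}{\varrho_s \, s \, \sigma_{s} \sqrt{2\pi n}}
+ \frac{1}{n} \log \big( 1 + o(1) \big)
\ \xrightarrow[n \to \infty]{} \ - \Lambda^*(q).
\end{align*}
The convergence is uniform in $f$ and $v$ with $|f| = |v| = 1$, since the $o(1)$ in \eqref{SCALREZ01BR} is uniform and the functions $r_s$ and $r_s^*$ are continuous and strictly positive on compact projective spaces, hence bounded away from $0$ and $\infty$.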
An important field of applications of large deviation asymptotics for the coefficients of type \eqref{SCALREZ01BR}
is the study of asymptotic behaviors of multi-type branching processes in random environment.
For results in the case of single-type branching processes we refer to \cite{GLM-EJP-2017, GLM-SPA-2017}
and for the relation between the coefficients of products of random matrices
and the multi-type branching processes we refer to \cite{Cohn}.
Our next result is an improvement of Theorem \ref{thrmBR001} by allowing a vanishing perturbation $l$ on $q=\Lambda'(s)$,
in the spirit of the Petrov result \cite{Pet65}, called the Bahadur-Rao-Petrov type large deviation.
Large deviations with a perturbation $l$ have been used
for example in
Buraczewski, Collamore, Damek and Zienkiewicz \cite{BCDZ16} for a recent application
to the asymptotic of the ruin time in some models of financial mathematics.
These results are also useful to deduce local limit theorems with large deviations,
see subsection \ref{Applic to LocalLD}.
\begin{theorem} \label{Thm_BRP_Upper}
Assume conditions \ref{Condi_Exp} and \ref{Condi-IP}.
Let $s \in I_{\mu}^{\circ}$ and $q = \Lambda'(s)$.
Let $(l_n)_{n \geqslant 1}$ be any positive sequence satisfying $\lim_{n \to \infty} l_n = 0$.
Then,
we have, as $n \to \infty$, uniformly in $|l| \leqslant l_n$,
$f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
\begin{align*}
\mathbb{P} \Big( \log | \langle f, G_n v \rangle | \geqslant n(q+l) \Big)
= \frac{ r_{s}(x) r^*_{s}(y)}{\varrho_s}
\frac{ \exp \left( -n \Lambda^*(q+l) \right) } {s \sigma_{s}\sqrt{2\pi n}} \big[ 1 + o(1) \big].
\end{align*}
More generally, for any measurable function $\psi$ on $\mathbb{R}$
such that $u \mapsto e^{-s'u}\psi(u)$ is directly Riemann integrable for some $s' \in (0,s)$,
we have, as $n \to \infty$, uniformly in $|l| \leqslant l_n$,
$f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$, and $\varphi \in \mathcal{B}_{\gamma}$,
\begin{align}
& \mathbb{E} \Big[ \varphi(G_n x) \psi \big( \log |\langle f, G_n v \rangle| - n(q+l) \big) \Big]
\label{SCALREZ02} \\
& = \frac{r_{s}(x)}{\varrho_s}
\frac{ \exp (-n \Lambda^*(q+l)) }{ \sigma_{s}\sqrt{2\pi n}}
\left[ \int_{\bb P^{d-1}} \varphi(x) \delta(y,x)^s \nu_s(dx)
\int_{\mathbb{R}} e^{-su} \psi(u) du + o(1) \right]. \nonumber
\end{align}
\end{theorem}
A more general version of Theorem \ref{Thm_BRP_Upper} is given in Theorem \ref{Thm_BRP_Uni_s},
where it is shown that the above large deviation asymptotics hold uniformly in $s \in K_{\mu}$
with any compact set $K_{\mu} \subset I_{\mu}^{\circ}$.
Consider the reversed random walk $M_n$ defined by $M_n = g_1 \ldots g_n$.
Since the two probabilities $\mathbb{P} \big( \log | \langle f, G_n v \rangle | \geqslant n(q+l) \big)$ and
$\mathbb{P} \big( \log | \langle f, M_n v \rangle | \geqslant n(q+l) \big)$ are equal (as $G_n$ and $M_n$ have the same law),
for $M_n$ we have the same large deviation expansions as for $G_n$.
Now we are going to give exact asymptotics of the lower tail large deviation probabilities
$\mathbb{P}( \log |\langle f, G_n v \rangle| \leqslant nq)$, where
$q=\Lambda'(s)<\lambda = \Lambda'(0)$ for $s<0$.
These asymptotics cannot be deduced from Theorems \ref{thrmBR001} and \ref{Thm_BRP_Upper};
the proofs turn out to be more delicate
and require developing the corresponding spectral gap theory for the transfer operator $P_s$
and establishing the H\"older regularity of the stationary measure $\pi_s$ with $s<0$.
\begin{theorem} \label{Thm-Posi-Neg-s}
Assume conditions \ref{Condi-TwoExp} and \ref{Condi-IP}.
Then, there exists a constant $s_0>0$
such that for any $s \in (-s_0, 0)$ and $q=\Lambda'(s)$,
as $n \to \infty$,
uniformly in $f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
\begin{align} \label{LD_Lower01}
\mathbb{P} \Big( \log |\langle f, G_n v \rangle| \leqslant n q \Big)
= \frac{ r_{s}(x) r^*_{s}(y)}{\varrho_s}
\frac{ \exp \left( -n \Lambda^*(q) \right) } { -s \sigma_{s}\sqrt{2\pi n}} \big[ 1 + o(1) \big].
\end{align}
\end{theorem}
In particular, fixing a basis $(e_i^*)_{1 \leqslant i \leqslant d}$ in $(\bb R^d)^*$
and a basis $(e_j)_{1 \leqslant j \leqslant d}$ in $\bb R^d$,
with $f = e_i^*$ and $v = e_j$ in \eqref{LD_Lower01},
we obtain the Bahadur-Rao type lower tail large deviation asymptotic
for the entries $G_n^{i,j}$.
From \eqref{LD_Lower01} we get a lower tail large deviation principle
under the assumptions of Theorem \ref{Thm-Posi-Neg-s}:
uniformly in $f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
\begin{align}\label{LDP-002}
\lim_{n \to \infty} \frac{1}{n} \log
\mathbb{P} \Big( \log |\langle f, G_n v \rangle| \leqslant n q \Big)
= - \Lambda^*(q).
\end{align}
The result \eqref{LDP-002} sharpens the following lower tail large deviation bound
established by Benoist and Quint \cite[Theorem 14.21]{BQ17}:
for $q < \lambda$, there exists a constant $c>0$ such that for
all $f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
\begin{align*}
\mathbb{P} \Big( \log |\langle f, G_n v \rangle| < nq \Big) \leqslant e^{-cn}.
\end{align*}
Now we give a Bahadur-Rao-Petrov version of the above theorem.
\begin{theorem} \label{Thm-Posi-Neg-sBRP}
Assume conditions \ref{Condi-TwoExp} and \ref{Condi-IP}.
Let $(l_n)_{n \geqslant 1}$ be any positive sequence satisfying $\lim_{n \to \infty} l_n = 0$.
Then, there exists a constant $s_0>0$ such that for any $s \in (-s_0, 0)$ and $q=\Lambda'(s)$,
we have, as $n \to \infty$,
uniformly in $|l| \leqslant l_n$, $f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
\begin{align*}
\mathbb{P} \Big( \log |\langle f, G_n v \rangle| \leqslant n(q+l) \Big)
= \frac{ r_{s}(x) r^*_{s}(y)}{\varrho_s}
\frac{ \exp \left( -n \Lambda^*(q+l) \right) } { -s \sigma_{s}\sqrt{2\pi n}}
\big[ 1 + o(1) \big].
\end{align*}
More generally, for any $\varphi \in \mathcal{B}_{\gamma}$ and any measurable function $\psi$ on $\mathbb{R}$
such that $u \mapsto e^{-s'u} \psi(u)$ is directly Riemann integrable for some $s'\in(-s_0,s)$, we have,
as $n \to \infty$, uniformly in $|l| \leqslant l_n$,
$f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
\begin{align*}
& \mathbb{E} \Big[ \varphi(G_n x) \psi \big( \log |\langle f, G_n v \rangle| - n(q+l) \big) \Big]
= \frac{r_{s}(x)}{\varrho_s}
\frac{ \exp (-n \Lambda^*(q+l)) }{ \sigma_{s}\sqrt{2\pi n}} \nonumber\\
& \qquad\qquad\qquad \times \left[ \int_{ \bb P^{d-1} } \varphi(x) \delta(y,x)^s \nu_s(dx)
\int_{\mathbb{R}} e^{-su} \psi(u) du + o(1) \right].
\end{align*}
\end{theorem}
\subsection{Local limit theorems with large deviations for coefficients} \label{Applic to LocalLD}
In this subsection we formulate the precise local limit theorems with large deviations
for the coefficients $\langle f, G_n v \rangle$.
For sums of independent real-valued random variables,
local limit theorems with large and moderate deviations can be found for instance in
Gnedenko \cite{Gne48}, Shepp \cite{She64}, Stone \cite{Sto65},
Borovkov and Borovkov \cite{BB08}, Breuillard \cite{Bre2005}, Varju \cite{Var15}.
For products of random matrices, such types of local limit theorems for the vector norm $|G_n v|$
have been recently established in \cite{BQ17, XGL19a, XGL19b}.
Our following theorem extends the results in \cite{XGL19a, XGL19b} for the vector norm $|G_n v|$
to the case of the coefficients $\langle f, G_n v \rangle$.
\begin{theorem}\label{Theorem local LD002}
Let $(l_n)_{n \geqslant 1}$ be any positive sequence satisfying $\lim_{n \to \infty} l_n = 0$.
Let $-\infty < a_1 < a_2 < \infty$ be real numbers.
\begin{enumerate}
\item
Assume conditions \ref{Condi_Exp} and \ref{Condi-IP}.
Let $s \in I_{\mu}^{\circ}$ and $q = \Lambda'(s)$.
Then, as $n\to\infty,$ uniformly in $|l| \leqslant l_n$,
$f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
\begin{align}
\qquad \quad & \mathbb{P} \Big( \log |\langle f, G_n v \rangle| \in [a_1, a_2] + n(q+l) \Big) \nonumber \\
& = \big( e^{-s a_1} - e^{-s a_2} \big)
\frac{ r_{s}(x) r^*_{s}(y)}{\varrho_s}
\frac{ \exp \left( -n \Lambda^*(q+l) \right) } {s \sigma_{s}\sqrt{2\pi n}}
\big[ 1 + o(1) \big]. \label{LLTLDa}
\end{align}
\item
Assume conditions \ref{Condi-TwoExp} and \ref{Condi-IP}.
Then, there exists a constant $s_0>0$
such that for any $s \in (-s_0, 0)$ and $q=\Lambda'(s)$,
as $n \to \infty$, uniformly in $|l| \leqslant l_n$,
$f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
\begin{align}
\qquad\quad & \mathbb{P} \Big( \log |\langle f, G_n v \rangle| \in [a_1, a_2] + n(q+l) \Big) \nonumber\\
& = \big( e^{-s a_2} - e^{-s a_1} \big)
\frac{ r_{s}(x) r^*_{s}(y)}{\varrho_s}
\frac{ \exp \left( -n \Lambda^*(q+l) \right) } { -s \sigma_{s}\sqrt{2\pi n}}
\big[ 1 + o(1) \big]. \label{LLTLDb}
\end{align}
\end{enumerate}
\end{theorem}
Taking $\varphi = \mathbf{1}$ and $\psi = \mathbbm 1_{[a_1, a_2]}$
with real numbers $a_1 < a_2$,
it is easy to see that Theorems \ref{Thm_BRP_Upper} and \ref{Thm-Posi-Neg-sBRP}
recover the local limit theorems with large deviations \eqref{LLTLDa} and \eqref{LLTLDb}, respectively.
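For the reader's convenience we record the elementary computation behind the constants in \eqref{LLTLDa} and \eqref{LLTLDb}. For any $s \neq 0$,
\begin{align*}
\int_{\mathbb{R}} e^{-su} \mathbbm 1_{[a_1, a_2]}(u) \, du = \frac{e^{-s a_1} - e^{-s a_2}}{s},
\end{align*}
which for $s<0$ can be rewritten as $\frac{e^{-s a_2} - e^{-s a_1}}{-s}$; moreover, taking $\varphi = \mathbf{1}$ gives $\int_{\bb P^{d-1}} \delta(y, x)^s \nu_s(dx) = r_s^*(y)$.
Substituting these values into \eqref{SCALREZ02} (respectively into the analogous expansion of Theorem \ref{Thm-Posi-Neg-sBRP}) yields \eqref{LLTLDa} (respectively \eqref{LLTLDb}).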
\subsection{Precise large deviations for coefficients under the changed measure}
We now give Bahadur-Rao-Petrov type large deviations
for the coefficients $\langle f, G_n v \rangle$ under the changed measure $\bb Q_s^x$,
which are useful for example in the study of branching processes and branching random walks.
We first deal with the upper tail case. The following result is a more general version of Theorems \ref{thrmBR001} and \ref{Thm_BRP_Upper}.
Denote $q_s = \Lambda'(s)$ and $q_t = \Lambda'(t)$
for any $s, t \in (-s_0, 0] \cup I^{\circ}_{\mu}$ with $s<t$.
\begin{theorem} \label{Thm_Coeff_BRLD_changedMea}
Assume conditions \ref{Condi_Exp}, \ref{Condi-TwoExp} and \ref{Condi-IP}.
Let $s_{\infty} = \sup \{ s: s \in I_{\mu} \}$.
Then, there exists a constant $s_0 > 0$
such that for any fixed $s \in (-s_0, s_{\infty})$
and any compact set $K_{\mu} \subset (s, s_{\infty})$,
we have, as $n \to \infty$,
uniformly in $t \in K_{\mu}$, $f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
\begin{align}
& \mathbb Q_s^x \Big( \log| \langle f, G_n v \rangle | \geqslant n q_t \Big)
= \frac{ r_{t}(x) }{ r_{s}(x) }
\frac{ \exp \{ -n(\Lambda^*(q_t) - \Lambda^*(q_s) - s(q_t -q_s)) \} }
{(t -s)\sigma_{t} \sqrt{2\pi n}}
\nonumber\\
& \qquad\qquad\qquad\qquad\qquad\qquad \times
\int_{ \bb P^{d-1} } \delta(y, x)^t \, \frac{r_s(x)}{r_t(x)} \pi_t(dx)
[ 1 + o(1)]. \label{LD_Upper_ChangeMea001}
\end{align}
More generally, there exists a constant $s_0 > 0$
such that for any fixed $s \in (-s_0, s_{\infty})$
and any compact set $K_{\mu} \subset (s, s_{\infty})$,
for any measurable function $\psi$ on $\mathbb{R}$
such that $u \mapsto e^{-s'u}\psi(u)$ is directly Riemann integrable
for any $s' \in K_{\mu}^{\epsilon} : = \{ s' \in \bb R: |s' - s| < \epsilon, s \in K_{\mu} \}$
with $\epsilon >0$ small enough,
we have, as $n \to \infty$, uniformly in $t \in K_{\mu}$,
$f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
\begin{align}
& \mathbb{E}_{\bb Q_s^x}
\Big[ \varphi(G_n x) \psi \big( \log |\langle f, G_n v \rangle| - nq_t \big) \Big] \nonumber\\
& = \frac{ r_{t}(x) }{ r_{s}(x) }
\frac{ \exp \{ -n(\Lambda^*(q_t) - \Lambda^*(q_s) - s(q_t -q_s)) \} }
{\sigma_{t} \sqrt{2\pi n}} \nonumber \\
& \quad \times
\left[ \int_{ \bb P^{d-1} } \varphi(x) \delta(y, x)^t \, \frac{r_s(x)}{r_t(x)} \pi_t(dx)
\int_{\mathbb{R}} e^{- (t-s) u} \psi(u) du + o(1) \right]. \label{LD_Upper_ChangeMea002}
\end{align}
\end{theorem}
We next consider the lower tail case.
The following result is an extension of Theorems \ref{Thm-Posi-Neg-s} and \ref{Thm-Posi-Neg-sBRP}.
Denote $q_s = \Lambda'(s)$ and $q_t = \Lambda'(t)$
for any $s, t \in (-s_0, 0] \cup I^{\circ}_{\mu}$ with $s > t$.
\begin{theorem} \label{Thm_Coeff_BRLD_changedMea02}
Assume conditions \ref{Condi_Exp}, \ref{Condi-TwoExp} and \ref{Condi-IP}.
Then, there exists a constant $s_0 > 0$
such that for any fixed $s \in (-s_0, 0] \cup I^{\circ}_{\mu}$
and any compact set $K_{\mu} \subset (-s_0, s)$,
we have, as $n \to \infty$,
uniformly in $t \in K_{\mu}$, $f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
\begin{align*}
& \mathbb Q_s^x \Big( \log| \langle f, G_n v \rangle | \leqslant n q_t \Big)
= \int_{ \bb P^{d-1} } \delta(y, x)^t \, \frac{r_s(x)}{r_t(x)} \pi_t(dx)
\nonumber\\
& \qquad\quad \times \frac{ r_{t}(x) }{ r_{s}(x) }
\frac{ \exp \{ -n(\Lambda^*(q_t) - \Lambda^*(q_s) - s(q_t -q_s)) \} }
{(s - t)\sigma_{t} \sqrt{2\pi n}} [ 1 + o(1)].
\end{align*}
More generally, there exists a constant $s_0 > 0$
such that for any fixed $s \in (-s_0, 0] \cup I^{\circ}_{\mu}$
and any compact set $K_{\mu} \subset (-s_0, s)$,
for any measurable function $\psi$ on $\mathbb{R}$
such that $u \mapsto e^{-s'u}\psi(u)$ is directly Riemann integrable
for any $s' \in K_{\mu}^{\epsilon} : = \{ s' \in \bb R: |s' - s| < \epsilon, s \in K_{\mu} \}$
with $\epsilon >0$ small enough,
we have, as $n \to \infty$, uniformly in $t \in K_{\mu}$,
$f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
\begin{align}
& \mathbb{E}_{\bb Q_s^x}
\Big[ \varphi(G_n x) \psi \big( \log |\langle f, G_n v \rangle| - nq_t \big) \Big] \nonumber\\
& = \frac{ r_{t}(x) }{ r_{s}(x) }
\frac{ \exp \{ -n(\Lambda^*(q_t) - \Lambda^*(q_s) - s(q_t -q_s)) \} }
{\sigma_{t} \sqrt{2\pi n}} \nonumber \\
& \quad \times
\left[ \int_{ \bb P^{d-1} } \varphi(x) \delta(y, x)^t \, \frac{r_s(x)}{r_t(x)} \pi_t(dx)
\int_{\mathbb{R}} e^{ -(t-s) u} \psi(u) du + o(1) \right]. \label{LD_Lower_ChangeMea002}
\end{align}
\end{theorem}
The proof of Theorem \ref{Thm_Coeff_BRLD_changedMea02}
relies essentially on the
H\"{o}lder regularity of the stationary measure $\pi_s$,
which will be presented in Section \ref{sect-holder-reg}.
\subsection{Proof strategy} \label{proof strategy}
The standard approach to obtain precise large deviations for i.i.d.\ real-valued random variables consists in performing a change of measure
and proving an Edgeworth expansion under the changed measure (see e.g. \cite{BR60, Pet65, DZ09}).
Applying this strategy to the coefficients $\langle f, G_n v \rangle$
of products of random matrices turns out to be considerably more difficult.
We have to overcome three main difficulties:
establishing an Edgeworth expansion for the couple $(G_n x, \log |\langle f, G_n v \rangle|)$
with a target function $\varphi$ on the Markov chain $G_n x$ under the changed measure;
controlling precisely the difference between $\log |\langle f, G_n v \rangle|$ and $\log |G_n v|$;
and establishing the regularity of the eigenmeasure $\nu_s$.
For the first point,
it turns out that the techniques which work for the quantity $\log |\langle f, G_n v \rangle|$
alone cannot be applied for the couple.
Dealing with the couple $(G_n x, \log |\langle f, G_n v \rangle|)$ with a target function on $G_n x$
requires a new kind of smoothing inequality
on a complex contour, instead of the usual Esseen inequality on the real line.
We make use of the saddle point method to obtain precise asymptotics
for the integrals of the corresponding Laplace transforms on the complex plane.
For this method we refer to a recent work of the authors \cite{XGL19b}
where the Edgeworth expansion with a target function on $G_n x$
for the norm cocycle $\log |G_n v|$ has been established.
Secondly, from the previous work on limit theorems such as the strong law of large numbers,
the central limit theorem and the law of iterated logarithm
for the coefficients $\langle f, G_n v \rangle$,
see e.g. \cite{GR85, BL85, Hen97, BQ17},
we know that the difference $|\log |\langle f, G_n v \rangle| - \log |G_n v| |$
generally diverges to infinity as $n \to \infty$.
It is controlled by the corresponding norming factors in these limit theorems.
However, such a control is not enough
to obtain precise large deviation expansions for $\langle f, G_n v \rangle$, nor even for a large deviation
principle with explicit rate function. A precise account of the contribution of the error term
is given by the following decomposition:
for any $x = \bb R v$ and $y = \bb R f$ with $|f| = |v| =1$,
\begin{align} \label{basic decompos001}
\log | \langle f, G_n v \rangle | = \log |G_n v| + \log \delta(y, G_n x), \quad n \geqslant 1,
\end{align}
where $\delta(y,x) = \frac{|\langle f, v \rangle|}{|f||v|}$.
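For the reader's convenience we note that \eqref{basic decompos001} is immediate: since $|f| = 1$,
\begin{align*}
\log | \langle f, G_n v \rangle |
= \log \Big( |G_n v| \cdot \frac{ | \langle f, G_n v \rangle | }{ |f| \, |G_n v| } \Big)
= \log |G_n v| + \log \delta(y, G_n x),
\end{align*}
where we used that $G_n x = \bb R \, G_n v$ and the definition of $\delta$.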
The exact decomposition \eqref{basic decompos001} allows us to deduce the precise large deviation asymptotic
from the results for the couple $(G_n x, \log |G_n v|)$
with a target function on $G_n x$ established in \cite{XGL19a}.
The idea is as follows:
with $\mathbb{Q}_{s}^{x}$ the changed measure defined in Section \ref{subsec a change of measure},
we have
\begin{align} \label{intro001}
\frac{ e^{ n\Lambda^{*}( q) } }{r_{s}(x)}
\mathbb{P} \left( \log |\langle f, G_n v \rangle | \geqslant n q \right)
= \mathbb{E}_{\mathbb{Q}_{s}^{x}}
\left[ \frac{ e^{ -s (\log |G_n v| - nq ) } }{ r_{s}(G_n x) }
\mathbbm{1}_{ \{ \log | \langle f, G_n v \rangle | - nq \geqslant 0 \} } \right].
\end{align}
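To indicate why \eqref{intro001} holds, recall that $|v| = 1$, so $\sigma(G_n, x) = \log |G_n v|$, and assume (as in Section \ref{subsec a change of measure}) that the restriction of $\mathbb{Q}_s^x$ to the $\sigma$-field generated by $g_1, \ldots, g_n$ has density $\frac{ e^{s \sigma(G_n, x)} r_s(G_n x) }{ \kappa(s)^n r_s(x) }$ with respect to $\mathbb{P}$. Then, for the indicator $F = \mathbbm{1}_{ \{ \log | \langle f, G_n v \rangle | - nq \geqslant 0 \} }$,
\begin{align*}
\mathbb{E}_{\mathbb{Q}_{s}^{x}}
\left[ \frac{ e^{ -s (\log |G_n v| - nq ) } }{ r_{s}(G_n x) } F \right]
= \frac{ e^{s n q} }{ \kappa(s)^n r_s(x) } \, \mathbb{E} [ F ]
= \frac{ e^{ n ( s q - \Lambda(s) ) } }{ r_s(x) } \, \mathbb{E} [ F ]
= \frac{ e^{ n \Lambda^{*}(q) } }{ r_s(x) }
\mathbb{P} \left( \log |\langle f, G_n v \rangle | \geqslant n q \right),
\end{align*}
which is \eqref{intro001}.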
We only sketch
how to cope with the upper bound of the right-hand side of \eqref{intro001}.
Consider a partition $I_k: = (-\eta k, -\eta(k-1)]$, $k \geqslant 1$, of the interval $(-\infty,0]$,
where $\eta > 0$ is a sufficiently small constant.
Using \eqref{basic decompos001} we get the upper bound
\begin{align*}
\mathbbm{1}_{ \{ \log | \langle f, G_n v \rangle | - nq \geqslant 0 \} }
\leqslant \sum_{k = 1}^{\infty} \mathbbm{1}_{ \big\{ \log |G_n v | - nq - \eta (k-1) \geqslant 0 \big\} }
\mathbbm{1}_{ \big\{ \log \delta(y, G_n x) \in I_k \big\} },
\end{align*}
which we substitute into \eqref{intro001}.
Thus we are led to the estimation of the sum
\begin{align}\label{Intro-UppSum01}
\sum_{k = 1}^{\infty} e^{ - s \eta (k-1) }
\mathbb{E}_{\mathbb{Q}_{s}^{x}}
\left[ \frac{ \psi_s (\log |G_n v| - nq - \eta (k-1) ) }{ r_{s}(G_n x) }
\mathbbm{1}_{ \{ \log \delta(y, G_n x) \in I_k \} } \right],
\end{align}
where $\psi_s (u) = e^{-su} \mathbbm{1}_{ \{ u \geqslant 0 \} }$, $u \in \mathbb{R}$.
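For the reader's convenience we spell out the substitution behind \eqref{Intro-UppSum01}: by the definition of $\psi_s$,
\begin{align*}
e^{-s (\log |G_n v| - nq)} \, \mathbbm{1}_{ \{ \log |G_n v| - nq - \eta(k-1) \geqslant 0 \} }
= e^{-s \eta (k-1)} \, \psi_s \big( \log |G_n v| - nq - \eta(k-1) \big),
\end{align*}
so that inserting the above bound on the indicator into the right-hand side of \eqref{intro001} yields \eqref{Intro-UppSum01}.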
Let $R_{s,it}(\varphi)(x) = \mathbb{E}_{\mathbb{Q}_{s}^{x}} [e^{it (\sigma(g_1, x) - q )} \varphi(g_1 x)]$
be the perturbed transfer operator defined
for any H\"{o}lder continuous function $\varphi$ on $\mathbb{P}^{d-1}$,
and $R^{n}_{s,it}$ be its $n$-th iteration.
Then, by the Fourier inversion formula, the sum in \eqref{Intro-UppSum01} is bounded from above by
\begin{align}\label{Intro-Inte-a}
\frac{ 1 }{2 \pi} \sum_{k = 1}^{\infty} e^{ - s \eta (k-1) }
\int_{\mathbb{R}} e^{-it \eta (k-1) }
R^{n}_{s,it}( r_s^{-1} \Phi_{s,k,\ee_2} )(x) \widehat{\Psi}_{s, \ee_1} (t)dt,
\end{align}
where we choose some appropriate smooth functions $\Phi_{s,k,\ee_2}$ and $\Psi_{s,\ee_1}$,
for $\ee_1, \ee_2>0$,
which dominate $\mathbbm{1}_{\{ \log \delta(y, \cdot) \in I_k \} }$ and $\psi_s$, respectively.
Using spectral gap properties of $R_{s,it}$,
it has been established recently in \cite{XGL19a}
(see Proposition \ref{Prop Rn limit1})
that, for any $k \geqslant 1$,
the $k$-th summand of the infinite sum in \eqref{Intro-Inte-a}, say $I_n(k)$,
converges as $n \to \infty$ to a limit, say
$I(k)=\frac{ \sqrt{ 2 \pi} }{ s \sigma_s \nu_s(r_s) } e^{ - s \eta (k-1) } \nu_s( \Phi_{s,k,\ee_2} )$.
The interchangeability of the limit as $n \to \infty$
and of the summation over $k$ in \eqref{Intro-Inte-a} is justified
by specifying the rate in the convergence of $I_n(k)$ to $I(k)$, as argued in \cite{XGL19a}.
This implies that, as $n \to \infty$ and $\ee_1 \to 0$, \eqref{Intro-Inte-a} converges to
$\sum_{k = 1}^{\infty} I(k)$.
It remains to show that the last sum converges to $r_s^* (y)$, as $\eta \to 0$ and $\ee_2 \to 0$.
For this we have to make use of
the zero-one law of the eigenmeasure $\nu_s$ established recently in \cite{GQX20}:
for any $y \in (\bb P^{d-1})^*$ and any $t \in (-\infty, 0)$,
\begin{align} \label{Intro-Regu02}
\nu_s \left( \left\{x \in \mathbb{P}^{d-1}: \log \delta(y, x) = t \right\} \right)
= 0 \ \mbox{or} \ 1.
\end{align}
With $s=0$ it was used in \cite{GQX20} to prove a local limit theorem
for the coefficients $\langle f, G_n v \rangle$.
The proof of the lower large deviation asymptotic \eqref{IntroEntrySNeg}
can be carried out in a similar way as that of upper large deviation asymptotic \eqref{IntroEntryInver02}.
The novelty here consists in the use of the change of measure formula for $\mathbb{Q}_s^x$
when $s<0$ and of the spectral gap theory under the changed measure as stated in \cite{XGL19b} for $s<0$.
In addition we need the H\"{o}lder regularity of the eigenmeasure $\nu_s$ for $s <0$ sufficiently close to $0$.
In some applications it is very useful to extend the large deviation results
\eqref{IntroEntryInver02}, \eqref{IntroTargSPosi} and \eqref{IntroEntrySNeg}
to the setting under the changed measure $\bb Q_s^x$;
see Theorems \ref{Thm_Coeff_BRLD_changedMea} and \ref{Thm_Coeff_BRLD_changedMea02}.
To obtain these results, an important step
is to establish the H\"{o}lder regularity of the eigenmeasure $\nu_s$
when $s > 0$; see Proposition \ref{PropRegularity}.
For this we adapt the arguments from \cite{GR85} and \cite{BL85}
where \eqref{Intro-Regu02} was established for $s=0$.
For $s>0$ the arguments are much more delicate.
One of the difficulties is that the sequence $(g_n)_{n \geqslant 1}$
becomes dependent under the changed measure $\mathbb{Q}_s^x$.
We need to extend the results in \cite{BL85} to this case.
Of crucial importance are
the simplicity of the dominant Lyapunov exponent for $G_n$ under the changed measure
recently established in \cite{GL16} (see Lemma \ref{Lem_Lya_Meas}),
and the key proximality property which states that, for $s>0$, the measure $M_n m$,
where $M_n = g_1 \ldots g_n$ and $m$ is the unique rotation invariant measure on $\mathbb P^{d-1}$,
converges weakly to the Dirac measure $\delta_{Z_s}$,
with $Z_s$ a random variable whose law is the stationary measure $\pi_s$ of the Markov chain $G_n x$
(see Lemma \ref{Lem_DiracMea}).
\section{Spectral gap properties and H\"{o}lder regularity of the stationary measure} \label{sect-holder-reg}
In this section we present some preliminaries on the spectral gap properties and
state some new results on the regularity of the stationary measure $\pi_s$.
The spectral gap and regularity properties will be used in the proofs of the main theorems.
In particular, the regularity properties of the stationary measure $\pi_s$
will play an important role in
the proof of Theorem \ref{Thm_Coeff_BRLD_changedMea02}.
As other applications of the regularity properties, we will obtain
a law of large numbers and
a central limit theorem for coefficients under the changed measure.
\subsection{Spectral gap properties and a change of measure}\label{subsec a change of measure}
Recall that the transfer operator $P_{s}$ and the conjugate transfer operator
$P_{s}^{*}$ are defined by \eqref{transfoper001}.
Below $P_s\nu_{s}$ stands for the measure on $\bb P^{d-1}$ such that $P_s\nu_{s}(\varphi)=\nu_{s}(P_s \varphi),$
for any continuous functions $\varphi$ on $\bb P^{d-1}$, and $P^*_s\nu^*_{s}$ is defined similarly.
The spectral gap properties of $P_{s}$ and $P_{s}^{*}$ are summarized in the following proposition which was proved
in \cite{GL16}.
\begin{proposition}\label{transfer operator}
Assume condition \ref{Condi-IP}.
Then, for any $s\in I_{\mu}^{\circ}$, the following assertions hold:
\begin{enumerate}
\item
the spectral radii of the operators $P_s$ and $P_s^*$ are both equal to $\kappa(s)$
and there exist a unique, up to a scaling constant,
strictly positive H\"{o}lder continuous
function $r_{s}$
and a unique probability measure $\nu_{s}$ on $\bb P^{d-1}$ such that
\begin{align*}
P_s r_s=\kappa(s)r_s, \quad P_s\nu_{s}=\kappa(s)\nu_{s};
\end{align*}
\item
there exist a unique strictly positive H\"{o}lder continuous function
$r_{s}^{\ast}$ and
a unique probability measure $\nu_{s}^{*}$ on $\bb P^{d-1}$ such that
\begin{align*}
P_{s}^{*}r_{s}^{*}=\kappa(s)r_{s}^{*}, \quad P_{s}^{*}\nu_{s}^{*}=\kappa(s)\nu_{s}^{\ast};
\end{align*}
moreover, the function $\kappa: I_{\mu}^{\circ} \to \mathbb R $ is analytic.
\end{enumerate}
\end{proposition}
The case of $s<0$ is not covered by Proposition \ref{transfer operator}.
We state below the corresponding result, which was proved in \cite{GQX20, XGL19a}.
\begin{proposition} \label{Prop-Trans-s-neg}
Assume conditions \ref{Condi-TwoExp} and \ref{Condi-IP}.
Then there exists a constant $s_0 > 0$ such that for any $s \in (-s_0, 0)$,
the assertions (1) and (2) of Proposition \ref{transfer operator} remain valid.
Moreover, the function $\kappa: (-s_0, 0) \to \mathbb R $ is analytic.
\end{proposition}
Now we give explicit formulae for the eigenfunctions $r_s$ and $r_s^*$.
\begin{lemma} \label{Lemma-expleinenfun-s-neg}
\begin{enumerate}
\item
Assume condition \ref{Condi-IP}.
Then, for $s\in I_{\mu}^{\circ}$, the eigenfunctions $r_s$ and $r_s^*$ are given as follows:
for any $x\in \bb P^{d-1}$ and $y \in (\bb P^{d-1})^*$,
\begin{align} \label{expleigenfun001}
\quad\quad r_{s}(x)= \int_{(\bb P^{d-1})^*} \delta(x,y)^s \nu^*_{s}(dy),
\quad r_{s}^*(y)= \int_{\bb P^{d-1}} \delta(x,y)^s \nu_{s}(dx).
\end{align}
\item
Assume conditions \ref{Condi-TwoExp} and \ref{Condi-IP}.
Then there exists a constant $s_0 > 0$ such that for any $s \in (-s_0, 0)$,
the expressions in \eqref{expleigenfun001} remain valid.
\end{enumerate}
\end{lemma}
The first assertion of Lemma \ref{Lemma-expleinenfun-s-neg}, for $s>0$, was proved in \cite{GL16}.
The second assertion, for $s<0$, was proved in \cite{GQX20};
its proof is quite different from that of the case $s>0$.
It is based on the H\"{o}lder regularity of the eigenmeasures $\nu_s$ and $\nu_s^*$
which is the subject of the next section.
By Propositions \ref{transfer operator} and \ref{Prop-Trans-s-neg},
the eigenvalue $\kappa(s)$ and the eigenfunction $r_s$ are both strictly positive.
This allows us to perform a change of measure, as shown below.
Under the corresponding assumptions of Propositions \ref{transfer operator} and \ref{Prop-Trans-s-neg},
for any $s \in (-s_0, 0) \cup I_{\mu}$,
the family of probability kernels
$q_{n}^{s}(x,g) = \frac{ e^{s \sigma(g, x)} }{\kappa^{n}(s)} \frac{r_{s}(g x)}{r_{s}(x)},$
$n\geqslant 1$, satisfies the cocycle property:
for any $x \in \bb P^{d-1}$ and $g_1, g_2 \in \Gamma_{\mu}$,
\begin{align} \label{cocycle01}
q_{n}^{s}(x,g_1)q_{m}^{s}(g_1 x, g_2)=q_{n+m}^{s}(x,g_2g_1).
\end{align}
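Indeed, the identity \eqref{cocycle01} follows directly from the cocycle property
$\sigma(g_2 g_1, x) = \sigma(g_2, g_1 x) + \sigma(g_1, x)$:
\begin{align*}
q_{n}^{s}(x,g_1)\, q_{m}^{s}(g_1 x, g_2)
= \frac{ e^{s \sigma(g_1, x)} e^{s \sigma(g_2, g_1 x)} }{ \kappa^{n+m}(s) } \frac{ r_{s}(g_2 g_1 x) }{ r_{s}(x) }
= \frac{ e^{s \sigma(g_2 g_1, x)} }{ \kappa^{n+m}(s) } \frac{ r_{s}(g_2 g_1 x) }{ r_{s}(x) }
= q_{n+m}^{s}(x, g_2 g_1).
\end{align*}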
Thus the probability measures
$q_{n}^{s}(x,g_{n}\dots g_{1})\mu(dg_1)\dots\mu(dg_n)$
form a projective system on $\bb G^{\bb N^*}$.
By the Kolmogorov extension theorem,
there exists a unique probability measure $\mathbb Q_s^x$ on $\bb G^{\bb N^*}$ with these finite-dimensional distributions.
The corresponding expectation is denoted by $\mathbb{E}_{\mathbb Q_s^x}$.
Then the change of measure formula follows:
for any measurable function $h$ on $(\bb P^{d-1} \times \mathbb R)^{n}$,
\begin{align}\label{basic equ1}
& \frac{1}{ \kappa^{n}(s) r_{s}(x) }
\mathbb{E} \Big[ r_{s}(G_n x) e^{s \sigma(G_n, x) }
h \Big( G_1 x, \sigma (G_1, x), \dots, G_n x, \sigma (G_n, x) \Big) \Big] \nonumber\\
& =\mathbb{E}_{\mathbb{Q}_{s}^{x}}
\Big[ h \Big( G_1 x, \sigma (G_1, x), \dots, G_n x, \sigma (G_n, x) \Big) \Big].
\end{align}
Under the changed measure $\mathbb Q_s^x$, the process $(G_n x)_{n \geqslant 0}$ defined by \eqref{Def_MarkovChain01}
still constitutes a Markov chain on $\bb P^{d-1}$ with the transition operator given by
\begin{align}\label{Def_Q_s_lll}
Q_{s}\varphi(x) = \frac{1}{\kappa(s)r_{s}(x)}P_s(\varphi r_{s})(x),
\quad x \in \bb P^{d-1}.
\end{align}
The Markov operator $Q_{s}$ has a unique stationary
probability measure $\pi_{s}$, and there exist constants $c, C >0$ such that
for any $\varphi \in \mathcal{B}_{\gamma}$,
\begin{align} \label{equcontin Q s limit}
\| Q_{s}^{n}\varphi - \pi_{s}(\varphi) \|_{\gamma} \leqslant C e^{-cn} \|\varphi\|_{\gamma},
\quad \mbox{where} \quad
\pi_{s}(\varphi)=\frac{\nu_{s}(\varphi r_{s})}{\nu_{s}(r_{s})}.
\end{align}
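In particular, one can check directly that $\pi_s$ given by \eqref{equcontin Q s limit} is $Q_s$-stationary:
by \eqref{Def_Q_s_lll} and the relation $P_s\nu_{s}=\kappa(s)\nu_{s}$, for any $\varphi \in \mathcal{B}_{\gamma}$,
\begin{align*}
\pi_{s}(Q_{s}\varphi)
= \frac{ \nu_{s}\big( (Q_{s}\varphi)\, r_{s} \big) }{ \nu_{s}(r_{s}) }
= \frac{ \nu_{s}\big( P_{s}(\varphi r_{s}) \big) }{ \kappa(s)\, \nu_{s}(r_{s}) }
= \frac{ \nu_{s}(\varphi r_{s}) }{ \nu_{s}(r_{s}) }
= \pi_{s}(\varphi).
\end{align*}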
For any $s \in (-s_0, 0) \cup I_{\mu}$ and $t \in \mathbb{R},$
define a family of perturbed operators $R_{s,it}$ as follows:
for any $\varphi \in \mathcal{B}_{\gamma}$,
\begin{align}\label{operator Rsz}
R_{s, it}\varphi(x)
= \mathbb{E}_{\mathbb{Q}_{s}^{x}} \left[ e^{ it ( \sigma(g_1, x) - q ) } \varphi(g_1 x) \right],
\quad x \in \bb P^{d-1}.
\end{align}
It follows from the cocycle property \eqref{cocycle01} that
\begin{align*}
R^{n}_{s, it}\varphi(x)
= \mathbb{E}_{\mathbb{Q}_{s}^{x}} \left[ e^{ it( \sigma(G_n, x) - nq) } \varphi(G_n x) \right],
\quad x \in \bb P^{d-1}.
\end{align*}
Under various restrictions on $s$, it was shown in \cite{BM16, XGL19a, XGL19b}
that the operator $R_{s, it}$ acts on the Banach space $\mathcal{B}_{\gamma}$
and has a spectral gap.
\subsection{H\"{o}lder regularity of the stationary measure}
In this section we present our results on the H\"{o}lder regularity of the stationary measure
$\pi_s$ and of the eigenmeasure $\nu_s$.
The regularity of $\pi_s$ and $\nu_s$ is central to establishing
the precise large deviation asymptotics for the coefficients $\langle f, G_n v \rangle$
under the changed measure $\bb Q_s^x$
and is also of independent interest.
Below we denote $B(y, r) = \{ x \in \bb P^{d-1}: \delta(y,x) \leqslant r \}$
for $y \in (\bb P^{d-1})^*$ and $r \geqslant 0$.
\begin{proposition}\label{PropRegularity}
Assume conditions \ref{Condi_Exp} and \ref{Condi-IP}.
Then, for any $s \in I_{\mu}^{\circ}$, there exists a constant $\alpha > 0$ such that
\begin{align} \label{RegularityIne00}
\sup_{y \in (\bb P^{d-1})^* } \int_{ \bb P^{d-1} } \frac{1}{ \delta(y, x)^{\alpha}} \pi_s(dx) < +\infty.
\end{align}
In particular, for any $s \in I_{\mu}^{\circ}$,
there exist constants $\alpha, C >0$ such that for any $r \geqslant 0$,
\begin{align} \label{RegularityIne}
\sup_{y \in (\bb P^{d-1})^* }
\pi_s \big( B(y, r) \big) \leqslant C r^{\alpha}.
\end{align}
Moreover, the assertions \eqref{RegularityIne00} and \eqref{RegularityIne} remain valid
when the stationary measure $\pi_s$ is replaced by the eigenmeasure $\nu_s$.
\end{proposition}
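Note that \eqref{RegularityIne} is an immediate consequence of \eqref{RegularityIne00}:
by the Markov inequality, for any $r > 0$ and $y \in (\bb P^{d-1})^*$,
\begin{align*}
\pi_s \big( B(y, r) \big)
= \pi_s \big( x \in \bb P^{d-1}: \delta(y, x)^{-\alpha} \geqslant r^{-\alpha} \big)
\leqslant r^{\alpha} \int_{ \bb P^{d-1} } \frac{1}{ \delta(y, x)^{\alpha} } \pi_s(dx),
\end{align*}
and the case $r = 0$ follows by letting $r \to 0$.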
The proof of Proposition \ref{PropRegularity} is technically involved and
is postponed to Section \ref{Sec:regpositive}.
By \eqref{RegularityIne} and the Frostman lemma, it follows that the Hausdorff
dimension of the stationary measure $\pi_s$ is at least $\alpha$.
For $s=0$ the H\"{o}lder regularity of the stationary measure $\nu$ ($\nu = \pi_0 = \nu_0$) is due to Guivarc'h \cite{Gui90}.
We also refer to \cite{BL85} for a detailed description of the method used in \cite{Gui90}
and to \cite{BFLM11, BQ17} for a different approach.
Such regularity is of great importance in the study of products of random matrices.
For example, it turns out to be crucial for establishing limit theorems for the coefficients $\langle f, G_n v \rangle$
and for the spectral radius $\rho(G_n)$ of $G_n$.
However, no similar result has been established in the literature
for the stationary measure $\pi_s$ when $s \in I_{\mu}^{\circ}$.
The proof of the assertion \eqref{RegularityIne00}
is based on the asymptotic properties of the components in the Cartan and Iwasawa decompositions
of the reversed random matrix product $M_n = g_1 \ldots g_n$
and on the simplicity of the dominant Lyapunov exponent of $G_n$ under the changed measure $\mathbb{Q}_s^x$:
see Section \ref{Sec:regpositive}.
When $s$ is non-positive and sufficiently close to $0$,
we also give the H\"{o}lder regularity of the stationary measure $\pi_s$.
\begin{proposition}\label{PropRegu02}
Assume conditions \ref{Condi-TwoExp} and \ref{Condi-IP}.
Then, there exist constants $\alpha, s_0, C>0$ such that
the statements \eqref{RegularityIne00} and \eqref{RegularityIne} hold for any $s \in (-s_0, 0]$.
\end{proposition}
Proposition \ref{PropRegu02} has been recently established in \cite{GQX20} using the
H\"older regularity of the stationary measure $\nu$ and the analyticity of the eigenvalue function $\kappa$.
We will establish the following assertion, which is a stronger version of Proposition \ref{PropRegularity}.
\begin{proposition}\label{Prop_Regu_Strong01}
Assume conditions \ref{Condi_Exp} and \ref{Condi-IP}.
Let $s \in I_{\mu}^{\circ}$.
Then, for any $\ee > 0$,
there exist constants $c: = c(s) > 0$ and $n_0: = n_0(s) \geqslant 1$
such that for all $n \geqslant k \geqslant n_0$, $x \in \mathbb P^{d-1}$ and $y \in (\mathbb P^{d-1})^*$,
\begin{align*}
\bb Q_s^x \Big( \delta (y, G_n x) \leqslant e^{- \ee k} \Big) \leqslant e^{- c k}.
\end{align*}
\end{proposition}
Similarly, the following result is a stronger version of Proposition \ref{PropRegu02}.
\begin{proposition}\label{Prop_Regu_Strong02}
Assume conditions \ref{Condi-TwoExp} and \ref{Condi-IP}.
Let $s \in (-s_0, s_0)$, where $s_0 > 0$ is small enough.
Then, for any $\ee > 0$,
there exist constants $c: = c(s) > 0$ and $n_0: = n_0(s) \geqslant 1$
such that for all $n \geqslant k \geqslant n_0$, $x \in \mathbb P^{d-1}$ and $y \in (\mathbb P^{d-1})^*$,
\begin{align*}
\bb Q_s^x \Big( \delta (y, G_n x) \leqslant e^{- \ee k} \Big) \leqslant e^{- c k}.
\end{align*}
\end{proposition}
It turns out that Propositions \ref{Prop_Regu_Strong01} and \ref{Prop_Regu_Strong02}
play an important role for establishing the Bahadur-Rao-Petrov type lower tail large deviations
for the coefficients $\langle f, G_n v \rangle$ under the changed measure $\bb Q_s^x$,
see Theorem \ref{Thm_Coeff_BRLD_changedMea02}.
Moreover, they are very useful to obtain the strong law of large numbers (SLLN)
and the central limit theorem (CLT) for the coefficients $\langle f, G_n v \rangle$
under the changed measure $\bb Q_s^x$, see the next section.
\subsection{Applications to SLLN and CLT for the coefficients}
In this section we formulate the SLLN and the CLT
for the coefficients $\langle f, G_n v \rangle$
under the changed measure $\mathbb{Q}_{s}^{x}$.
These assertions are not used in the proofs of our large deviation results, but are of independent interest.
They are deduced from the SLLN and the CLT for the norm cocycle $\log |G_n v|$ using the H\"older regularity of the
stationary measure $\pi_s$ stated in Propositions \ref{PropRegularity} and \ref{PropRegu02}.
When $s \in I_{\mu}$, the SLLN for $\log |G_n v|$ was established in \cite{GL16}:
under conditions \ref{Condi-TwoExp} and \ref{Condi-IP},
for any $x = \bb R v \in \bb P^{d-1}$,
\begin{align}\label{SLLN_Gnx}
\lim_{n \to \infty} \frac{1}{n} \log |G_n v|
= \Lambda'(s),
\quad \mathbb{Q}_s^x\mbox{-a.s.},
\end{align}
where $\Lambda'(s) = \frac{\kappa'(s)}{\kappa(s)}$
with the function $\kappa$ defined in Proposition \ref{transfer operator}.
The CLT for $\log |G_n v|$ under the changed measure
$\mathbb{Q}_s^x$ was proved in \cite{BM16}:
for any $s \in I_{\mu}$ and $t \in \mathbb{R}$,
it holds uniformly in $x = \bb R v \in \bb P^{d-1}$ with $|v| = 1$ that
\begin{align}\label{CLT_Cocycle01}
\lim_{ n \to \infty } \mathbb{Q}_s^x
\left( \frac{ \log |G_n v| - n \Lambda'(s) }{ \sigma_s \sqrt{n} } \leqslant t \right)
= \Phi(t),
\end{align}
where $\Phi$ is the standard normal distribution function on $\mathbb{R}$.
When $s \in (-s_0, 0)$ with small enough $s_0 > 0$, the SLLN and the CLT for $\log |G_n v|$ under the measure
$\mathbb{Q}_s^x$ have been recently established in \cite{XGL19b}.
We now give the SLLN and the CLT for the coefficients $\langle f, G_n v \rangle$
under the measure $\mathbb{Q}_s^x$.
\begin{proposition}\label{LLN_CLT_Entry}
\begin{itemize}
\item[\rm{(1)}]
Assume conditions \ref{Condi_Exp} and \ref{Condi-IP}.
Then, for any $s \in I_{\mu}$,
uniformly in $f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
\begin{align}\label{SLLN_Entry}
\lim_{ n \to \infty } \frac{1}{n} \log | \langle f, G_n v \rangle | = \Lambda'(s),
\quad \mathbb{Q}_s^x \mbox{-a.s.}
\end{align}
Moreover, for any $s \in I_{\mu}$ and $t \in \mathbb{R}$, we have,
uniformly in $f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
\begin{align}\label{CLT_Entry}
\lim_{ n \to \infty } \mathbb{Q}_s^x
\left( \frac{ \log | \langle f, G_n v \rangle | - n \Lambda'(s) }{ \sigma_s \sqrt{n} } \leqslant t \right)
= \Phi(t).
\end{align}
\item[\rm{(2)}]
Assume conditions \ref{Condi-TwoExp} and \ref{Condi-IP}.
Then, there exists $s_0>0$ such that for any $s \in (-s_0, 0)$,
the assertions \eqref{SLLN_Entry} and \eqref{CLT_Entry} hold.
\end{itemize}
\end{proposition}
The proof of Proposition \ref{LLN_CLT_Entry} relies on Propositions \ref{PropRegularity} and \ref{PropRegu02}
and is postponed to Section \ref{Sec:regpositive}.
\section{Auxiliary results} \label{sec-spgappert}
In this section we state some preliminary results about the Taylor expansion of $\Lambda^*$,
a smoothing inequality, and some asymptotics of the perturbed operator $R_{s,it}$,
which will be used to establish Bahadur-Rao-Petrov type large deviations.
The following lemma is proved in \cite{XGL19a} and gives the Taylor expansion of $\Lambda^*(q+l)$
in the perturbation $l$.
Recall that
under conditions \ref{Condi_Exp}, \ref{Condi-TwoExp} and \ref{Condi-IP},
the moment generating function $\Lambda = \log \kappa$ is strictly convex and analytic on $(-s_0, 0) \cup I_{\mu}$;
see e.g. \cite{GL16, BM16, XGL19a}.
Set $\gamma_{s,k} = \Lambda^{(k)} (s)$, $k\geqslant 1$.
In particular, $\gamma_{s,2} = \Lambda'' (s) = \sigma_s^2$.
Under the changed measure $\mathbb Q_s^x$,
define the Cram\'{e}r series $\zeta_s$ (see Petrov \cite{Pet75})
by
\begin{align*}
\zeta_s (t) = \frac{\gamma_{s,3} }{ 6 \gamma_{s,2}^{3/2} }
+ \frac{ \gamma_{s,4} \gamma_{s,2} - 3 \gamma_{s,3}^2 }{ 24 \gamma_{s,2}^3 } t
+ \frac{\gamma_{s,5} \gamma_{s,2}^2 - 10 \gamma_{s,4} \gamma_{s,3} \gamma_{s,2} + 15 \gamma_{s,3}^3 }{ 120 \gamma_{s,2}^{9/2} } t^2
+ \ldots,
\end{align*}
which converges for small enough $|t|$.
\begin{lemma} \label{lemmaCR001}
Assume either conditions \ref{Condi_Exp} and \ref{Condi-IP} when $s \in I_{\mu}^{\circ}$,
or conditions \ref{Condi-TwoExp} and \ref{Condi-IP} when $s \in (-s_0,0)$ with small enough $s_0>0$.
Let $q=\Lambda'(s)$.
Then, there exists a constant $\eta >0$ such that for any $|l|\leqslant \eta,$
\begin{align*}
\Lambda^*(q+l) = \Lambda^{*}(q) + sl + h_s(l),
\end{align*}
where
$h_s$ is linked to the Cram\'{e}r series $\zeta_s$ by the identity
\begin{align*}
h_s(l) = \frac{ l^2}{2 \sigma_s^2} - \frac{l^3}{\sigma_s^3} \zeta_s( \frac{l}{\sigma_s} ).
\end{align*}
\end{lemma}
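For orientation, recall that $q = \Lambda'(s)$ gives $\Lambda^{*}(q) = sq - \Lambda(s)$,
and differentiating the Legendre transform yields $(\Lambda^{*})'(q) = s$ and
$(\Lambda^{*})''(q) = 1/\Lambda''(s) = 1/\sigma_s^2$.
A second-order Taylor expansion therefore already gives
\begin{align*}
\Lambda^*(q+l) = \Lambda^{*}(q) + sl + \frac{l^2}{2 \sigma_s^2} + O(l^3),
\end{align*}
and the Cram\'{e}r series $\zeta_s$ collects the higher-order corrections contained in $h_s$.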
In the sequel let us fix a non-negative density function $\rho$ on $\mathbb{R}$
with $\int_{\mathbb{R}} \rho(u) du = 1$,
whose Fourier transform $\widehat{\rho}$ is supported on $[-1,1]$.
Moreover, we assume that there exists a constant $C>0$ such that $\rho(u) \leqslant \frac{C}{1+u^4}$ for all $u \in \bb R$.
For any $\ee>0$, define the scaled density function $\rho_{\ee}$ by
$\rho_{\ee}(u) = \frac{1}{\ee}\rho(\frac{u}{\ee})$, $u\in\mathbb R,$
whose Fourier transform $\widehat{\rho}_{\ee}$ is supported on $[-\ee^{-1},\ee^{-1}]$.
For any non-negative integrable function $\psi$ on $\mathbb{R}$,
we introduce two modified functions related to $\psi$
as follows: for any $u \in \mathbb{R}$,
set $\mathbb{B}_{\ee}(u) = \{u' \in\mathbb{R}: |u' - u| \leqslant \ee\}$ and
\begin{align}\label{smoo001}
\psi^+_{\ee}(u) = \sup_{u' \in \mathbb{B}_{\ee}(u)} \psi(u')
\quad \text{and} \quad
\psi^-_{\ee}(u) = \inf_{u' \in \mathbb{B}_{\ee}(u)} \psi(u').
\end{align}
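For instance, if $\psi = \mathbbm{1}_{[a,b]}$ with $b - a > 2\ee$, then
$\psi^+_{\ee} = \mathbbm{1}_{[a-\ee,\, b+\ee]}$ and $\psi^-_{\ee} = \mathbbm{1}_{[a+\ee,\, b-\ee]}$,
which are measurable and converge to $\psi$ in $L^1(\mathbb{R})$ as $\ee \to 0$.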
The following smoothing inequality gives two-sided bounds of $\psi$.
\begin{lemma} \label{estimate u convo}
Suppose that $\psi$ is a non-negative integrable function on $\bb R$ and that
$\psi^+_{\ee}$ and $\psi^-_{\ee}$ are measurable for any $\ee>0$.
Then, for $0< \ee <1$,
there exists a positive constant $C_{\rho}(\ee)$ with $C_{\rho}(\ee) \to 0$ as $\ee \to 0$,
such that for any $u\in \mathbb{R}$,
\begin{align}
{\psi}^-_{\ee}\!\ast\!\rho_{\ee^2}(u) -
\int_{|w|\geqslant \ee} {\psi}^-_{\ee}(u - w) \rho_{\ee^2}(w)dw
\leqslant \psi(u) \leqslant (1+ C_{\rho}(\ee))
{\psi}^+_{\ee}\!\ast\!\rho_{\ee^2}(u). \nonumber
\end{align}
\end{lemma}
The proof of the above lemma is similar to that of Lemma 5.2 in \cite{GLL17}, and will not be detailed here.
The next proposition gives precise asymptotics of the perturbed operator $R_{s,it}$,
which will be used to establish Bahadur-Rao-Petrov type large deviations
for the coefficients $\langle f, G_n v \rangle$.
Its proof is based on the spectral gap properties of the perturbed operator $R_{s,it}$.
\begin{proposition} \label{Prop Rn limit1}
Suppose that $\psi: \mathbb R \to \mathbb C$
is a bounded measurable function with compact support,
and that $\psi$ is differentiable in a small neighborhood of 0 in $\bb R$.
\noindent(1)
Assume conditions \ref{Condi_Exp} and \ref{Condi-IP}.
Then, for any compact set $K_{\mu} \subset I_{\mu}^{\circ}$, there exist constants
$\delta = \delta(K_{\mu}) >0$, $c = c(K_{\mu}) >0$, $C = C(K_{\mu}) >0$ such that
for all $x\in \bb P^{d-1}$, $s \in K_{\mu}$, $|l| = O( \frac{1}{\sqrt{n}})$, $\varphi \in \mathcal{B}_{\gamma}$
and $n \geqslant 1$,
\begin{align} \label{Thm1 lim R1}
& \left| \sigma_s \sqrt{n} \, e^{\frac{nl^2}{2 \sigma_s^2}}
\int_{\mathbb R} e^{-it l n} R^{n}_{s,it}(\varphi)(x) \psi (t) dt
- \sqrt{2\pi} \psi(0)\pi_{s}(\varphi) \right| \nonumber\\
& \leqslant \frac{ C }{ \sqrt{n} } \| \varphi \|_\gamma
+ \frac{C}{n} \|\varphi\|_{\gamma} \sup_{|t| \leqslant \delta} \big( |\psi(t)| + |\psi'(t)| \big)
+ Ce^{-cn} \|\varphi\|_{\gamma} \int_{\bb R} |\psi(t)| dt.
\end{align}
\noindent(2)
Assume conditions \ref{Condi-TwoExp} and \ref{Condi-IP}.
Then, there exist constants $s_0 > 0$, $\delta = \delta(s_0) >0$, $c = c(s_0) >0$, $C = C(s_0) >0$
such that for any compact set $K_{\mu} \subset (-s_0, 0)$,
the inequality \eqref{Thm1 lim R1} holds uniformly in $x\in \bb P^{d-1}$,
$s \in K_{\mu}$, $|l| = O( \frac{1}{\sqrt{n}} )$, $\varphi \in \mathcal{B}_{\gamma}$ and $n \geqslant 1$.
\end{proposition}
The assertions (1) and (2) of Proposition \ref{Prop Rn limit1}
were respectively established in \cite{XGL19a} and \cite{XGL19b}.
The perturbation $l$ as well as the explicit rate of convergence in Proposition \ref{Prop Rn limit1}
are important in the sequel.
They play a crucial role in establishing the Bahadur-Rao type large deviations
for the coefficients $\langle f, G_n v \rangle$
in Theorems \ref{thrmBR001}, \ref{Thm_BRP_Upper}, \ref{Thm-Posi-Neg-s} and \ref{Thm-Posi-Neg-sBRP}.
\section{Proof of upper tail large deviations for coefficients} \label{sec proof scalarprod}
The aim of this section is to establish
Theorems \ref{thrmBR001} and \ref{Thm_BRP_Upper}.
Since Theorem \ref{thrmBR001} is a
direct consequence of Theorem \ref{Thm_BRP_Upper},
it suffices to establish Theorem \ref{Thm_BRP_Upper}.
We also establish a large deviation result under the changed measure.
\subsection{Zero-one laws for the stationary measure}
We first present some zero-one laws for the stationary measure which will be used in the proof of
Theorem \ref{Thm_BRP_Upper}.
\begin{lemma} \label{Lem_Gui_LeP}
Assume condition \ref{Condi-IP}.
Then, for any $s \in I_{\mu}^{\circ}$
and any proper projective subspace $Y \subsetneq \mathbb P^{d-1}$,
it holds that $\pi_s(Y)=0$.
\end{lemma}
\begin{lemma}\label{Lem_zero_law_s_Neg}
Assume conditions \ref{Condi-TwoExp} and \ref{Condi-IP}.
Then, there exists a constant $s_0 >0$ such that for any
$s \in (-s_0, 0)$ and any proper projective subspace $Y \subsetneq \mathbb P^{d-1}$,
it holds that $\pi_s( Y)=0$.
\end{lemma}
Lemma \ref{Lem_Gui_LeP} was established by Guivarc'h and Le Page \cite{GL16} using the
strategy of Furstenberg \cite{Fur63}.
Lemma \ref{Lem_zero_law_s_Neg} was proved in \cite{GQX20} based on the H\"{o}lder regularity of the
stationary measure $\nu$.
Note that the results in \cite{GL16} and \cite{GQX20} are stated for the eigenmeasure $\nu_s$,
but they also hold for the stationary measure $\pi_s$ since the measures $\pi_s$ and $\nu_s$ are equivalent.
We shall also need the following zero-one law of the stationary measure $\pi_s$
recently established in \cite{GQX20}.
\begin{lemma}\label{Lem_0-1_law_s}
Assume condition \ref{Condi-IP}.
Then, for any $s \in I_{\mu}^{\circ}$ and any algebraic subset $Y$ of $\mathbb P^{d-1}$,
it holds that either $\pi_s( Y)=0$ or $\pi_s( Y)=1$.
In particular, for any $y \in (\bb P^{d-1})^*$ and any $t \in (-\infty, 0)$,
\begin{align} \label{0-1_law_s_Posi_Ex}
\pi_s \left( \left\{x \in \mathbb{P}^{d-1}: \log \delta(y, x) = t \right\} \right)
= 0 \ \mbox{or} \ 1.
\end{align}
\end{lemma}
\begin{lemma}\label{Lem_0-1_law_s_Neg}
Assume conditions \ref{Condi-TwoExp} and \ref{Condi-IP}.
Then, there exists a constant $s_0 >0$ such that for any
$s \in (-s_0, 0)$ and any algebraic subset $Y$ of $\mathbb P^{d-1}$,
it holds that either $\pi_s( Y)=0$ or $\pi_s( Y)=1$.
In particular, for any $y \in (\bb P^{d-1})^*$ and any $t \in (-\infty, 0)$,
\begin{align} \label{0-1_law_s_Nega_Ex}
\pi_s \left( \left\{x \in \mathbb{P}^{d-1}: \log \delta(y, x) = t \right\} \right)
= 0 \ \mbox{or} \ 1.
\end{align}
\end{lemma}
The assertions \eqref{0-1_law_s_Posi_Ex} and \eqref{0-1_law_s_Nega_Ex}
are sufficient for us to establish the Bahadur-Rao type large deviation asymptotics
for the coefficient $\langle f, G_n v \rangle$ (cf. Theorems \ref{thrmBR001} and \ref{Thm-Posi-Neg-s}).
However, in order to obtain the Petrov type extensions (cf. Theorems \ref{Thm_BRP_Upper} and \ref{Thm-Posi-Neg-sBRP}),
we need the following slightly stronger statements than \eqref{0-1_law_s_Posi_Ex} and \eqref{0-1_law_s_Nega_Ex}.
\begin{lemma}\label{Lem_0-1_law_s_Posi_Uni}
Assume condition \ref{Condi-IP}.
Then, for any $y \in (\bb P^{d-1})^*$ and any $t \in (-\infty, 0)$, if
\begin{align} \label{0-1_law_s_Posi_uni}
\pi_s \left( \left\{x \in \mathbb{P}^{d-1}: \log \delta(y, x) = t \right\} \right)
= 0
\end{align}
holds for some $s \in I_{\mu}^{\circ}$, then \eqref{0-1_law_s_Posi_uni} holds for all $s \in I_{\mu}^{\circ}$.
\end{lemma}
\begin{lemma}\label{Lem_0-1_law_s_Neg_Uni}
Assume conditions \ref{Condi-TwoExp} and \ref{Condi-IP}.
Then, for any $y \in (\bb P^{d-1})^*$ and any $t \in (-\infty, 0)$, if
\begin{align} \label{0-1_law_s_Nega_uni}
\pi_s \left( \left\{x \in \mathbb{P}^{d-1}: \log \delta(y, x) = t \right\} \right)
= 0
\end{align}
holds for some $s \in (-s_0, 0)$ with $s_0 >0$ small enough,
then \eqref{0-1_law_s_Nega_uni} holds for all $s \in (-s_0, 0]$.
\end{lemma}
\begin{proof}[Proof of Lemmas \ref{Lem_0-1_law_s_Posi_Uni} and \ref{Lem_0-1_law_s_Neg_Uni}]
We first prove Lemma \ref{Lem_0-1_law_s_Posi_Uni}.
For any $y \in (\bb P^{d-1})^*$ and any $t \in (-\infty, 0)$,
denote $Y_{y, t} = \{x \in \mathbb{P}^{d-1}: \log \delta(y, x) = t \}$.
Suppose that there exist $s_1, s_2 \in I_{\mu}^{\circ}$ with $s_1 \neq s_2$ such that
$\pi_{s_1}(Y_{y, t}) = 0$ and $\pi_{s_2}(Y_{y, t}) \neq 0$.
Then by Lemma \ref{Lem_0-1_law_s} we have $\pi_{s_2}(Y_{y, t}) = 1$.
Since $Y_{y, t}$ is a closed set in $\bb P^{d-1}$,
by the definition of the support of the measure we get that $\supp \pi_{s_2} \subset Y_{y, t}$.
Since it is proved in \cite{GL16} that $\supp \pi_{s_1} = \supp \pi_{s_2}$
(both coincide with $\supp \nu$ defined by \eqref{Def_supp_nu}),
it follows that $\supp \pi_{s_1} \subset Y_{y, t}$ and hence $\pi_{s_1} (Y_{y, t}) = 1$.
This contradicts the assumption $\pi_{s_1} (Y_{y, t}) = 0$.
Therefore, if \eqref{0-1_law_s_Posi_uni} holds for some $s \in I_{\mu}^{\circ}$,
then it holds for all $s \in I_{\mu}^{\circ}$.
The proof of Lemma \ref{Lem_0-1_law_s_Neg_Uni} is similar by using the fact that
$\supp \pi_s = \supp \nu$ for any $s \in (-s_0, 0)$, which is proved in \cite{GQX20}.
\end{proof}
\subsection{Proof of Theorem \ref{Thm_BRP_Upper}}
Now we are equipped to establish Theorem \ref{Thm_BRP_Upper}.
This theorem is a direct consequence of the following more general result.
Recall that $s$ and $q$ are related by $q = \Lambda'(s)$.
\begin{theorem} \label{Thm_BRP_Uni_s}
Assume conditions \ref{Condi_Exp} and \ref{Condi-IP}.
Let $K_{\mu} \subset I_{\mu}^{\circ}$ be any compact set in $\bb R$.
Then,
we have,
as $n \to \infty$, uniformly in $s \in K_{\mu}$,
$f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
\begin{align} \label{SCALREZ02_Uni_s_0000}
\mathbb{P} \Big( \log | \langle f, G_n v \rangle | \geqslant nq \Big)
= \frac{ r_{s}(x) r^*_{s}(y)}{\varrho_s}
\frac{ \exp \left( -n \Lambda^*(q) \right) } {s \sigma_{s}\sqrt{2\pi n}} \big[ 1 + o(1) \big].
\end{align}
More generally, for any measurable function $\psi$ on $\mathbb{R}$
such that $u \mapsto e^{-s'u}\psi(u)$ is directly Riemann integrable
for any $s' \in K_{\mu}^{\epsilon} : = \{ s' \in \bb R: |s' - s| < \epsilon, s \in K_{\mu} \}$
with $\epsilon >0$ small enough,
we have, as $n \to \infty$, uniformly in $s \in K_{\mu}$,
$f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
\begin{align}
& \mathbb{E} \Big[ \varphi(G_n x) \psi \big( \log |\langle f, G_n v \rangle| - nq \big) \Big]
\label{SCALREZ02_Uni_s} \\
& = \frac{r_{s}(x)}{\varrho_s}
\frac{ \exp (-n \Lambda^*(q)) }{ \sigma_{s}\sqrt{2\pi n}}
\left[ \int_{\bb P^{d-1}} \varphi(x) \delta(y,x)^s \nu_s(dx)
\int_{\mathbb{R}} e^{-su} \psi(u) du + o(1) \right]. \nonumber
\end{align}
\end{theorem}
\begin{proof}
It suffices to prove
assertion \eqref{SCALREZ02_Uni_s},
since
\eqref{SCALREZ02_Uni_s_0000}
follows from
\eqref{SCALREZ02_Uni_s}
by choosing $\varphi = \mathbf{1}$
and $\psi (u) = \mathbbm{1}_{ \{ u\geqslant 0 \} },$ $u \in \mathbb R.$
Without loss of generality, we assume that the target functions $\varphi$ and $\psi$
are non-negative.
For brevity, denote $\psi_s(u)=e^{-su}\psi(u)$ for $s \in
I_{\mu}^{\circ}$,
and
\begin{align*}
\psi^+_{s,\ee} (u) = \sup_{u'\in\mathbb{B}_{\ee}(u)} \psi_s(u'),
\quad
\psi^-_{s,\ee}(u) = \inf_{u'\in\mathbb{B}_{\ee}(u)} \psi_s(u').
\end{align*}
Introduce the following condition:
for any $\ee>0,$ the functions
$u \mapsto \psi^+_{s,\ee}(u)$
and $u \mapsto \psi^-_{s,\ee}(u)$
are measurable and
\begin{align}\label{condition g}
\lim_{\ee \to 0^{+}} \int_{\mathbb{R}} \psi^+_{s,\ee}(u) du
= \lim_{\ee \to 0^{+}} \int_{\mathbb{R}} \psi^-_{s,\ee}(u) du
=\int_{\mathbb{R}} e^{-su} \psi(u) du < +\infty.
\end{align}
To prove
\eqref{SCALREZ02_Uni_s},
we can additionally assume
that the function $\psi$ satisfies the condition \eqref{condition g}.
In fact, using approximation techniques similar to those in \cite{XGL19a},
we can prove that if
\eqref{SCALREZ02_Uni_s}
holds under \eqref{condition g}, then it also holds
under the directly Riemann integrability condition introduced in the theorem.
So in the following we assume \eqref{condition g}.
Recall that $f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
and that $y = \bb R f \in (\bb P^{d-1})^*$ and $x = \bb R v \in \bb P^{d-1}$.
Hence $\log |\langle f, G_n v \rangle| = \log |G_n v| + \log \delta(y, G_n x)$,
and $\log |\langle f, G_n v \rangle| = - \infty$ if and only if $\log \delta(y, G_n x) = -\infty$.
Taking into account that $\psi(-\infty) = 0$, we can replace
the logarithm of the coefficient $\log |\langle f, G_n v \rangle|$
by the sum $\log |G_n v| + \log \delta(y, G_n x)$ as follows:
\begin{align*}
A: & = \sigma_s \sqrt{2\pi n} \frac{ e^{n \Lambda^*(q)} }{r_s(x)}
\mathbb{E} \Big[ \varphi(G_n x)\psi( \log |\langle f, G_n v \rangle| - n q ) \Big] \nonumber\\
& = \sigma_s \sqrt{2\pi n}
\frac{ e^{n \Lambda^*(q)} }{r_s(x)} \mathbb{E} \Big[ \varphi(G_n x)
\psi( \log |G_n v| + \log \delta(y, G_n x) - nq ) \Big].
\end{align*}
For short, we denote for any $y = \bb R f \in (\bb P^{d-1})^*$ and $x = \bb R v \in \bb P^{d-1}$,
\begin{align*}
T_n^v: = \log |G_n v| - nq, \qquad Y_n^{x,y}: = \log \delta(y, G_n x).
\end{align*}
Recall that $q = \Lambda'(s)$.
Taking into account that $e^{n\Lambda^{*}(q)} = e^{nsq} \kappa^{-n}(s)$
and using the change of measure formula \eqref{basic equ1},
we get
\begin{align} \label{ScaProLimAn 01}
A = \sigma_s \sqrt{2\pi n}
\mathbb{E}_{\mathbb{Q}_{s}^{x}} \Big[ (\varphi r_{s}^{-1})(G_n x) e^{-s T_n^v}
\psi \big( T_{n}^v + Y_n^{x,y} \big) \Big].
\end{align}
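Here \eqref{basic equ1} is applied with
$h = (\varphi r_{s}^{-1})(G_n x)\, e^{-s ( \sigma(G_n, x) - nq )}\,
\psi \big( \sigma(G_n, x) - nq + Y_n^{x,y} \big)$,
which is of the required form since $Y_n^{x,y}$ is a function of $G_n x$;
we also used that $\sigma(G_n, x) = \log |G_n v|$ (recall that $|v| = 1$), so that $\sigma(G_n, x) - nq = T_n^v$,
together with the identity $e^{n\Lambda^{*}(q)} = e^{nsq} \kappa^{-n}(s)$.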
For any fixed small constant $0< \eta <1$, denote $I_k: = (-\eta k, -\eta(k-1)]$, $k \geqslant 1$.
Let $M_n:= \floor{ C_1 \log n }$, where $C_1>0$ is a sufficiently large constant
and $\floor{a}$ denotes the integer part of $a \in \bb R$.
Then from \eqref{ScaProLimAn 01} we have the following decomposition:
\begin{align}\label{PosiScalA_ccc}
A = A_1 + A_2,
\end{align}
where
\begin{align*}
& A_1 : = \sigma_s \sqrt{2\pi n} \mathbb{E}_{\mathbb{Q}_{s}^{x}}
\left[ (\varphi r_{s}^{-1})(G_n x) e^{-s T_n^v}
\psi \big( T_{n}^v + Y_n^{x,y} \big) \mathbbm{1}_{\{Y_n^{x, y} \leqslant -\eta M_n \}} \right],
\nonumber\\
& A_2 : = \sigma_s \sqrt{2\pi n} \sum_{k =1}^{M_n}
\mathbb{E}_{\mathbb{Q}_{s}^{x}}
\left[ (\varphi r_{s}^{-1})(G_n x) e^{-s T_n^v}
\psi \big( T_{n}^v + Y_n^{x,y} \big) \mathbbm{1}_{\{Y_n^{x,y} \in I_k \}} \right].
\end{align*}
We now give a bound for the first term $A_1$.
Since the function $u \mapsto e^{-s' u} \psi(u)$ is directly Riemann integrable on $\mathbb{R}$ for
any $s' \in K_{\mu}^{\epsilon} : = \{ s' \in \bb R: |s' - s| < \epsilon, s \in K_{\mu} \}$ with $\epsilon >0$ small enough,
one can verify that the function $u \mapsto e^{-s u} \psi(u)$ is bounded on $\mathbb{R}$, uniformly in $s \in K_{\mu}$,
and hence there exists a constant $C >0$ such that for all $s \in K_{\mu}$,
\begin{align*}
e^{-s T_n^v} \psi( T_{n}^v + Y_n^{x,y} ) \mathbbm{1}_{\{Y_n^{x, y} \leqslant -\eta M_n \}}
\leqslant C e^{ s Y_n^{x,y} } \mathbbm{1}_{\{Y_n^{x, y} \leqslant -\eta M_n \}}
\leqslant C e^{- s \eta M_n}.
\end{align*}
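Indeed, writing
$e^{-s T_n^v} \psi( T_{n}^v + Y_n^{x,y} )
= e^{ s Y_n^{x,y} } \, e^{-s ( T_{n}^v + Y_n^{x,y} )} \psi( T_{n}^v + Y_n^{x,y} )$
and using the bound $\sup_{u \in \mathbb{R}} e^{-su} \psi(u) \leqslant C$ gives the first inequality above,
while the second one holds because $e^{ s Y_n^{x,y} } \leqslant e^{- s \eta M_n}$ on the event
$\{ Y_n^{x, y} \leqslant -\eta M_n \}$ (recall that $s > 0$ here).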
Since the function $\varphi r_s^{-1}$ is bounded on $\bb P^{d-1}$, uniformly in $s \in K_{\mu}$,
we get the following upper bound for $A_1$: as $n \to \infty$, uniformly in $s \in K_{\mu}$,
$f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
\begin{align} \label{ScalInverAn 2}
A_1 \leqslant C \sqrt{n} \, e^{ - s \eta M_n} \leqslant C n^{- (s \eta C_1 - \frac{1}{2})} \to 0.
\end{align}
The remaining part of the proof is devoted to
establishing upper and lower bounds for the second term $A_2$ defined by \eqref{PosiScalA_ccc}.
\textit{Upper bound for $A_2$.}
On the event $\{ Y_n^{x, y} \in I_k \}$,
we have $Y_n^{x, y} + \eta(k-1) \in (-\eta, 0]$. With the notation
$\psi^+_{\eta} (u) = \sup_{u' \in \mathbb{B}_{\eta}(u)} \psi(u')$, we get
\begin{align*}
\psi \big( T_{n}^v + Y_n^{x,y} \big)
\leqslant \psi_{\eta}^+ \big( T_{n}^v - \eta(k-1) \big).
\end{align*}
It follows that
\begin{align*}
& A_2 \leqslant \sigma_s \sqrt{2\pi n} \sum_{k =1}^{M_n}
\mathbb{E}_{\mathbb{Q}_{s}^{x}} \left[ (\varphi r_{s}^{-1})(G_n x) e^{-s T_n^v}
\psi_{\eta}^+( T_{n}^v - \eta(k-1)) \mathbbm{1}_{\{ Y_n^{x, y} \in I_k \}} \right].
\end{align*}
We choose a small constant $\ee > \eta$ and set
\begin{align}\label{Def_Psi_aaa}
\Psi_{s,\eta} (u) = e^{-su} \psi_{\eta}^+(u),
\quad
\Psi^+_{s, \eta, \ee}(u) = \sup_{u'\in\mathbb{B}_{\ee}(u)} \Psi_{s,\eta} (u'),
\quad
u \in \bb R.
\end{align}
Since the function $\Psi^+_{s, \eta, \ee}$ is non-negative and integrable on the real line,
using Lemma \ref{estimate u convo}, we get
\begin{align} \label{ScaProLimAn Bn 01}
& A_2 \leqslant ( 1+ C_{\rho}(\ee) )
\sigma_s \sqrt{2\pi n} \sum_{k =1}^{\infty} \mathbbm{1}_{ \{ k \leqslant M_n \} } e^{-s\eta (k-1)} \nonumber\\
& \qquad \times \mathbb{E}_{\mathbb{Q}_{s}^{x}}
\left[(\varphi r_{s}^{-1})(G_n x) \mathbbm{1}_{ \{ Y_n^{x,y} \in I_k \} }
({\Psi}^+_{s, \eta, \ee}\!\ast\!\rho_{\ee^2})
(T_{n}^v - \eta(k-1))\right],
\end{align}
where $C_{\rho}(\ee) >0$ is a constant converging to $0$ as $\ee \to 0$.
For fixed small constant $\ee_1 >0$,
introduce the density function $\bar{\rho}_{\ee_1}$ defined as follows:
$\bar{\rho}_{\ee_1}(u) = \frac{1}{\ee_1}(1 - \frac{|u|}{\ee_1}) $
for $u \in [-\ee_1, \ee_1]$, and $\bar{\rho}_{\ee_1}(u) = 0$ otherwise.
For any $k \geqslant 1$,
setting $\chi_k(u) := \mathbbm{1}_{\{u \in I_k \}}$
and $\chi_{k, \ee_1}^+(u) = \sup_{u' \in \mathbb{B}_{\ee_1}(u)} \chi_k(u')$,
one can verify that the following smoothing inequality holds:
\begin{align} \label{Pf_LD_SmoothIneHolder01}
\chi_k(u) \leqslant
(\chi_{k, \ee_1}^+ * \bar{\rho}_{\ee_1})(u)
\leqslant \chi_{k, 2\ee_1}^+(u), \quad u \in \mathbb{R}.
\end{align}
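Indeed, since $\bar{\rho}_{\ee_1}$ is a probability density supported on $[-\ee_1, \ee_1]$,
for $u \in I_k$ and $|w| \leqslant \ee_1$ we have $\chi_{k, \ee_1}^+(u - w) = 1$,
which gives the first inequality in \eqref{Pf_LD_SmoothIneHolder01};
the second one follows since $\chi_{k, \ee_1}^+(u - w) \leqslant \chi_{k, 2\ee_1}^+(u)$ whenever $|w| \leqslant \ee_1$.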
For short, we denote
$\tilde\chi_k(u):= (\chi_{k, \ee_1}^+ * \bar{\rho}_{\ee_1})(u)$, $u \in \mathbb{R}$,
and
\begin{align}\label{Pf_Baradur_LD_varph_11}
\varphi_{s,k,\ee_1}^y(x) = (\varphi r_s^{-1})(x) \tilde\chi_k(\log \delta(y, x)),
\quad x \in \bb{P}^{d-1}.
\end{align}
In view of \eqref{ScaProLimAn Bn 01}, using the smoothing inequality \eqref{Pf_LD_SmoothIneHolder01} leads to
\begin{align} \label{ScalarBn a}
A_2 & \leqslant (1+ C_{\rho}(\ee))
\sigma_s \sqrt{2\pi n} \sum_{k =1}^{\infty} \mathbbm{1}_{ \{ k \leqslant M_n \} }
e^{-s\eta (k-1)} \nonumber \\
& \quad \times \mathbb{E}_{\mathbb{Q}_{s}^{x}}
\left[ \varphi_{s,k,\ee_1}^y(G_n x)
({\Psi}^+_{s, \eta, \ee}\!\ast\!\rho_{\ee^2})
(T_{n}^v - \eta(k-1))\right] \nonumber \\
& =: A_2^+.
\end{align}
Let $\widehat{{\Psi}}^+_{s, \eta,\ee}$ be the Fourier transform of ${\Psi}^+_{s,\eta, \ee}$.
By the Fourier inversion formula,
\begin{align*}
{\Psi}^+_{s, \eta, \ee}\!\ast\!\rho_{\ee^{2}}(u)
=\frac{1}{2\pi}\int_{\mathbb{R}}e^{itu}
\widehat {\Psi}^+_{s, \eta, \ee}(t) \widehat\rho_{\ee^{2}}(t)dt,
\quad u \in \mathbb{R}.
\end{align*}
Taking $u = T_{n}^v - \eta (k-1)$ in this formula, taking the expectation $\mathbb{E}_{\mathbb{Q}_{s}^{x}}$,
and using Fubini's theorem, we obtain
\begin{align} \label{ScalarFourier a}
& \mathbb{E}_{\mathbb{Q}_{s}^{x}}
\left[ \varphi_{s,k,\ee_1}^y(G_n x)
({\Psi}^+_{s, \eta, \ee}\!\ast\!\rho_{\ee^2})
(T_{n}^v - \eta(k-1))\right] \nonumber\\
& = \frac{1}{2\pi} \int_{\mathbb{R}} e^{-it \eta(k-1)}
R^{n}_{s,it} \big( \varphi_{s,k,\ee_1}^y \big)(x)
\widehat {\Psi}^+_{s, \eta, \ee}(t) \widehat\rho_{\ee^{2}}(t) dt,
\end{align}
where
\begin{align}
R^{n}_{s,it} \big( \varphi_{s,k,\ee_1}^y \big)(x)
=\mathbb{E}_{\mathbb{Q}_{s}^{x}}\left[e^{it T_{n}^v} \varphi_{s,k,\ee_1}^y(G_n x)
\right]. \nonumber
\end{align}
Substituting \eqref{ScalarFourier a} into \eqref{ScalarBn a},
we get
\begin{align} \label{Pf_LDScalA2_2}
& A_2^+ = (1+ C_{\rho}(\ee))
\sigma_s \sqrt{\frac{n}{2\pi}} \,
\sum_{k =1}^{\infty} \mathbbm{1}_{ \{ k \leqslant M_n \} } e^{-s\eta (k-1)} \nonumber\\
& \qquad \quad \times \int_{\mathbb{R}} e^{-it \eta(k-1)}
R^{n}_{s,it} \big( \varphi_{s,k,\ee_1}^y \big)(x)
\widehat {\Psi}^+_{s, \eta, \ee}(t) \widehat\rho_{\ee^{2}}(t) dt.
\end{align}
We shall use Proposition \ref{Prop Rn limit1} to handle the integral in \eqref{Pf_LDScalA2_2} for each fixed $k \geqslant 1$.
Let us first check the conditions stated in Proposition \ref{Prop Rn limit1}.
Since the function $\tilde \chi_k$ is H\"{o}lder continuous on the real line,
one can check that $\varphi_{s,k,\ee_1}^y$ defined by \eqref{Pf_Baradur_LD_varph_11} is also H\"{o}lder continuous
on the projective space $\bb P^{d-1}$.
Using the fact that the function
$u \mapsto e^{-s'u}\psi(u)$ is directly Riemann integrable on $\mathbb{R}$ for
any $s' \in K_{\mu}^{\epsilon} : = \{ s' \in \bb R: |s' - s| < \epsilon, s \in K_{\mu} \}$ with $\epsilon >0$ small enough,
one can also verify that the function $\widehat {\Psi}^+_{s, \eta, \ee} \widehat\rho_{\ee^{2}}$
is compactly supported in $\mathbb{R}$.
Moreover, for any $s \in K_{\mu}$, the function $\widehat {\Psi}^+_{s, \eta, \ee} \widehat\rho_{\ee^{2}}$
is differentiable in a small neighborhood of $0$ on the real line.
Hence, applying Proposition \ref{Prop Rn limit1}
with $\varphi = \varphi_{s,k,\ee_1}^y$,
$\psi = \widehat {\Psi}^+_{s, \eta, \ee} \widehat\rho_{\ee^{2}}$
and with $l = l_{n,k}: = \frac{\eta(k-1)}{n}$,
we obtain that for sufficiently large $n$, uniformly in $1 \leqslant k \leqslant M_n$,
$s \in K_{\mu}$,
$f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
\begin{align} \label{ScalarKeyProp 1}
I_k^+ : & = \Big| \sigma_s \sqrt{n}e^{n h_s(l_{n,k})}
\int_{\mathbb R} e^{-it n l_{n,k}}
R^{n}_{s,it} \big( \varphi_{s,k,\ee_1}^y \big)(x)
\widehat {\Psi}^+_{s, \eta, \ee}(t) \widehat\rho_{\ee^{2}}(t) dt
- B^{+}(k) \Big| \nonumber\\
& \leqslant \frac{C}{\sqrt{n}} \| \varphi_{s,k,\ee_1}^y \|_{\gamma},
\end{align}
where
\begin{align*}
B^{+}(k) : = \sqrt{2\pi} \widehat {\Psi}^+_{s, \eta, \ee}(0) \widehat\rho_{\ee^{2}}(0)
\pi_{s} \big( \varphi_{s,k,\ee_1}^y \big).
\end{align*}
Taking into account that $1 \leqslant k \leqslant M_n = \floor{ C_1 \log n }$,
by Lemma \ref{lemmaCR001}, we get that
$|e^{ - n h_s(l_{n,k})} - 1| \leqslant \frac{C \log n}{\sqrt{n}}$,
uniformly in $1 \leqslant k \leqslant M_n$ and $s \in K_{\mu}$.
Using \eqref{ScalarKeyProp 1} and the fact that $B^{+}(k)$ is dominated by
$\| \varphi_{s,k,\ee_1}^y \|_{\gamma}$,
we can replace $e^{n h_s(l_{n,k})}$ by $1$, yielding that
uniformly in $s \in K_{\mu}$, $f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
\begin{align} \label{ScalarKeyProp 2}
& \Big| \sigma_s \sqrt{n}
\int_{\mathbb R} e^{-it n l_{n,k} } R^{n}_{s,it} \big( \varphi_{s,k,\ee_1}^y \big)(x)
\widehat {\Psi}^+_{s, \eta, \ee}(t) \widehat\rho_{\ee^{2}}(t) dt - B^{+}(k) \Big| \nonumber\\
& \leqslant I_k^+ e^{ - n h_s(l_{n,k})} + |e^{ - n h_s(l_{n,k})} - 1| B^{+}(k) \nonumber\\
& \leqslant \frac{C}{\sqrt{n}} \| \varphi_{s,k,\ee_1}^y \|_{\gamma}
+ \frac{C \log n}{\sqrt{n}} \| \varphi_{s,k,\ee_1}^y \|_{\gamma} \nonumber\\
& \leqslant \frac{C \log n}{\sqrt{n}} \| \varphi_{s,k,\ee_1}^y \|_{\gamma}.
\end{align}
By direct calculation, one can check that the $\gamma$-H\"{o}lder norm
$\| \varphi_{s,k,\ee_1}^y \|_{\gamma}$
is bounded by
$
\frac{ e^{ \eta \gamma k} }{ ( 1 - e^{-2\ee_1} )^{\gamma} }.
$
Taking $\gamma>0$ sufficiently small, so that $\gamma < s$ for all $s \in K_{\mu}$,
the $k$-th term of the series $ \frac{\log n}{\sqrt{n}} \sum_{k = 1}^{\infty} e^{-s\eta (k-1)} \| \varphi_{s,k,\ee_1}^y \|_{\gamma}$
is at most $\frac{\log n}{\sqrt{n}} \frac{e^{s\eta}}{(1 - e^{-2\ee_1})^{\gamma}} e^{-\eta (s-\gamma) k}$;
hence the series converges and tends to $0$ as $n \to \infty$.
Consequently, we are allowed to interchange the limit as $n \to \infty$
and the infinite summation over $k$ in \eqref{Pf_LDScalA2_2}.
Therefore, from \eqref{Pf_LDScalA2_2}, \eqref{ScalarKeyProp 1} and \eqref{ScalarKeyProp 2}
we deduce that, uniformly in $s \in K_{\mu}$,
$f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
\begin{align} \label{ScalarBn abc}
\limsup_{n \to \infty} A_2^+
\leqslant (1+ C_{\rho}(\ee)) \widehat {\Psi}^+_{s, \eta, \ee}(0) \widehat\rho_{\ee^{2}}(0)
\sum_{k =1}^{\infty} e^{-s\eta (k-1)}
\pi_{s} \big( \varphi_{s,k,\ee_1}^y \big).
\end{align}
In order to calculate the sum in \eqref{ScalarBn abc},
we shall make use of the zero-one law of the stationary measure $\pi_s$.
Note that
$\widehat\rho_{\ee^{2}}(0) =1$.
Using \eqref{Pf_LD_SmoothIneHolder01}, we have $\tilde \chi_k \leqslant \chi_{k, 2\ee_1}^+$.
Therefore, we obtain
\begin{align} \label{LimsuBn a}
\limsup_{n \to \infty} A_2^+
\leqslant (1+ C_{\rho}(\ee))
\widehat {\Psi}^+_{s, \eta, \ee}(0)
\sum_{k =1}^{\infty} e^{-s\eta (k-1)}
\pi_{s} \big( \tilde \varphi_{s,k,\ee_1}^y \big),
\end{align}
where
\begin{align} \label{Pf_LDLimsuBn b}
\tilde \varphi_{s,k,\ee_1}^y (x)
= (\varphi r_s^{-1})(x) \mathbbm{1}_{\{ \log \delta(y, \cdot) \in I_k \}}(x)
+ (\varphi r_s^{-1})(x) \mathbbm{1}_{\{ \log \delta(y, \cdot) \in I_{k,\ee_1} \}}(x),
\end{align}
and $I_{k,\ee_1} = \big(-\eta k -2 \ee_1, -\eta k \big]
\cup \big(-\eta (k-1), -\eta (k-1) + 2 \ee_1 \big]$.
For the first term on the right-hand side of \eqref{Pf_LDLimsuBn b},
we claim that uniformly in $s \in K_{\mu}$,
\begin{align} \label{ScalPosiMainpart}
& \lim_{\eta \to 0}
\sum_{k =1}^{\infty} e^{-s\eta (k-1)}
\pi_{s} \Big( (\varphi r_{s}^{-1})
\mathbbm{1}_{\{ \log \delta(y, \cdot) \in I_k \}} \Big) \nonumber\\
& = \int_{ \bb P^{d-1} } \delta(y, x)^s \varphi(x) r_s^{-1}(x) \pi_s(dx).
\end{align}
Indeed, recalling that $I_k = (-\eta k, -\eta(k-1)]$, we have
\begin{align*}
& \sum_{k =1}^{\infty} e^{-s\eta (k-1)}
\pi_{s} \Big( (\varphi r_{s}^{-1})
\mathbbm{1}_{\{ \log \delta(y, \cdot) \in I_k \}} \Big) \nonumber\\
& \geqslant \sum_{k =1}^{\infty}
\pi_{s} \Big( (\varphi r_{s}^{-1}) \delta(y, \cdot)^s
\mathbbm{1}_{\{ \log \delta(y, \cdot) \in I_k \}} \Big)
= \int_{ \bb P^{d-1} } \delta(y, x)^s \varphi(x) r_s^{-1}(x) \pi_s(dx).
\end{align*}
On the other hand, we have, as $\eta \to 0$, uniformly in $s \in K_{\mu}$,
\begin{align*}
& \sum_{k =1}^{\infty} e^{-s\eta (k-1)}
\pi_{s} \Big( (\varphi r_{s}^{-1})
\mathbbm{1}_{\{ \log \delta(y, \cdot) \in I_k \}} \Big) \nonumber\\
& \leqslant e^{s \eta} \sum_{k =1}^{\infty}
\pi_{s} \Big( (\varphi r_{s}^{-1}) \delta(y, \cdot)^s
\mathbbm{1}_{\{ \log \delta(y, \cdot) \in I_k \}} \Big)
\to \int_{ \bb P^{d-1} } \delta(y, x)^s \varphi(x) r_s^{-1}(x) \pi_s(dx).
\end{align*}
Hence \eqref{ScalPosiMainpart} holds.
To deal with the second term on the right-hand side of \eqref{Pf_LDLimsuBn b},
we need to apply Lemma \ref{Lem_Gui_LeP} and the zero-one law of the stationary measure $\pi_s$
stated in Lemma \ref{Lem_0-1_law_s}.
Specifically, taking into account that the function $\varphi r_{s}^{-1}$ is uniformly bounded on
the projective space $\bb P^{d-1}$, using the Lebesgue dominated convergence theorem we get
that there exists a constant $C_1 >0$ such that for all $y \in (\bb P^{d-1})^*$,
\begin{align}\label{Pf_Upp_A1_ff}
& \lim_{\ee_1 \to 0}
\sum_{k =1}^{\infty} e^{-s\eta (k-1)}
\pi_{s} \Big( (\varphi r_{s}^{-1})
\mathbbm{1}_{\{ \log \delta(y, \cdot) \in I_{k,\ee_1} \}} \Big) \nonumber\\
& \leqslant C_1 \lim_{\ee_1 \to 0}
\sum_{k =1}^{\infty} e^{-s\eta (k-1)}
\pi_{s} \Big( x \in \bb P^{d-1}: \log \delta(y, x) \in I_{k,\ee_1} \Big) \nonumber\\
& = C_1 \sum_{k =1}^{\infty} e^{-s\eta (k-1)}
\pi_{s} \Big( x \in \bb P^{d-1}: \log \delta(y, x) = - \eta k \Big) \nonumber\\
& \quad + C_1 \sum_{k =1}^{\infty} e^{-s\eta (k-1)}
\pi_{s} \Big( x \in \bb P^{d-1}: \log \delta(y, x) = - \eta (k-1) \Big) \nonumber\\
& = 2 C_1 \sum_{k =1}^{\infty} e^{-s\eta (k-1)}
\pi_{s} \Big( x \in \bb P^{d-1}: \log \delta(y, x) = - \eta k \Big),
\end{align}
where the last equality holds due to Lemma \ref{Lem_Gui_LeP}.
Now we are going to apply Lemma \ref{Lem_0-1_law_s_Posi_Uni} to prove that
there exists a constant $0< \eta < 1$ such that
\begin{align}\label{sum_0_1_law}
\sum_{k =1}^{\infty} e^{-s\eta (k-1)}
\pi_{s} \Big( x \in \bb P^{d-1}: \log \delta(y, x) = - \eta k \Big) = 0.
\end{align}
Indeed, by Lemma \ref{Lem_0-1_law_s_Posi_Uni}, we get that,
for any $y \in (\bb P^{d-1})^*$ and
any set $Y_{y,t} = \{ x \in \bb P^{d-1}: \log \delta(y,x) = t \}$ with $t \in (-\infty, 0)$,
it holds that either $\pi_s (Y_{y,t}) = 0$ or $\pi_s (Y_{y,t}) = 1$ for all $s \in K_{\mu}$.
If $\pi_s (Y_{y,t}) = 0$ for all $y \in (\bb P^{d-1})^*$ and $t \in (-\infty, 0)$, then clearly \eqref{sum_0_1_law} holds.
If $\pi_s (Y_{y_0, t_0}) = 1$ for some $y_0 \in (\bb P^{d-1})^*$ and $t_0 \in (-\infty, 0)$,
then we can always choose $0< \eta <1$ in such a way that $- \eta k \neq t_0$ for all $k \geqslant 1$,
so that we also obtain that \eqref{sum_0_1_law} holds for all $s \in K_{\mu}$.
Hence, in view of \eqref{LimsuBn a},
combining \eqref{ScalPosiMainpart} and \eqref{sum_0_1_law} we obtain that uniformly in $s \in K_{\mu}$,
\begin{align}\label{Pf_LD_pi_s_hh}
\lim_{\eta \to 0}
\lim_{\ee_1 \to 0} \sum_{k =1}^{\infty} e^{-s\eta (k-1)}
\pi_{s} \big( \tilde \varphi_{s,k,\ee_1}^y \big)
= \int_{ \bb P^{d-1} } \delta(y, x)^s \varphi(x) r_s^{-1}(x) \pi_s(dx).
\end{align}
Since the target function $\psi$ satisfies the condition \eqref{condition g},
from \eqref{Def_Psi_aaa} we get
\begin{align} \label{Pf_LD_psi_limit}
\lim_{\ee \to 0} \lim_{\eta \to 0} \widehat {\Psi}^+_{s, \eta, \ee}(0)
= \int_{\mathbb{R}} e^{-su} \psi(u) du.
\end{align}
Consequently, recalling that $A_2 \leqslant A_2^+$ and $C_{\rho}(\ee) \to 0$ as $\ee \to 0$,
we obtain the desired upper bound for $A_2$: uniformly in $s \in K_{\mu}$,
$f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
\begin{align} \label{ScaProLimBn Upper 01}
\lim_{\ee \to 0} \lim_{\eta \to 0}
\lim_{\ee_1 \to 0} \limsup_{n \to \infty} A_2
\leqslant \int_{\mathbb{R}} e^{-su} \psi(u) du
\int_{ \bb P^{d-1} } \delta(y, x)^s \varphi(x) r_s^{-1}(x) \pi_s(dx).
\end{align}
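Note that, by the last formula in \eqref{equcontin Q s limit}, $\pi_s(dx) = r_s(x) \nu_s(dx) / \nu_s(r_s)$, so that
\begin{align*}
\int_{ \bb P^{d-1} } \delta(y, x)^s \varphi(x) r_s^{-1}(x) \pi_s(dx)
= \frac{1}{\nu_s(r_s)} \int_{ \bb P^{d-1} } \varphi(x) \delta(y, x)^s \nu_s(dx),
\end{align*}
which, for $\varphi = \mathbf{1}$, equals $r_s^*(y)/\nu_s(r_s)$ by \eqref{expleigenfun001}.
This identity connects the limits obtained for $A_2$ with the right-hand sides of
\eqref{SCALREZ02_Uni_s_0000} and \eqref{SCALREZ02_Uni_s}.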
\textit{Lower bound for $A_2$.}
We are going to establish the lower bound for $A_2$ given by \eqref{PosiScalA_ccc}.
Recall that $Y_n^{x,y} = \log \delta(y, G_n x)$.
On the event $\{ Y_n^{x,y} \in I_k \}$
we have $Y_n^{x,y} + \eta k \in (0, \eta]$. With the notation
$\psi^-_{\eta} (u) = \inf_{u'\in\mathbb{B}_{\eta}(u)} \psi(u')$, we get
\begin{align*}
\psi( T_{n}^v + Y_n^{x,y} ) \geqslant \psi_{\eta}^- ( T_{n}^v - \eta k ).
\end{align*}
In view of \eqref{PosiScalA_ccc}, using Fatou's lemma, it follows that
\begin{align}
\liminf_{ n \to \infty} A_2
& \geqslant \sum_{k =1}^{\infty}
\liminf_{ n \to \infty}
\sigma_s \sqrt{2\pi n} \, \mathbbm{1}_{ \{ k \leqslant M_n \} } \label{Pf_Low_A2_dd}\\
& \quad \times
\mathbb{E}_{\mathbb{Q}_{s}^{x}} \Big[ (\varphi r_{s}^{-1})(G_n x) e^{-s T_n^v}
\psi^-_{\eta}( T_{n}^v - \eta k ) \mathbbm{1}_{\{Y_n^{x,y} \in I_k \}} \Big]. \nonumber
\end{align}
We choose a small constant $\ee > \eta$ and set
\begin{align}\label{Def_Psi_ccc}
\Psi_{s,\eta} (u) = e^{-su} \psi_{\eta}^-(u),
\quad
\Psi^-_{s, \eta, \ee}(u) = \inf_{u'\in\mathbb{B}_{\ee}(u)} \Psi_{s,\eta} (u'),
\quad
u \in \bb R.
\end{align}
Noting that the function $\Psi^-_{s, \eta, \ee}$ is non-negative and integrable on the real line,
by Lemma \ref{estimate u convo}, from \eqref{Pf_Low_A2_dd} we get the following lower bound:
\begin{align} \label{Scal Low An 1}
\liminf_{ n \to \infty} A_2
\geqslant \sum_{k =1}^{\infty} \liminf_{ n \to \infty} A_3 -
\sum_{k =1}^{\infty} \limsup_{ n \to \infty} A_4,
\end{align}
where, with the notation $a_{n,k} = \sigma_s \sqrt{2\pi n} \, e^{-s\eta k} \mathbbm{1}_{ \{ k \leqslant M_n \} }$,
\begin{align*}
& A_3 = a_{n,k}
\mathbb{E}_{\mathbb{Q}_{s}^{x}}
\Big[ (\varphi r_{s}^{-1})(G_n x) \mathbbm{1}_{\{Y_n^{x,y} \in I_k \}}
({\Psi}^-_{s, \eta, \ee}\!\ast\!\rho_{\ee^2})
(T_{n}^v - \eta k ) \Big], \nonumber\\
& A_4 = a_{n,k} \int_{|u|\geqslant \ee} \mathbb{E}_{\mathbb{Q}_{s}^{x}}
\Big[ (\varphi r_{s}^{-1})(G_n x) \mathbbm{1}_{\{Y_n^{x,y} \in I_k \}}
{\Psi}^-_{s, \eta, \ee}(T_{n}^v - \eta k - u) \Big] \rho_{\ee^2}(u) du.
\end{align*}
We are going to give a lower bound for $A_3$.
For brevity, we denote $\chi_k(u)= \mathbbm{1}_{\{u \in I_k \}}$
and $\chi_{k, \ee_1}^-(u) = \inf_{u'\in\mathbb{B}_{\ee_1}(u)} \chi_k(u')$, $u \in \bb R$,
where $\ee_1 >0$ is a fixed small constant.
Similarly to \eqref{Pf_LD_SmoothIneHolder01},
one can get the following smoothing inequality:
\begin{align} \label{SmoothIne Holder 02}
\chi_{k, 2\ee_1}^-(u)
\leqslant (\chi_{k, \ee_1}^- * \bar{\rho}_{\ee_1})(u)
\leqslant \chi_k(u), \quad u \in \mathbb{R},
\end{align}
where the density function $\bar{\rho}$ is the same as that in \eqref{Pf_LD_SmoothIneHolder01}.
In a similar way as in \eqref{Pf_Baradur_LD_varph_11}, we denote
$\tilde\chi_k^-(u) := (\chi_{k, \ee_1}^- * \bar{\rho}_{\ee_1})(u)$, $u \in \bb R$, and
\begin{align}\label{Pf_Baradur_LD_varph_22}
\phi_{s,k,\ee_1}^y(x) = (\varphi r_s^{-1})(x) \tilde \chi_k^-(\log \delta(y, x)),
\quad x \in \bb{P}^{d-1}.
\end{align}
For the first term $A_3$ in \eqref{Scal Low An 1},
using the inequality \eqref{SmoothIne Holder 02} leads to
\begin{align} \label{ScalarBn Low a}
A_3 \geqslant a_{n,k}
\mathbb{E}_{\mathbb{Q}_{s}^{x}}
\left[ \phi_{s,k,\ee_1}^y(G_n x) ( {\Psi}^-_{s,\eta,\ee}\!\ast\!\rho_{\ee^2} )
(T_{n}^v - \eta k) \right].
\end{align}
Denote by
$\widehat{{\Psi}}^-_{s,\eta,\ee}$ the Fourier transform of ${\Psi}^-_{s,\eta,\ee}$.
Applying the Fourier inversion formula to ${\Psi}^-_{s,\eta,\ee}\!\ast\!\rho_{\ee^{2}}$,
and using Fubini's theorem, we get
\begin{align} \label{ScalarFourier Low a}
& \mathbb{E}_{\mathbb{Q}_{s}^{x}}
\left[ \phi_{s,k,\ee_1}^y(G_n x)
({\Psi}^-_{s, \eta, \ee}\!\ast\!\rho_{\ee^2})
(T_{n}^v - \eta k)\right] \nonumber\\
& = \frac{1}{2\pi} \int_{\mathbb{R}} e^{-it \eta k}
R^{n}_{s,it} \big( \phi_{s,k,\ee_1}^y \big)(x)
\widehat {\Psi}^-_{s,\eta, \ee}(t) \widehat\rho_{\ee^{2}}(t) dt,
\end{align}
where
\begin{align*}
R^{n}_{s,it} \big( \phi_{s,k,\ee_1}^y \big)(x)
= \mathbb{E}_{\mathbb{Q}_{s}^{x}}\left[e^{it T_{n}^v}
\phi_{s,k,\ee_1}^y(G_n x)
\right], \quad x \in \bb P^{d-1}.
\end{align*}
Substituting \eqref{ScalarFourier Low a} into \eqref{ScalarBn Low a}, we obtain
\begin{align} \label{Pf_Low_Bn-}
A_3 \geqslant \frac{a_{n,k}}{2 \pi}
\int_{\mathbb{R}} e^{-it \eta k}
R^{n}_{s,it} \big( \phi_{s,k,\ee_1}^y \big)(x)
\widehat {\Psi}^-_{s,\eta,\ee}(t) \widehat\rho_{\ee^{2}}(t) dt.
\end{align}
We shall use Proposition \ref{Prop Rn limit1} to give a precise asymptotic
for the above integral.
Let us first verify the conditions of Proposition \ref{Prop Rn limit1}.
Since the function $\tilde \chi_k^-$ is H\"{o}lder continuous for any fixed $k \geqslant 1$,
one can check that $\phi_{s,k,\ee_1}^y$ is H\"{o}lder continuous
on the projective space $\bb P^{d-1}$.
Since the function
$u \mapsto e^{-s'u}\psi(u)$ is directly Riemann integrable on $\mathbb{R}$ for
any $s' \in K_{\mu}^{\epsilon} : = \{ s' \in \bb R: |s' - s| < \epsilon, s \in K_{\mu} \}$ with $\epsilon >0$ small enough,
one can also verify that the function $\widehat {\Psi}^-_{s, \eta, \ee} \widehat\rho_{\ee^{2}}$
has compact support in $\mathbb{R}$,
and that $\widehat {\Psi}^-_{s, \eta, \ee} \widehat\rho_{\ee^{2}}$
is differentiable in a small neighborhood of $0$ on the real line, for all $s \in K_{\mu}$.
Thus, using Proposition \ref{Prop Rn limit1}
with $\varphi = \phi_{s,k,\ee_1}^y$,
$\psi = \widehat {\Psi}^-_{s,\eta,\ee} \widehat\rho_{\ee^{2}}$
and $l = l_{n,k}': = \frac{\eta k }{n}$,
we obtain that for sufficiently large $n$, there exists a constant $C>0$ such that for all
$1 \leqslant k \leqslant M_n = \floor{C_1 \log n}$, $s \in K_{\mu}$,
$f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
\begin{align} \label{ScalarKeyPropLower1}
I_k^-: & = \Big| \sigma_s \sqrt{n} e^{n h_s(l_{n,k}')}
\int_{\mathbb R} e^{-it n l_{n,k}' }
R^{n}_{s,it} \big( \phi_{s,k,\ee_1}^y \big)(x)
\widehat {\Psi}^-_{s,\eta,\ee}(t) \widehat\rho_{\ee^{2}}(t) dt
- B^{-}(k) \Big| \nonumber\\
& \leqslant \frac{C}{\sqrt{n}} \| \phi_{s,k,\ee_1}^y \|_{\gamma},
\end{align}
where
\begin{align*}
B^{-}(k) : = \sqrt{2\pi} \widehat {\Psi}^-_{s, \eta, \ee}(0) \widehat\rho_{\ee^{2}}(0)
\pi_{s} \big( \phi_{s,k,\ee_1}^y \big).
\end{align*}
Since $1 \leqslant k \leqslant M_n= \floor{C_1 \log n}$,
using Lemma \ref{lemmaCR001} there exists a constant $C>0$ such that for all $1 \leqslant k \leqslant M_n$,
$s \in K_{\mu}$ and $n \geqslant 1$, it holds that
$|e^{ - n h_s(l_{n,k}') } - 1| \leqslant \frac{C \log n}{\sqrt{n}}$.
In a similar way as in the proof of \eqref{ScalarKeyProp 2},
we can replace $e^{n h_s( l_{n,k}' )}$ by $1$ to obtain that,
uniformly in $s \in K_{\mu}$,
$f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
\begin{align*}
& \Big| \sigma_s \sqrt{n}
\int_{\mathbb R} e^{-it n l_{n,k}'}
R^{n}_{s,it} \big( \phi_{s,k,\ee_1}^y \big)(x)
\widehat {\Psi}^{-}_{s, \eta, \ee}(t) \widehat\rho_{\ee^{2}}(t) dt - B^{-}(k) \Big| \nonumber\\
& \leqslant I_k^- e^{ - n h_s(l_{n,k}')} + |e^{ - n h_s(l_{n,k}')} - 1| B^{-}(k) \nonumber\\
& \leqslant \frac{C}{\sqrt{n}} \| \phi_{s,k,\ee_1}^y \|_{\gamma}
+ \frac{C \log n}{\sqrt{n}} \| \phi_{s,k,\ee_1}^y \|_{\gamma} \nonumber\\
& \leqslant \frac{C \log n}{\sqrt{n}} \| \phi_{s,k,\ee_1}^y \|_{\gamma}.
\end{align*}
Since the $\gamma$-H\"{o}lder norm
$\| \phi_{s,k,\ee_1}^y \|_{\gamma}$
is bounded by
$\frac{ e^{ \eta \gamma k} }{ ( e^{2\ee_1} - 1 )^{\gamma} }$,
taking sufficiently small $\gamma>0$, we obtain that
the series $ \frac{\log n}{\sqrt{n}} \sum_{k = 1}^{\infty} e^{-s\eta (k-1)} \| \phi_{s,k,\ee_1}^y \|_{\gamma}$
converges to $0$ as $n \to \infty$.
As a result, by virtue of \eqref{ScalarKeyPropLower1}, we obtain that,
uniformly in $s \in K_{\mu}$,
$f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
\begin{align*}
\sum_{k =1}^{\infty} \liminf_{ n \to \infty} A_3
\geqslant \widehat {\Psi}^-_{s,\eta,\ee}(0) \widehat\rho_{\ee^{2}}(0)
\sum_{k =1}^{\infty} e^{-s\eta k} \pi_{s} \big( \phi_{s,k,\ee_1}^y \big).
\end{align*}
Note that $\widehat\rho_{\ee^{2}}(0) =1$.
Using \eqref{SmoothIne Holder 02}, we have $\tilde \chi_k^- \geqslant \chi_{k, 2\ee_1}^-$.
Consequently,
we obtain the lower bound for the first term on the right hand side of \eqref{Scal Low An 1}:
uniformly in $s \in K_{\mu}$,
$f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
\begin{align} \label{LimsuBn Low a}
\sum_{k =1}^{\infty} \liminf_{ n \to \infty} A_3
\geqslant \widehat {\Psi}^-_{s,\eta,\ee}(0)
\sum_{k =1}^{ \infty } e^{-s\eta k}
\pi_{s} \big( \tilde \phi_{s,k,\ee_1}^y \big),
\end{align}
where
\begin{align} \label{Pf_LDLimsuA_3_nn}
\tilde \phi_{s,k,\ee_1}^y (x)
= (\varphi r_s^{-1})(x) \mathbbm{1}_{\{ \log \delta(y, \cdot) \in I_k \}}(x)
- (\varphi r_s^{-1})(x) \mathbbm{1}_{\{ \log \delta(y, \cdot) \in \tilde I_{k,\ee_1} \}}(x),
\end{align}
and $\tilde I_{k,\ee_1} = \big(-\eta k, -\eta k + 2 \ee_1 \big]
\cup \big( -\eta (k-1) - 2 \ee_1, -\eta (k-1) \big]$.
For the first term on the right-hand side of \eqref{Pf_LDLimsuA_3_nn},
since $I_k = (-\eta k, -\eta(k-1)]$, in a similar way as in the proof of \eqref{ScalPosiMainpart},
it holds uniformly in $s \in K_{\mu}$ that
\begin{align} \label{ScalPosiMainpart_bb}
& \lim_{\eta \to 0}
\sum_{k =1}^{\infty} e^{-s\eta k}
\pi_{s} \Big( (\varphi r_{s}^{-1})
\mathbbm{1}_{\{ \log \delta(y, \cdot) \in I_k \}} \Big) \nonumber\\
& = \int_{ \bb P^{d-1} } \delta(y, x)^s \varphi(x) r_s^{-1}(x) \pi_s(dx).
\end{align}
To handle the second term on the right-hand side of \eqref{Pf_LDLimsuA_3_nn},
we make use of Lemma \ref{Lem_Gui_LeP} and the zero-one law of the stationary measure $\pi_s$
shown in Lemma \ref{Lem_0-1_law_s_Posi_Uni}.
Specifically, similarly to the proof of \eqref{Pf_Upp_A1_ff},
since the function $\varphi r_{s}^{-1}$ is bounded on $\bb P^{d-1}$, uniformly in $s \in K_{\mu}$,
using the Lebesgue dominated convergence theorem we get
that there exists a constant $C_1 >0$ such that for all $y \in (\bb P^{d-1})^*$ and $s \in K_{\mu}$,
\begin{align}\label{Pf_Upp_A3_uu}
& \lim_{\ee_1 \to 0}
\sum_{k =1}^{\infty} e^{-s\eta k}
\pi_{s} \Big( (\varphi r_{s}^{-1})
\mathbbm{1}_{\{ \log \delta(y, \cdot) \in \tilde I_{k,\ee_1} \}} \Big) \nonumber\\
& \leqslant C_1 \lim_{\ee_1 \to 0}
\sum_{k =1}^{\infty} e^{-s\eta k}
\pi_{s} \Big( x \in \bb P^{d-1}: \log \delta(y, x) \in \tilde I_{k,\ee_1} \Big) \nonumber\\
& = C_1 \sum_{k =1}^{\infty} e^{-s\eta k}
\pi_{s} \Big( x \in \bb P^{d-1}: \log \delta(y, x) = - \eta k \Big) \nonumber\\
& \quad + C_1 \sum_{k =1}^{\infty} e^{-s\eta k}
\pi_{s} \Big( x \in \bb P^{d-1}: \log \delta(y, x) = - \eta (k-1) \Big) \nonumber\\
& = 2 C_1 \sum_{k =1}^{\infty} e^{-s\eta k}
\pi_{s} \Big( x \in \bb P^{d-1}: \log \delta(y, x) = - \eta k \Big),
\end{align}
where in the last equality we used Lemma \ref{Lem_Gui_LeP}.
In the same way as in the proof of \eqref{sum_0_1_law},
applying Lemma \ref{Lem_0-1_law_s_Posi_Uni} we can obtain that
there exists a constant $0< \eta < 1$ such that for all $s \in K_{\mu}$,
\begin{align}\label{sum_0_1_law_A_3}
\sum_{k =1}^{\infty} e^{-s\eta k}
\pi_{s} \Big( x \in \bb P^{d-1}: \log \delta(y, x) = - \eta k \Big) = 0.
\end{align}
Since the target function $\psi$ satisfies the condition \eqref{condition g},
from \eqref{Def_Psi_aaa} we get that uniformly in $s \in K_{\mu}$,
\begin{align}\label{Pf_LD_psi_dd}
\lim_{\ee \to 0} \lim_{\eta \to 0} \widehat {\Psi}^-_{s, \eta, \ee}(0)
= \int_{\mathbb{R}} e^{-su} \psi(u) du.
\end{align}
Consequently, in view of \eqref{LimsuBn Low a},
combining \eqref{ScalPosiMainpart_bb}, \eqref{Pf_Upp_A3_uu}, \eqref{sum_0_1_law_A_3} and \eqref{Pf_LD_psi_dd},
we get the desired lower bound for $A_3$: uniformly in $s \in K_{\mu}$,
\begin{align}\label{Scal Low Bn}
\lim_{\ee \to 0} \lim_{\eta \to 0}
\lim_{\ee_1 \to 0}
\sum_{k =1}^{\infty} \liminf_{ n \to \infty} A_3
\geqslant \int_{\mathbb{R}} e^{-su} \psi(u) du
\int_{ \bb P^{d-1} } \delta(y, x)^s \varphi(x) r_s^{-1}(x) \pi_s(dx).
\end{align}
Now we proceed to establish an upper bound for the term $A_4$ in \eqref{Scal Low An 1}.
Note that $\Psi^-_{s,\eta,\ee} \leqslant \Psi_{s,\eta}$, where
$\Psi_{s,\eta} (u) = e^{-su} \psi_{\eta}^+(u)$, $u \in \mathbb{R}$.
Then it follows from Lemma \ref{estimate u convo} that
$\Psi^-_{s,\eta,\ee}
\leqslant (1+ C_{\rho}(\ee)) \widehat{\Psi}^+_{s,\eta,\ee} \widehat{\rho}_{\ee^2}$,
where ${\Psi}^+_{s, \eta, \ee}(u) = \sup_{u'\in\mathbb{B}_{\ee}(u)} \Psi_{s,\eta} (u')$,
$u \in \mathbb{R}$.
Moreover, using \eqref{Pf_LD_SmoothIneHolder01}, we get
$\mathbbm{1}_{ \{ Y_n^{x,y} \in I_k \} } \leqslant \tilde\chi_k(Y_n^{x,y})$,
where $\tilde\chi_k = \chi_{k, \ee_1}^+ * \bar{\rho}_{\ee_1}$.
Consequently, similarly to the proof of \eqref{Pf_LDScalA2_2},
we can get the upper bound for $A_4$: uniformly in $s \in K_{\mu}$,
\begin{align} \label{Pf_Upper_Un-}
& A_4
\leqslant (1+ C_{\rho}(\ee)) \frac{a_{n,k}}{2 \pi} \nonumber\\
& \quad \times \int_{ |u|\geqslant \ee } \left[
\int_{\mathbb{R}} e^{-it (\eta k + u )}
R^{n}_{s,it} \big( \varphi_{s,k,\ee_1}^y \big)(x)
\widehat {\Psi}^+_{s,\eta,\ee}(t) \widehat\rho_{\ee^{2}}(t) dt \right]
\rho_{\ee^2}(u) du.
\end{align}
In order to handle the above integral, we first use the Lebesgue dominated convergence theorem
to interchange the limit $n \to \infty$ and the integral $\int_{|u| \geqslant \ee}$,
and then we apply Proposition \ref{Prop Rn limit1}.
An important issue is to find a dominating function, which can be done as follows.
We split the integral $\int_{|u| \geqslant \ee}$ on the right hand side of \eqref{Pf_Upper_Un-}
into two parts: $\int_{\ee \leqslant |u| \leqslant \sqrt{n}}$ and $\int_{ |u| > \sqrt{n}}$.
For the first part, by elementary calculations it holds that
$e^{- nh_s( \eta \frac{k}{n} + \frac{u}{n})} \to 1$,
uniformly in $s \in K_{\mu}$, $1 \leqslant k \leqslant M_n$ and $|u| \leqslant \sqrt{n}$ as $n \to \infty$.
Hence, using Proposition \ref{Prop Rn limit1}, the function on the right hand side of
\eqref{Pf_Upper_Un-} under the integral $\int_{\ee \leqslant |u| \leqslant \sqrt{n}}$ is dominated by
$C \rho_{\ee^2}$, which is integrable on $\mathbb{R}$.
For the second part $\int_{ |u| > \sqrt{n}}$,
since the density function $\rho$ has
polynomial decay, i.e. $\rho_{\ee^2}(u) \leqslant \frac{C}{1 + u^4}$, $|u| > \sqrt{n}$,
we get that $\sqrt{n} \rho_{\ee^2}(u) \leqslant \frac{C}{1 + |u|^3}$,
which is clearly integrable on $\mathbb{R}$.
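Here we used that $\sqrt{n} \leqslant |u|$ on this range and that $|u| (1 + |u|^{3}) \leqslant 2 (1 + u^{4})$ for all $u \in \mathbb{R}$, so that
\begin{align*}
\sqrt{n}\, \rho_{\ee^2}(u) \leqslant \frac{C |u|}{1 + u^{4}} \leqslant \frac{2 C}{1 + |u|^{3}}, \qquad |u| > \sqrt{n}.
\end{align*}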
Therefore, we can pass the limit as $n\to \infty$ under the integration $\int_{|u|\geqslant \ee}$
and then we use Proposition \ref{Prop Rn limit1} to obtain the desired upper bound for $A_4$:
uniformly in $s \in K_{\mu}$,
\begin{align*}
\sum_{k =1}^{\infty} \limsup_{ n \to \infty} A_4
& \leqslant (1+ C_{\rho}(\ee)) \sum_{k=1}^{ \infty } e^{-s \eta k}
\pi_{s} \big( \varphi_{s,k,\ee_1}^y \big) \nonumber\\
& \quad \times \widehat \Psi^+_{s,\eta,\ee}(0) \widehat\rho_{\ee^{2}}(0)
\int_{|u| \geqslant \ee} \rho_{\ee^2}(u)du.
\end{align*}
In the same way as in the proof of \eqref{Pf_LD_pi_s_hh},
by Lemmas \ref{Lem_Gui_LeP} and \ref{Lem_0-1_law_s_Posi_Uni}, we can get that uniformly in $s \in K_{\mu}$,
\begin{align*}
\lim_{\eta \to 0}
\lim_{\ee_1 \to 0} \sum_{k =1}^{\infty} e^{-s\eta k}
\pi_{s} \big( \tilde \varphi_{s,k,\ee_1}^y \big)
= \int_{ \bb P^{d-1} } \delta(y, x)^s \varphi(x) r_s^{-1}(x) \pi_s(dx).
\end{align*}
Using \eqref{Pf_LD_psi_limit} and noting that $\widehat\rho_{\ee^{2}}(0) = 1$,
it follows that uniformly in $s \in K_{\mu}$,
\begin{align*}
\lim_{\eta \to 0} \lim_{\ee_1 \to 0}
\sum_{k =1}^{\infty} \limsup_{ n \to \infty} A_4
& \leqslant (1+ C_{\rho}(\ee))
\int_{ \bb P^{d-1} } \delta(y, x)^s \varphi(x) r_s^{-1}(x) \pi_s(dx) \nonumber\\
& \quad \times \int_{\mathbb{R}} e^{-su} \psi(u) du
\int_{|u| \geqslant \ee} \rho_{\ee^2}(u)du.
\end{align*}
Since $C_{\rho}(\ee) \to 0$ and $\int_{|u| \geqslant \ee} \rho_{\ee^2}(u)du \to 0$ as $\ee \to 0$,
this implies
\begin{align*}
\lim_{\ee \to 0}
\lim_{\eta \to 0} \lim_{\ee_1 \to 0}
\sum_{k =1}^{\infty} \limsup_{ n \to \infty} A_4 = 0.
\end{align*}
Combining this with \eqref{Scal Low An 1} and \eqref{Scal Low Bn},
we get the desired lower bound for $A_2$:
uniformly in $s \in K_{\mu}$,
$f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
\begin{align*}
\lim_{\ee \to 0} \lim_{\eta \to 0}
\lim_{\ee_1 \to 0} \liminf_{n \to \infty} A_2
\geqslant \int_{\mathbb{R}} e^{-su} \psi(u) du
\int_{ \bb P^{d-1} } \delta(y, x)^s \varphi(x) r_s^{-1}(x) \pi_s(dx).
\end{align*}
This, together with \eqref{PosiScalA_ccc}, \eqref{ScalInverAn 2} and \eqref{ScaProLimBn Upper 01},
proves the desired asymptotic
\eqref{SCALREZ02_Uni_s}.
This concludes the proof of Theorem \ref{Thm_BRP_Uni_s} as well as Theorem \ref{Thm_BRP_Upper}.
\end{proof}
\subsection{Proof of Theorem \ref{Thm_Coeff_BRLD_changedMea}}
\begin{proof}[Proof of Theorem \ref{Thm_Coeff_BRLD_changedMea}]
It suffices to prove \eqref{LD_Upper_ChangeMea002}
since \eqref{LD_Upper_ChangeMea001} follows from \eqref{LD_Upper_ChangeMea002}
by taking $\varphi = \bf 1$ and $\psi(u) = \bbm{1}_{\{u \geqslant 0\}}$.
Using the change of measure formula \eqref{basic equ1} twice, we get
\begin{align}\label{Pf_BRP_Changed_ff}
& \mathbb{E}_{\bb Q_s^x}
\Big[ \varphi(G_n x) \psi \big( \log |\langle f, G_n v \rangle| - nq_t \big) \Big] \nonumber\\
& = \frac{1}{\kappa^n(s) r_s(x)}
\mathbb{E}
\left[ (\varphi r_s)(G_n x) e^{s \sigma (G_n, x)}
\psi \big( \log |\langle f, G_n v \rangle| - nq_t \big) \right] \nonumber\\
& = \frac{ \kappa^n(t) r_t(x) }{\kappa^n(s) r_s(x)}
\mathbb{E}_{\mathbb Q_t^x}
\left[ (\varphi r_s r_t^{-1})(G_n x)
e^{-(t-s) \sigma(G_n, x)}
\psi \big( \log |\langle f, G_n v \rangle| - nq_t \big) \right] \nonumber\\
& = \frac{ \kappa^n(t) r_t(x) }{\kappa^n(s) r_s(x)} e^{ -(t-s) n q_t} \times \nonumber\\
& \quad \mathbb{E}_{\mathbb Q_t^x}
\left[ (\varphi r_s r_t^{-1})(G_n x)
e^{-(t-s) (\sigma(G_n, x) - n q_t)}
\psi \big( \log |\langle f, G_n v \rangle| - nq_t \big) \right],
\end{align}
where $\mathbb Q_t^x$ is the changed measure defined
in the same way as $\mathbb Q_s^x$ with $s$ replaced by $t$.
Following the proof of Theorem \ref{Thm_BRP_Upper}, one can verify that,
as $n \to \infty$,
uniformly in $t \in K_{\mu}$, $f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
\begin{align*}
& \mathbb{E}_{\mathbb Q_t^x}
\left[ (\varphi r_s r_t^{-1})(G_n x)
e^{-(t-s) (\sigma(G_n, x) - n q_t)}
\psi \big( \log |\langle f, G_n v \rangle| - nq_t \big) \right] \nonumber\\
& = \frac{1}{\sigma_t \sqrt{2 \pi n}}
\left[ \int_{ \bb P^{d-1} } \varphi(x) \delta(y, x)^t \, \frac{r_s(x)}{r_t(x)} \pi_t(dx)
\int_{\mathbb{R}} e^{- (t-s) u} \psi(u) du + o(1) \right].
\end{align*}
The result follows by taking into account that
$\Lambda^*(q_s) = sq_s - \Lambda(s)$, $\Lambda^*(q_t) = tq_t - \Lambda(t)$,
$\Lambda(s) = \log \kappa(s)$ and $\Lambda(t) = \log \kappa(t)$.
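Indeed, by these relations,
\begin{align*}
\frac{ \kappa^n(t) }{ \kappa^n(s) }\, e^{ -(t-s) n q_t }
= \exp \big\{ n \Lambda(t) - n \Lambda(s) - n (t-s) q_t \big\}
= \exp \big\{ -n \big( \Lambda^*(q_t) - \Lambda^*(q_s) - s (q_t - q_s) \big) \big\},
\end{align*}
so that the prefactor in \eqref{Pf_BRP_Changed_ff} equals
$\exp \{ -n ( \Lambda^*(q_t) - \Lambda^*(q_s) - s (q_t - q_s) ) \}\, r_t(x)/r_s(x)$.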
\end{proof}
\section{Proof of lower tail large deviations for coefficients} \label{sec_Pf_Low_LD}
The goal of this section is to establish
Theorems \ref{Thm-Posi-Neg-s} and \ref{Thm-Posi-Neg-sBRP} on Bahadur-Rao-Petrov type lower tail large deviations.
In contrast to the proof of Theorems \ref{thrmBR001} and \ref{Thm_BRP_Upper},
it turns out that the proof of Theorems \ref{Thm-Posi-Neg-s} and \ref{Thm-Posi-Neg-sBRP} is more delicate.
It suffices to prove Theorem \ref{Thm-Posi-Neg-sBRP}
since Theorem \ref{Thm-Posi-Neg-s} is a
direct consequence of Theorem \ref{Thm-Posi-Neg-sBRP}
by taking $l = 0$, $\varphi = \bf 1$ and $\psi(u) = \bbm{1}_{ \{u \leqslant 0\} }$, $u \in \bb R$.
\subsection{Proof of Theorem \ref{Thm-Posi-Neg-sBRP}}
We shall need the H\"{o}lder regularity of the stationary measure $\pi_s$
(for sufficiently small $s$)
recently established in \cite{GQX20}.
\begin{lemma}\label{Lem_Regu_pi_s}
Assume conditions \ref{Condi-TwoExp} and \ref{Condi-IP}.
Then, for any $\ee >0$, there exist constants $s_0 >0$, $k_0 \in \bb N$ and $c, C >0$ such that
for all $s \in (-s_0, s_0)$, $n \geqslant k \geqslant k_0$, $y \in (\bb P^{d-1})^*$ and $x \in \bb P^{d-1}$,
\begin{align}\label{Regu_pi_s}
\bb Q_s^x \Big( \log \delta(y, G_n x) \leqslant -\ee k \Big) \leqslant C e^{- ck}.
\end{align}
\end{lemma}
Note that \eqref{Regu_pi_s} is stronger than the following assertion
of the H\"{o}lder regularity of the stationary measure $\pi_s$:
there exist constants $s_0, \alpha > 0$ such that
\begin{align*}
\sup_{ s\in (-s_0, s_0) } \sup_{y \in (\bb{P}^{d-1})^* }
\int_{\bb{P}^{d-1} } \frac{1}{ \delta(y, x)^{\alpha} } \pi_s(dx) < +\infty.
\end{align*}
As an application of Lemma \ref{Lem_Regu_pi_s},
we show the following result on high-order negative moments of $\delta(y, G_n x)$
under the changed measure $\bb Q_s^x$,
which will play an important role in the proof of Theorem \ref{Thm-Posi-Neg-sBRP}.
\begin{lemma}\label{Lem_Inte_Regu_a}
Assume conditions \ref{Condi-TwoExp} and \ref{Condi-IP}.
Let $p>0$ be any fixed constant.
Then, there exists a constant $s_0 > 0$ such that
\begin{align*}
\sup_{n \geqslant 1} \sup_{ s\in (-s_0, s_0) } \sup_{y \in (\bb{P}^{d-1})^* }
\sup_{x \in \bb P^{d-1} }
\bb E_{\bb Q_s^x} \left( \frac{1}{ \delta(y, G_n x)^{p|s|} } \right) < + \infty.
\end{align*}
\end{lemma}
\begin{proof}
By Lemma \ref{Lem_Regu_pi_s},
for any $\ee >0$, there exist constants $s_0 >0$, $k_0 \in \bb N$ and $c, C >0$ such that
for all $s \in (-s_0, s_0)$, $n \geqslant k \geqslant k_0$ and $y \in (\bb P^{d-1})^*$, $x \in \bb P^{d-1}$,
\begin{align}\label{Regu_Q_sx_bb}
\bb Q_s^x \Big( \delta(y, G_n x) \leqslant e^{-\ee k} \Big) \leqslant C e^{- ck}.
\end{align}
For any $y \in (\bb P^{d-1})^*$ and $k \geqslant k_0$, we denote
\begin{align*}
B_{n,k} = \left\{ x \in \bb P^{d-1}: e^{- \ee (k+1)} \leqslant \delta(y, G_n x) \leqslant e^{- \ee k} \right\}.
\end{align*}
By \eqref{Regu_Q_sx_bb}, it follows that there exist constants $c, C >0$ such that for all $s \in (-s_0, s_0)$,
\begin{align*}
& \bb E_{\bb Q_s^x} \left( \frac{1}{ \delta(y, G_n x)^{p|s|} } \right) \\
& = \bb E_{\bb Q_s^x} \left( \frac{1}{ \delta(y, G_n x)^{p|s|} }
\bbm{1}_{\{ \delta(y, G_n x) > e^{- \ee k_0} \} } \right)
+ \sum_{k = k_0}^{\infty} \bb E_{\bb Q_s^x} \left( \frac{1}{ \delta(y, G_n x)^{p|s|} }
\bbm{1}_{B_{n,k}} \right) \\
& \leqslant e^{\ee k_0 p|s|} + C \sum_{k = k_0}^{\infty} e^{\ee (k+1) p|s|} e^{-ck},
\end{align*}
which is finite provided $s_0 > 0$ is taken small enough that $\ee p s_0 < c$. This proves Lemma \ref{Lem_Inte_Regu_a}.
\end{proof}
Now we are in a position to establish Theorem \ref{Thm-Posi-Neg-sBRP}.
In the same spirit as in Theorem \ref{Thm_BRP_Uni_s},
we are able to prove the following result which is stronger than Theorem \ref{Thm-Posi-Neg-sBRP}.
\begin{theorem} \label{Thm_BRP_Neg_Uni}
Assume conditions \ref{Condi-TwoExp} and \ref{Condi-IP}.
Then, there exists a constant $s_0>0$ such that for any compact set $K_{\mu} \subset (-s_0, 0)$,
we have, as $n \to \infty$,
uniformly in $s \in K_{\mu}$, $f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
\begin{align*}
\mathbb{P} \Big( \log |\langle f, G_n v \rangle| \leqslant n q \Big)
= \frac{ r_{s}(x) r^*_{s}(y)}{\varrho_s}
\frac{ \exp \left( -n \Lambda^*(q) \right) } { -s \sigma_{s}\sqrt{2\pi n}}
\big[ 1 + o(1) \big].
\end{align*}
More generally, for any $\varphi \in \mathcal{B}_{\gamma}$ and any measurable function $\psi$ on $\mathbb{R}$
such that $u \mapsto e^{-s'u} \psi(u)$ is directly Riemann integrable for all
$s' \in K_{\mu}^{\epsilon} : = \{ s' \in \bb R: |s' - s| < \epsilon, s \in K_{\mu} \}$ with $\epsilon >0$ small enough,
we have,
as $n \to \infty$, uniformly in $s \in K_{\mu}$,
$f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
\begin{align*}
& \mathbb{E} \Big[ \varphi(G_n x) \psi \big( \log |\langle f, G_n v \rangle| - n q \big) \Big] \nonumber\\
& = \frac{r_{s}(x)}{\varrho_s}
\frac{ \exp (-n \Lambda^*(q)) }{ \sigma_{s}\sqrt{2\pi n}}
\left[ \int_{ \bb P^{d-1} } \varphi(x) \delta(y,x)^s \nu_s(dx)
\int_{\mathbb{R}} e^{-su} \psi(u) du + o(1) \right].
\end{align*}
\end{theorem}
\begin{proof}[Proof of Theorems \ref{Thm-Posi-Neg-sBRP} and \ref{Thm_BRP_Neg_Uni}]
We only need to prove Theorem \ref{Thm_BRP_Neg_Uni}
since Theorem \ref{Thm-Posi-Neg-sBRP} is a direct consequence of Theorem \ref{Thm_BRP_Neg_Uni}.
It suffices to prove the second assertion of Theorem \ref{Thm_BRP_Neg_Uni}, since
the first one
follows from the second by choosing $\varphi = \mathbf{1}$
and $\psi (u) = \mathbbm{1}_{ \{ u \leqslant 0 \} },$ $u \in \mathbb R.$
As in the proof of Theorem \ref{Thm_BRP_Upper},
we assume that the target functions $\varphi$ and $\psi$
are non-negative, and that
the function $\psi$ satisfies the condition \eqref{condition g}.
Since $f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
and $y = \bb R f$ and $x = \bb R v$,
we have $\log |\langle f, G_n v \rangle| = \log |G_n v| + \log \delta(y, G_n x).$
Hence we can replace the logarithm of the coefficient $\log |\langle f, G_n v \rangle|$
by the sum $\log |G_n v| + \log \delta(y, G_n x)$ as follows:
\begin{align*}
J : & = \sigma_s \sqrt{2\pi n} \frac{ e^{n \Lambda^*(q)} }{r_s(x)}
\mathbb{E} \Big[ \varphi(G_n x)\psi( \log |\langle f, G_n v \rangle| - n q ) \Big] \nonumber\\
& = \sigma_s \sqrt{2\pi n}
\frac{ e^{n \Lambda^*(q)} }{r_s(x)} \mathbb{E} \Big[ \varphi(G_n x)
\psi( \log |G_n v| + \log \delta(y, G_n x) - n q ) \Big].
\end{align*}
As in the proof of Theorem \ref{Thm_BRP_Upper},
we denote, for any $y = \bb R f \in (\bb P^{d-1})^*$ and $x = \bb R v \in \bb P^{d-1}$,
\begin{align*}
T_n^v: = \log |G_n v| - nq, \qquad Y_n^{x,y}: = \log \delta(y, G_n x).
\end{align*}
Taking into account that $q = \Lambda'(s)$ and $e^{n\Lambda^{*}(q)}=e^{nsq} \kappa^{-n}(s)$,
and using the change of measure formula \eqref{basic equ1},
we get
\begin{align} \label{ScaProLimAnLow01}
J = \sigma_s \sqrt{2\pi n}
\mathbb{E}_{\mathbb{Q}_{s}^{x}} \Big[ (\varphi r_{s}^{-1})(G_n x) e^{-s T_n^v}
\psi \big( T_{n}^v + Y_n^{x,y} \big) \Big].
\end{align}
For any fixed small constant $0< \eta <1$, denote $I_k: = (-\eta k, -\eta(k-1)]$, $k \geqslant 1$.
Let $M_n:= \floor{ C_1 \log n }$, where $C_1>0$ is a sufficiently large constant
and $\floor{a}$ denotes the integer part of $a \in \bb R$.
Then from \eqref{ScaProLimAnLow01} we have the following decomposition:
\begin{align}\label{PosiScalALow_decom}
J = J_1 + J_2,
\end{align}
where
\begin{align*}
& J_1 : = \sigma_s \sqrt{2\pi n} \mathbb{E}_{\mathbb{Q}_{s}^{x}}
\left[ (\varphi r_{s}^{-1})(G_n x) e^{-s T_n^v}
\psi \big( T_{n}^v + Y_n^{x,y} \big) \mathbbm{1}_{\{Y_n^{x, y} \leqslant -\eta M_n \}} \right],
\nonumber\\
& J_2 : = \sigma_s \sqrt{2\pi n} \sum_{k =1}^{M_n}
\mathbb{E}_{\mathbb{Q}_{s}^{x}}
\left[ (\varphi r_{s}^{-1})(G_n x) e^{-s T_n^v}
\psi \big( T_{n}^v + Y_n^{x,y} \big) \mathbbm{1}_{\{Y_n^{x,y} \in I_k \}} \right].
\end{align*}
\textit{Upper bound of $J_1$.}
Since the function $u \mapsto e^{-s' u} \psi(u)$ is directly Riemann integrable on $\mathbb{R}$ for all $s' \in K_{\mu}^{\epsilon}$ (in particular for $s' = s$),
one can verify that the function $u \mapsto e^{-s u} \psi(u)$ is bounded on $\mathbb{R}$
and hence there exists a constant $C >0$ such that for all $s \in (-s_0, 0]$,
\begin{align*}
e^{-s T_n^v} \psi( T_{n}^v + Y_n^{x,y} ) \leqslant C e^{ s Y_n^{x,y} }.
\end{align*}
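Indeed,
\begin{align*}
e^{-s T_n^v} \psi \big( T_{n}^v + Y_n^{x,y} \big)
= e^{ s Y_n^{x,y} }\, e^{-s ( T_{n}^v + Y_n^{x,y} ) } \psi \big( T_{n}^v + Y_n^{x,y} \big)
\leqslant e^{ s Y_n^{x,y} } \sup_{u \in \mathbb{R}} e^{-s u} \psi(u).
\end{align*}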
Hence, by the H\"{o}lder inequality, Lemma \ref{Lem_Regu_pi_s} and Lemma \ref{Lem_Inte_Regu_a},
we obtain that as $n \to \infty$, uniformly in $s \in (-s_0, 0]$,
\begin{align*}
J_1 & \leqslant C \sqrt{n} \, \left\{ \bb E_{\bb Q_s^x} \left( \frac{1}{ \delta(y, G_n x)^{-2s} } \right)
\bb Q_s^x \Big( \log \delta(y, G_n x) \leqslant -\eta \floor{C_1 \log n} \Big) \right\}^{1/2} \nonumber\\
& \leqslant C \sqrt{n} \, e^{-c_{\eta} \floor{C_1 \log n} } \to 0.
\end{align*}
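Here, the convergence to $0$ holds because $C_1$ is chosen sufficiently large: for instance, if $c_{\eta} C_1 > 1/2$, then
\begin{align*}
\sqrt{n}\, e^{ -c_{\eta} \floor{ C_1 \log n } }
\leqslant e^{ c_{\eta} }\, n^{ \frac{1}{2} - c_{\eta} C_1 } \to 0, \quad n \to \infty.
\end{align*}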
\textit{Upper bound of $J_2$.}
Following the proof of \eqref{Pf_LDScalA2_2}, one has
\begin{align*}
& J_2 \leqslant (1+ C_{\rho}(\ee))
\sigma_s \sqrt{\frac{n}{2\pi}} \,
\sum_{k =1}^{\infty} \mathbbm{1}_{ \{ k \leqslant M_n \} } e^{-s\eta (k-1)} \nonumber\\
& \qquad \quad \times \int_{\mathbb{R}} e^{-it \eta(k-1)}
R^{n}_{s,it} \big( \varphi_{s,k,\ee_1}^y \big)(x)
\widehat {\Psi}^+_{s, \eta, \ee}(t) \widehat\rho_{\ee^{2}}(t) dt,
\end{align*}
where $\varphi_{s,k,\ee_1}^y$ and $\Psi^+_{s, \eta, \ee}$
are respectively defined by \eqref{Pf_Baradur_LD_varph_11} and \eqref{Def_Psi_aaa}.
Since $|s|$ and $\gamma>0$ are sufficiently small, by elementary calculations we obtain that
the series $ \frac{\log n}{\sqrt{n}} \sum_{k = 1}^{M_n} e^{-s\eta (k-1)} \| \varphi_{s,k,\ee_1}^y \|_{\gamma}$
converges to $0$ as $n \to \infty$.
Hence, we can apply Proposition \ref{Prop Rn limit1} (2) instead of Proposition \ref{Prop Rn limit1} (1),
and follow the proof of \eqref{ScalarKeyProp 1}, \eqref{ScalarKeyProp 2} and \eqref{ScalarBn abc}
to obtain that uniformly in $s \in K_{\mu}$,
$f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
\begin{align} \label{ScalarBn_Low_J2}
\limsup_{n \to \infty} J_2
\leqslant (1+ C_{\rho}(\ee)) \widehat {\Psi}^+_{s, \eta, \ee}(0) \widehat\rho_{\ee^{2}}(0)
\sum_{k =1}^{\infty} e^{-s\eta (k-1)}
\pi_{s} \big( \varphi_{s,k,\ee_1}^y \big).
\end{align}
Then, we can proceed in a similar way as in the proof of
\eqref{LimsuBn a}, \eqref{ScalPosiMainpart}, \eqref{Pf_Upp_A1_ff} and \eqref{sum_0_1_law}.
One of the main differences is that in \eqref{Pf_Upp_A1_ff} we need to
use the H\"{o}lder regularity of the stationary measure $\pi_s$ stated in Lemma \ref{Lem_Regu_pi_s}
to justify the applicability of the Lebesgue dominated convergence theorem,
when we interchange the limit $\ee_1 \to 0$ and the sum over $k$ in \eqref{Pf_Upp_A1_ff}.
Another difference is that in \eqref{sum_0_1_law} it is necessary to
use the zero-one law for the stationary measure $\pi_s$ shown in
Lemma \ref{Lem_0-1_law_s_Neg_Uni} instead of Lemma \ref{Lem_0-1_law_s_Posi_Uni}.
Consequently, one can obtain the desired upper bound of $J_2$
which is similar to \eqref{ScaProLimBn Upper 01}:
uniformly in $s \in K_{\mu}$,
$f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
\begin{align*}
\lim_{\ee \to 0} \lim_{\eta \to 0}
\lim_{\ee_1 \to 0} \limsup_{n \to \infty} J_2
\leqslant \int_{\mathbb{R}} e^{-su} \psi(u) du
\int_{ \bb P^{d-1} } \delta(y, x)^s \varphi(x) r_s^{-1}(x) \pi_s(dx).
\end{align*}
The lower bound of $J_2$ can be carried out in a similar way and hence we omit the details.
\end{proof}
By Theorems \ref{Thm_BRP_Upper} and \ref{Thm-Posi-Neg-sBRP},
we now give a proof of Theorem \ref{Theorem local LD002}
on the local limit theorem with large deviations for coefficients $\langle f, G_n v \rangle$.
\begin{proof}[Proof of Theorem \ref{Theorem local LD002}]
The asymptotic \eqref{LLTLDa} follows from Theorem \ref{Thm_BRP_Upper}
by taking $\varphi = \bf 1$ and $\psi(u) = \bbm{1}_{ \{u \in [a_1, a_2]\} } (u)$, $u \in \bb R$.
In the same way, the asymptotic \eqref{LLTLDb}
is a direct consequence of Theorem \ref{Thm-Posi-Neg-sBRP}.
\end{proof}
\section{Proof of the H\"{o}lder regularity of the stationary measure} \label{Sec:regpositive}
In this section we prove Proposition \ref{PropRegularity}
on the H\"{o}lder regularity of the stationary measure $\pi_s$
for any $s \in I_{\mu}^{\circ}$.
This result is of independent interest and
plays a crucial role for establishing the precise large deviation asymptotics
for the coefficients $\langle f, G_n v \rangle$ under the changed measure $\bb Q_s^x$,
see Theorem \ref{Thm_BRP_Upper}.
The study of the regularity of the stationary measure $\nu$ defined by \eqref{mu station meas}
has attracted a great deal of attention;
see e.g. \cite{Aou11, BQ13, BQ16, BQ17, BL85, BFLM11, CPV93, DKW19, Gui90, GR85}.
As far as we know, there are three different approaches to establish
the regularity of $\nu$.
The first one is originally due to Guivarc'h \cite{Gui90},
see also \cite{BL85}.
The approach in \cite{Gui90} consists in investigating the asymptotic behaviors of the components
in the Cartan and Iwasawa decompositions of the random matrix product $M_n = g_1 \ldots g_n$.
The second one is developed in \cite{BFLM11} for the study of the regularity of
the stationary measure on the torus $\bb T^d = \bb R^d / \bb Z^d$,
and has been applied to the setting of products of random matrices in \cite{BQ16, BQ17},
where the large deviation bounds for the Iwasawa cocycle and for the Cartan projection play a crucial role.
The third one, recently developed in \cite{DKW19} for the special linear group $SL(2, \mathbb{C})$
consisting of complex $2 \times 2$ matrices with determinant one,
is based on the theory of super-potentials introduced in \cite{DS09}.
All of the results mentioned above are concerned with the regularity of the stationary measure $\nu$.
However, the regularity of the eigenmeasure $\nu_s$ or of the stationary measure $\pi_s$ for $s$ different from $0$
was not known before in the literature.
In order to prove Proposition \ref{PropRegularity},
we first extend some convergence results concerning
the Cartan and Iwasawa decompositions of the matrix product $M_n$
established earlier in \cite{BL85} under the measure $\mathbb P$,
to the framework of the changed measure $\mathbb{Q}_s$.
Similarly to \eqref{Def_Q_s_lll}, for any $s \in I_{\mu}$,
we define the conjugate Markov operator $Q_s^*$
as follows: for any $\varphi \in \mathcal{C}((\bb P^{d-1})^*)$,
\begin{align*}
Q_s^* \varphi (y) = \frac{1}{\kappa(s) r_s^*(y) } P_s^* (\varphi r_s^*) (y), \quad y \in (\bb{P}^{d-1})^*.
\end{align*}
Then $Q_s^*$ has a unique stationary measure $\pi_s^*$ given by
$\pi_s^*(\varphi) = \frac{\nu_s^* (\varphi r_s^*)}{\nu_s^* (r_s^*)}$
for any $\varphi \in \mathcal{C}((\bb P^{d-1})^*)$.
\subsection{Asymptotics for the Cartan decomposition} \label{Sec_Cartan}
Recall that $G_n = g_n \ldots g_1$.
We are going to investigate asymptotic behaviors of the components
of the Cartan decomposition of the transposed matrix product
\begin{align*}
G_n^* = g_1^* g_2^* \ldots g_n^*, \quad n \geqslant 1,
\end{align*}
where $g^*$ is the adjoint automorphism of the matrix $g$.
Let $K = SO(d,\mathbb{R})$ be the orthogonal group,
and $A^+$ be the set of diagonal matrices whose diagonal entries starting from the upper left corner
are strictly positive and decreasing.
With this notation, the well-known Cartan decomposition states that $GL(d, \mathbb{R}) = K A^+ K$.
The Cartan decomposition of $G_n^*$ is written as
$G_n^* = k_n a_n k_n'$, where $k_n, k_n' \in K$
and $a_n \in A^+$ with its diagonal elements (singular values)
satisfying $a_n^{1,1} \geqslant a_n^{2,2} \geqslant \ldots \geqslant a_n^{d,d} > 0$.
Note that the diagonal matrix $a_n$ is uniquely determined,
but the orthogonal matrices $k_n$ and $k_n'$ are not unique.
We choose one such decomposition of $G_n^*$.
Denote by $e_1^*, \ldots, e_d^*$ the dual basis of $(\bb R^d)^*$.
The vector $k_n e_1^* \in (\mathbb{P}^{d-1})^*$ is called the \emph{density point} of $G_n^*$.
It plays an important role in the study of products of random matrices: see \cite{BFLM11, BQ17}.
The following result shows that the density point converges almost surely to a
random variable $Z_s^*$ with law $\pi_s^*$ under the changed measure
$\mathbb{Q}_s := \int_{\mathbb{P}^{d-1}} \mathbb{Q}_s^x \pi_s(dx)$.
Note that by definition the measure $\mathbb{Q}_s$ is shift-invariant and ergodic
since $\pi_s$ is the unique stationary measure of the Markov operator $Q_s$.
Recall that $\delta(y,x) = \frac{|\langle f, v \rangle|}{|f||v|}$
for any $y = \bb R f \in (\bb P^{d-1})^*$ and $x = \bb R v \in \bb P^{d-1}$.
\begin{lemma}\label{LemConverThmQs}
Let $s \in I_{\mu}^{\circ}$.
Under condition \ref{Condi-IP}, with the above notation,
we have
\begin{align}\label{ConverThmQs}
\lim_{n \to \infty} \frac{ a_n^{2,2} }{ a_n^{1,1} } = 0, \ \mathbb{Q}_s\mbox{-}a.s.
\quad \mbox{and} \quad
\lim_{n \to \infty} k_n e_1^* = Z_s^*, \ \mathbb{Q}_s\mbox{-}a.s.,
\end{align}
and for any $x = \bb R v \in \mathbb{P}^{d-1}$ with $v \in \bb R^d \setminus \{0\}$,
\begin{align} \label{ConverThmQs02}
\lim_{ n \to \infty} \frac{ | G_n v | }{ \| G_n \| |v| } = \delta(Z_s^*, x),
\ \mathbb{Q}_s\mbox{-}a.s.,
\end{align}
where the law of the random variable $Z_s^*$ (on $(\mathbb{P}^{d-1})^*$) is the stationary measure $\pi_s^*$.
Moreover, the assertions \eqref{ConverThmQs} and \eqref{ConverThmQs02} also hold true with the measure $\mathbb{Q}_s$
replaced by $\mathbb{Q}_s^x$, for any starting point $x \in \mathbb{P}^{d-1}$.
\end{lemma}
Before proceeding to the proof of Lemma \ref{LemConverThmQs},
let us first recall the following two results, which were established in \cite{GL16}.
In the sequel,
let $m^*$ be the unique rotation invariant probability measure on the projective space $(\mathbb{P}^{d-1})^*$.
For any matrix $g \in GL(d, \mathbb{R})$, denote by $g^* m^*$ the probability measure on $(\mathbb{P}^{d-1})^*$
such that for any measurable function $\varphi$ on $(\mathbb{P}^{d-1})^*$,
\begin{align*}
\int_{(\mathbb{P}^{d-1})^* } \varphi(y) (g^* m^*) (dy) = \int_{(\mathbb{P}^{d-1})^* } \varphi( g^* y ) m^*(dy).
\end{align*}
\begin{lemma}\label{Lem_DiracMea}
Assume condition \ref{Condi-IP}. Let $s \in I_{\mu}^{\circ}$. Then,
the probability measure $G_n^* m^*$ converges weakly to the Dirac measure $\delta_{Z_s^*}$, $\mathbb{Q}_s$-a.s.,
where the law of the random variable $Z_s^*$ under the measure $\mathbb{Q}_s$ is given by $\pi_s^*$.
\end{lemma}
\begin{proof}
This result has been recently established in \cite[Theorem 3.2]{GL16}.
\end{proof}
The following result is proved in \cite[Lemma 3.5]{GL16}.
\begin{lemma}\label{Lem_AbsoConti}
Assume condition \ref{Condi-IP}. Let $s \in I_{\mu}^{\circ}$. Then,
there exists a constant $c_s>0$ such that for any $x \in \mathbb{P}^{d-1}$,
it holds that $\mathbb{Q}_s^x \leqslant c_s \mathbb{Q}_s$.
\end{lemma}
The assertion of Lemma \ref{Lem_AbsoConti} implies that
the measure $\mathbb{Q}_s^x$ is absolutely continuous with respect to $\mathbb{Q}_s$.
Using Lemmas \ref{Lem_DiracMea} and \ref{Lem_AbsoConti}, we are now in a position to prove Lemma \ref{LemConverThmQs}.
\begin{proof}[Proof of Lemma \ref{LemConverThmQs}]
By the Cartan decomposition of $G_n^*$, we have $G_n^* = k_n a_n k_n'$,
where $k_n, k_n' \in K$ and $a_n \in A^+$.
By Lemma \ref{Lem_DiracMea},
the probability measure $G_n^* m^*$ converges weakly to the Dirac measure $\delta_{Z_s^*}$, $\mathbb{Q}_s$-a.s..
Since $m^*$ is a rotation invariant measure on $(\mathbb{P}^{d-1})^*$,
it follows that $(k_n a_n) m^*$ converges weakly to the Dirac measure $\delta_{Z_s^*}$, $\mathbb{Q}_s$-a.s.
Taking into account that
$a_n$ is a diagonal random matrix with decreasing diagonal entries,
we deduce that, as $n \to \infty$, we have
$a_n m^* \to \delta_{e_1^*}$,
$a_n^{2,2} / a_n^{1,1} \to 0$ and $k_n e_1^* \to Z_s^*$, $\mathbb{Q}_s$-a.s..
This concludes the proof of the assertion \eqref{ConverThmQs}.
To show \eqref{ConverThmQs02}, using again the decomposition $G_n^* = k_n a_n k_n'$,
it follows that for any $x = \bb R v \in \mathbb{P}^{d-1}$,
\begin{align*}
\frac{|G_n v|^2}{|v|^2} = \frac{\langle a_n k_n^* v, a_n k_n^* v \rangle}{|v|^2}
= \sum_{j=1}^d ( a_n^{j,j} )^2 \frac{| \langle k_n^* v, e_j^* \rangle |^2}{|v|^2}
= \sum_{j=1}^d ( a_n^{j,j} )^2 \delta(k_n e_j^*, x)^2.
\end{align*}
This, together with the fact that $\| G_n \| = a_n^{1,1}$, implies \eqref{ConverThmQs02}.
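Indeed, dividing the above identity by $\| G_n \|^2 = ( a_n^{1,1} )^2$ and using \eqref{ConverThmQs} together with $\delta \leqslant 1$, we obtain, $\mathbb{Q}_s$-a.s.,
\begin{align*}
\frac{ | G_n v |^2 }{ \| G_n \|^2 |v|^2 }
= \delta(k_n e_1^*, x)^2 + \sum_{j=2}^d \Big( \frac{ a_n^{j,j} }{ a_n^{1,1} } \Big)^2 \delta(k_n e_j^*, x)^2
\longrightarrow \delta(Z_s^*, x)^2, \quad n \to \infty.
\end{align*}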
Taking into account Lemma \ref{Lem_AbsoConti}, we see that
the assertions \eqref{ConverThmQs} and \eqref{ConverThmQs02} remain valid with the measure $\mathbb{Q}_s$
replaced by $\mathbb{Q}_s^x$.
\end{proof}
\subsection{Asymptotics for the Iwasawa decomposition} \label{Sec_Iwasawa}
In this subsection we study the asymptotics of the components
in the Iwasawa decomposition of $G_n^*$ under the changed measure $\mathbb{Q}_s^x$.
Denote by $L$ the group of lower triangular matrices with $1$'s on the diagonal,
by $A$ the group of diagonal matrices with strictly positive diagonal entries,
and, as before, by $K$ the group of orthogonal matrices.
The Iwasawa decomposition states that $GL(d, \mathbb{R}) = LAK$,
and this decomposition is unique.
Hence, for the product $G_n^*$, there exist unique $L(G_n^*) \in L$,
$A(G_n^*) \in A$ and $K(G_n^*) \in K$ such that $G_n^* = L(G_n^*) A(G_n^*) K(G_n^*)$.
The following result shows that $L(G_n^*) e_1^*$ converges almost surely
under the measures $\mathbb{Q}_s$ and $\mathbb{Q}_s^x.$
\begin{lemma}\label{LemIwasaLim}
Let $s \in I_{\mu}^{\circ}$.
Under condition \ref{Condi-IP}, for any $x \in \mathbb{P}^{d-1}$,
\begin{align*}
\lim_{n \to \infty} L(G_n^*) e_1^*
= \frac{ Z_s^* }{ \langle Z_s^*, e_1 \rangle},
\quad \mathbb{Q}_s\mbox{-a.s.} \ \mbox{and} \quad \mathbb{Q}_s^x\mbox{-a.s.},
\end{align*}
where $Z_s^*$ is a random variable given by Lemma \ref{LemConverThmQs}.
\end{lemma}
\begin{proof}
In view of Lemma \ref{Lem_AbsoConti}, it suffices to prove the assertion under the measure $\mathbb{Q}_s$.
Using the Iwasawa decomposition $G_n^* = L(G_n^*) A(G_n^*) K(G_n^*)$
and noticing that $K(G_n^*)$ is an orthogonal matrix, it follows that
\begin{align}\label{Pf_Reg_Eq1}
\frac{G_n^* G_n e_1^* }{ | G_n e_1^* |^2 }
= \frac{ L(G_n^*) A(G_n^*)^2 L(G_n^*)^* e_1^* }
{ | A(G_n^*) L(G_n^*)^* e_1^* |^2}
= L(G_n^*) e_1^*,
\end{align}
where the second equality holds due to the fact that
$A(G_n^*)^2 L(G_n^*)^* e_1^* = | A(G_n^*) L(G_n^*)^* e_1^*|^2 e_1^*$.
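Indeed, since $L(G_n^*)^*$ is upper triangular with unit diagonal, we have $L(G_n^*)^* e_1^* = e_1^*$, and hence
\begin{align*}
A(G_n^*)^2 L(G_n^*)^* e_1^*
= A(G_n^*)^2 e_1^*
= | A(G_n^*) e_1^* |^2 e_1^*
= | A(G_n^*) L(G_n^*)^* e_1^* |^2 e_1^*.
\end{align*}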
By the Cartan decomposition of $G_n^*$ we have $G_n^* = k_n a_n k_n'$,
where $k_n, k_n'$ are two orthogonal matrices.
Hence,
for any $v \in (\mathbb{R}^d)^*$,
\begin{align}\label{Pf_Reg_Equaa}
\langle G_n^* G_n e_1^*, v \rangle
& = \langle (a_n)^2 k_n^* e_1^*, k_n^* v \rangle \nonumber\\
& = ( a_n^{1,1} )^2 \langle k_n^* e_1^*, e_1 \rangle \langle e_1^*, k_n^* v \rangle
+ O( a_n^{1,1} a_n^{2,2} ) \nonumber\\
& = ( a_n^{1,1} )^2 \langle k_n^* e_1^*, e_1 \rangle \langle k_n e_1^*, v \rangle + O( a_n^{1,1} a_n^{2,2} ).
\end{align}
Consequently, by \eqref{Pf_Reg_Eq1} and \eqref{Pf_Reg_Equaa} we obtain that $\mathbb{Q}_s$-a.s.,
\begin{align*}
\lim_{n \to \infty} \langle L(G_n^*) e_1^*, v \rangle
= \lim_{n \to \infty} \frac{\langle G_n^* G_n e_1^*, v \rangle}{\langle G_n^* G_n e_1^*, e_1^* \rangle}
= \lim_{n \to \infty} \frac{\langle k_n e_1^*, v \rangle}{\langle k_n e_1^*, e_1^* \rangle}
= \frac{\langle Z_s^*, v \rangle}{\langle Z_s^*, e_1^* \rangle},
\end{align*}
where in the first equality we used \eqref{Pf_Reg_Eq1}, in the second one we used \eqref{Pf_Reg_Equaa}
and Lemma \ref{LemConverThmQs},
and in the last one we applied again Lemma \ref{LemConverThmQs}.
Since $v \in (\mathbb{R}^d)^*$ is arbitrary, the proof of Lemma \ref{LemIwasaLim} is complete.
\end{proof}
For any $1 \leqslant k \leqslant d$,
we briefly recall the notion of the exterior power $\wedge^k (\mathbb{R}^d)$ of the vector space $\mathbb{R}^d$.
The space $\wedge^k (\mathbb{R}^d)$ is endowed with the dual bracket $\langle \cdot, \cdot \rangle$
and the norm $| \cdot |$; we use the same notation as in $\mathbb{R}^d$
and the distinction should be clear from the context.
The scalar product in $\wedge^k (\mathbb{R}^d)$ satisfies the following property:
for any $u_i$, $v_j \in \mathbb{R}^d$, $1 \leqslant i, j \leqslant k$,
\begin{align*}
\langle u_1 \wedge \cdots \wedge u_k, v_1 \wedge \cdots \wedge v_k \rangle
= \det( \langle u_i, v_j \rangle )_{1 \leqslant i, j \leqslant k},
\end{align*}
where $\det( \langle u_i, v_j \rangle )_{1 \leqslant i, j \leqslant k}$ denotes the determinant of the associated $k \times k$ matrix.
It is well known that
$\{ e_{i_1} \wedge e_{i_2} \wedge \cdots \wedge e_{i_k}, 1 \leqslant i_1 < i_2 < \cdots < i_k \leqslant d \}$
forms a basis of $\wedge^k (\mathbb{R}^d)$, $1 \leqslant k \leqslant d$,
and that $v_1 \wedge \cdots \wedge v_k$ is nonzero if and only if $v_1, \ldots, v_k$
are linearly independent in $\mathbb{R}^d$.
For any $g \in GL(d, \mathbb{R})$ and $1 \leqslant k \leqslant d$,
the exterior product $\wedge^k g$ of the matrix $g$
is defined as follows: for any $v_1, \ldots, v_k \in \mathbb{R}^d$,
\begin{align*}
\wedge^k g ( v_1 \wedge \cdots \wedge v_k ) = g v_1 \wedge \cdots \wedge g v_k.
\end{align*}
Set $\| \wedge^k g \| = \sup \{ | (\wedge^k g) v |: v \in \wedge^k (\mathbb{R}^d), |v| =1 \}$.
Since $\wedge^k (g g') = (\wedge^k g) ( \wedge^k g' )$,
it holds that $\| \wedge^k (g g') \| \leqslant \| \wedge^k g \| \| \wedge^k g' \|$
for any $g, g' \in GL(d, \mathbb{R})$.
Besides, if we denote by $a_{11} \geqslant \ldots \geqslant a_{dd}$ the singular values of the matrix $g$, arranged in non-increasing order,
then $\| \wedge^k g \| = a_{11} \ldots a_{kk}$.
In particular, we have $\| \wedge^k g \| \leqslant \| g \|^k$.
The following lemma
was proved in \cite{BL85}.
For any $g \in GL(d,\mathbb{R})$, by the Iwasawa decomposition we have
$g = L(g) A(g) K(g)$, where $L(g) \in L$, $A(g) \in A$ and $K(g) \in K$.
In the sequel, we denote $N(g) = \max \{ \| g \|, \| g^{-1}\| \}$.
\begin{lemma}\label{LemCruciIne}
For any integers $n, m \geqslant 0$, we have
\begin{align*}
\left| L(G_{n+m}^*) e_1^* - L(G_{n}^*) e_1^* \right|
\leqslant \sum_{j=n}^{n+m-1} \frac{\|\wedge^2 G_j \|}{ | G_j e_1 |^2 } e^{2 \log N(g_{j+1}^* )},
\end{align*}
where we use the convention that $L(G_0) = 0$
and $\frac{\|\wedge^2 G_0 \|}{ | G_0 e_1 |^2 } = 0$.
\end{lemma}
The following result shows the simplicity of the dominant Lyapunov exponent
for $G_n$ under the changed measure $\mathbb{Q}_s^x$.
\begin{lemma}\label{Lem_Lya_Meas}
Assume condition \ref{Condi-IP}.
Let $s \in I_{\mu}^{\circ}$.
Then, uniformly in $x = \bb R v \in \mathbb{P}^{d-1}$,
\begin{align}\label{LLNMeas01}
\lim_{n \to \infty} \frac{1}{n} \mathbb{E}_{\mathbb{Q}_s^x} (\sigma(G_n, x)) = \lambda_1(s),
\end{align}
and
\begin{align}\label{LLNMeas02}
\lim_{n \to \infty} \frac{1}{n} \mathbb{E}_{ \mathbb{Q}_s^x }(\log \| \wedge^2 G_n \|)
= \lambda_1(s) + \lambda_2(s),
\end{align}
where $\lambda_1(s) > \lambda_2(s)$ are called the first two Lyapunov exponents of $G_n$ under
the measure $\mathbb{Q}_s^x$.
\end{lemma}
The assertion \eqref{LLNMeas01} is proved in \cite[Theorem 3.10]{GL16}.
The assertion \eqref{LLNMeas02} follows by combining Theorems 3.10 and 3.17 in \cite{GL16}.
The fact that $\lambda_1(s) > \lambda_2(s)$ will play an essential role
in the proof of the H\"older regularity of the stationary measure $\pi_s$, see Proposition \ref{PropRegularity}.
Using the simplicity of the Lyapunov exponent (see Lemma \ref{Lem_Lya_Meas})
we can complement the convergence result
in Lemma \ref{LemIwasaLim}
by giving the rate of convergence.
This result is not used in the proofs, but is of independent interest.
\begin{proposition}\label{Prop_L1Conver}
Assume condition \ref{Condi-IP}. Let $s \in I_{\mu}^{\circ}$.
Then, there exist constants $\alpha, C > 0$ such that
uniformly in $x \in \mathbb{P}^{d-1}$ and $n \geqslant 1$,
\begin{align}\label{Ine_L1Conv}
\mathbb{E}_{ \mathbb{Q}_s^x }
\left| L(G_n^*) e_1^* - \frac{ Z_s^* }{ \langle Z_s^*, e_1^* \rangle} \right|^{\alpha} \leqslant e^{ -Cn }.
\end{align}
Moreover, the assertion \eqref{Ine_L1Conv} remains valid when the measure $\mathbb{Q}_s^x$ is replaced by $\mathbb{Q}_s$.
\end{proposition}
The proof of Proposition \ref{Prop_L1Conver} is postponed to subsection \ref{Sec_Pf_Reg}.
By Jensen's inequality, the bound \eqref{Ine_L1Conv} implies that
there exists a constant $C>0$ such that uniformly in $x \in \mathbb{P}^{d-1}$,
\begin{align*}
\limsup_{ n \to \infty } \frac{1}{n} \mathbb{E}_{ \mathbb{Q}_s^x }
\log \left| L(G_n^*) e_1^* - \frac{ Z_s^* }{ \langle Z_s^*, e_1^* \rangle} \right| \leqslant -C.
\end{align*}
When $s = 0$, it was proved in \cite{BL85} that $C = \lambda_1(0) - \lambda_2(0)$.
We conjecture that $C = \lambda_1(s) - \lambda_2(s)$ also for $s>0$, but the proof eluded us.
\subsection{Proof of Propositions \ref{PropRegularity} and \ref{Prop_L1Conver}}\label{Sec_Pf_Reg}
With the results established in subsections \ref{Sec_Cartan} and \ref{Sec_Iwasawa},
we are well equipped to prove Propositions \ref{PropRegularity} and \ref{Prop_L1Conver}.
\begin{proof}[\textit{Proof of Proposition \ref{PropRegularity}}]
Since $r_s$ is bounded away from infinity and $0$ uniformly on $\mathbb{P}^{d-1}$,
it suffices to establish \eqref{RegularityIne00} and \eqref{RegularityIne} for the stationary measure $\pi_s$.
Define the function $\rho: GL(d, \mathbb{R}) \times \mathbb{P}^{d-1} \to \mathbb{R}$ as follows:
for $g \in GL(d, \mathbb{R})$ and $x \in \mathbb{P}^{d-1}$,
\begin{align*}
\rho(g, x) = \log \| \wedge^2 g \| - 2 \log | gx |.
\end{align*}
It is clear that
\begin{align*}
\mathbb{E}_{ \mathbb{Q}_s^x } \rho(G_n, x)
= \mathbb{E}_{ \mathbb{Q}_s^x } \big( \log \| \wedge^2 G_n \| \big)
- 2 \mathbb{E}_{ \mathbb{Q}_s^x } \big( \log |G_n x| \big).
\end{align*}
By Lemma \ref{Lem_Lya_Meas}, we see that
\begin{align*}
\lim_{ n \to \infty }
\frac{1}{n} \sup_{x \in \mathbb{P}^{d-1} }
\mathbb{E}_{ \mathbb{Q}_s^x } \rho(G_n, x ) <0,
\end{align*}
which clearly implies that, for large enough $n$,
\begin{align}\label{Pf-RegBoundNeg}
\sup_{x \in \mathbb{P}^{d-1} }
\mathbb{E}_{ \mathbb{Q}_s^x } \rho(G_n, x ) <0.
\end{align}
We claim that there exists a constant $\alpha > 0$ such that
\begin{align}\label{Pf-RegIne-b}
\limsup_{n \to \infty} \frac{1}{n} \log
\sup_{x \in \mathbb{P}^{d-1} }
\mathbb{E}_{ \mathbb{Q}_s^x }
\frac{ \| \wedge^2 G_n \|^{\alpha} }{|G_n x |^{2 \alpha} }
< 0.
\end{align}
To prove \eqref{Pf-RegIne-b}, we denote
$a_n = \log \big( \sup_{x \in \mathbb{P}^{d-1}}
\mathbb{E}_{ \mathbb{Q}_s^x } \big( e^{ \alpha \rho(G_n, x) } \big) \big)$,
for sufficiently small constant $\alpha >0$.
Using the cocycle property \eqref{cocycle01} and the fact that $\rho$ is subadditive,
we get that for any $n, m \geqslant 1$,
\begin{align*}
& \mathbb{E}_{ \mathbb{Q}_s^x } \big( e^{ \alpha \rho( G_{n+m}, x ) } \big) \nonumber\\
& \leqslant \mathbb{E} \Big( q_m^s(x, G_m) e^{ \alpha \rho( G_m, x ) } \Big)
\mathbb{E} \Big( q_n^s(x, g_{m+1} \ldots g_{m+n} )
e^{ \alpha \rho( g_{m+n} \ldots g_{m+1}, x ) } \Big) \nonumber\\
& = \mathbb{E}_{ \mathbb{Q}_s^x } \big( e^{ \alpha \rho( G_m, x ) } \big)
\mathbb{E}_{ \mathbb{Q}_s^x } \big( e^{ \alpha \rho( G_n, x ) } \big).
\end{align*}
Taking supremum on both sides of the above inequality,
we see that the sequence $(a_n)_{n\geqslant 1}$ satisfies the subadditive property: $a_{n+m} \leqslant a_m + a_n$.
Hence we get $a = \lim_{n \to \infty} \frac{a_n}{n} = \inf_{n\geqslant 1} \frac{a_n}{n}.$
To show that $a<0$, it suffices to check that there exists some integer $p \geqslant 1$ such that
\begin{align}\label{Pf-RegBound01}
\sup_{x \in \mathbb{P}^{d-1} }
\mathbb{E}_{ \mathbb{Q}_s^x } \big( e^{ \alpha \rho( G_{p}, x ) } \big) <1.
\end{align}
We proceed to verify \eqref{Pf-RegBound01}.
Using the fact
that $\sup_{x} |\rho(g, x)| \leqslant 4 \log N(g)$
and the basic inequality $e^y \leqslant 1 + y + \frac{y^2}{2} e^{|y|}$, $y \in \mathbb{R}$,
we obtain
\begin{align}\label{Pf_Regu_Inver_aa}
\mathbb{E}_{ \mathbb{Q}_s^x } \big( e^{ \alpha \rho( G_{p}, x ) } \big)
\leqslant 1 + \alpha \mathbb{E}_{ \mathbb{Q}_s^x } \big( \rho( G_{p}, x ) \big)
+ \frac{ {\alpha}^2}{2} \mathbb{E}_{ \mathbb{Q}_s^x }
\Big( 16 \log^2 N( G_{p} ) e^{ 4 \alpha \log N( G_{p} ) } \Big).
\end{align}
The second term on the right-hand side of \eqref{Pf_Regu_Inver_aa}
is strictly negative by using the bound \eqref{Pf-RegBoundNeg} and taking large enough $p$.
The third term is finite due to the moment condition \ref{Condi-TwoExp}.
Consequently, taking $\alpha > 0$ small enough, we obtain the inequality \eqref{Pf-RegBound01}
and thus the desired assertion \eqref{Pf-RegIne-b} follows.
Since the bound \eqref{Pf-RegIne-b} holds uniformly in $x \in \mathbb{P}^{d-1}$,
taking into account that $\mathbb{Q}_s = \int_{\mathbb{P}^{d-1}} \mathbb{Q}_s^x \pi_s(dx)$,
it follows that there exist constants $C>0$ and $0< r < 1$ such that
\begin{align}\label{RegIneq001}
\mathbb{E}_{\mathbb{Q}_s} \frac{ \|\wedge^2 G_n \|^{\alpha} }{|G_n x|^{2 \alpha} } \leqslant C r^n.
\end{align}
Using Lemma \ref{LemIwasaLim}, Fatou's lemma and the fact that $|Z_s^*| = 1$,
we obtain that for sufficiently small constant $\alpha >0$,
\begin{align}\label{Pf-Regu-Inv01}
\mathbb{E}_{\mathbb{Q}_s} \frac{ 1 }{ | \langle Z_s^*, e_1^* \rangle |^{\alpha} }
\leqslant \liminf_{n \to \infty}
\mathbb{E}_{\mathbb{Q}_s} \big( | L(G_n^*) e_1^* |^{\alpha} \big).
\end{align}
From Lemma \ref{LemCruciIne} with $n=0$, it follows that
\begin{align*}
| L(G_n^*) e_1^* |^{\alpha}
\leqslant \sum_{ j=1 }^{\infty} \frac{ \|\wedge^2 G_j \|^{\alpha} }{ | G_j e_1|^{2 \alpha} } e^{2 \alpha \log N(g_{j+1}^* )}.
\end{align*}
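Here we used Lemma \ref{LemCruciIne} with $n = 0$ and $m \to \infty$, together with the subadditivity of $t \mapsto t^{\alpha}$ on $[0, \infty)$:
\begin{align*}
\Big( \sum_{j} b_j \Big)^{\alpha} \leqslant \sum_{j} b_j^{\alpha}, \qquad b_j \geqslant 0, \ \alpha \in (0, 1],
\end{align*}
which applies since $\alpha > 0$ is taken sufficiently small.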
Notice that $G_j$ and $g_{j+1}^*$ are not independent under the measure $\mathbb{Q}_s$.
Using Fubini's theorem, H\"{o}lder's inequality and the bound \eqref{RegIneq001}, we get
\begin{align*}
\mathbb{E}_{\mathbb{Q}_s} \big( | L(G_n^*) e_1^* |^{\alpha} \big)
& \leqslant \sum_{ j=1 }^{\infty}
\left[ \mathbb{E}_{\mathbb{Q}_s} \frac{\|\wedge^2 G_j \|^{2 \alpha} }{ | G_j e_1|^{4 \alpha} } \right]^{1/2}
\left[ \mathbb{E}_{\mathbb{Q}_s} e^{4 \alpha \log N(g_{j+1}^* )} \right]^{1/2} \nonumber\\
& \leqslant C \mathbb{E}_{\mathbb{Q}_s} ( e^{4 \alpha \log N(g_{1}^* )} ) \sum_{j=1}^{\infty} r^j < + \infty.
\end{align*}
Combining this with \eqref{Pf-Regu-Inv01} leads to
$\mathbb{E}_{\mathbb{Q}_s} \frac{ 1 }{ | \langle Z_s^*, e_1^* \rangle |^{\alpha} } < + \infty.$
Note that for any $y \in (\mathbb{P}^{d-1})^*$, we can choose an orthogonal matrix $k$ such that
$k e_1^* = y$. If we replace $g_i^*$ by $k^{-1} g_i^* k$, then it is easy to see that
$G_n^*$ is replaced by $k^{-1} G_n^* k$.
Moreover, in view of Lemma \ref{Lem_DiracMea}, the random variable $Z_s^*$ is replaced by $k^{-1} Z_s^*$.
Since the bound \eqref{RegIneq001} holds uniformly in $x \in \mathbb{P}^{d-1}$, it follows that
\begin{align*}
\mathbb{E}_{\mathbb{Q}_s} \frac{ 1 }{ | \langle k^{-1} Z_s^*, e_1^* \rangle |^{\alpha} }
\leqslant C \mathbb{E}_{\mathbb{Q}_s} \big( e^{4 \alpha \log N( k^{-1} g_{1}^* k )} \big)
\sum_{j=1}^{\infty} r^j < + \infty.
\end{align*}
Observe that
$N( k^{-1} g_{1}^* k ) = N( g_{1}^*)$
and $\langle k^{-1} Z_s^*, e_1 \rangle = \langle Z_s^*, y \rangle$.
Therefore, for any $s \in I_{\mu}^{\circ}$, there exists a constant $\alpha > 0$ such that
\begin{align*}
\sup_{x \in \mathbb P^{d-1}} \int_{(\mathbb P^{d-1})^* } \frac{1}{ \delta(y, x)^{\alpha} } \pi_s^*(dy)
= \sup_{y \in (\mathbb P^{d-1})^* } \mathbb{E}_{\mathbb{Q}_s} \frac{ 1 }{ | \langle Z_s^*, y \rangle |^{\alpha} }
< + \infty.
\end{align*}
This implies that there exists a constant $C>0$ such that for any $0< t < 1$,
uniformly in $x \in \mathbb P^{d-1}$,
\begin{align*}
\pi_s^* \left( \left\{ y \in (\bb P^{d-1})^*: \delta(y, x) \leqslant t \right\} \right)
\leqslant t^{\alpha} \int_{ (\mathbb P^{d-1})^* } \frac{1}{ \delta(y, x)^{\alpha} } \pi_s^*(dy)
\leqslant C t^{\alpha}.
\end{align*}
The proof of Proposition \ref{PropRegularity} is complete.
\end{proof}
\begin{proof}[Proof of Proposition \ref{Prop_L1Conver}]
In view of Lemma \ref{Lem_AbsoConti}, it suffices to prove the assertion of the proposition with $\mathbb{Q}_s$ instead of $\mathbb{Q}_s^x$,
i.e.\ we show that there exist constants $\alpha, C>0$ such that for all $n \geqslant 1$,
\begin{align}\label{Ine_L1Conv02}
\mathbb{E}_{ \mathbb{Q}_s }
\left| L(G_n^*) e_1^* - \frac{ Z_s^* }{ \langle Z_s^*, e_1^* \rangle} \right|^{\alpha} < e^{ -Cn }.
\end{align}
Using Lemma \ref{LemCruciIne} and H\"{o}lder's inequality,
for sufficiently small constant $\alpha >0$ and for any $n, m \geqslant 1$, we get
\begin{align*}
& \mathbb{E}_{ \mathbb{Q}_s } \big| L(G_{n+m}^*) e_1^* - L(G_n^*) e_1^* \big|^{\alpha} \nonumber \\
& \leqslant \sum_{j=n}^{ n+m-1 }
\left[ \mathbb{E}_{\mathbb{Q}_s} \frac{\|\wedge^2 G_j \|^{2 \alpha} }{ | G_j e_1|^{4 \alpha} } \right]^{1/2}
\left[ \mathbb{E}_{\mathbb{Q}_s} e^{4 \alpha \log N(g_{j+1}^* )} \right]^{1/2} \nonumber\\
& \leqslant \ C \sum_{j=n}^{ n+m-1 }
\left[ \mathbb{E}_{\mathbb{Q}_s} \frac{\|\wedge^2 G_j \|^{2 \alpha} }{ | G_j e_1|^{4 \alpha} } \right]^{1/2},
\end{align*}
where the last inequality holds due to the moment condition \ref{Condi-TwoExp}.
By Fatou's lemma,
taking the limit as $m\to\infty$, we see that
\begin{align*}
\mathbb{E}_{ \mathbb{Q}_s }
\left| L(G_n^*) e_1^* - \frac{ Z_s^* }{ \langle Z_s^*, e_1^* \rangle} \right|^{\alpha}
\leqslant C \sum_{j=n}^{\infty}
\left[ \mathbb{E}_{\mathbb{Q}_s} \frac{\|\wedge^2 G_j \|^{2 \alpha} }
{ | G_j e_1|^{4 \alpha} } \right]^{1/2}
\leqslant C e^{ - Cn},
\end{align*}
where the last inequality holds due to the bound \eqref{RegIneq001}.
\end{proof}
\subsection{Proofs of Propositions \ref{Prop_Regu_Strong01}, \ref{Prop_Regu_Strong02}, \ref{LLN_CLT_Entry} and Theorem \ref{Thm_Coeff_BRLD_changedMea02}}
We first establish Propositions \ref{Prop_Regu_Strong01} and \ref{Prop_Regu_Strong02}
based on Propositions \ref{PropRegularity} and \ref{PropRegu02}, respectively,
together with the fact that, under the changed measure $\bb Q_s^x$,
the Markov chain $(X_n^x)_{n \geqslant 0}$ converges exponentially fast to the stationary measure $\pi_s$.
\begin{proof}[Proof of Propositions \ref{Prop_Regu_Strong01} and \ref{Prop_Regu_Strong02}]
For any $1 \leqslant k \leqslant n$ and $\ee >0$,
denote $\chi_k(u) := \mathbbm{1}_{\{u \in (-\infty, -\ee k] \}}$
and $\chi_{k, \ee_1}^+(u) = \sup_{u' \in \mathbb{B}_{\ee_1}(u)} \chi_k(u')$ for $\ee_1 > 0$.
In the same way as in \eqref{Pf_LD_SmoothIneHolder01},
we have the following smoothing inequality:
\begin{align} \label{Pf_Regu_Smooth_dd}
\chi_k(u) \leqslant
(\chi_{k, \ee_1}^+ * \bar{\rho}_{\ee_1})(u)
= : \tilde\chi_k(u), \quad u \in \mathbb{R},
\end{align}
where $\bar{\rho}_{\ee_1}$ is the density function given in \eqref{Pf_LD_SmoothIneHolder01}.
For brevity, we denote
\begin{align} \label{Pf_regu_varphi_dd}
\varphi_{k,\ee_1}^y(x) = \tilde\chi_k(\log \delta(y, x)), \quad x \in \bb{P}^{d-1}.
\end{align}
By \eqref{Pf_Regu_Smooth_dd} and \eqref{Pf_regu_varphi_dd}, it follows that
\begin{align*}
\bb Q_s^x \Big( \delta (y, G_n x) \leqslant e^{- \ee k} \Big)
& \leqslant \bb E_{\bb Q_s^x} \big[ \varphi_{k,\ee_1}^y (G_n x) \big] \nonumber\\
& \leqslant \left| \bb E_{\bb Q_s^x} \big[ \varphi_{k,\ee_1}^y (G_n x) \big] - \pi_s(\varphi_{k,\ee_1}^y) \right|
+ \pi_s(\varphi_{k,\ee_1}^y).
\end{align*}
For the first term, note first that
$\| \varphi_{k,\ee_1}^y \|_{\gamma} \leqslant \frac{ e^{ \ee \gamma k} }{ ( 1 - e^{-2\ee_1} )^{\gamma} }.$
Using \eqref{equcontin Q s limit} and taking $\gamma >0$ sufficiently small,
we get that for any $1 \leqslant k \leqslant n$,
\begin{align}\label{Pf_Regu_I1_f}
\left| \bb E_{\bb Q_s^x} \big[ \varphi_{k,\ee_1}^y (G_n x) \big] - \pi_s(\varphi_{k,\ee_1}^y) \right|
\leqslant C e^{-cn} \| \varphi_{k,\ee_1}^y \|_{\gamma} \leqslant C e^{-cn /2}.
\end{align}
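Here, since $1 \leqslant k \leqslant n$, we have $\| \varphi_{k,\ee_1}^y \|_{\gamma} \leqslant e^{\ee \gamma n} ( 1 - e^{-2\ee_1} )^{-\gamma}$, so that any choice $\gamma \leqslant c/(2 \ee)$ gives
\begin{align*}
e^{-cn} \| \varphi_{k,\ee_1}^y \|_{\gamma}
\leqslant ( 1 - e^{-2\ee_1} )^{-\gamma}\, e^{ -(c - \ee \gamma) n }
\leqslant ( 1 - e^{-2\ee_1} )^{-\gamma}\, e^{ -cn/2 },
\end{align*}
and the constant factor $( 1 - e^{-2\ee_1} )^{-\gamma}$ can be absorbed into $C$.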
For the second term, using the fact that
$\tilde\chi_k(u) \leqslant \chi_{k, 2\ee_1}^+(u) = \mathbbm{1}_{\{u \in (-\infty, -\ee k + 2 \ee_1] \}}$,
and applying Propositions \ref{PropRegularity} and \ref{PropRegu02}
(respectively for $s \in I_{\mu}^{\circ}$ and $s \in (-s_0, 0)$),
we obtain that there exist constants $c, C >0$ such that
\begin{align}\label{Pf_Regu_I2_f}
\pi_s(\varphi_{k,\ee_1}^y)
\leqslant \pi_s \left( x \in \bb P^{d-1}: \delta(y,x) \in [0, e^{- \ee k + 2 \ee_1}] \right)
\leqslant C e^{-ck}.
\end{align}
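For the last step, note that if the regularity bounds are used in the form $\pi_s \big( x \in \bb P^{d-1}: \delta(y,x) \leqslant t \big) \leqslant C t^{\alpha}$ for all $0 < t < 1$, uniformly in $y \in (\bb P^{d-1})^*$, then the choice $t = e^{- \ee k + 2 \ee_1}$ gives
\begin{align*}
\pi_s \Big( x \in \bb P^{d-1}: \delta(y,x) \leqslant e^{- \ee k + 2 \ee_1} \Big)
\leqslant C e^{2 \alpha \ee_1}\, e^{- \alpha \ee k},
\end{align*}
so that \eqref{Pf_Regu_I2_f} holds with $c = \alpha \ee$.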
Putting together \eqref{Pf_Regu_I1_f} and \eqref{Pf_Regu_I2_f},
we conclude the proof of Propositions \ref{Prop_Regu_Strong01} and \ref{Prop_Regu_Strong02}.
\end{proof}
Using Propositions \ref{Prop_Regu_Strong01} and \ref{Prop_Regu_Strong02}, we are now in a position to
establish Proposition \ref{LLN_CLT_Entry}
on the SLLN and the CLT for the coefficients $\langle f, G_n v \rangle$
under the measure $\mathbb{Q}_s^x$.
\begin{proof}[Proof of Proposition \ref{LLN_CLT_Entry}]
(1) We first prove \eqref{SLLN_Entry}.
By Proposition \ref{Prop_Regu_Strong01} and the Borel--Cantelli lemma,
we get that for any $\varepsilon > 0$ and $s \in I_{\mu}^{\circ}$,
uniformly in $f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
\begin{align*}
\liminf_{ n \to \infty } \frac{1}{n} \log \frac{ | \langle f, G_n v \rangle | }{ | G_n v| } \geqslant - \varepsilon,
\quad \mathbb{Q}_s^x \mbox{-a.s..}
\end{align*}
Since $\varepsilon > 0$ can be arbitrarily small, this together with \eqref{SLLN_Gnx} implies the desired lower bound:
uniformly in $f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
\begin{align*}
\liminf_{ n \to \infty } \frac{1}{n} \log | \langle f, G_n v \rangle | \geqslant \Lambda'(s),
\quad \mathbb{Q}_s^x \mbox{-a.s..}
\end{align*}
The upper bound follows easily from \eqref{SLLN_Gnx} and the fact that
$\log | \langle f, G_n v \rangle | \leqslant \log | G_n v|$.
Hence \eqref{SLLN_Entry} holds.
We next prove \eqref{CLT_Entry}.
Using Proposition \ref{Prop_Regu_Strong01} with $k = \sqrt{n}$,
we get the following convergence in probability: for any $\varepsilon>0$,
uniformly in $f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
\begin{align*}
\lim_{ n \to \infty } \mathbb{Q}_s^x
\left( \frac{ \log |G_n v| - \log | \langle f, G_n v \rangle | }{ \sigma_s \sqrt{n} } \geqslant \varepsilon \right)
= 0.
\end{align*}
This yields \eqref{CLT_Entry} using \eqref{CLT_Cocycle01} together with Slutsky's lemma.
(2) The proof of part (2) can be carried out in an analogous way using Proposition \ref{Prop_Regu_Strong02},
the SLLN and the CLT for the norm cocycle $\log |G_n v|$
under the changed measure $\mathbb{Q}_s^x$ when $s<0$ established in \cite{XGL19b}.
\end{proof}
Using Propositions \ref{Prop_Regu_Strong01} and \ref{Prop_Regu_Strong02},
we are able to prove Theorem \ref{Thm_Coeff_BRLD_changedMea02}.
\begin{proof}[Proof of Theorem \ref{Thm_Coeff_BRLD_changedMea02}]
In a similar way as in the proof of \eqref{Pf_BRP_Changed_ff},
one can verify that for any $t < s$ with $s \in (-s_0, 0] \cup I^{\circ}_{\mu}$
and $t \in K_{\mu} \subset (-s_0, s)$,
\begin{align*}
& \mathbb{E}_{\bb Q_s^x}
\Big[ \varphi(G_n x) \psi \big( \log |\langle f, G_n v \rangle| - nq_t \big) \Big]
= \frac{ \kappa^n(t) r_t(x) }{\kappa^n(s) r_s(x)} e^{ (s-t) n q_t} \times \nonumber\\
& \quad \mathbb{E}_{\bb Q_t^x}
\left[ (\varphi r_s r_t^{-1})(G_n x) e^{(s - t) (\log |G_n v| - n q_t)}
\psi \big( \log |\langle f, G_n v \rangle| - nq_t \big) \right].
\end{align*}
Recalling that $\Lambda^*(q_s) = sq_s - \Lambda(s)$, $\Lambda^*(q_t) = tq_t - \Lambda(t)$,
$\Lambda(s) = \log \kappa(s)$ and $\Lambda(t) = \log \kappa(t)$,
we have
\begin{align*}
\frac{ \kappa^n(t) }{\kappa^n(s)} e^{ (s-t) n q_t}
= \exp \{ -n(\Lambda^*(q_t) - \Lambda^*(q_s) - s(q_t -q_s)) \}.
\end{align*}
Hence, to prove Theorem \ref{Thm_Coeff_BRLD_changedMea02}, we are led to handle
\begin{align*}
J : = \sigma_t \sqrt{2\pi n}
\mathbb{E}_{\bb Q_t^x}
\left[ (\varphi r_s r_t^{-1})(G_n x)
e^{(s - t) (\log |G_n v| - n q_t)}
\psi \big( \log |\langle f, G_n v \rangle| - nq_t \big) \right].
\end{align*}
For simplicity, denote
\begin{align*}
T_n^v: = \log |G_n v| - n q_t, \qquad Y_n^{x,y}: = \log \delta(y, G_n x).
\end{align*}
For any fixed small constant $0< \eta <1$, set $I_k: = (-\eta k, -\eta(k-1)]$, $k \geqslant 1$.
Take a sufficiently large constant $C_1 > 0$ and let $M_n:= \floor{ C_1 \log n }$.
Then,
\begin{align}\label{Pf_LD_Low_decom_f}
J = J_1 + J_2,
\end{align}
where
\begin{align*}
& J_1 : = \sigma_t \sqrt{2\pi n} \mathbb{E}_{ \bb Q_t^x }
\left[ (\varphi r_s r_t^{-1})(G_n x) e^{(s-t) T_n^v}
\psi \big( T_{n}^v + Y_n^{x,y} \big) \mathbbm{1}_{\{Y_n^{x, y} \leqslant -\eta M_n \}} \right],
\nonumber\\
& J_2 : = \sigma_t \sqrt{2\pi n} \sum_{k =1}^{M_n}
\mathbb{E}_{ \bb Q_t^x }
\left[ (\varphi r_s r_t^{-1})(G_n x) e^{(s-t) T_n^v}
\psi \big( T_{n}^v + Y_n^{x,y} \big) \mathbbm{1}_{\{Y_n^{x,y} \in I_k \}} \right].
\end{align*}
For $J_1$,
since the function $u \mapsto e^{-s' u} \psi(u)$ is directly Riemann integrable on $\mathbb{R}$
for any $s' \in K_{\mu}^{\epsilon}$,
we see that the function $u \mapsto e^{(s-t) u} \psi(u)$ is bounded on $\mathbb{R}$
and so there exists a constant $C >0$ such that for all $s \in (-s_0, 0] \cup I^{\circ}_{\mu}$,
\begin{align*}
e^{(s-t) T_n^v} \psi( T_{n}^v + Y_n^{x,y} ) \leqslant C e^{ (t-s) Y_n^{x,y} }.
\end{align*}
Hence, using Propositions \ref{Prop_Regu_Strong01} and \ref{Prop_Regu_Strong02},
we get that as $n \to \infty$,
\begin{align*}
J_1 \leqslant C \sqrt{n}
\bb Q_t^x \Big( \log \delta(y, G_n x) \leqslant -\eta \floor{C_1 \log n} \Big)
\leqslant C \sqrt{n} \, e^{-c_{\eta} \floor{C_1 \log n} } \to 0.
\end{align*}
For $J_2$, one can follow the proof of Theorem \ref{Thm-Posi-Neg-sBRP} to obtain
that as $n \to \infty$,
uniformly in $f \in (\bb R^d)^*$ and $v \in \bb R^d$ with $|f| = |v| = 1$,
\begin{align*}
J_2 = \int_{ \bb P^{d-1} } \varphi(x) \delta(y, x)^t \, \frac{r_s(x)}{r_t(x)} \pi_t(dx)
\int_{\mathbb{R}} e^{- (t-s) u} \psi(u) du + o(1).
\end{align*}
This ends the proof of Theorem \ref{Thm_Coeff_BRLD_changedMea02}.
\end{proof}
\end{document}
\begin{document}
\title{Single-shot high-resolution identification of discrete frequency modes of single-photon-level optical pulses}
\author{Daisuke Yoshida}
\email{[email protected]}
\affiliation{
Yokohama National University, 79-5 Tokiwadai, Hodogaya, Yokohama 240-8501, Japan
}
\affiliation{LQUOM Inc., 79-5 Tokiwadai, Hodogaya, Yokohama 240-8501, Japan}
\author{Mayuka Ichihara}
\affiliation{
Yokohama National University, 79-5 Tokiwadai, Hodogaya, Yokohama 240-8501, Japan
}
\author{Takeshi Kondo}
\affiliation{
Yokohama National University, 79-5 Tokiwadai, Hodogaya, Yokohama 240-8501, Japan
}
\author{Feng-Lei Hong}
\affiliation{
Yokohama National University, 79-5 Tokiwadai, Hodogaya, Yokohama 240-8501, Japan
}
\author{Tomoyuki Horikiri}
\affiliation{
Yokohama National University, 79-5 Tokiwadai, Hodogaya, Yokohama 240-8501, Japan
}
\date{\today}
\begin{abstract}
Frequency-multiplexed quantum communication usually requires a single-shot identification of the frequency mode of a single photon.
In this paper, we propose a scheme that can identify the frequency mode with high resolution even for spontaneously emitted photons whose generation time is unknown, by combining time-to-space and frequency-to-time mode mapping.
We also demonstrate the mapping of frequency modes (100 MHz intervals) to temporal modes (435 ns intervals) for weak coherent pulses using atomic frequency combs.
This frequency interval is close to the minimum frequency mode interval of the atomic frequency comb quantum memory with $\mathrm{Pr^{3+}}$ ion-doped $\mathrm{Y_2SiO_5}$ crystal, and the proposed scheme has the potential to maximize the frequency multiplexing of the quantum repeater scheme with this memory.
\end{abstract}
\maketitle
\section{\label{level1-1}INTRODUCTION}
The realization of quantum communication enables various applications such as quantum key distribution~\cite{bb84,bbm92}, blind quantum computing~\cite{Broadbent2009}, and atomic clocks with unprecedented stability and accuracy~\cite{Komar2014}.
Quantum repeaters enable long-distance quantum communication and are expected to constitute the core technology for the future quantum Internet~\cite{Kimble2008}. For these reasons, extensive studies have been conducted in recent years toward their realization.
One trend is frequency multiplexing, which is necessary for improving the entanglement distribution rate in quantum communication~\cite{Sinclair2014, Wengerowsky2018}.
In frequency-multiplexed quantum communication, the identification of frequency modes is usually necessary.
The frequency mode identification of single photons used in quantum communication is more difficult than that of classical light, mainly because it must be performed in a single shot, unless non-destructive measurement of the photon is possible.
Recently, an impactful study on quantum repeaters was reported by Lago-Rivera et al.~\cite{Lago-Rivera2021}.
The authors demonstrated a quantum repeater scheme~\cite{Simon2007} that combines time-division multiplexed absorptive quantum memories based on the atomic frequency comb (AFC) scheme~\cite{Afzelius2009} and photon-pair sources.
The AFC is a comb-shaped absorption profile.
It can be used not only as a time-multiplexing memory but also as a frequency-multiplexing memory~\cite{Seri2019, Yang2018, Sinclair2014}. Here, we consider frequency multiplexing of the quantum repeater scheme~\cite{Lago-Rivera2021,Simon2007}.
In this case, AFCs are required to store photons of multiple frequency modes, and photon sources are required to generate photon pairs of frequency modes corresponding to those of AFCs.
In Ref.~\cite{Lago-Rivera2021}, the AFC was tailored in the inhomogeneous broadening of the $\mathrm{^{3}H_{4}\leftrightarrow}$ $\mathrm{^{1}D_{2}}$ transition in $\mathrm{Pr^{3+}}$ ion-doped $\mathrm{Y_2SiO_5}$ crystal (Pr:YSO) by the hole-burning technique~\cite{Nilsson2004}.
In the case of the AFC in Pr:YSO, the upper bandwidth limit of one frequency mode is about 4 MHz and the lower limit of the interval between frequency modes is about 100 MHz when used as a memory capable of reading out stored photons on demand~\cite{Ortu2022}. These limits are determined by the hyperfine level spacing in Pr:YSO~\cite{Ortu2022}.
The region where AFCs are created is limited to within the inhomogeneous broadening of Pr:YSO. Therefore, in the case of a frequency-multiplexed quantum repeater using AFCs in Pr:YSO, the upper limit of the entanglement distribution rate can be increased by making the frequency-mode interval as narrow as possible. As a frequency-multiplexed photon pair source, cavity-enhanced spontaneous parametric down-conversion (cSPDC)~\cite{Ou1999, Riel_nder2016, Niizeki2020}, which is also used in Ref.~\cite{Lago-Rivera2021}, would be promising.
The frequency mode interval of the photons generated by cSPDC depends on the free spectral range (FSR) of the cavity.
In Ref.~\cite{Lago-Rivera2021}, where entanglement between quantum memories was generated by single-photon interference, the pump laser for cSPDC was continuous-wave (CW).
In the case of CW excitation, the time at which photon pairs are generated is unknown.
In summary, frequency multiplexing of the quantum repeater scheme of Ref.~\cite{Lago-Rivera2021} calls for a method that identifies the frequency modes of narrow-linewidth photons with high resolution and without information on the photon generation time.
A frequency-to-time mode mapper (FTMM)~\cite{Saglamyurek2016,Saglamyurek2014, Avenhaus2009, Davis2017} and a frequency-to-space mode mapper (FSMM)~\cite{Cheng2019,Casas2021} allow frequency-mode identification of single photons. Typical examples of FTMMs are wavelength dispersion~\cite{Avenhaus2009} and chirped fiber Bragg gratings~\cite{Davis2017}.
In general, these are easy to implement but face difficulties in achieving sub-GHz resolution.
An FTMM using AFCs is also possible~\cite{Saglamyurek2016,Saglamyurek2014}.
In this method, the resolution is determined by the homogeneous broadening and the energy-level spacing used, and sub-GHz resolution has already been achieved~\cite{Saglamyurek2014}.
However, an FTMM applicable to photons with narrow linewidths below 10 MHz has not yet been realized.
A common drawback of FTMMs is that they require information about the generation time of the photon whose frequency mode is to be identified.
In contrast, an FSMM can identify the frequency mode without information about the photon generation time, since the frequency is identified by the spatial mode. However, it is generally not easy to achieve sub-GHz resolution with an FSMM.
In this paper, we propose a scheme that can identify the frequency modes of narrow-linewidth photons with high resolution and without photon generation time information by combining a time-to-space mode mapper (TSMM) and FTMMs.
We also demonstrate high-resolution FTMMs using AFCs, which can be applied to narrow-linewidth photons.
The demonstrated FTMM is capable of identifying frequency modes at 100 MHz intervals, which is close to the FSR of recent cSPDC two-photon sources~\cite{Lago-Rivera2021, Niizeki2020} and to the lower limit of the frequency interval of AFCs in Pr:YSO.
\section{\label{level1-2}SCHEME}
In this section, we first propose a frequency-mode identification scheme that combines FTMMs and a TSMM. Next, we describe FTMMs using AFCs.
\subsection{\label{level2-1}Frequency mode identification}
First, we consider frequency-mode identification when only an FTMM is used.
A schematic diagram of this case is shown in Fig.~\ref{scheme}(a).
By mapping the multiple frequency modes that exist in the same temporal mode to different temporal modes, it is possible to identify the frequency mode from the observation time of the photon if its generation time is known.
In this case, however, to prevent multiple frequency modes from existing in the same temporal mode after frequency-to-time mode mapping, the number of temporal modes before mapping must be reduced. From the viewpoint of the communication rate, reducing the number of temporal modes is a disadvantage.
Therefore, we propose a frequency-mode identification scheme combining FTMMs and a TSMM.
In this scheme, frequency modes can be identified even if the photon generation time is unknown.
It also eliminates the need to reduce the number of temporal modes before the FTMMs and the TSMM.
The overview diagram is shown in Fig.~\ref{scheme}(b).
Consider the case where the generation time of the photon is unknown, i.e., the probability of the photon's existence is equally present at all times.
The duration of the temporal mode of the input photon is set to $\mathit{\Delta_{\mathrm{t}}}$.
Here, $\mathit{\Delta_{\mathrm{t}}}$ is assumed to be sufficiently longer than the coherence time of the photon.
The TSMM converts each temporal mode separated by $\mathit{\Delta_{\mathrm{t}}}$ into spatial modes $\mathrm{S_{1}, S_{2},\cdots, S_{\it{N}_{\rm{S}}},S_{1},\cdots}$, with $N_{\rm{S}}$ being the total number of spatial modes.
The FTMM provided for each spatial mode converts each frequency mode to a temporal mode at $\mathit{\Delta_{\mathrm{t}}}^{\prime}$ intervals.
At this time, $\mathit{\Delta_{\mathrm{t}}}\leqq \mathit{\Delta_{\mathrm{t}}}^{\prime}$ must be satisfied to ensure that different frequency modes do not exist in the same temporal mode. For the same reason, if the total number of frequency modes is $N_{\rm{F}}$, the condition $N_\mathrm{F} \mathit{\Delta_{\mathrm{t}}^{\prime}} \leqq N_\mathrm{S} \mathit{\Delta_{\mathrm{t}}}$ must be satisfied.
In this way, the frequency mode can be uniquely determined from the spatial and temporal modes in which the photon was observed.
Fig.~\ref{scheme}(b) shows the case $\mathit{\Delta_{\mathrm{t}} = \Delta_{\mathrm{t}}^{\prime}}$ and $N_{\mathrm{F}} = N_{\mathrm{S}} = 3$.
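The identification logic can be summarized by a short sketch. The following is an illustrative simulation of our own (not part of the experiment), which idealizes the FTMM as adding a delay proportional to the frequency-mode index and uses the parameters of the case in Fig.~\ref{scheme}(b); all variable names and values are chosen here for illustration only.
\begin{verbatim}
import random

N_S = 3          # number of spatial modes produced by the TSMM
N_F = 3          # number of frequency modes to identify
dt  = 435e-9     # temporal-mode duration Delta_t (s)
dtp = 435e-9     # FTMM delay step Delta_t' (s); requires dt <= dtp
                 # and N_F*dtp <= N_S*dt

def forward(t_arrival, f_index):
    """Route a photon with unknown arrival time and frequency mode f_index
    through the TSMM (cyclic routing of temporal modes to spatial modes)
    and an idealized FTMM (frequency-dependent delay)."""
    slot = int(t_arrival // dt)
    spatial = slot % N_S
    return spatial, t_arrival + f_index * dtp

def identify(spatial, t_obs):
    """Recover the frequency mode from the observed spatial mode and time:
    within the N_S*dt frame of one spatial mode, each frequency mode falls
    into a distinct window of width dtp because N_F*dtp <= N_S*dt."""
    phase = (t_obs - spatial * dt) % (N_S * dt)
    return int(phase // dtp)

for _ in range(1000):   # self-check over random arrivals and modes
    f = random.randrange(N_F)
    t = random.uniform(0.0, 1e-3)
    assert identify(*forward(t, f)) == f
\end{verbatim}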
The frequency resolution of this scheme depends on the resolution of the FTMMs.
A promising candidate for a high-resolution FTMM is the AFC, which we describe below.
A promising candidate for the TSMM could be an optical switch array using electro-optic modulators (EOMs)~\cite{Tu2019}.
\begin{figure*}
\caption{
(a) Schematic of mapping frequency modes to temporal modes.
\label{scheme}
\end{figure*}
\subsection{\label{level2-2}A frequency-to-time mode mapper \\using an atomic frequency comb}
The AFC is an equally spaced, comb-shaped absorption profile, as shown in Fig.~\ref{diag}(a).
Photons absorbed in the AFC are re-emitted in the same spatial mode after a time equal to the inverse of the comb spacing $\it{\Delta}$ (henceforth referred to as the echo signal).
Typically, AFCs are created by hole-burning in the inhomogeneous broadening of rare-earth ion-doped crystals.
The AFC can also read out the absorbed photons on demand by using another ground level with no population~\cite{Afzelius2009, Afzelius2010, Timoney2013}.
In this study, frequency-to-time mode mapping was performed by the AFC, which is used as a fixed-time memory.
The AFC is thus expected to serve as a high-resolution FTMM.
A schematic diagram of frequency-to-time mode mapping using AFCs is shown in Fig.~\ref{diag}(b).
Multiple AFCs can be tailored in inhomogeneous broadening~\cite{Seri2019, Yang2018, Sinclair2014}.
If AFCs are tailored with different comb intervals for each frequency mode, the time at which the echoes are reproduced can be changed for each frequency mode.
However, it is not possible to create multiple AFCs in arbitrary bands, because creating a hole in one band will create anti-holes in other bands.
When the AFC is used as a memory capable of reading out stored photons on demand, the lower limit of the interval between frequency modes is about 100 MHz, so the spacing between adjacent AFCs should be at least $\sim$100 MHz~\cite{Ortu2022}.
Therefore, in this study, experiments were conducted to map three frequency modes separated by $\sim100$ MHz into temporal modes with $\mathit{\Delta_{\mathrm{t}}^{\prime}} \sim 435$ ns spacing.
\begin{figure}
\caption{(a) Schematic diagram representing the function of AFC.
The light pulse absorbed at time 0 is re-emitted at time $1/\mathit{\Delta}$.
\label{diag}
\end{figure}
\section{\label{level1-3}EXPERIMENTAL SETUP}
The overall experiment setup is illustrated in Fig.~\ref{setup}.
In our experiment, multiple AFCs were created within the inhomogeneous broadening of a Pr:YSO crystal with a $\mathrm{Pr^{3+}}$ doping rate of 0.05\% and dimensions of 3 mm × 3 mm × 5 mm.
A wide modulation range is required for the laser beam to create AFCs in multiple frequency modes by taking advantage of the inhomogeneous broadening of Pr:YSO (about 10 GHz~\cite{Nilsson2004}).
In this study, we developed a frequency stabilization and modulation system that can perform accurate GHz-order modulation using a dynamical phase lock technique~\cite{Numata2012} and accurate, fast modulation of approximately 10 MHz using an acousto-optic modulator (AOM)~\cite{Donley2005}.
The absolute frequency of the master laser (Toptica, TA pro) was stabilized to an optical frequency comb phase-locked to GPS signals, and its linewidth was narrowed using a reference cavity.
The slave laser (Toptica, TA-SHG pro) used for the pump, probe, and input pulses was stabilized against the master laser.
We used a closed-cycle cryostat (Montana instruments, cryostation) to cool the Pr:YSO crystal to $< 3.3$ K.
Each AFC was created by modulating the pump laser by $\sim$10 MHz using the AOM in the double-pass configuration~\cite{Donley2005}.
After each AFC was created, the laser itself was modulated by 100 MHz using the dynamical phase lock technique, and a different AFC was created by modulating with the AOM again.
The time required for the 100 MHz modulation was approximately 10 ms in our setup.
In this way, three AFCs with different comb spacings were created at 100 MHz intervals.
The probe laser for observing the created AFCs can be turned on and off by an AOM.
It was turned off during AFC creation and turned on only during observation.
During AFC observation, the probe laser was modulated by chirping the reference RF signal for phase locking in the dynamical phase lock technique~\cite{Numata2012}.
The input pulse for observing the echo signal of the AFC followed the same path as the probe laser.
The input pulse was tailored by the AOM to be Gaussian with a full width at half maximum (FWHM) of 5 MHz. The echo signal was coupled to a single-mode fiber (SMF) and detected by a single-photon counting module (SPCM, Perkin Elmer, SPCM-AQRH-14-FC).
The coupling efficiency of the SMF was 59\%, the detection efficiency of the SPCM was 59\%, and the dark count rate was $\sim150$ Hz.
The power of the pump laser was set to $\sim2.6$ mW, that of the probe laser to 1 µW, and the input pulse to a mean photon number of $\mu = 0.12$ per pulse.
The beam diameter was set to $\sim500$ µm for the pump and $\sim100$ µm for the probe and input pulses. The polarizations were aligned with the D2 axis of the Pr:YSO crystal.
\begin{figure*}
\caption{
Experimental setup.
Second harmonic generation (SHG) of a 1212 nm external cavity diode laser (ECDL) is used for the pump, probe, and input pulses.
\label{setup}
\end{figure*}
\section{\label{level1-4}EXPERIMENTAL RESULT}
The three AFCs were observed by chirping a weak probe laser at a chirp rate of 5.2 MHz/ms.
The optical depth shown in Fig.~\ref{AFC_all} was the average of 16 measurements. It can be seen that AFCs with different comb spacings were created.
In this experiment, the comb spacings were set to $\mathit{\Delta}_1 = 1.533$ MHz, $\mathit{\Delta}_2 = 920$ kHz, and $\mathit{\Delta}_3 = 657$ kHz, in order from the low-frequency side.
This means that the expected echo times are $t_1 = 652$ ns, $t_2 = 1087$ ns, and $t_3 = 1522$ ns.
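(These follow from the echo condition $t_k = 1/\mathit{\Delta}_k$: $1/(1.533~\mathrm{MHz}) \approx 652$ ns, $1/(920~\mathrm{kHz}) \approx 1087$ ns, and $1/(657~\mathrm{kHz}) \approx 1522$ ns.)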
In this experiment, 10000 Gaussian pulses were input for each of the multiple frequency modes created.
By repeating this procedure, the total number of pulses input for each frequency mode was $N_{\mathrm{in}}=7.5\times10^5$. The observed results are shown in Fig.~\ref{echo}.
It can be seen that the echoes appear in the expected temporal mode for each frequency mode.
Table~\ref{tab1} shows the expected temporal mode of detection for each frequency mode, the echo efficiency $\eta_{\mathrm{echo}}$, and the probability $\eta_{\mathrm{error}}$ of observing a photon in the other two expected temporal modes.
$\eta_{\mathrm{echo}}$ is the ratio of the number of counts within the expected temporal mode (corrected for the SPCM detection efficiency and fiber coupling loss) to the total number of photons input ($N_{\mathrm{in}} \mu$).
$\eta_{\mathrm{error}}$ is the ratio of the number of counts within the other two, unexpected temporal modes (corrected for the SPCM detection efficiency and fiber coupling loss) to $N_{\mathrm{in}} \mu$.
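In formulas, writing $C_{\mathrm{exp}}$ and $C_{\mathrm{other}}$ for the raw counts in the expected and in the other two temporal modes, respectively, and $\eta_{\mathrm{SMF}}$, $\eta_{\mathrm{SPCM}}$ for the fiber coupling and detector efficiencies (symbols introduced here only for clarity), these definitions read
\begin{equation*}
\eta_{\mathrm{echo}} = \frac{C_{\mathrm{exp}}}{\eta_{\mathrm{SMF}}\,\eta_{\mathrm{SPCM}}\,N_{\mathrm{in}}\,\mu},
\qquad
\eta_{\mathrm{error}} = \frac{C_{\mathrm{other}}}{\eta_{\mathrm{SMF}}\,\eta_{\mathrm{SPCM}}\,N_{\mathrm{in}}\,\mu}.
\end{equation*}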
\begin{figure}
\caption{An overall view of the three AFCs and an enlarged view of each AFC.}
\label{AFC_all}
\end{figure}
\begin{figure}
\caption{
Echo signal observation results.
The bin size is 4.096 ns.
\label{echo}
\end{figure}
\begin{table}[b]
\caption{\label{tab1}
Comb spacing and temporal mode for each AFC, and experimental results for $\eta_{\rm{echo}}$ and $\eta_{\rm{error}}$. The time window for each temporal mode is 435 ns.}
\begin{ruledtabular}
\begin{tabular}{lccc}
\multicolumn{1}{c}{\textrm{}}&
AFC1&
AFC2&
AFC3\\
\colrule
$\it{\Delta}$& 1.533 MHz & 920 kHz & 657 kHz\\
temporal mode & 652 ns & 1087 ns & 1522 ns\\
$\eta_{\rm{echo}}$ & 21\% & 14\% & 11\%\\
$\eta_{\rm{error}}$ & 2.2\%& 1.4\% & 1.2\%
\end{tabular}
\end{ruledtabular}
\end{table}
\section{\label{level1-5}DISCUSSION}
In this study, three frequency modes at 100 MHz intervals were successfully mapped into different temporal modes. However, the probability of successful mapping is low, about 10\%.
Moreover, since the absorption efficiency of the AFC is not unity, the photons that are transmitted without being absorbed appear around 0 ns in Fig.~\ref{echo} and occupy that temporal mode. Theoretically, near-unity absorption and re-emission efficiency can be obtained by using an AFC in a cavity~\cite{Afzelius2010_2}, which would help resolve these problems.
We now consider the limit on the number of frequency modes for an FTMM using AFCs. Factors that determine this limit include the modulation range of the pump laser, the inhomogeneous broadening, the linewidth of the pump laser, and the creation time of the AFC.
In our system, the laser was directly modulated, but the modulation range is limited by the mode-hop-free range, which is about 15 GHz. Meanwhile, the inhomogeneous width of the Pr:YSO with a doping rate of 0.05\% that we used is $\sim$10 GHz~\cite{Nilsson2004}.
Therefore, if we were to create AFCs with different comb spacings every 100 MHz, the limit would be about 100 modes. In fact, if we assume a time width of 435 ns for one temporal mode and try to create AFCs with different comb spacings for 100 modes, the smallest comb spacing would be $\sim$20 kHz, and the linewidth of the pump laser must be sufficiently narrowed.
In the current system, the lower limit of the comb spacing that can be stably produced is about 500 kHz; to achieve even smaller comb spacings, it is necessary to use an ultra-low-expansion cavity or a self-heterodyne method for narrowing the linewidth of the pump laser~\cite{Young1999,Kefelian2009}.
The upper limit of the allowable AFC creation time is determined by the relaxation time between hyperfine levels, since the comb structure degrades after the AFC is created.
In this experiment, the time to create one AFC was $< 50$ ms and the time for frequency mode modulation was $\sim10$ ms.
Therefore, the time required to create $N$-mode AFCs is $< 60N$ ms. The extent to which this creation time is acceptable will depend on the system to which the AFCs are applied.
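As an illustrative estimate based on these numbers, creating the $N \approx 100$ modes permitted by the modulation range would take less than $60~\mathrm{ms} \times 100 = 6$ s.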
This scheme is expected to be applied not only to multiplexing for quantum communication but also to various spectroscopic measurements. However, when an AFC is used as an FTMM, frequency conversion is required to perform frequency-mode identification of photons in various frequency bands.
Coupling of AFCs in Pr:YSO with telecommunication-wavelength photons using frequency conversion has been achieved~\cite{Maring2014,mannami2021}.
In contrast, an FSMM using a VIPA~\cite{Casas2021} or gratings~\cite{Cheng2019} is also expected as a frequency-mode identification method. Compared to our scheme, these are superior in terms of ease of implementation and frequency band extension.
The advantage of our scheme is that it allows high-resolution frequency identification.
\section{\label{sec:level1}CONCLUSION}
In this study, we achieved frequency mode identification at 100 MHz intervals, which is close to the frequency multiplexing limit of AFCs in Pr:YSO, a promising quantum memory for quantum repeaters.
Since the FSR of a cSPDC source with high coupling to quantum memory has been demonstrated to be about 100 MHz~\cite{Lago-Rivera2021, Niizeki2020}, this spectroscopic system not only achieves the upper limit of discrete-mode spectral resolution with Pr:YSO, but also shows promising results for improving the quantum entanglement generation rate.
\begin{acknowledgments}
We thank Ippei Nakamura for the useful discussion.
This work was supported by SECOM foundation, JST PRESTO (JPMJPR1769), JST START (ST292008BN), JSPS KAKENHI Grant Number JP20H02652 and NEDO (JPNP14012).
We also acknowledge the members of the Quantum Internet Task Force, which is a research consortium to realize the
Quantum Internet, for comprehensive and interdisciplinary discussions of the Quantum Internet.
\end{acknowledgments}
\end{document}
\begin{document}
\title{Real analytic local well-posedness for the Triple Deck}
\begin{abstract} The Triple Deck model is a classical high order boundary layer model that has been proposed to describe flow regimes where the Prandtl theory is expected to fail. At first sight the model appears to lose two derivatives through the pressure-displacement relation which links pressure to the tangential slip. In order to overcome this, we split the Triple Deck system into two coupled equations: a Prandtl type system on $\mathbb{H}$ and a Benjamin-Ono type equation on $\mathbb{R}$. This splitting enables us to extract a crucial leading order cancellation at the top of the lower deck. We develop a functional framework to subsequently extend this cancellation into the interior of the lower deck, which enables us to prove the local well-posedness of the model in tangentially real analytic spaces.
{\bf \today}
\end{abstract}
\setcounter{tocdepth}{1}
\tableofcontents
\section{Introduction}
A fundamental challenge in fluid mechanics is to describe the vanishing viscosity limit ($\nu \rightarrow 0$) of the Navier-Stokes equations on domains with a solid boundary. In this paper we consider the fluid domain to be the two-dimensional half space $\mathbb{H}$. The main difficulty is due to the incompatibility between the no-slip boundary condition for the Navier-Stokes velocity field ($\bold{U}^{\nu}|_{{\partial} \Omega} = 0$) and the slip boundary condition for the Euler velocity field ($\bold{U}^E|_{{\partial} \Omega} \cdot (0,-1)= 0$), which makes it difficult to obtain uniform in $\nu$ estimates for norms of $\bold{U}^\nu$ which are stronger than $L^\infty_t L^2_x$.
\subsection{Historical Overview}
In order to rectify this mismatch, Prandtl~\cite{Prandtl1904} proposed the existence of a thin, $\OO(\nu^{\frac{1}{2}})$, fluid layer near the boundary through which the Navier-Stokes velocity field transitions from an outer Euler flow in the bulk, to the no-slip condition on the solid wall. Mathematically, this corresponds to a formal asymptotic expansion of the viscous incompressible flow $\bold{U}^{\nu}$ as
\begin{align} \label{Pr.ansatz}
\bold{U}^\nu(t, x,y) = \bold{U}^E(t, x,y) + \bold{U}^{BL}(t,x, \frac{y}{\sqrt{\nu}}) + \OO(\nu^{\frac{1}{2}})
\end{align}
where $\bold{U}^E$ is the Euler flow typically assumed to be known \textit{a-priori}, and $\bold{U}^{BL}$ is known as the Prandtl boundary layer corrector. The boundary layer unknown $\bold{u}^P = [u^P,v^P] := \bold{U}^{BL} + \bold{U}^E|_{Y = 0}$ is a function of the tangential variable $x$ and the normal fast variable $\bar Y = \frac{y}{\sqrt{\nu}}$, and is governed by the famous Prandtl boundary layer equations
\begin{subequations} \label{Pr:sys}
\begin{align} \label{Pr:sys:a}
&{\partial}_t u^P + u^P {\partial}_x u^P + v^P {\partial}_{\bar Y} u^P - {\partial}_{\bar Y}^2 u^P = - {\partial}_x P^E(t,x,0) , \\ \label{Pr:sys:b}
&{\partial}_x u^P + {\partial}_{\bar Y} v^P = 0, \\ \label{Pr:sys:c}
&[u^P, v^P]|_{{\bar Y} = 0} = 0, \hspace{3 mm} u^P|_{{\bar Y} \to \infty} = u^E(t,x,0), \hspace{3 mm}
\end{align}
\end{subequations}
posed in the half space $\mathbb{H}= \{ (x,\bar Y) \colon \bar Y > 0\}$. The system treats the Euler pressure trace $P^E$ and the Euler wall slip velocity $u^E$ as known, and is supplemented with an initial condition $u^P|_{t = 0} = u^P_0(x,\bar Y)$.
A first step towards establishing the validity or the invalidity of the Prandtl expansion \eqref{Pr.ansatz} is a detailed understanding of the Prandtl system \eqref{Pr:sys} itself. The well- and ill-posedness of the Prandtl equations has a long history of which we only provide a very brief summary (see the reviews~\cite{BardosTiti13,MaekawaMazzucato16} for further references). Under the monotonicity assumption ${\partial}_y u^P|_{t = 0} > 0$, Oleinik~\cite{Oleinik66,OleinikSamokhin99} obtained global in time, regular solutions on the domain $[0, L] \times \mathbb{R}_+$ for small $L$, and local in time regular solutions for arbitrary finite $L$. The aforementioned results rely on the Crocco transform, which is available from the monotonicity hypothesis. See also the global in time existence result of weak solutions obtained in \cite{XinZhang04} under the additional assumption of a favorable pressure gradient ${\partial}_x P^E(t,x) \le 0$. Without using the Crocco transform, local existence was established in the works of \cite{MasmoudiWong15,KukavicaMasmoudiVicolWong14} using energy methods and \cite{AlexandreWangXuYang14} using a Nash-Moser iteration. When the monotonicity assumption is removed, local well-posedness results for \eqref{Pr:sys} were first established assuming tangential real analyticity of the initial datum~\cite{SammartinoCaflisch98a,LombardoCannoneSammartino03,KukavicaVicol13} (see also~\cite{IgnatovaVicol16} for an almost global existence result for small datum), and more recently assuming only tangential Gevrey-class regularity~\cite{GerardVaretMasmoudi13,LiYang16}. The sharp Gevrey-$2$ result without any structural assumptions was recently established in~\cite{DiGV18}. On the other hand, in Sobolev spaces without monotonicity, the Prandtl equations are ill-posed, as was shown in~\cite{GerardVaretDormy10,GuoNguyen11,GerardVaretNguyen12,LiuYang17}.
Concerning the validity of Prandtl ansatz~\eqref{Pr.ansatz}, in the unsteady setting the expansion has been verified locally in time assuming the initial datum is real-analytic~\cite{SammartinoCaflisch98b,WangWangZhang17,NguyenNguyen18}, under the assumption that the initial vorticity is supported away from the boundary~\cite{Maekawa14,FeiTaoZhang16,FeiTaoZhang18}, in the Gevrey setting for initial data close to certain stable shear flows~\cite{GerardVaretMaekawaMasmoudi16}, or assuming that the initial vorticity is analytic only near the boundary of the half space~\cite{KukavicaVicolWang19}.
In contrast, for initial datum in Sobolev spaces the ansatz \eqref{Pr.ansatz} has been proven to be {\em invalid}~\cite{Grenier00,GrenierGuoNguyen14b,GrenierGuoNguyen14c,GrenierNguyen17}, with the recent result~\cite{GrenierNguyen18a} proving that the expansion is not valid in the $L^\infty$ topology.
A notable success of the Prandtl theory is in the steady regime, where it was in fact derived in~\cite{Prandtl1904}. For steady flows in~\eqref{Pr:sys:a}, the initial datum~\eqref{Pr:sys:c} is typically replaced by in-flow data at $\{x = 0\}$, which represents for instance the leading edge of a flat plate. Shortly after Prandtl's original work, Blasius~\cite{Blasius08} discovered the self-similar solution to the steady Prandtl equations
\begin{subequations}
\label{eq:Blasius}
\begin{align} \label{Blasius.a}
&[u^P, v^P] := [f'(\eta), \frac{1}{\sqrt{x}} \{ \eta f'(\eta) - f(\eta)\} ], \text{ where } \eta = \frac{y}{\sqrt{x}}, \\ \label{Blasius.b}
&f f'' + f''' = 0, \qquad f'(0) = 0, f'(\infty) = 1, \frac{f(\eta)}{\eta} \xrightarrow{\eta \rightarrow \infty} 1.
\end{align}
\end{subequations}
\noindent Experiments have confirmed the accuracy of \eqref{Pr.ansatz} for steady flow over a plate to a remarkable degree of precision~\cite{Schlichting60}, especially for the Blasius self-similar boundary layers \eqref{eq:Blasius}. Mathematically, in the steady case, the ansatz has been recently verified in \cite{GerardVaretMaekawa18} for shear boundary layer flows which arise from forced Navier-Stokes equations, and \cite{GuoIyer18,GuoIyer18a} for a general class of $x$-dependent boundary layer flows, which arise from homogeneous Navier-Stokes flows, and which include the Blasius solution. See also~\cite{GuoNguyen14} for related results on a moving plate.
In spite of the success of the Prandtl theory in the steady regime, the phenomenon of boundary layer separation remains mostly unsolved, both in the steady and the unsteady regimes~\cite{Schlichting60,SychevEtAl98,CousteixMauss07,SmithBrown12}. In the unsteady case, the van Dommelen and Shen singularity~\cite{VanDommelenShen80}, which was recently proven to occur rigorously~\cite{EEngquist97,KukavicaVicolWang17,CollotGhoulMasmoudi18,CollotGhoulIbrahimMasmoudi18} may be seen as as a diagnostic of separation~\cite{GarganoSammartinoSciacca09}: an adverse Euler pressure gradient causes a finite time singularity in the displacement thickness, and so the flow is detached from the flat plate. The vorticity generated at the boundary is ejected into the bulk of the flow where it rolls up and is considered as one of the factors responsible for the anomalous dissipation of energy. In the steady case the detachment of the boundary layer from the flat plate was predicted by Goldstein~\cite{Goldstein48} and has been proven recently in~\cite{DalibardMasmoudi18}. This breakdown of the assumptions on which Prandtl equations are derived signals the limitations of the classical Prandtl boundary layer theory, and new, higher order, theories are required in order to model the inviscid-boundary layer coupling near points of separation \cite{CebeciCousteix05,CousteixMauss07}.
Two well-known higher order models are the {\em Prescribed Displacement Thickness} (PDT) model~\cite{CatherallMangler66} and the {\em Interactive Boundary Layer} (IBL) model~\cite{Carter74,LeBalleur90,Lagree10}. For instance, in the IBL model the Euler flow and boundary layer flow are strongly coupled through a boundary condition of the type
\begin{align} \label{eq:IBL}
v^E|_{y = 0} = \sqrt{\nu}{\partial}_x \{ \kappa u^E|_{y = 0} \}, \qquad \kappa := \int_{\mathbb{R}_+} \Big(1 - \frac{u^P}{u^E|_{Y = 0}} \Big) \,\mathrm{d} y, \qquad u^P|_{y \to \infty} = u^E(t,x,0).
\end{align}
This model has been studied rigorously in~\cite{DDLM18}, where it is shown to be {\em linearly ill-posed even in analytic spaces}. Similar dramatic ill-posedness results are shown in \cite{DDLM18} to hold for the PDT model. These severe instabilities in the PDT and IBL higher order boundary layer models lead us to consider the Triple Deck system, which is the main purpose of this paper.
\subsection{Triple Deck equations}
In order to describe the Triple Deck system, it is useful to keep in mind the diagram below, taken from \cite[pp.~220, Figure 4]{Smith82}, which describes the steady flow past a finite plate whose boundary is at $\{y=0\}$, with a leading edge to the left and a trailing edge in the bottom center of the figure:
\begin{center}
\includegraphics[width=0.6\textwidth]{Triple_Deck.jpeg}
\end{center}
Here $R$ denotes the Reynolds number. Near the leading edge of the plate, the flow is accurately described by the Prandtl theory, and in particular by the self-similar Blasius profile~\eqref{eq:Blasius}. The trailing edge of the plate creates a disturbance, and the flow undergoes the so-called Goldstein singularity. The triple-deck theory describes specifically the transition from the Blasius profile on the left of the plate to the Goldstein near wake, which occurs after the trailing edge of the plate. This was formalized in the works of \cite{Stewartson68,Stewartson69,Neiland69,Messiter70} who proposed the {\em three deck} structure, and introduced the horizontal $\OO(\nu^{-\frac 5 8})$ and vertical $\OO(\nu^{-\frac 5 8}), \OO(\nu^{-\frac 3 8})$ length scales that are not present in the Prandtl theory. The notion of introducing different scales at the point of boundary layer separation appeared earlier in~\cite{Lighthill53}. We refer the reader to the works~\cite{Smith82,Meyer82,Klingenberg83,Meyer83,CowleyTutty85,Lagree} for an overview of the ideas and history behind the Triple Deck model, and include a formal derivation of the unsteady triple deck model (cf.~system \eqref{eq:TD:main}--\eqref{eq:TD:A:p} below) in Appendix~\ref{sec:appendix} of this paper.
Specifically, following e.g.~\cite[Section 3]{Smith82}, \cite[Section 2]{CowleyTutty85}, or~\cite[Section 4]{Duck87}, we consider the {\em unsteady Triple Deck system} posed in the half space $\mathbb{H}= \{ (x,y) \colon y> 0\}$, which is given by
\begin{subequations}
\label{eq:TD:main}
\begin{align}
{\partial}_t u + u {\partial}_x u + v {\partial}_y u &= - {\partial}_x p + {\partial}_y^2 u \label{eq:TD:main:a}\\
{\partial}_x u + {\partial}_y v &=0 \label{eq:TD:main:b}\\
{\partial}_y p &= 0 \label{eq:TD:main:c}
\end{align}
\end{subequations}
supplemented with the boundary conditions
\begin{subequations}
\label{eq:TD:BC}
\begin{align} \label{eq:TD:BC:a}
&u(x,0,t)= v(x,0,t) = 0, \\ \label{eq:TD:BC:b}
&u(x,y,t) - y \to 0 \qquad \mbox{as} \qquad x \to - \infty \\ \label{eq:TD:BC:c}
&u(x,y,t) - y \to A(x,t) \qquad \mbox{as} \qquad y \to + \infty
\end{align}
\end{subequations}
and with the {\em pressure-displacement relation}
\begin{align}
\label{eq:TD:A:p}
p(x,t) = \frac{1}{\pi} p.v. \int_{\RR} \frac{({\partial}_x A)(\bar x,t)}{x-\bar x} d\bar x = |{\partial}_x| A(x,t)
\end{align}
characteristic of incompressible flows. Note that other pressure-displacement relations may be specified in the case of supersonic and jet-like compressible boundary layer flows (see e.g.~\cite[Equation (2.4a-e)]{CowleyTutty85}). The system \eqref{eq:TD:main}--\eqref{eq:TD:A:p} is supplemented with a compatible initial condition
\begin{align}
\label{eq:TD:IC}
u|_{t=0} = u_0
\end{align}
on $\HH$. Note that while equations \eqref{eq:TD:main} look the same as the classical Prandtl equations, the main difference is that $p$ is not given in advance, and neither is the value of $u$ at the top of the lower deck. Instead, these are coupled by the relation \eqref{eq:TD:A:p} above.
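For orientation only, the pressure-displacement relation \eqref{eq:TD:A:p} can be checked numerically: on a periodic grid the map $A \mapsto p$ is the Fourier multiplier $|\xi|$, which agrees with the principal value integral in \eqref{eq:TD:A:p} interpreted as the Hilbert transform (symbol $-i\,\mathrm{sgn}\,\xi$) of ${\partial}_x A$. The following sketch is ours and not part of the analysis; the grid parameters and the test profile $A$ are arbitrary choices.
\begin{verbatim}
import numpy as np

N, L = 512, 40.0
x = np.linspace(-L/2, L/2, N, endpoint=False)
xi = 2 * np.pi * np.fft.fftfreq(N, d=L / N)        # Fourier frequencies

A = np.exp(-x**2)                                  # test displacement A(x)
p = np.fft.ifft(np.abs(xi) * np.fft.fft(A)).real   # p = |d_x| A as a multiplier

# consistency check with the Hilbert-transform form p = H(d_x A)
dxA = np.fft.ifft(1j * xi * np.fft.fft(A)).real
H_dxA = np.fft.ifft(-1j * np.sign(xi) * np.fft.fft(dxA)).real
assert np.allclose(p, H_dxA)
\end{verbatim}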
The Triple Deck and the IBL models share the common feature that $u$, respectively $u^P$, converge as $y \to \infty$ to a function that is not given \textit{a-priori}, and must be determined through the evolution. However, in contrast to \eqref{eq:IBL} in which $u^P|_{y \to \infty}$ is governed ultimately by the Euler equations, the behavior of $u|_{y \to \infty}$ in~\eqref{eq:TD:main} is governed by the Benjamin-Ono equations, as is shown in Section~\ref{sec:BO:evo}. It has been alluded to in~\cite{DDLM18} (see also~\cite{Smith79,Duck87}) that the Triple Deck has favorable stability features relative to the IBL model, but to our knowledge this has not been studied mathematically until the present work. In fact, it is not known whether the system \eqref{eq:TD:main}--\eqref{eq:TD:IC} is well-posed, even locally in time.
The unsteady Triple Deck model poses significant mathematical difficulties, because the map $u \mapsto {\partial}_x p$ loses two derivatives in $x$ (in view of \eqref{eq:TD:A:p}) and half a derivative in $y$ (due to the restriction to the boundary $\{y=\infty\}$). The {\em two derivative loss} in $x$ seems to preclude the well-posedness of the system, even in spaces of analytic functions. Our goal is to show that due to a certain cancellation in $L^2_x$, the loss is only of one derivative in $x$, and hence the system admits local in time real-analytic solutions with respect to $x$, which are Sobolev smooth in $y$. Our main result is Theorem~\ref{thm:main} below, which may be stated informally as: assume that $A_0(x)$ is real-analytic and that the function $u_0(x,y) - y - A_0(x)$ is tangentially real-analytic and lies in a weighted $L^2$ space with respect to the normal variable; then there exists locally in time a unique solution in this class. We discuss the main difficulties and the main ideas of the proof in Section~\ref{sec:main} below. Prior to this, we introduce the functional setting of the paper and the decomposition \eqref{eq:decomp:1}--\eqref{eq:decomp:2} of the solution.
\subsection{Main result and functional setting}
\subsubsection{Analytic norms}
In order to measure decay in $y$, we introduce the $y$-weight given by
\begin{align}
\rho (y,t) = e^{\frac{y^{2}}{8(1+t/{\varepsilon})}} \, .
\label{eq:rho:def}
\end{align}
Note that $\rho$ does not depend on $x$. The parameter, ${\varepsilon}$, appearing in \eqref{eq:rho:def} will be selected small, based only on the initial datum, according to the relation \eqref{choice:eps:a}. The time scale over which we prove existence will be, without loss of generality, restricted to $t \in [0,T_\ast]$, where $T_\ast \le {\varepsilon}$, so that in particular the quotient $t/{\varepsilon}$ appearing in the weight above is bounded.
We denote the Fourier transform of a function $f$ in the $x$-variable only, at frequency $\xi \in \RR$, as $f_\xi= f_\xi(y,t)$. Since $f$ is real-valued, we automatically have that $f_{-\xi} = \overline{f_\xi}$.
For $\tau = \tau(t)>0$, $r > 2$, and a function $f(x,y,t)$ we use Plancherel to define
\begin{align} \label{eq:norm}
\norm{f}_{\tau,r}^2 &= \norm{\rho \, e^{\tau |{\partial}_x|} f}_{H^r_x L^2_y}^2 = \int_{\RR} \int_0^\infty \rho^2(y) \abs{f_\xi(y)}^2 e^{2\tau |\xi|} \brak{\xi}^{2r} \,\mathrm{d} y \,\mathrm{d} \xi
\end{align}
where $\brak{\xi}^2 = 1 + |\xi|^2$, and we have suppressed the time dependence of $\tau, \rho$, and $f_\xi$.
Associated to this norm, it is convenient to also define the inner product
\begin{align*}
\brak{f,g}_{\tau,r} = \int_\mathbb{R}\int_0^\infty \rho^2(y) f_\xi(y) \overline{g_\xi(y)} e^{2\tau |\xi|} \brak{\xi}^{2r} \,\mathrm{d} y \,\mathrm{d} \xi
\end{align*}
where the time dependence is suppressed. The idea to use real-analytic norms of the type~\eqref{eq:norm} goes back to the work of Foias-Temam~\cite{FoiasTemam89} in the context of the Navier-Stokes equations, and to~\cite{LevermoreOliver97} in the context of the Euler equations. See also~\cite{OliverTiti00,KukavicaVicol09,BonaGrujicKalisch10} and references therein.
Notice that by definition of the $\norm{\cdot}_{\tau,r}$ norm, we have the identity
\begin{align}
\frac{1}{2} \frac{d}{dt} \norm{f}_{\tau,r}^2 + (-\dot{\tau}) \norm{|{\partial}_x|^{1/2}f}_{\tau,r}^2
&= \int_{\RR} \left( \frac 12 \frac{d}{dt} \norm{\rho \, f_\xi}_{L^2_y([0,\infty))}^2 \right) e^{2\tau |\xi|} \brak{\xi}^{2r} \,\mathrm{d}\xi
\notag\\
&= \brak{{\partial}_t f + f {\partial}_t (\log \rho), f}_{\tau,r} \, .
\label{eq:ODE:XY}
\end{align}
Therefore, a decrease in the analyticity radius yields a $|{\partial}_x|^{1/2}$-dissipative term.
We introduce similar analytic norms for functions $g(x,t)$, which are independent of $y$. Here, we let
\begin{align}
\label{eq:tilde:X:norms}
\norm{g}_{\widetilde{\tau,r}}^2 = \norm{e^{\tau|{\partial}_x|} g}_{H^r_x}^2 = \int_{\RR} |g_\xi|^2 e^{2\tau |\xi|} \brak{\xi}^{2r} \,\mathrm{d}\xi
\end{align}
with associated inner product
\begin{align*}
\brak{f,g}_{\widetilde{\tau,r}} = \int_\mathbb{R} f_\xi \overline{g_\xi} e^{2\tau |\xi|} \brak{\xi}^{2r} \,\mathrm{d} \xi \,.
\end{align*}
As in \eqref{eq:ODE:XY}, we have
\begin{align}
\frac 12 \frac{d}{dt} \norm{g}_{\widetilde{\tau,r}}^2 + (-\dot{\tau}) \norm{|{\partial}_x|^{1/2} g}_{\widetilde{\tau,r}}^2 = \int_{\RR} \left(\frac 12 \frac{d}{dt} |g_\xi|^2 \right) e^{2\tau |\xi|} \brak{\xi}^{2r} \,\mathrm{d}\xi = \brak{{\partial}_t g, g}_{\widetilde{\tau,r}}\, .
\label{eq:ODE:tilde:XY}
\end{align}
Having defined the basic norms, we turn to the definition of the total norms used in this paper and the corresponding unknowns that we measure using these norms.
\subsubsection{Representation of the solution and the total energy}
We shall work with the following decomposition of the solution $u(x,y,t)$ of \eqref{eq:TD:main}--\eqref{eq:TD:BC}. We write
\begin{align}
u(x,y,t) = y + w(x,y,t)
\label{eq:decomp:1}
\end{align}
where the function $w(x,y,t)$ is defined in terms of its tangential (i.e., with respect to $x$) Fourier transform coefficients, $w_\xi(y,t) = \int_{\RR} w(x,y,t) e^{-i x \xi} dx$, given by
\begin{align}
w_\xi(y,t) = \bar w_{\xi}(y,t) + A_\xi(t) \theta_\xi(y,t)
\, .
\label{eq:decomp:2}
\end{align}
The Gaussian weight function $\theta_\xi(y,t)$ is defined {\em explicitly} in \eqref{eq:theta:def} below.
The coefficients $A_{\xi}(t)$ are nothing but the Fourier coefficients in $x$ of the function $A(x,t) = \lim_{y\to \infty} w(x,y,t)$. In Section~\ref{sec:BO:evo} we show that $A$ obeys a {\em forced Benjamin-Ono equation}, cf.~\eqref{eq.A}, which arises as a compatibility equation for \eqref{eq:TD:main}--\eqref{eq:TD:BC}. On the other hand, the main unknowns $\bar w_{\xi}(y,t)$ are shown in Section~\ref{sec:Prandtl:evo} to solve an evolution equation, cf.~\eqref{eq:bar:w:evo}--\eqref{eq:NLMB:def}, which has a very similar structure to the classical Prandtl system, with the addition of certain singular coupling terms to the evolution for $A$. The point is that the original function $u$ may be reconstructed explicitly from knowledge of the Fourier coefficients $\bar w_\xi$ and $A_\xi$. Accordingly, our total norms measure the analytic regularity of $\bar w$ and $A$.
Throughout the paper we fix a value of $r>2$ and a smooth cutoff function $\chi(y)$ approximating ${\bf 1}_{\{y\geq 2\}}$ (defined in \eqref{eq:chi:def} below). For a function $\tau(t)>0$ to be defined later, for a parameter $\delta > 1$ to be chosen precisely later, and with the norms $\norm{\cdot}_{\tau,r}$ and $\norm{\cdot}_{\widetilde{\tau,r}}$ defined in \eqref{eq:norm} respectively \eqref{eq:tilde:X:norms}, we let
\begin{subequations}
\label{eq:cluster:fuck}
\begin{align} \label{X:tau:norm:def}
\norm{(\bar w,A)}_{X_\tau}
&= \norm{\bar w(t)}_{\tau(t),r} + \frac{1}{\delta} \norm{\chi {\partial}_y \bar w}_{\tau(t),r-1/2} + \norm{A(t)}_{\widetilde{\tau(t),r}} \\ \label{Y:tau:norm:def}
\norm{(\bar w,A)}_{Y_\tau}
&= \norm{\abs{{\partial}_x}^{1/2} \bar w(t)}_{\tau(t),r} + \frac{1}{\delta} \norm{\chi \abs{{\partial}_x}^{1/2} {\partial}_y \bar w}_{\tau(t),r-1/2} + \norm{\abs{{\partial}_x}^{1/2} A(t)}_{\widetilde{\tau(t),r}}\\ \label{norm:Z:tau}
\norm{ \bar w }_{Z_\tau}
&= \norm{{\partial}_y \bar w(t)}_{\tau(t),r} + \frac{1}{\delta} \norm{\chi {\partial}_{yy} \bar w}_{\tau(t),r-1/2} \\ \label{norm:H:tau}
\norm{ \bar w }_{H_\tau}
&= \norm{y \bar w(t)}_{\tau(t),r} + \frac{1}{\delta} \norm{y \chi {\partial}_{y} \bar w}_{\tau(t),r-1/2}
\, .
\end{align}
\end{subequations}
The $X_\tau$ norm is the main analytic-in-$x$ and weighted $L^2$-in-$y$ norm used in this paper. The $Y_\tau$ norm quantifies dissipation in the $x$ variable due to a shrinking analyticity radius, the $Z_\tau$ norm quantifies dissipation in the $y$ variable due to the ${\partial}_{yy}$ terms present in the equation, while the $H_\tau$ norm encodes a gain of a $y$ weight which is important due to the unboundedness of the domain $[0,\infty)$. Associated to these norms we define the total analytic energy via
\begin{align} \label{def:E}
E(T) &= \sup_{t \in [0, T]} \norm{(\bar w,A)}_{X_{\tau(t)}}^2 + \int_0^T \! \| (\bar{w}, A) \|_{Y_\tau}^2 \,\mathrm{d} t + \frac{1}{16} \int_0^T \! \norm{\bar w}_{Z_{\tau(t)}}^2 \,\mathrm{d} t + \frac{1}{64 {\varepsilon}} \int_0^T \! \norm{\bar w}_{H_{\tau(t)}}^2 \,\mathrm{d} t.
\end{align}
\subsubsection{Main Theorem and Overview of Proof}
\label{sec:main}
We are now ready to state the main result.
\begin{theorem}[\bf Main Theorem] \label{thm:main} Fix an initial radius of analyticity $\tau_0 > 0$, and any $r > 2$, where $r$ is the analytic weight parameter appearing in \eqref{eq:norm}. We decompose the initial data in the following form, written on the Fourier side in the tangential variable
\begin{align*}
u^{(0)}_\xi(y) = y + \theta_\xi(y, 0) A^{(0)}(\xi) + \bar{w}^{(0)}_\xi(y) \, ,
\end{align*}
where $\theta_\xi$ is defined in \eqref{eq:theta:def}. Assume that $\bar{w}^{(0)}$ and $A^{(0)}$ satisfy
\begin{align*}
E_0 := \| (\bar{w}^{(0)}, A^{(0)}) \|_{X_{10 \tau_0}} < \infty,
\end{align*}
where the analytic energy is defined in \eqref{def:E}. Then there exists a $T_\ast > 0$ depending on $\tau_0, r, E_0$, and there exists a unique solution $(\bar{w}, A)$ to the coupled system \eqref{eq:bar:w:evo}, \eqref{eq:A:evo} with initial datum $(\bar{w}, A)|_{t = 0} = (\bar{w}^{(0)}, A^{(0)})$ such that the total analytic energy $E(t)$ defined in \eqref{def:E} is bounded as
\begin{align*}
\sup_{t \in [0, T_\ast]} E(t) \leq 2 E_0 \,.
\end{align*}
Equivalently, this defines a unique tangentially analytic solution
\begin{align*}
u_\xi = y + \theta_\xi(t,y) A_\xi(t) + \bar{w}_\xi(t,y)
\end{align*}
to the original system \eqref{eq:TD:main}--\eqref{eq:TD:A:p}.
\end{theorem}
The central difficulty towards establishing Theorem~\ref{thm:main} is the apparent loss of two $x$-derivatives in the coupled equations \eqref{eq:TD:main:a} and \eqref{eq:TD:A:p}. Indeed, replacing $-{\partial}_x p$ on the right-hand side of \eqref{eq:TD:main:a} with ${\partial}_x |{\partial}_x|A(x,t)$ according to \eqref{eq:TD:A:p}, and subsequently replacing $A$ with $u(x,y) - y|_{y \rightarrow \infty}$, we see that, in terms of a formal derivative count we have
\begin{align} \label{loss}
{\partial}_t u + u {\partial}_x u + v {\partial}_y u = - {\partial}_x |{\partial}_x| u(x,\infty) \, .
\end{align}
This loss of \textit{two} $x$ derivatives precludes the well-posedness of the system even in analytic spaces.
Our starting point is the observation of the skew-adjointness of the loss term on the right-hand side of \eqref{loss}. Indeed, for any smooth decaying function $g(x)$ one has
\begin{align}
\int_\mathbb{R}g \, {\partial}_x |{\partial}_x| g = 0 = \int_\mathbb{R}|{\partial}_x| g \, {\partial}_x |{\partial}_x| g.
\label{eq:cancellation}
\end{align}
The cancellation~\eqref{eq:cancellation} holds because we have $|{\partial}_x| = - H {\partial}_x$, where $H$ is the Hilbert transform, and both $H$ and ${\partial}_x$ are skew-adjoint operators on $L^2(\mathbb{R})$.
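The identity \eqref{eq:cancellation} is also easy to confirm numerically; the following sketch is purely illustrative (our own, with arbitrary grid parameters and test function) and checks that the symbol $i\xi|\xi|$ of ${\partial}_x|{\partial}_x|$ is purely imaginary and odd, hence the operator is skew-adjoint on $L^2(\mathbb{R})$.
\begin{verbatim}
import numpy as np

N, L = 1024, 60.0
x = np.linspace(-L/2, L/2, N, endpoint=False)
xi = 2 * np.pi * np.fft.fftfreq(N, d=L / N)

g = np.exp(-x**2) * np.cos(3 * x)                  # smooth decaying test function
Lg = np.fft.ifft(1j * xi * np.abs(xi) * np.fft.fft(g)).real   # d_x|d_x| g
print((g * Lg).sum() * (L / N))                    # ~ 0 up to rounding error
\end{verbatim}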
This motivates our main reformulation of the system and the extraction of the unknowns we analyze. First, we notice that according to \eqref{eq:TD:BC:c}, $u$ grows like $y$ as $y \to \infty$, while ${\partial}_x u = - {\partial}_y v$ converges to ${\partial}_x A(t,x)$, a bounded function as $y \to \infty$. Hence $v = - I_y[{\partial}_x u]$ also grows like $y$ at $\infty$. Here and throughout the paper we write
\begin{align}
I_y[f] = \int_0^y f(y') \,\mathrm{d} y' \, .
\label{eq:funky:notation}
\end{align}
We are thus led to introduce the expansion of $v$ at $\infty$:
\begin{align*}
v = y v_1(t,x) + v_0(t,x) + \OO(y^{-1})\quad \text{ as } \quad y \to \infty.
\end{align*}
The coefficient $v_0(t,x)$ will play a crucial role in the analysis, and is given by the nonlocal integral $I_\infty[{\partial}_x u - {\partial}_x A]$. We thus reinterpret \eqref{eq:TD:main} as giving three relations simultaneously, corresponding to the orders of growth as $y \to \infty$. First, collecting the contributions from \eqref{eq:TD:main:a} which are $\OO(y)$ (arising from the terms $u {\partial}_x u$ and $ v {\partial}_y u$), we obtain the asymptotic information that $v_1 = - {\partial}_x A$. Second, we collect the terms which contribute $\OO(1)$ terms at $y = \infty$. This yields a forced Benjamin-Ono equation for the unknown $A(t,x)$:
\begin{align} \label{A:eq:Intro}
{\partial}_t A + A {\partial}_x A + {\partial}_x |{\partial}_x|A = - v_0 \quad \text{ on } \quad \mathbb{R}.
\end{align}
The cancellation alluded to earlier in \eqref{eq:cancellation} is now readily apparent upon computing the inner-product of $A$ against the Benjamin-Ono equation.
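For instance, testing \eqref{A:eq:Intro} against $A$ in $L^2(\mathbb{R})$ gives, at least formally for smooth decaying solutions,
\begin{align*}
\frac 12 \frac{d}{dt} \int_{\mathbb{R}} A^2 \,\mathrm{d} x = - \int_{\mathbb{R}} A^2 {\partial}_x A \,\mathrm{d} x - \int_{\mathbb{R}} A \, {\partial}_x |{\partial}_x| A \,\mathrm{d} x - \int_{\mathbb{R}} A \, v_0 \,\mathrm{d} x = - \int_{\mathbb{R}} A \, v_0 \,\mathrm{d} x \, ,
\end{align*}
since the cubic term is an exact derivative and the dispersive term vanishes by \eqref{eq:cancellation}, so that the only remaining contribution is the coupling through $v_0$.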
Having extracted the $\OO(y)$ and $\OO(1)$ contributions, the third step is to extract the functions in \eqref{eq:TD:main:a} which decay as $y\to \infty$. The relevant unknown, $\bar{w}$, is then a homogenized version of $u$, and it obeys a Prandtl type equation (see~\eqref{eq.hom.w.a} below). This procedure gives rise to the start of our analysis: we analyze simultaneously a Benjamin-Ono equation for $A$, forced by $v_0 = v_0(\bar{w})$, as well as a Prandtl type equation for $\bar{w}$, forced by $A$-related quantities. Summarizing, the simultaneous system of equations we extract is (leaving $F$ as an unspecified forcing term for now)
\begin{subequations}
\begin{align} \label{intro:BO}
&{\partial}_t A + A {\partial}_x A + {\partial}_x |{\partial}_x| A = - v_0(\bar{w}) \quad \text{ on } \quad \mathbb{R} \hspace{10 mm} \text{(Benjamin-Ono)}, \\ \label{intro:PR}
&{\partial}_t \bar{w} - {\partial}_y^2 \bar{w} + y {\partial}_x \bar w = F(\bar{w}, A) \hspace{10 mm} \text{ on } \quad \mathbb{H}\hspace{10 mm} \text{(Prandtl-type)} \, .
\end{align}
\end{subequations}
The cancellation \eqref{eq:cancellation} applies for the quantity $A$, which describes $u$ at $y = \infty$, and thus \eqref{eq:cancellation} should be interpreted as solving the derivative-loss problem {\em at $y = \infty$}. We now must continue exploiting this cancellation for values of $y < \infty$. Indeed, the two-derivative loss is still lurking for finite $y$ through the forcing term in \eqref{intro:PR}. Specifically, the reader should consult $\mathcal{B}_\xi(\bar{w}, A)$, defined in \eqref{eq:B:def:*}, and in particular the most singular contributions arise from the ${\partial}_t A_\xi$ term, which in turn creates a $i \xi |\xi| A_\xi$ contribution, again yielding a two-derivative loss. Our observation is that such a term is accompanied by a factor of $(1-\theta_\xi)$. By selecting the lift function $\theta_\xi$ in a frequency-dependent manner, we are able to gain back $\langle {\partial}_x \rangle^{3/2}$. The idea of tangential-frequency-dependent boundary layer lifts was also successfully used in~\cite{GVMV18} in the context of the hydrostatic Navier-Stokes equations. For us, the selection of a $\xi$-dependent lift, coupled with Hardy-type inequalities with the homogeneous weights of $y$, enables us to gain back enough regularity {\em near $\{y = 0\}$}.
A further difficulty that arises in our analysis is the loss of one $y$-weight. This occurs due to the non-local integral in \eqref{eq:M:def}, which forces the $\bar{w}$ evolution. In order to handle the loss of a $y$ weight, we control the quantity $y {\partial}_y \bar{w}$ in $L^2$, which is seen in the specification of the $\| \cdot \|_{H_\tau}$ norm in \eqref{norm:H:tau}. To control this component of the $H_\tau$ norm, we in turn need to commute the vector-field $y {\partial}_y$ with the Prandtl system, which necessitates an analysis of the vorticity equation that governs the evolution of ${\partial}_y \bar{w}$. To successfully analyze the vorticity equation, we capitalize on two essential features. First, we only require this enhanced vector-field for values of $y \ge 1$, so we do not see the boundary effect of the vorticity. Second, we can control the $y {\partial}_y$ term in a {\em weaker norm} in terms of $x$ regularity, which is the reason that the second terms in \eqref{norm:Z:tau} and \eqref{norm:H:tau} are measured on the Sobolev scale $r - 1/2$. This type of {\em lagging norm} structure is essential for our scheme of estimates to close, and in particular prevents a further loss of $y$-weight in the vorticity equation.
\begin{remark}[Notation] We use heavily the notation $\lesssim$ to suppress universal constants. It is important to emphasize that these universal constants are \textit{independent} of small values of $t, {\varepsilon}, \delta$, where ${\varepsilon}$ is the weight parameter in \eqref{eq:rho:def}, and $\delta$ is the parameter appearing in our norms, \eqref{X:tau:norm:def}--\eqref{norm:H:tau}.
\end{remark}
\section{The Prandtl-Benjamin-Ono splitting}
\subsection{Benjamin-Ono evolution for $A$}
\label{sec:BO:evo}
We need to understand the asymptotic behavior at $y = \infty$ a bit more carefully. First, from \eqref{eq:TD:BC} we obtain
\begin{align*}
u \sim y + A(x,t), \qquad {\partial}_x u \rightarrow {\partial}_x A, \qquad {\partial}_t u \rightarrow {\partial}_t A, \qquad {\partial}_y u \rightarrow1 , \, \qquad \text{ as } y \rightarrow \infty.
\end{align*}
The function $v(x,y,t) = - I_y[{\partial}_x u](x,y,t) = - \int_0^y {\partial}_x u(x,z,t) \, dz$ is expected to grow like $y$ at $\infty$, so we let
\begin{align}
v \sim v_0(x,t) + v_1(x,t)y \, \qquad \text{ as } y \rightarrow \infty.
\label{eq:v:asymptiotic:1}
\end{align}
We now evaluate the original equation \eqref{eq:TD:main:a} at $y = \infty$ and use the above information to obtain
\begin{align} \label{eq:A:w:y}
{\partial}_t A + (y + A) {\partial}_x A + (v_0 + v_1 y) + {\partial}_x |{\partial}_x| A = 0.
\end{align}
Due to the super-exponential weights in $y$, $\rho(t,y)$, appearing in our norm \eqref{eq:norm}, we guarantee that the remaining terms in \eqref{eq:TD:main:a} vanish sufficiently rapidly as $y \to \infty$ so as to not contribute towards \eqref{eq:A:w:y}. From here, we extract two equations by matching the orders of $y$ for $y \to \infty$:
\begin{subequations}
\begin{align}
&{\partial}_x A + v_1 = 0, \label{eq.v1} \\
&{\partial}_t A(x,t) + A{\partial}_x A + v_0 + {\partial}_x |{\partial}_x|A = 0. \label{eq.v0}
\end{align}
\end{subequations}
We now compute the function $v_0$ in a different fashion:
\begin{align}
v = - I_y[{\partial}_x u] = - I_y \Big[ ({\partial}_x u - {\partial}_x A) + {\partial}_x A \Big] = - y {\partial}_x A -I_y [{\partial}_x u - {\partial}_x A].
\label{eq:v:asymptiotic:2}
\end{align}
Here we use the notation in \eqref{eq:funky:notation} for $I_y[\cdot]$.
From \eqref{eq:v:asymptiotic:1} and \eqref{eq:v:asymptiotic:2} we deduce that $v_1 = - A_x$ and that
\begin{align}
v_0(x,t) = -I_\infty [{\partial}_x u(x,y,t) - {\partial}_x A(x,t)] \, .
\label{eq:v0:def}
\end{align}
Thus, $v_0$ can be expressed in terms of $u$ and $A$. To emphasize this, we will write $v_0 = v_0(u,A)$. Note that we need to understand $u$ (or ${\partial}_x u$) for all $y$ in order to understand $v_0$ (it is nonlocal). Inserting back into \eqref{eq.v0}, we obtain the evolution equation for $A$:
\begin{align} \label{eq.A}
{\partial}_t A + A {\partial}_x A + {\partial}_x |{\partial}_x| A = - v_0(u,A) \text{ for } x \in \mathbb{R}.
\end{align}
\subsection{Prandtl-type evolution for $\bar{w}$}
\label{sec:Prandtl:evo}
The first step towards homogenizing the boundary conditions for $u$ in the equation \eqref{eq:TD:main:a} is to remove the linear profile $y$ and introduce the unknown
\begin{align*}
w = u - y
\end{align*}
so that \eqref{eq:TD:BC:a}--\eqref{eq:TD:BC:c} yield
\begin{align*}
w|_{y = 0} = w|_{x = -\infty} = w|_{x = \infty} = 0, \qquad w|_{y \to \infty} = A(x,t).
\end{align*}
We do not need to change $v$ here, as it is given by $-I_y({\partial}_x u) = -I_y( {\partial}_x w)$. It follows that the evolution equation for $w$ is
\begin{align*}
{\partial}_t w + w {\partial}_x w + (y {\partial}_x w +v) + v {\partial}_y w - {\partial}_{y}^2 w + {\partial}_x |{\partial}_x|A(x,t) =0.
\end{align*}
Summarizing, the unknowns $w$ and $v$ take the place of the usual Prandtl unknowns, and the equation obeyed by $w$ is nothing but the usual Prandtl system with a few extra linear terms
\begin{subequations}
\label{eq:w:evolution}
\begin{align}
&{\partial}_t w - {\partial}_{yy}w + w{\partial}_x w + v{\partial}_y w + (y {\partial}_x w + v) + {\partial}_x |{\partial}_x| A = 0, \label{start.Prandtl.a} \\
&w|_{y = 0} = w|_{x = - \infty} = w|_{x = +\infty} = 0, \qquad w|_{y = \infty} = A(x,t), \label{start.Prandtl.b} \\
&{\partial}_x w +{\partial}_yv = 0, \quad v|_{y = 0} = 0, \quad \Rightarrow v = - I_y[ {\partial}_x w], \label{start.Prandtl.c}
\end{align}
\end{subequations}
The system \eqref{eq:w:evolution} is of course coupled to the evolution equation for $A$ given in \eqref{eq.A}.
In order to analyze the system \eqref{eq:w:evolution}, it is convenient to homogenize the boundary condition of $w$ as $y\to \infty$. For this purpose we introduce a lift of the boundary condition at $y = \infty$ which depends on the tangential frequency; accordingly, we first record the system obeyed by the Fourier transform in the $x$ variable of \eqref{start.Prandtl.a}--\eqref{start.Prandtl.c}. This yields
\begin{subequations}
\begin{align*}
&{\partial}_t w_\xi - {\partial}_{yy} w_\xi + (w {\partial}_x w + v {\partial}_y w)_\xi + i \xi (y w_\xi - I_y[w_\xi]) + i \xi |\xi| A_\xi = 0, \\
&w_\xi|_{y = 0} = 0, \qquad w_\xi|_{y \rightarrow \infty} = A_\xi, \\
&i \xi w_\xi + {\partial}_y v_\xi = 0, \qquad v_\xi|_{y= 0} = 0 \Rightarrow v_\xi = - i \xi I_y[w_\xi].
\end{align*}
\end{subequations}
For each $\xi \in \RR$ we introduce a lift function $\theta_\xi$ (given explicitly in \eqref{eq:theta:def} below) and define new unknowns,
\begin{align*}
\bar{w}_\xi &:= w_\xi(y,t) - A_\xi(t) \theta_\xi(y,t) \\
\bar{v}_\xi &:= v_\xi + i \xi A_\xi I_y[\theta_\xi]
\,.
\end{align*}
We derive from \eqref{eq:w:evolution} the evolution for $\bar w_\xi$, which reads
\begin{subequations}
\label{eq:bar:w:evo}
\begin{align} \label{eq.hom.w.a}
&{\partial}_t \bar{w}_\xi - {\partial}_{yy}\bar{w}_\xi + i \xi y \bar{w}_\xi + \Ncal_\xi(\bar{w}, \bar{w}) + \Lcal_\xi (\bar{w} , A ) + \Mcal_{\xi} (\bar{w} , A ) + \Bcal_\xi (\bar{w}, A) = 0, \\ \label{eq.hom.w.a.b}
&\bar{v}_\xi = - i\xi I_y [\bar{w}_\xi] , \\ \label{eq.hom.w.a.c}
&\bar{w}_\xi|_{y=0} = \bar{w}_\xi|_{y\to \infty} = 0,
\end{align}
\end{subequations}
where in \eqref{eq.hom.w.a} above we have defined
\begin{subequations}
\label{eq:NLMB:def}
\begin{align}
\Ncal_\xi ( \bar{w}, \bar{w} )
&:= i \int_{\RR} \Big(\bar w_\eta (\xi-\eta) \bar w_{\xi-\eta} - \eta I_y[\bar w_\eta] {\partial}_y \bar w_{\xi-\eta} \Big) \,\mathrm{d}\eta
\label{eq:N:def}
\\
\Lcal_\xi (\bar{w} , A )
&:= i \int_{\RR}\biggl( \bar w_\eta (\xi-\eta) A_{\xi-\eta} \theta_{\xi-\eta} + A_\eta \theta_\eta (\xi-\eta) \bar w_{\xi-\eta} - \eta I_y[\bar w_\eta] A_{\xi-\eta} {\partial}_y \theta_{\xi-\eta} \biggr) \,\mathrm{d}\eta
\label{eq:L:def}
\\
\Mcal_{\xi}(\bar{w}, A)
&:= -i \int_{\RR} \eta A_\eta I_y[\theta_\eta] {\partial}_y \bar w_{\xi-\eta} \,\mathrm{d} \eta
\label{eq:M:def}
\\
\Bcal_\xi (\bar{w},A)
&:= A_\xi ({\partial}_t - {\partial}_{yy})\theta_\xi + \left(\theta_\xi -1\right) {\partial}_t A_\xi + ({\partial}_t A_\xi + i \xi |\xi| A_\xi) + i\xi \Big( A_\xi \left( y \theta_\xi - I_y[\theta_\xi] \right) -I_y[\bar w_\xi] \Big) \ensuremath{\nonumber}otag\\
&\qquad
+ i \int_{\RR} \Big( A_\eta \theta_\eta (\xi-\eta) A_{\xi-\eta} \theta_{\xi-\eta} - \eta A_\eta I_y[\theta_\eta] A_{\xi-\eta} {\partial}_y \theta_{\xi-\eta} \Big) \,\mathrm{d} \eta
\,.\label{eq:B:def}
\end{align}
\end{subequations}
At this stage, we make the following choice for the lift function
\begin{align}
\theta_\xi(y,t) = 1 - e^{- \frac{y^2 \brak{\xi}^2}{2(1+t/{\varepsilon})}} \, ,
\label{eq:theta:def}
\end{align}
where ${\varepsilon}>0$ is a parameter to be chosen later. We emphasize here that $\theta_\xi(y,0)$ {\em does not depend on ${\varepsilon}$}, which is crucial for the proof. Informally, ${\varepsilon}$ will be selected small relative to universal constants, and relative to the size of the initial data (which is independent of ${\varepsilon}$). The time of existence $T_\ast$ will be selected small relative to ${\varepsilon}$ and in particular we restrict ourselves to $T_\ast \le {\varepsilon}$, so that the quotient $t/{\varepsilon}$ is always bounded by $1$.
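For later use in estimating the forcing term $\Bcal_\xi$, we record the derivatives of the lift; differentiating \eqref{eq:theta:def} directly gives
\begin{align*}
{\partial}_y \theta_\xi = \frac{y \brak{\xi}^2}{1+t/{\varepsilon}} \, e^{- \frac{y^2 \brak{\xi}^2}{2(1+t/{\varepsilon})}} \,,
\qquad
({\partial}_t - {\partial}_{yy}) \theta_\xi = \left( \frac{y^2 \brak{\xi}^4}{(1+t/{\varepsilon})^2} - \frac{y^2 \brak{\xi}^2}{2 {\varepsilon} (1+t/{\varepsilon})^2} - \frac{\brak{\xi}^2}{1+t/{\varepsilon}} \right) e^{- \frac{y^2 \brak{\xi}^2}{2(1+t/{\varepsilon})}} \,.
\end{align*}
In particular, every $y$- or $t$-derivative of $\theta_\xi$ carries the Gaussian factor $e^{- y^2 \brak{\xi}^2 / (2(1+t/{\varepsilon}))}$, which decays faster than the weight $\rho$ defined in \eqref{eq:rho:def} grows.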
It is also useful to denote
\begin{align} \label{c:theta:xi}
c_{\theta,\xi}(t) := I_\infty[1-\theta_\xi](t) = \int_0^\infty (1-\theta_\xi(y,t)) dy = \int_0^\infty e^{- \frac{y^2 \brak{\xi}^2}{2(1+t/{\varepsilon})}} dy = \frac{1}{\brak{\xi}} \sqrt{\frac{\pi (1+t/{\varepsilon})}{2}} \,.
\end{align}
With $\theta_\xi$ as defined by \eqref{eq:theta:def}, we identify the function $ v_0(\bar w, A)$ from \eqref{eq:v0:def} as
\begin{align}
(v_0(\bar w, A))_\xi(t) &= - i\xi \int_0^\infty \left( \bar w_\xi(y,t) - A_\xi(t) (1-\theta_\xi(y,t)) \right) \,\mathrm{d} y \ensuremath{\nonumber}otag\\
&= - i\xi I_{\infty}[\bar w_\xi](t) + i\xi c_{\theta,\xi}(t) A_\xi(t)
\,.
\label{eq:v0:new}
\end{align}
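Indeed, the first equality in \eqref{eq:v0:new} follows by taking the Fourier transform in $x$ of \eqref{eq:v0:def}: since ${\partial}_x u = {\partial}_x w$ and $w_\xi = \bar w_\xi + A_\xi \theta_\xi$, we have
\begin{align*}
\big( I_\infty[{\partial}_x u - {\partial}_x A] \big)_\xi = i \xi \, I_\infty[ w_\xi - A_\xi ] = i \xi \, I_\infty\big[ \bar w_\xi - A_\xi (1-\theta_\xi) \big] \,,
\end{align*}
while the second equality uses $I_\infty[1-\theta_\xi] = c_{\theta,\xi}$ from \eqref{c:theta:xi}.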
With this notation, we return to \eqref{eq.A} which in view of \eqref{eq:v0:new} becomes
\begin{align}
{\partial}_t A_\xi + i\xi c_{\theta,\xi} A_\xi - i\xi I_{\infty}[\bar w_\xi] + i \xi |\xi| A_\xi = - i \int_{\RR} A_\eta (\xi-\eta) A_{\xi-\eta} \,\mathrm{d} \eta \, .
\label{eq:A:evo}
\end{align}
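For the reader's convenience we also record the elementary identities used to pass from \eqref{eq.A} to \eqref{eq:A:evo} (with the Fourier normalization implicit in \eqref{eq:A:evo} and \eqref{eq:NLMB:def}, in which convolutions carry no constant prefactor):
\begin{align*}
\big( A {\partial}_x A \big)_\xi = i \int_{\RR} (\xi - \eta) A_\eta A_{\xi-\eta} \,\mathrm{d} \eta \,,
\qquad
\big( {\partial}_x |{\partial}_x| A \big)_\xi = i \xi |\xi| A_\xi \,,
\end{align*}
so that \eqref{eq:A:evo} is precisely the Fourier transform in $x$ of the forced Benjamin-Ono equation \eqref{eq.A}, with $v_0(\bar w, A)$ expressed through \eqref{eq:v0:new}.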
We notice that $A$ enters the evolution equation for $\bar w$ only through the coefficients of $\Bcal, \Lcal$, and $\Mcal$, whereas ${\partial}_x \bar w$ enters the evolution equation for $A$ only through its vertical mean, encoded in $v_0(\bar w,A)$.
Lastly, using that $A$ obeys the Benjamin-Ono equation \eqref{eq:A:evo}, and using that $c_{\theta,\xi} = I_\infty[1-\theta_\xi]$, we may rewrite the forcing term $\Bcal_\xi(\bar w,A)$ given in \eqref{eq:B:def} as
\begin{align}
\Bcal_\xi (\bar{w},A)
& = A_\xi ({\partial}_t - {\partial}_{yy})\theta_\xi + \left(\theta_\xi -1\right) {\partial}_t A_\xi + i\xi \left( I_\infty[\bar w_\xi] - I_y[\bar w_\xi] \right) \ensuremath{\nonumber}otag\\
&\qquad + i\xi A_\xi \bigl( y (\theta_\xi-1) - ( I_\infty[1-\theta_\xi] - I_y[1-\theta_\xi] )\bigr) \ensuremath{\nonumber}otag\\
&\qquad + i \int_{\RR} \bigl( (\xi-\eta) A_\eta A_{\xi-\eta} \left( \theta_\eta \theta_{\xi-\eta} -1\right) - \eta A_\eta A_{\xi-\eta} I_y[\theta_\eta] {\partial}_y \theta_{\xi-\eta} \bigr) \,\mathrm{d} \eta \,.
\label{eq:B:def:*}
\end{align}
Because of our choice of $\theta_\xi$, every single term in $\Bcal_\xi(\bar w,A)$ decays to $0$ as $y\to \infty$.
Throughout the rest of the paper, we use the formulation \eqref{eq:B:def:*} of the $\Bcal_\xi$ term (instead of \eqref{eq:B:def}).
\section{Energy estimates and the proof of the Main Theorem}
\label{sec:energy}
In this section we give the energy estimates which prove Theorem~\ref{thm:main}, under the assumption that the nonlinear terms may be bounded suitably (cf.~Lemma~\ref{lem:main}). These terms are then estimated in Section~\ref{sec:main:lemma}.
\subsection{Energy inequality for $A$}
In view of \eqref{eq:ODE:tilde:XY} we take the product of equation \eqref{eq:A:evo} with the complex conjugate $\overline{{A}_\xi}$, and integrate in $\xi$ against $e^{2\tau |\xi|} \langle \xi \rangle^{2r}$ to obtain
\begin{align}
\brak{{\partial}_t A, A}_{\widetilde{\tau,r}}
&= - \int_{\RR} e^{2\tau|\xi|} \brak{\xi}^{2r} \left(i \xi (c_{\theta, \xi} +|\xi|) |A_\xi|^2 - i \xi I_{\infty}[\bar{w}_\xi] \overline{A_\xi} + i \overline{A_\xi} \int_{\mathbb{R}} A_\eta(\xi - \eta) A_{\xi - \eta} \,\mathrm{d} \eta \right) \,\mathrm{d} \xi
\ensuremath{\nonumber}otag\\
&= T_{\mathcal A,1} - T_{\mathcal A,2}
\label{eq:t:A:energy:*}
\end{align}
where we have defined
\begin{subequations}
\label{eq:T:A12}
\begin{align}
T_{\mathcal A,1} &= \brak{I_{\infty}[{\partial}_x \bar w], A}_{\widetilde{\tau,r}} = \int_{\RR} i \xi I_\infty[\bar{w}_\xi] \overline{A_\xi} e^{2 \tau |\xi|} \langle \xi \rangle^{2r} \,\mathrm{d} \xi
\label{eq:T:A12:a}\\
T_{\mathcal A,2} &= \brak{A {\partial}_x A, A}_{\widetilde{\tau,r}} = \int_{\RR} \int_{\RR} i (\xi - \eta) A_\eta A_{\xi - \eta} \overline{A_\xi} e^{2 \tau |\xi|} \langle \xi \rangle^{2r} \,\mathrm{d} \eta \,\mathrm{d} \xi
\label{eq:T:A12:b}
\, .
\end{align}
\end{subequations}
In \eqref{eq:t:A:energy:*} we have used that $A$ is real-valued, so that $A_{-\xi} = \overline{A_{\xi}}$, and that $c_{\theta, \xi} = c_{\theta,-\xi} \in \mathbb{R}$ (cf.~\eqref{c:theta:xi}). Combining \eqref{eq:ODE:tilde:XY} with \eqref{eq:t:A:energy:*} we arrive at
\begin{align}
\frac{d}{2 dt} \ensuremath{\nonumber}orm{A}_{\widetilde{\tau,r}}^2 + (-\dot{\tau}) \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} A}_{\widetilde{\tau,r}}^2 & \leq \abs{T_{\mathcal A,1}} +\abs{T_{\mathcal A,2}}
\label{eq:dt:A:energy}
\end{align}
which is the desired energy inequality for the analytic norm of $A$. The terms on the right side of \eqref{eq:dt:A:energy} are estimated in Lemma~\ref{lem:main}, bounds \eqref{eq:Shaq:A}.
\subsection{Energy inequality for $\bar{w}$}
In view of \eqref{eq:ODE:XY} we need to compute $\brak{{\partial}_t \bar w + \bar w \, {\partial}_t (\log \rho),\bar w}_{\tau,r}$. Note that by the definition \eqref{eq:rho:def} we have
\begin{align}
{\partial}_t (\log \rho) = -\frac{y^2}{8 {\varepsilon} (1+t/{\varepsilon})^2}
\label{eq:LeBron:0}
\end{align}
and thus we obtain the damping weight-gaining term
\begin{align}
\brak{\bar w \, {\partial}_t (\log \rho),\bar w}_{\tau,r} = -\frac{1}{8 {\varepsilon} (1+t/{\varepsilon})^2} \ensuremath{\nonumber}orm{y \bar w}_{\tau,r}^2
\, .
\label{eq:LeBron:1}
\end{align}
In order to compute $\brak{{\partial}_t \bar w,\bar w}_{\tau,r}$, we multiply \eqref{eq.hom.w.a} by $\rho^2 \overline{\bar w_\xi} e^{2\tau|\xi|} \brak{\xi}^{2r}$ and integrate over $(\xi,y) \in\RR\times [0,\infty)$ to obtain
\begin{align}
\brak{{\partial}_t \bar w,\bar w}_{\tau,r}
&= - \ensuremath{\nonumber}orm{{\partial}_y \bar w}_{\tau,r}^2 - \frac{1}{2(1+t/{\varepsilon})} \int_{\RR} \int_0^\infty {\partial}_y \bar w_\xi \, y \, \overline{\bar w_\xi} \rho^2 e^{2\tau|\xi|} \brak{\xi}^{2r} \,\mathrm{d} y \,\mathrm{d} \xi \ensuremath{\nonumber}otag\\
&\qquad - i \int_{\RR} \int_0^\infty \xi \abs{\bar w_\xi}^2 \rho^2 e^{2\tau|\xi|} \brak{\xi}^{2r} \,\mathrm{d} y \,\mathrm{d} \xi - T_{\Ncal} - T_{\Lcal} - T_{\Mcal} - T_{\Bcal} \ensuremath{\nonumber}otag\\
&\leq -\frac 12 \ensuremath{\nonumber}orm{{\partial}_y \bar w}_{\tau,r}^2 +\frac{1}{8(1+t/{\varepsilon})^2} \ensuremath{\nonumber}orm{y \bar w}_{\tau,r}^2 + \abs{T_{\Ncal}} + \abs{T_{\Lcal}} + \abs{T_{\Mcal}} + \abs{T_{\Bcal}} \,
\label{eq:LeBron:2}
\end{align}
where we have used that $\bar w$ is real-valued, so that $\abs{\bar w_{-\xi}} = \abs{\bar w_\xi}$ and the integrand below is odd in $\xi$, whence
\begin{align*}
i \int_{\RR} \int_0^\infty \xi \abs{\bar w_\xi}^2 \rho^2 e^{2\tau|\xi|} \brak{\xi}^{2r} \,\mathrm{d} y \,\mathrm{d} \xi
= 0
\end{align*}
and we have denoted
\begin{subequations}
\label{eq:T:cal:1}
\begin{align}
T_{\Ncal} &= \int_{\RR} \int_0^\infty \Ncal_\xi (\bar w, \bar w) \overline{\bar w_\xi} \rho^2 e^{2\tau|\xi|} \brak{\xi}^{2r} \,\mathrm{d} y \,\mathrm{d} \xi
\\
T_{\Lcal} &= \int_{\RR} \int_0^\infty \Lcal_\xi(\bar w,A) \overline{\bar w_\xi} \rho^2 e^{2\tau|\xi|} \brak{\xi}^{2r} \,\mathrm{d} y \,\mathrm{d} \xi
\\
T_{\Mcal} &= \int_{\RR} \int_0^\infty \Mcal_\xi(\bar w,A) \overline{\bar w_\xi} \rho^2 e^{2\tau|\xi|} \brak{\xi}^{2r} \,\mathrm{d} y \,\mathrm{d} \xi
\\
T_{\Bcal} &= \int_{\RR} \int_0^\infty \Bcal_\xi(\bar w,A) \overline{\bar w_\xi} \rho^2 e^{2\tau|\xi|} \brak{\xi}^{2r} \,\mathrm{d} y \,\mathrm{d} \xi
\end{align}
\end{subequations}
with $\Ncal_\xi, \Lcal_\xi, \Mcal_\xi, \Bcal_\xi$, as defined in \eqref{eq:NLMB:def}.
Combining \eqref{eq:ODE:XY} with \eqref{eq:LeBron:1}--\eqref{eq:LeBron:2} we arrive at
\begin{align}
\frac{d}{2 dt} \ensuremath{\nonumber}orm{\bar w}_{\tau,r}^2 + (-\dot{\tau}) \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} \bar w}_{\tau,r}^2 + \frac 12 \ensuremath{\nonumber}orm{{\partial}_y \bar w}_{\tau,r}^2 + \frac{1-{\varepsilon}}{8 {\varepsilon} (1+t/{\varepsilon})^2} \ensuremath{\nonumber}orm{y \bar w}_{\tau,r}^2
\leq \abs{T_{\Ncal}} + \abs{T_{\Lcal}} + \abs{T_{\Mcal}} + \abs{T_{\Bcal}}
\label{eq:LeBron:3}
\end{align}
which is the desired energy inequality for the analytic norm of $\bar w$. The four error terms on the right side of \eqref{eq:LeBron:3} are estimated in Lemma~\ref{lem:main}, bounds~\eqref{eq:Shaq:w}.
\subsection{Energy inequality for ${\partial}_y \bar{w}$}
In order to overcome a loss of $y$ weight in the term $T_{\Mcal}$, we need to also consider the evolution of the normalized vorticity ${\partial}_y \bar w$. We apply ${\partial}_y$ to \eqref{eq.hom.w.a} to obtain
\begin{align} \label{eq:diff:y}
{\partial}_t {\partial}_y \bar{w}_\xi - {\partial}_y^2 {\partial}_y \bar{w}_\xi + i \xi \bar{w}_\xi + i y \xi {\partial}_y \bar{w}_\xi
= - \left({\partial}_y \mathcal{N}_\xi + {\partial}_y \mathcal{L}_\xi + {\partial}_y \mathcal{M}_{\xi} + {\partial}_y \mathcal{B}_\xi \right).
\end{align}
Note that some of the terms on the right side of \eqref{eq:diff:y} cancel; the cancellation for $\Ncal_\xi$ is spelled out after \eqref{eq:dy:nonlinear:terms} below.
Using that $I_y[{\partial}_y \bar{w}_\xi] = \bar{w}_\xi = {\partial}_y I_y[\bar w_\xi]$, and upon noting the symmetries $\eta \leftrightarrow \xi-\eta$ in the integrals below,
we may rewrite
\begin{subequations}
\label{eq:dy:nonlinear:terms}
\begin{align}
{\partial}_y \Ncal_\xi(\bar w,\bar w)
&= \Ncal_\xi(\bar w, {\partial}_y \bar w) = i \int_{\RR} \bigl(\bar w_\eta (\xi-\eta) {\partial}_y \bar w_{\xi-\eta} - \eta I_y[\bar w_\eta] {\partial}_{yy} \bar w_{\xi-\eta} \bigr) \,\mathrm{d}\eta
\label{eq:dy:nonlinear:terms:a}
\\
{\partial}_y \Lcal_\xi(\bar w, A)
&= i \int_{\RR}\bigl({\partial}_y \bar w_\eta (\xi-\eta) A_{\xi-\eta} \theta_{\xi-\eta} + \bar w_{\eta} (\xi-\eta)A_{\xi-\eta}{\partial}_y \theta_{\xi-\eta} \ensuremath{\nonumber}otag\\
&\qquad \qquad \qquad + A_\eta \theta_\eta (\xi-\eta) {\partial}_y \bar w_{\xi-\eta} - \eta I_y[\bar w_\eta] A_{\xi-\eta} {\partial}_{yy} \theta_{\xi-\eta} \bigr) \,\mathrm{d}\eta
\label{eq:dy:nonlinear:terms:b}
\\
{\partial}_y \Mcal_\xi(\bar w,A)
&= -i \int_{\RR} \bigl( \eta A_\eta \theta_\eta {\partial}_y \bar w_{\xi-\eta} + \eta A_\eta I_y[\theta_\eta] {\partial}_{yy} \bar w_{\xi-\eta} \bigr) \,\mathrm{d} \eta
\label{eq:dy:nonlinear:terms:c}
\\
{\partial}_y \Bcal_\xi(\bar w,A)
&= A_\xi ({\partial}_t - {\partial}_{yy}) {\partial}_y \theta_\xi + {\partial}_y \theta_\xi \, {\partial}_t A_\xi - i\xi \bar w_\xi + i \xi A_\xi y {\partial}_y \theta_\xi
\ensuremath{\nonumber}otag\\
&\qquad \qquad \qquad + i \int_{\RR} \bigl( (\xi-\eta) A_\eta A_{\xi-\eta} \theta_\eta {\partial}_y \theta_{\xi-\eta} - \eta A_\eta A_{\xi-\eta} I_y[\theta_\eta] {\partial}_{yy} \theta_{\xi-\eta} \bigr) \,\mathrm{d} \eta
\label{eq:dy:nonlinear:terms:d}
\, .
\end{align}
\end{subequations}
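To illustrate the cancellation used in \eqref{eq:dy:nonlinear:terms:a}: by the product rule and ${\partial}_y I_y[\bar w_\eta] = \bar w_\eta$,
\begin{align*}
{\partial}_y \Ncal_\xi(\bar w,\bar w)
= i \int_{\RR} \Big( {\partial}_y \bar w_\eta (\xi-\eta) \bar w_{\xi-\eta} + \bar w_\eta (\xi-\eta) {\partial}_y \bar w_{\xi-\eta} - \eta \bar w_\eta {\partial}_y \bar w_{\xi-\eta} - \eta I_y[\bar w_\eta] {\partial}_{yy} \bar w_{\xi-\eta} \Big) \,\mathrm{d}\eta \,,
\end{align*}
and after the change of variables $\eta \mapsto \xi - \eta$ the first term in the integrand becomes $\eta \bar w_\eta {\partial}_y \bar w_{\xi-\eta}$, which cancels the third; this leaves exactly \eqref{eq:dy:nonlinear:terms:a}. The formulas \eqref{eq:dy:nonlinear:terms:b}--\eqref{eq:dy:nonlinear:terms:d} are obtained in the same manner.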
It turns out that we only need information on the vorticity ${\partial}_y \bar w$ away from ${\partial} \mathbb{H}= \{y= 0\}$, and for this purpose we introduce a cut-off function $\chi = \chi(y)$ with $0 \leq \chi' \leq 1$, satisfying
\begin{align}
\chi(y) =
\begin{cases}
0, \quad \text{ on } y \in [0, 1) \\
1, \quad \text{ on } y \ge 6 \, .
\end{cases}
\label{eq:chi:def}
\end{align}
Note that $\chi$ is independent of time.
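Such a cut-off exists; for instance (one of many admissible choices), one may take
\begin{align*}
\chi(y) = \int_0^y \psi(s) \,\mathrm{d} s \,, \qquad \text{with } \psi \in C^\infty(\RR), \quad 0 \le \psi \le 1, \quad \operatorname{supp} \psi \subset [1,6], \quad \int_{\RR} \psi(s) \,\mathrm{d} s = 1 \,,
\end{align*}
so that $\chi' = \psi \in [0,1]$, $\chi \equiv 0$ on $[0,1)$, and $\chi \equiv 1$ on $[6,\infty)$.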
Our goal is to estimate $\ensuremath{\nonumber}orm{\chi \, {\partial}_y \bar w}_{\tau,r-1/2}$. The shift in Sobolev regularity for the vorticity norm, i.e.\ the change $r\mapsto r-1/2$, is essential for the energy estimate to close.
Using \eqref{eq:ODE:XY} with $f = \chi \, {\partial}artial_y \bar w$, property \eqref{eq:LeBron:0} of the weight $\rho$, and the evolution equation \eqref{eq:diff:y} we obtain
\begin{align}
&
\frac{d}{2dt} \ensuremath{\nonumber}orm{\chi {\partial}_y \bar w}_{\tau,r-1/2}^2 + (-\dot \tau) \ensuremath{\nonumber}orm{\chi |{\partial}_x|^{1/2} {\partial}_y \bar w}_{\tau,r-1/2}^2
\ensuremath{\nonumber}otag\\
&= \brak{\chi \, {\partial}_t {\partial}_y \bar w, \chi \, {\partial}_y \bar w}_{\tau,r-1/2} + \brak{\chi \, {\partial}_y \bar w {\partial}_t (\log \rho), \chi \, {\partial}_y \bar w}_{\tau,r-1/2}
\ensuremath{\nonumber}otag\\
&= -\frac{1}{8{\varepsilon} (1+t/{\varepsilon})^2} \ensuremath{\nonumber}orm{y \chi \, {\partial}_y \bar w}_{\tau,r-1/2}^2
- \ensuremath{\nonumber}orm{\chi {\partial}_{yy} \bar w}_{\tau,r-1/2}^2 - \int_{\RR} \int_0^\infty {\partial}_{yy} \bar w_\xi {\partial}_y(\chi^2 \rho^2) \overline{{\partial}_y \bar w_\xi} \brak{\xi}^{2r-1} e^{2\tau|\xi|} \,\mathrm{d} y \,\mathrm{d} \xi
\ensuremath{\nonumber}otag\\
&\qquad - i \int_{\RR} \int_0^\infty \xi \bar w_\xi \chi^2 \rho^2 \overline{{\partial}_y \bar w_\xi} \brak{\xi}^{2r-1} e^{2\tau|\xi|} \,\mathrm{d} y \,\mathrm{d} \xi - T_{{\partial}_y \Ncal} - T_{{\partial}_y \Lcal} - T_{{\partial}_y \Mcal} - T_{{\partial}_y \Bcal} \,.
\label{eq:Kobe:0}
\end{align}
Here we have used the cancellation property (which again follows from the realness of $\bar w$ and the oddness of the integrand in $\xi$)
\begin{align*}
i \int_{\RR} \int_0^\infty y \xi \abs{{\partial}_y \bar w_\xi}^2 \chi^2 \rho^2 \brak{\xi}^{2r-1} e^{2\tau |\xi|} \,\mathrm{d} y \,\mathrm{d} \xi = 0
\end{align*}
and have denoted
\begin{subequations}
\label{eq:Kobe:1}
\begin{align}
T_{{\partial}_y \Ncal} &= \int_{\RR} \int_0^\infty {\partial}_y \Ncal_\xi (\bar w, \bar w) \overline{{\partial}_y \bar w_\xi} \chi^2 \rho^2 e^{2\tau|\xi|} \brak{\xi}^{2r-1} \,\mathrm{d} y \,\mathrm{d} \xi
\label{eq:Kobe:1:a}
\\
T_{{\partial}_y \Lcal} &= \int_{\RR} \int_0^\infty {\partial}_y \Lcal_\xi(\bar w,A) \overline{{\partial}_y \bar w_\xi} \chi^2 \rho^2 e^{2\tau|\xi|} \brak{\xi}^{2r-1} \,\mathrm{d} y \,\mathrm{d} \xi
\label{eq:Kobe:1:b}
\\
T_{{\partial}_y \Mcal} &= \int_{\RR} \int_0^\infty {\partial}_y \Mcal_\xi(\bar w,A) \overline{{\partial}_y \bar w_\xi} \chi^2 \rho^2 e^{2\tau|\xi|} \brak{\xi}^{2r-1} \,\mathrm{d} y \,\mathrm{d} \xi
\label{eq:Kobe:1:c}
\\
T_{{\partial}_y \Bcal} &= \int_{\RR} \int_0^\infty {\partial}_y \Bcal_\xi(\bar w,A) \overline{{\partial}_y \bar w_\xi} \chi^2 \rho^2 e^{2\tau|\xi|} \brak{\xi}^{2r-1} \,\mathrm{d} y \,\mathrm{d} \xi
\label{eq:Kobe:1:d}
\end{align}
\end{subequations}
with ${\partial}_y \Ncal_\xi, {\partial}_y \Lcal_\xi, {\partial}_y \Mcal_\xi, {\partial}_y \Bcal_\xi$, as defined in \eqref{eq:dy:nonlinear:terms}.
From \eqref{eq:Kobe:0}, the Cauchy-Schwarz inequality and the definitions of $\chi$ and $\rho$ we obtain
\begin{align}
&
\frac{d}{2dt} \ensuremath{\nonumber}orm{\chi {\partial}_y \bar w}_{\tau,r-1/2}^2 + (-\dot \tau) \ensuremath{\nonumber}orm{\chi |{\partial}_x|^{1/2} {\partial}_y \bar w}_{\tau,r-1/2}^2 + \frac{1-{\varepsilon}}{8{\varepsilon} (1+t/{\varepsilon})^2} \ensuremath{\nonumber}orm{y \chi \, {\partial}_y \bar w}_{\tau,r-1/2}^2 + \frac 14\ensuremath{\nonumber}orm{\chi {\partial}_{yy} \bar w}_{\tau,r-1/2}^2
\ensuremath{\nonumber}otag\\
&\qquad \leq \ensuremath{\nonumber}orm{{\partial}_y \bar w}_{\tau,r}^2 + \ensuremath{\nonumber}orm{ \bar w}_{\tau,r} \ensuremath{\nonumber}orm{ {\partial}_y \bar w}_{\tau,r}
+ \abs{T_{{\partial}_y \Ncal}} +\abs{T_{{\partial}_y \Lcal}} +\abs{T_{{\partial}_y \Mcal}} +\abs{T_{{\partial}_y \Bcal}} \,.
\label{eq:Kobe:2}
\end{align}
Here we have used that $\brak{\xi}\geq 1$. The remaining four error terms on the right side of \eqref{eq:Kobe:2} are bounded in Lemma~\ref{lem:main}, estimate~\eqref{eq:Shaq:dy:w}.
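For instance, the second term on the right side of \eqref{eq:Kobe:2} arises from the penultimate explicit term in \eqref{eq:Kobe:0}: using $\chi^2 \le 1$, the elementary bound $\abs{\xi} \brak{\xi}^{2r-1} \le \brak{\xi}^{2r}$, and the Cauchy-Schwarz inequality with respect to the measure $\rho^2 \,\mathrm{d} y \,\mathrm{d} \xi$, one finds
\begin{align*}
\abs{ \int_{\RR} \int_0^\infty \xi \bar w_\xi \chi^2 \rho^2 \overline{{\partial}_y \bar w_\xi} \brak{\xi}^{2r-1} e^{2\tau|\xi|} \,\mathrm{d} y \,\mathrm{d} \xi }
\le \| \bar w \|_{\tau,r} \, \| {\partial}_y \bar w \|_{\tau,r} \,.
\end{align*}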
\subsection{Nonlinear estimates}
The following lemma summarizes the available estimates for the error terms in \eqref{eq:dt:A:energy}, \eqref{eq:LeBron:3}, and \eqref{eq:Kobe:2}.
\begin{lemma}[\bf Main Nonlinear Lemma] \label{lem:main}
Assume that $r>2$ and that $t \le {\varepsilon}$. For the error terms in the $A$ energy estimate~\eqref{eq:dt:A:energy} we have
\begin{subequations}
\label{eq:Shaq:A}
\begin{align}
\abs{T_{\mathcal A,1}} &\lesssim \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} \bar w}_{\tau,r} \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} A}_{\widetilde{\tau,r}}
\\
\abs{T_{\mathcal A,2}} &\lesssim \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} A}_{\widetilde{\tau,r}}^2 \ensuremath{\nonumber}orm{A}_{\widetilde{\tau,r}} + \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} A}_{\widetilde{\tau,r}} \ensuremath{\nonumber}orm{A}_{\widetilde{\tau,r}}^2
\end{align}
\end{subequations}
for the error terms in the $\bar w$ energy estimate~\eqref{eq:LeBron:3} it holds that
\begin{subequations}
\label{eq:Shaq:w}
\begin{align}
\abs{T_{\mathcal N}} &\lesssim \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} \bar w}_{\tau,r} \ensuremath{\nonumber}orm{\brak{{\partial}_x}^{1/2} \bar w}_{\tau,r} \ensuremath{\nonumber}orm{{\partial}_y \bar w}_{\tau,r}
\label{eq:Barkley:*}
\\
\abs{T_{\mathcal L}} &\lesssim
\ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} \bar w}_{\tau,r} \ensuremath{\nonumber}orm{\bar w}_{\tau,r} \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} A}_{\widetilde{\tau,r}} +
\left(\ensuremath{\nonumber}orm{\bar w}_{\tau,r}^2 + \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} \bar w}_{\tau,r}^2\right) \ensuremath{\nonumber}orm{A}_{\widetilde{\tau,r}}
\label{eq:Jordan:*}
\\
\abs{T_{\mathcal M}} &\lesssim
\ensuremath{\nonumber}orm{\brak{{\partial}_x}^{1/2} \bar w}_{\tau,r} \ensuremath{\nonumber}orm{\brak{{\partial}_x}^{1/2} A}_{\widetilde{\tau,r}} \left( \ensuremath{\nonumber}orm{{\partial}_y \bar w}_{\tau,r} + \ensuremath{\nonumber}orm{y \chi {\partial}_y \bar w}_{\tau,r-1/2} \right)
\label{eq:Jordan:**}
\\
\abs{T_{\mathcal B}} &\lesssim
\frac{1}{{\varepsilon}} \ensuremath{\nonumber}orm{\bar w}_{\tau,r} \ensuremath{\nonumber}orm{A}_{\widetilde{\tau,r}} +
\ensuremath{\nonumber}orm{\brak{{\partial}_x}^{1/2} A}_{\widetilde{\tau,r}} \ensuremath{\nonumber}orm{{\partial}_y \bar w}_{\tau,r} + \ensuremath{\nonumber}orm{\abs{{\partial}_x}^{1/2} \bar w}_{\tau,r}^2
\ensuremath{\nonumber}otag\\
&\quad +
\ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} \bar w}_{\tau,r} \ensuremath{\nonumber}orm{\bar w}_{\tau,r} +
\ensuremath{\nonumber}orm{\brak{{\partial}_x}^{1/2} \bar w}_{\tau,r} \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} A}_{\widetilde{\tau,r}} \ensuremath{\nonumber}orm{A}_{\widetilde{\tau,r}}
\label{eq:Magic:*}
\end{align}
\end{subequations}
while for the error terms in the energy estimate~\eqref{eq:Kobe:2} for ${\partial}_y \bar w$ the estimates
\begin{subequations}
\label{eq:Shaq:dy:w}
\begin{align}
\abs{T_{{\partial}_y \mathcal N}} &\lesssim
\ensuremath{\nonumber}orm{\brak{{\partial}_x}^{1/2} \chi {\partial}_y \bar w}_{\tau,r-1/2}^2 \ensuremath{\nonumber}orm{{\partial}_y \bar w}_{\tau,r} + \ensuremath{\nonumber}orm{\chi {\partial}_{yy} \bar w}_{\tau,r-1/2} \ensuremath{\nonumber}orm{\chi {\partial}_{y} \bar w}_{\tau,r-1/2} \ensuremath{\nonumber}orm{\brak{{\partial}_x}^{1/2} \bar w}_{\tau,r}
\label{eq:Barkley:@}
\\
\abs{T_{{\partial}_y \mathcal L}} &\lesssim
\ensuremath{\nonumber}orm{\chi {\partial}_y \bar w}_{\tau,r-1/2}^2 \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} A}_{\widetilde{\tau,r}}
+ \ensuremath{\nonumber}orm{\chi {\partial}_y \bar w}_{\tau,r-1/2} \ensuremath{\nonumber}orm{\brak{{\partial}_x}^{1/2} \bar w}_{\tau,r} \ensuremath{\nonumber}orm{A}_{\widetilde{\tau,r}}
\ensuremath{\nonumber}otag\\
&\quad + \ensuremath{\nonumber}orm{\chi |{\partial}_x|^{1/2} {\partial}_y \bar w}_{\tau,r-1/2} \ensuremath{\nonumber}orm{\chi \brak{{\partial}_x}^{1/2} {\partial}_y \bar w}_{\tau,r-1/2} \ensuremath{\nonumber}orm{A}_{\widetilde{\tau,r}}
\label{eq:Jordan:@}
\\
\abs{T_{{\partial}_y \mathcal M}} &\lesssim \ensuremath{\nonumber}orm{\chi {\partial}_y \bar w}_{\tau,r-1/2}^2 \ensuremath{\nonumber}orm{\abs{{\partial}_x}^{1/2} A}_{\widetilde{\tau,r}} + \ensuremath{\nonumber}orm{y \chi {\partial}_y \bar w}_{\tau,r-1/2} \ensuremath{\nonumber}orm{\chi {\partial}_{yy} \bar w}_{\tau,r-1/2} \ensuremath{\nonumber}orm{\abs{{\partial}_x}^{1/2} A}_{\widetilde{\tau,r}}
\label{eq:Jordan:@:*}
\\ \ensuremath{\nonumber}
\abs{T_{{\partial}_y \mathcal B}} &\lesssim
\frac{1}{{\varepsilon}} \ensuremath{\nonumber}orm{A}_{\widetilde{\tau,r}} \ensuremath{\nonumber}orm{\chi {\partial}_y\bar w}_{\tau,r-1/2}
+ \ensuremath{\nonumber}orm{\brak{{\partial}_x}^{1/2} \bar w}_{\tau,r} \ensuremath{\nonumber}orm{\chi {\partial}_y\bar w}_{\tau,r-1/2} + \ensuremath{\nonumber}orm{\brak{{\partial}_x}^{1/2} A}_{\widetilde{\tau,r}} \ensuremath{\nonumber}orm{\chi {\partial}_y \bar w}_{\tau,r-1/2}
\\ &+ \ensuremath{\nonumber}orm{A}_{\widetilde{\tau,r}}^2 \ensuremath{\nonumber}orm{\chi {\partial}_y \bar w}_{\tau,r-1/2} + \ensuremath{\nonumber}orm{\abs{{\partial}_x}^{1/2} A}_{\widetilde{\tau,r}} \ensuremath{\nonumber}orm{A}_{\widetilde{\tau,r}} \ensuremath{\nonumber}orm{\chi {\partial}_y \bar w}_{\tau,r-1/2}
\label{eq:Magic:@}
\end{align}
\end{subequations}
hold. The implicit constants in the above estimates are independent of $\tau$, $t$, and ${\varepsilon}$ (they depend solely on $r$).
\end{lemma}
The proof of Lemma~\ref{lem:main} is given in Section~\ref{sec:main:lemma} below. Assuming that this lemma is established, we continue with the proof of the main theorem. Before doing so, we summarize the bounds proven in Lemma~\ref{lem:main} using the total norms defined in \eqref{eq:cluster:fuck} above. Estimate \eqref{eq:Shaq:A} shows that
\begin{align}
\left( \abs{T_{\mathcal A,1}} + \abs{T_{\mathcal A,2}} \right) \lesssim \left( 1 + \ensuremath{\nonumber}orm{(\bar w,A)}_{X_\tau}\right) \ensuremath{\nonumber}orm{(\bar w,A)}_{Y_\tau}^2 + \ensuremath{\nonumber}orm{(\bar w,A)}_{X_\tau}^3.
\label{eq:A:error:total}
\end{align}
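Here and below we use that, as the notation of \eqref{eq:cluster:fuck} suggests, the half-derivative norms $\| |{\partial}_x|^{1/2} \bar w \|_{\tau,r}$ and $\| |{\partial}_x|^{1/2} A \|_{\widetilde{\tau,r}}$ are controlled by $\| (\bar w,A) \|_{Y_\tau}$, while $\| \bar w \|_{\tau,r}$ and $\| A \|_{\widetilde{\tau,r}}$ are controlled by $\| (\bar w,A) \|_{X_\tau}$. Granting this, \eqref{eq:A:error:total} follows from \eqref{eq:Shaq:A} and Young's inequality; for instance,
\begin{align*}
\| |{\partial}_x|^{1/2} A \|_{\widetilde{\tau,r}} \| A \|_{\widetilde{\tau,r}}^2
\le \| (\bar w,A) \|_{Y_\tau} \| (\bar w,A) \|_{X_\tau}^2
\le \tfrac12 \| (\bar w,A) \|_{Y_\tau}^2 \| (\bar w,A) \|_{X_\tau} + \tfrac12 \| (\bar w,A) \|_{X_\tau}^3 \,.
\end{align*}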
The bounds \eqref{eq:Shaq:w}, combined with an ${\varepsilon}$-Young inequality applied to the second term in \eqref{eq:Magic:*}, yield
\begin{align}
&\abs{T_{\Ncal}}+\abs{T_{\Lcal}}+\abs{T_{\Mcal}}+\abs{T_{\Bcal}}\ensuremath{\nonumber}otag\\
&\quad \lesssim \ensuremath{\nonumber}orm{(\bar w,A)}_{Y_\tau}^2
\left( {\varepsilon}^{-1} + \ensuremath{\nonumber}orm{\bar w}_{Z_\tau} + \ensuremath{\nonumber}orm{(\bar w,A)}_{X_\tau} + \delta \ensuremath{\nonumber}orm{\bar w}_{H_\tau}\right)
\ensuremath{\nonumber}otag\\
&\qquad + \ensuremath{\nonumber}orm{(\bar w,A)}_{X_\tau}^2 \left({\varepsilon}^{-1} + \ensuremath{\nonumber}orm{\bar w}_{Z_\tau} + \ensuremath{\nonumber}orm{(\bar w,A)}_{X_\tau} + \delta \ensuremath{\nonumber}orm{\bar w}_{H_\tau} \right)
+ {\varepsilon} \ensuremath{\nonumber}orm{\bar w}_{Z_\tau}^2
\label{eq:w:error:total}
\end{align}
while the inequality \eqref{eq:Shaq:dy:w} implies
\begin{align}
&\delta^{-2}\left(\abs{T_{{\partial}_y \Ncal}}+\abs{T_{{\partial}_y \Lcal}}+\abs{T_{{\partial}_y \Mcal}}+\abs{T_{{\partial}_y \Bcal}}\right)\ensuremath{\nonumber}otag\\
&\quad \lesssim \ensuremath{\nonumber}orm{(\bar w,A)}_{Y_\tau}^2 \left( \delta^{-1}+ \ensuremath{\nonumber}orm{\bar w}_{Z_\tau} + (1+ \delta^{-1} ) \ensuremath{\nonumber}orm{(\bar w,A)}_{X_\tau}\right) + \ensuremath{\nonumber}orm{\bar w}_{Z_\tau} \ensuremath{\nonumber}orm{\bar w}_{H_\tau} \ensuremath{\nonumber}orm{(\bar w,A)}_{Y_\tau}
\ensuremath{\nonumber}otag\\
&\qquad + \ensuremath{\nonumber}orm{(\bar w,A)}_{X_\tau}^2 \left( {\varepsilon}^{-1} \delta^{-1} + \delta^{-1} + \ensuremath{\nonumber}orm{\bar w}_{Z_\tau}+ (1+ \delta^{-1} ) \ensuremath{\nonumber}orm{(\bar w,A)}_{X_\tau} \right)
\,.
\label{eq:dw:error:total}
\end{align}
The implicit constants in \eqref{eq:A:error:total}, \eqref{eq:w:error:total}, and \eqref{eq:dw:error:total} only depend on $r$, since we have assumed $t , {\varepsilon} \leq 1$.
\subsection{Proof of the Main Theorem}
In order to prove Theorem~\ref{thm:main} we combine the energy estimates \eqref{eq:dt:A:energy}, \eqref{eq:LeBron:3}, and \eqref{eq:Kobe:2} (the latter multiplied by the small factor $\delta^{-2}$) with the error estimates \eqref{eq:A:error:total}, \eqref{eq:w:error:total}, and \eqref{eq:dw:error:total}, to obtain, for universal constants $C_0, \widetilde{C}_0$,
\begin{align}
&\frac{d}{2dt} \ensuremath{\nonumber}orm{(\bar w,A)}_{X_\tau}^2 + (-\dot \tau) \ensuremath{\nonumber}orm{(\bar w,A)}_{Y_\tau}^2 + \frac 18 \ensuremath{\nonumber}orm{\bar w}_{Z_\tau}^2 + \frac{1-{\varepsilon}}{8 {\varepsilon} (1+t/{\varepsilon})^2} \ensuremath{\nonumber}orm{\bar w}_{H_\tau}^2
\ensuremath{\nonumber}otag\\
&\le C_0 \Big( \delta^{-2} \ensuremath{\nonumber}orm{\bar w}_{Z_\tau}^2 + \ensuremath{\nonumber}orm{\bar w}_{Z_\tau} \ensuremath{\nonumber}orm{\bar w}_{H_\tau} \ensuremath{\nonumber}orm{(\bar w,A)}_{Y_\tau}
\ensuremath{\nonumber}otag\\
&\quad + \ensuremath{\nonumber}orm{(\bar w,A)}_{Y_\tau}^2 \left( ({\varepsilon}^{-1} + \delta^{-1}) + \ensuremath{\nonumber}orm{\bar w}_{Z_\tau} +(1+ \delta^{-1}) \ensuremath{\nonumber}orm{(\bar w,A)}_{X_\tau}+ \delta \ensuremath{\nonumber}orm{\bar w}_{H_\tau} \right)
\ensuremath{\nonumber}otag\\ \ensuremath{\nonumber}
&\quad + \ensuremath{\nonumber}orm{(\bar w,A)}_{X_\tau}^2 \Big( ( {\varepsilon}^{-1} + {\varepsilon}^{-1} \delta^{-1} + \delta^{-1})+ \ensuremath{\nonumber}orm{\bar w}_{Z_\tau}+ (1+ \delta^{-1}) \ensuremath{\nonumber}orm{(\bar w,A)}_{X_\tau} + \delta \ensuremath{\nonumber}orm{\bar w}_{H_\tau} \Big) \\ \ensuremath{\nonumber}
& \le \widetilde{C}_0 \delta^{-2} \ensuremath{\nonumber}orm{\bar w}_{Z_\tau}^2 + \frac{1}{100} \| \bar{w} \|_{Z_\tau}^2 + \widetilde{C}_0 \| \bar{w} \|_{H_\tau}^2 \| (\bar{w}, A) \|_{Y_\tau}^2
\ensuremath{\nonumber}otag\\
&\quad +\widetilde{C}_0 \ensuremath{\nonumber}orm{(\bar w,A)}_{Y_\tau}^2\left( ( {\varepsilon}^{-1} + \delta^{-1}) + \ensuremath{\nonumber}orm{\bar w}_{Z_\tau} + (1+ \delta^{-1}) \ensuremath{\nonumber}orm{(\bar w,A)}_{X_\tau}+ \delta \ensuremath{\nonumber}orm{\bar w}_{H_\tau} \right)
\ensuremath{\nonumber}otag\\ \label{diesel:1}
&\quad + \widetilde{C}_0\ensuremath{\nonumber}orm{(\bar w,A)}_{X_\tau}^2 \left( ({\varepsilon}^{-1} + {\varepsilon}^{-1}\delta^{-1} + \delta^{-1})+ \ensuremath{\nonumber}orm{\bar w}_{Z_\tau}+ (1+ \delta^{-1}) \ensuremath{\nonumber}orm{(\bar w,A)}_{X_\tau} + \delta \ensuremath{\nonumber}orm{\bar w}_{H_\tau} \right).
\end{align}
\ensuremath{\nonumber}oindent To go from the second inequality to the final inequality, we have simply used Young's inequality for products to split the trilinear term, and denoted by $\widetilde{C}_0$ the resulting (universal) constant. The constant $\widetilde{C}_0$ is independent of the parameters $\delta, {\varepsilon}$. We now take $\delta \gg 1$ so as to ensure that
\begin{align} \label{choice:delta}
\frac{1}{100} + \frac{\widetilde{C}_0}{\delta^{2}} \leq \frac{1}{16},
\end{align}
\ensuremath{\nonumber}oindent upon which the first two $\| \bar{w} \|_{Z_\tau}^2$ terms in \eqref{diesel:1} can be absorbed into the left-hand side. This yields the bound
\begin{align} \ensuremath{\nonumber}
&\frac{d}{2dt} \ensuremath{\nonumber}orm{(\bar w,A)}_{X_\tau}^2 + (-\dot \tau) \ensuremath{\nonumber}orm{(\bar w,A)}_{Y_\tau}^2 + \frac{1}{16} \ensuremath{\nonumber}orm{\bar w}_{Z_\tau}^2 + \frac{1-{\varepsilon}}{8 {\varepsilon} (1+t/{\varepsilon})^2} \ensuremath{\nonumber}orm{\bar w}_{H_\tau}^2 \\
&\quad \leq \widetilde{C}_1 \| (\bar{w}, A) \|_{Y_\tau}^2 \Big( \| \bar{w} \|_{H_\tau}^2 + \left( {\varepsilon}^{-1} + \ensuremath{\nonumber}orm{\bar w}_{Z_\tau} + \ensuremath{\nonumber}orm{(\bar w,A)}_{X_\tau}+ \delta \ensuremath{\nonumber}orm{\bar w}_{H_\tau} \right) \Big)
\ensuremath{\nonumber}otag\\ \ensuremath{\nonumber}
&\qquad \qquad + \widetilde{C}_1 \ensuremath{\nonumber}orm{(\bar w,A)}_{X_\tau}^2 \left( {\varepsilon}^{-1} + \ensuremath{\nonumber}orm{\bar w}_{Z_\tau}+ \ensuremath{\nonumber}orm{(\bar w,A)}_{X_\tau} + \delta \ensuremath{\nonumber}orm{\bar w}_{H_\tau} \right),
\end{align}
for another universal constant $\widetilde{C}_1$, which is again independent of ${\varepsilon}$ and of $\delta$.
Multiplying through by $2$, absorbing the resulting universal constants into the definitions of $\Gamma_1, \Gamma_2$ below, and taking ${\varepsilon} \leq 1/64$ (recall that $t \le {\varepsilon}$, so that $(1+t/{\varepsilon})^2 \le 4$), we obtain
\begin{align*}
&\frac{d}{dt} \ensuremath{\nonumber}orm{(\bar w,A)}_{X_\tau}^2 + (-\dot \tau) \ensuremath{\nonumber}orm{(\bar w,A)}_{Y_\tau}^2 + \frac{1}{16} \ensuremath{\nonumber}orm{\bar w}_{Z_\tau}^2 + \frac{1}{64 {\varepsilon} } \ensuremath{\nonumber}orm{\bar w}_{H_\tau}^2 \\
&\quad \leq \Gamma_1(t) \| (\bar{w}, A) \|_{Y_\tau}^2 + \Gamma_2(t) \ensuremath{\nonumber}orm{(\bar w,A)}_{X_\tau}^2,
\end{align*}
where we have defined
\begin{subequations}
\begin{align} \label{def:gamma:1}
&\Gamma_1(t) := C_1 \Big( \| \bar{w} \|_{H_\tau}^2 + \left( {\varepsilon}^{-1} + \frac 14 \ensuremath{\nonumber}orm{\bar w}_{Z_\tau} + \ensuremath{\nonumber}orm{(\bar w,A)}_{X_\tau}+ \delta \ensuremath{\nonumber}orm{\bar w}_{H_\tau} \right) \Big) \\ \label{def:gamma:2}
&\Gamma_2(t) := C_2 \Big( {\varepsilon}^{-1} + \frac{1}{4} \ensuremath{\nonumber}orm{\bar w}_{Z_\tau}+ \ensuremath{\nonumber}orm{(\bar w,A)}_{X_\tau} + \delta \ensuremath{\nonumber}orm{\bar w}_{H_\tau} \Big).
\end{align}
\end{subequations}
Here $C_1, C_2 \geq 1$ are universal constants.
We now make the selection of
\[
\dot{\tau} = - \Gamma_1 -1
\]
from which the following integral identity and inequality follow:
\begin{align} \label{Int:id}
&\tau(t) = \tau_0 - t - \int_0^t \Gamma_1(s) \,\mathrm{d} s, \qquad E(t) \le E(0) + \abs{ \int_0^t \Gamma_2(s) \| (\bar{w}, A) \|_{X_{\tau(s)}}^2 \,\mathrm{d} s },
\end{align}
where the total analytic energy, $E(t)$, has been defined in \eqref{def:E}. The main result will now follow from:
\begin{lemma}
\label{lem:eps:choice}
Fix the parameter $\delta$ according to \eqref{choice:delta}, and let $C_1, C_2 \geq 1$ be the universal constants from \eqref{def:gamma:1}--\eqref{def:gamma:2}. If the parameter ${\varepsilon}$ and the time of existence $T_\ast$ satisfy simultaneously the inequalities
\begin{subequations}
\label{eq:eps:choice}
\begin{align} \label{choice:eps:a}
&\frac{3}{2} C_1 {\varepsilon} E_0 \leq \frac{\tau_0}{8}, \\ \label{choice:T:b}
&T_\ast^{\frac 1 4} C_2 (1 + \delta) (T_\ast^{\frac 12} {\varepsilon}^{-1} + 1) \le 1, \\ \label{choice:T:c}
&T_\ast^{\frac 1 2} C_1 (1 + \delta) (T_\ast^{\frac 1 2} {\varepsilon}^{-1} + 1) \left(\frac{3}{2}E_0\right)^{\frac{1}{2}} \leq \frac{\tau_0}{8}, \\ \label{Tast.tau}
&T_\ast \leq \frac{\tau_0}{4} , \\
&T_\ast^{\frac 14} (1 + E_0^{\frac 14}) \leq \frac{1}{16} \, .
\end{align}
\end{subequations}
Then $|E(t)| \le \frac{3}{2}E(0)$ for all $t \in [0, T_\ast]$ and $\tau(t) \ge \frac{\tau_0}{2}$ for all $t \in [0, T_\ast]$. Moreover, it is possible to select the parameter ${\varepsilon}$ and the time of existence $T_\ast$ so as to achieve the inequalities \eqref{eq:eps:choice}.
\end{lemma}
\begin{proof}[Proof of Lemma~\ref{lem:eps:choice}] We first establish, using the definition of $\Gamma_2$ in \eqref{def:gamma:2}, the following estimate
\begin{align} \ensuremath{\nonumber}
\abs{\int_0^{T_\ast} \Gamma_2(s) \,\mathrm{d} s} \le & C_2 \int_0^{T_\ast} ( {\varepsilon}^{-1} + \ensuremath{\nonumber}orm{\bar w}_{Z_\tau}+ \ensuremath{\nonumber}orm{(\bar w,A)}_{X_\tau} + \delta \ensuremath{\nonumber}orm{\bar w}_{H_\tau} ) \,\mathrm{d} s \\ \ensuremath{\nonumber}
\le & C_2 \left(T_\ast {\varepsilon}^{- 1} + T_\ast^{\frac 1 2} \Big\| \| \bar{w} \|_{Z_\tau} \Big\|_{L^2(0, T_\ast)} + T_\ast \sup_{t \in [0, T_\ast]} \| (\bar{w}, A) \|_{X_\tau} + \delta T_\ast^{\frac 1 2} \Big\| \| \bar{w} \|_{H_\tau} \Big\|_{L^2(0, T_\ast)} \right) \\ \label{seq:1}
\le & C_2 (1 + \delta) (T_\ast {\varepsilon}^{-1} + T_\ast^{\frac 1 2})(1 + E(T_\ast)^{\frac 1 2} ).
\end{align}
Next, we establish using \eqref{def:gamma:1},
\begin{align}
\abs{\int_0^{T_\ast} \Gamma_1(s) \,\mathrm{d} s } \le & C_1 \abs{\int_0^{T_\ast} \Big( \| \bar{w} \|_{H_\tau}^2 + \left( {\varepsilon}^{-1} + \frac 14 \ensuremath{\nonumber}orm{\bar w}_{Z_\tau} + \ensuremath{\nonumber}orm{(\bar w,A)}_{X_\tau}+ \delta \ensuremath{\nonumber}orm{\bar w}_{H_\tau} \right) \Big) \,\mathrm{d} s }
\ensuremath{\nonumber}otag \\
\le & C_1 {\varepsilon} \Big\| \frac{1}{64 {\varepsilon}} \| \bar{w} \|_{H_\tau} \Big\|_{L^2(0,T_\ast)}^2 + C_1 \Big(T_\ast {\varepsilon}^{-1} + T_\ast^{\frac 1 2} \Big\| \| \bar{w} \|_{Z_\tau} \Big\|_{L^2(0, T_\ast)} \ensuremath{\nonumber}otag \\
&\qquad \qquad \qquad \qquad \qquad \qquad + T_\ast \Big\| \| (\bar{w}, A) \|_{X_\tau} \Big\|_{L^\infty(0, T_\ast)} + \delta T_\ast^{\frac 1 2} \Big\| \| \bar{w} \|_{H_\tau} \Big\|_{L^2(0, T_\ast)} \Big) \ensuremath{\nonumber}otag \\
\le & C_1 {\varepsilon} E(T_\ast) + C_1 (1 + \delta) (T_\ast {\varepsilon}^{-1} + T_\ast^{\frac 1 2}) E(T_\ast)^{\frac{1}{2}}.
\label{seq:2}
\end{align}
We now select ${\varepsilon}$ via
\begin{align} \label{eps:choice}
\frac{3}{2} C_1 {\varepsilon} E_0 < \frac{\tau_0}{8}.
\end{align}
Once ${\varepsilon}$ has been selected ({\em depending only on the initial datum}) through \eqref{eps:choice}, we pick $T_\ast$ depending on $\delta, {\varepsilon}$ in order to satisfy simultaneously the two inequalities
\begin{align} \label{T:choice}
C_2 (1 + \delta)(T_\ast^{\frac 34} {\varepsilon}^{-1} + T_\ast^{\frac 1 4}) \le 1, \qquad C_1 (1 + \delta) (T_\ast {\varepsilon}^{-1} + T_\ast^{\frac 1 2}) \left(\frac{3}{2}E_0\right)^{\frac{1}{2}} < \frac{\tau_0}{8}.
\end{align}
Such a choice is possible because every parameter other than $T_\ast$ in \eqref{T:choice} has been fixed already, so we can take $T_\ast$ small enough so as to achieve \eqref{T:choice}.
Inserting the first inequality in \eqref{T:choice} into the second integral inequality in \eqref{Int:id}, we obtain the nonlinear inequality
\begin{align*}
E(T_\ast) &\le E_0 + \sup_{s \in [0, T_\ast]} \| (\bar{w}, A) \|_{X_{\tau(s)}}^2 \abs{\int_0^{T_\ast} \Gamma_2(s) \,\mathrm{d} s} \\
&\le E_0 + C_2 (1 + \delta) (T_\ast {\varepsilon}^{-1} + T_\ast^{\frac 1 2})(1 + E(T_\ast)^{\frac 1 2} ) E(T_\ast) \\
&\le E_0 + T_\ast^{\frac 1 4} E(T_\ast) + T_\ast^{\frac 1 4} E(T_\ast)^{\frac 3 2},
\end{align*}
which implies the desired bound $E(T_\ast) \le \frac 3 2 E_0$, provided $T_\ast$ is additionally chosen small enough to obey
\[
T_\ast^{\frac 14} (1 + E_0^{\frac 14}) \leq \frac{1}{16} \, .
\]
We subsequently insert the inequality $E(T_\ast) \le \frac 3 2 E_0$, together with \eqref{eps:choice} and the second inequality in \eqref{T:choice}, into \eqref{seq:2} so as to ensure, for all $t \in [0, T_\ast]$,
\begin{align} \label{ineq:tau}
\tau(t) \ge \tau_0 - t - \abs{\int_0^{t} \Gamma_1(s) \,\mathrm{d} s} \ge \tau_0 -T_\ast - \frac{\tau_0}{8} - \frac{\tau_0}{8} \ge \frac{\tau_0}{2},
\end{align}
where we have appealed to the last inequality on $T_\ast$, \eqref{Tast.tau}, to establish the final inequality in \eqref{ineq:tau}. In summary, one first chooses $\delta$, then ${\varepsilon}$, and $T_\ast$ is picked last.
\end{proof}
\section{Proof of the Main Nonlinear Lemma}
\label{sec:main:lemma}
Before turning to the proof, we recall a few technical results which are used in the proof of Lemma~\ref{lem:main}.
\subsection{Properties of the weight and the lift function}
Let us record the following straightforward inequality
\begin{align} \label{Iy:theta:est}
\abs{I_y[\theta_\xi]} = \int_0^y \theta_\xi(y', t) \,\mathrm{d} y' \le \int_0^y 1 \,\mathrm{d} y' \le y,
\end{align}
and emphasize that \eqref{Iy:theta:est} is \textit{independent} of the parameter ${\varepsilon}$. Recalling \eqref{eq:rho:def}, we note that $\theta_\xi$ obeys
\begin{align}
(1-\theta_\xi) \rho = e^{- \frac{y^2 \langle \xi \rangle^2}{2(1 + \frac{t}{{\varepsilon}} )}} e^{\frac{y^2}{8(1 + \frac t {\varepsilon})}} = e^{-\frac{y^2(4\langle \xi \rangle^2 - 1)}{8(1 + \frac{t}{{\varepsilon}})}} \leq e^{-\frac{3 y^2 \langle \xi \rangle^2}{8(1 + \frac{t}{{\varepsilon}})}}
\label{eq:weight:choice}
\end{align}
from which we may deduce the inequality
\begin{align}
\label{theta:rho:bound}
&\| (1-\theta_\xi) \rho \|_{L^2_y} \lesssim \frac{(1 + t/{\varepsilon} )^{1/4}}{\langle \xi \rangle^{1/2}} \lesssim \frac{1}{\langle \xi \rangle^{1/2}} ,
\end{align}
which will be used frequently below.
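Indeed, by \eqref{eq:weight:choice} the left side of \eqref{theta:rho:bound} is bounded by an explicit Gaussian integral,
\begin{align*}
\| (1-\theta_\xi) \rho \|_{L^2_y}^2
\le \int_0^\infty e^{-\frac{3 y^2 \langle \xi \rangle^2}{4(1 + t/{\varepsilon})}} \,\mathrm{d} y
= \frac{1}{2} \sqrt{\frac{4 \pi (1+t/{\varepsilon})}{3 \langle \xi \rangle^2}}
\lesssim \frac{(1+t/{\varepsilon})^{1/2}}{\langle \xi \rangle} \,,
\end{align*}
and the final bound in \eqref{theta:rho:bound} uses once more that $t \le {\varepsilon}$, so that $1+t/{\varepsilon} \le 2$.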
It will be convenient to appeal to the bound
\begin{align}
\ensuremath{\nonumber}orm{I_y[f]}_{L^\infty_y} \lesssim (1+t/{\varepsilon})^{1/4} \ensuremath{\nonumber}orm{\rho f}_{L^2([0,\infty))} \lesssim \ensuremath{\nonumber}orm{\rho f}_{L^2([0,\infty))}
\label{eq:v:L:infty}
\end{align}
which is a consequence of the estimate $\ensuremath{\nonumber}orm{\rho^{-1}}_{L^2(0,\infty)} \lesssim (1+t/{\varepsilon})^{1/4}$ and
\begin{align*}
\ensuremath{\nonumber}orm{I_y[f]}_{L^\infty_y} \leq \int_0^\infty \rho(y) \abs{f(y)} \rho(y)^{-1} \,\mathrm{d} y \leq \ensuremath{\nonumber}orm{\rho f}_{L^2([0,\infty))} \ensuremath{\nonumber}orm{\rho^{-1}}_{L^2[0,\infty)} .
\end{align*}
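The estimate on $\rho^{-1}$ quoted above is also an explicit Gaussian integral: by \eqref{eq:rho:def},
\begin{align*}
\| \rho^{-1} \|_{L^2(0,\infty)}^2 = \int_0^\infty e^{-\frac{y^2}{4(1+t/{\varepsilon})}} \,\mathrm{d} y = \sqrt{\pi (1+t/{\varepsilon})} \lesssim (1+t/{\varepsilon})^{1/2} \,.
\end{align*}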
As a consequence of the proof of \eqref{eq:v:L:infty} and the fundamental theorem of calculus, we have that whenever $f|_{y=0}=0$, the estimate
\begin{align}
\ensuremath{\nonumber}orm{f}_{L^\infty_y} \leq \ensuremath{\nonumber}orm{I_y[{\partial}_y f]}_{L^\infty_y} \lesssim (1+t/{\varepsilon})^{1/4} \ensuremath{\nonumber}orm{\rho {\partial}_y f}_{L^2([0,\infty))} \lesssim \ensuremath{\nonumber}orm{\rho {\partial}_y f}_{L^2([0,\infty))}
\label{eq:w:L:infty}
\end{align}
also holds. The following weighted Poincar\'e/Hardy inequality will be useful for our proof.
\begin{lemma}
\label{lem:Hardy}
Let the weight function $\rho$ be as defined in \eqref{eq:rho:def}. Assume that $f$ is such that $\rho {\partial}_y f \in L^2$. Then we have
\begin{align*}
\ensuremath{\nonumber}orm{\rho f}_{L^2}^2 + \frac{1}{4(1+t/{\varepsilon})} \ensuremath{\nonumber}orm{y \rho f}_{L^2}^2 \leq 4(1+t/{\varepsilon}) \ensuremath{\nonumber}orm{\rho {\partial}_y f}_{L^2}^2.
\end{align*}
\end{lemma}
\begin{proof}[Proof of Lemma~\ref{lem:Hardy}] Writing $1 = {\partial}_y \{ y \}$ and integrating by parts in $y$ (the boundary term vanishes at $y=0$ due to the factor $y$, and at $y = \infty$ for the rapidly decaying functions to which the lemma is applied), we compute
\begin{align*}
\int_0^\infty \rho^2 f^2 = \int_0^\infty e^{\frac{y^2}{4(1+t/{\varepsilon})}} f^2 &= \int_0^\infty {\partial}_y \{ y \} e^{\frac{y^2}{4(1+t/{\varepsilon})}} f^2 \ensuremath{\nonumber}otag \\
&= \frac{-1}{2(1+t/{\varepsilon})} \int_0^\infty y^2 e^{\frac{y^2}{4(1+t/{\varepsilon})}} f^2 -2 \int_0^\infty y e^{\frac{y^2}{4(1+t/{\varepsilon})}} f {\partial}_y f.
\end{align*}
Due to the monotonicity and super-exponential growth of our weight, the first term on the right side above comes with a favorable sign; moving it to the left and estimating the remaining term yields
\begin{align*}
\ensuremath{\nonumber}orm{\rho f}_{L^2}^2 + \frac{1}{2(1+t/{\varepsilon})} \ensuremath{\nonumber}orm{y \rho f}_{L^2}^2
&\leq 2 \left| \int_0^\infty y \rho^2 f {\partial}_y f\right| \leq 2 \ensuremath{\nonumber}orm{y \rho f}_{L^2} \ensuremath{\nonumber}orm{\rho {\partial}_y f}_{L^2} \ensuremath{\nonumber}otag\\
& \leq \frac{1}{4(1+t/{\varepsilon})} \ensuremath{\nonumber}orm{y \rho f}_{L^2}^2 + 4 (1+t/{\varepsilon}) \ensuremath{\nonumber}orm{\rho {\partial}_y f}_{L^2}^2 \, ,
\end{align*}
which concludes the proof of the lemma.
\end{proof}
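In particular, since $t \le {\varepsilon}$ implies $1+t/{\varepsilon} \le 2$, applying Lemma~\ref{lem:Hardy} to the real and imaginary parts of $\bar w_\xi$ for each fixed $\xi$, and integrating in $\xi$ against $\brak{\xi}^{2r} e^{2\tau|\xi|}$, we obtain (with the weighted norms used above)
\begin{align*}
\| \bar w \|_{\tau,r} + \| y \bar w \|_{\tau,r} \lesssim \| {\partial}_y \bar w \|_{\tau,r} \,.
\end{align*}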
\subsection{Error terms in the $A$ energy}
First, using \eqref{eq:v:L:infty} and the Cauchy-Schwarz inequality it follows that the term $T_{\mathcal A,1}$ defined in \eqref{eq:T:A12:a} may be bounded as
\begin{align*}
\abs{T_{\mathcal A,1}}
&\leq \ensuremath{\nonumber}orm{|\xi|^{1/2} \brak{\xi}^r e^{\tau |\xi|} I_\infty[\bar w_\xi]}_{L^2_\xi L^\infty_y} \ensuremath{\nonumber}orm{|\xi|^{1/2} \brak{\xi}^r e^{\tau |\xi|} A_\xi}_{L^2_\xi}
\lesssim \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} \bar w}_{\tau,r} \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} A}_{\widetilde{\tau,r}}
\,.
\end{align*}
In order to estimate the term $T_{\mathcal{A},2}$ defined in \eqref{eq:T:A12:b}
we use that for any $r\geq 0$ we have
\begin{align*}
\brak{\xi}^r \lesssim \brak{\xi-\eta}^r + \brak{\eta}^r
\end{align*}
where the implicit constant depends solely on $r$, and that $\brak{\xi}^{1/2} \leq \brak{\eta}^{1/2} \brak{\xi-\eta}^{1/2}$, to conclude that
\begin{align*}
\abs{T_{\mathcal A,2}}
&\lesssim \int_{\RR} \int_{\RR} \abs{\xi-\eta} \left(\brak{\xi-\eta}^{r-1/2} + \brak{\eta}^{r-1/2}\right) \abs{A_\eta} e^{\tau |\eta|} \abs{A_{\xi-\eta}} e^{\tau |\xi-\eta|} \brak{\xi}^{r+1/2} \abs{A_\xi} e^{\tau|\xi|} \,\mathrm{d} \eta \,\mathrm{d} \xi
\ensuremath{\nonumber}otag\\
&\lesssim \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} A}_{\widetilde{\tau,r}} \ensuremath{\nonumber}orm{A_\eta e^{\tau |\eta|}}_{L^1_\eta} \ensuremath{\nonumber}orm{\brak{{\partial}_x}^{1/2} A}_{\widetilde{\tau,r}}
+ \ensuremath{\nonumber}orm{|\xi-\eta| \brak{\xi-\eta}^{1/2} A_{\xi-\eta} e^{\tau |\xi-\eta|}}_{L^1_{\xi-\eta}} \ensuremath{\nonumber}orm{A}_{\widetilde{\tau,r}}^2
\ensuremath{\nonumber}otag\\
&\lesssim \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} A}_{\widetilde{\tau,r}}^2 \ensuremath{\nonumber}orm{A}_{\widetilde{\tau,r}} + \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} A}_{\widetilde{\tau,r}} \ensuremath{\nonumber}orm{A}_{\widetilde{\tau,r}}^2.
\end{align*}
In the last inequality above we have used that for $r>3/2$ we have $\brak{\xi}^{-r+1} \in L^2_\xi$. This proves~\eqref{eq:Shaq:A}.
\subsection{Error terms in the $\bar w$ energy}
\label{sec:bar:w:energy}
\subsubsection{The $T_{\Ncal}$ term}
According to \eqref{eq:N:def} and \eqref{eq:T:cal:1}, we decompose $T_{\Ncal}$ as
\begin{align}
T_{\Ncal}
&= i \int_{\RR} \int_{\RR} \int_0^\infty \bar w_\eta (\xi-\eta) \bar w_{\xi-\eta} \overline{\bar w_\xi} \rho^2 e^{2\tau|\xi|} \brak{\xi}^{2r} \,\mathrm{d} y \,\mathrm{d}\eta\,\mathrm{d} \xi
\ensuremath{\nonumber}otag\\
&\quad- i \int_{\RR} \int_{\RR} \int_0^\infty \eta I_y[\bar w_\eta] {\partial}artial_y \bar w_{\xi-\eta} \overline{\bar w_\xi} \rho^2 e^{2\tau|\xi|} \brak{\xi}^{2r} \,\mathrm{d} y \,\mathrm{d}\eta\,\mathrm{d} \xi
\ensuremath{\nonumber}otag\\
&=: T_{\Ncal}^{(1)} + T_{\Ncal}^{(2)}
\,.
\label{eq:Barkley:0}
\end{align}
For the first term above we appeal to the inequality
\[
|\xi-\eta|^{1/2} \brak{\xi}^r \lesssim (|\eta|^{1/2} + |\xi|^{1/2}) \left(\brak{\eta}^r + \brak{\xi-\eta}^r\right),
\]
to the triangle inequality $|\xi| \le |\eta| + |\xi - \eta|$ applied in the exponential factor, and to the bound \eqref{eq:w:L:infty} to conclude
\begin{align}
\abs{T_{\Ncal}^{(1)}}
&\lesssim \int_{\RR} \int_{\RR} |\eta|^{1/2} \ensuremath{\nonumber}orm{\rho \bar w_\eta}_{L^2_y} |\xi-\eta|^{1/2}\ensuremath{\nonumber}orm{\rho\bar w_{\xi-\eta}}_{L^2_y} \left(\brak{\eta}^r + \brak{\xi-\eta}^r\right) \brak{\xi}^{r} \ensuremath{\nonumber}orm{\bar w_\xi}_{L^\infty_y} e^{2\tau|\xi|} \,\mathrm{d}\eta\,\mathrm{d} \xi
\ensuremath{\nonumber}otag\\
&\quad + \int_{\RR} \int_{\RR} \ensuremath{\nonumber}orm{\bar w_\eta}_{L^\infty_y} |\xi-\eta|^{1/2}\ensuremath{\nonumber}orm{\rho \bar w_{\xi-\eta}}_{L^2_y} \left(\brak{\eta}^r + \brak{\xi-\eta}^r\right) |\xi|^{1/2} \brak{\xi}^{r} \ensuremath{\nonumber}orm{\rho \bar w_\xi}_{L^2_y} e^{2\tau|\xi|} \,\mathrm{d}\eta\,\mathrm{d} \xi
\ensuremath{\nonumber}otag\\
&\lesssim \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} \bar w}_{\tau,r} \ensuremath{\nonumber}orm{{\partial}_y \bar w}_{\tau,r} \ensuremath{\nonumber}orm{|\zeta|^{1/2} \rho \bar w_{\zeta} e^{\tau|\zeta|}}_{L^1_{\zeta} L^2_y}
+ \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} \bar w}_{\tau,r}^2 \ensuremath{\nonumber}orm{ \rho {\partial}_y \bar w_{\zeta} e^{\tau|\zeta|}}_{L^1_{\zeta} L^2_y}
\ensuremath{\nonumber}otag\\
&\lesssim \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} \bar w}_{\tau,r}^2 \ensuremath{\nonumber}orm{{\partial}_y \bar w}_{\tau,r}
\label{eq:Barkley:1}
\end{align}
where in the last inequality we have used that $r>1/2$.
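More precisely, the $L^1_{\zeta} L^2_y$ factors appearing above are controlled via the Cauchy-Schwarz inequality in $\zeta$:
\begin{align*}
\| |\zeta|^{1/2} \rho \bar w_{\zeta} e^{\tau|\zeta|} \|_{L^1_{\zeta} L^2_y}
\le \| \brak{\zeta}^{-r} \|_{L^2_\zeta} \, \| |\zeta|^{1/2} \brak{\zeta}^{r} \rho \bar w_{\zeta} e^{\tau|\zeta|} \|_{L^2_\zeta L^2_y}
\lesssim \| |{\partial}_x|^{1/2} \bar w \|_{\tau,r} \,,
\end{align*}
with $\| \brak{\zeta}^{-r} \|_{L^2_\zeta}$ finite precisely because $r > 1/2$; the bound $\| \rho \, {\partial}_y \bar w_{\zeta} e^{\tau|\zeta|} \|_{L^1_{\zeta} L^2_y} \lesssim \| {\partial}_y \bar w \|_{\tau,r}$ is obtained in the same way.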
For the second term in \eqref{eq:Barkley:0} we proceed similarly, but appeal to the bound \eqref{eq:v:L:infty}, which yields
\begin{align}
\abs{T_{\Ncal}^{(2)}}
&\lesssim \int_{\RR} \int_{\RR} |\eta|^{1/2} \brak{\eta}^r \ensuremath{\nonumber}orm{I_y[\bar w_\eta]}_{L^\infty_y} |\xi-\eta|^{1/2}\ensuremath{\nonumber}orm{\rho {\partial}_y \bar w_{\xi-\eta}}_{L^2_y} \brak{\xi}^{r} \ensuremath{\nonumber}orm{\rho \bar w_\xi}_{L^2_y} e^{2\tau|\xi|} \,\mathrm{d}\eta\,\mathrm{d} \xi
\ensuremath{\nonumber}otag\\
&\quad + \int_{\RR} \int_{\RR} |\eta|^{1/2} \brak{\eta}^r \ensuremath{\nonumber}orm{I_y[\bar w_\eta]}_{L^\infty_y} \ensuremath{\nonumber}orm{\rho {\partial}_y \bar w_{\xi-\eta}}_{L^2_y} |\xi|^{1/2} \brak{\xi}^{r} \ensuremath{\nonumber}orm{\rho \bar w_\xi}_{L^2_y} e^{2\tau|\xi|} \,\mathrm{d}\eta\,\mathrm{d} \xi
\ensuremath{\nonumber}otag\\
&\quad + \int_{\RR} \int_{\RR} |\eta| \ensuremath{\nonumber}orm{I_y[\bar w_\eta]}_{L^\infty_y} \brak{\xi-\eta}^r \ensuremath{\nonumber}orm{\rho {\partial}_y \bar w_{\xi-\eta}}_{L^2_y} \brak{\xi}^{r} \ensuremath{\nonumber}orm{\rho \bar w_\xi}_{L^2_y} e^{2\tau|\xi|} \,\mathrm{d}\eta\,\mathrm{d} \xi
\ensuremath{\nonumber}otag\\
&\lesssim \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} \bar w}_{\tau,r} \ensuremath{\nonumber}orm{\bar w}_{\tau,r}\ensuremath{\nonumber}orm{|\xi-\eta|^{1/2} \rho {\partial}_y \bar w_{\xi-\eta} e^{\tau|\xi-\eta|}}_{L^1_{\xi-\eta} L^2_y}
\ensuremath{\nonumber}otag\\
&\quad + \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} \bar w}_{\tau,r}^2 \ensuremath{\nonumber}orm{\rho {\partial}_y \bar w_{\xi-\eta} e^{\tau|\xi-\eta|}}_{L^1_{\xi-\eta} L^2_y} + \ensuremath{\nonumber}orm{\bar w}_{\tau,r} \ensuremath{\nonumber}orm{{\partial}_y \bar w}_{\tau,r} \ensuremath{\nonumber}orm{|\eta| \rho \bar w_{\eta} e^{\tau|\eta|}}_{L^1_{\eta} L^2_y}
\ensuremath{\nonumber}otag\\
&\lesssim \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} \bar w}_{\tau,r} \ensuremath{\nonumber}orm{\brak{{\partial}_x}^{1/2} \bar w}_{\tau,r} \ensuremath{\nonumber}orm{{\partial}_y \bar w}_{\tau,r}
\label{eq:Barkley:2}
\end{align}
since $r>1$. Combining the estimates \eqref{eq:Barkley:1}--\eqref{eq:Barkley:2} yields the desired bound \eqref{eq:Barkley:*}.
\subsubsection{The $T_{\Lcal}$ term} Recall that the term $T_{\Lcal}$ is defined in \eqref{eq:T:cal:1}, via~\eqref{eq:NLMB:def}; after relabeling $\eta \leftrightarrow \xi - \eta$ in the first two terms of \eqref{eq:L:def}, it may be written as
\begin{align*}
T_{\Lcal}
&= i \int_{\RR} \int_{\RR} \int_0^\infty \rho^2 e^{2 \tau |\xi|} \langle \xi \rangle^{2r} \overline{\bar{w}_\xi} \eta \biggl( \bar w_{\xi-\eta} A_{\eta} \theta_{\eta} + A_{\xi-\eta} \theta_{\xi-\eta} \bar w_{\eta} - I_y[\bar w_\eta] A_{\xi-\eta} {\partial}_y \theta_{\xi-\eta} \biggr) \,\mathrm{d} y \,\mathrm{d} \eta \,\mathrm{d} \xi
\ensuremath{\nonumber}otag \\
&= T_{\Lcal}^{(1)} + T_{\Lcal}^{(2)} + T_{\Lcal}^{(3)}
\,.
\end{align*}
We estimate each of the above terms individually. Using that $0\leq \theta_\xi \leq 1$ pointwise in $t,y,\xi$, that $|\eta|^{1/2} \leq |\xi|^{1/2} + |\xi-\eta|^{1/2}$, and $\brak{\xi}^r \lesssim \brak{\eta}^r + \brak{\xi-\eta}^r$, for the $T_{\Lcal}^{(1)}$ term we have
\begin{align}
\abs{T_{\Lcal}^{(1)}}
&\le \int_{\RR} \int_{\RR} \int_0^\infty \rho^2 e^{\tau |\xi|} e^{\tau |\xi-\eta|} e^{\tau |\eta|} \brak{\xi}^{2r} \abs{\bar{w}_\xi} |\eta|^{1/2} (|\xi|^{1/2} + |\xi-\eta|^{1/2}) \abs{\bar{w}_{\xi-\eta}} \abs{A_{\eta}} \,\mathrm{d} y \,\mathrm{d} \eta \,\mathrm{d} \xi
\ensuremath{\nonumber}otag \\
&\lesssim \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} \bar w}_{\tau,r} \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} A}_{\widetilde{\tau,r}} \ensuremath{\nonumber}orm{\rho e^{\tau |\xi-\eta|} \abs{\bar w_{\xi-\eta}}}_{L^1_{\xi-\eta} L^2_y}
\ensuremath{\nonumber}otag \\
&\qquad \qquad + \ensuremath{\nonumber}orm{\bar w}_{\tau,r} \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} A}_{\widetilde{\tau,r}} \ensuremath{\nonumber}orm{\rho |\xi-\eta|^{1/2} e^{\tau |\xi-\eta|} \abs{\bar w_{\xi-\eta}}}_{L^1_{\xi-\eta} L^2_y}
\ensuremath{\nonumber}otag \\
&\qquad \qquad + \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} \bar w}_{\tau,r} \ensuremath{\nonumber}orm{ \bar w}_{\tau,r} \ensuremath{\nonumber}orm{|\eta|^{1/2} e^{\tau |\eta|} \abs{A_\eta}}_{L^1_{\eta}}
\ensuremath{\nonumber}otag \\
&\lesssim \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} \bar w}_{\tau,r} \ensuremath{\nonumber}orm{\bar w}_{\tau,r} \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} A}_{\widetilde{\tau,r}}
\label{eq:Jordan:1}
\end{align}
since $r>1$. Similarly, for the $T_{\Lcal}^{(2)}$ term we have
\begin{align}
\abs{T_{\Lcal}^{(2)}}
&\le \int_{\RR} \int_{\RR} \int_0^\infty \rho^2 e^{\tau |\xi|} e^{\tau |\xi-\eta|} e^{\tau |\eta|} \brak{\xi}^{2r} \abs{\bar{w}_\xi} |\eta|^{1/2} (|\xi|^{1/2} + |\xi-\eta|^{1/2}) \abs{\bar{w}_{\eta}} \abs{A_{\xi-\eta}} \,\mathrm{d} y \,\mathrm{d} \eta \,\mathrm{d} \xi
\ensuremath{\nonumber}otag \\
&\lesssim \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} \bar w}_{\tau,r}^2 \ensuremath{\nonumber}orm{e^{\tau |\xi-\eta|} \abs{A_{\xi-\eta}}}_{L^1_{\xi-\eta}}
+ \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} \bar w}_{\tau,r} \ensuremath{\nonumber}orm{\bar w}_{\tau,r} \ensuremath{\nonumber}orm{|\xi-\eta|^{1/2} e^{\tau |\xi-\eta|} \abs{A_{\xi-\eta}}}_{L^1_{\xi-\eta}}
\ensuremath{\nonumber}otag \\
&\qquad \qquad + \left( \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} \bar w}_{\tau,r} \ensuremath{\nonumber}orm{A}_{\widetilde{\tau,r}} +\ensuremath{\nonumber}orm{\bar w}_{\tau,r} \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} A}_{\widetilde{\tau,r}} \right) \ensuremath{\nonumber}orm{\rho |\eta|^{1/2} e^{\tau |\eta|} \abs{\bar w_\eta}}_{L^1_{\eta}L^2_y}
\ensuremath{\nonumber}otag \\
&\lesssim \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} \bar w}_{\tau,r}^2 \ensuremath{\nonumber}orm{A}_{\widetilde{\tau,r}} + \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} \bar w}_{\tau,r} \ensuremath{\nonumber}orm{\bar w}_{\tau,r} \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} A}_{\widetilde{\tau,r}}
\label{eq:Jordan:2}
\, .
\end{align}
The $T_{\Lcal}^{(3)}$ term is treated slightly differently due to the presence of ${\partial}_y \theta_{\xi-\eta}$. Here we use that
\begin{align*}
\rho \brak{\xi}^{-1/2} {\partial}_y \theta_{\xi}
=\frac{\rho(y,t) \brak{\xi}^{3/2} y}{ (1+t/{\varepsilon})} e^{-\frac{y^2 \brak{\xi}^2}{2(1+t/{\varepsilon})}}
&\lesssim \frac{\brak{\xi}^{1/2}}{(1+t/{\varepsilon})^{1/2}} e^{\frac{y^2}{8(1+t/{\varepsilon})}} e^{-\frac{y^2 \brak{\xi}^2}{4(1+t/{\varepsilon})}}
\ensuremath{\nonumber}otag\\
&\lesssim \frac{\brak{\xi}^{1/2}}{(1+t/{\varepsilon})^{1/2}} e^{-\frac{y^2 \brak{\xi}^2}{8(1+t/{\varepsilon})}} \lesssim \brak{\xi}^{1/2} e^{-\frac{y^2 \brak{\xi}^2}{8(1+t/{\varepsilon})}}
\end{align*}
from which it follows upon taking an $L^2$ norm in $y$ and a supremum over $\xi\in \RR$ that
\begin{align}
\ensuremath{\nonumber}orm{\rho \brak{\xi}^{-1/2} {\partial}_y \theta_{\xi}}_{L^\infty_{\xi} L^2_y}
\lesssim 1 \,.
\label{eq:Jordan:0}
\end{align}
Appealing also to \eqref{eq:v:L:infty} with $f=\bar w_\eta$,
and to the inequality
\begin{align*}
|\eta| \brak{\xi-\eta}^{1/2} \brak{\xi}^r \lesssim |\eta| \brak{\xi-\eta}^{r+1/2} + |\eta|^{1/2} \brak{\eta}^r |\xi|^{1/2} \brak{\xi-\eta}^{1/2} + |\eta|^{1/2} \brak{\eta}^r |\xi-\eta|^{1/2} \brak{\xi-\eta}^{1/2}
\end{align*}
we obtain
\begin{align}
\abs{T_{\Lcal}^{(3)}}
&\leq \int_{\RR} \int_{\RR} \int_0^\infty \rho e^{\tau |\xi|} e^{\tau |\xi-\eta|} e^{\tau |\eta|} \brak{\xi}^{2r} \abs{\bar{w}_\xi} |\eta| \brak{\xi-\eta}^{1/2} \abs{A_{\xi-\eta}} \abs{I_y[\bar{w}_{\eta}]} \frac{\rho \abs{{\partial}_y \theta_{\xi-\eta}}}{\brak{\xi-\eta}^{1/2}} \,\mathrm{d} y \,\mathrm{d} \eta \,\mathrm{d} \xi
\ensuremath{\nonumber}otag\\
&\lesssim \int_{\RR} \int_{\RR} \ensuremath{\nonumber}orm{\rho \bar w_\xi}_{L^2_y} \brak{\xi}^r e^{\tau |\xi|} \abs{A_{\xi-\eta}} e^{\tau|\xi-\eta|} \ensuremath{\nonumber}orm{\rho \bar w_\eta}_{L^2_y} e^{\tau |\eta|} |\eta| \brak{\xi-\eta}^{1/2} \brak{\xi}^r \,\mathrm{d} \eta \,\mathrm{d} \xi
\ensuremath{\nonumber}otag\\
&\lesssim \ensuremath{\nonumber}orm{\bar w}_{\tau,r} \ensuremath{\nonumber}orm{\brak{{\partial}_x}^{1/2} A}_{\widetilde{\tau,r}} \ensuremath{\nonumber}orm{|\eta| \ensuremath{\nonumber}orm{\rho \bar w_\eta}_{L^2_y} e^{\tau |\eta|}}_{L^1_\eta}
+ \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} \bar w}_{\tau,r}^2 \ensuremath{\nonumber}orm{\brak{\xi-\eta}^{1/2} A_{\xi-\eta} e^{\tau |\xi-\eta|}}_{L^1_{\xi-\eta}}
\ensuremath{\nonumber}otag\\
&\qquad \qquad + \ensuremath{\nonumber}orm{\bar w}_{\tau,r} \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} \bar w}_{\tau,r} \ensuremath{\nonumber}orm{\brak{\xi-\eta} A_{\xi-\eta} e^{\tau |\xi-\eta|}}_{L^1_{\xi-\eta}}
\ensuremath{\nonumber}otag\\
&\lesssim \ensuremath{\nonumber}orm{\bar w}_{\tau,r}^2 \ensuremath{\nonumber}orm{\brak{{\partial}_x}^{1/2} A}_{\widetilde{\tau,r}}
+ \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} \bar w}_{\tau,r}^2 \ensuremath{\nonumber}orm{A}_{\widetilde{\tau,r}} + \ensuremath{\nonumber}orm{\bar w}_{\tau,r} \ensuremath{\nonumber}orm{|{\partial}_x|^{1/2} \bar w}_{\tau,r} \ensuremath{\nonumber}orm{A}_{\widetilde{\tau,r}}
\label{eq:Jordan:3}
\end{align}
since $r> 3/2$. Summing \eqref{eq:Jordan:1}, \eqref{eq:Jordan:2}, and \eqref{eq:Jordan:3}, and massaging the resulting terms we arrive at \eqref{eq:Jordan:*}.
\subsubsection{The $T_{\Mcal}$ term}
This term has the distinguished feature of losing a $y$-weight, which is why we have introduced the vorticity ${\partial}_y \bar w$ in the first place. Recall from \eqref{eq:M:def} and \eqref{eq:T:cal:1} that
\begin{align*}
T_{\Mcal} = -i \int_{\RR} \int_{\RR} \int_0^\infty \rho^2 e^{2 \tau |\xi|} \langle \xi \rangle^{2r} \overline{\bar{w}_\xi} \eta A_\eta I_y[\theta_\eta] {\partial}_y \bar w_{\xi-\eta} \,\mathrm{d} y \,\mathrm{d} \eta \,\mathrm{d} \xi
\, .
\end{align*}
Recall from \eqref{Iy:theta:est} that $\abs{I_y [\theta_\eta]} \leq y$, and thus, with the cutoff $\chi$ defined in \eqref{eq:chi:def} we have that
\begin{align*}
\abs{(1-\chi(y)) I_y [\theta_\eta]} \lesssim 1
\end{align*}
pointwise in $\eta$. Therefore, the contribution to $T_{\Mcal}$ coming from the support of $1-\chi(y)$ may be bounded as
\begin{align}
&\abs{\int_{\RR} \int_{\RR} \int_0^\infty \rho^2 e^{2 \tau |\xi|} \langle \xi \rangle^{2r} \overline{\bar{w}_\xi} \eta A_\eta I_y[\theta_\eta] {\partial}_y \bar w_{\xi-\eta} (1-\chi(y)) \,\mathrm{d} y \,\mathrm{d} \eta \,\mathrm{d} \xi }
\notag\\
& \lesssim \int_{\RR} \int_{\RR} \int_0^\infty e^{\tau |\xi|}e^{\tau |\eta|}e^{\tau |\xi-\eta|} \langle \xi \rangle^{2r} \rho \abs{\bar{w}_\xi} |\eta| \abs{A_\eta} \rho \abs{{\partial}_y \bar w_{\xi-\eta}} \,\mathrm{d} y \,\mathrm{d} \eta \,\mathrm{d} \xi
\notag\\
&\lesssim \norm{|{\partial}_x|^{1/2} \bar w}_{\tau,r} \norm{|{\partial}_x|^{1/2} A}_{\widetilde{\tau,r}} \norm{\rho \abs{{\partial}_y \bar w_{\xi-\eta}}e^{\tau |\xi-\eta|} }_{L^1_{\xi-\eta} L^2_y}
+ \norm{\brak{{\partial}_x}^{1/2} \bar w}_{\tau,r} \norm{{\partial}_y \bar w}_{\tau,r} \norm{|\eta|^{1/2} A_\eta e^{\tau|\eta|}}_{L^1_\eta}
\notag\\
&\qquad + \norm{\brak{{\partial}_x}^{1/2} \bar w}_{\tau,r} \norm{A}_{\widetilde{\tau,r}} \norm{\rho |\xi-\eta|^{1/2} \abs{{\partial}_y \bar w_{\xi-\eta}}e^{\tau |\xi-\eta|} }_{L^1_{\xi-\eta} L^2_y}
\notag\\
&\lesssim \norm{\brak{{\partial}_x}^{1/2} \bar w}_{\tau,r} \norm{{\partial}_y \bar w}_{\tau,r} \norm{\brak{{\partial}_x}^{1/2} A}_{\widetilde{\tau,r}}
\,.
\label{eq:Jordan:4}
\end{align}
On the other hand, the contribution to $T_{\Mcal}$ from the support of $\chi(y)$ requires us to use the third term on the left side of \eqref{eq:Kobe:2}, but with $r$ replaced by $r-1/2$. More precisely, we use that $\chi(y) \abs{I_y[\theta_\eta]} \leq y \chi(y)$ pointwise in $\eta$, and the inequality
\begin{align*}
|\eta| \brak{\xi}^{2r} \lesssim |\eta| \brak{\eta}^{r-1/2} \brak{\xi}^{r+1/2} + |\eta| \brak{\xi-\eta}^{r-1/2} \brak{\xi}^{r+1/2}
\end{align*}
to deduce that
\begin{align}
&\abs{\int_{\RR} \int_{\RR} \int_0^\infty \rho^2 e^{2 \tau |\xi|} \langle \xi \rangle^{2r} \overline{\bar{w}_\xi} \eta A_\eta I_y[\theta_\eta] {\partial}_y \bar w_{\xi-\eta} \chi(y) \,\mathrm{d} y \,\mathrm{d} \eta \,\mathrm{d} \xi }
\notag\\
& \lesssim \int_{\RR} \int_{\RR} e^{\tau |\xi|}e^{\tau |\eta|}e^{\tau |\xi-\eta|} \langle \xi \rangle^{2r} \norm{\rho \bar{w}_\xi}_{L^2_y} |\eta| \abs{A_\eta} \norm{\rho y \chi(y) {\partial}_y \bar w_{\xi-\eta}}_{L^2_y} \,\mathrm{d} \eta \,\mathrm{d} \xi
\notag\\
&\lesssim \norm{\brak{{\partial}_x}^{1/2} \bar w}_{\tau,r} \left( \norm{\abs{{\partial}_x}^{1/2} A}_{\widetilde{\tau,r}} \norm{ \rho y \chi(y) {\partial}_y \bar w_{\xi-\eta} e^{\tau |\xi-\eta|}}_{L^1_{\xi-\eta} L^2_y} + \norm{y \chi {\partial}_y \bar w}_{\tau,r-1/2} \norm{|\eta| A_\eta e^{\tau|\eta|}}_{L^1_\eta} \right) \notag\\
&\lesssim \norm{\brak{{\partial}_x}^{1/2} \bar w}_{\tau,r} \norm{y \chi {\partial}_y \bar w}_{\tau,r-1/2}
\norm{\abs{{\partial}_x}^{1/2} A}_{\widetilde{\tau,r}}
\label{eq:Jordan:5}
\end{align}
since $r$ was taken to be sufficiently large. Combining \eqref{eq:Jordan:4} and \eqref{eq:Jordan:5} we obtain the estimate \eqref{eq:Jordan:**}.
\subsubsection{The $T_{\Bcal}$ term}
According to \eqref{eq:B:def:*} and \eqref{eq:T:cal:1}, we decompose the $T_{\Bcal}$ term as
\begin{align}
T_{\Bcal}
&= \int_{\RR} \int_0^\infty A_\xi (\partial_t - \partial_{yy})\theta_\xi \overline{\bar w_\xi} \rho^2 e^{2\tau|\xi|} \brak{\xi}^{2r} \,\mathrm{d} y \,\mathrm{d} \xi
\notag\\
&\qquad + \int_{\RR} \int_0^\infty \left(\theta_\xi -1\right) \partial_t A_\xi \overline{\bar w_\xi} \rho^2 e^{2\tau|\xi|} \brak{\xi}^{2r} \,\mathrm{d} y \,\mathrm{d} \xi
\notag\\
&\qquad + i \int_{\RR} \int_0^\infty \xi \left( I_\infty[\bar w_\xi] - I_y[\bar w_\xi] \right) \overline{\bar w_\xi} \rho^2 e^{2\tau|\xi|} \brak{\xi}^{2r} \,\mathrm{d} y \,\mathrm{d} \xi
\notag\\
&\qquad +i \int_{\RR} \int_0^\infty \xi A_\xi \bigl( y (\theta_\xi-1) - ( I_\infty[1-\theta_\xi] - I_y[1-\theta_\xi] )\bigr) \overline{\bar w_\xi} \rho^2 e^{2\tau|\xi|} \brak{\xi}^{2r} \,\mathrm{d} y \,\mathrm{d} \xi
\notag\\
&\qquad +i \int_{\RR} \int_{\RR} \int_0^\infty \xi A_\eta A_{\xi-\eta} \left( \theta_\eta \theta_{\xi-\eta} -1\right) \overline{\bar w_\xi} \rho^2 e^{2\tau|\xi|} \brak{\xi}^{2r} \,\mathrm{d} y \,\mathrm{d} \eta \,\mathrm{d} \xi
\notag\\
&\qquad -i \int_{\RR} \int_{\RR} \int_0^\infty \eta A_\eta A_{\xi-\eta} I_y[\theta_\eta] \partial_y \theta_{\xi-\eta} \overline{\bar w_\xi} \rho^2 e^{2\tau|\xi|} \brak{\xi}^{2r} \,\mathrm{d} y \,\mathrm{d} \eta \,\mathrm{d} \xi
\notag\\
&= T_{\Bcal}^{(1)} + T_{\Bcal}^{(2)} + T_{\Bcal}^{(3)} + T_{\Bcal}^{(4)} + T_{\Bcal}^{(5)} + T_{\Bcal}^{(6)}
\label{eq:Magic:0}
\end{align}
We bound the six terms above individually, and note that the second term, $T_{\Bcal}^{(2)}$, is the most involved, as it requires analyzing the four terms arising from the $A_\xi$ evolution in \eqref{eq:A:evo}.
We bound the most difficult term first. Combining \eqref{eq:Magic:0} and \eqref{eq:A:evo} we rewrite
\begin{align}
T_{\Bcal}^{(2)}
&= -i \int_{\RR} \int_0^\infty \left(\theta_\xi -1\right) \xi c_{\theta,\xi} A_\xi \overline{\bar w_\xi} \rho^2 e^{2\tau|\xi|} \brak{\xi}^{2r} \,\mathrm{d} y \,\mathrm{d} \xi \notag\\
&\qquad -i \int_{\RR} \int_0^\infty \left(\theta_\xi -1\right) \xi |\xi| A_\xi \overline{\bar w_\xi} \rho^2 e^{2\tau|\xi|} \brak{\xi}^{2r} \,\mathrm{d} y \,\mathrm{d} \xi \notag\\
&\qquad + i \int_{\RR} \int_0^\infty \left(\theta_\xi -1\right) \xi I_\infty[\bar w_\xi] \overline{\bar w_\xi} \rho^2 e^{2\tau|\xi|} \brak{\xi}^{2r} \,\mathrm{d} y \,\mathrm{d} \xi
\notag\\
&\qquad -i \int_{\RR} \int_{\RR} \int_0^\infty \left(\theta_\xi -1\right) A_\eta A_{\xi-\eta} (\xi-\eta) \overline{\bar w_\xi} \rho^2 e^{2\tau|\xi|} \brak{\xi}^{2r} \,\mathrm{d} y \,\mathrm{d}\eta \,\mathrm{d} \xi
\notag\\
&= T_{\Bcal}^{(2,1)} + T_{\Bcal}^{(2,2)} + T_{\Bcal}^{(2,3)} + T_{\Bcal}^{(2,4)} \,.
\label{eq:Magic:2:0}
\end{align}
Using the definition of $c_{\theta,\xi}$ in \eqref{c:theta:xi} and the bound \eqref{theta:rho:bound} for the $L^2_y$ norm of $\rho \left(\theta_\xi -1\right)$, we obtain
\begin{align}
\abs{T_{\Bcal}^{(2,1)}} \lesssim \norm{\bar w}_{\tau,r} \norm{A}_{\widetilde{\tau,r}}\,.
\label{eq:Magic:2:1}
\end{align}
For the second term, we use the one-dimensional Hardy inequality $\norm{f/y}_{L^2_y} \lesssim \norm{{\partial}_y f}_{L^2_y} \lesssim \norm{\rho {\partial}_y f}_{L^2_y}$, valid for $f$ such that $f|_{y=0} = 0$ (the second inequality holds since $\rho \geq 1$), and the bound
\begin{align}
\brak{\xi}^{3/2} \norm{y (\theta_\xi-1) \rho^2}_{L^2_y}
&\lesssim \brak{\xi}^{3/2} \norm{ y e^{-\frac{y^2 \brak{\xi}^2}{4(1+t/{\varepsilon})}}}_{L^2_y} \lesssim \brak{\xi}^{1/2} \norm{e^{-\frac{y^2 \brak{\xi}^2}{8(1+t/{\varepsilon})}}}_{L^2_y}
\lesssim 1
\label{eq:strangely:good}
\end{align}
which follows similarly to \eqref{theta:rho:bound}, and obtain
\begin{align}
\abs{T_{\Bcal}^{(2,2)}}
&\leq \int_{\RR} \int_0^\infty |\xi|^{3/2} \abs{y \rho^2 \left(\theta_\xi -1\right)} |\xi|^{1/2} \abs{A_\xi} \abs{\frac{\overline{\bar w_\xi}}{y}} e^{2\tau|\xi|} \brak{\xi}^{2r} \,\mathrm{d} y \,\mathrm{d} \xi
\notag\\
&\lesssim \norm{\abs{{\partial}_x}^{1/2} A}_{\tau,r} \norm{{\partial}_y \bar w}_{\tau,r} \,.
\label{eq:Magic:2:2}
\end{align}
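For the reader's convenience, we recall that the one-dimensional Hardy inequality used above is the classical bound for functions with $f|_{y=0}=0$ (and sufficient decay as $y \to \infty$): by integration by parts and the Cauchy--Schwarz inequality,
\begin{align*}
\int_0^\infty \frac{\abs{f}^2}{y^2} \,\mathrm{d} y
= \left[ - \frac{\abs{f}^2}{y} \right]_{y=0}^{y=\infty} + \int_0^\infty \frac{2 \Re \bigl( \overline{f} \, {\partial}_y f \bigr)}{y} \,\mathrm{d} y
\leq 2 \norm{\frac{f}{y}}_{L^2_y} \norm{{\partial}_y f}_{L^2_y} \,,
\end{align*}
so that $\norm{f/y}_{L^2_y} \leq 2 \norm{{\partial}_y f}_{L^2_y}$.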
For the third term on the right side of \eqref{eq:Magic:2:0} we simply appeal to \eqref{theta:rho:bound} and \eqref{eq:v:L:infty} to obtain
\begin{align}
\abs{T_{\Bcal}^{(2,3)}}
&\lesssim \int_{\RR} |\xi| \norm{\rho (\theta_\xi-1)}_{L^2_y} \abs{I_\infty[\bar w_\xi]} \norm{\rho \bar w_\xi}_{L^2_y} e^{2\tau|\xi|} \brak{\xi}^{2r} \,\mathrm{d}\xi
\notag\\
&\lesssim \int_{\RR} |\xi|^{1/2} \norm{\rho \bar w_\xi}_{L^2_y}^2 e^{2\tau|\xi|} \brak{\xi}^{2r} \,\mathrm{d}\xi
\notag\\
&\lesssim \norm{|{\partial}_x|^{1/2} \bar w}_{\tau,r} \norm{\bar w}_{\tau,r} \,.
\label{eq:Magic:2:3}
\end{align}
Lastly, for the nonlinear term in \eqref{eq:Magic:2:0} we similarly have
\begin{align}
\abs{T_{\Bcal}^{(2,4)}}
&\lesssim \int_{\RR} \int_{\RR} |\xi-\eta| \norm{\rho (\theta_\xi-1)}_{L^2_y} \abs{A_\eta} \abs{A_{\xi-\eta}} \norm{\rho \bar w_\xi}_{L^2_y} e^{2\tau|\xi|} \brak{\xi}^{2r} \,\mathrm{d} \eta \,\mathrm{d}\xi
\notag\\
&\lesssim \int_{\RR} \int_{\RR} |\xi-\eta| \left(\brak{\eta}^{r-1/2} + \brak{\xi-\eta}^{r-1/2}\right) \abs{A_\eta} \abs{A_{\xi-\eta}} \norm{\rho \bar w_\xi}_{L^2_y} e^{2\tau|\xi|} \brak{\xi}^{r+1/2} \,\mathrm{d}\eta \,\mathrm{d}\xi
\notag\\
&\lesssim \norm{\brak{{\partial}_x}^{1/2} \bar w}_{\tau,r} \norm{|{\partial}_x|^{1/2} A}_{\widetilde{\tau,r}} \norm{A}_{\widetilde{\tau,r}}\,.
\label{eq:Magic:2:4}
\end{align}
Combined, the bounds \eqref{eq:Magic:2:1}--\eqref{eq:Magic:2:4} yield an estimate for the $T_{\Bcal}^{(2)}$ term. Next, we estimate the remaining five terms on the right side of \eqref{eq:Magic:0}.
In order to bound $T_{\Bcal}^{(1)}$
we note from \eqref{eq:theta:def} that
\begin{align*}
\abs{\partial_t \theta_\xi}
&= \frac{y^2 \brak{\xi}^2}{2 {\varepsilon} (1+t/{\varepsilon})^2} e^{-\frac{y^2 \brak{\xi}^2}{2(1+t/{\varepsilon})}}
\leq \frac{1}{{\varepsilon}(1+t/{\varepsilon})} e^{-\frac{y^2 \brak{\xi}^2}{3(1+t/{\varepsilon})}} \lesssim \frac{1}{{\varepsilon}} e^{-\frac{y^2 \brak{\xi}^2}{3(1+t/{\varepsilon})}}
\end{align*}
and
\begin{align*}
\abs{y \partial_{yy} \theta_\xi}
\leq \frac{y \brak{\xi}^2}{(1+t/{\varepsilon}) } e^{-\frac{y^2 \brak{\xi}^2}{2(1+t/{\varepsilon})}}
+ \frac{y^3 \brak{\xi}^4}{(1+t/{\varepsilon})^2} e^{-\frac{y^2 \brak{\xi}^2}{2(1+t/{\varepsilon})}}
\lesssim \brak{\xi} e^{-\frac{y^2 \brak{\xi}^2}{3(1+t/{\varepsilon})}}
\end{align*}
from which we deduce
\begin{subequations}
\begin{align}
\|\rho {\partial}_t \theta_\xi \|_{L^2_y} & \lesssim \frac{1}{{\varepsilon} \brak{\xi}^{1/2}} \label{theta:est:2}
\\
\|y \rho^2 {\partial}_{yy} \theta_\xi \|_{L^2_y} &\lesssim \brak{\xi}^{1/2} \label{theta:est:1}
\, .
\end{align}
\end{subequations}
With estimates \eqref{theta:est:2}--\eqref{theta:est:1}, the Hardy inequality, and the fact that $\rho \geq 1$, we may estimate
\begin{align}
\abs{T_{\Bcal}^{(1)}}
&\leq \int_{\RR} \abs{A_\xi} \norm{\rho \partial_t \theta_\xi}_{L^2_y} \norm{\rho \bar w_\xi}_{L^2_y} e^{2\tau|\xi|} \brak{\xi}^{2r} \,\mathrm{d} \xi + \int_{\RR} \abs{A_\xi} \norm{y \rho^2 \partial_{yy} \theta_\xi}_{L^2_y} \norm{\frac{\bar w_\xi}{y}}_{L^2_y} e^{2\tau|\xi|} \brak{\xi}^{2r} \,\mathrm{d} \xi
\notag \\
&\lesssim \norm{\bar w}_{\tau,r} \norm{A}_{\widetilde{\tau,r}} + \norm{{\partial}_y \bar w}_{\tau,r} \norm{\brak{{\partial}_x}^{1/2} A}_{\widetilde{\tau,r}}
\, .
\label{eq:Magic:1}
\end{align}
In order to estimate the $T_{\Bcal}^{(3)}$ term in \eqref{eq:Magic:0}, we note that upon applying Lemma~\ref{lem:Hardy} to $ f = I_\infty[\bar w_\xi] - I_y[\bar w_\xi]$, we have that
\begin{align}
\norm{\rho(y) \left(I_\infty[\bar w_\xi] - I_y[\bar w_\xi]\right)}_{L^2_y}
&\lesssim \norm{\rho \bar w_\xi}_{L^2_y}
\end{align}
and thus
\begin{align}
\abs{T_{\Bcal}^{(3)}}
\lesssim \int_{\RR} |\xi| \norm{\rho \bar w_\xi}_{L^2_y}^2 e^{2\tau|\xi|} \brak{\xi}^{2r} \,\mathrm{d} \xi
\lesssim \norm{\abs{{\partial}_x}^{1/2} \bar w}_{\tau,r}^2\,.
\label{eq:Magic:3}
\end{align}
For the $T_{\Bcal}^{(4)}$ term in \eqref{eq:Magic:0}, we use \eqref{eq:strangely:good} to obtain that $\norm{y \rho (\theta_\xi-1)}_{L^2_y} \lesssim (1+t/{\varepsilon})^{3/4} \brak{\xi}^{-3/2}$, and the estimate
\begin{align}
\norm{\rho (I_\infty[1-\theta_\xi] - I_y[1-\theta_\xi])}_{L^2_y}
\lesssim \norm{\int_y^\infty e^{-\frac{(y')^2 \brak{\xi}^2}{2(1+t/{\varepsilon})}} \,\mathrm{d} y'}_{L^2_y}
\lesssim \frac{1}{\brak{\xi}^{3/2}}
\label{eq:PatriotsCheat}
\end{align}
to conclude
\begin{align}
\abs{T_{\Bcal}^{(4)}}
&\leq \int_{\RR} |\xi| \abs{A_\xi} \left( \norm{\rho y (1-\theta_\xi)}_{L^2_y} + \norm{\rho (I_\infty[1-\theta_\xi] - I_y[1-\theta_\xi])}_{L^2_y} \right) \norm{\rho \bar w_\xi}_{L^2_y} e^{2\tau|\xi|} \brak{\xi}^{2r} \,\mathrm{d} \xi
\notag \\
&\lesssim \norm{\bar w}_{\tau,r} \norm{A}_{\widetilde{\tau,r}}
\, .
\label{eq:Magic:4}
\end{align}
Lastly, we turn to the two nonlinear terms in \eqref{eq:Magic:0}. First, we use \eqref{theta:rho:bound} to estimate
\begin{align*}
\norm{\rho \abs{\theta_\eta\theta_{\xi-\eta}-1}}_{L^2_y}
\leq \norm{\rho\abs{\theta_{\xi-\eta}-1}}_{L^2_y} + \norm{\rho \abs{\theta_\eta-1}}_{L^2_y} \lesssim \frac{1}{\brak{\xi-\eta}^{1/2}} + \frac{1}{\brak{\eta}^{1/2}}
\end{align*}
and then use the bound
\begin{align*}
\brak{\xi}^{r} |\xi|^{1/2}
\lesssim \brak{\eta}^{r} |\eta|^{1/2} + \brak{\xi-\eta}^{r} |\xi-\eta|^{1/2}
\end{align*}
to conclude
\begin{align}
\abs{T_{\Bcal}^{(5)}}
&\leq \int_{\RR} \int_{\RR} |\xi|^{1/2} \brak{\xi}^r \abs{A_\eta} \abs{A_{\xi-\eta}} \norm{\rho (\theta_{\eta} \theta_{\xi-\eta} -1)}_{L^2_y} |\xi|^{1/2} \brak{\xi}^r \norm{\rho \bar w_\xi}_{L^2_y} e^{2\tau|\xi|} \,\mathrm{d} \eta \,\mathrm{d} \xi
\notag \\
&\lesssim \norm{|{\partial}_x|^{1/2} \bar w}_{\tau,r} \norm{A}_{\widetilde{\tau,r}} \norm{|{\partial}_x|^{1/2} A}_{\widetilde{\tau,r}}
\, .
\label{eq:Magic:5}
\end{align}
For the last term in \eqref{eq:Magic:0}, we use that
\begin{align*}
\norm{\rho\, I_y[\theta_\eta] {\partial}_y \theta_{\xi-\eta}}_{L^2_y} \leq \norm{\rho y {\partial}_y \theta_{\xi-\eta}}_{L^2_y} \lesssim \norm{e^{\frac{y^2}{8(1+t/{\varepsilon})}} e^{-\frac{y^2 \brak{\xi-\eta}^2}{4(1+t/{\varepsilon})}}}_{L^2_y} \lesssim \frac{1}{\langle \xi -\eta\rangle^{1/2}}
\end{align*}
and the triangle inequality $|\eta|^{1/2} \leq |\xi-\eta|^{1/2} + |\xi|^{1/2}$ to conclude that
\begin{align}
\abs{T_{\Bcal}^{(6)}}
&\leq \int_{\RR} \int_{\RR} |\eta|^{1/2} (|\xi-\eta|^{1/2} + |\xi|^{1/2}) \brak{\xi}^r \abs{A_\eta} \abs{A_{\xi-\eta}} \norm{\rho I_y[\theta_{\eta}] {\partial}_y \theta_{\xi-\eta}}_{L^2_y} \brak{\xi}^r \norm{\rho \bar w_\xi}_{L^2_y} e^{2\tau|\xi|} \,\mathrm{d} \eta \,\mathrm{d} \xi
\notag \\
&\lesssim \norm{\brak{{\partial}_x}^{1/2} \bar w}_{\tau,r} \norm{A}_{\widetilde{\tau,r}} \norm{|{\partial}_x|^{1/2} A}_{\widetilde{\tau,r}}
\, .
\label{eq:Magic:6}
\end{align}
Upon collecting the bounds \eqref{eq:Magic:2:1}--\eqref{eq:Magic:2:4}, \eqref{eq:Magic:1}--\eqref{eq:Magic:3}, and \eqref{eq:Magic:4}--\eqref{eq:Magic:6}, we conclude the proof of \eqref{eq:Magic:*}.
\subsection{Error terms in the ${\partial}_y \bar w$ energy}
The estimates in this section are very similar to those in Section~\ref{sec:bar:w:energy}; however, several modifications are in order: we are testing the equation with the conjugate of ${\partial}_y \bar w$, the weight $\brak{\xi}^{2r}$ becomes $\brak{\xi}^{2r-1}$, and we may use that the cutoff $\chi$ vanishes near $\{ y=0\}$.
\subsubsection{The $T_{{\partial}_y \Ncal}$ term}
From \eqref{eq:dy:nonlinear:terms:a} we see that ${\partial}_y \Ncal_\xi(\bar w,\bar w) = \Ncal_\xi(\bar w,{\partial}_y \bar w)$, and thus the estimates are very similar to those for the $T_{\Ncal}$ term. From \eqref{eq:dy:nonlinear:terms:a} and \eqref{eq:Kobe:1:a} we have
\begin{align*}
\abs{ T_{{\partial}_y \Ncal} }
&\lesssim \int_\mathbb{R}\int_\mathbb{R}\norm{\rho {\partial}_y \bar w_\eta}_{L^2_y} \norm{\chi \rho {\partial}_y \bar w_{\xi-\eta}}_{L^2_y} \norm{\chi \rho {\partial}_y \bar w_\xi}_{L^2_y} \notag\\
&\qquad \qquad\qquad \qquad \times \abs{\xi-\eta} ( \brak{\eta}^{r-1/2} + \brak{\xi-\eta}^{r-1/2}) \brak{\xi}^{r-1/2} e^{2\tau|\xi|} \,\mathrm{d} \eta \,\mathrm{d} \xi
\notag\\
&\quad + \int_\mathbb{R}\int_\mathbb{R}\norm{\rho \bar w_\eta}_{L^2_y} \norm{\chi \rho {\partial}_{yy} \bar w_{\xi-\eta}}_{L^2_y} \norm{\chi \rho {\partial}_y \bar w_\xi}_{L^2_y} \notag\\
&\qquad \qquad\qquad \qquad \times \abs{\eta} ( \brak{\eta}^{r-1/2} + \brak{\xi-\eta}^{r-1/2}) \brak{\xi}^{r-1/2} e^{2\tau|\xi|} \,\mathrm{d} \eta \,\mathrm{d} \xi
\notag\\
&\lesssim \norm{\brak{{\partial}_x}^{1/2} \chi {\partial}_y \bar w}_{\tau,r-1/2}^2 \norm{{\partial}_y \bar w}_{\tau,r}
\notag\\
&\quad + \norm{\chi {\partial}_{yy} \bar w}_{\tau,r-1/2} \norm{\chi {\partial}_{y} \bar w}_{\tau,r-1/2} \norm{\brak{{\partial}_x}^{1/2} \bar w}_{\tau,r}
\end{align*}
where we have used that $r>2$. The above estimate gives the proof of \eqref{eq:Barkley:@}.
\subsubsection{The $T_{{\partial}_y \Lcal}$ term}
The term $T_{{\partial}_y \Lcal}$ is defined via \eqref{eq:dy:nonlinear:terms:b} and \eqref{eq:Kobe:1:b}, and may be split into four terms $T_{{\partial}_y \Lcal}^{(j)}$ with $j\in \{1,\ldots,4\}$, according to the four terms in the integrand of \eqref{eq:dy:nonlinear:terms:b}. For the first term we have
\begin{align}
\abs{T_{{\partial}_y \Lcal}^{(1)}}
&\lesssim \int_\mathbb{R}\int_{\RR} \norm{\chi \rho {\partial}_y \bar w_\eta}_{L^2_y} \abs{A_{\xi-\eta}} \norm{\chi \rho {\partial}_y \bar w_\xi}_{L^2_y} |\xi-\eta| \brak{\xi}^{2r-1} e^{2\tau|\xi|} \,\mathrm{d} \eta \,\mathrm{d} \xi
\notag\\
&\lesssim \norm{\chi {\partial}_y \bar w}_{\tau,r-1/2}^2 \norm{|{\partial}_x|^{1/2} A}_{\widetilde{\tau,r}}
\, .
\label{eq:Jordan:@:1}
\end{align}
Using $\chi\equiv 0$ on $[0,1]$, we have that $\chi(y) \lesssim y^2 \chi(y)$, which we may combine with the pointwise bound
$
\abs{y^2 {\partial}_y \theta_\xi} \lesssim \brak{\xi}^{-1},
$
to estimate
\begin{align}
\abs{T_{{\partial}_y \Lcal}^{(2)}}
&\lesssim \int_\mathbb{R}\int_{\RR} \norm{ \rho \bar w_{\eta}}_{L^2_y} \norm{\chi y^2 {\partial}_y \theta_{\xi-\eta}}_{L^\infty_y} \abs{A_{\xi-\eta}} \norm{\chi \rho {\partial}_y \bar w_\xi}_{L^2_y} |\xi-\eta| \brak{\xi}^{2r-1} e^{2\tau|\xi|} \,\mathrm{d} \eta \,\mathrm{d} \xi
\notag\\
&\lesssim \norm{\chi {\partial}_y \bar w}_{\tau,r-1/2} \norm{\bar w}_{\tau,r} \norm{A}_{\widetilde{\tau,r}}
\,.
\label{eq:Jordan:@:2}
\end{align}
For the third term in the definition of $T_{{\partial}_y \Lcal}$ we have
\begin{align}
\abs{T_{{\partial}_y \Lcal}^{(3)}}
&\lesssim \int_\mathbb{R}\int_{\RR} \norm{\chi \rho {\partial}_y \bar w_{\xi-\eta}}_{L^2_y} \abs{A_{\eta}} \norm{\chi \rho {\partial}_y \bar w_\xi}_{L^2_y} |\xi-\eta| \brak{\xi}^{2r-1} e^{2\tau|\xi|} \,\mathrm{d} \eta \,\mathrm{d} \xi
\notag\\
&\lesssim \norm{\chi |{\partial}_x|^{1/2} {\partial}_y \bar w}_{\tau,r-1/2} \norm{\chi \brak{{\partial}_x}^{1/2} {\partial}_y \bar w}_{\tau,r-1/2} \norm{A}_{\widetilde{\tau,r}}
\,.
\label{eq:Jordan:@:3}
\end{align}
For the last term, we use again that $\chi(y) \lesssim y^2 \chi(y)$, and that similarly to \eqref{theta:est:1} (and the equation two lines above it) we have
\[
\abs{\rho^2 y^2 {\partial}_{yy} \theta_\xi} \lesssim \Big(y^2 \brak{\xi}^2 + y^4 \brak{\xi}^4 \Big) e^{\frac{y^2}{4(1+t/{\varepsilon})}} e^{-\frac{y^2 \brak{\xi}^2}{2(1+t/{\varepsilon})}} \lesssim e^{-\frac{y^2 (\brak{\xi}^2-1)}{4(1+t/{\varepsilon})}} \lesssim 1
\]
which implies
\begin{align}
\abs{T_{{\partial}_y \Lcal}^{(4)}}
&\lesssim \int_\mathbb{R}\int_{\RR} \norm{I_y[\bar w_{\eta}]}_{L^\infty_y} \abs{A_{\xi-\eta}} \norm{\rho^{-1}}_{L^2_y} \norm{\chi y^2 \rho^2 {\partial}_{yy} \theta_{\xi-\eta}}_{L^\infty_y} \norm{\chi \rho {\partial}_y \bar w_\xi}_{L^2_y} |\eta| \brak{\xi}^{2r-1} e^{2\tau|\xi|} \,\mathrm{d} \eta \,\mathrm{d} \xi
\notag\\
&\lesssim \int_\mathbb{R}\int_{\RR} \norm{\rho \bar w_{\eta}}_{L^2_y} \abs{A_{\xi-\eta}} \norm{\chi \rho {\partial}_y \bar w_\xi}_{L^2_y} |\eta| \brak{\xi}^{2r-1} e^{2\tau|\xi|} \,\mathrm{d} \eta \,\mathrm{d} \xi
\notag\\
&\lesssim \norm{\chi {\partial}_y \bar w}_{\tau,r-1/2} \norm{\abs{{\partial}_x}^{1/2} \bar w}_{\tau,r} \norm{A}_{\widetilde{\tau,r}} \,.
\label{eq:Jordan:@:4}
\end{align}
Combining the bounds \eqref{eq:Jordan:@:1}, \eqref{eq:Jordan:@:2}, \eqref{eq:Jordan:@:3}, and \eqref{eq:Jordan:@:4}, we obtain the proof of \eqref{eq:Jordan:@}.
\subsubsection{The $T_{{\partial}_y \Mcal}$ term}
The $T_{{\partial}_y \Mcal}$ term is defined via \eqref{eq:dy:nonlinear:terms:c} and \eqref{eq:Kobe:1:c} as $T_{{\partial}_y \Mcal}^{(1)} + T_{{\partial}_y \Mcal}^{(2)}$, where the decomposition is according to the two terms in the integrand of \eqref{eq:dy:nonlinear:terms:c}. For the first term we use that $\abs{\theta_\eta} \leq 1$ to obtain
\begin{align}
\abs{T_{{\partial}_y \Mcal}^{(1)}}
&\lesssim \int_{\RR} \int_{\RR} |\eta| |A_\eta| \norm{\chi \rho {\partial}_y \bar w_{\xi-\eta}}_{L^2_y} \norm{\chi \rho {\partial}_y \bar w_{\xi}}_{L^2_y} \brak{\xi}^{2r-1} e^{2\tau |\xi|} \,\mathrm{d} \eta \,\mathrm{d} \xi
\notag\\
&\lesssim \norm{\chi {\partial}_y \bar w}_{\tau,r-1/2}^2 \norm{\abs{{\partial}_x}^{1/2} A}_{\widetilde{\tau,r}}
\label{eq:Jordan:@:5}
\end{align}
while for the second term we appeal to $\abs{I_y[\theta_\eta]} \leq y$, which gives
\begin{align}
\abs{T_{{\partial}_y \Mcal}^{(2)}}
&\lesssim \int_{\RR} \int_{\RR} |\eta| |A_\eta| \norm{\chi \rho {\partial}_{yy} \bar w_{\xi-\eta}}_{L^2_y} \norm{y \chi \rho {\partial}_y \bar w_{\xi}}_{L^2_y} \brak{\xi}^{2r-1} e^{2\tau |\xi|} \,\mathrm{d} \eta \,\mathrm{d} \xi
\notag\\
&\lesssim \norm{y \chi {\partial}_y \bar w}_{\tau,r-1/2} \norm{\chi {\partial}_{yy} \bar w}_{\tau,r-1/2} \norm{\abs{{\partial}_x}^{1/2} A}_{\widetilde{\tau,r}}
\, .
\label{eq:Jordan:@:6}
\end{align}
Combining \eqref{eq:Jordan:@:5} and \eqref{eq:Jordan:@:6} we obtain the proof of \eqref{eq:Jordan:@:*}.
\subsubsection{The $T_{{\partial}_y \Bcal}$ term}
According to \eqref{eq:dy:nonlinear:terms:d} and \eqref{eq:Kobe:1:d} we write
\[
T_{{\partial}_y \Bcal} = \sum_{j=1}^{6} T_{{\partial}_y \Bcal}^{(j)},
\]
where the decomposition is according to the six terms in \eqref{eq:dy:nonlinear:terms:d}.
Since we are performing estimates on the support of $\chi$, i.e. for $y\geq 1$, the $y$-derivatives of the lift function $\theta_\xi$ can be made arbitrarily small in this region, resulting in simpler estimates.
For instance, similarly to \eqref{theta:est:2}--\eqref{theta:est:1}, we may show that
\begin{align*}
\|\chi \rho^2 {\partial}_t {\partial}_y \theta_\xi \|_{L^\infty_y} &\lesssim \| \rho^2 y {\partial}_t {\partial}_y \theta_\xi \|_{L^\infty_y}\lesssim \frac{1}{{\varepsilon} }
\\
\|\chi \rho^2 {\partial}_{yyy} \theta_\xi \|_{L^\infty_y} &\lesssim \| \rho^2 y^3 {\partial}_{yyy} \theta_\xi \|_{L^\infty_y} \lesssim 1
\end{align*}
and therefore
\begin{align}
\abs{T_{{\partial}_y \Bcal}^{(1)}}
&\lesssim \int_{\RR} \abs{A_\xi} \|\chi \rho^2 {\partial}_t {\partial}_y \theta_\xi \|_{L^\infty_y} \norm{\rho^{-1}}_{L^2_y} \norm{\chi \rho {\partial}_y \bar w_\xi}_{L^2_y} \brak{\xi}^{2r-1} e^{2\tau |\xi|} \,\mathrm{d} \xi \notag\\
&\qquad + \int_{\RR} \abs{A_\xi} \|\chi \rho^2 {\partial}_{yyy} \theta_\xi \|_{L^\infty_y} \norm{\rho^{-1}}_{L^2_y} \norm{\chi \rho {\partial}_y \bar w_\xi}_{L^2_y} \brak{\xi}^{2r-1} e^{2\tau |\xi|} \,\mathrm{d} \xi \notag\\
&\lesssim \frac{1}{{\varepsilon} } \norm{A}_{\widetilde{\tau,r}} \norm{\chi {\partial}_y\bar w}_{\tau,r-1/2}
\label{eq:Magic:@:1}
\end{align}
where we have used that ${\varepsilon} \lesssim 1$ and $t\lesssim 1$. We may also directly estimate
\begin{align}
\abs{T_{{\partial}_y \Bcal}^{(3)}}
\lesssim \int_{\RR} \abs{\xi} \norm{\rho \bar w_\xi}_{L^2_y} \norm{\chi \rho {\partial}_y \bar w_\xi}_{L^2_y} \brak{\xi}^{2r-1} e^{2\tau |\xi|} \,\mathrm{d} \xi
\lesssim \norm{\chi {\partial}_y\bar w}_{\tau,r-1/2} \norm{\abs{{\partial}_x}^{1/2} \bar w}_{\tau,r}
\label{eq:Magic:@:3}
\end{align}
and similarly to \eqref{eq:Magic:@:1} we have
\begin{align}
\abs{T_{{\partial}_y \Bcal}^{(4)}}
&\lesssim \int_{\RR} \abs{A_\xi} \abs{\xi} \|\chi \rho^2 y {\partial}_y \theta_\xi \|_{L^\infty_y} \norm{\rho^{-1}}_{L^2_y} \norm{\chi \rho {\partial}_y \bar w_\xi}_{L^2_y} \brak{\xi}^{2r-1} e^{2\tau |\xi|} \,\mathrm{d} \xi \notag\\
&\lesssim \norm{\abs{{\partial}_x}^{1/2} A}_{\widetilde{\tau,r}} \norm{\chi {\partial}_y\bar w}_{\tau,r-1/2}
\,.
\label{eq:Magic:@:4}
\end{align}
It remains to treat the ${\partial}_t A_\xi$ term and the nonlinear terms in \eqref{eq:dy:nonlinear:terms:d}.
For the ${\partial}_t A_\xi$ contribution, namely $T_{{\partial}_y \Bcal}^{(2)}$, we need to use a decomposition that is analogous to \eqref{eq:Magic:2:0}. The main difference is that the factors $\theta_\xi-1$ are now replaced by ${\partial}_y \theta_\xi$, and, as mentioned earlier, $\chi(y) \abs{{\partial}_y \theta_\xi}$ can be made arbitrarily small. In particular, we may use the bound
\begin{align}
\brak{\xi}^j \norm{\chi \rho^2 {\partial}_y \theta_\xi}_{L^\infty_y} \lesssim \brak{\xi}^j \norm{ \rho^2 y^{1+j} {\partial}_y \theta_\xi}_{L^\infty_y} \lesssim 1,
\label{eq:Celtics:no:good}
\end{align}
combined with \eqref{eq:A:evo} to estimate
\begin{align}
\abs{T_{{\partial}_y \Bcal}^{(2)}}
&\lesssim \int_{\RR} \abs{{\partial}_t A_\xi} \norm{\chi \rho^2 {\partial}_y\theta_\xi}_{L^\infty_y} \norm{\rho^{-1}}_{L^2_y} \norm{\chi \rho {\partial}_y \bar w_\xi}_{L^2_y} \brak{\xi}^{2r-1} e^{2\tau |\xi|} \,\mathrm{d} \xi
\notag\\
&\lesssim \int_{\RR} \abs{\xi} c_{\theta,\xi} \abs{A_\xi} \norm{\chi \rho {\partial}_y \bar w_\xi}_{L^2_y} \brak{\xi}^{2r-1} e^{2\tau |\xi|} \,\mathrm{d} \xi
+ \int_{\RR} \frac{1}{\brak{\xi}} \abs{\xi}^2 \abs{A_\xi} \norm{\chi \rho {\partial}_y \bar w_\xi}_{L^2_y} \brak{\xi}^{2r-1} e^{2\tau |\xi|} \,\mathrm{d} \xi
\notag\\
&\qquad + \int_{\RR} \frac{\abs{\xi}}{\brak{\xi}^{1/2}} \abs{I_\infty[\bar w_\xi]} \norm{\chi \rho {\partial}_y \bar w_\xi}_{L^2_y} \brak{\xi}^{2r-1} e^{2\tau |\xi|} \,\mathrm{d} \xi
\notag\\
&\qquad + \int_{\RR}\int_{\RR} \frac{|\xi|}{\brak{\xi}} \abs{A_\eta} \abs{A_{\xi-\eta}} \norm{\chi \rho {\partial}_y \bar w_\xi}_{L^2_y} \brak{\xi}^{2r-1} e^{2\tau |\xi|} \,\mathrm{d} \eta \,\mathrm{d} \xi
\notag\\
&\lesssim \norm{\brak{{\partial}_x}^{1/2} A}_{\widetilde{\tau,r}} \norm{\chi {\partial}_y \bar w}_{\tau,r-1/2} + \norm{\bar w}_{\tau,r} \norm{\chi {\partial}_y \bar w}_{\tau,r-1/2} + \norm{A}_{\widetilde{\tau,r}}^2 \norm{\chi {\partial}_y \bar w}_{\tau,r-1/2}
\, .
\label{eq:Magic:@:2}
\end{align}
Lastly, for the two nonlinear contributions, arising due to the last two terms in \eqref{eq:dy:nonlinear:terms:d}, we again appeal to \eqref{eq:Celtics:no:good} and estimate
\begin{align}
\abs{T_{{\partial}_y \Bcal}^{(5)}}
&\lesssim
\int_{\RR}\int_{\RR} |\xi-\eta| \abs{A_\eta} \abs{A_{\xi-\eta}} \norm{\chi \rho^2 {\partial}_y \theta_{\xi-\eta}}_{L^\infty_y} \norm{\rho^{-1}}_{L^2_y} \norm{\chi \rho {\partial}_y \bar w_\xi}_{L^2_y} \brak{\xi}^{2r-1} e^{2\tau |\xi|} \,\mathrm{d} \eta \,\mathrm{d} \xi
\notag\\
&\lesssim \int_{\RR}\int_{\RR} \abs{A_\eta} \abs{A_{\xi-\eta}} \norm{\chi \rho {\partial}_y \bar w_\xi}_{L^2_y} \brak{\xi}^{2r-1} e^{2\tau |\xi|} \,\mathrm{d} \eta \,\mathrm{d} \xi
\notag\\
&\lesssim \norm{A}_{\widetilde{\tau,r}}^2 \norm{\chi {\partial}_y \bar w}_{\tau,r-1/2}
\label{eq:Magic:@:5}
\end{align}
and
\begin{align}
\abs{T_{{\partial}_y \Bcal}^{(6)}}
&\lesssim
\int_{\RR}\int_{\RR} |\eta| \abs{A_\eta} \abs{A_{\xi-\eta}} \norm{\chi \rho^2 y {\partial}_{yy} \theta_{\xi-\eta}}_{L^\infty_y} \norm{\rho^{-1}}_{L^2_y} \norm{\chi \rho {\partial}_y \bar w_\xi}_{L^2_y} \brak{\xi}^{2r-1} e^{2\tau |\xi|} \,\mathrm{d} \eta \,\mathrm{d} \xi
\notag\\
&\lesssim \int_{\RR}\int_{\RR} |\eta| \abs{A_\eta} \abs{A_{\xi-\eta}} \norm{\chi \rho {\partial}_y \bar w_\xi}_{L^2_y} \brak{\xi}^{2r-1} e^{2\tau |\xi|} \,\mathrm{d} \eta \,\mathrm{d} \xi
\notag\\
&\lesssim \norm{\abs{{\partial}_x}^{1/2} A}_{\widetilde{\tau,r}} \norm{A}_{\widetilde{\tau,r}} \norm{\chi {\partial}_y \bar w}_{\tau,r-1/2} \, .
\label{eq:Magic:@:6}
\end{align}
By summing the bounds \eqref{eq:Magic:@:1}, \eqref{eq:Magic:@:3}, \eqref{eq:Magic:@:4}, \eqref{eq:Magic:@:2}, \eqref{eq:Magic:@:5}, and \eqref{eq:Magic:@:6}, we obtain the bound \eqref{eq:Magic:@}.
\appendix
\section{Review of the incompressible Triple Deck over a flat 2D plate}
\label{sec:appendix}
We roughly follow the presentation from~\cite{Smith82}. We first introduce the variables
\begin{align} \label{scalings}
X = \frac{x-1}{\nu^{3/8}}, \qquad \bar Y = \frac{y}{\nu^{1/2}}, \qquad Y = \frac{y}{\nu^{5/8}}, \qquad \widetilde Y = \frac{y}{\nu^{3/8}}, \qquad T = \frac{t}{\nu^{1/4}} \, .
\end{align}
Here $\bar{Y}$, $Y$, and $\widetilde{Y}$ are the fast vertical variables in the main deck, lower deck, and upper deck, respectively. The $X$ variable is the fast horizontal variable in the vicinity of the trailing edge, situated at $x=1$, $y=0$.
On the fast time scale $T$, to leading order only disturbances in the lower deck are active, while in the other decks the fast time dependence does not enter the leading order momentum equation. Throughout this section we abuse notation and write $\nu$ instead of an inverse Reynolds number, i.e. we treat $\nu$ as if it were dimensionless.
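In terms of the original vertical variable $y$, the scalings \eqref{scalings} thus single out three nested regions,
\begin{align*}
\mbox{lower deck: } y \sim \nu^{5/8}, \qquad \mbox{main deck: } y \sim \nu^{1/2}, \qquad \mbox{upper deck: } y \sim \nu^{3/8},
\end{align*}
all located within a horizontal distance $x - 1 \sim \nu^{3/8}$ of the trailing edge.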
\subsection{Main deck}
The ansatz on the solution of the 2D Navier-Stokes equation in this region is
\begin{align}
(u_M,v_M,p_M) = \left( U_B(\bar Y) + \nu^{\frac 1 8} u_1(X,\bar Y, T), \nu^{\frac 1 4} v_1( X,\bar Y, T), \nu^{\frac 1 4} p_1(X,\bar Y,T) \right) + \mbox{lower order terms}.
\label{eq:main:deck:ansatz}
\end{align}
\noindent Above, $U_B$ is defined to be the Blasius boundary layer, introduced in \eqref{Blasius.a} as $U_B:= f'(\frac{\bar{Y}}{\sqrt{x}})$, where $f$ solves \eqref{Blasius.b}. Since our scaling ensures that we are very close to $x = 1$, to ease notation we suppress the $x$ dependence of $U_B$ and write $U_B' = {\partial}_{\bar{Y}}U_B$. Inserting the ansatz \eqref{eq:main:deck:ansatz} into the 2D Navier-Stokes equations and collecting only the leading order terms, we obtain the {\em inviscid type system}
\begin{subequations}
\label{eq:main:deck}
\begin{align}
U_B {\partial}_{X} u_1 + v_1 U_B' &= 0\\
{\partial}_{\bar Y} p_1 &= 0 \\
{\partial}_{X} u_1 + {\partial}_{\bar Y} v_1 &= 0
\, .
\end{align}
\end{subequations}
Note that both the time derivative and the dissipation term in the tangential momentum equation drop out, as they are lower order in $\nu$.
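This may be checked directly from \eqref{scalings} and \eqref{eq:main:deck:ansatz}: the retained terms satisfy
\begin{align*}
U_B {\partial}_x \bigl(\nu^{\frac 1 8} u_1\bigr) \sim \nu^{-\frac 1 4} U_B {\partial}_X u_1
\qquad \mbox{and} \qquad
\nu^{\frac 1 4} v_1 {\partial}_y U_B(\bar Y) \sim \nu^{-\frac 1 4} v_1 U_B' \,,
\end{align*}
while ${\partial}_t (\nu^{\frac 1 8} u_1) \sim \nu^{-\frac 1 8} {\partial}_T u_1$, ${\partial}_x (\nu^{\frac 1 4} p_1) \sim \nu^{-\frac 1 8} {\partial}_X p_1$, and $\nu {\partial}_{yy} (\nu^{\frac 1 8} u_1) \sim \nu^{\frac 1 8} {\partial}_{\bar Y}^2 u_1$ are all of lower order; in particular, this bookkeeping also explains why the pressure gradient is absent from the first equation of \eqref{eq:main:deck}.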
The system \eqref{eq:main:deck} has as a solution
\begin{align}
u_1= A(X,T) U_B'(\bar Y), \qquad v_1 = - {\partial}_{X}A(X,T) U_B(\bar Y), \qquad p_1 = P(X,T) \,,
\label{eq:main:deck:sol}
\end{align}
for some unknown functions $A(X,T)$ and $P(X,T)$. Note that the solution \eqref{eq:main:deck:sol} satisfies the boundary condition $u_1|_{\bar{Y} \to \infty} = 0$. In view of \eqref{eq:main:deck:ansatz}, this matching condition enforces that the horizontal velocity in the main deck converges rapidly to the ambient Blasius flow, which is what is observed when the boundary layer separates.
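For completeness, one checks directly that \eqref{eq:main:deck:sol} indeed solves \eqref{eq:main:deck}:
\begin{align*}
U_B {\partial}_{X} u_1 + v_1 U_B' = U_B U_B' {\partial}_X A - U_B U_B' {\partial}_X A = 0,
\qquad
{\partial}_{X} u_1 + {\partial}_{\bar Y} v_1 = U_B' {\partial}_X A - U_B' {\partial}_X A = 0 \,,
\end{align*}
while ${\partial}_{\bar Y} p_1 = 0$ holds since $P$ does not depend on $\bar Y$.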
The boundary condition
\begin{align*}
A(X, T) \rightarrow 0 \text{ as } X \rightarrow -\infty
\end{align*}
ensures that, approaching from the left, i.e. as $ x \to 1^{-}$, the main deck profile matches the Blasius boundary layer profile $U_B\left( \frac{\bar Y}{\sqrt{x}} \right)$. Here the lateral boundary $x=1^-$ in the original variables corresponds to the boundary $X \to -\infty$ in the rescaled variables (as $\nu \to 0$).
\subsection{Lower deck}
Notice now that the main deck, \eqref{eq:main:deck}, contributes a non-zero trace onto $\bar{Y} = 0$, which needs to be adjusted. Thus, it does not suffice to take the main deck as the full flow, as it does not satisfy the no-slip boundary condition. This is the purpose of introducing the lower deck. More precisely, we compute
\begin{align} \nonumber
u_M|_{\bar{Y} \to 0} &= U_B(\bar{Y}) + \nu^{\frac 1 8} u_1(X, \bar{Y}, T) \sim \bar{Y} U_B'(0) + \nu^{\frac 1 8} A(X,T) U_B'(0) \\ \label{B:trace}
&\sim \nu^{\frac 1 8} U_B'(0) (\nu^{-\frac 1 8} \bar{Y} + A(X,T)) \sim \nu^{\frac 1 8} U_B'(0) (Y + A(X,T)),
\end{align}
\noindent where we have used the scaling $Y = \nu^{-\frac 1 8} \bar{Y}$, which relates the lower deck scaling to the main deck (Prandtl) scaling. This then suggests that, in order to correct for the boundary trace \eqref{B:trace}, we need to seek a lower deck expansion of magnitude $\nu^{\frac 1 8}$. The ansatz on the solution $(u_L,v_L,p_L)$ of the 2D Navier-Stokes equation in this region is thus
\begin{align}
(u_L,v_L,p_L) = \left(\nu^{\frac 1 8} U (X, Y, T), \nu^{\frac 3 8} V( X, Y, T), \nu^{\frac 1 4} P( X, Y, T) \right) + \mbox{lower order terms}
\label{eq:lower:deck:ansatz}
\,.
\end{align}
Inserting ansatz \eqref{eq:lower:deck:ansatz} into the 2D Navier-Stokes equations and collecting only the leading order terms we obtain the {\em boundary layer type system}
\begin{subequations}
\label{eq:lower:deck}
\begin{align}
{\partial}_{T} U + U {\partial}_{X} U + V {\partial}_{Y} U + {\partial}_{X} P - {\partial}_{Y}^2 U &= 0\\
{\partial}_{Y} P &= 0 \\
{\partial}_{X} U + {\partial}_{Y} V &= 0
\, .
\end{align}
\end{subequations}
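As for the main deck, the orders may be verified from \eqref{scalings} and \eqref{eq:lower:deck:ansatz}: for instance,
\begin{align*}
\nu^{\frac 1 8} U {\partial}_x \bigl(\nu^{\frac 1 8} U\bigr) \sim \nu^{-\frac 1 8} U {\partial}_X U
\qquad \mbox{and} \qquad
\nu {\partial}_{yy} \bigl(\nu^{\frac 1 8} U\bigr) \sim \nu^{-\frac 1 8} {\partial}_{Y}^2 U \,,
\end{align*}
and the same bookkeeping shows that ${\partial}_t (\nu^{\frac 1 8} U)$, $\nu^{\frac 3 8} V {\partial}_y (\nu^{\frac 1 8} U)$, and ${\partial}_x (\nu^{\frac 1 4} P)$ are also of size $\nu^{-\frac 1 8}$. Thus, in contrast with the main deck, the time derivative, the pressure gradient, and the viscous term all participate in the leading order balance.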
Matching the tangential velocity as $Y\to \infty$ in \eqref{eq:lower:deck:ansatz} with the tangential velocity as $\bar Y \to 0$ in \eqref{B:trace}, we arrive at the boundary condition (for simplicity, take $U_B'(0) = 1$)
\begin{align}
\lim_{Y \to \infty} \left( U(X, Y, T) - Y \right)= A(X, T)
\label{eq:lower:deck:BC:1}
\,.
\end{align}
On the other hand, at the boundary of the plate we impose Dirichlet boundary conditions
\begin{align}
U(X,0,T) = V(X,0,T) = 0 \, .
\label{eq:lower:deck:BC:2}
\end{align}
\subsection{Upper deck}
The flow in this region is to leading order of steady potential inviscid type. That is, the leading order is an Euler flow, which takes as arguments the unscaled variables $(t, x, y)$ appearing in \eqref{scalings}. In comparison to the perturbations, which in this deck are functions of $(X, \widetilde{Y})$, every Euler flow fluctuates slowly, and so, without loss of generality, we take the outer Euler flow to be the constant flow $(1, 0)$. This yields the ansatz
\begin{align}
(u_U,v_U,p_U) = \left(1 + \nu^{\frac 1 4} u_2(X, \widetilde Y, T), \nu^{\frac 1 4} v_2( X,\widetilde Y, T), \nu^{\frac 1 4} p_2( X,\widetilde Y, T) \right) + \mbox{lower order terms}
\, .
\label{eq:upper:deck:ansatz}
\end{align}
Inserting this ansatz into the 2D Navier-Stokes equations and collecting only the leading order terms we obtain the {\em potential type system}
\begin{subequations}
\label{eq:upper:deck}
\begin{align}
{\partial}_X u_2 + {\partial}_X p_2&= 0\\
{\partial}_X v_2 + {\partial}_{\widetilde Y} p_2 &= 0 \\
{\partial}_{X} u_2 + {\partial}_{\widetilde Y} v_2 &= 0
\, .
\end{align}
\end{subequations}
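Once more the orders are consistent: with \eqref{scalings} and \eqref{eq:upper:deck:ansatz}, the advection by the outer flow and the pressure gradient are both of size $\nu^{-\frac 1 8}$, e.g.
\begin{align*}
1 \cdot {\partial}_x \bigl(\nu^{\frac 1 4} u_2\bigr) \sim \nu^{-\frac 1 8} {\partial}_X u_2
\qquad \mbox{and} \qquad
{\partial}_x \bigl(\nu^{\frac 1 4} p_2\bigr) \sim \nu^{-\frac 1 8} {\partial}_X p_2 \,,
\end{align*}
while the time derivative ${\partial}_t (\nu^{\frac 1 4} u_2) \sim {\partial}_T u_2$, the quadratic terms, and the viscous terms $\nu {\partial}_{yy} (\nu^{\frac 1 4} u_2) \sim \nu^{\frac 1 2} {\partial}_{\widetilde Y}^2 u_2$ are of lower order, which is why \eqref{eq:upper:deck} is of steady potential type.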
The matching condition at $\widetilde Y = 0$ with the flow in the main deck as $\bar Y \to \infty$ requires that
\begin{align}
p_2(X,0,T) = P(X,T), \qquad v_2(X,0,T) = - {\partial}_X A(X,T)
\label{eq:upper:deck:BC}
\end{align}
which, in view of \eqref{eq:main:deck:sol}, matches the normal velocity component at the top of the main deck. Matching the upper deck velocity with the outer Euler solution as $\widetilde Y \to \infty$ yields
\begin{align*}
\lim_{\widetilde Y \to + \infty} p_2(X,\widetilde Y ,T) = \lim_{\widetilde Y \to + \infty} v_2(X,\widetilde Y ,T) = 0 \, .
\end{align*}
\subsection{The closed coupled system}
From \eqref{eq:upper:deck} we deduce that the pressure $p_2$ and the normal velocity $v_2$ are harmonic in the variables $X, \widetilde Y$. But one can say more: the functions $p_2$ and $v_2$ are harmonic conjugates, so we may view $v_2$ as the real part of an analytic function and $p_2$ as its imaginary part. Therefore, their traces at the boundary of the half space are related via the Hilbert transform
\begin{align}
P(X,T) = p_2(X,0,T) = H v_2(X,0,T) = - (H {\partial}_X) A(X,T) = \frac{1}{\pi} p.v. \int_{\RR} \frac{{\partial}_{X'} A(X')}{X - X'} \,\mathrm{d} X'
\label{eq:lower:deck:BC:3}
\end{align}
which concludes the proof of \eqref{eq:TD:A:p}. Recall that $- H {\partial}_X = \abs{{\partial}_X}$. Therefore, the system \eqref{eq:lower:deck} together with the boundary conditions \eqref{eq:lower:deck:BC:1}, \eqref{eq:lower:deck:BC:2}, and \eqref{eq:lower:deck:BC:3} forms a closed evolution system, which is called the Triple Deck model. Once the solution in the lower deck is determined, we derive from \eqref{eq:main:deck:sol} the leading order solution in the main deck, while from \eqref{eq:upper:deck:BC} and harmonic extension in the upper half space we determine the leading order solution in the upper deck.
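For completeness, we note that the harmonic conjugacy used above can be read off directly from \eqref{eq:upper:deck}: combining the first and third equations, and using the second one, gives
\begin{align*}
{\partial}_{\widetilde Y} v_2 = - {\partial}_X u_2 = {\partial}_X p_2 \,, \qquad {\partial}_X v_2 = - {\partial}_{\widetilde Y} p_2 \,,
\end{align*}
which are the Cauchy--Riemann equations for the pair $(p_2, v_2)$ in the variables $(X, \widetilde Y)$; in particular, both $p_2$ and $v_2$ are harmonic in the half plane $\{\widetilde Y > 0\}$.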
\subsection{The scalings}
Now that we have presented the derivation of the model, we briefly discuss the idea behind the scalings \eqref{scalings}. We roughly follow the exposition in \cite{Lagree}. \textit{A priori}, one wants to rescale the $x$ variable near the point of separation, which may physically correspond to the trailing edge of a flat plate or to a localized disturbance on the plate. In our presentation, $x = 1$ is this point. To achieve this, one introduces a fast horizontal variable. Next, one scales the magnitude of the deviation from Blasius in the main deck, which represents the separation effect. Summarizing, the starting point is
\begin{align} \label{ans:2}
X = \frac{x - 1}{L}, \qquad u_M = U_B(\bar{Y}) + \ell u_1(X, \bar{Y}, T), \qquad v_M = \frac{\sqrt{\nu} \ell}{L} v_1(X, \bar{Y}, T),
\end{align}
where the scalings $L, \ell$ are to be determined in terms of $\nu$. An inspection of \eqref{scalings} shows that $L = \nu^{\frac 3 8}$ and $\ell = \nu^{\frac 1 8}$.
In the next step, one introduces a lower deck to cancel out the boundary contribution from the main deck. This contribution, the content of \eqref{B:trace}, is now of the form
\begin{align*}
u_M|_{\bar{Y} \to 0} \sim \bar{Y} U_B'(0) + \ell A(X,T) U_B'(0) \sim \ell U_B'(0) (Y + A(X,T)),
\end{align*}
where the lower deck fast variable and magnitude are now
\begin{align*}
Y := \ell^{-1} \bar{Y}, \qquad u_L := \ell U.
\end{align*}
In the lower deck step, equating the convection $u_L {\partial}_x$ and the viscosity $\nu {\partial}_{yy}$ just as in the standard Prandtl theory gives the relation
\begin{align*}
\ell^2 L^{-1} \sim u_L {\partial}_x u_L = \nu {\partial}_{yy} u_L \sim \nu \frac{1}{\nu} \frac{1}{\ell^2} \ell,
\end{align*}
which gives $\ell^3 = L$. The final physical determination comes from the upper deck. One realizes that the contribution of $v_M$ at the top of the main deck needs to match the order of the pressure, which is $\ell^2$. Thus,
\begin{align*}
\frac{\sqrt{\nu}}{ \ell^2} = \ell^2 \quad \Rightarrow \quad \ell = \nu^{\frac 1 8}, \quad L = \nu^{\frac 3 8} \, ,
\end{align*}
which concludes the scale analysis.
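In summary, combining the two relations above one recovers the exponents in \eqref{scalings}:
\begin{align*}
\ell^3 = L \quad \mbox{and} \quad \frac{\sqrt{\nu}}{\ell^2} = \ell^2
\qquad \Longrightarrow \qquad
\ell = \nu^{\frac 1 8}, \qquad L = \nu^{\frac 3 8}, \qquad \ell \, \nu^{\frac 1 2} = \nu^{\frac 5 8}, \qquad \frac{L}{\ell} = \nu^{\frac 1 4} \, .
\end{align*}
Here $\ell \, \nu^{\frac 1 2}$ is the lower deck thickness in the original variable $y$, and $L/u_L = L/\ell$ is the convective time scale of the lower deck; these match the variables $Y$ and $T$ introduced in \eqref{scalings}, while the upper deck thickness is set by the horizontal scale $L = \nu^{\frac 3 8}$, consistent with the potential character of the flow there.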
\subsection*{Acknowledgments}
The authors are grateful to Stephen Childress for pointing them to a number of references concerning the physical origins of the Triple Deck. We also thank Klaus Widmayer for discussions concerning the Benjamin-Ono equation.
The work of S.I. was supported by the NSF grant DMS-1802940.
The work of V.V. was partially supported by the NSF grant DMS-1911413.
\begin{bibdiv}
\begin{biblist}
\bib{AlexandreWangXuYang14}{article}{
author={Alexandre, R.},
author={Wang, Y.-G.},
author={Xu, C.-J.},
author={Yang, T.},
title={Well-posedness of the {P}randtl equation in {S}obolev spaces},
date={2014},
journal={J. Amer. Math. Soc.},
}
\bib{BardosTiti13}{article}{
author={Bardos, C.W.},
author={Titi, E.S.},
title={Mathematics and turbulence: where do we stand?},
journal={J. Turbul.},
volume={14},
date={2013},
number={3},
pages={42--76},
issn={1468-5248},
}
\bib{Blasius08}{article}{
author={Blasius, H.},
title={{G}renzschichten in {F}l\"ussigkeiten mit kleiner {R}eibung},
date={1908},
journal={Z. Math. Phys.},
volume={56},
pages={1\ensuremath{\nonumber}dash 37},
}
\bib{BonaGrujicKalisch10}{article}{
author={Bona, Jerry L.},
author={Gruji\'{c}, Zoran},
author={Kalisch, Henrik},
title={A KdV-type Boussinesq system: from the energy level to analytic
spaces},
journal={Discrete Contin. Dyn. Syst.},
volume={26},
date={2010},
number={4},
pages={1121--1139},
}
\bib{Carter74}{inproceedings}{
author={Carter, J.},
title={Solutions for laminar boundary layers with separation and
reattachment},
date={1974},
booktitle={7th fluid and plasmadynamics conference},
pages={583},
}
\bib{CatherallMangler66}{article}{
author={Catherall, D.},
author={Mangler, K.W.},
title={The integration of the two-dimensional laminar boundary-layer
equations past the point of vanishing skin friction},
date={1966},
journal={Journal of Fluid Mechanics},
volume={26},
number={1},
pages={163\ensuremath{\nonumber}dash 182},
}
\bib{CebeciCousteix05}{book}{
author={Cebeci, T.},
author={Cousteix, J.},
title={Modeling and computation of boundary layer flows},
publisher={Springer},
address={Cambridge},
date={2005},
}
\bib{CollotGhoulIbrahimMasmoudi18}{article}{
author={Collot, C.},
author={Ghoul, T.E.},
author={Ibrahim, S.},
author={Masmoudi, N.},
title={On singularity formation for the two dimensional unsteady
Prandtl's system},
date={2018},
journal={arXiv:1808.05967},
}
\bib{CollotGhoulMasmoudi18}{article}{
author={Collot, C.},
author={Ghoul, T.E.},
author={Masmoudi, N.},
title={Singularity formation for Burgers equation with transverse
viscosity},
date={2018},
journal={arXiv:1803.07826},
}
\bib{CousteixMauss07}{book}{
author={Cousteix, J.},
author={Mauss, J.},
title={Asymptotic analysis and boundary layers},
series={Scientific Computation},
publisher={Springer},
address={Berlin},
date={2007},
ISBN={978-3-540-46488-4},
note={With a preface by Jean-Pierre Guiraud, Translated and extended
from the 2006 French original},
}
\bib{DDLM18}{article}{
author={Dalibard, A.-L.},
author={Dietert, H.},
author={G{\'e}rard-Varet, D.},
author={Marbach, F.},
title={High frequency analysis of the unsteady interactive boundary
layer model},
date={2018},
journal={SIAM Journal on Mathematical Analysis},
volume={50},
number={4},
pages={4203\ensuremath{\nonumber}dash 4245},
}
\bib{DalibardMasmoudi18}{article}{
author={Dalibard, A.-L.},
author={Masmoudi, N.},
title={Separation for the stationary {P}randtl equation},
date={2018},
journal={arXiv:1802.04039},
}
\bib{DiGV18}{article}{
author={Dietert, H.},
author={G{\'e}rard-Varet, D.},
title={Well-posedness of the {P}randtl equations without any structural
assumption},
date={2019},
journal={Annals of PDE},
volume={8},
number={6},
}
\bib{Duck87}{incollection}{
author={Duck, P.W.},
title={Unsteady triple-deck flows leading to instabilities},
date={1987},
booktitle={Boundary-layer separation},
publisher={Springer},
pages={297\ensuremath{\nonumber}dash 312},
}
\bib{EEngquist97}{article}{
author={E, W.},
author={Engquist, B.},
title={Blowup of solutions of the unsteady {P}randtl's equation},
date={1997},
ISSN={0010-3640},
journal={Comm. Pure Appl. Math.},
volume={50},
number={12},
pages={1287\ensuremath{\nonumber}dash 1293},
}
\bib{FeiTaoZhang16}{article}{
author={Fei, M.},
author={Tao, T.},
author={Zhang, Z.},
title={On the zero-viscosity limit of the {N}avier-{S}tokes equations in
the half-space},
date={2016},
journal={arXiv:1609.03778},
}
\bib{FeiTaoZhang18}{article}{
author={Fei, N.},
author={Tao, T.},
author={Zhang, Z.},
title={On the zero-viscosity limit of the {N}avier--{S}tokes equations
in ${R}^3_+$ without analyticity},
date={2018},
ISSN={0021-7824},
journal={J. Math. Pures Appl.},
volume={112},
pages={170 \ensuremath{\nonumber}dash 229},
}
\bib{FoiasTemam89}{article}{
author={Foias, C.},
author={Temam, R.},
title={Gevrey class regularity for the solutions of the Navier-Stokes
equations},
journal={J. Funct. Anal.},
volume={87},
date={1989},
number={2},
pages={359--369},
}
\bib{GarganoSammartinoSciacca09}{article}{
author={Gargano, F.},
author={Sammartino, M.},
author={Sciacca, V.},
title={Singularity formation for {P}randtl's equations},
date={2009},
ISSN={0167-2789},
journal={Phys. D},
volume={238},
number={19},
pages={1975\ensuremath{\nonumber}dash 1991},
url={http://dx.doi.org/10.1016/j.physd.2009.07.007},
}
\bib{GerardVaretDormy10}{article}{
author={G{{\'e}}rard-Varet, D.},
author={Dormy, E.},
title={On the ill-posedness of the {P}randtl equation},
date={2010},
ISSN={0894-0347},
journal={J. Amer. Math. Soc.},
volume={23},
number={2},
pages={591\ensuremath{\nonumber}dash 609},
url={http://dx.doi.org/10.1090/S0894-0347-09-00652-3},
}
\bib{GerardVaretMaekawa18}{article}{
author={G\'erard-Varet, D.},
author={Maekawa, Y.},
title={Sobolev stability of {P}randtl expansions for the steady
{N}avier-{S}tokes equations},
date={2019},
journal={Arch. Ration. Mech. Anal.},
}
\bib{GerardVaretMaekawaMasmoudi16}{article}{
author={G{{\'e}}rard-Varet, D.},
author={Maekawa, Y.},
author={Masmoudi, N.},
title={{G}evrey stability of {P}randtl expansions for {2D}
{N}avier-{S}tokes},
date={2018},
volume={167},
number={13},
pages={2531\ndash 2631},
journal={Duke Math. J.},
}
\bib{GerardVaretMasmoudi13}{article}{
author={G{{\'e}}rard-Varet, D.},
author={Masmoudi, N.},
title={Well-posedness for the {P}randtl system without analyticity or
monotonicity},
date={2015},
ISSN={0012-9593},
journal={Ann. Sci. \'{E}c. Norm. Sup\'er. (4)},
volume={48},
number={6},
pages={1273\ensuremath{\nonumber}dash 1325},
url={https://doi.org/10.24033/asens.2270},
}
\bib{GVMV18}{article}{
author={G\'{e}rard-Varet, D.},
author={Masmoudi, N.},
author={Vicol, V.},
title={Well-posedness of the hydrostatic {N}avier-{S}tokes equations},
date={2018},
journal={arXiv:1804.04489},
}
\bib{GerardVaretNguyen12}{article}{
author={G\'{e}rard-Varet, D.},
author={Nguyen, T.},
title={Remarks on the ill-posedness of the {P}randtl equation},
date={2012},
journal={Asymptotic Analysis},
volume={77},
pages={71\ensuremath{\nonumber}dash 88},
}
\bib{Goldstein48}{article}{
author={Goldstein, S.},
title={On laminar boundary-layer flow near a position of separation},
date={1948},
journal={The Quarterly Journal of Mechanics and Applied Mathematics},
volume={1},
number={1},
pages={43\ensuremath{\nonumber}dash 69},
}
\bib{Grenier00}{article}{
author={Grenier, E.},
title={On the nonlinear instability of {E}uler and {P}randtl equations},
date={2000},
ISSN={0010-3640},
journal={Comm. Pure Appl. Math.},
volume={53},
number={9},
pages={1067\ensuremath{\nonumber}dash 1091},
url={http://dx.doi.org/10.1002/1097-0312(200009)53:9<1067::AID-CPA1>3.3.CO;2-H},
}
\bib{GrenierGuoNguyen14b}{article}{
author={Grenier, E.},
author={Guo, Y.},
author={Nguyen, T.},
title={Spectral stability of {P}randtl boundary layers: an overview},
date={2015},
journal={Analysis},
volume={35},
number={4},
pages={343\ensuremath{\nonumber}dash 355},
}
\bib{GrenierGuoNguyen14c}{article}{
author={Grenier, E.},
author={Guo, Y.},
author={Nguyen, T.},
title={Spectral instability of characteristic boundary layer flows},
date={2016},
journal={Duke Math. J.},
volume={165},
number={16},
pages={3085\ensuremath{\nonumber}dash 3146},
}
\bib{GrenierNguyen17}{article}{
author={Grenier, E.},
author={Nguyen, T.T.},
title={On nonlinear instability of {P}randtl's boundary layers: the case
of {R}ayleigh's stable shear flows},
date={2017},
journal={arXiv:1706.01282},
}
\bib{GrenierNguyen18a}{article}{
author={Grenier, E.},
author={Nguyen, T.T.},
title={{$L^\infty$} instability of {P}randtl layers},
date={2018},
journal={arXiv:1803.11024},
}
\bib{GuoIyer18a}{article}{
author={Guo, Y.},
author={Iyer, S.},
title={Steady {P}randtl layer expansions with external forcing},
date={2018},
journal={arXiv:1810.06662},
}
\bib{GuoIyer18}{article}{
author={Guo, Y.},
author={Iyer, S.},
title={Validity of steady {P}randtl layer expansions},
date={2018},
journal={arXiv:1805.05891},
}
\bib{GuoNguyen11}{article}{
author={Guo, Y.},
author={Nguyen, T.},
title={A note on {P}randtl boundary layers},
date={2011},
ISSN={0010-3640},
journal={Comm. Pure Appl. Math.},
volume={64},
number={10},
pages={1416\ensuremath{\nonumber}dash 1438},
}
\bib{GuoNguyen14}{article}{
author={Guo, Y.},
author={Nguyen, T.T.},
title={Prandtl boundary layer expansions of steady {N}avier--{S}tokes
flows over a moving plate},
date={2017},
ISSN={2199-2576},
journal={Annals of PDE},
volume={3},
number={1},
pages={10},
url={https://doi.org/10.1007/s40818-016-0020-6},
}
\bib{IgnatovaVicol16}{article}{
author={Ignatova, M.},
author={Vicol, V.},
title={Almost global existence for the {P}randtl boundary layer
equations},
date={2016},
ISSN={0003-9527},
journal={Arch. Ration. Mech. Anal.},
volume={220},
number={2},
pages={809\ensuremath{\nonumber}dash 848},
url={https://doi.org/10.1007/s00205-015-0942-2},
}
\bib{Klingenberg83}{thesis}{
author={Klingenberg, C.},
title={On the stability of triple deck flow},
type={Ph.D. Thesis},
date={1983},
}
\bib{KukavicaMasmoudiVicolWong14}{article}{
author={Kukavica, I.},
author={Masmoudi, N.},
author={Vicol, V.},
author={Wong, T.K.},
title={On the local well-posedness of the {P}randtl and the hydrostatic
{E}uler equations with multiple monotonicity regions},
date={2014},
journal={SIAM J. Math. Anal.},
volume={46},
number={6},
pages={3865\ensuremath{\nonumber}dash 3890},
}
\bib{KukavicaVicol13}{article}{
author={Kukavica, I.},
author={Vicol, V.},
title={On the local existence of analytic solutions to the {P}randtl
boundary layer equations},
date={2013},
journal={Commun. Math. Sci.},
volume={11},
number={1},
pages={269\ensuremath{\nonumber}dash 292},
}
\bib{KukavicaVicol09}{article}{
author={Kukavica, I.},
author={Vicol, V.},
title={On the radius of analyticity of solutions to the three-dimensional
Euler equations},
journal={Proc. Amer. Math. Soc.},
volume={137},
date={2009},
number={2},
pages={669--677},
}
\bib{KukavicaVicolWang19}{article}{
author={Kukavica, I.},
author={Vicol, V.},
author={Wang, F.},
title={The inviscid limit for the {N}avier-{S}tokes equations with data
analytic only near the boundary},
date={2019},
journal={arXiv:1904.04983},
}
\bib{KukavicaVicolWang17}{article}{
author={Kukavica, I.},
author={Vicol, V.},
author={Wang, F.},
title={The van {D}ommelen and {S}hen singularity in the {P}randtl
equations},
date={2017},
ISSN={0001-8708},
journal={Adv. Math.},
volume={307},
pages={288\ensuremath{\nonumber}dash 311},
}
\bib{Lagree10}{incollection}{
author={Lagr\'ee, P.-Y.},
title={Interactive boundary layer ({IBL})},
date={2010},
booktitle={Asymptotic methods in fluid mechanics: survey and recent
advances},
publisher={Springer},
pages={247\ensuremath{\nonumber}dash 286},
}
\bib{Lagree}{article}{
author={Lagr\'ee, P.-Y.},
title={Notes on triple deck},
date={2016},
url={http://www.lmm.jussieu.fr/~lagree/COURS/CISM/TriplePont_CISM.pdf},
}
\bib{LeBalleur90}{incollection}{
author={Le~Balleur, J.-C.},
title={New possibilities of viscous-inviscid numerical techniques for
solving viscous flow equations with massive separation},
date={1990},
booktitle={Numerical and physical aspects of aerodynamic flows iv},
publisher={Springer},
pages={71\ensuremath{\nonumber}dash 96},
}
\bib{LevermoreOliver97}{article}{
author={Levermore, C.D.},
author={Oliver, M.},
title={Analyticity of solutions for a generalized Euler equation},
journal={J. Differential Equations},
volume={133},
date={1997},
number={2},
pages={321--339},
}
\bib{LiYang16}{article}{
author={Li, W.-X.},
author={Yang, T.},
title={Well-posedness in {G}evrey space for the {P}randtl equations with
non-degenerate critical points},
date={2016},
journal={arXiv:1609.08430},
}
\bib{Lighthill53}{article}{
author={Lighthill, M.J.},
title={On boundary-layer and upstream influence. {II}. {S}upersonic flows
without separation},
date={1953},
journal={Proc. R. Soc., Ser. A},
number={217},
pages={478\ensuremath{\nonumber}dash 507},
}
\bib{LiuYang17}{article}{
author={Liu, C.-J.},
author={Yang, T.},
title={Ill-posedness of the {P}randtl equations in {S}obolev spaces
around a shear flow with general decay},
date={2017},
journal={J. Math. Pures Appl.},
volume={108},
number={2},
pages={150\ensuremath{\nonumber}dash 162},
}
\bib{LombardoCannoneSammartino03}{article}{
author={Lombardo, M.C.},
author={Cannone, M.},
author={Sammartino, M.},
title={Well-posedness of the boundary layer equations},
date={2003},
ISSN={0036-1410},
journal={SIAM J. Math. Anal.},
volume={35},
number={4},
pages={987\ensuremath{\nonumber}dash 1004 (electronic)},
}
\bib{Maekawa14}{article}{
author={Maekawa, Y.},
title={On the inviscid limit problem of the vorticity equations for
viscous incompressible flows in the half-plane},
date={2014},
ISSN={1097-0312},
journal={Comm. Pure Appl. Math.},
volume={67},
number={7},
pages={1045\ensuremath{\nonumber}dash 1128},
url={http://dx.doi.org/10.1002/cpa.21516},
}
\bib{MaekawaMazzucato16}{article}{
author={Maekawa, Y.},
author={Mazzucato, A.},
title={The inviscid limit and boundary layers for {N}avier-{S}tokes
flows},
date={2016},
journal={Handbook of Mathematical Analysis in Mechanics of Viscous
Fluids},
pages={1\ensuremath{\nonumber}dash 48},
}
\bib{MasmoudiWong15}{article}{
author={Masmoudi, N.},
author={Wong, T.K.},
title={Local-in-time existence and uniqueness of solutions to the
{P}randtl equations by energy methods},
date={2015},
ISSN={0010-3640},
journal={Comm. Pure Appl. Math.},
volume={68},
number={10},
pages={1683\ensuremath{\nonumber}dash 1741},
url={https://doi.org/10.1002/cpa.21595},
}
\bib{Messiter70}{article}{
author={Messiter, A.F.},
title={Boundary layer flow near the trailing edge of a flat plate},
date={1970},
journal={SIAM J. Appl. Math.},
volume={18},
pages={241\ensuremath{\nonumber}dash 257},
}
\bib{Meyer82}{techreport}{
author={Meyer, R.E.},
title={Stewartson's {T}riple {D}eck.},
institution={U. Wisconsin-Madison MRC Technical Summary Report 2391},
date={1982},
}
\bib{Meyer83}{article}{
author={Meyer, R.E.},
title={A view of the triple deck},
date={1983},
journal={SIAM J. Appl. Math.},
volume={43},
number={4},
pages={639\ensuremath{\nonumber}dash 663},
}
\bib{Neiland69}{article}{
author={Neiland, V.~Ya.},
title={Propagation of perturbation upstream with interaction between a
hypersonic flow and a boundary layer},
date={1969},
journal={Mekh. Zhid. Gaz.},
volume={4},
pages={53\ensuremath{\nonumber}dash 57},
}
\bib{NguyenNguyen18}{article}{
author={Nguyen, T.T.},
author={Nguyen, T.T.},
title={The inviscid limit of Navier-Stokes equations for analytic data on
the half-space},
journal={Arch. Ration. Mech. Anal.},
volume={230},
date={2018},
number={3},
pages={1103--1129},
}
\bib{Oleinik66}{article}{
author={Oleinik, O.A.},
title={On the mathematical theory of boundary layer for an unsteady flow
of incompressible fluid},
date={1966},
ISSN={0021-8928},
journal={J. Appl. Math. Mech.},
volume={30},
pages={951\ensuremath{\nonumber}dash 974},
}
\bib{OleinikSamokhin99}{book}{
author={Oleinik, O.A.},
author={Samokhin, V.N.},
title={Mathematical models in boundary layer theory},
series={Applied Mathematics and Mathematical Computation},
publisher={Chapman \& Hall/CRC, Boca Raton, FL},
date={1999},
volume={15},
ISBN={1-58488-015-5},
}
\bib{OliverTiti00}{article}{
author={Oliver, M.},
author={Titi, E.S.},
title={Remark on the rate of decay of higher order derivatives for
solutions to the Navier-Stokes equations in ${\bf R}^n$},
journal={J. Funct. Anal.},
volume={172},
date={2000},
number={1},
pages={1--18},
}
\bib{Prandtl1904}{article}{
author={Prandtl, L.},
title={{\"{U}}ber {F}l{\"u}ssigkeitsbewegung bei sehr kleiner
{R}eibung},
date={1904},
journal={Verh. III Intern. Math. Kongr. Heidelberg, Teuber, Leipzig},
pages={485\ensuremath{\nonumber}dash 491},
}
\bib{SammartinoCaflisch98a}{article}{
author={Sammartino, M.},
author={Caflisch, R.E.},
title={Zero viscosity limit for analytic solutions, of the
{N}avier-{S}tokes equation on a half-space. {I}. {E}xistence for {E}uler and
{P}randtl equations},
date={1998},
ISSN={0010-3616},
journal={Comm. Math. Phys.},
volume={192},
number={2},
pages={433\ensuremath{\nonumber}dash 461},
url={http://dx.doi.org/10.1007/s002200050304},
}
\bib{SammartinoCaflisch98b}{article}{
author={Sammartino, M.},
author={Caflisch, R.E.},
title={Zero viscosity limit for analytic solutions of the
{N}avier-{S}tokes equation on a half-space. {II}. {C}onstruction of the
{N}avier-{S}tokes solution},
date={1998},
ISSN={0010-3616},
journal={Comm. Math. Phys.},
volume={192},
number={2},
pages={463\ensuremath{\nonumber}dash 491},
url={http://dx.doi.org/10.1007/s002200050305},
}
\bib{Schlichting60}{book}{
author={Schlichting, H.},
title={Boundary layer theory},
series={Translated by J. Kestin. 4th ed. McGraw-Hill Series in Mechanical
Engineering},
publisher={McGraw-Hill Book Co.},
address={Inc., New York},
date={1960},
}
\bib{Smith79}{article}{
author={Smith, F.T.},
title={On the non-parallel flow stability of the {B}lasius boundary
layer},
date={1979},
journal={Proceedings of the Royal Society of London. A. Mathematical and
Physical Sciences},
volume={366},
number={1724},
pages={91\ensuremath{\nonumber}dash 109},
}
\bib{Smith82}{article}{
author={Smith, F.T.},
title={On the high {R}eynolds number theory of laminar flows},
date={1982},
journal={IMA Journal of Applied Mathematics},
volume={28},
number={3},
pages={207\ensuremath{\nonumber}dash 281},
}
\bib{SmithBrown12}{book}{
author={Smith, F.T.},
author={Brown, S.N.},
title={Boundary-layer separation: Proceedings of the iutam symposium
london, august 26--28, 1986},
publisher={Springer Science \& Business Media},
date={2012},
}
\bib{Stewartson68}{article}{
author={Stewartson, K.},
title={On the flow near the trailing edge of a flat plate},
date={1968},
journal={Proceedings of the Royal Society of London. Series A.
Mathematical and Physical Sciences},
volume={306},
number={1486},
pages={275\ensuremath{\nonumber}dash 290},
}
\bib{Stewartson69}{article}{
author={Stewartson, K},
title={On the flow near the trailing edge of a flat plate ii},
date={1969},
journal={Mathematika},
volume={16},
number={1},
pages={106\ensuremath{\nonumber}dash 121},
}
\bib{SychevEtAl98}{book}{
author={Sychev, V.V.},
author={Ruban, A.I.},
author={Sychev, V.V.},
author={Korolev, G.L.},
title={Asymptotic theory of separated flows},
publisher={Cambridge University Press, Cambridge},
date={1998},
ISBN={0-521-45530-8},
url={https://mathscinet.ams.org/mathscinet-getitem?mr=1659235},
note={Translated from the 1987 Russian original by Elena V. Maroko and
revised by the authors.},
}
\bib{CowleyTutty85}{article}{
author={Tutty, O.R.},
author={Cowley, S.J.},
title={On the stability and the numerical solution of the unsteady
interactive boundary-layer equation},
date={1986},
journal={J. Fluid Mech.},
volume={168},
pages={431\ensuremath{\nonumber}dash 456},
}
\bib{VanDommelenShen80}{article}{
author={van Dommelen, L.L.},
author={Shen, S.F.},
title={The spontaneous generation of the singularity in a separating
laminar boundary layer},
date={1980},
ISSN={0021-9991},
journal={J. Comput. Phys.},
volume={38},
number={2},
pages={125\ensuremath{\nonumber}dash 140},
}
\bib{WangWangZhang17}{article}{
author={Wang, C.},
author={Wang, Y.},
author={Zhang, Z.},
title={Zero-viscosity limit of the {N}avier--{S}tokes equations in the
analytic setting},
date={2017},
journal={Arch. Ration. Mech. Anal.},
volume={224},
number={2},
pages={555\ensuremath{\nonumber}dash 595},
}
\bib{XinZhang04}{article}{
author={Xin, Z.},
author={Zhang, L.},
title={On the global existence of solutions to the {P}randtl's system},
date={2004},
ISSN={0001-8708},
journal={Adv. Math.},
volume={181},
number={1},
pages={88\ensuremath{\nonumber}dash 133},
}
\end{biblist}
\end{bibdiv}
\end{document}
\begin{document}
\title{Detection power of separability criteria based on a correlation tensor: a case study}
\author{Gniewomir Sarbicki}
\affiliation{Institute of Physics, Faculty of Physics, Astronomy and Informatics,
Nicolaus Copernicus University, Grudziadzka 5/7, 87-100 Toru\'{n},
Poland}
\author{Giovanni Scala}
\affiliation{Dipartimento Interateneo di Fisica, Università degli Studi di Bari,
I-70126 Bari, Italy}
\affiliation{INFN, Sezione di Bari, I-70125 Bari, Italy}
\author{Dariusz Chru\'{s}ci\'{n}ski}
\affiliation{Institute of Physics, Faculty of Physics, Astronomy and Informatics,
Nicolaus Copernicus University, Grudziadzka 5/7, 87-100 Toru\'{n},
Poland}
\date{\today}
\begin{abstract}
The detection power of separability criteria based on a correlation tensor is tested within a family of generalized isotropic states in $d_1 \otimes d_2$. For $d_1 \neq d_2$ all these criteria are weaker than the positive partial transposition (PPT) criterion. Interestingly, our analysis supports the recent conjecture that the criterion based on symmetric informationally complete positive operator-valued measures (SIC POVMs) is stronger than the realignment criterion.
\end{abstract}
\pacs{33.15.Ta}
\keywords{entanglement detection, SIC POVMs, realignment criterion, entanglement witnesses}
\maketitle
\section{\label{sec:level1}Introduction}
Quantum entanglement is a key feature of quantum
theory. It has become a crucial resource for quantum information theory and for modern quantum-based
technologies such as quantum communication, quantum cryptography,
and quantum computation \cite{HHHH,QIT}. Several separability criteria have been developed during the last two decades which enable one to distinguish between separable and entangled states (cf. \cite{GT,HHHH,TOPICAL}). In this paper we analyze a special family of separability criteria based on the correlation tensor. The prominent example is provided by the realignment or computable cross-norm (CCNR) criterion \cite{R1,R2,R3}. Apart from CCNR this class contains the de Vicente (dV) criterion \cite{Vicente}, the separability criterion derived in \cite{Fei}, and the recent criterion based on SIC POVMs (ESIC) \cite{GUHNE}. Interestingly, all these criteria were unified in \cite{GGD1}, where it was shown that they are just special examples of a 2-parameter family of criteria. In this paper we provide a comparative analysis of these criteria by testing them on a simple family of bipartite states in $d_1 \otimes d_2$.
Let us fix the notation: let $\mathbb{C}^{\mathrm{d_{1}}}\otimes\mathbb{C}^{\mathrm{d_{2}}}$
be the Hilbert space of a bipartite system whose subsystems have dimensions
$d_{1}$ and $d_{2}$, respectively. In what follows we assume that $d_2 \geq d_1$. Let $\{G_{\alpha}^{(1)}\}_{\alpha=0}^{d_{1}^{2}-1}$
and $\{G_{\beta}^{(2)}\}_{\beta=0}^{d_{2}^{2}-1}$ be arbitrary orthonormal
bases in $\mathcal{B}(\mathbb{C}^{\mathrm{d_{1}}})$ and $\mathcal{B}(\mathbb{C}^{\mathrm{d_{2}}})$,
i.e. $\langle G_{\alpha}^{(1)}|G_{\alpha'}^{(1)}\rangle_{{\rm HS}}=\delta_{\alpha,\alpha'}$
and similarly for $G_{\beta}^{(2)}$, where $\langle X|Y\rangle_{\mathrm{HS}}={\rm Tr}(X^{\dagger}Y)$
is the Hilbert-Schmidt inner product. One defines the correlation matrix
\begin{equation}
C_{\alpha\beta}=\langle G_{\alpha}^{(1)}\otimes G_{\beta}^{(2)}\rangle_{\rho}={\rm Tr\left(\rho G_{\alpha}^{(1)}\otimes G_{\beta}^{(2)}\right)}.\label{key}
\end{equation}
Let us now restrict ourselves to orthonormal bases (referred to as \textit{canonical bases}) such that $G^{(i)}_0 = \boldsymbol{1}_{\mathrm{d_{i}}}/\sqrt{d_{i}}$ (the remaining basis elements are then pairwise orthogonal hermitian traceless operators of norm one). From now on $C^{\mathrm{can}}$ stands for the correlation matrix with respect to the canonical bases.
One proves (cf. \cite{GGD1}) the following
\begin{Theorem} \label{TH-1} If $\rho$ is separable, then
\begin{equation}
\|D_{x}^{(1)}C^{{\rm can}}D_{y}^{(2)}\|_1 \leq \mathcal{N}_{x,d_{1}}\mathcal{N}_{y,d_{2}},\label{xy}
\end{equation}
where
\begin{equation}
D_{x}^{(1)}=\mathrm{diag}\left\{ x,1,\dots,1\right\} ,\qquad D_{y}^{(2)}=\mathrm{diag}\left\{ y,1,\dots,1\right\}
\end{equation}
and
\begin{equation}
\mathcal{N}_{x,d_{1}}=\sqrt{\frac{d_{1}-1+x^{2}}{d_{1}}},\,\ \ \mathcal{N}_{y,d_{2}}=\sqrt{\frac{d_{2}-1+y^{2}}{d_{2}}},\label{NANB}
\end{equation}
for arbitrary $x,y\geq0$.
\end{Theorem}
Interestingly, for $(x,y)=(1,1)$ the above criterion reduces to CCNR. Moreover, for $(x,y)=(0,0)$, $(x,y)=(\sqrt{2/d_{1}},\sqrt{2/d_{2}})$, and
$(x,y)=(\sqrt{d_{1}+1},\sqrt{d_{2}+1})$ one recovers the separability criteria developed in \cite{Vicente}, \cite{Fei} and \cite{GUHNE}, respectively. The last criterion \cite{GUHNE} was constructed in terms of SIC POVMs (ESIC criterion) and it was conjectured to be stronger than the original CCNR criterion. In this paper we provide a comparative analysis of the CCNR, ESIC and dV criteria for a class of bipartite states which generalize the well-known isotropic states
\begin{align}
\rho_{p} & =\frac{1-p}{d_{1}d_{2}} \oper_{d_1} \otimes \oper_{d_2}
+ p |\psi^+_{d_1}\rangle \langle \psi^+_{d_1}| , \label{isotropic}
\end{align}
where $|\psi^+_{d_1}\rangle = 1/\sqrt{d_1} \sum_{i=1}^{d_1} |e_i\otimes f_i\rangle$, $\{|e_i\rangle\}$ is an orthonormal basis in $\mathbb{C}^{d_1}$ and $\{|f_i\rangle\}$ is an orthonormal set in $\mathbb{C}^{d_2}$. It is well known \cite{GT} that this state is separable if and only if it is PPT, which is equivalent to
\begin{equation}\label{PPT}
p \leq \frac{1}{d_2 + 1} .
\end{equation}
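For concreteness, the following Python/NumPy sketch (ours, purely illustrative; the helper functions are not part of any particular library) constructs $\rho_p$ for given $d_1\leq d_2$ and $p$ and evaluates both the minimal eigenvalue of the partial transposition and the trace norm of the realignment $\|\mathcal{R}(\rho_p)\|_1$ entering the CCNR criterion:
\begin{verbatim}
import numpy as np

def isotropic(d1, d2, p):
    # (1-p)/(d1*d2) * identity + p |psi+><psi+|
    psi = np.zeros(d1 * d2)
    for i in range(d1):
        psi[i * d2 + i] = 1.0 / np.sqrt(d1)
    return (1 - p) * np.eye(d1 * d2) / (d1 * d2) + p * np.outer(psi, psi)

def min_eig_partial_transpose(rho, d1, d2):
    # partial transposition with respect to the second subsystem
    rho_tb = rho.reshape(d1, d2, d1, d2).transpose(0, 3, 2, 1)
    return np.linalg.eigvalsh(rho_tb.reshape(d1 * d2, d1 * d2)).min()

def realignment_trace_norm(rho, d1, d2):
    # R(rho)_{(i,k),(j,l)} = rho_{(i,j),(k,l)}; trace norm = sum of singular values
    R = rho.reshape(d1, d2, d1, d2).transpose(0, 2, 1, 3).reshape(d1 * d1, d2 * d2)
    return np.linalg.svd(R, compute_uv=False).sum()

d1, d2, p = 2, 3, 0.3            # p > 1/(d2+1) = 0.25, hence rho_p is NPT
rho = isotropic(d1, d2, p)
print(min_eig_partial_transpose(rho, d1, d2))  # negative: PPT detects entanglement
print(realignment_trace_norm(rho, d1, d2))     # below 1: CCNR does not detect it
\end{verbatim}
For $d_1\neq d_2$ one easily finds values of $p$ (as above) for which the partial transposition detects entanglement while the realignment does not, in line with the analysis below.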
In this paper we provide the upper bound for $p$ implied by condition (\ref{xy}). Moreover, we show that the ESIC criterion detects more entangled states than the standard CCNR criterion. This supports the conjecture made in \cite{GUHNE} that ESIC is stronger than CCNR. Finally, we analyze the so-called enhanced realignment criterion \cite{ZZZ}, which states that for separable states
\begin{equation}\label{ZZZ}
\| \mathcal{R}(\rho_{AB})\|_1 \leq \sqrt{1- {\rm Tr} \rho_A^2}\sqrt{1-{\rm Tr}\rho_B^2} ,
\end{equation}
with $\rho_A = {\rm Tr}_B \rho_{AB}$ and $\rho_B = {\rm Tr}_A \rho_{AB}$ being the marginal states. The enhanced realignment
criterion turns out to be the strongest effectively computable simplification of the Correlation Matrix Criterion \cite{COV-1,COV-2,COV-3} (see also \cite{COV-U} for a unifying approach). In the recent paper \cite{GGD2} we showed that this criterion is equivalent to (\ref{xy}) imposed for all $x,y\geq 0$. Here we illustrate the intricate relation between (\ref{ZZZ}) and (\ref{xy}) using the family of isotropic states.
\section{\label{sec:level2} $XY$-criterion for isotropic states}
The main result of our paper consists in the following
\begin{Theorem} \label{I} If an isotropic state $\rho_{p}$ is separable, then $p\leq p_{xy}$, where
\begin{align}
p_{xy} & =\Gamma\frac{\sqrt{\left(1+\tilde{x}\right)\left(1+\tilde{y}\right)}-\sqrt{\tilde{x}\left(\left(1+\gamma\right)\tilde{y}+\gamma\tilde{x}+\gamma\right)}}{1-\gamma\tilde{x}}\label{eq:pxy}
\end{align}
with
\begin{align}
&\tilde{x}= \frac{x^{2}}{d_{1}-1},\qquad\tilde{y}=\frac{y^{2}}{d_{2}-1}, \\
\gamma= & \frac{\left(d_{2}-d_{1}\right)}{d_{2}\left(d_{1}-1\right)\left(d_{1}+1\right)^{2}},\quad
\Gamma= \frac{d_1}{d_1^2-1} \frac{\sqrt{d_1-1}\sqrt{d_2-1}}{\sqrt{d_1d_2}}\nonumber
\end{align}
for arbitrary $x,y\geq0$.
\end{Theorem}
For the proof cf. Appendix. Note that for $x=y$ and $d_{1}=d_{2}=d$ one finds
\begin{equation}
p_{x,x} = \frac{1}{d+1} ,
\end{equation}
and hence one recovers the PPT condition (\ref{PPT}). In general, however,
\begin{equation}\label{}
p_{x,y} > \frac{1}{d_2+1} ,
\end{equation}
and hence this criterion is weaker than the PPT condition. In particular, we have the following bounds for the de Vicente criterion ($x=y=0$), the realignment criterion ($x=y=1$), and the ESIC criterion ($x=\sqrt{d_1+1}$, $y=\sqrt{d_2+1}$):
\begin{align}
p_{dV} & = \frac{d_1}{d_1^2-1} \frac{\sqrt{d_1-1}\sqrt{d_2-1}}{\sqrt{d_1d_2}} \\
p_{R}&=\frac{\left(d_{1}^{2}-1\right)d_{2}-\sqrt{d_{1}^{3}d_{2}-3d_{1}d_{2}+d_{2}^2+1}}{d_{2}d_{1}^{3}-2d_{1}d_{2}+1}\\
p_{E}&=\frac{2\left(d_{1}-1\right)d_{2}-\sqrt{\frac{d_{1}^{3}d_{2}^{2}-2d_{1}d_{2}^{2}+3d_{2}^{2} +\left(d_{1}^{3}-5d_1\right)d_{2}+d_{1}+1}{d_{1}+1}}}{d_{1}^2d_2-d_{1}d_2-d_{2}+1}.\label{eq:pe}
\end{align}
We skip the expression $p_F$ for the criterion from \cite{Fei} ($x=\sqrt{2/d_1}$,$y=\sqrt{2/d_2}$) since it is quite complicated. Again, for $d_1=d_2$ one has
$$ p_{dV} = p_R = p_E = \frac{1}{d+1} . $$
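For illustration, for $(d_1,d_2)=(2,3)$ these bounds evaluate to
\begin{equation*}
p_{dV}=\frac{2}{3\sqrt{3}}\approx 0.3849,\qquad p_{R}=\frac{5}{13}\approx 0.3846,\qquad p_{E}=\frac{3-\sqrt{5}}{2}\approx 0.3820,
\end{equation*}
all of which exceed the PPT threshold $1/(d_2+1)=1/4$; note that already in this smallest case one has $p_E<p_R$, in accordance with the Proposition below.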
\begin{Proposition} For $d_1 \neq d_2$ one has
\begin{equation}\label{}
p_E < p_R ,
\end{equation}
that is, the ESIC criterion detects more entangled isotropic states than the realignment criterion.
\end{Proposition}
For the proof cf. Appendix.
\section{Comparison with Enhanced Realignment Criterion}
Separability of $\rho_p$ implies $p \leq p_{xy}$. Clearly, $p_{xy}$ depends on $(x,y)$, and hence the most efficient criterion corresponds to the minimal value of $p_{xy}$. Let us calculate the minimum of the expression (\ref{eq:pxy}). One has
\begin{equation} \label{partial_y}
\partial_{\tilde{x}} p_{xy} = 0 \iff (1+\gamma)\tilde{y} = \tilde{x} -\gamma.
\end{equation}
This is a necessary condition for a minimum. One can check that substituting (\ref{partial_y}) into (\ref{eq:pxy}) yields the constant value $\Gamma/\sqrt{1+\gamma}$; hence the whole line $(1+\gamma)\tilde{y} = \tilde{x} -\gamma$ (a hyperbola in $x$, $y$) consists of minima of $p_{xy}$. One can summarise the above observations in the following
\begin{Theorem}
The minimum of $p_{xy}$ is attained in points of the hyperbola:
\begin{equation} \label{eq:hyperbola}
\frac{x^2}{d_1-1} - (1+\gamma) \frac{y^2}{d_2-1} = \gamma
\end{equation}
and the value of the minimum reads as follows
\begin{equation}
p_{\rm min} = \frac{\Gamma}{\sqrt{1+\gamma}} = \sqrt{\frac{d_2-1}{d_2(d_1^2+d_1-1)-1}}.
\end{equation}
\end{Theorem}
The enhanced realignment criterion states that if $\rho_{p}$ is separable, then
\begin{equation}
\|\mathcal{R}(\rho_{p}-\rho_{1}\otimes\rho_{2})\|_{1}\leq\sqrt{1-{\rm Tr}\rho_{1}^{2}}\sqrt{1-{\rm Tr}\rho_{2}^{2}},\label{RR}
\end{equation}
where
\begin{equation}
\rho_{1} =\frac{\oper_{\mathrm{d_{1}}}}{d_{1}}, \qquad\rho_{2}=\frac{1-p}{d_{2}}\oper_{\mathrm{d_{2}}} + \frac{p}{d_{1}} \sum_{i=1}^{d_1} |f_i\rangle \langle f_i|
\end{equation}
are local states in the subsystems.
\begin{Theorem} $\rho_p$ satisfies (\ref{RR}) if and only if
\begin{equation}\label{}
p \leq p_{\rm ER} ,
\end{equation}
where
\begin{equation}
p_{\mathrm{ER}}=\sqrt{\frac{d_{2}-1}{d_{2}\left(d_{1}^{2}+d_{1}-1\right)-1}}.\label{eq:per}
\end{equation}
\end{Theorem}
For the proof cf. Appendix. Hence, the enhanced realignment criterion is equivalent to the family of $XY$-criteria minimising $p_{xy}$. Interestingly, in \cite{GGD2} we proved that this equivalence is always realized for large values of $x$ and $y$, but for the isotropic states in Eq. \eqref{isotropic} it is enough to consider $x,y$ belonging to the hyperbola in Eq. \eqref{eq:hyperbola}. Indeed, Fig. \ref{fig:contourplot} illustrates the hyperbola of minima of $p_{xy}$ (reducing to a line if the dimensions are equal) and four characteristic points representing the four criteria distinguished in the literature.
\begin{figure}
\caption{Contour plot with the gradient direction and plot of the threshold function $p_{xy}$.}
\label{fig:contourplot}
\end{figure}
\section{Conclusions}
We provided a comparative analysis of the detection power of the $XY$-family of separability criteria in the case of generalized isotropic states in $d_1 \otimes d_2$. Due to the high symmetry of the isotropic state one can derive an analytical formula for the separability bound $p_{xy}$, which for $d_1 \neq d_2$ is always higher than the PPT bound $p_{\mathrm{PPT}} = 1/(d_2+1)$. Interestingly, minimising over $(x,y)$ we showed that the most efficient bound $p_{\rm min}$ is exactly the same as the one derived in terms of the so-called enhanced realignment criterion. Finally, it is shown that for the family of generalized isotropic states the ESIC criterion from \cite{GUHNE} detects more entangled states than the original CCNR criterion. Hence, it supports the conjecture raised in \cite{GUHNE} that ESIC is stronger than CCNR. It is clear that a similar analysis can be performed for a Werner-like state \cite{Werner}
\begin{equation}\label{}
\rho_q = \frac{1-q}{d_1 d_2 } \oper_{d_1} \otimes \oper_{d_2} + \frac{q}{d_1} \sum_{i,j=1}^{d_1} |e_i \rangle \langle e_j| \otimes |f_j \rangle \langle f_i| .
\end{equation}
To summarise this simple analysis, which provides a clear illustration of the detection power of a family of separability criteria based on the correlation tensor, Fig. \ref{fig:comparison} shows the general trend for large values of $d_1$ and $d_2$.
\begin{figure}
\caption{The difference of the thresholds which realize the equality in Eqs.~\eqref{eq:ineq_DV}--\eqref{eq:ineq_ESIC}.}
\label{fig:comparison}
\end{figure}
\begin{acknowledgments}
DC and GSa were supported by the Polish National Science Centre project
2018/30/A/ST2/00837. GSc is supported by Istituto Nazionale di Fisica
Nucleare (INFN) through the project ``QUANTUM''. We acknowledge
the Toru\'{n} Astrophysics/Physics Summer Program TAPS 2018 and the
project PROM at the Nicolaus Copernicus University. GSc thanks S.
Pascazio, P. Facchi and F. V. Pepe for invaluable human and scientific
support, for suggestions and encouragements which led to the realization
of the present work.
\end{acknowledgments}
\appendix
\section{Proof of Theorem \ref{I}}
Introducing a vectorization of an operator \cite{Watrous,Gilchrist} $A=\sum_{i,j}A_{ij}|i\rangle\langle j|$
via $|A\rangle=\sum_{i,j}A_{ij}|i\rangle\otimes|j\rangle$ one has
$\mathcal{R}(A\otimes B)=|A\rangle\langle B^{*}|$, where the complex
conjugation is taken w.r.t. the basis used for the vectorization.
The resulting matrix $\mathcal{R}(\rho_{p})$ is the correlation tensor $C(\rho_{p})$
for a choice of bases: $\{|e_{i}\rangle\langle e_{j}|\}_{i,j=1}^{d_{1}}\subset\mathcal{B}(\mathbb{C}^{d_{1}})$
and $\{|f_{i}\rangle\langle f_{j}|\}_{i,j=1}^{d_{2}}\subset\mathcal{B}(\mathbb{C}^{d_{2}})$.
These bases are orthonormal, but not hermitian; hence the matrix $C$
can have complex entries. However, its singular values and trace norm are
the same as in the case of hermitian orthonormal bases. One has
\begin{align}
C = & \mathcal{R}(\rho_{p})=(1-p)\frac{1}{d_{1}d_{2}}\sum_{i=1}^{d_{1}}\sum_{j=1}^{d_{2}}|e_{i} \otimes e_i \rangle\langle f_{j}\otimes f_j |\nonumber \\
& \qquad + \frac{p}{d_{1}} \sum_{i,j=1}^{d_{1}}|e_{i}\otimes e_j \rangle\langle f_{i}\otimes f_j | \\
= & (1-p)\left|\frac{\oper_{\mathrm{d_{1}}}}{d_{1}}\right\rangle \left\langle \frac{\oper_{\mathrm{d_{2}}}}{d_{2}}\right| + \frac{p}{d_{1}} \sum_{i,j=1}^{d_{1}}|e_{i}\otimes e_j\rangle\langle f_{i}\otimes f_j| , \nonumber \label{eq:Crho}
\end{align}
where we used
$$ | \oper_{d_1}\rangle = \sum_{i=1}^{d_1} |e_i \otimes e_i \rangle \ , \ \ | \oper_{d_2}\rangle = \sum_{i=1}^{d_2} |f_i \otimes f_i \rangle . $$
To recast $D_{x}^{\left(1\right)},D_{y}^{\left(2\right)}$
in the non-hermitian bases we sandwich the above matrix between
$$ \mathbb{D}^{(1)}_x = \oper_{d_1} \otimes \oper_{d_1} +\frac{x-1}{d_{1}}|\oper_{\mathrm{d_{1}}} \rangle\langle\oper_{\mathrm{d_{1}}}| , $$
and
$$ \mathbb{D}^{(2)}_y = \oper_{d_2} \otimes \oper_{d_2} +\frac{y-1}{d_{2}}|\oper_{\mathrm{d_{2}}} \rangle\langle\oper_{\mathrm{d_{2}}}| . $$
In other words, $D_{x}^{\left(1\right)}$ is obtained by replacing the first
entry $1$ with $x$ using the projector $|\oper_{\mathrm{d_{1}}} \rangle\langle\oper_{\mathrm{d_{1}}}|/d_1$, and analogously for $D_{y}^{\left(2\right)}$. The matrix $C_{xy}$ then reads
\begin{widetext}
\begin{eqnarray}\label{}
C_{xy} &=& \left(\oper_{d_1} \otimes \oper_{d_1} +\frac{x-1}{d_{1}}|\oper_{\mathrm{d_{1}}} \rangle\langle\oper_{\mathrm{d_{1}}}|\right) \Big( (1-p)\left|\frac{\oper_{\mathrm{d_{1}}}}{d_{1}}\right\rangle \left\langle \frac{\oper_{\mathrm{d_{2}}}}{d_{2}}\right| + \frac{p}{d_{1}} \sum_{i,j=1}^{d_{1}}|e_{i}\otimes e_j\rangle\langle f_{i}\otimes f_j| \Big) \left(\oper_{d_2} \otimes \oper_{d_2} +\frac{y-1}{d_{2}}|\oper_{\mathrm{d_{2}}} \rangle\langle\oper_{\mathrm{d_{2}}}|\right) \nonumber \\ &=&
\frac{\left(y-p\right)x}{d_{1}d_{2}}\left|\oper_{\mathrm{d_{1}}}\right\rangle \left\langle \oper_{\mathrm{d_{2}}}\right| + \frac{p}{d_{1}} \sum_{i,j=1}^{d_{1}}|e_{i}\otimes e_j\rangle\langle f_{i}\otimes f_j|
+ p\frac{\left(x-1\right)}{d_{1}^{2}} \sum_{i,j=1}^{d_{1}}|e_{i}\otimes e_i\rangle\langle f_{j}\otimes f_j| .
\end{eqnarray}
\end{widetext}
Note that
\begin{eqnarray*}
\mathbb{D}^{(1)}_x C \mathbb{D}^{(2)}_y =& U {D}^{(1)}_x U^\dagger C V D^{(2)}_y V^\dagger \\
=& U {D}^{(1)}_x C^{\rm can} D^{(2)}_y V^\dagger
\end{eqnarray*}
where $ C^{\rm can}= U^\dagger C V , $
defines the canonical correlation tensor. Hence, one has
\begin{equation}\label{}
\| C_{xy} \|_1 = \| {D}^{(1)}_x C^{\rm can} D^{(2)}_y \|_1 .
\end{equation}
To compute the trace-norm of $C_{xy}$ note that
\begin{align}
C_{xy}C_{xy}^{\dagger}= & \Bigg(\frac{x^{2}\left(y^{2}-p^{2}\right)}{d_{1}^{2}d_{2}}+\frac{p^{2}\left(x^{2}-1\right)}{d_{1}^{3}}\Bigg)\left|\bm{1}_{\mathrm{d_{1}}}\right\rangle \left\langle \bm{1}_{\mathrm{d_{1}}}\right|\nonumber \\
& +\frac{p^{2}}{d_{1}^{2}}\oper_{d_{1}} \otimes \oper_{d_2} .
\end{align}
Now, since $\left|\bm{1}_{\mathrm{d_{1}}}\right\rangle \left\langle \bm{1}_{\mathrm{d_{1}}}\right|$
commutes with $\oper_{d_{1}} \otimes \oper_{d_2}$, they share the same
set of eigenvectors, and therefore the spectrum $\sigma$ (with eigenvalues
listed together with their multiplicities) reads
\begin{align}
\sigma\left(C_{xy}C_{xy}^{\dagger}\right)= & \left(d_{1}^{2}-1\right)\times\left\{ \frac{p^{2}}{d_{1}^{2}}\right\} \nonumber \\
\cup & \Bigg\{\frac{x^{2}}{d_{1}d_{2}}\left(y^{2}+p^{2}\frac{d_{2}-d_{1}}{d_{1}}\right)\Bigg\}.
\end{align}
Finally, we conclude that separability of the isotropic state $\rho_{p}$ (with $d_1 \leq d_2$)
implies
\begin{align}
\|C_{xy}\|_1 = & \frac{d_{1}^{2}-1}{d_{1}}p+\frac{x}{\sqrt{d_{1}d_{2}}}\sqrt{y^{2}+p^{2}\frac{d_{2}-d_{1}}{d_{1}}}\nonumber \\
\le & \sqrt{\frac{d_{1}-1}{d_{1}}+\frac{x^{2}}{d_{1}}}\sqrt{\frac{d_{2}-1}{d_{2}}+\frac{y^{2}}{d_{2}}}.\label{Cxynorm}
\end{align}
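As an independent sanity check (ours, not needed for the proof), one may construct the matrix $C_{xy}$ entry by entry from the explicit formula above and compare its trace norm with the closed expression for $\|C_{xy}\|_1$, e.g. in Python/NumPy:
\begin{verbatim}
import numpy as np

d1, d2, p, x, y = 2, 3, 0.2, 1.5, 0.7     # arbitrary test values

one1 = np.zeros(d1 * d1)                  # |1_{d1}> = sum_i |e_i (x) e_i>
for i in range(d1):
    one1[i * d1 + i] = 1.0
one2 = np.zeros(d2 * d2)                  # |1_{d2}> = sum_j |f_j (x) f_j>
for j in range(d2):
    one2[j * d2 + j] = 1.0
part = np.zeros(d2 * d2)                  # sum_{j <= d1} |f_j (x) f_j>
for j in range(d1):
    part[j * d2 + j] = 1.0

C = x * (y - p) / (d1 * d2) * np.outer(one1, one2)
for i in range(d1):                       # (p/d1) sum_{i,j} |e_i e_j><f_i f_j|
    for j in range(d1):
        C[i * d1 + j, i * d2 + j] += p / d1
C += p * (x - 1) / d1**2 * np.outer(one1, part)

trace_norm = np.linalg.svd(C, compute_uv=False).sum()
closed = (d1**2 - 1) / d1 * p + \
    x / np.sqrt(d1 * d2) * np.sqrt(y**2 + p**2 * (d2 - d1) / d1)
print(trace_norm, closed)                 # the two values coincide
\end{verbatim}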
In particular, for $x=y=0$, $x=y=1$, $x=\sqrt{2/d_{1}},y=\sqrt{2/d_{2}}$,
and $x=\sqrt{d_{1}+1},y=\sqrt{d_{2}+1}$, the above condition implies
\begin{align}
p\frac{d_{1}^{2}-1}{d_{1}}\le\sqrt{\frac{d_{1}-1}{d_{1}}}\sqrt{\frac{d_{2}-1}{d_{2}}},\label{eq:ineq_DV}\\
\frac{d_{1}^{2}-1}{d_{1}}p+\frac{1}{\sqrt{d_{1}d_{2}}}\sqrt{1+p^{2}\left(\frac{d_{2}}{d_{1}}-1\right)}\le1,\\
\frac{d_{2}\left(d_{1}^{2}-1\right)p+\sqrt{4+2p^{2}\left(\frac{d_{2}}{d_{1}}-1\right)d_{2}}}{\sqrt{d_{1}^{2}-d_{1}+2}\sqrt{d_{2}^{2}-d_{2}+2}}\le1,\\
\frac{d_{1}^{2}-1}{d_{1}}p+\sqrt{\frac{d_{1}+1}{d_{1}d_{2}}}\sqrt{d_{2}+1+p^{2}\left(\frac{d_{2}}{d_{1}}-1\right)}\le2.\label{eq:ineq_ESIC}
\end{align}
If the state $\rho_{p}$ is separable, then
\begin{align}
0 & \geq\frac{d_{1}^{2}-1}{d_{1}}p+\frac{x}{\sqrt{d_{1}d_{2}}}\sqrt{y^{2}+p^{2}\frac{d_{2}-d_{1}}{d_{1}}} -\mathcal{N}_{x,d_{1}}\mathcal{N}_{y,d_{2}}\label{eq:criterion_strong}
\end{align}
which obviously implies a weaker condition
\begin{align}
0 & \geq\frac{d_{1}^{2}-1}{d_{1}}p-\mathcal{N}_{x,d_{1}}\mathcal{N}_{y,d_{2}}.\label{eq:criterion_weak}
\end{align}
Let
\begin{equation}
p_{0}:=\frac{d_{1}}{d_{1}^{2}-1}\mathcal{N}_{x,d_{1}}\mathcal{N}_{y,d_{2}}.
\end{equation}
It is clear that if $p>p_{0}$, then already the weaker condition (\ref{eq:criterion_weak}) detects entanglement of $\rho_p$. If $p \leq p_0$, then we can use the tighter inequality (\ref{eq:criterion_strong})
\begin{equation}
-\frac{x}{\sqrt{d_{1}d_{2}}}\sqrt{y^{2}+p^{2}\frac{d_{2}-d_{1}}{d_{1}}}\geq\frac{d_{1}^{2}-1}{d_{1}}p-\mathcal{N}_{x,d_{1}}\mathcal{N}_{y,d_{2}}.
\end{equation}
Under the assumption that we do not detect entanglement with the weaker
condition \eqref{eq:criterion_weak}, both sides of the inequality
are non-positive, hence squaring them reverses the inequality sign:
\begin{align}\label{eq:quadratic_inequality}
\mathcal{F}\left(p\right)
& = \left( \frac{d_{1}^{2}-1}{d_{1}}p-\mathcal{N}_{x,d_{1}}\mathcal{N}_{y,d_{2}} \right)^2 \nonumber \\
& - \frac {x^2}{d_{1}d_{2}} \left( y^{2}+p^{2}\frac{d_{2}-d_{1}}{d_{1}} \right) \nonumber \\
& = a_{d_{1},d_{2}}\left(x\right)p^{2}+b_{d_{1},d_{2}}\left(x,y\right)p+c_{d_{1},d_{2}}\left(x,y\right)\geq0,
\end{align}
where
\begin{align}
a\equiv a_{d_{1},d_{2}}\left(x\right)= & \frac{\left(d_{1}^{2}-1\right)^{2}}{d_{1}^2}-x^{2}\frac{d_{2}-d_{1}}{d_{1}^{2}d_{2}},\label{eq:a}\\
b\equiv b_{d_{1},d_{2}}\left(x,y\right)= & -2\frac{d_{1}^{2}-1}{d_{1}}\mathcal{N}_{x,d_{1}}\mathcal{N}_{y,d_{2}},
\end{align}
\begin{align}
c\equiv c_{d_{1},d_{2}}\left(x,y\right)= & \left(\mathcal{N}_{x,d_{1}}\mathcal{N}_{y,d_{2}}\right)^{2}-\frac{x^{2}y^{2}}{d_{1}d_{2}}.
\end{align}
A direct calculation shows that $\Delta=b^{2}-4ac\geq0$ always holds. Let $p_{xy}^{(\pm)}=\frac{-b\pm\sqrt{\Delta}}{2a}$ be the roots of $\mathcal{F}\left(p\right)$.
Notice that $\mathcal{F}\left(p_0\right) \le 0$ (with equality only for $x=0$), $c\geq0$, and $b<0$. Hence Vieta's formulas imply
\begin{enumerate}
\item $a>0\Longrightarrow p_{xy}^{\left(+\right)}>p_{0}>p_{xy}^{\left(-\right)}>0$
\item $a<0\Longrightarrow p_{0}>p_{xy}^{\left(-\right)}>0>p_{xy}^{\left(+\right)}$.
\end{enumerate}
In both cases the solution of the inequality (\ref{eq:quadratic_inequality}) reads $p \in [0, p_{xy}^{(-)}]$, and due to the continuity of $\mathcal{F}$ the limiting formula for $a=0$ agrees with the solution of the corresponding linear inequality.
\section{Proof of Proposition 1}
The thresholds $p_E$ and $p_R$ are defined as the lowest roots of the quadratic polynomials:
\begin{align}
2(d_1^3d_2-2d_1d_2+1)p_R^2-4d_2(d_1^2-1)p_R\nonumber\\+2(d_1d_2-1)\stackrel{df}{=}f_R(p_R)=0 \label{fR} \\
(d_1^3d_2-2d_1d_2+d_1-d_2+1)p_E^2-4d_2(d_1^2-1)p_E\nonumber\\+(3d_1d_2-d_1-d_2-1)
\stackrel{df}{=}f_E(p_E)=0 \label{fE}
\end{align}
We will prove the relation $p_E\le p_R$ between the roots by showing that all roots of $f_R$ and $f_E$ lie in the set $\{x: f_E(x) < f_R(x)\}$. To this end let us calculate the difference $f_E-f_R$:
\begin{align}
(f_E-f_R)(x) = -(d_1^3d_2-2d_1d_2+d_2-d_1+1)x^2 \nonumber\\
+ (d_1-1)(d_2-1)
\end{align}
It is positive in the range $[-x_0,x_0]$, where:
\begin{align}
x_0=\sqrt{\frac{d_2-1}{(d_1+1)d_1d_2-(d_2+1)}}.
\end{align}
We will show that $f_R$ and $f_E$ are positive in the above range by proving that
\begin{enumerate}
\item $f_R(x_0) = f_E(x_0)$,
\item $f_R$ is decreasing at $x_0$.
\end{enumerate}
One has
\begin{align}
f_R(x_0) =& f_E(x_0) \nonumber \\
=& d_2\frac{2d_1^3d_2+d_1^2d_2-3d_1d_2-d_1^3-d_1^2+2}{(d_1+1)d_1d_2-(d_2+1)} \nonumber\\
-&2d_2(d_1^2-1)\sqrt{\frac{d_2-1}{(d_1+1)d_1d_2-(d_2+1)}}.
\end{align}
Hence, we want to prove that
\begin{align} \label{ineq_pEpR}
2d_1^3d_2+d_1^2d_2-3d_1d_2-d_1^3-d_1^2+2 \nonumber\\
- 2(d_1^2-1)\sqrt{(d_2-1)((d_1+1)d_1d_2-(d_2+1))} \ge 0.
\end{align}
To do this, we will rewrite the above as:
\begin{align*}
(d_1^2-1) + d_1d_- \frac{2d_1+3}{2d_1+2} \ge \\
\sqrt{d_-^2(d_1^2+d_1-1)+d_-d_1(2d_1+3)(d_1-1)+(d_1^2-1)^2},
\end{align*}
where $d_-=d_2-d_1$. After squaring, the latter simplifies to:
\begin{displaymath}
d_1^2d_-^2(2d_1+3)^2 \ge 4d_-^2(d_1+1)^2(d_1^2+d_1-1),
\end{displaymath}
which finally gives:
\begin{displaymath}
d_-^2(d_1^2+4d_1+4) = (d_2-d_1)^2(d_1+2)^2 \ge 0.
\end{displaymath}
We prove the second property by showing that the abscissa of the vertex of $f_R$ is greater than $x_0$:
\begin{displaymath}
\frac{d_2(d_1^2-1)}{d_2d_1^3-2d_1d_2+1} > \sqrt{\frac{d_2-1}{d_1^2d_2+d_1d_2-d_2-1}} = x_0 .
\end{displaymath}
Since $d_2\geq 1$, the left hand side is bounded from below by
\begin{displaymath}
\frac{d_2(d_1^2-1)}{d_2(d_1^3-2d_1+1)} = \frac{d_1+1}{d_1^2+d_1-1},
\end{displaymath}
while the right hand side is bounded from above by
\begin{displaymath}
\sqrt{\frac{d_2-1}{d_2(d_1^2+d_1-2)}} < \frac{1}{\sqrt{(d_1-1)(d_1+2)}} .
\end{displaymath}
It therefore suffices to check that $(d_1+1)^2(d_1-1)(d_1+2) \ge (d_1^2+d_1-1)^2$, which reduces to $d_1^3+2d_1^2-d_1-3 \ge 0$ and holds for all $d_1 \ge 2$.
One has $f_E \ge f_R \ge 0$ on $[-x_0,x_0]$ and $f_E < f_R$ for $x> x_0$. The thresholds $p_E$ and $p_R$ are the roots lying to the left of the vertices of the parabolas $f_E$ and $f_R$ in Eqs. \eqref{fE} and \eqref{fR}, respectively. Both $p_E$ and $p_R$ are therefore greater than $x_0$, where $f_E < f_R$. This implies that $f_E$ reaches $0$ first and hence $p_E < p_R$.
\section{Proof of Theorem 3}
One finds
\begin{align}
\rho_{p}-\rho_{1}\otimes\rho_{2}&=\nonumber\\
=\frac{p}{d_{1}}\sum_{i,j=1}^{d_{1}}&\left|e_{i}\right\rangle \left\langle e_{j}\right|\otimes\left|f_{i}\right\rangle \left\langle f_{j}\right| - \frac{p}{d_{1}^{2}}\oper_{d_{1}} \otimes \sum_{i=1}^{d_1} |f_i\rangle \langle f_i| ,
\end{align}
and using again a vectorization technique one obtains
\begin{align}
C_{\mathrm{ER}} \equiv &C\left(\rho_{p}-\rho_{1}\otimes\rho_{2}\right) \nonumber\\
=&\frac{p}{d_{1}} \sum_{i,j=1}^{d_1} |e_i \otimes e_j\rangle \langle f_i \otimes f_j| - \frac{p}{d_{1}^{2}}\left|\oper_{\mathrm{d_{1}}}\right\rangle \left\langle \oper_{\mathrm{d_{1}}}\right|.
\end{align}
$C_{\mathrm{ER}}$ stands for the enhanced realignment correlation matrix,
and the spectrum of $C_{\mathrm{ER}}C_{\mathrm{ER}}^{\dagger}$
reads
\begin{equation}
\sigma\left(C_{\mathrm{ER}}C_{\mathrm{ER}}^{\dagger}\right) = \left\{ \frac{p^{2}}{d_{1}^{2}}\right\} \times\left(d_{1}^{2}-1\right)\cup\left\{ 0\right\}.
\end{equation}
This leads to the condition
\begin{equation}
\frac{d_{1}^{2}-1}{d_{1}}p \leq \sqrt{ \frac{d_{1}-1}{d_{1}}}\sqrt{\frac{d_{2}-1}{d_{2}}-p^{2}\left(\frac{d_{2}-d_{1}}{d_{1}d_{2}}\right) }.
\end{equation}
The equality holds for
\begin{equation}
p_{\mathrm{ER}}=\sqrt{ \frac{d_{2}-1}{d_{2}\left(d_{1}^{2}+d_{1}-1 \right)-1} } .
\end{equation}
\end{document}
\begin{document}
\title[On the generation of rank $3$ simple matroids]{On the generation of rank $3$ simple matroids \\ with an application to Terao's freeness conjecture}
\begin{abstract}
In this paper we describe a parallel algorithm for generating all non-isomorphic rank $3$ simple matroids with a given multiplicity vector.
We apply our implementation in the HPC version of \GAP to generate all rank $3$ simple matroids with at most $14$ atoms and an integrally splitting characteristic polynomial.
We have stored the resulting matroids along with various useful invariants in a publicly available, ArangoDB-powered database.
As a byproduct we show that the smallest divisionally free rank $3$ arrangement which is not inductively free has $14$ hyperplanes and exists in all characteristics distinct from $2$ and $5$.
Another database query proves that Terao's freeness conjecture is true for rank $3$ arrangements with $14$ hyperplanes in any characteristic.
\end{abstract}
\thanks{This work is a contribution to Project II.1 of SFB-TRR 195 'Symbolic Tools in Mathematics and their Application' funded by Deutsche Forschungsgemeinschaft (DFG).
The fourth author was supported by ERC StG 716424 - CASe, a Minerva Fellowship of the Max Planck Society and the Studienstiftung des deutschen Volkes.}
\keywords{
rank $3$ simple matroids,
integrally splitting characteristic polynomial,
Terao's freeness conjecture,
recursive iterator,
tree-iterator,
leaf-iterator,
iterator of leaves of rooted tree,
priority queue,
parallel evaluation of recursive iterator,
noSQL database,
ArangoDB
}
\subjclass[2010]{
05B35,
52C35,
32S22,
68R05,
68W10
}
\maketitle
\section{Introduction} \label{sec:Intro}
In computational mathematics one often encounters the problem of scanning (finite but) large sets of certain objects.
Here are two typical scenarios:
\begin{itemize}
\item Searching for a counter-example of an open conjecture among these objects.
\item Building a database of such objects with some of their invariants.
\end{itemize}
A database is particularly useful when the questions asked are relational, i.e., involve more than one object (cf.~\Cref{rmrk:ind_div_using_db}).
Recognized patterns and questions which a database answers affirmatively may lead to working hypotheses or even proofs by inspection (cf.~\Cref{thm:terao}).
In any such scenario there is no need to simultaneously hold the entire set in RAM.
It is hence important to quickly \emph{iterate} over such sets in a memory efficient way rather than to enumerate them.
The central idea is to represent each such set $T$ as the set of leaves of a rooted tree $T_\bullet$ (cf.~\Cref{sec:trees}).
In other words, we embed $T$ as the set of leaves in the bigger set of vertices $V(T_\bullet)$.
We then say that $T_\bullet$ \textbf{classifies} $T$.
The internal vertices of the tree $T_\bullet$ are usually of different nature than the elements of $T$.
Their sole purpose is to encode common pre-stages of the leaves.
To iterate over the vertices of the rooted tree $T_\bullet$ we introduce the data structure of a tree-iterator $t$ (cf.~\Cref{defn:recursive_iterator}).
In this article we will describe how to use tree-iterators to classify all nonisomorphic simple rank $3$ matroids with up to $14$ atoms and integrally splitting characteristic polynomial.
A simple matroid $M$ of rank $3$ on $n$ labeled points corresponds to a bipartite graph $G_M$ (cf.~\Cref{rmrk:monoidal_partition}).
We denote by $(m_2, \ldots, m_{n-1})$ the \textbf{multiplicity vector} of $M$, where $m_k$ is the number of coatoms of multiplicity $k$, i.e., of degree $k$ in the bipartite graph corresponding to $M$ (cf.~\Cref{def:mult_vector}).
The multiplicity vector determines the characteristic polynomial of $M$:
\begin{equation} \label{eq:chiM} \tag{*}
\frac{\chi_M(t)}{t-1} = t^2 - (n-1) t + (b_2-(n-1)) \quad \mbox{with} \quad b_2 \coloneqq \sum_{k=2}^{n-1} m_k(k-1) \mbox{.}
\end{equation}
In fact, two simple rank $3$ matroids (or more generally, two paving matroids) have the same multiplicity vector $(m_2, \ldots, m_{n-1})$ iff their Tutte polynomials coincide \cite{Bryl72}.
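To make \eqref{eq:chiM} concrete, the following small Python sketch (ours, merely illustrative; the actual classification code is written in \textsf{HPC-GAP}) computes the quadratic factor of $\chi_M$ from a multiplicity vector and tests whether it splits over $\Z$:
\begin{verbatim}
from math import comb, isqrt

def chi_quadratic(m, n):
    # coefficients (1, -(n-1), b_2-(n-1)) of chi_M(t)/(t-1), cf. (*)
    assert sum(m[k] * comb(k, 2) for k in m) == comb(n, 2)
    b2 = sum(m[k] * (k - 1) for k in m)
    return 1, -(n - 1), b2 - (n - 1)

def splits_over_Z(m, n):
    # True iff t^2 - (n-1) t + (b_2-(n-1)) has two integral roots
    _, p, q = chi_quadratic(m, n)
    disc = p * p - 4 * q
    if disc < 0:
        return False
    r = isqrt(disc)
    return r * r == disc and (-p + r) % 2 == 0

# braid arrangement A_3: n = 6 atoms, multiplicity vector (m_2, m_3) = (3, 4)
print(chi_quadratic({2: 3, 3: 4}, 6))  # (1, -5, 6), i.e. (t-2)(t-3)
print(splits_over_Z({2: 3, 3: 4}, 6))  # True
\end{verbatim}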
After extending the notions of inductive and divisional freeness from arrangements to matroids (see Definitions~\ref{def:if} and~\ref{def:df}) we get the following table of cardinalities\footnote{Apart from the number of simple matroids, we were unable to find any of the sequences in the above table in the OEIS database.} of certain classes of nonisomorphic simple rank $3$ matroids.
A matroid is called \emph{Tutte-unique} or \emph{T-unique} if it is determined up to isomorphism by its Tutte polynomial\footnote{The Tutte polynomial of all rank $3$ matroids with an integrally splitting characteristic polynomial and up to $13$ atoms was computed using the $\mathtt{GAP}$ package $\mathtt{alcove}$ \cite{alcove}.} (see~\cite{MN05} for a survey on T-unique matroids).
The content of the table can be reconstructed using the database \cite{matroids_split}.
{\scriptsize
\begin{figure}
\caption{Cardinalities of certain classes of nonisomorphic simple rank $3$ matroids with a given number of atoms.}
\label{tbl:simple_matroids}
\end{figure}
}
The total number of simple rank $3$ matroids with $n \leq 12$ (unlabeled) atoms\footnote{\url{http://oeis.org/A058731}} is taken from \cite{MMIB}.
This number also coincides with the number of linear geometries minus one with $n \leq 12 $ (unlabeled) points\footnote{\url{http://oeis.org/A001200}} and has been determined earlier in \cite{BB99}.
Using our algorithm in \textsf{HPC-GAP} we directly computed all $815 107$ simple rank $3$ matroids with integrally splitting characteristic polynomial with up to $n=14$ atoms and stored them in the database \cite{matroids_split}.
Subsequently, we verified our counting by comparing it against the matroids with integrally splitting characteristic polynomial for $n \leq 11$ in \cite{MMIB_DB}\footnote{We wrote a short program to compute the characteristic polynomial of these matroids as the matroids come without precomputed properties in~\cite{MMIB_DB}.}.
\subsection{Applications of the Database}
Inspecting the database \cite{matroids_split} enables us to investigate questions like:
\begin{enumerate}
\item Is being divisionally or inductively free a property determined by the Tutte polynomial? \\
We answer this question negatively in \Cref{exmp:if_pair}. \label{q:a}
\item What is the smallest number of atoms of a representable rank $3$ matroid which is divisionally free but not inductively free?\footnote{
It is already known that such a matroid exists, namely the rank $3$ reflection arrangement $\mathcal{A}(G_{24})$ (with $21$ hyperplanes) of the exceptional complex reflection group $W=G_{24}$ is recursively free \cite{Muc17} but not inductively free \cite{HR15}.
Hence, an addition of $\mathcal{A}(G_{24})$ is easily seen to be divisionally free but not inductively free.
Therefore, the sequences of representable divisionally free and inductively free matroids differ at $n=22$ at the latest.} \\
We answer this question in \Cref{exmp:div_free_not_ind_free}.
\item Does the database confirm Terao's freeness conjecture for further classes of arrangements? \\
Indeed, this is \Cref{thm:terao}.
\end{enumerate}
Some of these questions require the construction of \emph{all} matroids with the corresponding number of atoms first, demonstrating the usefulness of a database.
\begin{exmp}\label{exmp:if_pair}
Consider the rank $3$ matroids $M_1$ and $M_2$ of size $11$ given below by the adjacency lists $A_1$ and $A_2$ of their corresponding bipartite graph respectively.
{\tiny
\begin{align*}
A_1 \coloneqq& \{\{1,2,3,4\},\{1,5,6,7\},\{1,8,9,10\},\{2,5,8,11\},\{3,6,9,11\},\{2,6,10\},\{2,7,9\},\{3,5,10\},\{4,5,9\},\{4,7,11\},\\
&\{1,11\},\{3,7\},\{3,8\},\{4,6\},\{4,8\},\{4,10\},\{6,8\},\{7,8\},\{7,10\},\{10,11\}\},\\
A_2 \coloneqq & \{\{1,2,3,4\},\{1,5,6,7\},\{2,5,8,9\},\{3,6,8,10\},\{4,7,9,10\},\{1,8,11\},\{2,7,11\},\{3,9,11\},\{4,6,11\},\{5,10,11\},\\
&\{1,9\},\{1,10\},\{2,6\},\{2,10\},\{3,5\},\{3,7\},\{4,5\},\{4,8\},\{6,9\},\{7,8\}\}.
\end{align*}
}
The matroids $M_1$ and $M_2$ are representable over $\Q$ and $\Q(\sqrt{5})$, respectively.
They admit the following representation matrices, respectively:
{\tiny
\begin{align*}
R_1 \coloneqq &
\begin{pmatrix*}[r]
1 & 0 & 1 & 1 & 0 & 1 & 1 & 0& 1 & 1& 0 \\
0 & 1 & 1 & \frac{1}{2} & 0 & 0 & 0 & 1& \frac{1}{2} & 1& 1 \\
0 & 0 & 0 & 0 & 1 & 1 & \frac{1}{2} & 1& \frac{1}{2} & 1& -1
\end{pmatrix*}\mbox{,}\\
R_2\coloneqq &
\begin{pmatrix*}[r]
1& 0& 1& 1& 0& 1& 1& 0& 0& 1& 1 \\
0& 1& 1& \phi + 1& 0& 0& 0& 1& 1& -\phi& -\phi \\
0& 0& 0& 0& 1& 1& \phi& -1& -\phi+1& \phi+1& \phi
\end{pmatrix*}\mbox{,}
\end{align*}
}
where $\phi=\frac{1+\sqrt{5}}{2}$ denotes the golden ratio.
Their multiplicity vectors agree and are given by $(m_k)=(m_2,m_3,m_4)=(10,5,5)$.
Hence, their Tutte polynomials also agree:
{\tiny
\[
T_{M_1}(x,y) = T_{M_2}(x,y)=y^8+3y^7+6y^6+10y^5+15y^4+x^3+5xy^2+21y^3+8x^2+15xy+23y^2+16x+16y\mbox{.}
\]
}
Both $M_1$ and $M_2$ have an integrally splitting characteristic polynomial:
\[
\chi_{M_1}(t)=\chi_{M_2}(t)=(t-1)(t-5)^2\mbox{.}
\]
Using the database we found that $M_1$ is inductively free and hence divisionally free whereas $M_2$ is not even divisionally free.
We checked with \GAP that any representation of $M_2$ is a free arrangement.
Neither of them is supersolvable.
The database also shows that for rank $3$ matroids this example is minimal with respect to the number of elements.
Finally, the corresponding question \eqref{q:a} for the stricter notion of supersolvability is confirmed by the database and already proven for rank $3$ matroids in \cite[Proposition 4.2]{Abe17}.
The proof is formulated for arrangements but works without changes for matroids.
\end{exmp}
\begin{exmp} \label{exmp:div_free_not_ind_free}
By inspecting the database we found that among the rank $3$ matroids with up to $14$ atoms there is a unique representable matroid $M$ with 14 atoms which is divisionally free but not inductively free.
It can be represented by the following matrix:
\[
\begin{pmatrix*}[r]
1 &0 &1 &1 & 1 & 0 &1 &1 & 1 & 0 & 1 & 1 & 0 & 1 \\
0 &1 &1 &2a-1 &2a &0 &0 &0 & 0 & 1 & -2a+2 &1 & 1 & 1 \\
0 &0 &0 &0 & 0 & 1 &1 &-2a+1 &-a+1 &a &1 & a &2a-1 &1
\end{pmatrix*}\mbox{,}
\]
where $a$ satisfies the equation $2 a^2 - 2 a + 1=0$ and the condition $(3 a-1)(a+1)\neq 0$.
In particular $M$ is representable in any characteristic distinct from $2$ and $5$ \cite{DivFreeNotIndFree}.
Its characteristic polynomial is $\chi_M(t)=(t-1)(t-6)(t-7)$. The restriction $M''$ of $M$ to its third atom (resp.\ hyperplane) has characteristic polynomial $\chi_{M''}(t)=(t-1)(t-6)$, which shows that any arrangement representing $M$ is divisionally free (cf. \cref{def:df}).
Furthermore, the Tutte polynomial of $M$ is
{\tiny
\[
y^{11}+3y^{10}+6y^9+10y^8+15y^7+21y^6+28y^5+2xy^3+36y^4+x^3+10xy^2+43y^3+11x^2+24xy+43y^2+30x+30y.
\]
}
\end{exmp}
A central notion in the study of hyperplane arrangements is freeness.
A central arrangement of hyperplanes $\A$ is called \emph{free} if the derivation module $D(\A)$ is a free module over the polynomial ring.
An important open question in this field is Terao's conjecture which asserts that the freeness of an arrangement over a field $k$ only depends on its underlying matroid and the characteristic of $k$.
It is known that Terao's conjecture holds for arrangements with up to $12$ hyperplanes in characteristic $0$ (cf. \cite{FV14,ACKN16}).
Recently, Dimca, Ibadula, and Macinic confirmed Terao's conjecture for arrangements in $\CC^3$ with up to 13 hyperplanes \cite{DIM19}.
Inspecting our database we obtain the following result:
\begin{theorem}\label{thm:terao}
Terao's freeness conjecture is true for rank $3$ arrangements with $14$ hyperplanes in any characteristic.
\end{theorem}
This article is organized as follows:
In \Cref{sec:matroids} we recall the notion of a matroid and introduce several subclasses of simple rank $3$ matroids.
In \Cref{sec:matroid_iter} we discuss the algorithm used to construct a tree-iterator classifying all nonisomorphic simple rank $3$ matroids with up to $n=14$ atoms having an integrally splitting characteristic polynomial.
In \Cref{sec:representability} we briefly point out how to use Gröbner bases to compute the moduli space of representations (over some unspecified field $\bF$) of a matroid as an affine variety over $\operatorname{Spec} \Z$.
In \Cref{sec:proof} we finally prove \Cref{thm:terao}.
In \Cref{sec:trees} we collect some terminology about rooted trees.
In \Cref{sec:iterators} we define recursive and tree-iterators
and in \Cref{sec:parallel} we introduce algorithms to evaluate them in parallel.
\Cref{sec:WhyHPCGAP} summarizes the merits of the high performance computing (HPC) version of \GAP, which we used to implement the above mentioned algorithms.
We conclude by giving some timings in \Cref{sec:timings} to demonstrate the significance of our parallelized algorithms in the generation of (certain classes) of simple rank $3$ matroids.
\section*{Acknowledgments}
We would like to thank Rudi Pendavingh for pointing us to the paper \cite{DW89} and providing us with his \textsf{SageMath} code to compute the Dress-Wenzel condition for representability of matroids established in loc.~cit.
Using his code we could avoid computing the empty moduli spaces of ca.\ 400{,}000 nonrepresentable matroids with $14$ atoms.
We would also like to thank Markus Lange-Hegermann and Sebastian Posur for their comments on an earlier version of this paper.
Our thanks goes also to Sebastian Gutsche and Fabian Zickgraf who helped us to setup the VM on which the public database is running.
Last but not least, we are indebted to the anonymous referees for their careful reading and for the valuable suggestions which helped us improve the exposition.
\section{Simple Matroids} \label{sec:matroids}
\subsection{Basic Definitions}
Finite simple matroids have many equivalent descriptions.
For our purposes we prefer the one describing the lattice of flats.
\begin{defn}
A \textbf{matroid} $M=(E,\F)$ consists of a finite ground set $E$ and a collection $\F$ of subsets of $E$, called \textbf{flats} (of $M$), satisfying the following properties:
\begin{enumerate}
\item The ground set $E$ is a flat;
\item The intersection $F_1\cap F_2$ is a flat, if $F_1$ and $F_2$ are flats;
\item If $F$ is a flat, then any element in $E \setminus F$ is contained in exactly one flat covering $F$.
\suspend{enumerate}
Here, a flat is said to \textbf{cover} another flat $F$ if it is minimal among the flats properly containing $F$.
A matroid is called \textbf{simple} if
\resume{enumerate}
\item it is loopless, i.e., $\emptyset$ is a flat;
\item it contains no parallel elements, i.e., the singletons are flats, which are called \textbf{atoms}.
\end{enumerate}
\end{defn}
For a matroid $M=(E,\F)$ and $S\subseteq E$ we denote by $r(S)$ the \textbf{rank of $S$} which is the maximal length of chains of flats in $\F$ all contained in $S$.
The \textbf{rank of the matroid $M$} is defined to be $r(E)$.
A subset $S\subseteq E$ is called \textbf{independent} if $|S|=r(S)$ and otherwise \textbf{dependent}.
A maximal independent set is called a \textbf{basis of $M$}.
\begin{rmrk}[Basis Extension Theorem]\label{rm:basis}
Any independent subset of a matroid can be extended to a basis.
Hence, the cardinality of any basis equals the rank of the matroid.
\end{rmrk}
The flats form a poset\footnote{The poset of flats is a geometric lattice, i.e., a finite atomic semimodular lattice.
Conversely, finite atomic semimodular lattices give rise to matroids.} $\F$ by inclusion.
Dually, a \textbf{coatom} is a maximal element in $\F \setminus \{ E \}$.
An isomorphism between the matroids $(E,\F)$ and $(E',\F')$ is a bijective map $E \to E'$ which induces an isomorphism $\F \to \F'$ of posets.
Originally, matroids were introduced as an abstraction of the notion of linear (in)dependence in linear algebra.
\begin{defn}
A \textbf{central arrangement} over a field $\bF$ is a finite set $\A$ of $(n-1)$-dimensional subspaces of an $n$-dimensional $\bF$-vector space $V$.
The \textbf{lattice of flats} $L(\A)$ is the set of intersections of subsets of $\A$, partially ordered by reverse inclusion, where the empty (set-theoretic) intersection is defined as $V$.
The arrangement $\A$ is called \textbf{essential} if $\{0\} \in L(\A)$.
\end{defn}
The pair $(\A, L(\A))$ is a matroid of rank $n - \dim \bigcap_{H \in \A} H$, i.e., of rank $n$ iff $\A$ is essential.
We call such a pair a \textbf{vector matroid} over $\bF$.
This motivates the following definition:
\begin{defn}
A matroid is called \textbf{representable over the field $\bF$} if it is isomorphic to a vector matroid over $\bF$.
A matroid is called \textbf{representable} if it is representable over some field $\bF$.
\end{defn}
The following matroid invariant and its specialization play an important role in our study of simple rank $3$ matroids.
\begin{defn}
The \textbf{Tutte Polynomial} $T_M(x,y)$ of a matroid $M=(E,\F)$ is defined by
\[
T_M(x,y) \coloneqq \sum_{S \in \mathcal{P}(E)} (x-1)^{r(M)-r(S)} (y-1)^{|S|-r(S)}\mbox{.}
\]
A matroid is called \textbf{Tutte-unique}, if it is determined by its Tutte polynomial, i.e. any matroid with the same Tutte polynomial is isomorphic to the given one.
An important evaluation of the Tutte polynomial is the \textbf{characteristic polynomial} \[
\chi_M(t) \coloneqq (-1)^{r(M)}T_M(1-t,0) = \sum_{S \in \mathcal{P}(E)} (-1)^{|S|}t^{r(M)-r(S)}.
\]
\end{defn}
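For small rank $3$ (i.e.\ paving) matroids the defining sum can be evaluated directly: the rank of a subset $S$ is $|S|$ for $|S|\le 2$, and for $|S|\ge 3$ it equals $2$ if $S$ is contained in a coatom and $3$ otherwise. The following Python sketch (ours, purely illustrative; not the \GAP{} code used for our database) exploits this to evaluate $T_M$ at a given point; as a test it checks that $2$ is a root of the characteristic polynomial of the braid arrangement $\A_3$ (cf.~\Cref{fig:braid_arr}):
\begin{verbatim}
from itertools import combinations

def rank3(S, coatoms):
    # rank function of a simple rank 3 matroid given by its coatoms
    if len(S) <= 2:
        return len(S)
    return 2 if any(S <= F for F in coatoms) else 3

def tutte_at(x, y, atoms, coatoms):
    # corank-nullity sum over all subsets of the ground set
    E = sorted(atoms)
    total = 0
    for k in range(len(E) + 1):
        for S in combinations(E, k):
            S = frozenset(S)
            r = rank3(S, coatoms)
            total += (x - 1) ** (3 - r) * (y - 1) ** (len(S) - r)
    return total

# braid arrangement A_3, atoms 1..6 = x, y, z, x-y, x-z, y-z:
coatoms = [frozenset(F) for F in
           [{1, 2, 4}, {1, 3, 5}, {2, 3, 6}, {4, 5, 6}, {3, 4}, {2, 5}, {1, 6}]]
# chi_M(2) = (-1)^3 T_M(1-2, 0) should vanish, since chi = (t-1)(t-2)(t-3)
print(-tutte_at(-1, 0, range(1, 7), coatoms))  # 0
\end{verbatim}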
The main application of this article is the enumeration of matroids with an integrally splitting characteristic polynomial.
We will denote the class of rank $r$ matroids with \textbf{integrally splitting characteristic polynomial} by $\mathcal{ISM}_r$.
Such a factorization of $\chi_M(t)$ is often implied by stronger combinatorial or (in the representable case) algebraic/geometric properties.
The only known converse statement is that for a graphic or cographic matroid $M$ induced by a planar graph $G$ the matroid has an integrally splitting characteristic polynomial if and only if $G$ is chordal as shown in~\cite{DK98} for graphic and in~\cite{KR11} for cographic matroids, respectively.
In both cases, the fact that the characteristic polynomial is integrally splitting even implies that $M$ is supersolvable.
However, it is still safe to say that the rather small class of matroids in $\mathcal{ISM}_r$ is not yet well understood when $r \geq 3$.
\subsection{Simple Rank $3$ Matroids}
We will restrict ourselves to the case of simple rank~$3$ matroids in the following definitions.
It is worth pointing out at this point that a rank $3$ matroid is simple if and only if it is \textbf{paving} which in general means that any circuit is at least as large as the rank of the matroid.
The smallest class we will consider is that of supersolvable matroids introduced by Stanley in~\cite{Sta72}.
In the rank $3$ case the definition can be given as follows:
\begin{defn}
A matroid $M=(E,\F)$ of rank $3$ is \textbf{supersolvable} if there exists a flat $F_0\in\F$ of rank $2$ whose intersection with every other flat of rank $2$ is nonempty.
In this case the characteristic polynomial is integrally splitting with roots
\[
\chi_M(t) = \left(t -1\right)\left(t - (|F_0|-1)\right)\left(t-(|E|-|F_0|)\right) \mbox{.}
\]
Define $\mathcal{SSM}_3$ to be the class of all supersolvable rank $3$ matroids.
\end{defn}
To introduce the next combinatorial classes of matroids we need the notions of deletion and reduced contraction of a matroid with respect to an element $H$ of the ground set $E$.
The deletion just removes the element $H$ from the matroid and, for a representable matroid, the reduced contraction is the matroid that arises by intersecting all other hyperplanes with $H$:
\begin{defn}
Let $M=(E,\F)$ be a matroid and $H\in E$.
Define the \textbf{deletion of $H$} to be the matroid $M' \coloneqq M\setminus H\coloneqq(E',\F')$ where
\begin{align*}
E' \coloneqq & E\setminus H \coloneqq E\setminus \{H\},\\
\F' \coloneqq & \F\setminus H \coloneqq\{F\setminus\{H\}\mid F\in\F\}.
\end{align*}
The \textbf{reduced contraction\footnote{This definition mimics the usual notion of restriction for hyperplane arrangements. Note that it differs from the matroid-theoretic contraction since it does not contain loops and parallel elements.} of $H$}
is the matroid $M''\coloneqq M^H \coloneqq (E'',\F'')$ where
\[
\F'' \coloneqq \F^H \coloneqq \{F \in \F \mid \{H\}\subseteq F\},
\]
and its atoms $E''=E^H$ are identified with the flats of rank $1$ in $\F^H$.
If $\{H\}$ is a flat in $M$ then $M^H$ is a simple matroid.
In particular, if $M$ is simple then so are $M\setminus H$ and $M^H$.
\end{defn}
The following two classes stem from the theory of free hyperplane arrangements.
The first one generalizes Terao's notion of inductively free hyperplane arrangements to matroids~\cite{Ter80}.
Intuitively, a matroid $M$ is inductively free if there is an atom whose deletion and reduced contraction are both inductively free and such that the characteristic polynomial of the reduced contraction divides the one of $M$.
As the start of the induction one defines all rank $2$ matroids and all Boolean matroids to be inductively free.
\begin{defn}\label{def:if}
We define the class $\mathcal{IFM}_3 $ of \textbf{inductively free rank $3$ matroids} to be the smallest class of simple rank $3$ matroids containing
\begin{itemize}
\item the Boolean matroid $M_3 \coloneqq (\{1,2,3\},\mathcal{P}(\{1,2,3\}))$ and
\item $M=(E,\F)$ with $|E|>3$ if there exists an $H\in E$ such that $\chi_{M^H}(t)|\chi_M(t)$ and $M\setminus H\in \mathcal{IFM}_3$.
\end{itemize}
\end{defn}
Recently, Abe introduced a larger class of combinatorially free arrangements in~\cite{Abe16}.
This class is defined in a similar fashion just without the assumption on the deletion of a matroid.
\begin{defn}\label{def:df}
The class $\mathcal{DFM}_3 $ of \textbf{divisionally free rank $3$ matroids} is the smallest class of simple rank $3$ matroids containing
\begin{itemize}
\item the Boolean matroid $M_3 \coloneqq (\{1,2,3\},\mathcal{P}(\{1,2,3\}))$ and
\item $M=(E,\F)$ with $|E|>3$ if there exists an $H\in E$ such that $\chi_{M^H}(t)|\chi_M(t)$.
\end{itemize}
\end{defn}
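For rank $3$ the divisibility condition is easy to test algorithmically: the reduced contraction $M^H$ is a simple rank $2$ matroid whose atoms are the coatoms of $M$ containing $H$, so $\chi_{M^H}(t)=(t-1)\bigl(t-(k_H-1)\bigr)$ with $k_H$ the number of such coatoms. A minimal Python sketch (ours, for illustration; our database was built with \textsf{HPC-GAP}) reads:
\begin{verbatim}
def chi_quotient(coatoms, n):
    # chi_M(t)/(t-1) = t^2 + p*t + q for a simple rank 3 matroid on n atoms
    b2 = sum(len(F) - 1 for F in coatoms)
    return -(n - 1), b2 - (n - 1)

def is_divisionally_free_rank3(coatoms, atoms):
    # DFM_3: Boolean matroid, or some atom H with chi_{M^H} dividing chi_M
    atoms = list(atoms)
    n = len(atoms)
    if n == 3:                       # Boolean matroid M_3
        return True
    p, q = chi_quotient(coatoms, n)
    for H in atoms:
        r = sum(1 for F in coatoms if H in F) - 1   # k_H - 1
        if r * r + p * r + q == 0:   # (t - r) divides chi_M(t)/(t-1)
            return True
    return False

# braid arrangement A_3: chi = (t-1)(t-2)(t-3), divisionally free
coatoms = [{1, 2, 4}, {1, 3, 5}, {2, 3, 6}, {4, 5, 6}, {3, 4}, {2, 5}, {1, 6}]
print(is_divisionally_free_rank3(coatoms, range(1, 7)))  # True
\end{verbatim}
Deciding inductive freeness additionally requires the recursive check on the deletion $M\setminus H$, which is where a database of all smaller matroids becomes useful (cf.~\Cref{rmrk:ind_div_using_db}).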
\begin{rmrk}\label{rm:strict_inclusions}
The following strict inclusions hold
\[
\mathcal{SSM}_3 \subsetneq \mathcal{IFM}_3 \subsetneq \mathcal{DFM}_3 \subsetneq \mathcal{ISM}_3 \mbox{,}
\]
where the first strict inclusion is shown in~\cite{JT84} and the second inclusion in \cite{Abe16} (for strictness of the inclusion in rank $3$ cf.~\Cref{exmp:div_free_not_ind_free} and for vector matroids of rank at least $4$ cf.~loc.~cit.).
The last inclusion holds by the definition of divisional freeness and since $\chi_M(t)=(t-1)(t-(|E|-1))$ for any simple matroid $M=(E,\F)$ of rank $2$ (for strictness see~\Cref{tbl:simple_matroids}, for example).
\end{rmrk}
\begin{rmrk} \label{rmrk:ind_div_using_db}
Due to the recursive nature of the definition of inductive freeness, a database containing the simple rank $3$ matroids with up to $n$ atoms is extremely useful when deciding the inductive freeness of those with $n+1$ atoms.
This is how we determined the subclass $\mathcal{IFM}_3$ in our database.
\end{rmrk}
\section{Generating Rank $3$ Matroids with Integrally Splitting Characteristic Polynomials} \label{sec:matroid_iter}
Since we will focus on the rank $3$ case we prefer to describe them as special instances of bipartite graphs.
And as already mentioned in the introduction, the description of tree-iterators $T_\bullet$ generating simple rank $3$ matroids will rely on the language of bipartite graphs.
Our description is a special case of $m$-partitions, which describe general paving matroids of rank $m+1$~\cite[Proposition 2.1.24]{Oxley2011}.
\begin{defn}\label{defn:matroidal_bipartitegraph}
A (proper) \textbf{$2$-partition} of a finite set $E$ is a set $\mathcal{E}$ of nonempty (proper) subsets of $E$, called blocks, such that
\begin{enumerate}
\item each block contains at least $2$ elements;
\item each pair of elements is contained in exactly one block.\label{defn:matroidal_bipartitegraph.b}
\end{enumerate}
\end{defn}
Condition \eqref{defn:matroidal_bipartitegraph.b} means that $\{ \binom{F}{2} \mid F \in \mathcal{E} \}$ is a partition of $\binom{E}{2} \coloneqq \{ \{a,b\} \subseteq E \mid a \neq b\}$.
\begin{rmrk} \label{rmrk:monoidal_partition}
Let $\mathcal{E}$ be a $2$-partition of $E$.
Then
\begin{itemize}
\item $\bigcup \mathcal{E} = E$.
\item $|F \cap F'| \leq 1$ for all $F, F' \in \mathcal{E}$ with $F \neq F'$.
\item $\sum_{F \in \mathcal{E}} \binom{|F|}{2} = \binom{|E|}{2}$.
\item The union $E \cup \mathcal{E}$ defines the vertices of a bipartite graph with adjacency given by membership.
We call bipartite graphs admitting such a description \textbf{matroidal}, if the $2$-partition is proper.
Connecting, as in \Cref{fig:braid_arr}, the elements of $E$ with an initial element and the blocks with a terminal element we obtain a geometric lattice of flats of a simple rank $3$ matroid.
Hence, there is a bijective correspondence between simple rank $3$ matroids with ground set $E$ and proper $2$-partitions of $E$.
Therefore, we will henceforth call the elements of $E$ \textbf{atoms} and those of $\mathcal{E}$ \textbf{coatoms}.
\end{itemize}
Since each pair of atoms is contained in exactly one coatom, the left-hand side of the equation in the third item counts the number of pairs of atoms that are joined by the coatoms; this must equal the number of all pairs of atoms, which is the right-hand side of the equation.
\end{rmrk}
We subdivide the matroid generation by considering, at each stage, only those matroids with a fixed number of coatoms of each size.
To this end, we record the number of coatoms of each size in a vector, which is required to satisfy the counting condition of~\Cref{rmrk:monoidal_partition}:
\begin{defn}\label{def:mult_vector}
For $n \in \N$ we call $(m_k) \coloneqq (m_k)_{k=2,\ldots,n-1} \coloneqq (m_2, \ldots, m_{n-1})$ a \textbf{multiplicity vector} of size $n$ if $\sum_{k=2}^{n-1} m_k \binom{k}{2} = \binom{n}{2}$.
Each $2$-partition $\mathcal{E}$ gives rise to an \textbf{associated multiplicity vector} $m(\mathcal{E}) = (m_k)$ with $m_k = |\{ F \in \mathcal{E} : |F| = k \}|$.
\end{defn}
An example of such a matroidal bipartite graph corresponding to the rank $3$ braid arrangement $\A_3$ is given in \Cref{fig:braid_arr}.
It has the multiplicity vector $(m_2,m_3)=(3,4)$ and the characteristic polynomial $\chi_{\A_3}(t)=(t-1)(t-2)(t-3)$.
\begin{center}
\begin{minipage}{0.9\linewidth}
\begin{center}
\begin{tikzpicture}[scale=0.55]
\node[label=left:{$\bm{x}$}] at (-10, 0) (x) {\LARGE$\bullet$};
\node[label=left:{$\bm{y}$}] at (-6, 0) (y) {\LARGE$\bullet$};
\node[label=left:{$\bm{z}$}] at (-2, 0) (z) {\LARGE$\bullet$};
\node[label=right:{$\bm{x-y}$}] at (2, 0) (xy) {\LARGE$\bullet$};
\node[label=right:{$\bm{x-z}$}] at (6, 0) (xz) {\LARGE$\bullet$};
\node[label=right:{$\bm{y-z}$}] at (10, 0) (yz) {\LARGE$\bullet$};
\node[label=left:{\color{blue}$\bm{3}$}] at (-12, 7) (c1) {\LARGE$\bullet$};
\node[label=left:{\color{blue}$\bm{3}$}] at (-8, 7) (c2) {\LARGE$\bullet$};
\node[label=left:{\color{blue}$\bm{3}$}] at (-4, 7) (c3) {\LARGE$\bullet$};
\node[label=left:{\color{blue}$\bm{3}$}] at (0, 7) (c4) {\LARGE$\bullet$};
\node[label=right:{\color{blue}$\bm{2}$}] at (4, 7) (c5) {\LARGE$\bullet$};
\node[label=right:{\color{blue}$\bm{2}$}] at (8, 7) (c6) {\LARGE$\bullet$};
\node[label=right:{\color{blue}$\bm{2}$}] at (12, 7) (c7) {\LARGE$\bullet$};
\node at (0, 10) (0) {$\bullet$};
\node at (0, -3) (V) {$\bullet$};
\draw[thick] (x.center) -- (c1.center) -- (y.center);
\draw[thick] (xy.center) -- (c1.center);
\draw[thick] (x.center) -- (c2.center) -- (z.center);
\draw[thick] (xz.center) -- (c2.center);
\draw[thick] (y.center) -- (c3.center) -- (z.center);
\draw[thick] (yz.center) -- (c3.center);
\draw[thick] (xy.center) -- (c4.center) -- (xz.center);
\draw[thick] (yz.center) -- (c4.center);
\draw[thick] (x.center) -- (c7.center) -- (yz.center);
\draw[thick] (y.center) -- (c6.center) -- (xz.center);
\draw[thick] (z.center) -- (c5.center) -- (xy.center);
\draw[thick, dashed] (V.center) -- (x.center);
\draw[thick, dashed] (V.center) -- (y.center);
\draw[thick, dashed] (V.center) -- (z.center);
\draw[thick, dashed] (V.center) -- (xy.center);
\draw[thick, dashed] (V.center) -- (xz.center);
\draw[thick, dashed] (V.center) -- (yz.center);
\draw[thick, dashed] (0.center) -- (c1.center);
\draw[thick, dashed] (0.center) -- (c2.center);
\draw[thick, dashed] (0.center) -- (c3.center);
\draw[thick, dashed] (0.center) -- (c4.center);
\draw[thick, dashed] (0.center) -- (c5.center);
\draw[thick, dashed] (0.center) -- (c6.center);
\draw[thick, dashed] (0.center) -- (c7.center);
\end{tikzpicture}
\end{center}
\captionof{figure}{The lattice of flats of the $\A_3$ braid arrangement with atoms in the bottom row and coatoms in the top row.
The linear forms depicted next to the atoms are a possible representation of the matroid.
The numbers in blue are the multiplicities of the coatoms.}\label{fig:braid_arr}
\end{minipage}
\end{center}
To generate the multiplicity vectors of all simple rank $3$ matroids of fixed size $n$ we can naively iterate over all vectors in $\{0,\ldots,n\}^{n-2}$ satisfying the equation in \Cref{def:mult_vector}.
Additionally, we can assume that any matroid has at least as many coatoms as atoms by a theorem of de Bruijn and Erd\H{o}s~\cite{dBE48}.
Finally, we only consider those multiplicity vectors $(m_k)$ for which the corresponding characteristic polynomial as in~\eqref{eq:chiM} is integrally splitting.
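The following Python sketch (ours, and possibly coarser than the filter used for the database) enumerates such candidate multiplicity vectors; it assumes the standard formula for the characteristic polynomial of a simple rank~$3$ matroid of size $n$, namely $\chi_M(t)=(t-1)\bigl(t^2-(n-1)t+b-n+1\bigr)$ with $b=\sum_k m_k(k-1)$, which we take to be the content of \eqref{eq:chiM}.
\begin{verbatim}
from math import comb, isqrt

def multiplicity_vectors(n):
    """Enumerate candidate multiplicity vectors (m_2,...,m_{n-1}) of size n:
    nonnegative solutions of sum_k m_k*binom(k,2) = binom(n,2) with at least
    as many coatoms as atoms (de Bruijn-Erdos) and such that the quadratic
    factor t^2 - (n-1)*t + (b-n+1), b = sum_k m_k*(k-1), of the (assumed)
    characteristic polynomial splits over the integers."""
    target = comb(n, 2)

    def rec(k, remaining, prefix):
        # choose m_k for k = n-1, n-2, ..., 2
        if k == 1:
            if remaining == 0:
                yield tuple(reversed(prefix))
            return
        for m in range(remaining // comb(k, 2) + 1):
            yield from rec(k - 1, remaining - m * comb(k, 2), prefix + [m])

    for mv in rec(n - 1, target, []):
        if sum(mv) < n:        # de Bruijn-Erdos: at least n coatoms
            continue
        b = sum(m * (k - 1) for k, m in enumerate(mv, start=2))
        disc = (n - 1) ** 2 - 4 * (b - n + 1)
        if disc >= 0 and isqrt(disc) ** 2 == disc and (n - 1 - isqrt(disc)) % 2 == 0:
            yield mv

# the multiplicity vector (3,4,0,0) of the braid arrangement A_3 (n = 6):
assert (3, 4, 0, 0) in multiplicity_vectors(6)
\end{verbatim}
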
Let $T$ denote the set of isomorphism classes of simple rank $3$ matroids on $n$ \emph{unlabeled} atoms\footnote{Cf.~(\url{http://oeis.org/A058731}).}.
In what follows we will embed $T$ as the set of leaves in a rooted tree $T_\bullet$.
To make this precise we will use the language of rooted trees and tree-iterators which we summarized in \Cref{sec:trees} and \Cref{sec:iterators}, respectively.
We start by describing a rooted tree $\widetilde{T}_\bullet$ with an action of the symmetric group $\Sym(n)$, such that the quotient tree $T_\bullet \coloneqq \widetilde{T}_\bullet / \Sym(n)$ (in the sense of \Cref{exmp:orbit}) classifies the set $T$.
The set of leaves $\widetilde{T} \coloneqq \lim \widetilde{T}_\bullet$ is then the set of isomorphism classes of simple rank $3$ matroids on $n$ \emph{labeled} atoms.\footnote{Cf.~(\url{http://oeis.org/A058720}, for $k=3$).}
We subdivide this problem by describing a subtree $\widetilde{T}_\bullet^{(m_k)} \subseteq \widetilde{T}_\bullet$ such that $T_\bullet^{(m_k)} \coloneqq \widetilde{T}_\bullet^{(m_k)} / \Sym(n)$ classifies the set $T^{(m_k)}$ of all nonisomorphic simple rank $3$ matroids with given multiplicity vector $(m_k)$.
Our goal is to build a locally uniform tree-iterator $t^{(m_k)}$ having the tree $T_\bullet^{(m_k)}$ as its tree of relevant leaves in the language of \Cref{rmrk:B3}.
In order to describe the larger tree $T_\bullet^{t^{(m_k)}}$ associated to $t^{(m_k)}$ (in the sense of \Cref{rmrk:B2}) we define the theoretically possible pre-stages of $2$-partitions with associated multiplicity vector $(m_k)$ in the following sense:
\begin{defn} \label{defn:admissible}
We call a set $A = \{ A_i \}$ of subsets of $\{1, \ldots, n\}$ an \textbf{admissible partial $2$-partition} of level $k_0 \in \{1, \ldots, n-2\}$ for a multiplicity vector $(m_k)=(m_2, \ldots, m_{n-1})$ of size $n$ if
\begin{itemize}
\item $|\{i : |A_i| = k\}| = m_k$ for all $k$ with $k_0 < k \leq n-1$;
\item $|A_i \cap A_j| \leq 1$ for all $i < j$.
\end{itemize}
\end{defn}
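For concreteness, the following Python sketch (ours, not the \GAP implementation) checks admissibility and verifies it for the two partial $2$-partitions appearing in the proof of \Cref{prop:no_matroid} below.
\begin{verbatim}
from itertools import combinations

def is_admissible(A, n, mv, k0):
    """Check whether the family A of subsets of {1,...,n} is an admissible
    partial 2-partition of level k0 for the multiplicity vector
    mv = (m_2, ..., m_{n-1}): for every k with k0 < k <= n-1 exactly m_k
    blocks of size k are present, and any two blocks meet in at most one atom."""
    m = {k: mk for k, mk in enumerate(mv, start=2)}
    for k in range(k0 + 1, n):
        if sum(1 for B in A if len(B) == k) != m.get(k, 0):
            return False
    return all(len(set(B1) & set(B2)) <= 1 for B1, B2 in combinations(A, 2))

# The two partial 2-partitions A_1, A_2 of Proposition prop:no_matroid for the
# multiplicity vector v_2 = (m_2,...,m_6) = (1,23,1,0,1) of size n = 14:
v2 = (1, 23, 1, 0, 1)
A1 = [set(range(1, 7)), {1, 7, 8, 9}]
A2 = [set(range(1, 7)), {7, 8, 9, 10}]
assert is_admissible(A1, 14, v2, 3) and is_admissible(A2, 14, v2, 3)
\end{verbatim}
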
The rooted tree $T_\bullet^{t^{(m_k)}}$ can now be described as follows:
We set $T_0^{t^{(m_k)}} \coloneqq \{*\}$ and for $1 \leq i \leq n-2$ let $T_i^{t^{(m_k)}}$ consist of all admissible partial $2$-partitions of level $k_0 = n - i - 1$ modulo the action of $\Sym(n)$.
All maps $T_i^{t^{(m_k)}} \leftarrow T_{i+1}^{t^{(m_k)}}$ are evident and surjective.
The rooted tree $T_\bullet^{t^{(m_k)}}$ differs from its subtree $T_\bullet^{(m_k)}$ by the possible dead ends, i.e., by those admissible partial $2$-partitions that cannot be completed to a proper $2$-partition.
To describe the iterator $t^{(m_k)}$ we propose an algorithm that takes as input an admissible partial $2$-partition $A$ of some level~$k_0$ and iterates over all possible extensions to admissible partial $2$-partitions of the next smaller nontrivial level $k_1 < k_0$.
In this computation we only consider extensions which are lexicographically minimal with respect to the stabilizer of $A$ under the action of the symmetric group $\Sym(n)$, to avoid iterating over isomorphic matroids multiple times.
The details of this procedure are given in Algorithm~\ref{algo:state} (\textbf{IteratorFromState}).
Finally, to build the tree-iterator of all simple rank $3$ matroids with $n$ atoms and multiplicity vector $(m_k)$ (as bipartite graphs) we apply \textbf{IteratorFromState} to the initial state
\[
s^{(m_k)} \coloneqq \big(n, (m_k), k_0 \coloneqq \max\{ k \mid m_k > 0 \}, A \coloneqq () \big) \mbox{.}
\]
For the proof of \Cref{thm:terao} we are only interested in those matroids with an \textbf{integrally splitting characteristic polynomial} (see \Cref{sec:proof}).
Since the multiplicity vector of a rank $3$ matroid $M$ determines the characteristic polynomial $\chi_M$ by \eqref{eq:chiM} we only consider tree-iterators $t^{(m_k)}$ such that the corresponding characteristic polynomial defined by \eqref{eq:chiM} is integrally splitting.
\begin{algorithm}[H]
\SetKwIF{If}{ElseIf}{Else}{if}{then}{elif}{else}{}
\DontPrintSemicolon
\SetKwProg{IteratorFromState}{IteratorFromState}{}{}
\SetKwProg{Next}{Next}{}{}
\LinesNotNumbered
\KwIn{
state $s$ consisting of
\begin{itemize}
\item a number $n$ of atoms
\item a multiplicity vector $(m_k)=(m_2, \ldots, m_{n-1})$ of size $n$ \tcp*{cf.~\Cref{sec:Intro}}
\item an integer $n-1 \geq k_0 \geq 2$ with $m_{k_0} > 0$
\item an admissible partial $2$-partition $A$ of level $k_0$ for $(m_k)$ \tcp*{cf.~\Cref{defn:admissible}}
\end{itemize}
}
\KwOut{tree-iterator $\mathtt{iter}$ for which \textbf{Next}($\mathtt{iter}$) returns one of the following:
\begin{itemize}
\item \textbf{IteratorFromState}(state satisfying above specifications for $k_1$ defined in line~\ref{line:k_1}),
\item adjacency list, or
\item $\mathtt{fail}$
\end{itemize}
}
\IteratorFromState(){($s := ( n, (m_k), k_0, A)$, $S$)}{
\nl Initialize an iterator $\mathtt{iter}$ and equip it with\;
\nl \, \textbullet\, an empty list $APP$ to store the produced admissible partial $2$-partitions,\;
\nl \, \textbullet\, an integer $k_1 \coloneqq \max (\{1\} \cup \{k' < k_0 \mid m_{k'} > 0\}) \geq 1$, and\; \label{line:k_1}
\nl \, \textbullet\, a function \textbf{Next} as defined in line~\ref{line:Next}\;
\nl \label{line:Next} \Next(){($\mathtt{iter}$)}{
\tcc{
find the next block of coatoms of multiplicity $k_0$:
}
\nl \uIf{next $A' = \{A'_1, \ldots, A'_{m_{k_0}} \}$ exists with \tcp*{find $m_{k_0}$ new coatoms} \label{line:A'}
\nl \, \textbullet\, $A \cup A'$ admissible partial $2$-partition of level $k_1$\; \label{line:admissible}
\tcc{
the following line guarantees the generation of pairwise nonisomorphic bipartite graphs, the justification will be provided in \Cref{rmrk:sym_red}
}
\nl \, \textbullet\, the lexicographically minimal element $A''$ in the orbit of $A'$ under $\Stab_{\Sym(n)}(A)$ is not contained in $APP$\label{algo:ln:min_image} \; \label{line:sym}
\tcc{
Lines \ref{line:A'},\ref{line:admissible},\ref{line:sym} can again be realized by an iterator which returns the next $A'$ or $\mathtt{fail}$ if no such $A'$ exists.
}
}{
\nl save $A''$ in $APP$\;
\nl $A'' := A \cup A''$ \tcp*{augment the current partial $2$-partition}
\nl \uIf{$k_1 \geq 2$}{
\nl $s' \coloneqq (n, (m_k), k_1, A'')$ \tcp*{define the new state}
\tcc{return \textbf{IteratorFromState} applied to the new state $s'$}
\nl \Return{{\upshape{\textbf{IteratorFromState}}($s'$)}}}
\nl \Else{
\nl \Return{$A''$} \tcp*{return the complete adjacency list}
}
}
\nl \Else{
\nl \Return{$\mathtt{fail}$}
}
}
\Return{$\mathtt{iter}$}
}
\caption{IteratorFromState \label{algo:state}}
\end{algorithm}
We have implemented Algorithm~\ref{algo:state} as part of the \GAP-package $\mathtt{MatroidGeneration}$ \cite{MatroidGeneration}.
We show in \Cref{sec:parallel} how to evaluate recursive iterators in parallel.
We applied Algorithm~\ref{algo:state} to all multiplicity vectors with an integrally splitting characteristic polynomial and stored the resulting matroids in the database \cite{matroids_split} using the \GAP-package $\mathtt{ArangoDB}$\-$\mathtt{Interface}$ \cite{ArangoDBInterface}.
\begin{rmrk} \label{rmrk:sym_red}
Line~\ref{algo:ln:min_image} in Algorithm~\ref{algo:state} ensures that the iterator $\mathtt{iter}$ instantiated by the state $s$ does not create two isomorphic adjacency lists $A''$ and $A_2''$ with a common sublist $A$.
Furthermore, the lexicographically minimal element of the orbit of\footnote{$A \cup A'$ is considered in line~\ref{line:admissible}} $A \cup A'$ under $\Stab_{\Sym(n)}(A)$ is nothing but $A \cup A''$, namely the union of $A$ and the lexicographically minimal element $A''$ of the orbit of $A'$ under $\Stab_{\Sym(n)}(A)$ (considered in line~\ref{line:sym}).
This is due to the fact that sets in $A'$ are of different cardinality than those in $A$.
\end{rmrk}
\begin{rmrk} \label{rmrk:balanced}
A simple rank $3$ matroid $M$ (of size $n$) is \emph{weakly atom balanced on dependent coatoms}, i.e., every atom is contained in at most $\frac{n-1}{2}$ coatoms of cardinality at least $3$ (these are all dependent in $M$).
\end{rmrk}
\begin{proof}
Let $M$ be a simple rank $3$ matroid of size $n$ and consider a fixed atom $k$.
Let $F_1,\dots,F_\ell$ be the coatoms of size at least $3$ containing the atom $k$.
This implies $|F_i\setminus\{k\}|\ge 2$ for all $1\le i \le \ell$.
Furthermore, by definition of a simple matroid it holds that $F_i\cap F_j=\{k\}$ for all pairs $1\le i < j \le \ell$.
This means that each of the coatoms $F_1,\dots,F_\ell$ contains at least two of the $n-1$ atoms different from $k$, and these additional atoms are pairwise distinct across the coatoms.
This immediately yields $\ell \le \frac{n-1}{2}$ which proves the claim.
\end{proof}
\begin{rmrk}
The computationally difficult part of Algorithm~\ref{algo:state} is to find admissible completions of partial $2$-partitions in Line~\ref{line:A'}.
Na\"ively, one needs to loop over all subsets of $\{1,\dots,n\}$ of a given size and discard all those which contain a pair of atoms which is already contained in a previous coatom.
To speed up this part of the computation we use the following methods:
\begin{itemize}
\item Following \Cref{rmrk:balanced}, when adding coatoms of size $k_0\ge 3$ we can discard atoms that are already contained in $\frac{n-1}{2}$ coatoms.
\item We can assume that within one level the coatoms are ordered lexicographically.
Thus, we discard all subsets of $\{1,\dots,n\}$ that are lexicographically smaller than the last coatom of the same size before calling the search algorithm.
\item If at any step a transposition $(e_1 \: e_2)$ is in the stabilizer of all previous coatoms we treat the atoms $e_1$ and $e_2$ as equivalent at this step of the algorithm.
This means that we loop over subsets of $\{1,\dots,n\}$ modulo all such equivalences instead of the entire set $\{1,\dots,n\}$.
To ensure we do not miss relevant subsets we also need to consider subsets of these representatives of smaller sizes and fill up the resulting subsets to the correct size with atoms that are not contained in any coatom yet.
We can choose the first unused atoms in the lexicographic order for this matter.
\end{itemize}
\end{rmrk}
\begin{rmrk} \label{rmrk:min_img}
To calculate lexicographically minimal elements of orbits we use the \texttt{Ferret} and \texttt{Images} packages, by the third author:
\begin{itemize}
\item \texttt{Ferret} is a reimplementation of Jeffrey Leon's Partition Backtrack Algorithm \cite{Leo91}, with a number of extensions \cite{JPW18}.
\item \texttt{Images} provides algorithms which, given a permutation group \(G\) on a set \(\Omega\) and a set \(S \subseteq \Omega\), find the lexicographically minimal image of \(S\) under \(G\), or a canonical image of the orbit of \(S\) under \(G\). \texttt{Images} uses the algorithms of Jefferson et al.~\cite{JJPW18}.
\end{itemize}
For this project, both \texttt{Ferret} and \texttt{Images} were extended to be compatible with \textsf{HPC-GAP}.
\end{rmrk}
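As a toy illustration (ours; infeasible beyond very small $n$ and in no way a substitute for the backtrack search of \texttt{Ferret} and \texttt{Images}), the lexicographically minimal image under the full symmetric group can be computed by brute force as follows.
\begin{verbatim}
from itertools import permutations

def minimal_image_bruteforce(n, blocks):
    """Lexicographically minimal image of a list of blocks (subsets of
    {1,...,n}) under the full symmetric group Sym(n), by brute force over
    all n! permutations.  Only feasible for tiny n; the actual computation
    uses the backtrack search of the Ferret/Images packages and is applied
    to the stabilizer of the previously chosen blocks rather than to the
    full symmetric group."""
    def apply(perm, blocks):
        return sorted(sorted(perm[i - 1] for i in B) for B in blocks)
    return min(apply(p, blocks) for p in permutations(range(1, n + 1)))

# two relabelings of the same partial 2-partition have the same minimal image
A = [{1, 2, 3}, {3, 4, 5}]
B = [{2, 4, 5}, {1, 3, 5}]   # image of A under 1->2, 2->4, 3->5, 4->1, 5->3
assert minimal_image_bruteforce(5, A) == minimal_image_bruteforce(5, B) == [[1, 2, 3], [1, 4, 5]]
\end{verbatim}
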
\begin{rmrk}\label{rmrk:find_matroids}
In the database~\cite{matroids_split} we store lexicographically minimal elements of the list of coatoms computed by the \textsf{GAP} package \texttt{Images}.
This specific form enables the lookup of an arbitrary matroid in the database by computing its uniquely defined minimal image under the action of the symmetric group.
We have also used this lookup procedure to compute the inductive freeness property since this property depends by definition on the inductive freeness of matroids of smaller size (cf. \cref{rmrk:ind_div_using_db}).
\end{rmrk}
\begin{rmrk}\label{rm:not_terminated}
The computations to generate all simple rank $3$ matroids with integrally splitting characteristic polynomial terminated on all $695$ possible multiplicity vectors except for the two vectors $(m_3,m_4,m_5)=(21,3,1)$ and $(m_2,m_3,m_4,m_5,m_6)=(1,23,1,0,1)$.
The latter multiplicity vector is in any case not relevant for Terao's conjecture as any matroid with this multiplicity vector would not be coatom balanced (cf.~\cref{def:balanced}).
In \Cref{prop:no_matroid}, we prove that there are no matroids with either of the above multiplicity vectors.
Hence, these computations which did not terminate do not impose any restrictions on \Cref{thm:terao} or \Cref{tbl:simple_matroids}.
\end{rmrk}
\begin{prop}\label{prop:no_matroid}
Let $v_1$ and $v_2$ be the multiplicity vectors $(m_3,m_4,m_5)=(21,3,1)$ and $(m_2,m_3,m_4,m_5,m_6)=(1,23,1,0,1)$ respectively.
Then, there exists no simple rank $3$ matroid of size $14$ having either $v_1$ or $v_2$ as its associated multiplicity vector.
\end{prop}
\begin{proof}
Given an admissible partial $2$-partition $A$ and an atom $e$ we denote by $d_A(e)$ the \emph{deficiency} of $e$ in $A$ which is the number of atoms that are not contained in a common coatom with $e$ in $A$.
For both multiplicity vectors we investigate the admissible partial $2$-partitions that contain all coatoms of size greater than $3$.
We will argue based on the parity of their deficiencies that all of them cannot be completed to a matroid with the remaining coatoms of size $3$ (and one coatom of size $2$ in the case of $v_2$) which completes the proof.
Consider a step in which we add the coatom $C\coloneqq \{e_1,e_2,e_3\}$ to a list of coatoms $A$ and obtain a new list $A'$.
Then we have $d_{A'}(e_i)=d_A(e_i)-2$ for $1\le i\le 3$ and the remaining deficiencies remain constant.
In particular, the parity of all deficiencies is constant in this step.
In the case of the multiplicity vector $v_1$ we may assume, after relabeling, that any admissible partial $2$-partition containing all coatoms of size greater than $3$ contains the coatom $\left[1,\dots,5\right]$ and that the atom $4$ is not contained in any coatom of size $4$.
Thus, we have $d_A(4)=13-4=9$ for all such lists $A$.
Since this number is odd, whereas every atom of a complete $2$-partition has deficiency~$0$, the above discussion shows that no such partial list of coatoms for $v_1$ can be completed to a matroid.
To prove the remaining statement regarding the multiplicity vector $v_2$, we start our parallel matroid generation algorithm but terminate after completing all levels of size greater than $3$.
We obtain the two partial admissible $2$-partitions
\[
A_1 \coloneqq [ [ 1, 2, 3, 4, 5, 6 ], [ 1, 7, 8, 9 ]], \quad A_2 \coloneqq [ [ 1, 2, 3, 4, 5, 6 ], [ 7, 8, 9, 10 ] ].
\]
Now, we need to add coatoms of size $3$ and exactly one coatom of size $2$ to the admissible partial $2$-partitions $A_{1},A_{2}$.
An argument analogous to the first case shows that the number of atoms with odd deficiency in the lists $A_{1},A_{2}$ must be exactly two, namely the two atoms of the single coatom of size $2$ that still has to be added.
Computing deficiencies of the atoms in the lists $A_1$ and $A_2$ yields that $1$ is the only atom with an odd deficiency in $A_1$ whereas all atoms in $A_2$ have an even deficiency.
Thus, there exists no matroid with multiplicity vector~$v_2$.
\end{proof}
\section{How to Decide Representability of a Matroid?} \label{sec:representability}
The Basis Extension Theorem for matroids (cf.~\Cref{rm:basis}) implies that the (possibly empty) space $\mathcal{R}(M)$ of \emph{all} representations (over some unspecified field $\bF$) of a matroid $M = (E, \F)$ is an \emph{affine variety}, namely an affine subvariety $V(I') \subseteq \AA_\Z^{rn + 1}$, where $r$ is the rank of $M$ and $n$ its number of atoms.
More precisely, let $\AA^{rn+1}_\Z \coloneqq \Spec R[d]$, where $R \coloneqq \Z[ a_{ij} \mid i = 1, \ldots, r,\, j = 1, \ldots, n ]$ and $d$ a further indeterminate.
To describe the ideal $I'$ set $A \coloneqq (a_{ij}) \in R^{r \times n}$.
For a subset $S \subseteq E$ denote by $A_S$ the submatrix of $A$ with columns in $S$.
Further, let $\mathcal{B}(M) = \{ B_1, \ldots, B_b \}$ be the set of bases of $M$.
Then
\[
I' = \left\langle \det(A_D) \mid D \subseteq E \mbox{ dependent}, |D|=r \right\rangle + \left\langle 1 -d \prod_{B \in \mathcal{B}(M)} \det(A_B) \right\rangle \unlhd R[d] \mbox{.}
\]
It follows that $M$ is representable (over some field $\bF$) if and only if $1 \notin I'$.
This ideal membership problem can be decided by computing a Gröbner basis of $I'$.
This is basically the algorithm suggested in \cite{Oxley2011}.
If the ideal $I'$ is a maximal ideal in $R[d]$ the moduli space of representations $\Spec R[d]/I'$ of the matroid $M$ contains only one point.
In this case, the matroid $M$ has a unique representation (up to equivalence) and we call $M$ \emph{uniquely representable over $\Spec \Z$}.
However, it is computationally more efficient to represent $\mathcal{R}(M)$ as a
quasi-affine set $V(I) \setminus V(J) \subseteq \AA_\Z^{rn} = \Spec R$, where $J$ is a principal ideal.
Denote by $J_S \coloneqq \langle \det( A_S ) \rangle$ the principal ideal generated by the maximal minor corresponding to $S$, provided $|S| = r$.
Then
\begin{align*}
I &= \sum \{ J_D \mid D \subseteq E \mbox{ dependent}, |D|=r \}, \\
J &= \prod \{ J_B \mid B \in \mathcal{B}(M) \}.
\end{align*}
In particular, $J$ is a principal ideal.
It follows that $M$ is representable (over some field $\bF$) iff $\det(A_S) \notin \sqrt{I}$ for every basis $S \subseteq E$.
The ideal $I$ can be replaced by the saturation
\[
\widetilde{I} \coloneqq I : \left(\prod_{B \subseteq E \text{ basis}}\det(A_B)\right)^\infty = I : \det(A_{B_1})^\infty : \cdots : \det(A_{B_b})^\infty \mbox{.}
\]
Then $M$ is representable iff $1 \notin \widetilde{I}$.
For the Gröbner basis computations over $\Z$ we used \textsc{Singular} \cite{Singular412} from within the GAP package $\mathtt{ZariskiFrames}$ \cite{ZariskiFrames}, which is part of the $\mathtt{CAP/homalg}$ project \cite{homalg-project,BL,GPSSyntax}.
We used a more efficient approach which does not involve working over $\AA^{rn+1}_\Z$ but fixes certain entries of the matrix $A$ to $0$ or $1$ as described in~\cite[p. 184]{Oxley2011}.
Firstly, we choose a basis $B\in \mathcal{B}(M)$ and fix the corresponding submatrix $A_B$ to be the unit matrix.
Without loss of generality we can assume $B=\{1,\dots,r\}$.
Secondly, we consider the fundamental circuits with respect to this basis $B$, i.e., for each $k\in E\setminus B$ let $C(k,B)$ be the unique circuit of the matroid $M$ contained in $B\cup \{k\}$.
The entries of $A$ in the column $k\in E\setminus B$ whose rows correspond to basis elements not appearing in $C(k,B)$ can be fixed to $0$.
Lastly, the first nonzero entry in every column and the first nonzero entry in every row of $A$ can be taken as $1$ by column and row scaling respectively.
We have added this algorithm to $\mathtt{alcove}$ \cite{alcove}.
For another approach to the rational moduli space cf.~\cite{Cun11}.
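As a toy illustration of this normalization (our own example, not taken from the database), consider the simple rank $3$ matroid on $E=\{1,2,3,4\}$ whose only dependent $3$-subset is $\{1,2,3\}$. Choosing the basis $B=\{1,2,4\}$ and fixing $A_B$ to the unit matrix, the fundamental circuit $C(3,B)=\{1,2,3\}$ forces the entry of column $3$ in the row of the basis element $4$ to be $0$, and the row and column scalings of the last step leave a single indeterminate $y$:
\[
A=\begin{pmatrix} 1 & 0 & 1 & 0 \\ 0 & 1 & y & 0 \\ 0 & 0 & 0 & 1 \end{pmatrix}, \qquad
I=\left\langle \det A_{\{1,2,3\}} \right\rangle=\langle 0\rangle, \qquad
J=\left\langle \det A_{\{1,2,4\}}\det A_{\{1,3,4\}}\det A_{\{2,3,4\}} \right\rangle=\langle y\rangle \mbox{.}
\]
After this normalization the ambient space is $\Spec\Z[y]$, the saturation is $\widetilde{I}=\langle 0\rangle : y^\infty=\langle 0\rangle$, so $1\notin\widetilde{I}$ and the matroid is representable; its space of representations in this normal form is the quasi-affine set $V(I)\setminus V(J)=\Spec\Z[y]\setminus V(y)$.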
\section{Proof of \Cref{thm:terao}} \label{sec:proof}
If a matroid has an atom which is contained in many coatoms or, conversely, a coatom which contains many atoms, then any realization satisfies Terao's conjecture.
This statement will be a crucial ingredient in the proof of~\Cref{thm:terao}.
To formalize it we make the following definition.
\begin{defn}\label{def:balanced}
Let $M$ be a simple matroid of rank $3$ and assume $\chi_M(t)=(t-1)(t-a)(t-b)$ for some integers $a,b\in\Z$ such that $a\le b$.
\begin{itemize}
\item We call $M$ \textbf{atom balanced} if each atom is contained in at most $a$-many coatoms.
\item We call $M$ \textbf{coatom balanced} if each coatom contains strictly less than $a$-many atoms.
\item If $M$ is both atom and coatom balanced we call it \textbf{strongly balanced}.
\end{itemize}
\end{defn}
The importance of balancedness in our context stems from the next proposition.
\begin{prop}\label{prop:balancedness}
Let $M$ be a simple matroid of rank $3$ and assume $\chi_M(t)=(t-1)(t-a)(t-b)$ for some integers $a,b\in\Z$ such that $a\le b$.
If $M$ is \emph{not} strongly balanced then the freeness of any arrangement of hyperplanes representing $M$ can be decided combinatorially.
These representations therefore satisfy Terao's freeness conjecture.
\end{prop}
\begin{proof}
To begin assume that $M$ is not atom balanced for some atom $A$ which is contained in $n_{M,A}$ many coatoms with $n_{M,A} > a$.
Then, Theorem 1.1 and Corollary 1.2 in~\cite{Abe14} show that any representation of $M$ is free if and only if $n_{M,A}\in \{a+1,b+1\}$.
Instead assume that $M$ is not coatom balanced.
In this case, Lemma 2.10 in~\cite{ACKN16} shows that $M$ cannot be atom balanced either which finishes the proof by the first part.
\end{proof}
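For illustration (an example of ours), consider the braid arrangement $\A_3$ of \Cref{fig:braid_arr} with $\chi_{\A_3}(t)=(t-1)(t-2)(t-3)$, i.e., $a=2$ and $b=3$. Every atom lies on exactly $3>a$ coatoms, so the underlying matroid is not atom balanced, and the first part of the above proof applies with $n_{M,A}=3\in\{a+1,b+1\}$; consistently, every representation of this matroid, in particular the braid arrangement itself, is free.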
Now we have all ingredients to prove \cref{thm:terao}.
\begin{proof}[Proof of \cref{thm:terao}]
It suffices to check Terao's freeness conjecture for all representations of matroids of size $14$ which do not fall into any of the following classes of arrangements for which Terao's conjecture is known to be true:
\begin{itemize}
\item If the characteristic polynomial of the arrangement is not integrally splitting the arrangement is combinatorially nonfree by Terao's Factorization Theorem~\cite{Ter81}.
\item Representations of nonstrongly balanced simple rank $3$ matroids satisfy Terao's conjecture by \cref{prop:balancedness}.
\item Any representation of an inductively free matroid is a free arrangement~\cite{Ter80}.
\item If a matroid has a unique representation over the integers\footnote{i.e., the moduli space $\Spec R / \widetilde{I} \to \Spec \Z$ of representations is $\Spec \mathbb{F}_p \to \Spec \Z$, a singleton.} it trivially satisfies Terao's conjecture.
\end{itemize}
Querying the database \cite{matroids_split}, we find that there are
\begin{itemize}
\item $783280$ rank $3$ matroids of size $14$ with integrally splitting characteristic polynomial,
\item $1574$ thereof are representable over some field,
\item $174$ thereof are not inductively free,
\item $64$ thereof are strongly balanced.
\end{itemize}
All of these $64$ remaining matroids have $\Spec \mathbb{F}_5$ as their moduli space, i.e., each of them is only representable over fields of characteristic $5$ and any such representation is equivalent to a unique representation over $\mathbb{F}_5$; hence they are all irrelevant for Terao's freeness conjecture.
This completes the proof.
\end{proof}
\begin{rmrk}
The situation of matroids of size $14$ is surprisingly simple in that respect.
This is not the case for matroids of smaller size since there are $9$ matroids which avoid all of the above classes and exhibit a nontrivial moduli space of representations (among them the example of a free but not rigid arrangement of size $13$ described in~\cite{ACKN16}).
We will describe their moduli spaces over $\Spec \Z$ and the nonfree locus therein in a subsequent article \cite{BK} which will establish Terao's conjecture for rank $3$ arrangements with up to $14$ hyperplanes in any characteristic.
\end{rmrk}
\appendix
\section{Rooted Trees} \label{sec:trees}
In this Appendix we discuss rooted trees and give simple examples for their use as a tool to iterate over desired sets.
Instead of the classical definition of rooted trees we use an alternative mathematical model of rooted trees in which one can easily interpret the data structure of tree-iterators and their evaluations which we introduce in \Cref{sec:iterators}.
Expressed in this model, the (parallelized) evaluation of tree-iterators (Algorithm~\ref{algo:peri}) can then be understood as a limiting process.
For the design of our algorithms we represent a finite \textbf{rooted forest} (or set of \textbf{rooted trees}) as a finite sequence of the form
\[
T_\bullet: T_0 \xleftarrow{\phi_1} T_1 \xleftarrow{\phi_2} T_2 \xleftarrow{\phi_3} \cdots \xleftarrow{\phi_d} T_d \mbox{,}
\]
where $T_i$ is the finite set of \textbf{vertices} of \textbf{depth} $i$.
We call $d$ the \textbf{depth} of $T_\bullet$.
In particular, $T_0$ is the set of \textbf{roots}.
We denote the set of \textbf{leaves} of $T_\bullet$ by $T := \lim T_\bullet$, which is the set of non-images in $T_\bullet$.\footnote{The notation $\lim T_\bullet$ can be justified as follows: $T_\bullet$ is a sequential inverse system in the category of finite sets with the set of leaves as its limiting object.}
As mentioned in the introduction we then say that $T_\bullet$ \textbf{classifies} $T$.
A forest of rooted trees can be understood as a single rooted tree by adding a constant map $T_{-1} := \{*\} \leftarrow T_0$ and then increasing all indices by $1$.
\begin{conv}
So without loss of generality we will henceforth assume $T_\bullet$ to be a rooted tree of depth $d$, i.e., $T_0 = \{*\}$ a singleton.
\end{conv}
If all maps in the inverse system are surjective then the natural map $T_d \leftarrow T$ (which is part of the limit datum) is bijective and the set of leaves is $T = T_d$.
In this case all leaves have the same depth $d$ and we call $T_\bullet$ \textbf{uniform (of depth $d$)}.
More generally, we call a tree $T_\bullet$ \textbf{locally uniform} if each vertex that has a leaf as a child only has leaves as children, i.e., if for each vertex $v$ of depth $i$ the following holds: $\phi_i^{-1}(v) \cap T \neq \emptyset \implies \phi_i^{-1}(v) \subseteq T$.
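The following small Python sketch (ours) makes this representation concrete: a rooted tree is stored as a list of vertex levels together with the parent maps, and the leaves are exactly the non-images.
\begin{verbatim}
def leaves(T, phi):
    """T = [T_0, ..., T_d] lists the vertices by depth; phi[i] is a dict
    sending each vertex of depth i+1 to its parent of depth i (the map
    phi_{i+1}: T_i <- T_{i+1}).  Returns the leaves lim T_bullet, i.e. all
    vertices that are not the image of a deeper vertex."""
    result = []
    for i, level in enumerate(T):
        children_parents = set(phi[i].values()) if i < len(phi) else set()
        result.extend(v for v in level if v not in children_parents)
    return result

# a rooted tree of depth 2 that is not uniform: the vertex "b" is a leaf of
# depth 1 (it is also not locally uniform, since "*" has both a leaf and a
# non-leaf child)
T   = [["*"], ["a", "b"], ["a1", "a2"]]
phi = [{"a": "*", "b": "*"}, {"a1": "a", "a2": "a"}]
assert sorted(leaves(T, phi)) == ["a1", "a2", "b"]
\end{verbatim}
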
Many inequivalent representations of such rooted trees classifying the same set $T$ might exist:
\Cref{exmp:matched,exmp:magma} are inequivalent families of rooted trees $T^{(n)}_\bullet$ (indexed by a natural number $n$) classifying the same family of sets $T^{(n)}$ of cardinality $C_n$, the $n$-th Catalan number.
\begin{exmp}[Matched parentheses] \label{exmp:matched}
For $i \in \N$ denote by $T_i$ the set of all strings of $i+1$ correctly matched pairs of parentheses:
\[
T_0 := \{ () \}, T_1 := \{ (()), ()() \}, T_2 := \{ ()(()), (()()), ((())), (())(), ()()() \}, \ldots
\]
Define $T_{i-1} \xleftarrow{\phi_i} T_i$ to be the map removing the leftmost\footnote{or rightmost, \ldots} pair of parentheses containing no other ones.
For a fixed $n \in \N$ the sequence $T_\bullet: T_0 \xleftarrow{\phi_1} T_1 \xleftarrow{\phi_2} T_2 \xleftarrow{\phi_3} \cdots \xleftarrow{\phi_{n-1}} T_{n-1}$ is a finite rooted tree of uniform depth $n-1$.
The cardinality of the set of leaves $\lim T_\bullet = T_{n-1}$ is the $n$-th Catalan number\footnote{Cf.~(\url{http://oeis.org/A000108}).} $C_n = \binom{2n}{n} - \binom{2n}{n+1} = \frac{1}{n+1}\binom{2n}{n}$.
\end{exmp}
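A Python sketch (ours) of this example:
\begin{verbatim}
from itertools import product

def matched(k):
    """All strings consisting of k correctly matched pairs of parentheses."""
    def ok(word):
        depth = 0
        for c in word:
            depth += 1 if c == "(" else -1
            if depth < 0:
                return False
        return depth == 0
    return [''.join(w) for w in product("()", repeat=2 * k) if ok(w)]

def phi(s):
    """Remove the leftmost pair of parentheses containing no other ones."""
    return s.replace("()", "", 1)

T2 = matched(3)                     # T_2 in the notation of the example (3 pairs)
assert len(T2) == 5                 # the Catalan number C_3
assert set(map(phi, T2)) == set(matched(2))   # phi_2: T_1 <- T_2 is surjective
\end{verbatim}
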
\begin{exmp}[Magma evaluation] \label{exmp:magma}
For $n \in \N_{>0}$ denote by $T^{(n)}$ the set of all possible ways to evaluate the product of the sorted list of free generators of a free magma $M_n = \langle a_0,\ldots, a_n \rangle$ of rank $n+1$:
\[
\begin{array}{c|c|c|c}
n & 1 & 2 & 3 \\
\hline \hline
M_n \rule{0em}{1.2em} & \langle a, b \rangle & \langle a, b, c \rangle & \langle a, b,c, d \rangle \\[0.3em]
T^{(n)} & {\color{gray} \{} ab {\color{gray} \}} & {\color{gray} \{} (ab)c, a(bc) {\color{gray} \}} & {\color{gray} \{} ((ab)c)d, (a(bc))d, (ab)(cd), a((bc)d), a(b(cd)) {\color{gray} \}}
\end{array}
\]
The set $T_i^{(n)}$ for $i \in \N$ arises from $T^{(n)}$ by deleting all pairs of parentheses of depth higher than $i$.
The maps $T^{(n)}_{i-1} \xleftarrow{\phi_i} T^{(n)}_i$ are evident.
\[
\begin{array}{c|c|c|c}
n & 1 & 2 & 3 \\
\hline \hline
T^{(n)}_0 \rule{0em}{1.2em} & {\color{gray} \{} ab {\color{gray} \}} & {\color{gray} \{} {\color{gray} abc} {\color{gray} \}} & {\color{gray} \{} {\color{gray} abcd} {\color{gray} \}} \\[0.3em]
T^{(n)}_1 & & {\color{gray} \{} (ab)c, a(bc) {\color{gray} \}} & {\color{gray} \{} {\color{gray} (abc)d}, (ab)(cd), {\color{gray} a(bcd)} {\color{gray} \}} \\[0.3em]
T^{(n)}_2 & & & {\color{gray} \{} ((ab)c)d, (a(bc))d, a((bc)d), a(b(cd)) {\color{gray} \}}
\end{array}
\]
The gray entries in the above table are the internal nodes of the rooted tree $T^{(n)}_\bullet$.
The latter is not locally uniform for $n\geq 3$.
The set of leaves $\lim T^{(n)}_\bullet$ coincides with $T^{(n)}$, by construction.
The cardinality of $T^{(n)}$ is again the $n$-th Catalan number $C_n$.
\end{exmp}
In the following example the sets of leaves are themselves sets of rooted trees.
We hope this does not cause confusion.
\begin{exmp}[Phylogenetic trees with labeled leaves] \label{exmp:phylo}
A phylogenetic tree is a labeled rooted tree.
A phylogenetic tree with $n \in \N_{>0}$ leaves corresponds to a total partition of $n$.
Let $T^{(n)}$ be the set of phylogenetic trees with $n$ (labeled) leaves.\footnote{Cf.~(\url{http://oeis.org/A000311}).}
{\scriptsize
\[
\begin{array}{c|c|c|c}
n & 1 & 2 & 3 \\
\hline \hline
T^{(n)} \rule{0em}{1.7em} & {\color{gray} \Big\{} \{1\} {\color{gray} \Big\}} & {\color{gray} \Big\{} \{\{1\},\{2\}\} {\color{gray} \Big\}} & {\color{gray} \Big\{} \{\{1\},\{2\},\{3\}\}; \{\{1\},\{\{2\},\{3\}\}\}; \{\{2\},\{\{1\},\{3\}\}\}; \{\{3\},\{\{1\},\{2\}\}\} {\color{gray} \Big\}}
\end{array}
\]
}
Truncating a phylogenetic tree at depth $i$ means to contract all edges below depth $i$ and multi-label the new leaves at depth $i$ by all their child leaves.
For $i \in \N$ denote by $T^{(n)}_i$ the set of all truncations of trees in $T^{(n)}$ at depth $i$.
Again, all maps $T^{(n)}_{i-1} \xleftarrow{\phi_i} T^{(n)}_i$ are evident.
{\small
\[
\begin{array}{c|c|c|c}
n & 1 & 2 & 3 \\
\hline \hline
T^{(n)}_0 \rule{0em}{1.7em} & {\color{gray} \Big\{} \{1\} {\color{gray} \Big\}} & {\color{gray} \Big\{} {\color{gray} \{1,2\}} {\color{gray} \Big\}} & {\color{gray} \Big\{} {\color{gray} \{1,2,3\}} {\color{gray} \Big\}} \\
T^{(n)}_1 & & {\color{gray} \Big\{} \{\{1\},\{2\}\} {\color{gray} \Big\}} & {\color{gray} \Big\{} \{\{1\},\{2\},\{3\}\}; {\color{gray} \{\{1\},\{2,3\}\}}; {\color{gray} \{\{2\},\{1,3\}\}}; {\color{gray} \{\{3\},\{1,2\}\}} {\color{gray} \Big\}} \\
T^{(n)}_2 & & & {\color{gray} \Big\{} \{\{1\},\{\{2\},\{3\}\}\}; \{\{2\},\{\{1\},\{3\}\}\}; \{\{3\},\{\{1\},\{2\}\}\} {\color{gray} \Big\}}
\end{array}
\]
}
The rooted tree $T^{(n)}_\bullet$ is not locally uniform for $n \geq 3$.
The set of leaves $\lim T^{(n)}_\bullet$ coincides with $T^{(n)}$, by construction.
\end{exmp}
Factoring out symmetries of rooted trees again yields rooted trees:
\begin{rmrk}[Rooted trees of group orbits] \label{exmp:orbit}
Let $G$ be a group.
A rooted tree $T_\bullet$ is called a \textbf{rooted $G$-tree} if each $T_i$ is a $G$-set and all maps $\phi_i$ are $G$-equivariant.
A rooted $G$-tree $T_\bullet$ induces a rooted tree of orbits $T_\bullet / G$.
Furthermore $\lim (T_\bullet/G) = \lim(T_\bullet)/G$, naturally.
\end{rmrk}
\begin{exmp}[Phylogenetic trees with nonlabeled leaves]
Applying \Cref{exmp:orbit} to the previous \Cref{exmp:phylo} yields a rooted tree classifying phylogenetic trees with unlabeled leaves.
More precisely, the action of $\Sym(n)$ on $\{1,\ldots,n\}$ turns the rooted tree $T_\bullet$ in \Cref{exmp:phylo} into a rooted $\Sym(n)$-tree.
The rooted tree of orbits $T_\bullet/\Sym(n)$ then classifies $T/\Sym(n)$ which is the set of phylogenetic trees with unlabeled leaves.\footnote{Cf.~(\url{http://oeis.org/A000669}).}
\end{exmp}
Our primary family of examples of rooted trees was discussed in \Cref{sec:matroid_iter}; these trees have simple rank $3$ matroids as their sets of leaves.
\section{Recursive Iterators and Tree-Iterators} \label{sec:iterators}
In this appendix we introduce the data structure of so-called tree-iterators, which we use to recursively iterate over the vertices of a rooted tree.
This data structure is a central ingredient of all algorithms.
\begin{defn} \label{defn:recursive_iterator}
Let $T$ be a set.
\begin{itemize}
\item A \textbf{recursive iterator $t$ within $T$} is an iterator which upon popping produces either $\mathtt{Next}(t) = \mathtt{fail} \notin T$ or a \textbf{child} $\mathtt{Next}(t)$ which is either
\begin{enumerate}
\item a new recursive iterator within $T$, or
\item an element of $T$.
\end{enumerate}
If the pop result $\mathtt{Next}(t)$ is $\mathtt{fail}$ then any subsequent pop result of $t$ remains $\mathtt{fail}$.
We call $T$ the \textbf{ambient set} of $t$.
\item A \textbf{full evaluation} of a recursive iterator recursively pops all recursive iterators until each of them pops $\mathtt{fail}$.
\item If $t$ is a recursive iterator then the subset of elements $T(t) \subseteq T$ produced upon full evaluation is called the \textbf{set of leaves of $t$ in $T$}.
We say that $t$ \textbf{classifies} $T(t) \subseteq T$.
\item A recursive iterator is called \textbf{locally uniform} if every descendant pops either only recursive iterators or only leaves (apart from $\mathtt{fail}$).
\item A recursive iterator $t$ within $T$ is called a \textbf{tree-iterator} if upon full evaluation each element of $T(t) \subseteq T$ is the pop result of exactly one descendant of $t$.
\end{itemize}
\end{defn}
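The following toy model (ours, not the \GAP implementation) illustrates this data structure together with a sequential, depth-first full evaluation.
\begin{verbatim}
FAIL = object()   # models the special value `fail`

class ListIterator:
    """A recursive iterator whose children are given by a fixed list; a child
    is either another ListIterator or a leaf (any other value).  Once the
    list is exhausted every further pop returns FAIL."""
    def __init__(self, children):
        self._children = list(children)
    def next(self):
        return self._children.pop(0) if self._children else FAIL

def full_evaluation(t):
    """Depth-first full evaluation: pop every descendant iterator until each
    of them returns FAIL; collect the produced leaves, i.e. the set T(t)."""
    leaves, stack = [], [t]
    while stack:
        r = stack[-1].next()
        if r is FAIL:
            stack.pop()
        elif isinstance(r, ListIterator):
            stack.append(r)
        else:
            leaves.append(r)
    return leaves

# a tree-iterator with leaves 1, 2, 3 and one dead end (an iterator without children)
t = ListIterator([ListIterator([1, 2]), ListIterator([]), 3])
assert full_evaluation(t) == [1, 2, 3]
\end{verbatim}
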
In order to iterate over a tree $T_\bullet$ with set of leaves $T = \lim T_\bullet$ it is somewhere between convenient and almost unavoidable to construct a tree-iterator $t$ within $T$ which might iterate over a larger tree $T^t_\bullet$ whose set of leaves $T^t = \lim T^t_\bullet$ is larger than $T(t)$; the surplus consists of the dead ends, i.e., all tree-iterators which are descendants of $t$ but have no descendants of their own.
In our application to the classification of simple rank $3$ matroids the dead ends are the admissible partial $2$-partitions which cannot be completed to a proper $2$-partition (cf.~\Cref{defn:admissible}).
\begin{rmrk} \label{rmrk:B2}
A tree-iterator $t$ within $T$ or any of its descendants can be understood as a vertex of the rooted tree
\[
T^t_\bullet: T^t_0 \coloneqq \{t\} \xleftarrow{\phi^t_1} T^t_1 \xleftarrow{\phi^t_2} T^t_2 \xleftarrow{\phi^t_3} \cdots \xleftarrow{\phi^t_d} T^t_d \mbox{,}
\]
inductively described as follows:
Let $t'$ be any descendant of $t$ interpreted as an element $t' \in T^t_i$.
If $\mathtt{Next}(t') = \mathtt{fail}$ then $t'$ has no (further) preimages under $\phi^t_{i+1}$.
Otherwise each evaluation $\mathtt{Next}(t')$ is an element of $T^t_{i+1}$ and a preimage of $t'$ under $\phi^t_{i+1}$.
We call $T^t_\bullet$ the \textbf{tree associated to $t$}.
Its set of leaves $T^t \coloneqq \lim T^t_\bullet$ is the union of $T(t) \subseteq T$ and the set of all tree-iterators which are descendants of $t$ but have no descendants of their own.
\end{rmrk}
\begin{rmrk} \label{rmrk:B3}
Given a tree-iterator $t$ within $T$ with corresponding tree $T^t_\bullet$ as in \Cref{rmrk:B2} we define the subtree
\[
T_\bullet: T_0 \xleftarrow{\phi_1} T_1 \xleftarrow{\phi_2} T_2 \xleftarrow{\phi_3} \cdots \xleftarrow{\phi_d} T_d
\]
with $\lim T_\bullet = T(t) \subseteq T$, i.e., the subtree $T_\bullet \subseteq T^t_\bullet$ consisting of the leaves in $T(t)$ and all their predecessors.
We call $T_\bullet$ the \textbf{tree of relevant leaves of $t$}.
In order to iterate over a tree $T_\bullet$ with set of leaves $T = \lim T_\bullet$ we use the freedom to construct a tree-iterator $t$ within $T$ having $T_\bullet$ as its tree of relevant leaves, even though its associated tree $T^t_\bullet$ might be considerably larger.
\end{rmrk}
\section{Parallel Evaluation of Recursive Iterators} \label{sec:parallel}
In this Appendix we describe the three algorithms
\begin{itemize}
\item \textbf{ParallellyEvaluateRecursiveIterator} (Algorithm~\ref{algo:peri}),
\item \textbf{EvaluateRecursiveIterator} (Algorithm~\ref{algo:eri}),
\item \textbf{LeafIterator} (Algorithm~\ref{algo:leaf-iterator}),
\end{itemize}
which constitute our general parallelization scheme for recursive iterators.
They are independent of any specific recursive iterator (e.g., the one defined by \textbf{IteratorFromState} in Algorithm~\ref{algo:state}).
Furthermore, the recursive iterators can be implemented in classical sequential code, i.e., this organization requires no prior knowledge of parallel programming in order to implement a recursive iterator and evaluate it in parallel.
We have implemented the three algorithms in the High-Performance-Computing (HPC) version\footnote{Since version 4.9.1 \GAP can be compiled with the option \verb+--enable-hpcgap+.} of \GAP 4.9.2 \cite{GAP492} as part of the \GAP-package $\mathtt{ParallelizedIterators}$ \cite{ParallelizedIterators}.
The combination of these three algorithms takes a recursive iterator $t$ (within $T$) as input and returns an iterator $\ell(t)$ which iterates over the set of leaves $T(t) \subseteq T$.
We call $\ell(t)$ the \textbf{leaf-iterator} associated to $t$.
If $t$ is a tree-iterator then $\ell(t)$ produces no duplicates.
We now briefly explain the role of each of the three algorithms and the way they interact:
Algorithm~\ref{algo:leaf-iterator} is executed in the main thread with a recursive iterator as input.
In the main application of this paper the input is the tree-iterator $t^{(m_k)}$ of all rank $3$ matroids of a given multiplicity vector $(m_k)$, constructed using Algorithm~\ref{algo:state}.
Algorithm~\ref{algo:leaf-iterator} then initializes a global FIFO $L$ of leaves and invokes Algorithm~\ref{algo:peri}.
The latter creates a shared priority queue $P$, launches as many workers (threads) as specified by the user, triggers Algorithm~\ref{algo:eri} in each of them, and then terminates.
The shared\footnote{Implementations of priority queues exist both for shared memory and distributed operation. We have chosen to use a simple shared memory implementation, as contention for our workloads is very low, so we do not have to worry about the priority queue becoming a serialization bottleneck.
} priority queue
stores the list of recursive iterators still to be searched along with their priority, which in our case is the depth at which they were created.
The instance of Algorithm~\ref{algo:eri} running in each thread asks for the highest priority iterator $t'$ in the priority queue $P$ and evaluates $t'' \coloneqq \mathtt{Next}(t')$.
If $t''$ is an element of $T$ then $t''$ is added to the FIFO $L$ of leaves and $t'$ is returned to $P$ with the same priority.
If $t''$ is again an iterator then $t'$ and $t''$ are returned to $P$; $t'$ is returned with the same priority and $t''$ with the priority of $t'$ increased by one.
Finally if $t'' = \mathtt{fail}$ then nothing is done.
After any of the three actions the instance of Algorithm~\ref{algo:eri} starts over again.
In particular, our use of a priority queue avoids the need for a central process supervising the workers.
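The following self-contained Python sketch (ours) mimics this interplay; it is \emph{not} the \HPCGAP implementation of $\mathtt{ParallelizedIterators}$: recursive iterators are modeled as objects with a method \texttt{next()}, and termination is handled by a job counter and ``poison pills'' instead of the semaphore of Algorithm~\ref{algo:eri}.
\begin{verbatim}
import threading, queue

FAIL = object()          # models the special value `fail`

def parallel_leaves(t, n_workers=4):
    """Yield the leaves T(t) of the recursive iterator t, evaluated by
    n_workers threads sharing a priority queue of pending iterators."""
    P = queue.PriorityQueue()   # entries (negated priority, tie-breaker, iterator)
    L = queue.Queue()           # FIFO of produced leaves
    jobs = [1]                  # number of iterators in process or pending
    tick = [0]                  # tie-breaker, so iterators are never compared
    lock = threading.Lock()

    def push(it, priority):
        with lock:
            tick[0] += 1
            P.put((-priority, tick[0], it))

    push(t, 0)

    def worker():
        while True:
            neg_prio, _, it = P.get()
            if it is None:                   # poison pill: terminate this worker
                return
            r = it.next()
            if r is FAIL:                    # iterator exhausted, do not return it
                with lock:
                    jobs[0] -= 1
                    done = (jobs[0] == 0)
                if done:                     # nothing in process or pending any more
                    L.put(FAIL)
                    for _ in range(n_workers):
                        P.put((1, 0, None))  # wake up and stop all workers
                continue
            if hasattr(r, "next"):           # r is a new recursive iterator
                with lock:
                    jobs[0] += 1
                push(r, -neg_prio + 1)       # children get higher priority (their depth)
            else:                            # r is a leaf
                L.put(r)
            push(it, -neg_prio)              # return it to the queue

    threads = [threading.Thread(target=worker) for _ in range(n_workers)]
    for th in threads:
        th.start()
    while True:                              # the associated leaf-iterator
        leaf = L.get()
        if leaf is FAIL:
            break
        yield leaf
    for th in threads:
        th.join()
\end{verbatim}
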
\begin{algorithm}[H]
\SetKwIF{If}{ElseIf}{Else}{if}{then}{elif}{else}{}
\DontPrintSemicolon
\SetKwProg{ParallellyEvaluateRecursiveIterator}{ParallellyEvaluateRecursiveIterator}{}{}
\LinesNotNumbered
\KwIn{
\begin{itemize}
\item A recursive iterator $t$
\item a number $n \in \N_{>0}$ of workers
\item a global FIFO $L = ()$, accessible by the subprocesses of the workers
\end{itemize}
}
\KwOut{no return value; the side effect is to fill the FIFO $L$ with the leaves in $T(t)$}
\ParallellyEvaluateRecursiveIterator(){($t$, $n$, $L$)}{
\nl Initialize a farm $w$ of $n$ workers $w_1,\ldots, w_n$\;
\nl Initialize a \emph{shared} priority queue $P$ of iterators and set $P = ()$\;
\nl Initialize a \emph{shared} counter $j$ of jobs in process and pending and set $j = 1$\;
\nl Initialize a \emph{shared} semaphore $s \geq 0$ and set $s = 0$\;
\nl $P := ( (t,0) )$\;
\nl \For{$i=1,\ldots, n$}{
\nl $\mathtt{EvaluateRecursiveIterator}(n,L,P,s,j)$ within worker $w_i$\;
}
\nl $\mathtt{SignalSemaphore}(s)$\;
\nl \Return{none}\;
}
\caption{ParallellyEvaluateRecursiveIterator \label{algo:peri}}
\end{algorithm}
Algorithm~\ref{algo:peri} gets as input a recursive iterator, a number $n$ of workers, and a global FIFO $L$.
It initializes a shared priority queue $P$, adds $t$ to $P$ as the only job with priority $0$, and triggers $n$ workers (running in threads), each executing Algorithm~\ref{algo:eri}.
If a worker produces a leaf, it writes it to the FIFO $L$.
\begin{algorithm}[H]
\SetKwIF{If}{ElseIf}{Else}{if}{then}{elif}{else}{}
\DontPrintSemicolon
\SetKwProg{EvaluateRecursiveIterator}{EvaluateRecursiveIterator}{}{}
\LinesNotNumbered
\KwIn{
\begin{itemize}
\item a number $n \in \N_{>0}$ of all workers
\item a global FIFO $L = ()$, accessible by the other $n-1$ workers
\item a shared priority queue $P$
\item a shared semaphore $s$
\item a shared counter $j$ of jobs in process or pending
\end{itemize}
}
\KwOut{no return value; the side effect is to evaluate the recursive iterators in the priority queue which get processed by this worker and save the leaves in the FIFO $L$}
\EvaluateRecursiveIterator(){($n$, $L$, $P$, $s$, $j$)}{
\nl \While{$\mathtt{true}$}{
\nl $\mathtt{WaitSemaphore}(s)$ \label{line:sem} \tcp*{wait until the semaphore $s>0$}
\nl \If(\tcp*[f]{if the priority queue is empty}){$P = ()$}{
\nl \Return{none} \label{line:return_none} \tcp*{terminate the worker}
}
\nl $(t_i,p_{t_i}) :=\mathtt{Pop}(P)$ \tcp*{get the highest priority job from $P$}
\nl $r_i \coloneqq \mathtt{Next}(t_i)$ \label{line:next} \tcp*{pop the recursive iterator $t_i$}
\nl \uIf(\tcp*[f]{the result $r_i$ is a leaf}){$r_i \in T$}{
\nl $\mathtt{Add}(L,r_i)$ \label{line:AddToFIFO} \tcp*{add the leaf $r_i$ to the FIFO $L$ of leaves}
\nl $\mathtt{Add}(P, (t_i,p_{t_i}))$ \label{line:Add} \tcp*{return the recursive iterator $t_i$ back to $P$}
}
\nl \uElseIf(\tcp*[f]{the result $r_i$ is a recursive iterator}){$r_i \neq \mathtt{fail}$}{
\nl $\mathtt{Add}(P, (t_i,p_{t_i}))$ \tcp*{return the recursive iterator $t_i$ back to $P$}
\nl $\mathtt{SignalSemaphore}(s)$ \tcp*{increase the semaphore by 1}
\nl $\mathtt{Add}(P, (r_i,p_{t_i}+1))$ \tcp*{add the new recursive iterator $r_i$ to $P$}
\nl $\mathtt{SignalSemaphore}(s)$ \tcp*{increase the semaphore by 1}
\nl $j := j + 1$ \tcp*{increase the job counter $j$ by 1}
}
\nl \Else(\tcp*[f]{the result $r_i$ is $\mathtt{fail}$}){
\nl $j := j - 1$ \tcp*{decrease the job counter $j$ by 1}
}
\nl \If(\tcp*[f]{no recursive iterator is in process or pending}){$j=0$}{
\nl $\mathtt{Add}(L, \mathtt{fail})$ \tcp*{add $\mathtt{fail}$ to the FIFO $L$ of leaves}
\nl \For(\tcp*[f]{for each worker}){$i=1,\ldots, n$}{
\nl $\mathtt{SignalSemaphore}(s)$ \tcp*{increase the semaphore by 1}
\tcc{the first worker who realizes that there are no jobs left writes $\mathtt{fail}$ in the FIFO $L$ of leaves and increases the semaphore by $n$ to enable all workers to bypass line~\ref{line:sem}, reach line~\ref{line:return_none} and terminate}
}
}
}
}
\caption{EvaluateRecursiveIterator \label{algo:eri}}
\end{algorithm}
Algorithm~\ref{algo:eri} is the one executed by each worker.
It gets the global state consisting of the number $n$ of workers, the FIFO $L$ of leaves, the priority queue $P$, the semaphore $s$, and the counter $j$ of jobs in process or pending.
A semaphore is a globally shared variable with nonnegative integers as admissible values, which we use to tell workers when to start looking for jobs to process.
The command $\mathtt{SignalSemaphore}(s)$ increases $s$ by $1$.
The command $\mathtt{WaitSemaphore}(s)$ halts until $s>0$ and then decreases $s$ by $1$.
Algorithm~\ref{algo:eri} could be refined for \emph{locally uniform} recursive iterators as follows: Whenever a recursive iterator starts to evaluate leaves then do not add it back to the priority queue (line~\ref{line:Add}) but evaluate it fully (by repeating lines~\ref{line:next} and \ref{line:AddToFIFO}).
In Algorithms~\ref{algo:peri} and \ref{algo:eri} the FIFO $L$ can be equipped with a capacity $k$.
Once this capacity is reached line~\ref{line:AddToFIFO} of Algorithm~\ref{algo:eri} will automatically pause the worker until some other process, e.g. Algorithm~\ref{algo:leaf-iterator}, pops the FIFO $L$.
Algorithm~\ref{algo:leaf-iterator} turns a recursive iterator $t$ within $T$ into a single iterator $\ell(t)$ which enumerates $T(t) \subseteq T$.
\begin{algorithm}[H]
\SetKwIF{If}{ElseIf}{Else}{if}{then}{elif}{else}{}
\DontPrintSemicolon
\SetKwProg{LeafIterator}{LeafIterator}{}{}
\LinesNotNumbered
\KwIn{
\begin{itemize}
\item A recursive iterator $t$
\item a number $n \in \N_{>0}$ of workers
\item a capacity $k$ for the FIFO $L$ of leaves (cf.~the discussion after Algorithm~\ref{algo:eri})
\end{itemize}
}
\KwOut{The associated leaf-iterator $\ell(t)$}
\LeafIterator(){($t$, $n$, $k$)}{
\nl Initialize a FIFO $L := ()$\;
\nl Trigger $\mathbf{ParallellyEvaluateRecursiveIterator}(t,n,L)$\;
\nl Initialize the leaf-iterator $\ell$:\;
\nl \phantom{aa} Define $\mathtt{IsDone}(\ell)$ to check if first entry of $L$ is $\mathtt{fail}$\footnote{Recall, $\mathtt{fail} \notin T$.}\;
\nl \phantom{aa} Define $\mathtt{Next}(\ell)$ to return the first entry of $L$ which is an element of $T(t)$\;
\nl \Return{$\ell$}\;
}
\caption{LeafIterator (Leaf-iterator of a recursive iterator) \label{algo:leaf-iterator}}
\end{algorithm}
\section{Why \HPCGAP?} \label{sec:WhyHPCGAP}
We list some advantages of our implementation in \HPCGAP:
\begin{enumerate}
\item More threads can be added on the fly; they simply start to pull jobs from the priority queue (if nonempty);
\item One can even notify single threads to terminate once they finish evaluating a recursive-iterator;
\item \HPCGAP supports global shared memory and therefore allows us to use a simple and efficient shared memory implementation for priority queues, as described in Section~\ref{sec:iterators};
\item \HPCGAP allows for objects to be moved efficiently from one thread to another by reassigning ownership of those objects to the new thread, rather than inefficiently performing a full structural copy or using serialization.
\end{enumerate}
The most obvious drawback of our implementation is the following:
The state of evaluation of a recursive iterator is defined by the priority queue (residing in a shared region) and by the iterators that are being evaluated in the threads.
So if a thread dies or hangs\footnote{either manually terminated or due to an instability of \HPCGAP, which rarely happens in the current version} while evaluating a recursive-iterator then the latter (which was adopted by the thread from the priority queue) is lost together with all its leaves (e.g., matroids).
In particular, it is impossible to terminate the running \HPCGAP process without losing the state of evaluation.
A second drawback is that it is currently impossible to use a distributed computational model since in our implementation the state of evaluation of a recursive iterator can only be defined and managed by a single \HPCGAP process.
One way to avoid these drawbacks is to store the state of evaluation into a (temporary) database.
In particular, \emph{all} not yet fully evaluated recursive-iterators should be stored in the database, while those in process should be marked as such using a unique fingerprint of the evaluating process.
On the one hand, this allows distributed access.
On the other hand, an iterator stuck in a deadlock can be freed manually (or perhaps even automatically by a watchdog) for evaluation by other threads searching for jobs.
Our implementation performs best for recursive-iterators where the evaluation time of each produced iterator is considerably longer than the organizational overhead in \HPCGAP caused by redefining regions, etc.
\section{Timings} \label{sec:timings}
It is worth noting that $97\%$ of the $404$ tree-iterators of the different multiplicity vectors for $n=13$ atoms can be evaluated in less than a day of CPU time.
For $n=14$ the corresponding figure is still $93\%$ of the $695$ tree-iterators.
\begin{rmrk}
While processing all relevant multiplicity vectors is an ``embarrassingly parallel'' problem, the reader may have noticed that the parallel evaluation of a single tree-iterator corresponding to one such multiplicity vector is much more involved.
\end{rmrk}
The gain of the parallelized evaluation of tree-iterators of rank $3$ matroids with given multiplicity vector depends on the number $n$ of atoms.
The longest CPU time of an evaluation of a tree-iterator with $n=13$ atoms was that of the one with multiplicity vector $(m_3,m_4) = (18,4)$ which took $16.2$ CPU days but finished in $5.59$ days using $8$ workers, a factor of $2.9$.
The gain for $n=14$ was more significant:
The multiplicity vector with the largest number of matroids is $(m_2,m_3,m_4,m_5) = (14,9,5,2)$.
It generated $168352$ matroids ($45$ of them are representable) in about $22.8$ hours of CPU time, but finished in $112$ minutes using 24 workers, a factor of $12.2$.
The multiplicity vector with the longest CPU time for evaluating the tree-iterator is $(m_2,m_3,m_4,m_5) = (3,18,4,1)$.
It generated $34$ matroids (only one of them is representable) and took $495.7$ CPU days but finished in $74.3$ days using $8$ workers, a factor of $6.7$.
\newcommand{\includebibliography}[1]{
}
\input{ParallelizedIterators.bbl}
\end{document}
\begin{document}
\title{Entanglement Swapping for Generation of Heralded Time-Frequency-Entangled Photon Pairs}
\author{Dashiell L. P. Vitullo}
\affiliation{Department of Physics and Oregon Center for Optical, Molecular, \& Quantum Science, University of Oregon, Eugene, Oregon 97403, USA}
\author{M. G. Raymer}
\affiliation{Department of Physics and Oregon Center for Optical, Molecular, \& Quantum Science, University of Oregon, Eugene, Oregon 97403, USA}
\author{B. J. Smith}\email{Corresponding author: [email protected]}
\affiliation{Department of Physics and Oregon Center for Optical, Molecular, \& Quantum Science, University of Oregon, Eugene, Oregon 97403, USA}
\author{Micha\l{} Karpi\'nski}
\affiliation{Faculty of Physics, University of Warsaw, Pasteura 5, 02-093 Warszawa, Poland}
\author{L. Mejling}
\affiliation{Department of Photonics Engineering, Technical University of Denmark, 2800 Kgs.~Lyngby, Denmark}
\author{K. Rottwitt}
\affiliation{Department of Photonics Engineering, Technical University of Denmark, 2800 Kgs.~Lyngby, Denmark}
\begin{abstract}
Photonic time-frequency entanglement is a promising resource for quantum information processing technologies. We investigate swapping of continuous-variable entanglement in the time-frequency degree of freedom using three-wave mixing in the low-gain regime with the aim of producing heralded biphoton states with high purity and low multi-pair probability. Heralding is achieved by combining one photon from each of two biphoton sources via sum-frequency generation to create a herald photon. We present a realistic model with pulsed pumps, investigate the effects of resolving the frequency of the herald photon, and find that frequency-resolving measurement of the herald photon is necessary to produce high-purity biphotons. We also find a trade-off between the rate of successful entanglement swapping and both the purity and quantified entanglement resource (negativity) of the heralded biphoton state.
\end{abstract}
\pacs{03.65.Ud, 03.67.Bg, 03.67.Hk, 42.50.Ex, 42.65.Lm}
\maketitle
\section{Introduction}
Entangled photon pairs are an important resource for quantum communication, quantum metrology, and quantum networking \cite{Gisin2007,Torres2011}. Their generation is typically not deterministic, for either fundamental reasons, as in sources based on spontaneous nonlinear optical processes, or for technical reasons, as is the case of single quantum emitters, such as quantum dots or atoms, where losses arise from imperfect coupling of photon pairs into the desired optical modes \cite{Takeuchi2013}. The optical-field state generated by these sources necessarily contains an undesired vacuum component, and additional unwanted multi-pair components are present for spontaneous nonlinear optical sources. Sources that deterministically generate exactly the desired number of entangled photon pairs would enable large-scale quantum information processing and would be an important resource for secure quantum communication.
In the absence of true deterministic entangled pair generation, the heralding approach, where a pair generation event is heralded by an accompanying signal, enables efficient realization of quantum operations \cite{Barz2010}. Heralding removes the vacuum component from the optical-field state at the cost of reduced generation probability and can be implemented in ways that remove higher-order components that contain more than the desired number of photon pairs.
Entanglement swapping has been proposed as a means to convert two nondeterministically generated photon pairs into a single heralded entangled photon pair \cite{Yurke1992, Zukowski1993}. In this scheme, two independent nondeterministic sources each create an entangled photon pair. One photon of each pair is in a spatial mode which we will call {\em active}, and the other in the {\em bystander} mode. The active modes from each source are jointly measured, and the measurement result indicates whether or not entanglement has been successfully swapped. A measurement result indicating successful swapping heralds creation of entanglement between the remaining pair of bystander photons, and indicates that the swapping process has erased information about the state of the converted active photons in the entangled degree of freedom. Bystander photons prepared in this manner are entangled despite having never been in the same place at the same time. Entanglement swapping is particularly relevant for spontaneous parametric downconversion (SPDC) and spontaneous four wave mixing (SFWM) sources, which are inherently probabilistic \cite{Torres2011}, and has been analyzed and demonstrated in many degrees of freedom of the optical field \cite{Jennewein2001, Riedmatten2005,Takei2005,Kaltenbaek2009,Takesue2009, Sangouard2011,Jin2015, Zhang2016}.
The spectral-temporal degree of freedom of light has been recently recognized as a promising framework for quantum information science, since it enables multidimensional encoding of quantum information in a way compatible with existing guided-wave and free-space optical infrastructure \cite{Humphreys2013,Nunn2013,Roslund2014,Kowligy2014,Donohue2014,Brecht2015,Lukens2017,Wright2017,Karpinski2017,Kues2017,Villegas2017,FiberEntanglement}.
Moreover, spectral-temporal entanglement naturally arises in SPDC and SFWM as a consequence of energy conservation in parametric optical processes.
\begin{figure*}
\caption{Entanglement swapping setup. The infinity symbols denote entangled photon pairs. Pulsed pump beams, denoted in blue, are directed into PPKTP-waveguide-based SPDC sources. The active fields comprise the signal from source 1 ($s_1$) and idler from source 2 ($i_2$), while the remaining $i_1$ and $s_2$ are the bystander fields. Dichroic filters (DC) separate the signal and idler fields within the sources, and combine the active fields. $\text{PPKTP}_{\text{SFG}}$ denotes the waveguide in which sum-frequency generation of the active fields produces the herald photon.}
\label{fig:setup}
\end{figure*}
Realization of spectral-temporal entanglement swapping (STES) will provide another important tool for implementing quantum information processing using time-frequency modes (i.e.\ temporal modes). The necessary joint measurement of the active downconversion modes can be implemented using sum-frequency generation (SFG) in a nonlinear optical medium, as proposed by Molotkov and Nazin \cite{Molotkov1999}. These authors analyzed STES using an idealized model in which the pump laser is monochromatic and the SFG phase-matching bandwidth is infinite.
In this paper, we analyze an experimentally realistic implementation of STES by SFG on photons generated by SPDC. Our design includes pulsed pump beams, which are necessary for clocked operation in quantum information processing networks, and realistic phase matching constraints, which crucially affect the joint measurement of the active modes. We discuss the necessity of frequency-resolving herald detection and propose a design that produces high-purity entangled photon-pair states. We verify that the scheme not only creates heralded entangled pairs, but also that the heralded pair states contain greater entanglement than is present in the states produced by the SPDC sources. The scheme also suppresses multi-pair generation events. The major limitation of the method is the low heralding rate, although we point out that the needed joint measurement, performed using SFG of two individually generated single photons (without any additional pumping), has been demonstrated experimentally in \cite{Guerreiro2014}.
\section{Concept}
Our design for spectral-temporal entanglement swapping is presented in Fig.\ \ref{fig:setup}. Pulsed pump beams of central frequency $\omega_p$ are sent through periodically-poled potassium titanyl phosphate (PPKTP) waveguides in crystals of length $L$ and poling period $\Lambda$ to create photon pairs via SPDC in the type-II phase matching configuration. Dichroic filters (DC) within each source separate the frequency-nondegenerate signal and idler fields. The active modes, composed of the signal from source 1 ($s_1$) and the idler from source 2 ($i_2$), are directed into a third PPKTP waveguide of length $L_{\text{SFG}}$ where the sum-frequency generation (SFG) process probabilistically combines them into a photon at the original pump frequency. This erases information about the difference frequency between the converted photons. A subsequent dichroic filter ($\text{DC}_\text{SFG}$) directs successfully converted light to an ideal spectrometer, to measure $\wsfg$. This heralds the generation of a spectrally entangled two-photon state between the bystander fields, $i_1$ and $s_2$, which have never interacted. The other output of $\text{DC}_\text{SFG}$ is monitored by a single-photon-counting module, constituting the ``fail'' detector, $D_{\text{fail}}$. Simultaneous detection in the spectrometer and at $D_{\text{fail}}$ indicates that more than one pair-generation event took place in at least one source, and the bystander modes contain more photons than desired. Similarly, detection of more than one photon in the spectrometer indicates the bystander modes have more than the desired number of photons. Thus, conditioning the use of the output photons on a successful SFG frequency measurement in a single bin and no detection at $D_{\text{fail}}$ prepares a heralded single photon-pair output state with entanglement between the bystander modes and substantially suppressed contributions from higher-order photon number terms.
It is important to consider whether the swapping process can distinguish two photon-pair creation events that occur in the same source from events that occur in separate sources. The creation process in source 1 is statistically independent of the process in source 2. Thus, during a given pump pulse, the probability that two photon-pair generation events will happen in a single source is the same as the probability that a single photon-pair generation event occurs in each of the two sources. If the active field photons are indistinguishable, then two pair-generation events in the same crystal would give rise to false herald detections (so-called because the output photons occupy the same field and are not usefully entangled) with probabilities comparable to those for true herald detections. In our design, the phase matching in the SFG crystal is satisfied only when both a signal and an idler photon (one each from two separate sources) are present. This avoids the false herald pitfall because two pair-generation events in the same crystal generate two photons that do not satisfy the phase-matching requirement for generation of an SFG photon.
\section{Theory} \label{sec:theory}
A single spontaneous parametric down conversion (SPDC) source generates the state
\begin{equation}
\begin{split}
&\ket{\Psi} = \sqrt{1-|\xi|^2 - \mathcal{O}(|\xi|^4)}\vacuum \\
+ \xi \int_0^\infty &\text{d}\wi \text{d}\ws \bar{\Phi}(\wi, \ws) \ai^\dagger(\wi) \as^\dagger(\ws) \vacuum + \mathcal{O}(\xi^2), \label{eq:SPDCstate}
\end{split}
\end{equation}
where $\xi$ is the probability amplitude for creating a photon-pair, the $s$ and $i$ subscripts denote the signal and idler fields respectively, and $\Phi(\wi, \ws) = \xi \bar{\Phi}(\wi, \ws)$ is the two-frequency joint spectral amplitude (JSA). We consider the low-gain regime where the probability of a single biphoton creation event satisfies $|\xi|^2 \ll 1$. The higher-order terms represented with order $\mathcal{O}(\xi^2)$ make non-negligible contributions to the state input to the swapping process, but we configure this process so these terms make negligible contributions to the state prepared upon herald detection. For clarity, we neglect terms of order $\mathcal{O}(\xi^2)$ and above in the following derivation of the state prepared by the entanglement swapping process with the caveat that this is justified only when the active modes into the swapping process are distinguishable.
The Hamiltonian governing the SFG process is
\begin{equation}
\hat{H}_{\text{SFG}}=\int_\mathcal{V}\text{d}\mathbf{r}\tilde{\chi}^{(2)}\hat{\mathbf{E}}_\text{a1}^{(+)}\hat{\mathbf{E}}_\text{a2}^{(+)}\hat{\mathbf{E}}^{(-)}_\text{SFG}+\hc,
\end{equation}
where $\tilde{\chi}^{(2)}$ is the second-order nonlinear susceptibility tensor, subscripts a1 and a2 refer to the active fields, respectively from sources 1 and 2, that are converted to the sum-frequency field, and $\hc$ denotes the Hermitian conjugate. The electric field operators are defined as
\begin{equation}
\hat{\mathbf{E}}_j(\mathbf{r},t) = \hat{\mathbf{E}}_j^{(+)}(\mathbf{r},t)+\hat{\mathbf{E}}_j^{(-)}(\mathbf{r},t),
\end{equation}
where $j$ indexes the active signal and idler fields and the SFG field over the subscripts $\{a1, a2, \text{SFG} \}$, and
\begin{align}
\hat{\mathbf{E}}_j^{(+)}(\mathbf{r},t)&=i\int_0^\infty \frac{\text{d}\omega_j}{2 \pi} \hat{\mathbf{e}}_j\mathcal{E}_j (\mathbf{r}) e^{\ii [\mathbf{k}_j(\omega_j)\cdot \mathbf{r}- \omega_j t]} \hat{a}_j(\omega_j), \\
\hat{\mathbf{E}}_j^{(-)}(\mathbf{r},t)&=i\int_0^\infty \frac{\text{d}\omega_j}{2 \pi} \hat{\mathbf{e}}_j^* \mathcal{E}_j^* (\mathbf{r}) e^{-\ii[\mathbf{k}_j(\omega_j)\cdot \mathbf{r}- \omega_j t]} \hat{a}_j^\dagger(\omega_j),
\end{align}
where $\hat{\mathbf{e}}_j$ is the unit polarization vector, $\mathbf{k}_j$ is the wavevector, $\omega_j$ the angular frequency, $\hat{a}_j^\dagger (\omega_j)$ and $\hat{a}_j (\omega_j)$ are respectively the creation and annihilation operators, with commutator
\begin{equation}
[ \hat{a} (\omega_j) , \hat{a}^\dagger (\omega_j')] = 2 \pi \, \delta (\omega_j - \omega_j'),
\end{equation}
and $\mathcal{E}_j (\mathbf{r}) = \sqrt{\frac{\hslash \omega_j}{2 \epsilon_0 n_j (\omega_j) c}} u_j (\mathbf{r})$ is the single-photon electric field amplitude with material refractive index $n_j(\omega_j)$, speed of light in vacuum $c$, and waveguide mode $u_j(\mathbf{r})$. Similar approaches are detailed in \cite{Fiorentino2007,Milonni1995,Blow1990a}.
We select a crossed-polarization scheme and take $\tilde{\chi}^{(2)}$ to be the element from the full nonlinear tensor that couples the zyy crystallographic axes, which allows us to reduce the vector equations to a scalar problem \cite{Bierlein}. For simplicity we collect constant factors into $\chi^{(2)}$ in this theory section (the absence of the overtilde indicating the presence of the constants), but they are shown in detail in Appendix \ref{sec:CountRates}.
With this, the Hamiltonian simplifies to
\begin{equation} \label{eq:Hamiltonian}
\begin{split}
&\hat{H}_{\text{SFG}}=\chi^{(2)}\int_\mathcal{V}\text{d}\mathbf{r}\int_0^\infty \text{d}\omega_{a1} \text{d}\omega_{a2}\text{d}\wsfg \\
&\left\{ \exp\big[ \ii \left( \mathbf{r}\cdot \Delta \mathbf{k} - \Delta \omega t \right) \big] \hat{a}_{a1}\hat{a}_{a2}\hat{a}^\dagger_\text{SFG}+\hc\right\},
\end{split}
\end{equation}
where
\begin{align}
&\Delta \mathbf{k} = \mathbf{k}_{a1}(\omega_{a1})+\mathbf{k}_{a2}(\omega_{a2})-\mathbf{k}_\text{SFG}(\wsfg) + \mathbf{k}_\Lambda, \\
&\Delta \omega = \omega_{a1}+\omega_{a2}-\wsfg,
\end{align}
with $\mathbf{k}_\Lambda$ accounting for the quasi-phase-matching contribution. We have assumed that the field amplitudes are slowly varying in frequency, so they can be taken outside the integrals and absorbed into $\chi^{(2)}$. To first order, the state output after the SFG waveguide is described as
\begin{equation}
\ket{\Psi(t)}_\text{out} \approx \ket{\Psi(t_0)}_\text{in}-\frac{\ii}{\hslash} \int_{t_0}^t\hat{H}_\text{SFG}(t')\, \text{d}t'\ket{\Psi(t_0)}_\text{in}.\label{eq:outputState}
\end{equation}
We select our SFG waveguide parameters such that SFG can only take place if a photon from each source is present, and take the input state to be
\begin{equation}
\begin{split}
\ket{\Psi (t_0)}_\text{in}=\int_0^\infty \text{d}\wione\text{d}\witwo\text{d}\wsone\text{d}\wstwo\Phi_1(\wione,\wsone)& \\
\times \Phi_2(\witwo,\wstwo) \aione^\dagger\aitwo^\dagger\asone^\dagger\astwo^\dagger \vacuum &, \label{eq:inputState}
\end{split}
\end{equation}
where the frequency dependence of the creation operators has been suppressed and where $\Phi_j$ is the JSA of source $j \in \{1,2\}$. We assign the active and bystander fields as a1$\leftrightarrow$s1, b1$\leftrightarrow$i1, a2$\leftrightarrow$i2, and b2$\leftrightarrow$s2. Combining Eq.~\eqref{eq:outputState} and \eqref{eq:inputState}, extending the limits of the temporal integral to be from $-\infty$ to $\infty$, writing the phase-matching in a general form as
\begin{equation}
\Pi(\waone,\watwo,\wsfg) = \int_{0}^{L} \text{d}z \exp \left( -i \, \Delta \mathbf{k} \, z \right),
\end{equation}
suppressing the time-dependence in the states, and noting that the Hermitian conjugate term of the Hamiltonian acting on the input state gives zero, we find
\begin{equation}
\begin{split} \label{eq:threeFreqState}
&\ket{\Psi}_\text{out}= \ket{\Psi}_\text{in}-\frac{\ii \chi^{(2)}}{\hslash}
\int_0^\infty \text{d}\wsfg\text{d}\wbone \text{d}\wbtwo\\
&\times \psi(\wbone,\wbtwo,\wsfg) \asfg^\dagger(\wsfg)\abone^\dagger(\wbone)\abtwo^\dagger(\wbtwo)\vacuum,
\end{split}
\end{equation}
where the three-frequency joint spectral amplitude is
\begin{equation} \label{eq:psi}
\begin{split}
\psi(\wbone,\wbtwo,\wsfg)= \int_0^\infty \text{d}\watwo \text{d}\waone \Pi(\waone, \watwo, \wsfg)& \\
\times \delta(\wsfg - \waone - \watwo) \Phi_1(\wbone,\waone)\Phi_2(\watwo,\wbtwo)&.
\end{split}
\end{equation}
Note that entanglement can only be swapped if there is entanglement in the input states to begin with, \ie $\Phi_1$ and $\Phi_2$ are both inseparable in their frequency arguments such that $\Phi(\wi,\ws) \neq F(\wi) G(\ws)$, where $F$ and $G$ are arbitrary single-argument functions. Armed with this three-photon state, our task is now to determine which phase-matching function (\ie which SFG crystal parameters) and which heralding measurement swap the input entanglement to generate the most desirable output entangled biphoton state.
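To make Eq.~\eqref{eq:psi} concrete, the following minimal Python sketch evaluates $\psi$ on a coarse grid, with the energy-conservation delta function collapsing the $\watwo$ integral through the substitution $\watwo = \wsfg - \waone$. The Gaussian source JSAs and the SFG acceptance function used here are illustrative placeholders, not the PPKTP model of Sec.~\ref{sec:simulation}.
\begin{verbatim}
# Sketch: discretized evaluation of the three-frequency JSA psi.
# The delta function sets w_a2 = w_SFG - w_a1, so only the w_a1
# integral (here a Riemann sum) remains. Placeholder Gaussians
# stand in for the exact source JSAs and SFG phase matching.
import numpy as np

w_pbar, w_sbar, w_ibar = 4.651, 3.090, 1.561   # rad/fs
sig_p = 7.725e-3                               # pump bandwidth
sig_b = 5 * sig_p                              # broad PM widths

def Phi(w_idler, w_signal):
    pump = np.exp(-(w_signal + w_idler - w_pbar)**2
                  / (2 * sig_p**2))
    pm = np.exp(-((w_signal - w_idler) - (w_sbar - w_ibar))**2
                / (2 * sig_b**2))
    return pump * pm

def Pi_sfg(w1, w2):
    # anticorrelated acceptance: broad in the difference frequency
    return np.exp(-((w1 - w2) - (w_sbar - w_ibar))**2
                  / (2 * sig_b**2))

w_a1 = w_sbar + np.linspace(-3, 3, 121) * sig_p
w_b1 = w_ibar + np.linspace(-3, 3, 61) * sig_p
w_b2 = w_sbar + np.linspace(-3, 3, 61) * sig_p
w_sfg = w_pbar + np.linspace(-3, 3, 31) * sig_p

B1, B2, S = np.meshgrid(w_b1, w_b2, w_sfg, indexing="ij")
dwa1 = w_a1[1] - w_a1[0]
psi = np.zeros_like(B1)
for wa1 in w_a1:
    wa2 = S - wa1
    psi += Pi_sfg(wa1, wa2) * Phi(B1, wa1) * Phi(wa2, B2) * dwa1
print("psi grid:", psi.shape)
\end{verbatim}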
\subsection{Heralding Swapped Entanglement}
It is worthwhile to consider the use of two categories of herald detectors. The first category detects the arrival of a photon without resolving its frequency; we refer to these as ``non-resolving'' detectors. The second category, ``frequency-resolving'' detectors, reports the frequency of the herald photon to within some resolution limit. A dispersive element can be combined with an array of non-resolving detectors, as shown in Fig.\ \ref{fig:setup}, to make a frequency-resolving detector. Frequency non-resolving detectors offer simplicity and lower cost as advantages over frequency-resolving detectors, so we start with consideration of entanglement swapping with frequency non-resolving detection.
Just as heralding one photon from an SPDC source with a frequency non-resolving detector can produce either a pure or a mixed single photon state depending on the separability of $\Phi(\wi,\ws)$ \cite{URen2005,Smith2009}, the purity of the output biphoton state after measurement of the SFG photon is set by the separability characteristics of $\psi(\wbone,\wbtwo,\wsfg)$. A pure state biphoton is created only if $\psi$ can be factored such that
\begin{equation} \label{eq:separabilityCrit}
\psi(\wbone,\wbtwo,\wsfg)=P(\wbone,\wbtwo)Q(\wsfg),
\end{equation}
where $P$ is a function that is non-separable in $\wbone$ and $\wbtwo$ and $Q$ is a function of $\wsfg$ only. If frequency non-resolving heralding is performed on a $\psi$ that does not meet this separability criterion, then the output biphoton will be in an undesirable mixed state.
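The separability criterion of Eq.~\eqref{eq:separabilityCrit} can be probed numerically by reshaping a discretized $\psi$ into a matrix between the bystander-pair index $(\wbone,\wbtwo)$ and the herald index $\wsfg$ and inspecting its singular values; a single dominant singular value indicates the factorized form $P(\wbone,\wbtwo)Q(\wsfg)$. The Python sketch below uses a toy amplitude with a tunable non-separable perturbation; all grids and functional forms are illustrative assumptions.
\begin{verbatim}
# Sketch: Schmidt-number test of separability between the
# bystander pair (b1,b2) and the herald frequency.
import numpy as np

nb, ns = 30, 10
b1 = np.linspace(-1, 1, nb)[:, None, None]
b2 = np.linspace(-1, 1, nb)[None, :, None]
f  = np.linspace(-1, 1, ns)[None, None, :]
eps = 0.5   # eps = 0 gives an exactly separable psi
psi = (np.exp(-(b1 + b2)**2) * np.exp(-f**2)
       * np.exp(-eps * (b1 - b2) * f))

M = psi.reshape(nb * nb, ns)
sv = np.linalg.svd(M, compute_uv=False)
p = (sv / np.linalg.norm(sv))**2
schmidt = 1.0 / np.sum(p**2)
print("Schmidt number (1 => pure heralded biphoton"
      " without resolving the herald):", schmidt)
\end{verbatim}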
\subsubsection{Simple Model: Infinitely Long Crystals} \label{sec:toy}
The simplest model that achieves heralded entanglement swapping is perfect anticorrelated phase-matching in the SFG crystal. Let $k'(\omega) = \partial k (\omega)/\partial \omega = 1/ \nu_g$ be the inverse of the group velocity $\nu_g$, referred to as the group slowness. Anticorrelated phase-matching means
\begin{align}
k' (\bar{\omega}_{a1}) &= k' (\bar{\omega}_{a2}) \label{eq:anticorrelated} \\
k' (\bar{\omega}_{a1}) &\neq k' (\bar{\omega}_{\text{SFG}}) \\
\Pi = \delta(\waone& + \watwo - \bar{\omega}_{\text{SFG}}), \label{eq:toyPMdelta}
\end{align}
where $\bar{\omega}_i$ is the central frequency of $\omega_i$, around which $k(\omega_i)$ is Taylor expanded, and the Dirac delta function in Eq.\ \eqref{eq:toyPMdelta} results from assuming perfect phase-matching due to an infinitely long SFG crystal. Frequencies that satisfy this phase-matching condition are oriented along the difference-frequency axis, as illustrated in Fig.\ \ref{fig:sourceJSI}, from whence the term anticorrelated. With this phase-matching, $\psi$ becomes manifestly separable in $\wsfg$ with the form
\begin{equation}
\begin{split} \label{eq:toyEta}
\psi(\wbone,\wbtwo,\wsfg)= \delta (\bar{\omega}_{\text{SFG}} -\omega_{\text{SFG}}) \\
\times \int_0^\infty d\waone \Phi_1 (\wbone, \waone) \Phi_2 (\bar{\omega}_{\text{SFG}} - \waone, \wbtwo).
\end{split}
\end{equation}
The integral enforces entanglement of the bystander modes, as can be seen by taking the source lengths $L \to \infty$ and assuming the source and SFG crystals are identical (which implies $\bar{\omega}_{\text{SFG}} = \bar{\omega}_p$), yielding
\begin{equation} \label{eq:explicitDeltaPsi}
\psi (\wbone,\wbtwo,\wsfg) = \delta (\bar{\omega}_{p} -\omega_{\text{SFG}}) \delta(\wbone + \wbtwo - \bar{\omega}_{p}).
\end{equation}
Eq.\ \eqref{eq:explicitDeltaPsi} satisfies the separability criterion (Eq.\ \eqref{eq:separabilityCrit}), so using the SFG photon as a herald leaves the remaining signal/idler biphoton in an ideal state that is both pure and maximally entangled.
In contrast, perfect correlated phase matching (satisfied by frequencies oriented along the sum-frequency axis of Fig.\ \ref{fig:sourceJSI}) with
\begin{align}
\left[ k'(\bar{\omega}_{\text{a}1}) - k'(\bar{\omega}_{\text{SFG}}) \right] &= -[k'(\bar{\omega}_{\text{a}2}) - k'(\bar{\omega}_{\text{SFG}})] \\
\Pi &= \delta \left( \waone - \bar{\omega}_{\text{a1}} - \watwo + \bar{\omega}_{\text{a2}} \right)
\end{align}
in an infinitely long SFG crystal gives,
\begin{equation}
\begin{split}
\psi(\wbone, \wbtwo,\wsfg) = &\Phi_1 \left[ \wbone , \left(\wsfg - \Delta \bar{\omega}_a\right)/2 \right] \\
\times& \Phi_2 \left[ \left(\wsfg + \Delta \bar{\omega}_a \right)/2,\wbtwo \right],
\end{split}
\end{equation}
where $\Delta \bar{\omega}_a = \bar{\omega}_{\text{a1}} - \bar{\omega}_{\text{a2}}$. Taking $L \to \infty$,
\begin{equation}
\begin{split}
\psi(\wbone, \wbtwo,\wsfg) = & \delta \left( \frac{\wsfg - \Delta \bar{\omega}_a}{2}+\wbone- \bar{\omega}_p \right) \\
\times& \delta \left( \frac{\wsfg + \Delta \bar{\omega}_a}{2}+\wbtwo - \bar{\omega}_p \right),
\end{split}
\end{equation}
and it is clear that $\wsfg$ is manifestly inseparable from both $\wbone$ and $\wbtwo$. Thus, with monochromatic pumps, long crystals, and heralding with the SFG photons directed to frequency non-resolving detectors, correlated phase-matching in the SFG crystal produces undesirable output states, while anticorrelated phase-matching heralds pure-state entangled biphotons.
This can be understood through the availability or erasure of frequency information. Correlated phase matching allows determination of $\waone$ and $\watwo$ through measurement of $\wsfg$, which simultaneously collapses the values of $\wbone$ and $\wbtwo$. Anticorrelated SFG erases information about the difference between the input frequencies, so measurement of $\wsfg$ does not allow determination of the input frequencies and the quantum superposition of the bystander modes is preserved.
No real experimental system will have perfectly delta-correlated phase matching, so it is necessary to consider mathematical tools for assessment of the effects of finite length and pulsed pump beams on the purity of the heralded biphoton state.
\subsubsection{Analytic Model: The Gaussian Phase-Matching Approximation}
To facilitate analytical investigation, we approximate both the pump pulses and the phase-matching functions as Gaussians, such that
\begin{align}
&\Pi(\waone, \watwo, \wsfg) \approx L \exp \left[- \frac{(L \, \Delta \widetilde{k} )^2 }{2 \sigma_\pi^2}\right] \label{eq:GaussPM} \\
&\Phi(\ws,\wi) \approx A \, L \exp \left[ -\frac{(\ws + \wi - \wpbar)^2}{2 \sigma_p^2} -\frac{(L \, \Delta \widetilde{k})^2}{2 \sigma_\pi^2} \right] \label{eq:GaussPhi}
\end{align}
where $A$ is the pump peak power, $\sigma_\pi$ is the Gaussian width (explained below), and we assume that the wavevectors are co-oriented along the waveguide ($z$) axis, allowing the use of a scalar $\Delta k$ with implicit frequency dependence. We neglect the phase factor in $\Pi(\waone, \watwo, \wsfg)$ as it is irrelevant to the analysis in this Section. Recall that $k(\omega) = n(\omega) \omega/c$. It is convenient for separability analysis to define $\Delta \widetilde{k} = c\, \Delta k$ and absorb the factor of $c$ into the definition of $\sigma_\pi = \kappa c/L$, where $\kappa=12.8831$ is the fit parameter that best matches a Gaussian width to the exact sinc functional form. The Gaussian approximation avoids the added complexity of evaluating the integrals of products of sinc functions, which are found in exact phase-matching models (these are resolved numerically in Sec.\ \ref{sec:simulation}), by allowing analytic integration of
\begin{widetext}
\begin{align} \label{eq:psiGaussian}
&\psi(\wbone,\wbtwo,\wsfg) = \int_0^\infty \diff \waone \, \Pi (\waone, \wsfg - \waone, \wsfg) \Phi_1 (\wbone, \waone) \Phi_2 (\wsfg - \waone, \wbtwo) \\
&= L_{\text{SFG}} L^2 A^2 \int_0^\infty \diff \waone \, \exp \Big[ -\frac{\left(\wbone +\waone - \wpbar \right)^2+ \left(\wsfg - \wbone +\wbtwo - \wpbar \right)^2}{2 \sigma_p^2} -\frac{\left( \Delta \widetilde{k}_1\right)^2 + \left( \Delta \widetilde{k}_2\right)^2}{2 \sigma_\pi^2} - \frac{\left( \Delta \widetilde{k}_{\text{SFG}} \right)^2}{2 \sigma_{\text{SFG}}^2} \Big], \nonumber
\end{align}
\end{widetext}
where $\sigma_{\text{SFG}} = \kappa c/L_{\text{SFG}}$. Taking the refractive index variation over the wavelength range of each field to be small, we set $n_j = n(\omega_j) \approx n(\bar{\omega}_j)$, for $j \in \{p, s, i\}$. Eq. \eqref{eq:psiGaussian} satisfies the separability criterion (Eq.\ \eqref{eq:separabilityCrit}) when the prefactors for the cross-terms $\wsfg \wbone$ and $\wsfg \wbtwo$ are both zero. However, this occurs only when
\begin{equation}
-(n_p - n_s) (n_p - n_i) = \frac{\sigma_\pi^2}{\sigma_p^2},
\end{equation}
which \emph{is the condition for separable input states}, i.e.\ $\Phi(\ws,\wi) = F(\ws) G(\wi)$, which have no entanglement to be swapped. Thus, the frequency non-resolving measurement will produce mixed states for any realistic source that produces entangled output biphotons.
Why is separability in accordance with Eq.\ \eqref{eq:separabilityCrit} achievable in the infinite crystal limit, but not with finite crystals? Because using an infinitely long SFG crystal produces a monochromatic output field, which resolves $\wsfg$. This implies that we must use a frequency-resolving heralding scheme to achieve high-purity output biphoton states.
\subsubsection{Figures of Merit: Purity and Negativity}
In this Section, we develop a discretized description of the quantum state and review the mathematical machinery necessary for numerical simulation of a realistic model system. Consider heralding through frequency-resolving measurement of the SFG photon with outcomes forming a discrete set of disjoint frequency bins. We assume perfect detection with unit quantum efficiency, no dark counts, and lossless optical elements. Thus, herald detections occur uniquely after successful SFG (the second term in Eq.\ \eqref{eq:threeFreqState}), and always indicate the presence of a biphoton in the bystander fields.
Using a discretized frequency-bin description, the density matrix entries with $\wbone$ indexed by $\{j,j'\}$, $\wbtwo$ indexed by $\{k,k'\}$, and $\wsfg$ indexed by $\{l,l'\}$ are
\begin{equation} \label{eq:reducedDensityMatrix}
\begin{split}
\rho(j,k,l,j',k',l')= & \\
\psi(\omega_j, \omega_k, \omega_l) \psi^*(&\omega_{j'}, \omega_{k'}, \omega_{l'}) \Delta \wbone \, \Delta \wbtwo \, \Delta \wsfg,
\end{split}
\end{equation}
where $\Delta \omega_\alpha$ refers to the spacing between the frequency grid points for field $\alpha \in \{\text{b1, b2, SFG}\}$, so $\rho$ values here are probabilities, not probability densities. The spacings must, of course, be set smaller than the scale of the smallest structures in $\psi$ in order to resolve those features, and it is useful to keep in mind the experimentally accessible spectroscopic resolution limit of the SFG field of about 25 GHz (0.16 rad/ps) \cite{Davis2016, Kuo2016}.
If the SFG photon is measured with a frequency non-resolving detector, then $\wsfg$ is traced out, yielding the reduced density matrix \cite{Kolenderski2009,Kolenderski2009b}
\begin{equation} \label{eq:reducedDensityMatrix2}
\begin{split}
\rho_r(j,k,j',k')= \sum_l \psi(\omega_j, \omega_k, \omega_l) \psi^*(\omega_{j'}, \omega_{k'}, \omega_l)\\
\times \Delta \wbone \, \Delta \wbtwo \, \Delta \wsfg.
\end{split}
\end{equation}
Loss of information about the value of $\wsfg$ degrades the purity of the output state
\begin{equation} \label{eq:purity}
\mathcal{P} = \text{Tr} (\rho^2 ),
\end{equation}
where Tr is the trace operation.
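As a minimal illustration of the non-resolving case, the Python sketch below traces a small toy $\psi$ array over the herald frequency as in Eq.~\eqref{eq:reducedDensityMatrix2} and evaluates Eq.~\eqref{eq:purity}; the functional form of $\psi$ and the grid sizes are illustrative assumptions.
\begin{verbatim}
# Sketch: reduced density matrix and purity when the herald
# frequency is traced out (frequency non-resolving detection).
import numpy as np

nb, ns = 16, 6
b1 = np.linspace(-1, 1, nb)[:, None, None]
b2 = np.linspace(-1, 1, nb)[None, :, None]
f  = np.linspace(-1, 1, ns)[None, None, :]
psi = np.exp(-4*(b1 + b2 + 0.5*f)**2 - 0.5*(b1 - b2)**2 - f**2)

V = psi.reshape(nb * nb, ns)   # rows: (j,k); columns: herald bin l
rho_r = V @ V.conj().T         # sum over l
rho_r /= np.trace(rho_r)
print("purity:", np.real(np.trace(rho_r @ rho_r)))
\end{verbatim}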
To describe heralding with a frequency-resolving detector, let the spectroscopic measurement of $\wsfg$ be described by a set of projective measurement operators $\{\hat{\Omega}_m \}$ with
\begin{align}
\hat{\Omega}_m &= \int_{\bar{\omega}_m-\Delta'/2}^{\bar{\omega}_m+\Delta'/2} \ket{\omega} \bra{\omega} \diff \omega, \label{eq:Omega} \\
\sum_{m=1}^N \hat{\Omega}_m &= \mathbb{I}, \label{eq:completeness}\\
\hat{\Omega}_m \hat{\Omega}_{m'} &= \delta_{mm'} \hat{\Omega}_m, \label{eq:orthonormality}
\end{align}
where $\ket{\omega} = \hat{a}^{\dagger} (\omega) \ket{\text{vac}}_{\text{SFG}}$ is a single photon in the SFG spatial mode with frequency $\omega$, $N$ is the number of frequency bins, $\bar{\omega}_m$ is the central frequency of the $m$th bin, $\Delta'$ is the frequency-space width of a measurement bin, $\mathbb{I}$ is the identity matrix, and $\delta_{mm'}$ is the Kronecker delta. In order for Eq.\ \eqref{eq:completeness} to hold, the range of frequencies measured, $N \Delta'$, must exceed the range of frequencies produced via SFG; otherwise, successful entanglement swapping events will go undetected. Conditioning on a herald detection alleviates the need for a vacuum outcome in Eq.\ \eqref{eq:completeness}, and Eqs.\ \eqref{eq:completeness} and \eqref{eq:orthonormality} together imply that $\hat{\Omega}_m^\dagger \hat{\Omega}_m = \hat{\Omega}_m$. Detection of the frequency of the SFG photon projects the output state into
\begin{equation} \label{eq:RhoMeasured}
\hat{\rho}_m' (\wbone,\wbtwo,\wbone',\wbtwo') = \frac{\hat{\Omega}_m \hat{\rho} \, \hat{\Omega}_m}{\text{Tr} \left( \hat{\Omega}_m \hat{\rho} \right)},
\end{equation}
with $\wsfg = \bar{\omega}_m$.
To account for the limited resolution of a realistic measurement, let $\Delta$ be the separation between the frequency grid values used for computation and enforce $\Delta < \Delta'$. If we posit that $\Delta'/\Delta = Q$, which we take to be a positive integer, then we can partition the sum in Eq.\ \eqref{eq:completeness} by grouping together the $Q$ operators that comprise the $n$th measurement outcome, starting at $n=1$, to make the operator for the resolution-limited measurement
\begin{equation}\label{eq:binDef}
\hat{\widetilde{\Omega}}_{n} = \sum_{m=(n-1)Q+1}^{nQ} \hat{\Omega}_{m}.
\end{equation}
Thus Eq.\ \eqref{eq:RhoMeasured} generalizes to a linear combination of measurement operators at the discretization size with the straightforward substitutions $\hat{\Omega}_m \to \hat{\widetilde{\Omega}}_{n}$ and $\hat{\rho}_m' \to \hat{\widetilde{\rho}}_n'$. The populations $\hat{\widetilde{\rho}}_n' (\wbone,\wbtwo,\wbone,\wbtwo)$ compose the JSI of the biphoton prepared upon detection of the $n$th herald outcome. In principle, the upper bound on $n$ can be an arbitrarily large integer, but in practice this upper bound is resource constrained. The tradeoff between the number of grid points considered and the computational resources required to calculate the density matrix is discussed in Appendix \ref{sec:Simulation}. $Q$ must be chosen to give a good estimate for the purity, but computational resource requirements scale sharply with $Q$.
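The following Python sketch illustrates the resolution-limited conditioning of Eqs.~\eqref{eq:RhoMeasured} and \eqref{eq:binDef}: each coarse outcome incoherently combines $Q$ neighboring grid points of a toy $\psi$, and the purity is reported per bin. The grid sizes and the form of $\psi$ are illustrative assumptions.
\begin{verbatim}
# Sketch: conditioning on finite-resolution herald bins made of
# Q neighbouring grid points, then computing per-bin purity.
import numpy as np

nb, ns, Q = 16, 6, 3
b1 = np.linspace(-1, 1, nb)[:, None, None]
b2 = np.linspace(-1, 1, nb)[None, :, None]
f  = np.linspace(-1, 1, ns)[None, None, :]
psi = np.exp(-4*(b1 + b2 + 0.5*f)**2 - 0.5*(b1 - b2)**2 - f**2)
V = psi.reshape(nb * nb, ns)

for n in range(ns // Q):
    Vbin = V[:, n*Q:(n+1)*Q]            # grid points in bin n
    p_n = np.sum(np.abs(Vbin)**2)       # relative outcome weight
    rho_n = Vbin @ Vbin.conj().T / p_n  # incoherent sum over bin
    purity = np.real(np.trace(rho_n @ rho_n))
    print("bin", n, "weight", p_n, "purity", purity)
\end{verbatim}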
The entanglement in the state of the heralded bystander photons can be quantified using the negativity of the density matrix \cite{Vidal2002}
\begin{equation}
\mathcal{N}(\hat{\rho})\equiv (\norm{\hat{\rho}^{\Gamma_\textrm{i1}}}-1)/2, \label{eq:negDef}
\end{equation}
where ${\Gamma_\textrm{i1}}$ denotes the partial transpose operation with respect to subsystem i1 and $\norm{\hat{\rho}}\equiv\trace\left[(\hat{\rho}^\dagger\hat{\rho})^{1/2}\right]$. The presence of negative eigenvalues $\mu_i$ of the partially transposed density matrix $\hat{\rho}^{\Gamma_\textrm{i1}}$ implies entanglement, and the negativity can also be expressed as the sum of the absolute values of these negative eigenvalues
\begin{equation}
\mathcal{N}(\hat{\rho}) = \sum_i \left| \mu_i \right| .
\end{equation}
The state is entangled across the bipartition defined by subsystem i1 if the negativity is positive. The negativity gives an upper bound on the amount of entanglement distillable from the state for teleportation \cite{Bouwmeester1997}. An investigation of the behavior of the negativity using simple density matrices is provided in Appendix \ref{app:negativity}.
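As a worked example of Eq.~\eqref{eq:negDef}, the sketch below computes the negativity of a discretized bipartite state by partially transposing subsystem i1 and summing the absolute values of the negative eigenvalues. The anticorrelated toy amplitude is an illustrative assumption, not the simulation output.
\begin{verbatim}
# Sketch: negativity via the partial transpose of a discretized
# bipartite (i1, s2) density matrix.
import numpy as np

d = 6                                   # bins per bystander field
x = np.arange(d)
amp = np.exp(-0.5*(x[:, None] + x[None, :] - (d - 1))**2)
amp /= np.linalg.norm(amp)              # normalized toy JSA
rho = np.einsum("jk,lm->jklm", amp, amp.conj())   # |amp><amp|
rho_pt = rho.transpose(2, 1, 0, 3).reshape(d*d, d*d)
eigs = np.linalg.eigvalsh(rho_pt)
print("negativity:", np.sum(np.abs(eigs[eigs < 0])))
\end{verbatim}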
\section{Numerical Simulation} \label{sec:simulation}
What SFG crystal parameters optimize the entanglement swapping process and produce output biphotons with the highest negativity and purity? In this Section, we answer this question using numerical studies that include exact phase-matching functions. Let the SFG waveguide poling period $\Lambda_{\text{SFG}} = \Lambda$, so the central frequencies of the interacting fields $\{\wpbar, \bar{\omega}_s, \bar{\omega}_i \}$ are the same in all crystals, but allow the SFG waveguide length $L_{\text{SFG}}$ to vary freely. We set the average pump power $P_{\text{avg}}$ such that the probability of a single photon-pair from a source is $|\xi|^2 = 0.1$, calculate $\psi$, use $\psi$ to calculate the herald count rate
\begin{equation}
R_H = (2 \pi)^3 \Delta \wbone \, \Delta \wbtwo \, \Delta \wsfg \sum_{j,k,l} |\psi(j,k,l)|^2 R_R,
\end{equation}
where $R_R$ is the pump laser repetition rate, and calculate both negativities and purities via the appropriate density matrices.
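A short sketch of the herald-rate bookkeeping implied by the sum above; the amplitude array is a placeholder to be filled from Eq.~\eqref{eq:psiFull}, and the grid spacings are those of Table~\ref{tab:realisticParameters}.
\begin{verbatim}
# Sketch: herald (swap-success) rate from a discretized psi.
import numpy as np

dw_b1 = dw_b2 = 4.544e12      # rad/s
dw_sfg = 1.287e12             # rad/s
R_R = 1.0e9                   # pump repetition rate (Hz)
psi = np.zeros((41, 41, 24), dtype=complex)  # fill via Eq. (psiFull)
R_H = ((2*np.pi)**3 * dw_b1 * dw_b2 * dw_sfg
       * np.sum(np.abs(psi)**2) * R_R)
print("R_H (events/s):", R_H)
\end{verbatim}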
We model sources 1 and 2 in Fig.\ \ref{fig:setup} as periodically-poled potassium titanyl phosphate (KTP) crystal waveguides with length $L$ and poling period $\Lambda=8.33\, \mu$m. The type-II phase-matching function of the source is
\begin{align}
&\Pi_{\text{source}} = L\, \text{sinc} \left(L \,\Delta k/2 \right) \text{Exp}\left( - i \, L \,\Delta k/2 \right), \label{eq:PMexact} \\
&\Delta k = \frac{n_y (\omega_p) \omega_p}{c} - \frac{n_z (\omega_i) \omega_i}{c} - \frac{n_y (\ws) \ws}{c} - \frac{2 \pi}{\Lambda}, \label{eq:dkSource}
\end{align}
where $n_j (\omega)$ is the frequency-dependent refractive index along the crystallographic axis $j \in \{y, z\}$ \cite{Kato2002}, and the last term in Eq.\ \eqref{eq:dkSource} includes the first-order quasi-phase-matching effects of periodic poling \cite{Fejer1992}. We assume that the only spatial mode excited in all frequency bands of all PPKTP waveguides is the fundamental.
We set the length of the source crystals $L=0.5$ mm so the simulation can be carried out in a reasonable time and then match the pump bandwidth $\sigma_p$ to the approximate phase-matching bandwidth, $\sigma_\pi = \sigma_p =7.7245$ rad/ps. When pumped with a laser that has central wavelength $\bar{\lambda}_p=405.0$ nm ($\bar{\omega}_p = 4.651$ rad/fs), these sources create signal and idler fields at $\bar{\lambda}_s = 609.6$ nm ($\bar{\omega}_s = 3.090$ rad/fs) and $\bar{\lambda}_i = 1207$ nm ($\bar{\omega}_i = 1.561$ rad/fs) respectively. We chose our simulation parameters based on real pump laser systems \cite{Taccor} with the highest repetition rates $R_R$ where appropriate pump power is achievable. Appendix \ref{sec:Simulation} contains further simulation details and the parameters used herein are listed in Table \ref{tab:realisticParameters}.
Assuming a Gaussian pump spectral profile
\begin{equation} \label{eq:PMbandwidth}
A(\omega_p) = \sqrt{\frac{P_{\text{avg}}}{\hslash \omega_p R_R \sigma_p \sqrt{\pi}}} \exp \left[ -\frac{(\omega_p - \bar{\omega}_p)^2}{2 \sigma_p^2} \right],
\end{equation}
with average power $P_{\text{avg}} = 1.380$ W and repetition rate $R_R = 1$ GHz, the joint spectral intensity (JSI, $|\Phi(\wi,\ws)|^2$) of a single source is shown in Fig.\ \ref{fig:sourceJSI}. The average power is set such that the probability of a photon pair being generated in a single source $|\xi|^2=0.1$, which sets the probability of the next-highest-order contribution to $|\xi|^4=0.01$.
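A minimal numerical sketch of the source JSA construction, $\Phi \propto A(\ws+\wi)\,\Pi_{\text{source}}$, using the sinc phase matching of Eq.~\eqref{eq:PMexact}. Here the momentum mismatch is linearized around the phase-matched point with placeholder group-slowness differences; the full simulation instead uses the KTP Sellmeier data of Ref.~\cite{Kato2002}.
\begin{verbatim}
# Sketch: source JSI on a grid; the dispersion model is a
# first-order expansion with placeholder slowness mismatches.
import numpy as np

L = 0.5e-3                        # source length (m)
sig_p = 7.725e12                  # pump bandwidth (rad/s)
w_pbar, w_sbar, w_ibar = 4.651e15, 3.090e15, 1.561e15
dks, dki = 2.0e-10, 5.0e-10       # placeholder k'_p - k'_{s,i} (s/m)

ws = w_sbar + np.linspace(-4, 4, 201) * sig_p
wi = w_ibar + np.linspace(-4, 4, 201) * sig_p
WS, WI = np.meshgrid(ws, wi, indexing="ij")

alpha = np.exp(-(WS + WI - w_pbar)**2 / (2 * sig_p**2))
dk = dks*(WS - w_sbar) + dki*(WI - w_ibar)   # linearized mismatch
# np.sinc(x) = sin(pi x)/(pi x); the overall phase is dropped
# because only |Phi|^2 is needed here.
Pi = L * np.sinc(L * dk / (2 * np.pi))
JSI = np.abs(alpha * Pi)**2
print("JSI peak index:",
      np.unravel_index(JSI.argmax(), JSI.shape))
\end{verbatim}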
\begin{figure}
\caption{$|\Phi(\wi,\ws)|^2$, the joint spectral intensity (JSI) from a single source with the phase matching function and parameters described in Sec.\ \ref{sec:simulation}.}
\label{fig:sourceJSI}
\end{figure}
Guided by insight from the simpler model of Section \ref{sec:toy}, we choose a pump wavelength and poling period to give an anticorrelated input JSA with a narrow width $\Delta_+$ along the sum-frequency axis (set by the pump bandwidth $\sigma_p$ and the phase-matching bandwidth $\sigma_\pi(L)$), and a broad width $\Delta_-$ along the difference-frequency axis (set by dispersion and $\sigma_\pi(L)$). An extreme aspect ratio (e.g.\ $\Delta_-/\Delta_+ \gg 1$), which indicates a large number of time-frequency modes \cite{Nunn2013}, in addition to a small probability amplitude for the vacuum term in Eq.\ \eqref{eq:SPDCstate} are good heuristics for large negativity.
The SFG crystal has the same phase-matching function as given in Eq.\ \eqref{eq:PMexact}-\eqref{eq:dkSource} but with $L \to L_{\text{SFG}}$ and $\omega_p \to \wsfg$. Taking $L_{\text{SFG}} = L= 0.5$ mm, the three-frequency JSI $|\psi(\wbone,\wbtwo,\wsfg)|^2$ is visualized in Fig.\ \ref{fig:eta}. The expected rate at which heralded biphotons are produced with these lengths (whether or not $\wsfg$ is resolved) is $5.2\times10^{-3}$ biphotons/second, corresponding to one heralding event every 3.2 minutes.
\begin{figure*}
\caption{3D Contour surface showing $|\psi(\wbone,\wbtwo,\wsfg)|^2$ with contour plots of the projections on the back planes. The 3D contour surface connects values of one-tenth of the maximum value of $|\psi|^2$. The tilt angle, resulting from the correlation between $\wsfg$ and the sum frequency $\wbone+\wbtwo$, is more apparent in the output JSIs of Fig.\ \ref{fig:outputJSIs}.}
\label{fig:eta}
\end{figure*}
Consider a spectroscopic measurement that resolves $\wsfg$ into 8 possible outcomes, indexed by $n$, each with frequency size $\Delta' = 3.862\times 10^{-3}$ rad/fs, i.e.\ 614.7 GHz. Let the probability-valued spectrum be denoted $p(\omega_{\text{SFG}}^n)$. To account for finite resolution, we use the incoherent sum prescribed by Eq.\ \eqref{eq:binDef} with $Q=3$ grid points. This choice underestimates the purity by a few percent due to discretization error, which means the stated purity values should be understood as lower bounds, but allows a full computational run to be carried out in a reasonable amount of time (see Appendix \ref{sec:Simulation}).
We now allow $L_{\text{SFG}}$ to vary while holding $L$ fixed at 0.5 mm. Fig.\ \ref{fig:spectrumAndRates} shows the $\wsfg$ spectrum and swapping success rate for many values of $L_{\text{SFG}}$. It is clear that $L_{\text{SFG}}$ sets the width and height of the $\wsfg$ spectrum, and thus the count rate. Fig.\ \ref{fig:ResolvedValues} displays the behavior of the negativity $\mathcal{N}$ and purity $\mathcal{P}$ as a function of the $\wsfg$ measurement outcome for several $L_{\text{SFG}}$ values. The broader frequency distributions output from short crystals (due to larger conversion bandwidths) correspond to more gradual changes in the state along the $\wsfg$ axis, which leads to higher purity. Broader distributions correspond to entanglement over more frequency modes and increased negativity. These improvements in negativity and purity trade off against entanglement-swapping success rates, which are higher with longer SFG crystals.
Fig.\ \ref{fig:negativityResolved} shows a correlation between larger $\wsfg$ measurement values and larger negativity values because the distribution of the prepared biphoton is correspondingly broader along the difference-frequency axis. This can be clearly seen by comparing the JSIs for each $\wsfg$ measurement outcome, shown in Fig.\ \ref{fig:outputJSIs}. The sum frequency of each JSI shifts slightly with the $\wsfg$ measurement outcome, so that the total output photon energy equals the total input photon energy.
It is illuminating to compare these entanglement-swapping-prepared biphoton states, which have negativities ranging from $9$ to $19$, to that of the biphoton state from a single SPDC source. Using our design parameters, the negativity of the biphoton state from a single source is $2.89$. If the vacuum contribution were to be eliminated and the source produced only biphotons, then the negativity would increase to $28.9$ (a factor of $1/|\xi|^2$ increase in agreement with the linear relationship given in Eq.~\eqref{eq:toyNegativityScaling}). The vacuum contribution is removed by the swapping process, but the conversion process produces fewer entangled frequency modes within the resulting biphoton than are present in the source, so no outcome exceeds the ideal negativity of the source with the vacuum contribution removed (see Fig.\ \ref{fig:negativityResolved}). The net result is that the probability of having a photon pair in a known time window is increased from 0.1 for a single SPDC source to near unity once detection of a herald signals successful swapping, which also substantially increases the negativity.
\begin{figure*}
\caption{Scaling of the (a) $\wsfg$ spectrum $p(\omega_{\text{SFG}}^n)$ and (b) rate of successful entanglement swapping with the SFG crystal length $L_{\text{SFG}}$.}
\label{fig:probability}
\label{fig:CountRatesVSLsfg}
\label{fig:spectrumAndRates}
\end{figure*}
\begin{figure*}
\caption{Figures of merit with frequency-resolving detection. The (a) negativity $\mathcal{N}$ and (b) purity $\mathcal{P}$ of the heralded biphoton state as a function of the $\wsfg$ measurement outcome, for several values of $L_{\text{SFG}}$.}
\label{fig:negativityResolved}
\label{fig:purityResolved}
\label{fig:ResolvedValues}
\end{figure*}
To compare the purity of biphoton states prepared through entanglement swapping to those prepared by a single SPDC source, it is important to note that the state of a biphoton generated through SPDC depends on the phase $\phi$ of the pump field used to create it. If the phase of the pump field is not resolved through measurement (and it is common practice to not resolve this phase), then the coherence elements between the biphoton and vacuum subspaces are lost \cite{Chou2005}. Appendix \ref{app:negativity} describes this in more detail. The purity of the biphoton state output from a single SPDC source in our design is 0.82. As shown in Fig.\ \ref{fig:purityResolved}, $L_{\text{SFG}}$ can be chosen such that all $\wsfg$ measurement values exceed 0.82, but as $L_{\text{SFG}}$ increases, outcomes with lower purity can occur.
\begin{figure*}
\caption{Comparison of the (a) negativity and (b) purity of output states prepared via either frequency-resolving or frequency non-resolving heralding over five $L_{\text{SFG}}$ values.}
\label{fig:NegativityVSLsfg}
\label{fig:PurityVSLsfg}
\label{fig:valueScalingVLSsfg}
\end{figure*}
The purity and negativity will vary from shot to shot in accordance with the $\wsfg$ measurement outcome. A weighted average over the measurement outcomes
\begin{equation} \label{eq:avgNegativity}
\bar{\mathcal{A}} = \left( \sum_{n=1}^N p(\omega_{\text{SFG}}^n) \mathcal{A}_n \right) \Bigg/ \sum_{n=1}^N p(\omega_{\text{SFG}}^n),
\end{equation}
where $\mathcal{A}$ stands in for either the negativity $\mathcal{N}$ or the purity $\mathcal{P}$, gives the average values, i.e.\ expected performance, over many successful swapping events. If $\wsfg$ is not resolved, then the mixed output state gives negativity and purity values that are the same for each shot. Thus, the expected performance of entanglement swapping with frequency-resolving heralding can be compared to frequency non-resolving heralding by comparing the weighted averages for the frequency-resolving configurations to the unresolved values, as shown in Fig.\ \ref{fig:valueScalingVLSsfg} for many $L_{\text{SFG}}$ values. Frequency-resolving heralding clearly yields superior performance for both purity and negativity.
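As a small worked example of Eq.~\eqref{eq:avgNegativity}, the sketch below forms the outcome-weighted average; the herald spectrum and per-outcome values are placeholders, not simulation results.
\begin{verbatim}
# Sketch: outcome-weighted average of a per-bin figure of merit.
import numpy as np

p_sfg = np.array([0.02, 0.08, 0.20, 0.30, 0.25, 0.10, 0.04, 0.01])
neg_n = np.array([9.0, 10.5, 12.0, 13.5, 15.0, 16.5, 18.0, 19.0])
avg = np.sum(p_sfg * neg_n) / np.sum(p_sfg)
print("weighted-average negativity:", avg)
\end{verbatim}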
The negativities of all configurations in Fig.\ \ref{fig:NegativityVSLsfg} exceed the negativity of the biphoton state output from a single source (2.89), so entanglement swapping purifies (in the entanglement sense) the output state. In contrast, Fig.\ \ref{fig:PurityVSLsfg} shows that frequency non-resolving heralding offers inferior purity compared to a single source for all $L_{\text{SFG}}$ choices. Even though some $\wsfg$ measurement outcomes give biphoton states with purity below that produced by a single source (see Fig.\ \ref{fig:purityResolved}), the average purity is improved for all frequency-resolving heralding configurations considered here. These averages could be further improved by rejecting heralding events that produce biphoton states with undesirable properties, at the cost of production rate.
\begin{figure*}
\caption{Output JSIs of the heralded bystander biphoton with $L_{\text{SFG}} = L = 0.5$ mm, one for each $\wsfg$ measurement outcome.}
\label{fig:outputJSIs}
\end{figure*}
\section{Discussion}
While entanglement swapping with frequency-resolving heralding produces entangled biphotons with superior negativity and purity compared to SPDC sources, the count rates are inferior by roughly 7 orders of magnitude. This is compensated in a sense by creating, through heralding, near-unity probability of having an entangled pair in a known time window. While use of high-repetition-rate pump lasers can improve the count rate, the low probability of success during any given pump pulse remains a substantial challenge. Count rates can be modestly increased through increasing the source crystal lengths $L$ and maximizing $L_{\text{SFG}}$ as allowed by negativity and purity tolerances, but keeping the sources in the low-gain regime with $|\xi|^2 \approx 0.1$ limits these improvements. Additionally, the computational resources required to perform the simulation scale sharply with the source lengths $L$.
Increasing the pump repetition rate $R_R$ to limits set by technical constraints such as achievable pump power and detector recovery times will increase the rate of successful swapping (Eq.\ \eqref{eq:swapRate}). However, this seems less promising than increasing the single-shot success probability while avoiding the false-heralding problem due to indistinguishable inputs discussed in the introduction of Section \ref{sec:simulation}. Thus, investigation of whether four-wave mixing can offer better count rates through higher effective nonlinearity is a natural extension for future research. Investigation of entanglement swapping outside the low-gain regime, where higher-order Hamiltonian terms that we neglect in Eq.\ \eqref{eq:outputState} contribute, may offer higher count rates and the heralded preparation of higher number states.
In summary, we propose a design for a source of heralded time-frequency-entangled photon pairs with high-dimensional frequency-bin encoding and give a detailed description of the mathematical machinery necessary for its characterization. Heralding with frequency-resolving detection produces high-purity ($\mathcal{P} \approx 0.97$) output biphoton states and improves the negativity compared to an SPDC source by roughly a factor of 5 (depending on the exact configuration chosen). The length of the SFG crystal and the size and number of detection bins sets the negativity, purity, and rate of successful entanglement swapping. Shorter SFG crystals offer superior negativity and purity due to broader conversion bandwidth, but inferior count rates, so a balance must be struck for a particular application.
The quantum erasure of which-path information via SFG that is essential for entanglement swapping has recently been identified as a key resource for quantum illumination and entanglement-enhanced metrology \cite{Lloyd08,zhuang17a, zhuang17b}, and the detailed calculations and design considerations we present are pertinent to implementation of those schemes. One promising extension of this work is to produce output biphotons with the same central frequencies for all $\wsfg$ outcomes by deterministically shifting the frequencies of the photons output from entanglement swapping depending on which SFG bin is detected \cite{Wright2017}. Another is to measure the photons produced with this apparatus in a pulse-mode (``temporal mode'') basis \cite{Reddy2015}; this would increase experimental complexity, but may offer even better negativity and purity values due to what appears to be a more natural basis choice. With improved count rates, this scheme would offer an ideal source for distributing entanglement resources over a network.
\section{Count Rates} \label{sec:CountRates}
In this Appendix, we include all constants in our calculations and predict photon pair creation rates for each source (cf.\ \cite{Fiorentino2007}), and for the entire entanglement swapping process. The sum-frequency generation (SFG) interaction Hamiltonian and output state for the single-biphoton subspace from an SPDC source in the low-gain regime are
\begin{align}
&\hat{H}_{\text{SFG}} = \frac{\epsilon_0}{2} \int \diff^3 \mathbf{r} \, d \,\hat{E}_{\text{SFG}}^{(-)} \hat{E}_{\text{s1}}^{(+)} \hat{E}_{\text{i2}}^{(+)} + \text{H.c.} \\
&\ket{\psi}_{k} = \int_0^\infty \diff \omega_{\text{s}k} \diff \omega_{\text{i}k} \Phi_k (\omega_{\text{i}k}, \omega_{\text{s}k}) \hat{a}_{\text{s}k}^\dagger (\omega_{\text{s}k}) \hat{a}_{\text{i}k}^\dagger (\omega_{\text{i}k}) \ket{\text{vac}}, \label{eq:singleSource} \\
&\Phi_k ( \omega_{\text{i}k}, \omega_{\text{s}k}) = b d \frac{(2 \pi)^2}{\sqrt{A_I}} \ell (\omega_{\text{s}k} + \omega_{\text{i}k}) \ell (\omega_{\text{s}k}) \ell (\omega_{\text{i}k}) \nonumber \\
& \quad \quad \times \Pi (\omega_{\text{s}k} + \omega_{\text{i}k}, \omega_{\text{s}k}, \omega_{\text{i}k}) \alpha (\omega_{\text{s}k} + \omega_{\text{i}k})
\end{align}
where $\epsilon_0$ is the permittivity of free space, $\hat{E}_j^{(\pm)}$ are the positive and negative frequency components of the quantized electric field operators, $\text{H.c.}$ is the Hermitian conjugate, $k \in \{1,2\}$ indexes the source, $\ws$ is the angular frequency of the signal photon, $\omega_i$ is the angular frequency of the idler photon, $\omega_p$ is the angular frequency of the pump and energy conservation $\omega_p = \ws + \wi$ is strictly enforced. Here $d$ is the effective nonlinear coefficient set by the material, $\ell (\omega)$ is the electric field per photon, $A_I$ is the effective area of the interaction (defined below) which depends on the transverse spatial distributions $u_j (x,y)$ of the interacting fields $j \in \{p, s, i \}$, $\Pi (\omega_p, \ws, \wi)$ is the phase matching function of the medium, and $\alpha (\omega_p)$ is the spectral pump pulse profile (assumed to have a Gaussian distribution).
Refining those definitions:
\begin{align}
b &=\frac{\epsilon_0}{2 \hslash (2 \pi)^3} \label{eq:b} \\
\ell (\omega) &= \sqrt{\frac{\hslash \omega}{2 \epsilon_0 n (\omega) c}}\\
\Pi(\omega_p, \ws, \wi) &= L \, \sinc \left[ L \Delta k (\omega_p, \ws, \wi) / 2 \right] \nonumber \\
& \times \text{Exp}[-i\, L \Delta k (\omega_p, \ws, \wi) / 2] \\
A_I = 1\Big / \Big( &\int_{-\infty}^\infty \diff x \diff y \,u_p (x,y) u_i^* (x,y) u_s^* (x,y) \Big)^{2} \\
\Delta k (\omega_p, \ws, \wi) &= k(\ws) + k(\wi) - k(\omega_p)+ \frac{q 2 \pi}{\Lambda} \\
\alpha (\omega_p) &= \sqrt{\frac{P_{\text{ave}}}{\hslash \omega_p \sigma_p \sqrt{\pi} R_R}} \exp \left[- \frac{(\omega_p - \bar{\omega}_p)^2}{2 \sigma_p^2} \right]
\end{align}
where $n(\omega)$ is the refractive index experienced by the photon in the source along the relevant polarization axis, $L$ is the longitudinal length of the nonlinear material that constitutes the source, $\sinc(x) = \sin(x)/x$ with $\sinc(0)=1$, $\Delta k$ is the momentum mismatch along the waveguide ($z$) axis, $q$ is the order of the quasi-phase matching, and we use $q=1$. Additionally, $\Lambda$ is the crystal poling period, $\hslash$ is Planck's constant divided by $2\pi$, $P_{\text{ave}}$ is the average pump power, $R_R$ is the repetition rate, $\bar{\omega}_p$ is the central frequency of the pump, and $\sigma_p$ is the spectral bandwidth of the pump pulse.
Our design produces output photons with non-degenerate frequencies, and we use signal (idler) to refer to the higher (lower) frequency photon in accordance with historical convention. Given a pump pulse, the probability of photon-pair creation from a single source (with the source index $k$ suppressed) is given by
\begin{widetext}
\begin{align}\label{eq:sourceProbExact}
|\xi|^2 = \braket{\psi | \psi} = \frac{(2 \pi)^4 b^2 d^2}{A_I} &\int_0^\infty \diff \ws \diff \wi \,\ell^2 (\ws + \wi) \ell^2 (\ws ) \ell^2 (\wi) |\alpha (\ws + \wi)|^2 |\Pi (\ws + \wi,\ws, \wi)|^2 \\
= \frac{ d^2 L^2 P_{\text{ave}}}{2^7 (2 \pi)^2 c^3 \sqrt{\pi} \epsilon_0 R_R \sigma_p A_I} &\int_0^\infty \diff \ws \diff \wi \frac{1}{n_y(\ws+\wi) n_y(\ws) n_z(\wi)}\exp \left[ - \frac{(\ws + \wi - \bar{\omega}_p)^2}{\sigma_p^2} \right] \times \nonumber \\
& \, {\sinc}^2 \left[ \frac{L \Delta k (\ws + \wi, \ws, \wi)}{2} \right].
\end{align}
\end{widetext}
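As an illustration of how Eq.~\eqref{eq:sourceProbExact} can be evaluated numerically, the sketch below performs the two-dimensional integral with a simple Riemann sum. The constant refractive indices and the linearized momentum mismatch are crude placeholders standing in for the Sellmeier model of Ref.~\cite{Kato2002}, so the resulting number is only indicative of the procedure.
\begin{verbatim}
# Sketch: pair-creation probability |xi|^2 by 2-D numerical
# integration of Eq. (sourceProbExact); placeholder dispersion.
import numpy as np
from scipy.constants import c, epsilon_0, pi

d_eff = 2 * 3.92e-12 / pi         # m/V, d = 2 d_24 / pi
L = 0.5e-3                        # m
P_ave, R_R = 1.380, 1.0e9         # W, Hz
sig_p = 7.725e12                  # rad/s
A_I = 15e-12                      # m^2
w_pbar, w_sbar, w_ibar = 4.651e15, 3.090e15, 1.561e15
n_y = n_z = 1.8                   # placeholder constant indices
dks, dki = 2.0e-10, 5.0e-10       # placeholder slowness mismatch (s/m)

ws = w_sbar + np.linspace(-4, 4, 401) * sig_p
wi = w_ibar + np.linspace(-4, 4, 401) * sig_p
WS, WI = np.meshgrid(ws, wi, indexing="ij")
dk = dks*(WS - w_sbar) + dki*(WI - w_ibar)
integrand = (np.exp(-(WS + WI - w_pbar)**2 / sig_p**2)
             * np.sinc(L * dk / (2*pi))**2 / (n_y**2 * n_z))
prefac = (d_eff**2 * L**2 * P_ave
          / (2**7 * (2*pi)**2 * c**3 * np.sqrt(pi)
             * epsilon_0 * R_R * sig_p * A_I))
xi2 = prefac * integrand.sum() * (ws[1]-ws[0]) * (wi[1]-wi[0])
print("|xi|^2 (placeholder dispersion):", xi2)
\end{verbatim}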
Each SPDC source creates an output state of the form given in Eq. \eqref{eq:SPDCstate}, so the full input is the product state $\ket{\Psi}_1 \ket{\Psi}_2$. The annihilation operators in the SFG Hamiltonian given in Eq.\ \eqref{eq:Hamiltonian} remove contributions where only a single source produces photons. The lowest-order term of the input state that contributes to the SFG output state, using the configuration as shown in Fig.\ \ref{fig:setup}, is
\begin{align} \label{eq:inputStateA}
\ket{\Psi}_{\text{in}} = &\int_0^\infty \diff \waone \diff \watwo \diff \wbone \diff \wbtwo \, \Phi_1 (\wbone, \waone) \Phi_2 (\watwo, \wbtwo) \nonumber \\
& \cdot \hat{a}_{\text{a1}}^\dagger (\waone) \hat{a}_{\text{a2}}^\dagger (\watwo) \hat{b}_{\text{b1}}^\dagger (\wbone) \hat{b}_{\text{b2}}^\dagger (\wbtwo) \ket{\text{vac}} ,
\end{align}
where the argument ordering in the $\Phi$ functions indicates that the active photons directed into the SFG element are the signal from source 1 and the idler from source 2. We discuss the effects of higher-order terms at the end of this Appendix.
Eq.\ \eqref{eq:inputStateA} assumes that the pumps for each source are phase synchronized, because the pair-creation process acquires a phase imprint from the pump; if the pump lasers are not phase synchronized, phase diffusion introduces a relative phase shift between the photons sent into the SFG element. This phase difference will not affect the conversion efficiency, but the output biphoton component of the state will acquire a phase shift that varies from shot to shot. Locking the pump phases to each other solves this problem.
Eq.\ \eqref{eq:threeFreqState}, representing the state output from the SFG crystal, can be rewritten as
\begin{equation} \label{eq:threeFreqStateShort}
\ket{\Psi}_\text{out}= \ket{\Psi}_\text{in}-\ket{\psi}_{\text{SFG}},
\end{equation}
where $\ket{\psi}_{\text{SFG}}$ is the state produced by successful SFG. The corresponding probability of successful SFG is
\begin{align}
&|\Xi|^2 = \braket{\psi | \psi}_{\text{SFG}} \nonumber \\
= (2 \pi)^3 & \int_0^\infty \diff \wsfg \diff \wbone \diff \wbtwo \, |\psi (\wbone,\wbtwo,\wsfg)|^2, \label{eq:Psfg}
\end{align}
where
\begin{widetext}
\begin{equation} \label{eq:psiFull}
\begin{split}
\psi(\wbone,\wbtwo,\wsfg) = b d \frac{(2 \pi)^3}{\sqrt{A_I}}\int_0^\infty \diff \watwo \, \ell(\wsfg) \ell(\wsfg - \watwo) \ell(\watwo) \Pi(\wsfg-\watwo, \watwo, \wsfg)\\ \times \Phi_1(\wbone,\wsfg-\watwo) \Phi_2(\watwo,\wbtwo).
\end{split}
\end{equation}
\end{widetext}
The count rate for successful swapping is then
\begin{equation} \label{eq:swapRate}
R_H = |\Xi|^2 R_R.
\end{equation}
The higher-order contributions to the input state where both sources generate photon pairs and at least one source generates more than one photon pair are selected against by the fail detector. The most likely contribution to this is two pairs generated in one source and a single pair generated in the other, which triggers the fail detector whether or not an SFG photon is generated from two of the three input photons. In the case where both sources produce the same number of photon pairs and that number is greater than one, it is possible for all input photons to be converted to SFG photons, which would not be detected by the fail detector, but is likely to trigger two simultaneous detections in the spectrometer. Though this contribution is negligible in our configuration (when $L_{\text{SFG}} = L = 0.5$ mm, the rate of false events from the leading-order term is $|\Xi|^4 R_R = 4.10 \times 10^{-14}$ events/sec), using SFG media with higher effective nonlinearities could result in this contribution being non-negligible. The use of a herald detection system that resolves the number of SFG photons in each frequency bin protects against this pitfall.
The ability of the fail detector to suppress multi-photon contributions is limited by its quantum efficiency $\gamma$. Given a herald detection, the leading-order probability that an extra photon-pair was generated in a source, but the remaining active photon was not detected by the fail detector is
\begin{equation}
P^{\text{swap}}_{\text{multi}} = (1 - \gamma) |\xi|^2.
\end{equation}
A superconducting nanowire single photon detector with $\gamma \approx 0.9$ and our scheme's $|\xi|^2 = 0.1$ gives $P^{\text{swap}}_{\text{multi}} = 0.01$, which means the output state generated after a herald detection has approximately 99\% entangled biphoton probability and 1\% multi-photon probability. In comparison, a single SPDC source generates a state with approximate probabilities of 10\% entangled biphoton, 1\% multi-photon, and 89\% vacuum. Larger quantum efficiency is better, but even in the worst case limit where $\gamma \to 0$, the state prepared by our entanglement swapping scheme has 90\% entangled biphoton probability, which is a substantial improvement over the SPDC state.
\section{Numerical Simulation Details} \label{sec:Simulation}
We use Mathematica \cite{Mathematica} and Matlab \cite{Matlab} with the QETLAB toolbox \cite{QETLAB} to perform the numerical calculations presented in this paper. Table \ref{tab:realisticParameters} gives the parameters used in our presented calculations. $\psi$ is calculated from Eq.\ \eqref{eq:psiFull} with the integral evaluated numerically. The {\tt integrationPoints} parameter in Table \ref{tab:realisticParameters} specifies how many points are used in the trapezoidal-rule evaluation of this integral.
The number of entries in the full three-frequency density matrix scales quickly with the number of signal, idler, and SFG frequency grid elements as
\begin{equation}
N_{\text{total}}= (N_s \times N_i \times N_{\text{SFG}})^2.
\end{equation}
We make efficient use of computational resources with utilization of sparse matrices, vectorized code, and direct calculation of the post-frequency-measurement density matrix of Eq.\ \eqref{eq:RhoMeasured}, which contains only
\begin{equation} \label{eq:nMeasured}
N_{\text{measured}}= N_{\text{SFG}} \times (N_s \times N_i)^2
\end{equation}
elements. Longer crystal lengths ($L,L_{\text{SFG}}$) correspond to narrower phase-matching bandwidths, which in turn require finer frequency grid spacing. The negativity must be calculated in the signal/idler basis and entangled JSIs are oriented along diagonals, so this finer spacing requires more points in both the signal and idler directions. Memory requirements scale steeply with resolution improvement, \emph{e.g.\ }doubling the number of frequency grid points for both the signal and idler frequency grids requires 16 times more total memory. Thus, longer crystal lengths require substantially more memory and processor time for computation of purities and negativities. The parameters in Table \ref{tab:realisticParameters} are chosen to be realistic while allowing the calculations to complete in a reasonable amount of time. A full computational run for all data presented herein completes in 40 hours on a workstation with two 3.06 GHz, 6-core Xeon processors and 96 GB of RAM.
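A back-of-the-envelope sketch of this memory scaling, assuming dense storage with 16-byte complex entries; the grid sizes are illustrative.
\begin{verbatim}
# Sketch: memory estimates for dense density-matrix storage.
N_s = N_i = 41
N_sfg = 24
bytes_per_entry = 16                       # complex128
N_total = (N_s * N_i * N_sfg)**2
N_measured = N_sfg * (N_s * N_i)**2        # Eq. (nMeasured)
print("full matrix:      %.1f GB" % (N_total * bytes_per_entry / 1e9))
print("post-measurement: %.2f GB" % (N_measured * bytes_per_entry / 1e9))
\end{verbatim}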
\begin{table}[t]
\caption{Parameters used in the presented simulation.}
\begin{ruledtabular}
\begin{tabular}{ll}
\textbf{Parameter} &\textbf{Value} \\ \hline
Source length, $L$ & 0.50 mm \\
Central pump frequency, $\bar{\omega}_p$ & 4.651 rad/fs \\
Central signal frequency, $\bar{\omega}_s$ & 3.090 rad/fs \\
Central idler frequency, $\bar{\omega}_i$ & 1.561 rad/fs \\
Pump Gaussian bandwidth, $\sigma_p$ & 7.725 rad/ps \\
SFG Frequency Spacing, $\Delta \wsfg$ & 1.287 rad/ps \\
Frequency Spacings, $\Delta \ws = \Delta \omega_i$ & 4.544 rad/ps \\
Poling Periods, $\Lambda=\Lambda_{\text{SFG}}$ & 8.33 $\mu$m \\
Average pump power, $\Pavg$ & 1.380 W \\
Pump repetition rate, $R_R$ & 1.0 GHz \\
Nonlinear parameter, $d_{24}$ & 3.92 pm/V\\
Effective nonlinearity, $d$ & $2 d_{24}/\pi$ pm/V\\
Effective area, $A_I$ & 15 $\mu\textrm{m}^2$ \\
Grid points per outcome, $Q$ & 3 \\
{\tt integrationPoints} & 300 \\
\end{tabular}
\end{ruledtabular}
\label{tab:realisticParameters}
\end{table}
The step size for bystander frequencies is $\Delta \ws=\Delta \omega_i=\sigma_p / 1.7$, while $\Delta \omega_\text{SFG} = \sigma_p / 6$ is set for finer resolution. The SFG spectroscopic pixel bin size is set to $\Delta' = 3.862$ rad/ps (614.7 GHz), and includes $Q=3$ points from the underlying SFG frequency grid. $d_{24}$ is the nonlinear parameter for type-II phase-matching with the pump and signal polarized along the crystallographic y-axis and the idler polarized along the crystallographic z-axis. The pump bandwidth is set to match the phase-matching bandwidth for $L_{\text{SFG}}=L=0.5$ mm, which corresponds to $\sigma_p = \sigma_\pi = 7.725$ rad/ps.
\section{Negativity Behavior} \label{app:negativity}
In this Appendix, we adjust a simple model to give a sense of how the negativity behaves. The four standard entangled two-qubit Bell states, \emph{e.g.} $(\ket{00} - \ket{11})/\sqrt{2}$, all have negativity 1/2 \cite{Rangamani2014}.
To investigate the behavior of the negativity for higher-dimensional quantum information encoding, we model the state emitted by a photon-pair source that directs one photon each to two parties, Alice ($A$) and Bob ($B$), with respective photon creation operators $\hat{a}^\dagger$ and $\hat{b}^\dagger$, as
\begin{equation}\label{eq:toyState}
\ket{\psi} = \sqrt{1-\eta} \ket{\text{vac}} + \sqrt{\eta} e^{i \phi} \sum^N_{j,k} \Psi_{j,k} \hat{a}^\dagger_{j} \hat{b}^\dagger_{k} \ket{\text{vac}},
\end{equation}
where $\phi$ is the pump phase, $j$ and $k$ are frequency mode labels with integer values in the range $[1,N]$, and $\Psi_{j,k}$ is the complex-valued discretized joint spectral amplitude. For simplicity, we use a two-level model, which is good for approximating an SPDC source when $\eta \ll 1$. For convenience, we also introduce the shorthand notation
\begin{equation}
\ket{1}_A \ket{1}_B = \sum^N_{j,k} \Psi_{j,k} \hat{a}^\dagger_{j} \hat{b}^\dagger_{k} \ket{\text{vac}},
\end{equation}
where the frequency labels have been suppressed on the left-hand side of the equality.
As pointed out in the supplementary material of Chou \emph{et al.}, photon-pair creation processes such as SPDC are sensitive to the phase of the pump used to generate them \cite{Chou2005}. Naively forming the density matrix $\rho = \ket{\psi} \bra{\psi}$ retains the coherence terms $\ket{1}_A \ket{1}_B \bra{\text{vac}}_{AB} + \ket{\text{vac}}_{AB} \bra{1}_A \bra{1}_B$. In this case the vacuum could be used to coherently transfer quantum information, and plotting the negativity as a function of $\eta$ (see Fig.\ \ref{fig:toyCoherent}) shows a turn-around point where the negativity decreases with increasing $\eta$. As $\eta \to 1$, the vacuum mode probability goes to zero. This reduction in the number of excited modes offers an intuitive explanation for the turn-around behavior.
\begin{figure}
\caption{Negativity as a function of pair-production efficiency $\eta$. Coherences between the vacuum and the biphoton subsystem are included. Straight lines connect calculated points to serve as an eye guide.}
\label{fig:toyCoherent}
\end{figure}
In a real system, this coherence is preserved if the phase of the pump is measured, but decays at very fast optical frequencies otherwise. If the pump phase is not resolved, $\phi$ is traced out and the coherences between the vacuum and one-pair subsystem vanish, yielding
\begin{equation} \label{eq:rhoCombo}
\rho = (1-\eta) \ket{\text{vac}}_{AB} \bra{\text{vac}}_{AB} + \eta \ket{1}_A \ket{1}_B \bra{1}_A \bra{1}_B.
\end{equation}
Using a maximally-entangled frequency-anticorrelated state,
\begin{equation}
\Psi_{j,k} = \delta_{j,N+1-k}/\sqrt{N},
\end{equation}
where $\delta_{a,b}$ is the Kronecker delta, the negativity of the incoherent combination of Eq.\ \eqref{eq:rhoCombo} is shown in Fig.\ \ref{fig:toyNmode} for many values of the number of modes $N$, and follows the simple expression
\begin{equation} \label{eq:toyNegativityScaling}
\mathcal{N} = \frac{N-1}{2} \eta,
\end{equation}
which agrees with the Bell state negativity for $N=2$ and $\eta=1$. Thus, if quantum information is encoded in frequency bins, the number of bins chosen will influence the negativity. The negativity is not an intrinsic property of the continuous-variable state prepared by the SFG conversion process, but depends on the discretization imposed in detection of the herald and biphoton.
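As an independent cross-check of Eq.\ \eqref{eq:toyNegativityScaling}, the short Python/NumPy sketch below (an illustration of ours, not the QETLAB code used for the results in the main text) constructs the state of Eq.\ \eqref{eq:rhoCombo} with the Kronecker-delta $\Psi_{j,k}$ above and evaluates the negativity directly from the partial transpose; the printed values reproduce $(N-1)\eta/2$.
\begin{verbatim}
import numpy as np

def negativity(rho, d_a, d_b):
    """Sum of |negative eigenvalues| of the partial transpose over subsystem B."""
    r = rho.reshape(d_a, d_b, d_a, d_b)          # indices (a, b, a', b')
    r_pt = r.transpose(0, 3, 2, 1).reshape(d_a * d_b, d_a * d_b)
    evals = np.linalg.eigvalsh(r_pt)
    return float(-evals[evals < 0.0].sum())

def toy_state(n_modes, eta):
    """Incoherent mixture of Eq. (rhoCombo) with the maximally entangled,
    frequency-anticorrelated Psi_{j,k}; local basis = {vacuum, modes 1..N}."""
    d = n_modes + 1
    vac = np.zeros(d * d); vac[0] = 1.0          # |vac>_A |vac>_B
    pair = np.zeros(d * d)
    for j in range(1, n_modes + 1):              # Psi_{j,k} = delta_{j,N+1-k}/sqrt(N)
        pair[j * d + (n_modes + 1 - j)] = 1.0 / np.sqrt(n_modes)
    return (1 - eta) * np.outer(vac, vac) + eta * np.outer(pair, pair)

for n in (2, 5, 10):
    for eta in (0.2, 1.0):
        print(n, eta, negativity(toy_state(n, eta), n + 1, n + 1), (n - 1) * eta / 2)
\end{verbatim}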
\begin{figure*}
\caption{Negativity $\mathcal{N}$ of the incoherent state of Eq.\ \eqref{eq:rhoCombo} as a function of pair-production efficiency $\eta$, for several values of the number of modes $N$.}
\label{fig:toyNmode}
\end{figure*}
\end{document}
\begin{document}
\begin{center}
{\large \textbf{On the study of cellular automata on modulo-recurrent words}}\\
\vspace*{1cm}
Moussa Barro \\
\footnotesize {\textit{D\'{e}partement de Mathématiques, UFR-SEA\\
Université Nazi BONI\\
Bobo-Dioulasso, Burkina Faso}\\
[email protected] }\\
01 BP 1091 Bobo-Dioulasso\\
\vspace*{0.5cm}
K. Ernest Bognini\\
\footnotesize{\textit{Centre Universitaire de Kaya (CU-Kaya)\\
Universit\'{e} Joseph KI-ZERBO\\
Ouagadougou, Burkina Faso}\\
[email protected]}\\
03 BP 7021 Ouagadougou\\
\vspace*{0.5cm}
Boucar\'{e} Kient\'{e}ga\\
\footnotesize{\textit{Institut Universitaire Professionnalisant, IUP\\
Universit\'{e} de D\'{e}dougou, Burkina Faso}\\
[email protected]
}\\
BP 176 D\'{e}dougou\\
\begin{abstract}
\noindent
In this paper, we study a class of cellular automata (CA), called stable cellular automata (SCA), that preserve modulo-recurrence, stability by reflection and richness. After applying these automata to Sturmian words, we establish some combinatorial properties of the resulting words. Next, the classical and palindromic complexity functions of these words are determined. Finally, we show that these words are $2$-balanced and we establish their abelian complexity function.
\\[2mm]
{\textbf{Keywords:} cellular automata (CA), modulo-recurrent words, Sturmian words, palindrome, complexity function.
\\[2mm]
{\textbf{2020 Mathematics Subject Classification:} 37B15, 68Q80, 68R15, 11B85
}
}
\end{abstract}
\end{center}
\section{Introduction}
A cellular automaton is a collection of cells that evolves according to a precise set of rules, producing a new generation of cells at each step. These automata were introduced in \cite{b0} with the objective of building dynamical systems capable of modeling complex self-reproduction phenomena. Later, in the 1970s, the concept was popularized by the work of John Horton Conway with his famous \emph{Game of Life} on two-dimensional cellular automata (CA). Thus, CA have become a multidisciplinary field of study, ranging from physics to computer science and from biology to mathematics.
Modulo-recurrent words are recurrent words in which any factor appears at all positions modulo its length. Examples include Sturmian words and the Champernowne word. They were introduced in $\cite{KT-f}$ and intensively studied in $\cite{BKT, CKT, b23}$.
A palindrome is a word which reads the same from left to right as from right to left. The study of palindromes in combinatorics on words allows the characterization of some infinite words (see $\cite{b7,b9,b24}$).
Given a finite or infinite word, the complexity function $p$ of this word counts the number of distinct factors of each given length in the word. Its study allows one to characterize some families of infinite words $\cite{b6}$. This notion has also been used to establish many characterizations and various properties of Sturmian words (see $\cite{b27, b5, b20,b28,b12,b16}$). Depending on the kind of factors considered, we distinguish several types of complexity functions: palindromic, abelian, etc. The palindromic complexity function counts the number of distinct palindromic factors of each given length in the word. As for the abelian complexity function, it counts the number of Parikh vectors of factors of each given length in the word; it was intensively studied in $\cite{BKT-p, BKT}$. These two notions allow us to characterize Sturmian words $\cite{b11}$.
In this work, we study combinatorial properties of infinite words obtained by applying CA. The paper is organized as follows. After giving some definitions and notation, we recall some properties of Sturmian and modulo-recurrent words in Section 2. Next, we apply CA to infinite words and show that these automata preserve some properties such as modulo-recurrence and periodicity in Section 3. In Section 4, we define a class of CA called stable cellular automata (SCA) and we establish that they preserve stability by reflection and richness. Lastly, we carry out the combinatorial study of the words obtained by applying these SCA to Sturmian words in Section 5.
\section{Preliminaries}
\subsection{ Definitions and notation}
An alphabet $\mathcal{A}$ is a finite set whose elements are called letters. A word is a finite or infinite sequence of elements over $\mathcal{A}$. We denote by $\mathcal{A}^\ast$ the set of finite words over $\mathcal{A}$ and by $\varepsilon$ the empty word. For all $u\in \mathcal{A}^*$, $|u|$ denotes the length of $u$ and, for every letter $x$ of $\mathcal{A}$, $|u|_x$ denotes the number of occurrences of $x$ in $u$. A word $u$ of length $n$ consisting of a single letter $x$ is simply denoted $u=x^n$; by convention $x^0=\varepsilon$. Let $u=u_1u_2 \dotsb u_n$ be a finite word with $u_i\in \mathcal{A}$ for all $i\in\left\{1,2,\dots ,n\right\}$. The word $\overline{u}=u_n\dotsb u_2u_1$ is called the reflection of $u$. Given two finite words $u$ and $v$, we have $\overline{uv}=\overline{v}\,\overline{u}$. The word $u$ is called a palindrome if $\overline{u}=u$.
We denote by $\mathcal{A}^{\omega}$ (respectively, $\mathcal{A}^\infty=\mathcal{A}^*\cup \mathcal{A}^{\omega}$) the set of infinite (respectively, finite and infinite) words.
An infinite word $\textbf{u}$ is ultimately periodic if there are two words $v\in \mathcal{A}^*$ and $w\in \mathcal{A}^+$ such that $\textbf{u}=vw^\omega$; if moreover $v=\varepsilon$, then $\textbf{u}$ is said to be periodic. The word $\textbf{u}$ is said to be recurrent if each of its factors appears infinitely often. The $n$-th power of a finite word $w$ is denoted by $w^n$.
Let $\textbf{u}\in \mathcal{A}^\infty$ and $v\in \mathcal{A}^*$. We say that $v$ is a factor of $\textbf{u}$ if there exist $u_1\in \mathcal{A}^*$ and $\textbf{u}_2\in \mathcal{A}^\infty$ such that $\textbf{u}=u_1v\textbf{u}_2$. In other words, we say that $\textbf{u}$ contains $v$.
We also say that $u_1$ is a prefix of $\textbf{u}$ and we write $u_1=\text{Pref}_{|u_1|}(\textbf{u})$. If in particular $\textbf{u} \in \mathcal{A}^*$, then $\textbf{u}_2$ is said to be a suffix of $\textbf{u}$.
Let $w$ be a factor of an infinite word $\textbf{u}$ and $x$ a letter of $\mathcal{A}$. Then, $L_n(\textbf{u})$ denotes the set of factors of length $n$ of $\textbf{u}$ and $L(\textbf{u})$ that of all factors of $\textbf{u}$. The letter $x$ is said to be a left (respectively, right) extension of $w$ if $xw$ (respectively, $wx$) belongs to $L(\textbf{u})$. Let us denote by $\partial^-w$ (respectively, $\partial^+w$) the number of left (respectively, right) extensions of $w$ in $\textbf{u}$. When $\partial^+w=k$ with $k\geq 2$, $w$ is said to be right $k$-prolongable. In the same way, we can define the notion of a left $k$-prolongable factor. A factor $w$ of $\textbf{u}$ is said to be right (respectively, left) special if $\partial^+w>1$ (respectively, $\partial^-w>1$). Any factor that is both right and left special is called a bispecial factor.
Given an infinite word $\textbf{u}$, the map from $\mathbb{N}$ to $\mathbb{N^*}$ defined by $p_\textbf{u}(n) = \# L_n(\textbf{u})$ is called the complexity function of $\textbf{u}$, where $\# L_n(\textbf{u})$ denotes the cardinality of $L_n(\textbf{u})$. This function is related to the special factors by the relation (see \cite{b8}):
$$\hspace{0 cm} p_\textbf{u}(n+1)-p_\textbf{u}(n)= \displaystyle\sum_{w\in L_n(\textbf{u})} (\partial^+(w)-1). $$
We denote by $\text{Pal}_n(\textbf{u})$ the set of palindromic factors of length $n$ and by $\text{Pal}(\textbf{u})$ the set of all palindromic factors of $\textbf{u}$. The palindromic complexity function of $\textbf{u}$, denoted $p^{al}_\textbf{u}$, is the map from $\mathbb{N}$ to $\mathbb{N}$ which counts the number of distinct palindromic factors of length $n$ contained in $\textbf{u}$:
$$p^{al}_\textbf{u}(n) = \# \left\{w\in L_n(\textbf{u}) : \overline{w}=w \right\}.$$
When, for all $w\in L(\textbf{u})$, we have $\overline{w}\in L(\textbf{u})$, the word $\textbf{u}$ is said to be stable by reflection.
Let $w$ be a factor of an infinite word $\textbf{u}$ over an alphabet $\mathcal{A}_q=\{a_{1}, a_{2}, \cdots, a_{q}\}$. Then, the $q$-tuple
$\chi (w)=(| w |_{a_{1}}, | w|_{a_{2}}, \cdots, | w |_{a_{q}})$ is called the Parikh vector of $w$. The set of Parikh vectors of factors of length $n$ in $\textbf{u}$ is denoted by: $$\chi_{n}(\textbf{u})=\{\chi(w): w\in L_{n}(\textbf{u})\}.$$ The abelian complexity function of $\textbf{u}$ is the map from $\mathbb{N}$ to $\mathbb{N}^*$ defined by: $$\rho^{ab}_\textbf{u}(n)= \# \chi_{n}(\textbf{u}).$$
The window complexity function of $\textbf{u}$ is the map $p^f_\textbf{u}$ from $\mathbb{N}$ to $\mathbb{N}^*$ defined by $$p^f_\textbf{u}(n)=\#\left\{u_{kn}u_{kn+1}\cdots u_{n(k+1)-1} : k\geq 0\right\}.$$
The shift $S$ is the map on $\mathcal{A}^\omega$ which erases the first letter of a given word: if $\textbf{u}=u_0u_1u_2\cdots$, then $S(\textbf{u})=u_1u_2u_3\cdots$. A substitution $\varphi$ is a map from $\mathcal{A}^*$ into itself such that $\varphi (uv)=\varphi(u)\varphi(v)$ for any $u$, $v\in \mathcal{A}^*$.
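To make these definitions concrete, here is a small Python sketch (our own illustration; the example word below is arbitrary) computing the complexity, palindromic complexity, abelian complexity and window complexity of a finite word.
\begin{verbatim}
def factors(u, n):
    """Set L_n(u) of factors of length n of the finite word u."""
    return {u[i:i + n] for i in range(len(u) - n + 1)}

def p(u, n):                                   # complexity function p_u(n)
    return len(factors(u, n))

def p_al(u, n):                                # palindromic complexity
    return sum(1 for w in factors(u, n) if w == w[::-1])

def rho_ab(u, n, alphabet="ab"):               # abelian complexity (Parikh vectors)
    return len({tuple(w.count(x) for x in alphabet) for w in factors(u, n)})

def p_f(u, n):                                 # window complexity (positions kn)
    return len({u[k * n:(k + 1) * n] for k in range(len(u) // n)})

u = "abaababaabaababaababa"                    # an arbitrary finite example word
print([p(u, n) for n in range(1, 5)], [p_al(u, n) for n in range(1, 5)],
      [rho_ab(u, n) for n in range(1, 5)], [p_f(u, n) for n in range(1, 5)])
\end{verbatim}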
\subsection{Sturmian words and modulo-recurrent words}
In this subsection, we recall some properties of Sturmian words and modulo-recurrent words that will be used in the following. In this part, the alphabet is $\mathcal{A}_{2}=\left\{a,b\right\}$.
\begin{definition}
An infinite word $\textbf{u}$ over $\mathcal{A}_{2}$ is said to be Sturmian if for any integer $n,\ p_\textbf{u}(n)=n+1$.
\end{definition}
The best-known Sturmian word is the famous Fibonacci word. It is the fixed point of the substitution $\varphi$ defined over $\mathcal{A}_{2}^\ast$ by:
$$\varphi(a)=ab \ \textrm{and} \ \varphi(b)=a.$$
It is denoted:
$$\mathbf{F}=\displaystyle \lim_{n\rightarrow \infty}\varphi^n(a).$$
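For illustration, the following short Python sketch (ours) generates a prefix of $\mathbf{F}$ by iterating $\varphi$ and checks the Sturmian complexity $p_{\mathbf{F}}(n)=n+1$ on that prefix.
\begin{verbatim}
def phi(w):
    """The substitution phi: a -> ab, b -> a."""
    return "".join("ab" if c == "a" else "a" for c in w)

w = "a"
for _ in range(12):                  # |phi^12(a)| = 377, a long enough prefix
    w = phi(w)
print(w[:21])                        # abaababaabaababaababa

factors = lambda u, n: {u[i:i + n] for i in range(len(u) - n + 1)}
print([len(factors(w, n)) for n in range(1, 8)])   # expected n + 1 for each n
\end{verbatim}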
\begin{definition}
A Sturmian word is said to be $a$-Sturmian (respectively, $b$-Sturmian) when it contains $ a^2 $ (respectively, $ b^2$).
\end{definition}
\begin{definition} A word $\textbf{u}=u_0u_1u_2 \dotsb$ is said to be modulo-recurrent if any factor of $\textbf{u}$ appears in $\textbf{u}$ at all positions modulo $i$, for all $i\geq 1$.
\end{definition}
\begin{definition} Let $w$ be a factor of some infinite word $\textbf{u}$. We say that $w$ is a window factor when it appears in $\textbf{u}$ at a position that is a multiple of its length.
\end{definition}
\begin{Proposition}$\cite{CKT}$\label{propo-comp-mod-rec}
Let $\textbf{\emph{u}}$ be a modulo-recurrent word. Then, for all integers $n$, the set of window factors of length $n$ in $\textbf{\emph{u}}$ is equal to $L_n(\textbf{\emph{u}})$.
\end{Proposition}
\begin{definition} An infinite word $\textbf{u}$ is said to be $\alpha$-balanced if $\alpha$ is the smallest integer such that for any pair $(v, w)$ of factors of $\textbf{u}$ of the same length and for every letter $x$ of $\mathcal{A}$, we have:
\begin{center}
$||v|_x-|w|_x|\leq \alpha$.
\end{center}
If $\alpha =1$, then $\textbf{u}$ is simply said to be balanced.
\end{definition}
The following theorem gives us some classical characterization of Sturmian words.
\begin{theorem}\label{theo-stur} \cite{b11, b9} Let $\textbf{\emph{u}}$ be an infinite binary word. Then, the following assertions are equivalent:
\begin{enumerate}
\item $\textbf{\emph{u}}$ is Sturmian.
\item $\textbf{\emph{u}}$ is non-ultimately periodic and balanced.
\item For all $n\in \mathbb{N}^*, \hspace{0.3cm} \rho^{ab}_\textbf{\emph{u}}(n)=2$.
\item For all $n\in \mathbb{N}, \hspace{0.3cm}$ $$ p^{al}_\textbf{\emph{u}}(n) = \left \{
\begin{array}{l}
\hspace{0cm}1 \hspace{0.3cm} if \hspace{0.3cm} n \hspace{0.3cm} is \hspace{0.3cm} even \hspace{0.1cm} \\
\hspace{0cm}2 \hspace{0.33cm} otherwise.
\end{array}
\right. $$
\end{enumerate}
\end{theorem}
\begin{theorem}\label{stur-puis}$\cite{b16}$
Let $\textbf{\emph{v}}$ be an $a$-Sturmian word over $\mathcal{A}_2$. Then, there exist a Sturmian sequence $(\epsilon_i)_{i\geq 1}$ over the alphabet $\left\{0,1\right\}$ and an integer $l$ such that $\textbf{\emph{v}}$ can be written:
$\textbf{\emph{v}}=a^{l_0}ba^{l+\epsilon_1}ba^{l+\epsilon_2}ba^{l+\epsilon_3}b \dotsb$ with $l_0\leq l+1$.
\end{theorem}
It is proved in $\cite{b23}$ that Sturmian words are modulo-recurrent.
\begin{theorem}\label{stur-mod}$\cite{CKT}$
Let $\textbf{\emph{u}}$ be an infinite recurrent word. Then, the following assertions are equivalent:
\begin{enumerate}
\item $\textbf{\emph{u}}$ is a modulo-recurrent word.
\item For all integers $n\geq 1,\ p^f_\textbf{\emph{u}}(n)= p_\textbf{\emph{u}}(n)$.
\end{enumerate}
\end{theorem}
\section{Cellular automata (CA)}
In this section, we define a class of CA that we apply to infinite words. In all the rest of this paper, $\textbf{u}\in \mathcal{A}^{\infty}$ and $F$ is a CA defined over $\mathcal{A}^r$.
\begin{definition}
Let $\mathcal{A},\mathcal{B}$ be two alphabets, $r\geq 1$ and $f :\mathcal{A}^r \longrightarrow \mathcal{B},\ w_i \longmapsto x_i$ a morphism. We call a CA any map $F:\mathcal{A}^*\longrightarrow\mathcal{B}^*$ satisfying:
$$ \left \{
\begin{array}{l}
\hspace{0cm}F(w)=\varepsilon \hspace{3.3cm} if \hspace{0.3cm} |w|< r \hspace{0.1cm} \\
\hspace{0cm}F(xyz)= f(xy)F(yz) \hspace{1cm} if \hspace{0.3cm} x\in \mathcal{A}, \ |y|=r-1,\ z\in \mathcal{A}^{\infty}.
\end{array}
\right. $$
\end{definition}
From this definition, we have the following remarks:
\begin{Remarque} \label{rem-long}
\begin{enumerate}
\item The map $F$ is a surjection.
\item For all finite word $w$, we have $|F(w)|=|w|-r+1$ if $|w|\geq r$.
\item If $r=1$ then $F$ is a projection.
\end{enumerate}
\end{Remarque}
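The definition above unrolls to a sliding-window application of the local rule $f$. A minimal Python sketch (the local rule below is an arbitrary example of ours, not one used later in the paper) is:
\begin{verbatim}
def cellular_automaton(f, r):
    """Return the map F induced by a local rule f : A^r -> B on finite words."""
    def F(w):
        if len(w) < r:
            return ""                               # F(w) = empty word if |w| < r
        return "".join(f(w[i:i + r]) for i in range(len(w) - r + 1))
    return F

# Example local rule with r = 2 over {a, b}: f(xy) = a if x == y, b otherwise.
F = cellular_automaton(lambda w: "a" if w[0] == w[1] else "b", 2)
print(F("abaab"), len(F("abaab")))   # 'bbab', of length |w| - r + 1 = 4
\end{verbatim}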
\begin{Proposition}\label{fac-conserv}
Let $\textbf{\emph{u}},\textbf{\emph{v}}\in \mathcal{A}^{\infty}$ and $F$ be a CA. Then, we have:
\begin{enumerate}
\item If $u_1\in L(\textbf{\emph{u}})$ then $F(u_1)\in L(F(\textbf{\emph{u}}))$.
\item If $ L(\textbf{\emph{u}})\subset L(\textbf{\emph{v}})$ then $L(F(\textbf{\emph{u}}))\subset L(F(\textbf{\emph{v}}))$.
\item $L(F(\textbf{\emph{u}}))= F(L(\textbf{\emph{u}}))$.
\end{enumerate}
\end{Proposition}
\begin{theorem}\label{cor-inj} For any infinite word $\textbf{\emph{u}}$, we have $p_{F(\textbf{\emph{u}})}(n)\leq p_\textbf{\emph{u}}(n+r-1)$. Moreover, if $F$ is an injection, then $p_{F(\textbf{\emph{u}})}(n)= p_\textbf{\emph{u}}(n+r-1)$.
\end{theorem}
\textbf{Proof:} \\
$\bullet$ By Remark \ref{rem-long}, any factor of $F(\textbf{u})$ of length $n$ comes from some factor of $\textbf{u}$ of length $n+r-1$. Moreover, as $F$ is surjective, we have $\# L_{n}(F(\textbf{u}))\leq \# L_{n+r-1}(\textbf{u})$. Hence, $p_{F(\textbf{u})}(n)\leq p_\textbf{u}(n+r-1)$.\\
$\bullet$ Now suppose that $F$ is injective. Then each factor of $F(\textbf{u})$ of length $n$ comes from only one factor of $\textbf{u}$ of length $n+r-1$. Thus, we obtain $\# L_{n}(F(\textbf{u}))\geq \# L_{n+r-1}(\textbf{u})$. Hence, $p_{F(\textbf{u})}(n)= p_\textbf{u}(n+r-1)$.
$ \square$
\begin{theorem}\label{mod}
Let $\textbf{\emph{u}}$ be an infinite word. Then, $F(\textbf{\emph{u}})$ is modulo-recurrent if and only if $\textbf{\emph{u}}$ is modulo-recurrent.
\end{theorem}
\textbf{Proof:} Suppose that $\textbf{u}$ is modulo-recurrent. Let $w\in L(F(\textbf{u}))$. As $F$ is surjective, there exists $u_1\in L(\textbf{u})$ such that $w=F(u_1)$. In addition, the factors $u_1$ and $F(u_1)$ appear at the same positions in the words $\textbf{u}$ and $F(\textbf{u})$ respectively. Furthermore, $u_1$ appears at all positions $\mod |u_1|$ in $\textbf{u}$. Hence, $F(u_1)$ also appears at all positions $\mod |u_1|$ in $F(\textbf{u})$. Since $|F(u_1)|\leq |u_1|$, $F(u_1)$ appears at all positions $\mod |F(u_1)|$ in $F(\textbf{u})$. Therefore, $F(\textbf{u})$ is modulo-recurrent.
Conversely, suppose that $F(\textbf{u})$ is modulo-recurrent. Let $u_1\in L_{n+r-1}(\textbf{u})$; then by Proposition \ref{fac-conserv}, we have $F(u_1)\in L_n(F(\textbf{u}))$. The word $F(\textbf{u})$ being modulo-recurrent, the factor $F(u_1)$ appears at all positions modulo $n$ in $F(\textbf{u})$. As $u_1$ and $F(u_1)$ are respectively at the same position in $\textbf{u}$ and $F(\textbf{u})$, $u_1$ appears at all positions modulo $n$ in $\textbf{u}$. It remains to show that $u_1$ appears at positions $i \mod \hspace{0.1cm} (n+r-1)$ with $i\in \lbrace n+1,\dots,n+r-1 \rbrace$. Since $F(\textbf{u})$ is in particular recurrent, there exists a factor $\delta\in L_{r-1}(F(\textbf{u}))$ such that $F(u_1)\delta \in L_{n+r-1}(F(\textbf{u}))$. Furthermore, $u_1$ and $F(u_1)\delta$ appear respectively at the same position in $\textbf{u}$ and $F(\textbf{u})$. Since $F(\textbf{u})$ is modulo-recurrent, $F(u_1)\delta$ appears at all positions modulo $n+r-1$ in $F(\textbf{u})$, and therefore $u_1$ appears at all positions modulo $n+r-1$ in $\textbf{u}$. Hence, $\textbf{u}$ is modulo-recurrent.
$\square$
\begin{Lemme} Let $\textbf{\emph{u}}$ be an infinite word. Then the following assertions hold.
\begin{enumerate}
\item If $\textbf{\emph{u}}$ is periodic then $F(\textbf{\emph{u}})$ is periodic.
\item If $F$ is bijective and $F(\textbf{\emph{u}})$ periodic then $\textbf{\emph{u}}$ is periodic.
\end{enumerate}
\end{Lemme}
\textbf{Proof:}
\begin{enumerate}
\item Suppose that $\textbf{u}$ is periodic. Then, there exists a finite word $u_1$ such that $\textbf{u}=u_1^\omega.$ As a result,
$$\textbf{u}=u_1^\omega\Longrightarrow F(\textbf{u})=v_{1}^\omega,$$
where $v_1= F(\text{Pref}_{|u_1|+r-1}( \textbf{u}))=\text{Pref}_{|u_1|}( F(\textbf{u}))$. Consequently, $F(\textbf{u})$ is periodic.
\item Suppose that $F(\textbf{u})$ is periodic. Then, there exists a factor $v_{1}$ in $F(\textbf{u})$ such that $F(\textbf{u})=v_{1}^{\omega}$. Since $F$ is bijective, there exists a unique factor $w\in L_{\vert v_{1}\vert+r-1}(\textbf{u})$ such that $F(w)=v_1$. By putting $u_1=\text{Pref}_{\vert v_1\vert}(w)$, we get $\textbf{u}=u_1^{\omega}$. Hence, $\textbf{u}$ is periodic.
\end{enumerate}
$\square$
\begin{Remarque}
If $\textbf{\emph{u}}$ and $F(\textbf{\emph{u}})$ are both periodic, then they have the same period.
\end{Remarque}
\begin{theorem}
Let $\textbf{\emph{u}}$ be a recurrent word and $F$ a CA such that:
$$ \left \{
\begin{array}{l}
\hspace{0cm}F(x_{1}y_{1})= F(x_{2}y_{2})\hspace{1cm} if\ x_{1}= x_{2} \\
\hspace{0cm}F(x_{1}y_{1})\neq F(x_{2}y_{2}) \hspace{1cm} \text{otherwise},
\end{array}
\right.$$
where $x_{1}, x_{2}\in \mathcal{A}$ and $\ y_{1}, y_{2}\in \mathcal{A}^{r-1}$. Then, $F(\textbf{\emph{u}})$ is balanced if and only if $\textbf{\emph{u}}$ is balanced.
\end{theorem}
\textbf{Proof:} Suppose that $\textbf{u}$ is balanced and $F(\textbf{u})$ is non-balanced. The word $F(\textbf{u})$ being non-balanced, there exists a factor $v_1\in L(F(\textbf{u}))$ such that $x_1v_1x_1$, $x_2v_1x_2\in L(F(\textbf{u}))$. Since $F$ is surjective, there are two factors $u_1,\ u_2\in L(\textbf{u})$ such that $x_1v_1x_1=F(u_1)$ and $x_2v_1x_2=F(u_2)$. Thus, we can write $u_1=au_1'a\delta_1$ and $u_2=bu_1'b\delta_2$ with $\delta_1,\ \delta_2\in L_{r-1}(\textbf{u})$ and for some finite word $u'_{1}$. As a result, we have $au_1'a,\ bu_1'b\in L(\textbf{u})$. We get a contradiction because $\textbf{u}$ is balanced.
Conversely, suppose that $F(\textbf{u})$ is balanced and $\textbf{u}$ is non-balanced. As $\textbf{u}$ is non-balanced, there exists a factor $u_1\in L(\textbf{u})$ such that $au_1a,\ bu_1b\in L_n(\textbf{u})$, for some distinct letters $a,\ b\in \mathcal{A}$. In addition, there are $\delta_1,\ \delta_2\in L_{r-1}(\textbf{u})$ such that $au_1a\delta_1,\ bu_1b\delta_2\in L_{n+r-1}(\textbf{u})$. Furthermore, $F(au_1a\delta_1)=x_1v_1x_1$ and $F(bu_1b\delta_2)=x_2v_1x_2$ are factors of $L(F(\textbf{u}))$. This contradicts our hypothesis.
From all the above, we obtain the desired equivalence.
$\square$
\begin{theorem}\label{spec} Let $\textbf{\emph{u}}$ be a recurrent word and $F$ a CA such that:
$$ \left \{
\begin{array}{l}
\hspace{0cm}F(x_{1}y_{1})= F(x_{2}y_{2})\hspace{1cm} if\ x_{1}= x_{2} \\
\hspace{0cm}F(x_{1}y_{1})\neq F(x_{2}y_{2}) \hspace{1cm} \text{otherwise},
\end{array}
\right.$$
where $x_{1}, x_{2}\in \mathcal{A}$ and $\ y_{1}, y_{2}\in \mathcal{A}^{r-1}$. Then, any right (respectively, left) special factor of $F(\textbf{\emph{u}})$ comes from a right (respectively, left) special factor of $\textbf{\emph{u}}$. \end{theorem}
\textbf{Proof:} Let $v_1$ be a right special factor of $F(\textbf{u})$. Then, we have $v_1x_1,v_1x_2\in L(F(\textbf{u}))$ with $x_{1},x_{2}\in\mathcal{A}$. Since $F$ is surjective, there exist $u'_1,\delta_1, \delta_2\in L(\textbf{u})$ such that $v_1x_1=F(u'_1a\delta_1)$ and $v_1x_2=F(u'_1b\delta_2)$ with $|\delta_1|=|\delta_2|=r-1$. Hence, $u'_1a\delta_1, u'_1b\delta_2 \in L(\textbf{u})$, i.e., $u'_1a, u'_1b \in L(\textbf{u})$. Whence, $u'_1$ is right special in $\textbf{u}$.\\
Let $v_1$ be a left special factor of $F(\textbf{u})$. Then, we have $x_1v_1,x_2v_1\in L(F(\textbf{u}))$ with $x_{1},x_{2}\in\mathcal{A}$. As $F$ is surjective, there exist $u'_1\in L(\textbf{u})$ and $a,b\in\mathcal{A}$ such that $x_1v_1=F(au'_1)$ and $x_2v_1=F(bu'_1)$. Hence, $au'_1, bu'_1 \in L(\textbf{u})$. Consequently, $u'_1$ is left special in $\textbf{u}$.
$\square$
\begin{Corollaire}
Any bispecial factor of $F(\textbf{\emph{u}})$ comes from a bispecial factor of $\textbf{\emph{u}}$.
\end{Corollaire}
\section{Stable cellular automata (SCA)}
In this section, we study some class of cellular automata that we call stable cellular automata (SCA).
\begin{definition}
A cellular automaton $F:\mathcal{A}^r\longrightarrow\mathcal{A}$ is invariant if for any infinite word $\textbf{u}$, we have $F(\textbf{u})=\textbf{u}$.
\end{definition}
The following proposition gives us a characterization of an invariant cellular automaton.
\begin{Proposition}\label{eq1} Let $F:\mathcal{A}^r\longrightarrow\mathcal{A}$, be a CA. Then, the following assertions are equivalent.
\begin{enumerate}
\item $F$ is invariant.
\item $F(xy)=x$, for all $x\in \mathcal{A}\ \text{and} \ y\in \mathcal{A}^{r-1}$.
\end{enumerate}
\end{Proposition}
\textbf{Proof:} Let $\textbf{u}$ be an infinite word in the form $\textbf{u}=x_0x_1x_2\cdots$. Then,
$$F(\textbf{u})=F(x_0x_1\cdots x_{r-1})F(x_1x_2\cdots x_{r})F(x_2x_3\cdots x_{r+1})\cdots.$$ As a result, we have the following equivalences.
\begin{align*}
F\ \text{is invariant}&\Longleftrightarrow F(\textbf{u})=\textbf{u}\\
&\Longleftrightarrow F(x_0x_1\cdots x_{r-1})F(x_1x_2\cdots x_{r})F(x_2x_3\cdots x_{r+1})\cdots=x_0x_1x_2x_3 \cdots\\
&\Longleftrightarrow F(x_ix_{i+1}\cdots x_{i+r-1})=x_i,\ \forall i\in \mathbb{N}\\
&\Longleftrightarrow F(xy)=x,\ \forall\ x\in \mathcal{A},\ y\in \mathcal{A}^{r-1}.
\end{align*}
$\square$
\begin{Lemme}\label{Lem-Ech}
Let $F:\mathcal{A}_2^r\longrightarrow\mathcal{A}_2$ be an invariant CA and $E$ the exchange map over $\mathcal{A}_2$, i.e., the morphism exchanging the letters $a$ and $b$. Then, $F \circ E=E \circ F$.
\end{Lemme}
\textbf{Proof:} Let $\textbf{u}$ be an infinite word over $\mathcal{A}_2$ and $u_1\in L(\textbf{u}) $. Then, we distinguish two cases:\\
\noindent\textbf{Case 1}: $|u_1|< r$. Then, we have $F(u_1)=\varepsilon$. As a result, $E (F(u_1))=\varepsilon$. In addition, $|E(u_1)|<r$. Thus, $F(E(u_1))=\varepsilon$. Hence, $F(E(u_1))=\varepsilon=E (F(u_1))$.
\noindent\textbf{Case 2}: $|u_1|\geq r$. Let $w$ be a factor of length $r$ in $u_1$. Then, without loss of generality, let us assume that the first letter of $w$ is $a$. Thus, by Proposition \ref{eq1}, we have $F(E(w))=b$. Furthermore, $F(w)=a$, i.e., $E(F(w))=b$. As a result, we obtain $F(E(w))=E(F(w))$. It follows that $F(E(u_1))=E(F(u_1))$.\\
In all cases, $F \circ E=E \circ F$.
$\square$
\begin{definition} Let $F$ be a CA. Then, $F$ is said to be stable if $F(\overline{w})=F(w)$ for all $w\in \mathcal{A}^r$.
\end{definition}
\begin{Lemme}\label{Lem-stable}
Let $\textbf{\emph{u}}$ be an infinite word and $F$ a SCA. Then, for all $u_1\in L(\textbf{\emph{u}})$, we have $F(\overline{u_1})=\overline{F(u_1)}$.
\end{Lemme}
\textbf{Proof:} Let $u_1\in L(\textbf{u})$. Then, we distinguish two cases. \\
\noindent\textbf{Case 1}: $|u_1|< r$. Then, $F(u_1)=F(\overline{u_1})=\varepsilon=\overline{F(u_1)}$.
\noindent\textbf{Case 2}: $|u_1|\geq r$. Then, we have $u_1\in L_{n+r}(\textbf{u})$ and $F(u_1)=F(w_0)\cdots F(w_n)$ where $w_i=\text{Pref}_r(S^i(u_1))$, for all $i\in \{0,\dots ,n\}$. In addition,
\begin{align*}
\overline{F(u_1)}&=\overline{F(w_0)F(w_1)\cdots F(w_n)}\\
&=\overline{F(w_n)}\cdots \overline{F(w_1)}\ \overline{F(w_0)}\\
&=F(w_n)\cdots F(w_1)F(w_0),\ \text{because}\ F(w_i)\in \mathcal{A}, \ \text{for all}\ i\in \{0,\dots,n\}.
\end{align*}
Furthermore, we have $F(\overline{ u}_1)=F(\overline{ w}_n)\cdots F(\overline{ w}_1)\ F(\overline{ w}_0)$. Since $F$ is stable we have, for all $i\in \{0,\dots,n\}$, $F(w_i)=F(\overline{ w}_i)$ with $w_i\in L_r(\textbf{u})$. Thus, $F(\overline{u}_1)=F(w_n)\cdots F(w_1)F(w_0)$.\\
Hence, $\overline{F(u_1)}=F(\overline{u}_1)$.
$\square$
\begin{theorem}\label{Theo-stable}
Let $\textbf{\emph{u}}$ be an infinite word and $F$ a SCA. Then, $F(\textbf{\emph{u}})$ is stable by reflection if and only if $\textbf{\emph{u}}$ is.
\end{theorem}
\textbf{Proof:} Suppose that $\textbf{u}$ is stable by reflection. Let $u_1\in L_{n+r}(\textbf{u})$. Then, by Proposition \ref{fac-conserv} we have $F(u_1)\in L_{n+1}(F(\textbf{u}))$. Write $F(u_1)=F(w_0)F(w_1)\cdots F(w_n)$ with $w_i=\text{Pref}_r(S^i(u_1))$, for all $i\in \{0,\dots,n\}$. Then, by Lemma \ref{Lem-stable}, $\overline{F(u_1)}=F(\overline{u}_1)$. But $F(\overline{u}_1)\in L(F(\textbf{u}))$, since $\overline{u}_1\in L(\textbf{u})$. Thus, $\overline{F(u_1)}\in L(F(\textbf{u}))$. Hence, $F(\textbf{u})$ is stable by reflection.
Conversely, suppose that $F(\textbf{u})$ is stable by reflection. Let $v_1\in L_{n+1}(F(\textbf{u}))$; then, there exists $u_1\in L_{n+r}(\textbf{u})$ such that $v_1=F(u_1)$. As a result, $F(u_1)\in L_{n+1}(F(\textbf{u}))$ and $\overline{F(u_1)}\in L_{n+1}(F(\textbf{u}))$ because $F(\textbf{u})$ is stable by reflection. But, by Lemma \ref{Lem-stable}, $\overline{F(u_1)}=F(\overline{u}_1)$, i.e., $F(\overline{u}_1) \in L(F(\textbf{u}))$. Since $F$ is surjective, we have $\overline{u}_1\in L(\textbf{u})$. Hence, $\textbf{u}$ is stable by reflection.
$\square$
\begin{Corollaire}\label{pal1} Let $\textbf{\emph{u}}$ be an infinite word stable by reflection and $F$ an injective SCA. Then, any factor $u_1$ of $\textbf{\emph{u}}$ is a palindromic factor if and only if $F(u_1)$ is.
\end{Corollaire}
\textbf{Proof:} Suppose that $u_1$ is a palindromic factor of $\textbf{u}$. Then, by Lemma \ref{Lem-stable}, we have $F(\overline{u}_1)=\overline{F(u_1)}$. In addition, $F(\overline{u}_{1})=F(u_1)$ since $\overline{u}_1=u_1$. Hence, $\overline{F(u_1)}=F(u_1)$.
Conversely, suppose that $\overline{F(u_1)}=F(u_1)$. As $F(\overline{u}_1)=\overline{F(u_1)}$, by Lemma \ref{Lem-stable} we get
$F(\overline{u}_1)=F(u_1)$. From the injectivity of $F$, we deduce that $\overline{u_1}=u_1$.
$\square$
\begin{Corollaire}\label{pal2} Let $\textbf{\emph{u}}$ be an infinite word stable by reflection and $F$ an injective SCA. Then, for all integers $n$, we have:
$$p^{al}_{F(\textbf{\emph{u}})}(n)=p^{al}_{\textbf{\emph{u}}}(n+r-1).$$
\end{Corollaire}
\textbf{Proof:} Use Theorem \ref{cor-inj} and Corollary \ref{pal1}.
$\square$
\begin{definition}
Let $\textbf{u}\in \mathcal{A}^{\infty}$. We say that $\textbf{u}$ is rich if any factor $w$ of $\textbf{u}$ contains exactly $|w|+1$ distinct palindromic factors, including the empty word.
\end{definition}
The result below in $\cite{bucci}$ characterizes the rich words stable by reflection.
\begin{theorem}\label{Bucci}
Let $\textbf{\emph{u}}$ be an infinite word whose set of factors is stable by reflection. Then, $\textbf{\emph{u}}$ is rich if and only if for all $n \in \mathbb{N}:$
$$p_\textbf{\emph{u}}^{al}(n)+p_\textbf{\emph{u}}^{al}(n+1)=p_{\textbf{\emph{u}}}(n+1)-p_\textbf{\emph{u}}(n)+2.$$
\end{theorem}
\begin{theorem}\label{riche}
Let $\textbf{\emph{u}}$ be an infinite word stable by reflection and $F$ an injective SCA. Then, $F(\textbf{\emph{u}})$ is rich if and only if $\textbf{\emph{u}}$ is.
\end{theorem}
\textbf{Proof:} As $F$ is injective and stable, we have respectively by Theorem \ref{cor-inj} and Corollary \ref{pal2}, for all $n\in \mathbb{N}^*$, $p_{F(\textbf{u})}(n)=p_u(n+r-1)$ and $p^{al}_{F(\textbf{u})}(n)=p^{al}_u(n+r-1)$.\\
In addition, $p_{F(\textbf{u})}(n+1)-p_{F(\textbf{u})}(n)=p_\textbf{u}(n+r)-p_\textbf{u}(n+r-1)$. Moreover, $\textbf{u}$ being recurrent and reflection stable then, by Theorem \ref{Bucci}, we have:
\begin{align*}
\textbf{u}\ \text{is rich} \Longleftrightarrow p_\textbf{u}(n+r)-p_\textbf{u}(n+r-1)+2 &=p_\textbf{u}^{al}(n+r)+p_\textbf{u}^{al}(n+r-1)\\
&=p_{F(\textbf{u})}^{al}(n+1)+p_{F(\textbf{u})}^{al}(n),\ \text{because}\ F \ \text{is injective}.
\end{align*}
Consequently, $p_{F(\textbf{u})}(n+1)-p_{F(\textbf{u})}(n)+2=p_{F(\textbf{u})}^{al}(n+1)+p_{F(\textbf{u})}^{al}(n)$, i.e, $F(\textbf{u})$ is rich.
$\square$
\section{Cellular automata and Sturmian words}
In this section, we apply the CA to Sturmian words.
\begin{definition} Let $F$ be a CA. Then, $F$ is said to be Sturmian if the image under $F$ of any Sturmian word is also Sturmian.\\
Moreover, if $F(\textbf{u})=\textbf{u}$, then $\textbf{u}$ is said to be a fixed point of $F$.
\end{definition}
\begin{Exemple}
Let us consider $\textbf{\emph{u}}$ a Sturmian word over $\mathcal{A}_2$. Then, for the CA defined by:
$$ \left \{
\begin{array}{l}
\text{for all}\ x\in \mathcal{A}_{2}\ \text{and}\ |y|=r-1 \\
H(xy)=x\\
G(xy)=E(x),
\end{array}
\right.$$
we have $H(\textbf{\emph{u}})=\textbf{\emph{u}}$ and $G(\textbf{\emph{u}})=E(H(\textbf{\emph{u}}))=H(E(\textbf{\emph{u}}))=E(\textbf{\emph{u}})$; thus $\textbf{\emph{u}}$ is a fixed point of $H$, and $G(\textbf{\emph{u}})$ is Sturmian.
\end{Exemple}
Note that $H$ and $G$ are SCA.
\subsection{Classical and window complexity}
Let $\textbf{v}$ be a Sturmian word of the form $\textbf{v}=a^{l_0}ba^{l+\epsilon_1}ba^{l+\epsilon_2}ba^{l+\epsilon_3}b \dotsb$ given by Theorem \ref{stur-puis}.
Let us consider the SCA $F$ defined over $\mathcal{A}^{l+1}$ by:
$$F(w)= \left \{
\begin{array}{l}
\hspace{0cm}a \hspace{0.2cm} if \hspace{0.2cm} w=a^{l+1} \\
\hspace{0cm} b \hspace{0.2cm} otherwise.
\end{array}
\right. $$
Then, we get:
$$F(\textbf{v})=\left \{
\begin{array}{l}
ab^{l_0+1}a^{\epsilon_1}b^{l+1} a^{\epsilon_2}b^{l+1}a^{\epsilon_3}b^{l+1}a^{\epsilon_4}b^{l+1} \dotsb \hspace{0.2cm} if \hspace{0.2cm} l_0=l+1 \\
b^{l_0+1}a^{\epsilon_1}b^{l+1} a^{\epsilon_2}b^{l+1}a^{\epsilon_3}b^{l+1}a^{\epsilon_4}b^{l+1} \dotsb \hspace{0.5cm} otherwise.
\end{array}
\right. $$
In the following, we assume $n_0=k_0(l+1)$ where $k_0$ is the maximum power of $a^{l+\epsilon_i}b$ in $\textbf{v}$. We study some combinatorial properties, then the classical and window complexity functions, of the word $F(\textbf{v})$ thus obtained.
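For a concrete illustration (ours, not part of the proofs below), one can take $\textbf{v}$ to be the Fibonacci word, which is $a$-Sturmian with $l=1$, so that $r=l+1=2$; the Python sketch below applies the SCA $F$ defined above and exhibits the announced shape of $F(\textbf{v})$.
\begin{verbatim}
def phi(w):                                   # Fibonacci substitution a->ab, b->a
    return "".join("ab" if c == "a" else "a" for c in w)

v = "a"
for _ in range(12):
    v = phi(v)                                # a long prefix of the Fibonacci word

l = 1                                         # the Fibonacci word is a-Sturmian, l = 1
r = l + 1
Fv = "".join("a" if v[i:i + r] == "a" * r else "b" for i in range(len(v) - r + 1))
print(Fv[:24])        # blocks b^(l+1) separated by a^(eps_i), with eps_i in {0, 1}
\end{verbatim}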
\begin{Proposition}
For all $n\geq 0,\ p^f_{F(\textbf{\emph{v}})}(n)=p_{F(\textbf{\emph{v}})}(n).$
\end{Proposition}
\textbf{Proof:} Since $\textbf{v}$ is modulo-recurrent, so is $F(\textbf{v})$, by Theorem \ref{mod}. Hence, $p^f_{F(\textbf{v})}(n)=p_{F(\textbf{v})}(n)$ by Theorem \ref{stur-mod}.
$\square$
\begin{Lemme}\label{Lem1-auto-stur}
Let $v_1$ be a factor of $F(\textbf{\emph{v}})$ such that $|v_1|>n_0$. Then, $v_1$ comes from a unique factor of $\textbf{\emph{v}}$.
\end{Lemme}
\textbf{Proof:} Note that any factor of $F(\textbf{v})$ containing the letter $a$ comes from only one factor of $\textbf{v}$. Indeed, $a$ has only one preimage, namely $a^{l+1}$. But any factor of $F(\textbf{v})$ of length strictly greater than $n_0$ contains at least one occurrence of $a$. Hence, it necessarily comes from only one factor of $\textbf{v}$.
$\square$
\begin{theorem}\label{cc}
The classical complexity function of the word $F(\textbf{\emph{v}})$ is given by: $$p_{F(\textbf{\emph{v}})}(n)= \left \{
\begin{array}{l}
\hspace{0cm}n+1 \hspace{2.5cm} if \ n\leq n_0-l \\
\hspace{0cm}2n-n_0+l+1 \hspace{0.7cm} if \ n_0-l< n\leq n_0 \\
\hspace{0cm} n+l+1 \hspace{1.9cm} if \ n>n_0.
\end{array}
\right. $$
\end{theorem}
\textbf{Proof:} Let us proceed by disjunction of cases according to the length $n$ of the factors.\\
\noindent\textbf{Case 1}: $1\leq n\leq n_0-l$. Then, $L_n(F(\textbf{v}))=\left\{b^n,\ b^iab^{n-i-1} \:\ i=0,\dots, n-1 \right\}.$ Therefore, $ p_{F(\textbf{v})}(n)=n+1$, for all $n\leq n_0-l$. Let us observe that for all $ n\leq n_0-l-1$, $b^n$ is the only right special factor of length $n$ of $F(\textbf{v})$. Similarly, the right special factors of length $n_0-l$ in $F(\textbf{v})$ are $b^{n_0-l}$ and $ab^{n_0-l-1}$. Hence, we obtain the following equalities:
\begin{align*}
p_{F(\textbf{v})}(n_0-l+1)&=p_{F(\textbf{v})}(n_0-l)+2\\
&=(n_0-l+1)+2\\
&=n_0-l+3.
\end{align*}
\noindent\textbf{Case 2}: $n_0-l+1\leq n\leq n_0$. Then:
$$L_n(F(\textbf{v}))=\left\{b^n,\ b^iab^{n-i-1}, \ b^jab^{n_0-l-1}ab^{n-n_0+l-1-j} \:\ i=0,1,\dots, n-1; \ 0\leq j \leq n-n_0+l-1 \right\}.$$
As a result, we get:
\begin{align*}
p_{F(\textbf{v})}(n)&=1+n+(n-n_0+l-1+1)\\
&=2n-n_0+l+1.
\end{align*}
\noindent\textbf{Case 3}: $n> n_0$. Then, any factor of $F(\textbf{v})$ of length $n$ comes from only one factor of length $n+r-1$ in $\textbf{v}$, by Lemma \ref{Lem1-auto-stur}. By applying Theorem \ref{cor-inj}, we obtain the following equalities:
\begin{align*}
p_{F(\textbf{v})}(n)&=p_\textbf{v}(n+r-1)\\
&=n+r\\
&=n+l+1.
\end{align*}
$\square$
\begin{Remarque}
\begin{enumerate}
\item The complexity function $p_{F(\textbf{\emph{v}})}$ is continuous over $\mathbb{N}$.
\item The word $F(\textbf{\emph{v}})$ is a quasi-Sturmian word.
\item The sets of return words for the letters $a$ and $b$ in $F(\textbf{\emph{v}})$ are respectively: $$\left\{ab^{n_0-l-1}, \ ab^{n_0} \right\}\ \text{and}\ \left\{b, \ ba \right\}.$$
\end{enumerate}
\end{Remarque}
\subsection{Palindromic properties}
In this subsection, we study the palindromic complexity function and the richness of $F(\textbf{v})$.
\begin{Lemme}\label{Lem2-auto-stur} Any palindromic factor of length greater than $n_0$ of $F(\textbf{\emph{v}})$ comes from a palindromic factor of $\textbf{\emph{v}}$.
\end{Lemme}
\textbf{Proof:} First, note that $F$ satisfies the conditions of Theorem \ref{Theo-stable}, i.e, $F(\textbf{v})$ is stable by reflection. Let $v_1$ be a palindromic factor of $F(\textbf{v})$ such that $|v_1|> n_0$. Then, by Lemma \ref{Lem1-auto-stur}, $v_1$ comes from only one factor $u_1$ of $\textbf{v}$. Thus, by Corollary \ref{cor-inj}, we deduce that $u_1$ is a palindrome.
$\square$
\begin{theorem}\label{cp}
The palindromic complexity function of the word $F(\textbf{\emph{v}})$ is given by:
\begin{enumerate}
\item If $n \leq n_0-l$,
$$p^{al}_{F(\textbf{\emph{v}})}(n)= \left \{
\begin{array}{l}
\hspace{0cm}1 \hspace{0.2cm} if \hspace{0.2cm} n \hspace{0.1cm} is \hspace{0.1cm} even \\
\hspace{0cm}2 \hspace{0.2cm} otherwise.
\end{array}
\right. $$
\item If $n_0-l< n \leq n_0$,
\begin{itemize}
\item for $n_0$ even, we have: $ p^{al}_{F(\textbf{\emph{v}})}(n)= \left \{
\begin{array}{l}
\hspace{0cm}1 \hspace{0.2cm} if \hspace{0.2cm} l \hspace{0.1cm} and \hspace{0.1cm} n \hspace{0.1cm} are \hspace{0.1cm} even \\
\hspace{0cm}2 \hspace{0.2cm} if \hspace{0.2cm} l \hspace{0.1cm} is \hspace{0.1cm} odd\\
\hspace{0cm}3 \hspace{0.2cm} otherwise,
\end{array}
\right. $
\item for $n_0$ odd, we have: $p^{al}_{F(\textbf{\emph{v}})}(n)=2.$
\end{itemize}
\item If $n> n_0$,
$$p^{al}_{F(\textbf{\emph{v}})}(n)= \left \{
\begin{array}{l}
\hspace{0cm}1 \hspace{0.2cm} if \hspace{0.2cm} n+l \hspace{0.1cm} is \hspace{0.1cm} even \\
\hspace{0cm}2 \hspace{0.2cm} otherwise.
\end{array}
\right. $$
\end{enumerate}
\end{theorem}
\textbf{Proof:}
\begin{enumerate}
\item If $ n \leq n_0-l$, then we have: $$L_n(F(\textbf{v}))=\left\{b^n,\ b^iab^{n-i-1} \:\ i=0,1,\dots, n-1 \right\}.$$
Hence, the word $b^iab^{n-i-1}$ is a palindromic factor of length $n$ if and only if $i=n-i-1$, i.e, $n=2i+1$. As a result, we obtain: $$\text{Pal}_n(F(\textbf{v}))= \left \{
\begin{array}{l}
\left\{b^n \right\}
\hspace{3cm}if\hspace{0.1cm} n \hspace{0.1cm}is \hspace{0.1cm} even\\
\left\{b^n; \hspace{0.1cm}b^{\frac{n-1}{2}} ab^{\frac{n-1}{2}} \right\} \hspace{0.8cm} otherwise.
\end{array}
\right. $$
\item If $n_0-l< n\leq n_0$ then:
$$L_n(F(\textbf{v}))=\left\{b^n,\ b^iab^{n-i-1}, \ b^jab^{n_0-l-1}ab^{n-n_0+l-1-j} \ :\ i=0,1,\dots, n-1; \ 0\leq j \leq n-n_0+l-1 \right\}.$$
Hence, the word $b^jab^{n_0-l-1}ab^{n-n_0+l-1-j}$ is a palindromic factor of length $n$ of $F(\textbf{v})$ if and only if $j=n-n_0+l-1-j$. It follows that $n+l=2j+n_0+1$. Thus, let us now reason according to the parity of $n_0$ to determine when the word $b^jab^{n_0-l-1}ab^{n-n_0+l-1-j}$ is a palindromic factor of $F(\textbf{v})$:
\begin{itemize}
\item for $n_0$ even, as $n+l=2j+n_0+1$, we deduce that $l$ and $n$ have different parities. As a consequence, we obtain: $$\text{Pal}_n(F(\textbf{v}))= \left \{
\begin{array}{l}
\left\{b^n \right\} \hspace{7.8cm} if\hspace{0.1cm} l \hspace{0.1cm}and\hspace{0.1cm} n\hspace{0.1cm} are\hspace{0.1cm} even\\
\left\{b^n;\hspace{0.1cm} b^{\frac{n-1}{2}} ab^{\frac{n-1}{2}} \right\} \hspace{5.5cm}if\hspace{0.1cm}l \hspace{0.1cm} and\hspace{0.1cm} n\hspace{0.1cm} are\hspace{0.1cm} odd\\
\left\{b^n;\hspace{0.1cm} b^{\frac{n-n_0+l-1}{2}}ab^{n_0-l-1}ab^{\frac{n-n_0+l-1}{2}} \right\} \hspace{2.5cm}if\hspace{0.1cm}l \hspace{0.1cm}is \hspace{0.1cm}odd\hspace{0.1cm} and\hspace{0.1cm} n \hspace{0.1cm}is \hspace{0.1cm}even\\
\left\{b^n;\hspace{0.1cm} b^{\frac{n-1}{2}} ab^{\frac{n-1}{2}};\hspace{0.1cm} b^{\frac{n-n_0+l-1}{2}}ab^{n_0-l-1}ab^{\frac{n-n_0+l-1}{2}} \right\} \hspace{0.5cm}if\hspace{0.1cm}l \hspace{0.1cm}is \hspace{0.1cm}even\hspace{0.1cm} and\hspace{0.1cm} n \hspace{0.1cm}is \hspace{0.1cm}odd.
\end{array}
\right. $$
\item for $n_0$ odd, since $n+l=2j+n_0+1$ then the integers $n$ and $l$ are different parities. Thus: $$\text{Pal}_n(F(\textbf{v}))= \left \{
\begin{array}{l}
\left\{b^n;\hspace{0.1cm} b^{\frac{n-1}{2}} ab^{\frac{n-1}{2}} \right\} \hspace{5cm}if\hspace{0.1cm} n\hspace{0.1cm} is\hspace{0.1cm} odd\\
\left\{b^n;\hspace{0.1cm} b^{\frac{n-n_0+l-1}{2}}ab^{n_0-l-1}ab^{\frac{n-n_0+l-1}{2}} \right\} \hspace{2cm}if\hspace{0.1cm} n \hspace{0.1cm}is \hspace{0.1cm}even.
\end{array}
\right. $$
\end{itemize}
\item If $n> n_0$, then firstly, any factor of length $n$ of $F(\textbf{v})$ comes from a factor of length $n+l$ of $\textbf{v}$. Secondly, by Lemma \ref{Lem2-auto-stur}, any palindromic factor of $F(\textbf{v})$ of length $n> n_0$ comes from only one palindromic factor of $\textbf{v}$. In addition, by applying Theorem \ref{theo-stur}, we deduce that $F(\textbf{v})$ has one palindromic factor of length $n$ if $n+l$ is even and two otherwise.
\end{enumerate}
$\square$
\begin{Corollaire}
The word $F(\textbf{\emph{v}})$ is rich.
\end{Corollaire}
\textbf{Proof:} The proof follows from Theorem \ref{riche}.
$\square$
\subsection{Abelian complexity function}
In this subsection, we determine the balance, Parikh vectors and abelian complexity function of $F(\textbf{v})$.
\begin{Proposition}$\cite{BKT-p}$\label{pab-binaire}
Let $\textbf{\emph{u}}$ be an infinite $\beta$-balanced word over $\left\{a,b\right\}$. Then, for all integers $n$, we have: $$\rho^{ab}_\textbf{\emph{u}}(n)\leq \beta+1.$$
\end{Proposition}
\begin{Lemme}\label{eq}
The word $F(\textbf{\emph{v}})$ is $2$-balanced.
\end{Lemme}
\textbf{Proof:} Note that for all factors $u_1$ and $u_2$ of $\textbf{v}$, if $|u_1|=|u_2|$ then $\bigl||u_1|_{a^{l+1}}-|u_2|_{a^{l+1}}\bigr|\leq 2$, because $\textbf{v}$ is Sturmian.
Let $v_1, v_2\in L_n(F(\textbf{v}))$. Then there are two factors $u_1, u_2\in L_{n+l}(\textbf{v})$ such that $v_1=F(u_1)$ and $v_2=F(u_2)$. As $|u_1|=|u_2|$, we have $\bigl||u_1|_{a^{l+1}}-|u_2|_{a^{l+1}}\bigr|\leq 2$. This implies that $\bigl||F(u_1)|_a-|F(u_2)|_a\bigr|\leq 2$, i.e., $\bigl||v_1|_a-|v_2|_a\bigr|\leq 2$. Moreover, $b^{n_0-l+1},\ ab^{n_0-l-1}a,\ b^{n_0}ab^{n_0-l+1}$ and $ab^{n_0}ab^{n_0-l-1}a$ are factors of $F(\textbf{v})$, so the bound $2$ is attained. Hence, $F(\textbf{v})$ is $2$-balanced.
$\square$
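The following Python sketch (our own numerical illustration on a finite prefix, not a proof) checks the bound of Lemma \ref{eq}: the prefix of $F(\textbf{v})$ built from the Fibonacci word is $2$-balanced but not $1$-balanced.
\begin{verbatim}
def balance_bounded(u, alpha, letter="a", max_len=60):
    """True if ||v|_letter - |w|_letter| <= alpha for all equal-length factors."""
    for n in range(1, min(max_len, len(u)) + 1):
        counts = {u[i:i + n].count(letter) for i in range(len(u) - n + 1)}
        if max(counts) - min(counts) > alpha:
            return False
    return True

def phi(w): return "".join("ab" if c == "a" else "a" for c in w)
v = "a"
for _ in range(12):
    v = phi(v)                                  # prefix of the Fibonacci word
Fv = "".join("a" if v[i:i + 2] == "aa" else "b" for i in range(len(v) - 1))
print(balance_bounded(Fv, 2), balance_bounded(Fv, 1))   # expected: True, False
\end{verbatim}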
\begin{theorem}\label{ca}
The abelian complexity function of $F(\textbf{\emph{v}})$ is given for all $n\in \mathbb{N}^*$ by:
\begin{enumerate}
\item For $n \leq n_0-l$, $\rho^{ab}_{F(\textbf{\emph{v}})}(n)=2$.
\item For $n_0-l+1\leq n\leq n_0$, $\rho^{ab}_{F(\textbf{\emph{v}})}(n)=3$.
\item For $n> n_0 $, $\rho^{ab}_{F(\textbf{v})}(n)\in \left\{2,3 \right\}$.
\end{enumerate}
\end{theorem}
\textbf{Proof:} We distinguish the following cases according to the length $n$ of factors.
\noindent\textbf{Case 1}: $1\leq n \leq n_0-l$. Then, since
$L_n(F(\textbf{v}))=\left\{b^n,\ b^iab^{n-i-1} \:\ i=0,1,\dots, n-1 \right\}$, we obtain $$\chi_n(F(\textbf{v}))=\left\{(0,n), (1,n-1)\right\}.$$
Hence, $\rho^{ab}_{F(\textbf{v})}(n)= 2$.
\noindent\textbf{Case 2}: $n_0-l+1\leq n\leq n_0$. Then, we have:
$$L_n(F(\textbf{v}))=\{b^n,\ b^iab^{n-i-1},\ b^jab^{n_0-l-1}ab^{n-n_0+l-1-j} \:\ i=0,\dots, n-1; \ 0\leq j \leq n-n_0+l-1\}.$$
It follows that, $\chi_n(F(\textbf{v}))=\left\{(0,n),\ (1,n-1),\ (2,n-2)\right\}.$ Consequently, $\rho^{ab}_{F(\textbf{v})}(n)= 3$.
\noindent\textbf{Case 3}: Let us consider $n> n_0$. Then, by Theorem \ref{cc}, the classical complexity function of $F(\textbf{v})$ is unbounded. Therefore, $F(\textbf{v})$ is non-ultimately periodic. Thus, $\rho^{ab}_{F(\textbf{v})}(n)\geq 2$. Moreover, by Lemma \ref{eq}, the word $F(\textbf{v})$ is $2$-balanced. In addition, $F(\textbf{v})$ being a binary word, we deduce by Proposition \ref{pab-binaire} that $\rho^{ab}_{F(\textbf{v})}(n)\leq 3$. Hence, $\rho^{ab}_{F(\textbf{v})}(n)\in \left\{2,\hspace{0.1cm} 3 \right\}$.
$\square$
\begin{Remarque}
The sequence $(\rho^{ab}_{F(\textbf{\emph{v}})}(n))_{n\in \mathbb{N}}$ is non-ultimately periodic.
\end{Remarque}
\end{document}
\begin{document}
\begin{abstract}
In this paper we study the long time behavior of a discrete approximation in time and space of the cubic nonlinear Schr\"odinger equation on the real line. More precisely, we consider a symplectic time splitting integrator applied to a discrete nonlinear Schr\"odinger equation with additional Dirichlet boundary conditions on a large interval. We give conditions ensuring the existence of a numerical soliton which is close in energy norm to the continuous soliton. Such a result is valid under a CFL condition of the form $\tau h^{-2} \leq C$, where $\tau$ and $h$ denote the time and space step sizes respectively. Furthermore, we prove that if the initial datum is symmetric and close to the continuous soliton $\eta$, then the associated numerical solution remains close to the orbit of $\eta$, $\Gamma=\cup_{\alpha}\{e^{i\alpha}\eta\}$, for very long times.
\end{abstract}
\subjclass{37M15, 65P40, 37K40}
\keywords{Discrete nonlinear Schr\"odinger equation, Numerical soliton, Stability, Backward error analysis, Modified Hamiltonian}
\maketitle
\section{Introduction}
We study numerical approximations of solitons of the focusing nonlinear Schr\"odinger equation (NLS) on the real line:
\begin{equation} \label{nls}
i\psi_t=-\psi_{xx}-|\psi|^2\psi, \quad x\in \mathbb{R},\ t\in \mathbb{R}.
\end{equation}
This equation is a Hamiltonian partial differential equation (PDE) associated with the Hamiltonian function
\begin{equation}
\label{hc}
H(\psi):=\int_{\mathbb{R}} \left[ \left|\psi_x\right|^2-\frac{\left|\psi\right|^4}{2} \right] \text{d} x,
\end{equation}
and preserving the $L^2$ norm
\begin{equation}
\label{nc}
N(\psi):=\int_{\mathbb{R}} \left|\psi\right|^2 \text{d} x.
\end{equation}
The goal of this paper is to understand the long time behavior of numerical integration algorithms for initial data close to the solitary wave solution $\psi(t,x)=e^{i \lambda t} \eta(x)$ where
\begin{equation}
\label{phis}
\eta(x):=\frac{1}{\sqrt 2}{\rm sech}\left( \frac{x}{2}\right),
\end{equation}
and $\lambda \in \mathbb{R}$ is the Lagrange multiplier associated with the minimization of $H$ under the constraint $N=1$. It is well known, see for instance \cite{Weinstein85,Grill87,Grill90,Frohlich04}, that this solution is \emph{orbitally stable} in the sense that for a small perturbation of the initial data, the exact solution remains close to the orbit of $\eta$ for all times. Here we will only consider \emph{symmetric} initial conditions satisfying $\psi(x) = \psi(-x)$, a property that is preserved by the flow of \eqref{nls}. In this setting, the orbital stability of the continuous soliton can be described as follows: let
\begin{equation}
\label{eq:defgamma}
\Gamma := \bigcup_{\alpha \in \mathbb{R}} \{e^{i \alpha} \eta(x)\}
\end{equation}
and assume that $\psi(0,\cdot)$ is a symmetric function satisfying $\mathrm{dist}(\psi(0,\cdot), \Gamma) \leq \delta$ for some $\delta$ sufficiently small; then for all times $t > 0$, if $\psi(t,\cdot)$ denotes the solution of \eqref{nls}, we have
\begin{equation}
\label{eq:bite}
\forall\, t > 0, \quad
\mathrm{dist}(\psi(t,\cdot), \Gamma) < C \delta,
\end{equation}
where $C$ is a constant independent of $\delta$ and $t$, and where the distance is measured in the $H^1$ norm. The present paper deals with the persistence of this result for fully discrete numerical methods. It is an old problem that was pointed out in several papers in the last 30 years, see for instance \cite{DFP81,SZ86,Duran00,Borgna08}, and the numerical approximation of \eqref{phis} over long times has now become a classical benchmark to test the performance and stability of numerical schemes, see for instance \cite{Akrivis,Fei95,Besse} and the references therein. However, as far as we know, no result of the form \eqref{eq:bite} has been proven in the literature for fully discrete approximations of \eqref{nls} (see however \cite{BP10,Borgna08} for the space discretized case).
In particular, the effect of the time discretization yields many mathematical difficulties. Dur\'an \& Sanz-Serna gave in \cite{Duran00} some asymptotic expansion of the numerical solution close to a soliton, but the lack of a modified energy acting on $H^1$ and preserved over long times by the numerical scheme (the so-called \emph{backward error analysis}) was an obstruction to defining a possibly stable numerical soliton. Here, we take advantage of a recent construction of such a modified energy given by Faou \& Gr\'ebert in \cite{FG11} to show the existence and stability of a modified soliton that is close to \eqref{phis} in energy norm.
In this paper, the discretizations of \eqref{nls} we consider are made of three levels of approximation:
\begin{itemize}
\item \textbf{A space discretization}, where we use a grid with mesh size $h > 0$ made of an infinite collection of equidistant points of $\mathbb{R}$. The equation \eqref{nls} is then approximated by the discrete nonlinear Schr\"odinger equation (DNLS) where the Laplace operator is replaced by its finite difference approximation over three points.
\item \textbf{A Dirichlet cut-off}, where we replace the integrability condition at infinity of the derivative of $\psi$ by a Dirichlet boundary condition at the boundary of a large window of size $2Kh$, where $K \gg 1$.
\item \textbf{A time discretization algorithm} to integrate the DNLS equation with Dirichlet boundary conditions. This discretization introduces a last parameter $\tau$ which represents the time step. To do this we consider a symplectic time splitting algorithm where the kinetic part and the potential part are solved alternately, as described for instance in \cite{Weideman86}.
\end{itemize}
Each of these three levels of discretization relies on a discretization parameter. In this paper, we prove orbital stability in the sense of \eqref{eq:bite} for the numerical solution, where the distance to $\Gamma$ is estimated in terms of the three discretization parameters $h$, $K$ and $\tau$.
We first present some numerical experiments showing that the solitary wave rapidly disappears if either the integration algorithm is not symplectic, or if it is symplectic but used with too large a CFL number $\tau h^{-2}$.
The proof is organized as follows: we first recall in Section 4 the main arguments of the proof of the orbital stability result in the continuous case, following in essence the presentation made in \cite{Frohlich04}. We then give in Section 5 an abstract result showing that if the energy space $H^1$ is well approximated by the space discretization, and if the numerical scheme preserves (or almost preserves) modified $L^2$ norm and energy functions that are close to the exact ones, we can obtain orbital stability results with precise bounds depending on the parameters. We then apply this formalism in Section 6 to our three levels of discretization.
As the proof of the orbital stability result is based on the variational characterization of the solitary wave and thus heavily relies on the preservation of the energy and $L^2$ norm, long time bounds can be straightforwardly obtained for energy and $L^2$ norm preserving schemes such as the Delfour-Fortin-Payre scheme, see \cite{DFP81}. This follows directly from the analysis of the space discretized case (see also Remark \ref{BFG}).
The cornerstone of the analysis of splitting methods is the construction of the modified energy. Recall that in the finite dimensional case, the existence of a modified energy is guaranteed by Hamiltonian interpolation (see \cite{BG94,HLW,Reich99}), but this cannot be applied straightforwardly to Hamiltonian PDEs unless unreasonable a priori assumptions are made on the regularity of the numerical solution, which prevents a fair use of the bootstrap argument underlying the orbital stability methodology.
Here we take advantage of the recent backward error analysis result of \cite{FG11} to construct a modified energy \emph{acting on $H^1$} for splitting methods applied to \eqref{nls}. Actually we give a simplified proof of a simpler version of the result presented in \cite{FG11} or \cite{F11}, which has some interest in itself.
Using this result, we then prove an orbital stability result for fully discrete splitting methods applied to \eqref{nls} with a CFL restriction, and over very long times of the form $n\tau \sim \tau^{-M}$, where $M \geq 0$ is an integer depending on the CFL.
\section{Three discretization levels and main results} \label{theo}
We now describe more precisely the three levels of approximation of \eqref{nls} mentioned in the introduction. At each step, we state the orbital stability result that we obtain.
\subsection{Space discretization}
Having fixed a positive parameter $h$, we discretize space by substituting the sequence $\psi_{\ell} \simeq \psi(h \ell)$, $\ell \in \mathbb{Z}$, for the function $\psi(x)$, and the second-order finite difference operator $\Delta_h$ defined by
\begin{equation}
\label{deltamu}
(\Delta_h \psi)_{\ell}:=\frac{\psi_{\ell+1}+\psi_{\ell-1}-2\psi_{\ell}}{h^2},
\end{equation}
for the Laplace operator $-\partial_{xx}$.
The NLS is thus reduced to the discrete nonlinear Schr\"odinger equation (DNLS):
\begin{equation}
\label{dnls}
i\dot\psi_\ell =-\frac{1}{h^2}(\psi_{\ell+1}+\psi_{\ell-1}-2\psi_\ell)- |\psi_\ell|^2\psi_\ell, \quad \ell \in \mathbb{Z},
\end{equation}
where $t \mapsto \psi(t) = (\psi_\ell(t))_{\ell \in \mathbb{Z}}$ is a map from $\mathbb{R}$ to $\mathbb{C}^{\mathbb{Z}}$. With this equation are associated a Hamiltonian function and a discrete $L^2$ norm given by
\begin{equation}
\label{eq:discrham}
H_h(\psi)=h \sum_{j\in \mathbb{Z}} \left[ \left|\frac{\psi_j-\psi_{j-1}}{h}\right|^2 -\frac{|\psi_j|^4}{2}\right]
\quad \mbox{and}\quad
N_h(\psi)=h \sum_{j\in \mathbb{Z}} |\psi_j|^2.
\end{equation}
The discrete space of functions is
$$
V_h = \{ (\psi_j)_{j\in\mathbb{Z}} \in \mathbb{C}^{\mathbb{Z}}\, | \, \psi_j = \psi_{-j}\}
$$
equipped with the discrete norm given by
$$
\left\|\psi\right\|_{h}^{2} = 2h \sum_{j\in \mathbb{Z}} \frac{|\psi_{j+1}-\psi_j|^2}{h^2}+h \sum_{j\in \mathbb{Z}} |\psi_j|^2.
$$
Following \cite{BP10}, we identify $V_h$ with a finite element subspace of
$H^1( \mathbb{R} ; \mathbb{C} )$. More precisely, defining the function $s: \mathbb{R} \to \mathbb{R} $ by
\beta egin{equation}gin{equation}
\ell ambda bel{fe.1}
s(x)=
\beta egin{equation}gin{cases}
0\qquad\qquad &{\rm if}\quad |x|>1,\\
x+1\quad &{\rm if}\quad -1 \ell eq x \ell eq 0,\\
- x+1\quad &{\rm if}\quad 0 \ell eq x \ell eq 1,
\text{e} nd{cases}
\text{e} nd{equation}
the identification is done through the map $i_h: V_h \to H^1( \mathbb{R} ; \mathbb{C} )$ defined by
\beta egin{equation}gin{equation}
\ell ambda bel{eq:ih}
\ell eft\{\psi_j\right\}_{j\in \mathbb{Z} }\mapsto (i_h\psi)(x) := \sigma um_{j\in \mathbb{Z} } \psi_j \ell eft(\frac{x}{h}-j\right) \ .
\text{e} nd{equation}
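As an illustration, and under the additional assumption (made only for this sketch) that the sequence is truncated to $|j| \leq K$, the interpolant $i_h\psi$ can be evaluated as follows.
\begin{verbatim}
import numpy as np

def hat(y):
    # the hat function s: s(y) = max(1 - |y|, 0)
    return np.maximum(1.0 - np.abs(y), 0.0)

def i_h(psi, h, K, x):
    # (i_h psi)(x) = sum_j psi_j s(x/h - j), with psi[j + K] = psi_j, |j| <= K
    return sum(psi[j + K] * hat(x / h - j) for j in range(-K, K + 1))
\end{verbatim}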
Recall that $\Gamma$ is the curve of minima of the continuous Hamiltonian, given by \eqref{eq:defgamma}.
With these notations, we have the following result.
\begin{theorem}
\label{th:dnls}
There exist $\delta_0$ and $h_0$ such that for all $\delta < \delta_0$ and $h \leq h_0$, if $(\psi^0_j)_{j \in \mathbb{Z}} \in V_h$ is such that
$$
\dist(i_h \psi^0, \Gamma) \leq \delta,
$$
where the distance is measured in the continuous $H^1(\mathbb{R};\mathbb{C})$ norm, then the solution $(\psi_j(t))_{j \in \mathbb{Z}}$ of \eqref{dnls} satisfies
$$
\forall\, t \geq 0, \quad
\dist(i_h \psi(t), \Gamma) \leq C (\delta + h)
$$
for some constant $C$ independent of $h$ and $\delta$.
\end{theorem}
Notice that the DNLS flow is not a priori globally defined, i.e. defined for all initial data in $V_h$ and all times $t$. However, since a solution of DNLS issued from an initial datum close to $\Gamma$ remains unconditionally close to $\Gamma$, such a solution is automatically global.
\subsection{Dirichlet cut-off}
In order to come down to a finite dimensional system, we fix a large number $K \geq 1$, substitute the sequence $-K,\ldots,K$ for the set $\mathbb{Z}$ in \eqref{dnls}, and add the Dirichlet boundary conditions $\psi_{-K-1}=\psi_{K+1}=0$. The equation we consider is thus the (large) ordinary differential system
\begin{equation}
\label{dnlsdir}
\left\{\begin{array}{rcl}
i\dot\psi_{\ell}&=&-\displaystyle\frac{1}{h^2}(\psi_{\ell+1}+\psi_{\ell-1}-2\psi_{\ell})- |\psi_{\ell}|^2\psi_{\ell},\quad -K\leq \ell\leq K,\\[2ex]
\psi_{\pm (K+1)} &=& 0.
\end{array}\right.
\end{equation}
Note that here we use the convention that $\psi_{\ell} = 0$ for all $|\ell| \geq K+1$, so that the previous system is indeed a closed set of differential equations. The corresponding discrete function space is
\begin{equation}
\label{eq:VhK}
V_{h,K} := \{ (\psi_j)_{j \in \mathbb{Z}} \in V_h \, | \, \psi_j = 0\quad \mbox{for} \quad |j| \geq K+1\},
\end{equation}
on which we define the Hamiltonian function and discrete $L^2$ norm $H_{h,K} := H_h|_{V_{h,K}}$ and $N_{h,K}:= N_{h}|_{V_{h,K}}$ as restrictions of the functions \eqref{eq:discrham} to $V_{h,K}\subset V_h$. Similarly, we define $i_{h,K} = i_h|_{V_{h,K}}$. In the following, we often use the notation $(\psi_j)_{j =-K}^K$ to denote an element of $V_{h,K}$, with the implicit extension by $0$ for $|j| \geq K+1$ defining an element of \eqref{eq:VhK}.
With these notations, we have the following result:
\begin{theorem}
\label{th:dnlsdir}
There exist constants $C_1$, $C_2$, $\delta_0$ and $\varepsilon_0$ such that for all $\delta < \delta_0$ and all $h$ and $K$ such that $h + \frac{1}{h^2} e^{-C_1 Kh} \leq \varepsilon_0$, if $(\psi^0_j)_{j= -K}^K \in V_{h,K}$ is such that
$$
\dist(i_{h,K} \psi^0, \Gamma) \leq \delta,
$$
then the solution $(\psi_j(t))_{j = -K}^K$ of \eqref{dnlsdir} satisfies
$$
\forall\, t \geq 0, \quad
\dist(i_{h,K} \psi(t), \Gamma) \leq C_2 \Big(\delta + h + \frac{1}{h^2} e^{-C_1 Kh}\Big).
$$
\end{theorem}
\begin{remark}
The exponentially small term in the previous estimate represents the effect of the Dirichlet cut-off. As we will see below, it directly comes from the fact that the function $\eta$ is exponentially decreasing at infinity.
\end{remark}
\subsection{Time discretization}
In this work, the time discretization of \eqref{nls} that we consider is a splitting scheme: we construct $\psi^{n}$, the approximation of the solution $\psi(t)$ of \eqref{nls} at time $n\tau$, iteratively by the formula
$$
\psi^{n+1} = \Phi_A^\tau \circ \Phi_P^\tau(\psi^{n}),
$$
where the flow $\Phi_P^\tau$ is by definition the exact solution of
$$
i\dot\psi_{\ell} = - |\psi_{\ell}|^2 \psi_{\ell}, \quad \ell = -K,\ldots,K\,,
$$
in $V_{h,K}$, which is given explicitly by the formula $\Phi_P^\tau(\psi)_{\ell} = \exp(i\tau|\psi_{\ell}|^2)\psi_{\ell}$. The flow $\Phi_A^\tau$ is by definition the solution of
\begin{equation}
\label{eq:phiA}
i\dot\psi_{\ell} = -\frac{1}{h^2}(\psi_{\ell+1}+\psi_{\ell-1}-2\psi_{\ell}), \quad \ell = -K,\ldots,K\,,
\end{equation}
with the convention $\psi_{\ell} = 0$ for $|\ell| \geq K+1$. The implementation of this numerical scheme requires the computation of the exponential of a tridiagonal matrix at each step. It could also be done in discrete Fourier space, in which the operator on the right-hand side is diagonal. The main advantage of this splitting method is that it is an explicit and symplectic scheme.
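The following Python sketch illustrates one step of this splitting scheme on the truncated grid; it is only an illustration of the formulas above (the experiments reported in the next section were run in MATLAB using {\tt expm}).
\begin{verbatim}
import numpy as np
from scipy.linalg import expm

def splitting_step(psi, tau, h):
    # Phi_P^tau: exact flow of i psi' = -|psi|^2 psi, i.e. psi -> exp(i tau |psi|^2) psi
    psi = np.exp(1j * tau * np.abs(psi)**2) * psi
    # Phi_A^tau: exact flow of i psi' = -Delta_h psi, i.e. psi -> exp(i tau Delta_h) psi
    n = psi.size
    lap = (np.diag(-2.0 * np.ones(n)) + np.diag(np.ones(n - 1), 1)
           + np.diag(np.ones(n - 1), -1)) / h**2
    return expm(1j * tau * lap) @ psi
\end{verbatim}
In practice the propagator $\exp(i\tau\Delta_h)$ would of course be precomputed once, since it does not change from one step to the next.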
Our main result is the following.
\begin{theorem}
\label{th:splitting}
There exist constants $C_1$, $C_2$, $\delta_0$ and $\varepsilon_0$ such that for all $\delta < \delta_0$ and all $h$, $\tau$ and $K$ such that $h + \frac{1}{h^2} e^{-C_1 Kh} \leq \varepsilon_0$ and such that the following CFL condition is satisfied,
\begin{equation}
\label{eq:CFL}
(2 M + 3) \frac{\tau}{h^2} < \frac{2\pi}{3},
\end{equation}
if $(\psi^0_j)_{j= -K}^K \in V_{h,K}$ is such that
$$
\dist(i_{h,K} \psi^0, \Gamma) \leq \delta,
$$
then we have
\begin{equation}
\label{eq:estSPLIT}
\forall\, n\tau \leq \tau^{-M}, \quad
\dist(i_{h,K} (\Phi^\tau_A \circ \Phi_P^\tau)^n \psi^0, \Gamma) \leq C_2 \Big(\delta + h + \frac{\tau}{h} + \frac{1}{h^2} e^{-C_1 Kh}\Big).
\end{equation}
\end{theorem}
\begin{remark}
In the last estimate \eqref{eq:estSPLIT}, the term $\tau/h$ represents the error induced by the modified energy constructed with the method of \cite{FG11} (see Section 7 below). Note that under the condition \eqref{eq:CFL}, this term is actually of order $\mathcal{O}(h)$.
\end{remark}
\begin{remark}\label{BFG}
An alternative time approximation of \eqref{dnlsdir} is the modified Crank-Nicolson scheme of Delfour, Fortin and Payre (see \cite{DFP81,SZ84}), defined as the map $\psi^n \mapsto \psi^{n+1}$ such that
$$
\psi^{n+1}_{\ell} = \psi^{n}_{\ell} + \frac{i\tau}{2}(\Delta_h (\psi^{n+1} + \psi^{n}))_{\ell} + \frac{i\tau}{4} (|\psi^{n+1}_{\ell}|^2 + |\psi^{n}_{\ell}|^2) (\psi^{n+1}_{\ell} + \psi^{n}_{\ell}),
$$
for $\ell = -K,\ldots,K$.
It can be shown using a fixed point argument that for $\tau$ sufficiently small, $\psi^{n+1}$ is well defined, and that this scheme preserves exactly the discrete $L^2$ norm and the discrete energy \eqref{eq:discrham}. Using this property, it can easily be shown that the conclusions of Theorem \ref{th:dnlsdir} extend straightforwardly to this specific fully discrete case. Notice that this method has the disadvantage of being strongly implicit; a sketch of one step solved by fixed-point iteration is given below.
\end{remark}
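As an illustration of the remark above, here is a minimal Python sketch of one step of this scheme solved by a naive fixed-point iteration; this is an assumption on the implementation made only for the sketch (the iteration converges only for $\tau$ small enough, in line with the fixed point argument mentioned above, and an actual implementation would rather treat the linear part implicitly).
\begin{verbatim}
import numpy as np

def dfp_step(psi_old, tau, h, tol=1e-12, max_iter=200):
    # One step of the Delfour-Fortin-Payre scheme by fixed-point iteration.
    n = psi_old.size
    lap = (np.diag(-2.0 * np.ones(n)) + np.diag(np.ones(n - 1), 1)
           + np.diag(np.ones(n - 1), -1)) / h**2
    psi_new = psi_old.copy()
    for _ in range(max_iter):
        nonlin = (0.25j * tau * (np.abs(psi_new)**2 + np.abs(psi_old)**2)
                  * (psi_new + psi_old))
        update = psi_old + 0.5j * tau * (lap @ (psi_new + psi_old)) + nonlin
        if np.max(np.abs(update - psi_new)) < tol:
            return update
        psi_new = update
    return psi_new
\end{verbatim}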
\section{Numerical experiments}\label{num}
In this section, we would like to illustrate the results given in Theorem \ref{th:splitting}, and to show that if the CFL condition \eqref{eq:CFL} is not satisfied, the stability estimate \eqref{eq:estSPLIT} is no longer true. In contrast, we show that if the CFL number is small enough, numerical stability can indeed be observed. On the other hand, we show that for non symplectic integrators, even used with a very small CFL number, numerical instabilities appear.
In a first example, we take $h = 0.1875$, $K = 80$ (so that $Kh = 15$), $\tau = 0.2$ and the initial condition \eqref{phis}. The CFL number is equal to $5.7$. We consider the integrator $\Phi_A^\tau \circ \Phi_P^\tau$ defined above. As mentioned in the previous section, the flow $\Phi_P^\tau$ can be calculated explicitly, while the computation of $\Phi_A^\tau$ (see \eqref{eq:phiA}) is performed using the {\tt expm} MATLAB procedure.
In Figure \ref{fig4}, we plot the absolute value of the fully discrete numerical solution $\psi^{n} = (\Phi_A^\tau \circ \Phi_P^\tau)^n (\psi^0)$. We observe that the shape of the soliton is destroyed between the times $t = 100$ and $t = 200$.
\begin{figure}[ht]
\begin{center}
\rotatebox{0}{\resizebox{!}{0.33\linewidth}{\includegraphics{CFL57_70.pdf}}}
\rotatebox{0}{\resizebox{!}{0.33\linewidth}{\includegraphics{CFL57_130.pdf}}}
\\
\rotatebox{0}{\resizebox{!}{0.33\linewidth}{\includegraphics{CFL57_150.pdf}}}
\rotatebox{0}{\resizebox{!}{0.33\linewidth}{\includegraphics{CFL57_200.pdf}}}
\end{center}
\caption{Instability for $\tau/h^2 = 5.7$}
\label{fig4}
\end{figure}
In a second example, we take the same initial data and the parameters $K = 80$ and $h = 0.1875$, except that we take a much smaller $\tau = 0.001$, making the CFL number equal to $0.028$. However, we artificially break the symplecticity of the integrator by replacing the exact evaluation of the exponential in the flow $\Phi_A^\tau$ by its Taylor approximation of order 2:
$$
\exp(\tau A) \simeq I + \tau A + \frac{\tau^2}{2} A^2.
$$
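Concretely, in the sketch notation used earlier, this amounts to replacing the exact propagator by its quadratic Taylor polynomial, which destroys the unitarity (and hence the symplecticity) of the linear step:
\begin{verbatim}
import numpy as np

def taylor2_propagator(lap, tau):
    # Second-order Taylor polynomial replacing expm(1j * tau * lap)
    A = 1j * tau * lap
    return np.eye(lap.shape[0]) + A + 0.5 * (A @ A)
\end{verbatim}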
\begin{figure}[ht]
\begin{center}
\rotatebox{0}{\resizebox{!}{0.33\linewidth}{\includegraphics{CFL0028_440.pdf}}}
\rotatebox{0}{\resizebox{!}{0.33\linewidth}{\includegraphics{CFL0028_500.pdf}}} \\
\rotatebox{0}{\resizebox{!}{0.33\linewidth}{\includegraphics{CFL0028_560.pdf}}}
\rotatebox{0}{\resizebox{!}{0.33\linewidth}{\includegraphics{CFL0028_620.pdf}}}
\end{center}
\caption{Instability of non symplectic integrators}
\label{fig10}
\end{figure}
As before, we observe in Figure \ref{fig10} an instability phenomenon after some time, despite the fact that the CFL number is very small. Such an instability is due to the non symplectic nature of the integrator, which prevents the existence of a modified energy preserved by the numerical scheme.
Finally, we consider the same initial condition and the same numbers $K$ and $h$, but we take $\tau = 0.02$, making the CFL number equal to $0.57$, and we compute the exponential exactly, making the scheme symplectic.
\begin{figure}[ht]
\begin{center}
\rotatebox{0}{\resizebox{!}{0.33\linewidth}{\includegraphics{CFL057_1e3.pdf}}}
\rotatebox{0}{\resizebox{!}{0.33\linewidth}{\includegraphics{CFL057_1e4.pdf}}} \\
\rotatebox{0}{\resizebox{!}{0.33\linewidth}{\includegraphics{CFL057_1e5.pdf}}}
\rotatebox{0}{\resizebox{!}{0.33\linewidth}{\includegraphics{CFL057_1e6.pdf}}}
\end{center}
\caption{Long time stability for $\tau/h^2 = 0.57$}
\label{fig6}
\end{figure}
In Figure \ref{fig6} we can observe that the soliton is preserved for a very long time, up to $t = 10^6$, which corresponds to $2\cdot 10^8$ iterations. This result illustrates Theorem \ref{th:splitting}.
\section{The continuous case}
Before giving the proofs of the theorems presented above, we recall here the main lines of the proof of the orbital stability result in the continuous and symmetric case, obtained first in \cite{Weinstein85} (see also \cite{Grill87,Grill90,Frohlich04}). The proofs of the discrete results will essentially be variations on the same theme.
The method is based on the variational characterization of the soliton $\eta$ as the unique real symmetric minimizer of the problem
\begin{equation}
\label{min}
\min_{N(\psi) = 1} H(\psi),
\end{equation}
where $H$ is the Hamiltonian \eqref{hc} and $N$ the norm \eqref{nc}.
\begin{remark}
\label{lag}
By the method of Lagrange multipliers, there exists $\lambda>0$ such that
$$
-\partial_{xx}\eta-\eta^3=-\lambda\eta.
$$
\end{remark}
\begin{remark}
We only consider the case where $N(\eta) = 1$ in order to avoid the introduction of a supplementary parameter. It is clear that we could also consider the numerical approximation of any given soliton, provided that its $L^2$ norm enters into all the constants appearing in the estimates below.
\end{remark}
In the following, we set
$$
V = \{\; \psi \in H^1(\mathbb{R};\mathbb{C}) \quad | \quad \psi(-x) = \psi(x)\; \}.
$$
We also define the real scalar product
$$
\langle \varphi, \psi \rangle = \mathrm{Re} \int_{\mathbb{R}} \varphi(x) \overline{\psi(x)}\,dx.
$$
This scalar product allows us to identify $H^1(\mathbb{R};\mathbb{C})$ with the product $H^1(\mathbb{R};\mathbb{R}) \times H^1(\mathbb{R};\mathbb{R})$ as follows:
if $\psi = \frac{1}{\sqrt{2}}(q + ip)$ and $\varphi = \frac{1}{\sqrt{2}}(q' + ip')$, where $p$, $q$, $p'$ and $q'$ are real symmetric $H^1(\mathbb{R};\mathbb{R})$ functions, then we have
$$
\langle \varphi, \psi \rangle = \frac12 \int_{\mathbb{R}} q(x)q'(x) + p(x)p'(x)\,dx.
$$
The real scalar product on $H^1(\mathbb{R};\mathbb{C}) \simeq H^1(\mathbb{R};\mathbb{R}) \times H^1(\mathbb{R};\mathbb{R})$ is then given by
$$
(\varphi, \psi) = \langle \varphi, \psi \rangle + \langle \partial_x \varphi, \partial_x \psi \rangle,
$$
and we set
$$
\Norm{\varphi}{H^1}^2 := (\varphi,\varphi) = \frac12 \int_{\mathbb{R}} |\partial_x p|^2 + |\partial_x q|^2 + |p|^2 + |q|^2\,dx
$$
for $\varphi = \frac{q + ip}{\sqrt{2}}$. In the rest of this paper, we often use the complex and the real notations interchangeably.
In the following, we set
\begin{equation}
\label{eq:UR}
\mathcal{U}(R) = \{ \varphi \in V\, | \, \dist(\varphi, \Gamma) < R\},
\end{equation}
where $\Gamma$ is defined in \eqref{eq:defgamma}, and the distance is measured in the $H^1$ norm.
Note that the Hamiltonian function $H$ and the norm $N$ are smooth on $H^1$ (using the fact that $H^1$ is an algebra). Moreover, these functions are gauge invariant, in the sense that for all $\varphi \in H^1$ and all $\alpha\in\mathbb{R}$, we have $H(e^{i\alpha}\varphi) = H(\varphi)$ and $N(e^{i\alpha}\varphi) = N(\varphi)$.
Due to this invariance, it is immediate to see that the whole manifold $\Gamma$ is formed by minima of the minimization problem \eqref{min}. It is then well known \cite{Weinstein85,Grill87,Grill90,Frohlich04} that these minima are nondegenerate in the directions transversal to the orbit $\Gamma$ defined in \eqref{eq:defgamma}, for symmetric functions.
More precisely, following \cite{Frohlich04}, we define the following set of coordinates in the vicinity of $\Gamma$: set
\begin{equation}
\label{eq:W}
W = \{ u \in V \, | \, \langle u, \eta \rangle = \langle u, i\eta \rangle = 0\},
\end{equation}
equipped with the $H^1$ norm induced by the space $V$. As $i\eta$ is tangent to the curve $\Gamma$ and orthogonal\footnote{Recall that here $\langle\, \cdot \, , \, \cdot \, \rangle$ is a real scalar product.} to $\eta$, the previous $W$ can be interpreted as the space orthogonal to the plane containing the planar curve $\Gamma$. Note that $W$ is invariant under multiplication by complex numbers: for any $z \in \mathbb{C}$, if $u \in W$ then $zu \in W$.
We define the map $\chi$ as follows:
\begin{equation}
\label{eq:chi}
\mathbb{T} \times \mathbb{R} \times W \ni (\alpha,r,u) \mapsto \chi(\alpha,r,u) = e^{i\alpha}( (1+r)\eta + u) \in V,
\end{equation}
where $\mathbb{T} = \mathbb{R}/(2\pi\mathbb{Z})$ is the one-dimensional torus.
The following lemma can be found in \cite[Section 5, Proposition 1]{Frohlich04}. In our symmetric situation, we give here an independent proof that will later be easily transferred to the situation of discrete systems:
\begin{lemma}
\label{lem:1}
There exist constants $r_0$ and $R$ such that the map $\chi$ is smooth and bounded with bounded derivatives from $\mathbb{T} \times [-r_0,r_0] \times B(R)$ to $V$, and such that for all $\varphi \in \mathcal{U}(R)$, there exists $(\alpha,r,u) \in \mathbb{T} \times \mathbb{R} \times W$ such that $\varphi = \chi(\alpha,r,u)$. Moreover, the map $\chi^{-1}$ is smooth with bounded derivatives on $\mathcal{U}(R)$, and there exists a constant $C$ such that for all $\psi \in \mathcal{U}(R)$, we have
\begin{equation}
\label{eq:bdmerde}
\Norm{u(\psi)}{H^1} \leq C \dist(\psi, \Gamma).
\end{equation}
\end{lemma}
\begin{proof}
The first part of this lemma is clear using the explicit formula for $\chi$. To prove the second one, let us consider the projection of $\psi$ onto the plane generated by $(\eta,i\eta)$:
$$
\langle \psi, \eta \rangle \eta + \langle \psi, i\eta \rangle i\eta =: z(\psi)\eta,
$$
with $z(\psi) = \langle \psi, \eta \rangle + i\langle \psi, i\eta \rangle=\int \psi\bar\eta \in \mathbb{C}$. Note that the map $\psi \mapsto z(\psi)$ is smooth with bounded derivatives from $V$ to $\mathbb{C}$. Moreover, we have
$$
\dist(\psi,\Gamma)^2 \geq \inf_{\alpha} N(\psi - e^{i\alpha}\eta) \geq (|z(\psi)| - 1)^2.
$$
Hence for $R \leq 1/2$ and for all $\psi\in\mathcal{U}(R)$, we have $|z(\psi)| \in [1/2,3/2]$.
This shows that the maps
$$
\mathcal{U}(R) \ni\psi \mapsto \hat\alpha(\psi) = \arg(z(\psi)) \in \mathbb{T}
$$
and
$$
\mathcal{U}(R) \ni\psi \mapsto \hat r(\psi) = |z(\psi)| - 1 \in [-1/2,1/2]
$$
are well defined and smooth with bounded derivatives on $\mathcal{U}(R)$ (as compositions of smooth functions with bounded derivatives). Moreover, we have $\psi - z(\psi)\eta \in W$: as $W$ is invariant under multiplication by complex numbers, the function
$$
\hat u(\psi) := e^{-i\hat\alpha(\psi)} \psi - (1+\hat r(\psi))\eta = e^{-i\hat\alpha(\psi)} (\psi - z(\psi)\eta)
$$
is in $W$, smooth for $\psi \in \mathcal{U}(R)$, and satisfies $\psi = \chi(\hat\alpha(\psi), \hat r(\psi), \hat u(\psi))$.
To prove \eqref{eq:bdmerde}, let $\psi^* \in \Gamma$ be the element of $\Gamma$ realizing the minimum in the right-hand side (which exists by compactness of $\Gamma$). As $\psi^* \in \Gamma$ we have $\hat u(\psi^*) = 0$. As the function $\psi \mapsto \hat u(\psi)$ is uniformly Lipschitz on $\mathcal{U}(R)$, we have
$$
\Norm{\hat u(\psi)}{H^1} \leq C \Norm{\psi - \psi^*}{H^1} = C \dist(\psi,\Gamma),
$$
which gives the result.
\end{proof}
Let us now define the function $u \mapsto r(u)$ from $W$ to $\mathbb{R}$ by the implicit relation
$$
N(\chi(\alpha,r(u), u)) = 1.
$$
By explicit calculation, we have
\begin{equation}
\label{eq:rNu}
r(u) = -1 + \sqrt{1 - N(u)},
\end{equation}
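Indeed, by gauge invariance of $N$ and the orthogonality relations $\langle u,\eta\rangle = \langle u, i\eta\rangle = 0$ defining $W$, together with $N(\eta) = 1$, we have
$$
N(\chi(\alpha,r,u)) = N\big((1+r)\eta + u\big) = (1+r)^2 N(\eta) + N(u) = (1+r)^2 + N(u),
$$
and \eqref{eq:rNu} is the root of $(1+r)^2 + N(u) = 1$ with $1 + r > 0$.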
We deduce that $r(u)$ is well defined and smooth in a neighborhood of $0$ in $H^1$, and moreover that $|r(u)| = \mathcal{O}(\Norm{u}{H^1}^2)$ if $u$ is sufficiently small. Hence, $(\alpha,u) \mapsto \chi(\alpha,r(u), u)$ is a local parametrization of $\mathcal{S}$ in a neighborhood of $\Gamma\subset\mathcal{S}$, where
\begin{equation}
\label{s}
\mathcal{S} := \left\{\psi\in V\, | \, N(\psi)=1 \right\}.
\end{equation}
Now let us define the function
\begin{equation}
\label{eq:defHc}
\mathcal{H}(u) = H(\chi(\alpha,r(u),u)),
\end{equation}
which is well defined on $W$ (independently of $\alpha$) by gauge invariance of $H$. Moreover, this function is smooth in a neighborhood of $0$.
Then it can be shown (see \cite{Frohlich04}) that $u = 0$ is a nondegenerate minimum of $\mathcal{H}(u)$: we have
$$
\mathrm{d}\mathcal{H}(0) = 0, \quad \mbox{and}\quad \forall\, U \in W, \quad \mathrm{d}^2\mathcal{H}(0)(U,U) \geq c \Norm{U}{H^1}^2.
$$
Note that as $\mathcal{H}$ is smooth with locally bounded derivatives, the last coercivity estimate extends uniformly to a neighborhood of $0$: there exist positive constants $c$ and $\rho$ such that
\begin{equation}
\label{eq:coerci}
\forall\, u \in B(\rho), \quad\forall\, U \in W, \quad \mathrm{d}^2\mathcal{H}(u)(U,U) \geq c \Norm{U}{H^1}^2,
\end{equation}
where $B(\rho)$ denotes the ball of radius $\rho$ in $W$. In other words, the function $\mathcal{H}$ is strictly convex on $B(\rho)$ and has a strict minimum at $u = 0$.
With these results at hand, let $\psi \in \mathcal{S}$, and assume that $\dist(\psi,\Gamma)$ is small enough so that we can write
$$
\psi = e^{i\alpha}( (1 + r(u))\eta + u)
$$
for some $(\alpha,u) \in \mathbb{T} \times W$.
Then for some constant $C$ and sufficiently small $u$, we have
$$
\dist(\psi,\Gamma) \leq \Norm{\psi - e^{i\alpha}\eta}{H^1} \leq C (|r(u)| + \Norm{u}{H^1}) \leq C \Norm{u}{H^1}.
$$
Now as $u = 0$ is the minimum of the strictly convex function $\mathcal{H}$ on the ball $B(\rho)$, we can write
$$
H(\psi) - H(\eta) = \mathcal{H}(u) - \mathcal{H}(0) \geq \gamma \Norm{u}{H^1}^2 \geq c \dist(\psi,\Gamma)^2
$$
for some constants $\gamma$ and $c > 0$ depending only on $\rho$. Then a Taylor expansion of $\mathcal{H}$ around $u = 0$ shows that
$$
|\mathcal{H}(u) - \mathcal{H}(0)| \leq C \Norm{u}{H^1}^2,
$$
for some constant $C$ depending on $\rho$ and $H$ but not on $u \in B(\rho)$. Hence using \eqref{eq:bdmerde} we obtain the existence of constants $c$, $C$ and $R_0 > 0$ such that for all $\psi \in \mathcal{S}$ with $\dist(\psi,\Gamma) < R_0$, we have
$$
c\,\dist(\psi,\Gamma)^2 \leq |H(\psi) - H(\eta)| \leq C \dist(\psi,\Gamma)^2.
$$
The stability result \eqref{eq:bite} is then an easy consequence of this relation: assume that $\psi_0 \in \mathcal{S}$ satisfies $\dist(\psi(0),\Gamma) \leq \delta < \delta_0$ where $\delta_0 < R_0$, and let $\psi(t)$, $t >0$, be the solution of \eqref{nls} starting at $\psi(0) \equiv \psi_0$. Then by preservation of the energy $H$ and of the norm $N$, we have $\psi(t) \in \mathcal{S}$ for all $t > 0$, and moreover, as long as $\psi(t)$ is such that $\dist(\psi(t),\Gamma) < R_0$, we can write
\begin{equation}
\label{eq:aslongas}
c\,\dist(\psi(t),\Gamma)^2 \leq |H(\psi(t)) - H(\eta)| = |H(\psi(0)) - H(\eta)| \leq C \dist(\psi(0),\Gamma)^2.
\end{equation}
Hence if $\delta_0$ is small enough, this shows that for all $t$, $\dist(\psi(t),\Gamma) < R_0$, and that \eqref{eq:aslongas} is in fact valid for all times $t > 0$.
This implies \eqref{eq:bite} in the case $N(\psi)=1$.
\section{An abstract result}
In this section, we prove an abstract result for the existence and stability of discrete solitons. We first give conditions ensuring that a discrete Hamiltonian acting on a discrete subspace of $H^1$ possesses a minimizing soliton. We then show how the existence of a discrete flow (almost) preserving the Hamiltonian and the $L^2$ norm ensures numerical orbital stability over long times. In the next sections, we will apply this result to the three levels of discretization described above.
\subsection{Approximate problems}
We consider a set of parameters $\Sigma\subset\mathbb{R}^p$ and a function $\varepsilon:\Sigma \to \mathbb{R}^+$. This function will measure the ``distance'' between the discrete and continuous problems.
For all $\mu \in \Sigma$, we consider a Hilbert space $V_{\mu}$ equipped with a norm $\Norm{\cdot}{\mu}$. For a given number $R$, we denote by $B_\mu(R)$ the ball of radius $R$ in $V_\mu$. Moreover, for a given $k \geq 0$, a function $F: V_\mu \to \mathbb{C}$ of class $\mathcal{C}^k$, and a given $\psi_\mu \in V_\mu$, we set for all $n = 0,\ldots,k$
$$
\Norm{\mathrm{d}^n F(\psi_\mu)}{\mu} = \sup_{U^1,\ldots,U^n \in V_\mu\backslash{\{0\}}} \frac{|\mathrm{d}^n F(\psi_\mu)(U^1,\ldots,U^n)|}{\Norm{U^1}{\mu}\ldots\Norm{U^n}{\mu}}
$$
and we set
$$
\Norm{F}{\mathcal{C}^k(B_\mu(R))} = \sup_{n = 0,\ldots,k}\, \sup_{\psi_\mu \in B_\mu(R)} \Norm{\mathrm{d}^n F(\psi_\mu)}{\mu}.
$$
Moreover, we say that $F$ is {\em gauge invariant} if it satisfies, for all $\alpha\in\mathbb{T}$ and all $\psi_\mu \in V_\mu$, $F(e^{i\alpha} \psi_\mu) = F(\psi_\mu)$. Similarly, we say that $G: V_\mu\times V_\mu \to \mathbb{C}$ is gauge invariant if for all $\varphi_\mu$ and $\psi_\mu$ in $V_\mu$, and all $\alpha \in \mathbb{T}$, we have $G(e^{i\alpha} \varphi_\mu, e^{i\alpha} \psi_\mu) = G(\varphi_\mu,\psi_\mu)$.
We assume that the family $(V_\mu)_{\mu \in \Sigma}$ satisfies the following assumptions:
\begin{itemize}
\item[\textbf{(i)}] For all $\mu \in \Sigma$, there exist a linear embedding $i_{\mu} : V_{\mu}\to H^1$ and a projection $\pi_{\mu}: H^1 \to V_{\mu}$ that are gauge invariant in the sense that for all $\alpha \in \mathbb{T}$ and $\psi_\mu \in V_{\mu}$, $e^{i\alpha} i_{\mu} \psi_{\mu} = i_{\mu} e^{i\alpha} \psi_{\mu}$, and for all $\psi \in V$, $e^{i\alpha} \pi_{\mu} \psi = \pi_{\mu} e^{i\alpha} \psi$. Moreover, we assume that $i_{\mu}$ and $\pi_{\mu}$ are real in the sense that $\overline{i_\mu \psi_\mu} = i_\mu \overline{\psi_\mu}$ and $\overline{\pi_\mu \psi} = \pi_\mu \overline{\psi}$, and that they satisfy the relation $\pi_\mu \circ i_\mu = \mathrm{id}_{V_\mu}$. Finally, we assume that there exists a constant $R_0 > 1$ such that for all $\mu \in \Sigma$ and $\varphi_\mu \in B_\mu(R_0)$,
$$
\big| \Norm{\varphi_\mu}{\mu}^2 - \Norm{i_\mu \varphi_\mu}{H^1}^2 \big| \leq \varepsilon(\mu) \Norm{i_\mu \varphi_\mu}{H^1}^2.
$$
\item[\textbf{(ii)}]
For all $\mu \in \Sigma$, there exists a gauge invariant real scalar product $\langle \, \cdot\, , \, \cdot\, \rangle_\mu$ such that, setting $N_\mu(\psi_\mu) = \langle \psi_\mu, \psi_\mu \rangle_\mu$, we have $N_\mu(\psi_\mu) \leq \Norm{\psi_\mu}{\mu}^2$ and
$$
\Norm{N \circ i_{\mu} - N_{\mu}}{\mathcal{C}^2(B_{\mu}(R_0))} \leq \varepsilon(\mu).
$$
\item[\textbf{(iii)}]
For all $\mu \in \Sigma$, there exists a gauge invariant function $H_{\mu}: V_{\mu} \to \mathbb{R}$ which is a modified Hamiltonian in the sense that
$$
\Norm{H \circ i_{\mu} - H_{\mu}}{\mathcal{C}^2(B_{\mu}(R_0))} \leq \varepsilon(\mu).
$$
\item[\textbf{(iv)}]
If $\eta$ is the continuous soliton \eqref{phis} defined in the previous section, we have for all $\mu \in \Sigma$
\begin{equation}
\label{eq:approxeta}
\Norm{i_{\mu}\pi_{\mu}\eta - \eta}{H^1} \leq \varepsilon(\mu).
\end{equation}
\end{itemize}
Note that using \textbf{(i)}, there exist constants $c$, $C$ and $\varepsilon_0$ such that for $\psi_\mu \in V_\mu$ and $\mu \in \Sigma$ such that $\varepsilon(\mu) < \varepsilon_0$, we have
\begin{equation}
\label{eq:normmu}
c \Norm{i_{\mu} \psi_{\mu}}{H^1} \leq \Norm{\psi_{\mu}}{\mu} \leq C \Norm{i_{\mu} \psi_{\mu}}{H^1}.
\end{equation}
In the rest of this section, we will assume that the hypotheses \textbf{(i)--(iv)} are satisfied.
\subsection{Local coordinate system}
We assume here that all the $\mu \in \Sigma$ considered satisfy the relation $\varepsilon(\mu) < \varepsilon_0$ for some constant $\varepsilon_0$ to be made precise along the text.
In echo to \eqref{s} we define for all $\mu \in \Sigma$
$$
\mathcal{S}_\mu = \{ \psi_\mu \in V_\mu \, | \, N_\mu(\psi_\mu) = 1\},
$$
and the analogue at $\pi_\mu\eta$ of the space \eqref{eq:W}:
$$
W_\mu = \{ u_\mu \in V_\mu \, | \, \langle u_\mu, \pi_\mu\eta \rangle_\mu = \langle u_\mu, i \pi_\mu\eta \rangle_\mu = 0 \}.
$$
Note that $i_\mu W_\mu$ is not included in $W$.
By a slight abuse of notation, we will write $u_\mu \in B_\mu(\gamma)$ for the ball of radius $\gamma$ in $W_\mu$ (instead of $B_\mu(\gamma) \cap W_\mu$), for $\gamma > 0$. We also set for $\gamma > 0$ (compare \eqref{eq:UR})
\begin{equation}
\label{eq:URmu}
\mathcal{U}_\mu(\gamma) := \{ \psi_\mu \in V_\mu\, | \, \dist_\mu(\psi_\mu,\pi_\mu\Gamma) \leq \gamma\},
\end{equation}
where $\dist_\mu$ denotes the distance measured in the norm $\Norm{\cdot}{\mu}$ and where
$$
\pi_\mu\Gamma := \bigcup_{\alpha \in \mathbb{R}} \{ e^{i\alpha} \pi_{\mu}\eta\}.
$$
We then define the discrete map $\chi_\mu$ (see \eqref{eq:chi}):
$$
\mathbb{T} \times \mathbb{R} \times W_\mu \ni (\alpha,r,u_\mu) \mapsto \chi_\mu(\alpha,r,u_\mu) = e^{i\alpha}( (1+r)\pi_\mu\eta + u_\mu) \in V_\mu.
$$
\begin{lemma}
\label{lem:2}
There exist constants $\varepsilon_0$, $r_0$, $C$ and $R$ such that for all $\mu \in \Sigma$ with $\varepsilon(\mu) < \varepsilon_0$, the map $\chi_\mu$ is smooth and bounded with uniformly bounded derivatives (with respect to $\mu$) from $\mathbb{T} \times [-r_0,r_0] \times B_\mu(R)$ to $V_\mu$, and such that for all $\varphi_\mu \in \mathcal{U}_\mu(R)$, there exists $(\alpha,r,u_\mu) \in \mathbb{T} \times \mathbb{R} \times W_\mu$ such that $\varphi_\mu = \chi_\mu(\alpha,r,u_\mu)$. Moreover, the map $\chi_\mu^{-1}$ is smooth with uniformly bounded derivatives on $\mathcal{U}_\mu(R)$, and for all $\psi_\mu \in \mathcal{U}_\mu(R)$, we have
\begin{equation}
\label{eq:bdmerde2}
\Norm{u_\mu(\psi_\mu)}{\mu} \leq C \dist_\mu(\psi_\mu, \pi_\mu\Gamma).
\end{equation}
\end{lemma}
\begin{proof}
The proof is exactly the same as that of Lemma \ref{lem:1}, replacing $\langle\, \cdot\, , \, \cdot \, \rangle$ by $\langle\, \cdot\, , \, \cdot \, \rangle_\mu$, $N$ by $N_\mu$ and $\eta$ by $\pi_\mu\eta$. The fact that the constants are uniform in $\mu$ is a consequence of the direct construction made in the proof of that lemma and of the hypotheses $\textbf{(i)--(iv)}$. Note that we use the fact that
\begin{equation}
\label{eq:Nmu1}
|N_{\mu}(\pi_{\mu}\eta) - 1 | \leq C \varepsilon(\mu)
\end{equation}
for some constant $C$ independent of $\mu$, which is a consequence of \textbf{(ii)} and \eqref{eq:approxeta}, provided $\varepsilon(\mu) < \varepsilon_0$ is small enough to ensure that $\Norm{\pi_\mu\eta}{\mu} < R_0$ (which is possible upon using \eqref{eq:approxeta} and \eqref{eq:normmu}).
\end{proof}
Note that using the gauge invariance of $i_\mu$, we have for all $(\alpha,r,u_\mu) \in \mathbb{T} \times \mathbb{R} \times W_\mu$
$$
i_\mu \chi_\mu(\alpha,r,u_\mu) - \chi(\alpha,r,i_\mu u_\mu) = e^{i\alpha}(1+r)( i_\mu \pi_\mu\eta - \eta),
$$
and hence for all $u_\mu \in W_\mu$ and $r \in \mathbb{R}$,
\begin{equation}
\label{eq:ichi}
\Norm{i_\mu \chi_\mu(\alpha,r,u_\mu) - \chi(\alpha,r,i_\mu u_\mu)}{H^1} \leq (1 + |r|) \varepsilon(\mu).
\end{equation}
Following the formalism of the previous section, we define for all $\mu \in \Sigma$ the function $u_\mu \mapsto r_\mu(u_\mu)$ on $W_\mu$ by the implicit relation
$$
N_\mu(\chi_\mu(\alpha,r_\mu(u_\mu),u_\mu)) = 1,
$$
so that $(\alpha,u_\mu)$ is a local coordinate system close to a rescaling of $\pi_\mu\Gamma$. Using the definition of $N_\mu$ and $\chi_\mu$, we immediately obtain that
$$
r_\mu(u_{\mu}) = -1 + \sqrt{\frac{1 - N_{\mu}(u_{\mu})}{N_{\mu}(\pi_{\mu}\eta)}}.
$$
With this explicit expression, and using again \textbf{(ii)} and \eqref{eq:ichi}, there exist constants $\rho_0$, $C$ and $\varepsilon_0$ such that for all $\mu \in \Sigma$ with $\varepsilon(\mu) < \varepsilon_0$, $r_\mu$ is $\mathcal{C}^2(B_\mu(\rho_0))$, and
\begin{equation}
\label{estimr}
\Norm{r_\mu - r\circ i_\mu}{\mathcal{C}^2(B_\mu(\rho_0))} \leq C \varepsilon(\mu),
\end{equation}
where the function $r$ is defined in \eqref{eq:rNu}.
Now defining (compare \eqref{eq:defHc})
$$
\mathcal{H}_\mu(u_{\mu}) := H_\mu(\chi_\mu(\alpha, r_\mu(u_\mu), u_\mu)),
$$
the previous relations, together with \textbf{(iii)} and \eqref{eq:ichi}, imply that if $\rho_0$ is sufficiently small, $\mathcal{H}_\mu$ is well defined on $B_\mu(\rho_0)$, and moreover
\begin{equation}
\label{eq:approxHcal}
\Norm{\mathcal{H} \circ i_\mu - \mathcal{H}_\mu}{\mathcal{C}^2(B_{\mu}(\rho_0))} \leq C \varepsilon(\mu)
\end{equation}
for some constant $C$ independent of $\mu$, and for all $\mu \in \Sigma$ such that $\varepsilon(\mu) < \varepsilon_0$.
\subsection{Existence of a discrete soliton}
In the previous subsection, we have shown that the continuous function $\mathcal{H}$ can be approximated by a function $\mathcal{H}_\mu$ on balls of fixed radius $\rho_0$ in $V_\mu$. This is the key argument to prove the following result:
\begin{theorem}
Under the previous hypotheses, there exists $\varepsilon_0$ such that for all $\mu \in \Sigma$ with $\varepsilon(\mu) \leq \varepsilon_0$, there exists a discrete soliton $\eta_{\mu} \in V_{\mu}$ that realizes the minimum of $H_{\mu}$ under the constraint $N_{\mu}(\psi_{\mu}) = 1$, and such that
\begin{equation}
\label{5.7bis}
\Norm{\eta_{\mu} - \pi_{\mu}\eta}{\mu} \leq \varepsilon(\mu).
\end{equation}
Moreover, there exist constants $C$, $\delta_0$ and $\gamma_0$ such that for all $\mu \in \Sigma$ with $\varepsilon(\mu) < \varepsilon_0$ and all $\delta < \delta_0$,
\begin{equation}
\label{eq:ctrl}
\dist( i_{\mu} \psi_{\mu}, \Gamma)^2 \leq C( |H_{\mu}(\psi_{\mu}) - H_{\mu}(\eta_{\mu})| + \varepsilon(\mu) + \delta )
\end{equation}
for all $\psi_\mu$ such that $\dist( i_{\mu} \psi_{\mu}, \Gamma) \leq \gamma_0$ and $|N_\mu(\psi_\mu) - 1| \leq \delta$.
\end{theorem>
\begin{proof}
Let us take $\varepsilon_0$ and $\rho_0$ as in the previous subsection. Recall that as $\eta$ is a minimizer of the continuous Hamiltonian $H$, and by definition of $\mathcal{H}$, we have $\mathrm{d}\mathcal{H}(0) = 0$.
Using \eqref{eq:approxHcal}, we deduce that for all $\mu \in \Sigma$ such that $\varepsilon(\mu) < \varepsilon_0$,
\begin{equation}
\label{eq:dHc}
\Norm{\mathrm{d}\mathcal{H}_\mu(0)}{\mu} \leq C \varepsilon(\mu).
\end{equation}
Moreover, for all $U \in W_\mu$ and $u_\mu \in B_\mu(\rho_0)$, we have, using again \eqref{eq:approxHcal},
$$
|\mathrm{d}^2\mathcal{H}_\mu(u_\mu)(U,U) - \mathrm{d}^2\mathcal{H}(i_\mu u_\mu)(i_\mu U,i_\mu U)| \leq C \varepsilon(\mu) \Norm{U}{\mu}^2 .
$$
Using \eqref{eq:coerci} and \eqref{eq:normmu}, this shows that $\mathcal{H}_\mu$ is uniformly strictly convex on $B_\mu(\rho_0)$, i.e. satisfies
$$
\forall\, u_\mu \in B_\mu(\rho_0), \quad
\forall\, U \in W_\mu, \quad
\mathrm{d}^2\mathcal{H}_\mu(u_\mu)(U,U) \geq c_0 \Norm{U}{\mu}^2 ,
$$
with a constant $c_0$ independent of $\mu$, provided $\varepsilon_0$ is small enough.
As $\mathcal{H}_\mu$ is strictly convex on the closed ball $\overline{B_\mu}(\rho_0)$, $\mathcal{H}_\mu$ reaches its minimum on $\overline{B_\mu}(\rho_0)$ at some point $u_\mu^*\in \overline{B_\mu}(\rho_0)$ (see for instance \cite{Ciarlet}). We want to prove that the minimum is reached in the interior of the ball. So assume on the contrary that $u_\mu^*$ is such that $\Norm{u_\mu^*}{\mu} = \rho_0$; then we have
$$
\mathcal{H}_\mu(u_\mu^*) - \mathcal{H}_\mu(0) = \mathrm{d}\mathcal{H}_\mu(0) \cdot u_\mu^* + h(u_\mu^*)
$$
with $h(u_\mu^*) \geq c_0 \Norm{u_\mu^*}{\mu}^2$. Hence, as $|\mathrm{d}\mathcal{H}_\mu(0) \cdot u_\mu^*| \leq C \varepsilon(\mu) \Norm{u_\mu^*}{\mu}$ (see \eqref{eq:dHc}), we get
$$
\mathcal{H}_\mu(u_\mu^*) - \mathcal{H}_\mu(0) \geq c_0 \rho_0^2 - C\varepsilon(\mu) \rho_0.
$$
This shows that for $\varepsilon_0$ sufficiently small, $\mathcal{H}_\mu(u_\mu^*) > \mathcal{H}_\mu(0)$, which is a contradiction. Hence $u_\mu^*$ lies in the open ball $B_\mu(\rho_0)$, and thus
$$
\mathrm{d}\mathcal{H}_\mu(u^*_\mu) = 0.
$$
Moreover, as $\mathcal{H}_\mu$ is uniformly convex on the ball $B_\mu(\rho_0)$, we have
$$
\Norm{u^*_\mu}{\mu} \leq C \Norm{\mathrm{d}\mathcal{H}_\mu(u^*_\mu) - \mathrm{d}\mathcal{H}_\mu(0) }{\mu} \leq C \varepsilon(\mu)
$$
for some constant $C$ independent of $\mu$.
Then, setting
\begin{equation}
\label{estimu}
\eta_{\mu} := \chi_{\mu}(0,r_{\mu}(u_{\mu}^*),u_{\mu}^*)=(1+r_\mu(u_\mu^*))\pi_{\mu}\eta+u_{\mu}^*,
\end{equation}
we verify using \eqref{estimr} and \eqref{estimu} that $\Norm{\pi_{\mu}\eta - \eta_{\mu}}{\mu} \leq C\varepsilon(\mu)$ for some constant $C$ independent of $\mu$.
It remains to prove \eqref{eq:ctrl}. Let $\psi_{\mu} \in V_\mu$ and $\alpha \in \mathbb{T}$; we have
\begin{eqnarray*}
\Norm{i_\mu \psi_\mu - e^{i\alpha}\eta}{H^1} &\leq& \Norm{i_\mu \psi_\mu - e^{i\alpha} i_\mu \pi_\mu\eta }{H^1} + \Norm{i_\mu \pi_\mu\eta - \eta }{H^1}\\
&\leq& C \Norm{\psi_{\mu} - e^{i\alpha} \pi_\mu\eta }{\mu} + C\varepsilon(\mu),
\end{eqnarray*}
where we used \eqref{eq:normmu}. Hence we have for all $\psi_\mu$
\begin{equation}
\label{eq:ineq1}
\dist( i_\mu \psi_\mu, \Gamma) \leq C\dist_\mu( \psi_\mu, \pi_\mu\Gamma) + C \varepsilon(\mu)
\end{equation}
for some constant independent of $\mu$.
Similarly we prove that
\begin{equation}
\label{eq:ineq2}
\dist_\mu( \psi_\mu, \pi_\mu\Gamma) \leq C \dist( i_\mu \psi_\mu, \Gamma) + C \varepsilon(\mu),
\end{equation}
for some constant $C$ independent of $\mu$.
Now let $\psi_\mu$ be a function such that $\dist( i_\mu \psi_\mu, \Gamma) < \gamma_0$, with $\gamma_0$ small enough.
Assume first that $N_\mu(\psi_\mu) = 1$. Using \eqref{eq:ineq2}, $\psi_\mu$ belongs to a set $\mathcal{U}_\mu(\gamma)$ with a constant $\gamma$ depending on $\gamma_0$ and $\varepsilon_0$. If these parameters are sufficiently small, we can define an element $u_\mu$ of $B_\mu(\rho_0)$ and $\alpha\in\mathbb{T}$ such that $\psi_\mu = \chi_\mu(\alpha,r_\mu(u_\mu), u_\mu)$ (recall that $N_\mu(\psi_\mu) = 1$), with $u_\mu$ satisfying \eqref{eq:bdmerde2}.
Hence we have
$$
|H_\mu(\psi_\mu) - H_\mu(\eta_\mu) | = |\mathcal{H}_\mu(u_\mu) - \mathcal{H}_\mu(u_\mu^*)|,
$$
where $u_\mu^*$ is the minimizer of $\mathcal{H}_\mu$ associated with the discrete soliton $\eta_\mu$. This implies that there exists a constant $C$ independent of $\mu$ such that
$$
\Norm{u_\mu - u_\mu^*}{\mu}^2 \leq C |H_\mu(\psi_\mu) - H_\mu(\eta_\mu) | .
$$
Then using that $\Norm{u_\mu^*}{\mu} \leq C \varepsilon(\mu)$, that $\Norm{u_\mu}{\mu} = \dist_\mu(\psi_\mu,\pi_\mu\Gamma)+\mathcal{O}(\varepsilon(\mu))$, and the inequalities \eqref{eq:ineq1} and \eqref{eq:ineq2}, we obtain \eqref{eq:ctrl} in the case $N_\mu(\psi_\mu) = 1$. Now if $N_\mu(\psi_\mu) \neq 1$ but $|N_\mu(\psi_\mu) - 1 | \leq \delta$ with $\delta$ sufficiently small, there exists a point $v_\mu$ such that $\Norm{v_\mu}{\mu} \leq C\delta$ and $N_\mu(\psi_\mu -v_\mu) = 1$. We can then apply the previous estimate to $\psi_\mu - v_\mu$, and we use the uniform bounds on the derivatives of $H_\mu$ to conclude. The approximation of $\psi_\mu$ by $\psi_\mu - v_\mu$ gives rise to the term $\delta$ in \eqref{eq:ctrl}.
\end{proof}
\subsection{Discrete orbital stability}
In the previous paragraphs, we have shown that the conditions \textbf{(i)--(iv)} are sufficient to ensure the existence of a modified soliton for the modified energy $H_\mu$, and that this soliton is sufficiently close to the exact soliton $\eta$ to allow the control of the distance between $i_\mu\psi_\mu$ and $\Gamma$ by the difference between $H_\mu(\psi_\mu)$ and $H_\mu(\eta_\mu)$, see \eqref{eq:ctrl}. As a consequence we obtain the following stability result.
\begin{theorem}
\label{th:stab}
Assume that the hypotheses {\em{\textbf{(i)--(iv)}}} are satisfied, and assume moreover that for all $R_0$ and all $\mu \in \Sigma$ there exist $\beta(\mu) >0$ and a map $\Phi_{\mu}: B_\mu(R_0) \to V_{\mu}$ such that
$$
\forall\, \psi_\mu \in B_\mu(R_0), \quad
N_{\mu}(\Phi_{\mu}(\psi_{\mu})) = N_{\mu}(\psi_{\mu})
$$
and
\begin{equation}
\label{eq:presham}
\forall\, \psi_\mu \in B_\mu(R_0), \quad |H_{\mu}(\Phi_{\mu}(\psi_{\mu})) - H_{\mu}(\psi_{\mu})| \leq \beta(\mu).
\end{equation}
Then there exist $\delta_0> 0$ and a constant $C$ such that for all positive $\delta < \delta_0$, all $\mu\in\Sigma$ such that $\varepsilon(\mu) < \varepsilon_0$, and all $\psi^0_\mu$ satisfying $\dist(i_{\mu} \psi_{\mu}^0, \Gamma) \leq \delta$, the sequence $(\psi_\mu^{n})_{n \geq 0}$ defined by
$$
\forall\, n \geq 0, \quad
\psi^{n+1}_{\mu} = \Phi_{\mu}(\psi^n_{\mu})
$$
satisfies
$$
\forall\, n \geq 0, \quad \dist(i_{\mu} \psi_{\mu}^n, \Gamma) \leq C( \delta + \varepsilon(\mu))
$$
as long as $n\beta(\mu) \leq \varepsilon(\mu) + \delta$.
\end{theorem}
\begin{proof}
Using the hypothesis on $\psi_\mu^0$ and \eqref{eq:normmu}, there exists $R_0$ depending only on $\delta_0$ such that $\psi_\mu^0 \in B_\mu(R_0/2)$ uniformly in $\mu$, and there exists $\tilde\nu\in\Gamma$ such that $\Norm{i_\mu\psi_\mu^0 - \tilde\nu}{H^1} \leq \delta$. Thus, using the gauge invariance of $H$, we have $|H(i_\mu \psi_\mu^0)-H(\eta)| \leq C\delta$. Then, with hypothesis {\textbf{(iii)}} and \eqref{5.7bis}, we get
$$
|H_\mu(\psi_\mu^0) - H_\mu(\eta_\mu)| \leq C (\delta + \varepsilon(\mu)).
$$
On the other hand, using \eqref{eq:presham}, we have for all $n \geq 0$
\begin{eqnarray*}
|H_\mu(\psi_\mu^n) - H_\mu(\eta_\mu)| &\leq& |H_\mu(\psi_\mu^0) - H_\mu(\eta_\mu)| + \sum_{k = 0}^{n-1} |H_\mu(\psi_\mu^{k+1}) - H_\mu(\psi_{\mu}^{k})|\\
&\leq& C (\delta + \varepsilon(\mu)) + n \beta(\mu) \leq (C+1)( \delta + \varepsilon(\mu))
\end{eqnarray*}
as long as $n\beta(\mu) \leq \varepsilon(\mu) + \delta$ and $\Norm{\psi_\mu^k}{\mu} \leq R_0$ for $k \leq n$. Using the fact that $N_\mu(\psi_\mu^n) = N_\mu(\psi_\mu^0) = 1 + \mathcal{O}(\delta)$ and \eqref{eq:ctrl}, we get
\begin{equation}
\label{eq:poc}
\dist(i_{\mu} \psi_{\mu}^n, \Gamma) \leq \tilde{C}( \delta + \varepsilon(\mu))
\end{equation}
as long as $\Norm{\psi_\mu^n}{\mu} \leq R_0$, for some constant $\tilde C$ independent of $\mu$ and $n$. Then by a bootstrap argument, there exist $\delta_0$ and $\varepsilon_0$ sufficiently small such that, for $0<\delta<\delta_0$ and $0<\varepsilon(\mu)<\varepsilon_0$, \eqref{eq:poc} ensures that this is the case for $n\beta(\mu) \leq \varepsilon(\mu) + \delta$. This proves the result.
\end{proof}
\section{Applications}
We now prove the three theorems presented in Section 2. We only need to verify the hypotheses \textbf{(i)--(iv)} and to specify the constants $\varepsilon(\mu)$ and $\beta(\mu)$.
\subsection{Discrete Schr\"odinger equation}
Consider the DNLS equation \eqref{dnls} for a given positive number $h > 0$. In the previous formalism, we set $\Sigma = \{ h \in \mathbb{R}^+\}$, and the natural modified Hamiltonian and $L^2$ norm are given by \eqref{eq:discrham}.
We also define the real scalar product
$$
\langle \psi, \varphi\rangle_h := \mathrm{Re}\Big(h \sum_{j \in \mathbb{Z}} \psi_j \overline{\varphi_j} \Big).
$$
For all $\mu = h \in \Sigma$, the embedding $i_h$ is defined by \eqref{eq:ih}, and the projection $\pi_h$ by
$$
\forall\, j \in \mathbb{Z},\quad (\pi_h \psi)_j = \psi(jh),
$$
for $\psi \in H^1$.
Defining the semi-norm
$$
\SNorm{\psi}{h}^2 = 2h \sum_{j\in\mathbb{Z}} \frac{|\psi_{j+1}-\psi_j |^2}{h^2}
$$
on $V_h$, we have by explicit calculation that
\begin{equation}
\label{eq:seminorm}
\SNorm{\psi}{h} = \SNorm{i_h \psi}{H^1},
\end{equation}
where $\SNorm{\cdot}{H^1}$ denotes the semi-norm in $H^1$. This fact allows us to prove \textbf{(i)} and \textbf{(ii)} with the function $\varepsilon: \Sigma \to \mathbb{R}$ defined by $\varepsilon(h) = h$. This has already been proved in \cite[Lemma 4.2]{BP10}. Similarly, \textbf{(iii)} has been proved in \cite[Proposition 4.1]{BP10} with $\varepsilon(h) = h$.
Finally, by classical arguments on finite element approximation, there exists a universal constant $C$ such that for any function $\psi\in H^2$,
\begin{equation}
\label{estim:discret}
\Norm{i_h\pi_h\psi -\psi}{H^1} \leq C h \Norm{\psi}{H^2}.
\end{equation}
This proves \textbf{(iv)} upon using \eqref{eq:normmu}.
Let us denote by $\Phi_h^t(\psi)$ the flow associated with the Hamiltonian $H_h$. Using standard estimates, one shows that it is well defined for sufficiently small $t$, say $0 \leq t<t_0$, uniformly in $h$. Theorem \ref{th:dnls} is then a consequence of Theorem \ref{th:stab} with $\beta(h) = 0$ and $\Phi_\mu = \Phi_h^t$ with $t \in (0,t_0)$. Remark that, in particular, since $\Phi^{nt}_\mu=(\Phi^t_\mu)^n$ remains localized around the curve $\Gamma$ of solitons for all $n$ and all $t\in(0,t_0)$, the flow $\Phi_h^t(\psi)$ is globally defined.
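For completeness, here is a purely illustrative way of approximating the flow $\Phi_h^t$ with a generic ODE solver; it reuses the sketch function {\tt dnls\_rhs} introduced after \eqref{eq:discrham} and is of course not the scheme analysed in this paper.
\begin{verbatim}
import numpy as np
from scipy.integrate import solve_ivp

def flow(psi0, t, h):
    # Approximate Phi_h^t(psi0) by integrating real and imaginary parts
    # with a standard adaptive Runge-Kutta method.
    m = psi0.size
    def rhs(_, y):
        d = dnls_rhs(y[:m] + 1j * y[m:], h)
        return np.concatenate((d.real, d.imag))
    y0 = np.concatenate((psi0.real, psi0.imag))
    sol = solve_ivp(rhs, (0.0, t), y0, rtol=1e-10, atol=1e-12)
    return sol.y[:m, -1] + 1j * sol.y[m:, -1]
\end{verbatim}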
\subsection{Dirichlet cut-off}
Recall that, in comparison with the previous case, the space $V_{h,K}$ defined in \eqref{eq:VhK} is a finite dimensional subspace of $V_h$. We have seen that the modified energy and norm $H_{h,K}$ and $N_{h,K}$, and the embedding $i_{h,K}$, are defined by restriction to $V_{h,K}$. To define the projection $\pi_{h,K}$, we set
$$
(\pi_{h,K}(\psi))_j =
\left\{
\begin{array}{ll}
\psi(jh) & \mbox{if}\quad |j| \leq K,\\[2ex]
0 & \mbox{if}\quad |j| > K.
\end{array}
\right.
$$
With these definitions, it is clear that the hypotheses \textbf{(i)--(iii)} are satisfied with $\Sigma = \{(h,K) \in \mathbb{R}^+ \times \mathbb{N}\}$ and, {\em a priori}, with $\varepsilon(\mu) = h$ for $\mu = (h,K)$. However, with the space cut-off, the estimate \eqref{eq:approxeta} is no longer true unless the definition of $\varepsilon(\mu)$ is changed.
To estimate $\Norm{i_{h,K}\pi_{h,K}\eta - \eta}{H^1}$, we only need to estimate $\Norm{\pi_{h,K}\eta - \pi_{h}\eta}{h}$, which is given by
$$
\Norm{\pi_{h,K}\eta - \pi_{h}\eta}{h}^2 = 2 h \sum_{|j| > K} \frac{|\eta(jh)|^2}{h^2} + h \sum_{|j| > K}|\eta(jh)|^2.
$$
By definition of $\eta$, there exist constants $C_1$ and $\nu$ such that for all $x \in \mathbb{R}$, $|\eta(x)| \leq C_1 e^{-\nu|x|}$.
Substituting this estimate into the previous one, we get
\begin{eqnarray*}
\Norm{\pi_{h,K}\eta - \pi_{h}\eta}{h}^2 &\leq& 2 C_1^2 h \sum_{|j| >K} \frac{e^{-2\nu |j| h}}{h^2} + h \sum_{|j| > K} e^{-2\nu |j| h}\\
&\leq & \frac{4 C_1^2 + 2}{h^2}\, h \sum_{n > K} e^{-2\nu n h}\\
&\leq& \frac{\gamma}{h^2}\exp(-\nu K h)
\end{eqnarray*}
for some constant $\gamma$, provided $h < h_0$ is sufficiently small.
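The last bound follows from the geometric series: for $h \leq h_0$,
$$
h\sum_{n > K} e^{-2\nu n h} = \frac{h\, e^{-2\nu (K+1)h}}{1 - e^{-2\nu h}} \leq C e^{-2\nu K h} \leq C e^{-\nu K h},
$$
since $h/(1-e^{-2\nu h})$ remains bounded as $h \to 0$.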
This shows that \textbf{(iv)} is valid with the function
\begin{equation}
\label{eq:epsi1}
\varepsilon(\mu) = h + \frac{1}{h^2}\exp(-\nu Kh), \quad \mu = (h,K) \in \Sigma.
\end{equation}
With these notations, Theorem \ref{th:dnlsdir} is a consequence of Theorem \ref{th:stab} with $\beta(\mu) = 0$.
\subsection{Time splitting method}
Let us now consider the case where \eqref{dnlsdir} is discretized in time by a splitting method of the form $\Phi_A^\tau \circ \Phi_P^\tau$, as described in Section 2. The space discretization being the same as in the previous subsections, the hypotheses \textbf{(i)--(iii)} are automatically fulfilled with the function $\varepsilon$ defined in \eqref{eq:epsi1}. In particular, one can check directly that the norm $N_{h,K}$ is preserved by the splitting scheme. However, splitting methods do not preserve the energy $H_{h,K}$ for given $h$ and $K$: more precisely, taking $H_\mu = H_{h,K}$ in \eqref{eq:presham} only yields an error of order $\beta(\mu) = h\tau$.
In this section, we set
$$
\Sigma := \{ (h,K,\tau) \in \mathbb{R}^+ \times \mathbb{N} \times \mathbb{R}^+\}.
$$
For $\mu = (h,K,\tau) \in \Sigma$, we set $V_{\mu} = V_{h,K}$, $i_{\mu} = i_{h,K} = i_h$, and $\pi_{\mu} = \pi_{h,K}$.
In the next section we will prove the following result.
\begin{theorem}
\label{prop.split}
Let $R_0>0$ and $M \in \mathbb{N}$ be fixed. There exist $\tau_0$ and $h_0$ such that for all $\mu = (h,K,\tau) \in \Sigma$ satisfying $\tau < \tau_0$, $h < h_0$ and the CFL condition
\begin{equation}
\label{sp.0cfl}
(2M+3)\frac{\tau}{h^2}<\frac{2\pi}{3},
\end{equation}
there exist a constant $C$, depending only on $R_0$ and $M$, and a smooth gauge invariant polynomial function $H_{\mu} = H_{h,K,\tau}$ defined on $V_\mu$ such that
\begin{equation}
\label{sp.01}
\Norm{H_{\mu} -H \circ i_{\mu}}{\mathcal{C}^2(B_{\mu}(R_0))} \leq C\frac{\tau}{h}
\end{equation}
and
\begin{equation}
\label{sp.02}
\Norm{\Phi_P^\tau\circ\Phi_A^\tau(\psi)-\Phi^\tau_{H_\mu}(\psi)}{\mu} \leq C\tau^{M+1}\quad \text{for all }\psi\in V_\mu \text{ with } \Norm{\psi}{\mu} \leq R_0.
\end{equation}
\end{theorem}
With this result, the final statement of Theorem \ref{th:splitting} is a consequence of Theorem \ref{th:stab} applied with
$$
\varepsilon(\mu) = h + \frac{1}{h^2}\exp(-\nu Kh) + \frac{\tau}{h}
$$
and $\beta(\mu) = \tau^{M+1}$.
The proof of Theorem \ref{prop.split} occupies the rest of this paper, and is a variant of the theory developed in \cite{BG94,FG11}. Here we summarize it and repeat the proofs in some detail in order to give a reasonably self-contained presentation.
\section{Construction of the modified energy}
\subsection{Formal part}
We start by recalling the algorithm for the construction of the modified energy $H_\mu$ introduced in the previous section. As a variant of the theory developed in \cite{FG11}, we work here at the level of vector fields instead of Hamiltonian functions. Recall that at the continuous level, we identified the space $H^1(\mathbb{R};\mathbb{C}) \simeq H^1(\mathbb{R};\mathbb{R})^2$ through the identification $\psi = \frac{1}{\sqrt{2}}(q + ip)$. This identification obviously transfers to the space $V$ of symmetric functions, and to the discretized space $V_{h,K}$ via the identification
\begin{equation}
\label{eq:idef}
\psi_j = \frac{1}{\sqrt{2}}(q_j + i p_j), \quad j = -K,\ldots,K.
\end{equation}
Hence we can endow $V_{h,K}$ with the Hamiltonian structure induced by the symplectic form $\sum_{j = -K}^K \mathrm{d} p_j \wedge \mathrm{d} q_j$. In the following we systematically identify $\psi = (\psi_j)_{j = -K}^{K}$ and $(q,p) = (q_j,p_j)_{j = -K}^K$ through \eqref{eq:idef}. With a given real functional $H(\psi) = H(q,p)$, we associate the Hamiltonian vector field $X_H$ defined by
\begin{equation}
\label{eq:XH}
X_H(q,p):=\left( \frac{\partial H}{\partial p_{\ell}}(q,p),-\frac{\partial H}{\partial q_{\ell}}(q,p)\right)_{\ell=-K}^K.
\end{equation}
Note that this formula makes sense because all the Hamiltonian functions $H(\psi)$ that we consider are real valued.
In this setting, $A$ and $P$ denote the vector fields associated respectively with the real Hamiltonian functions
$$
H_A(\psi)= h \sum_{\ell=-K}^K \frac{|\psi_{\ell}-\psi_{\ell-1}|^2}{h^2},\quad \mbox{and}\quad
H_P(\psi)= -\frac{h}{2} \sum_{\ell=-K}^K |\psi_{\ell}|^4,
$$
which can obviously be expressed in terms of $(q_j,p_j)$. Note that $A$ and $P$ depend on $h$, but we omit this dependence in the notation.
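Explicitly, with the identification \eqref{eq:idef},
$$
H_A(q,p)= \frac{h}{2}\sum_{\ell=-K}^K \frac{(q_{\ell}-q_{\ell-1})^2+(p_{\ell}-p_{\ell-1})^2}{h^2},
\qquad
H_P(q,p)= -\frac{h}{8}\sum_{\ell=-K}^K (q_{\ell}^2+p_{\ell}^2)^2.
$$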
We look for a formal vector field, namely a formal power series
\begin{equation}
\label{sp.0}
Z(\varepsilon):=\sum_{n\geq 0}Z_n\varepsilon^n,
\end{equation}
where each $Z_n$ is a Hamiltonian vector field on $V_{h,K}$, such that
\begin{equation}
\label{sp.1}
\forall \, |\varepsilon| \leq \tau, \quad
\Phi^\varepsilon_{P}\circ\Phi^1_{A_0}=\Phi^1_{Z(\varepsilon)}, \quad A_0:=\tau A\ .
\end{equation}
Here $\Phi_X^t$ denotes the Hamiltonian flow on $V_{h,K}$ associated with the vector field $X$ at time $t$. \\
Notice that, in particular, at order zero \eqref{sp.1} implies
\begin{equation}
\label{sp.1.1}
Z_0:=A_0=\tau A\ .
\end{equation}
Ideally, the approximate Hamiltonian we are looking for would be $H_{h,K,\tau}:=\frac 1{\tau} H_{Z(\tau)}$ (see \eqref{Hamtau}), but the formal series defining $Z$ is not convergent, and we will have to truncate the sum in \eqref{sp.0}.\\
It is well known that it is convenient to look at the equality \eqref{sp.1} in a dual way, namely to ask that the following equality be fulfilled for any smooth function $w:V_{h,K}\to\mathbb{C}$:
\begin{equation}
\label{sp.2}
w(\Phi^\varepsilon_{P}\circ\Phi^1_{A_0} )=w(\Phi^1_{Z(\varepsilon)})\ .
\end{equation}
The key ingredient of the construction is given by the formal formula
\begin{align}
\label{sp.3}
\forall\, t, \quad
e^{t L_X}w=w\circ\Phi^t_X\ ,
\end{align}
where $L_X$ is the Lie operator associated with $X$. In our Hamiltonian case, if $X:=(X^{j}_q,X^{j}_p)_{j=-K}^K$ is a vector field (according to the decomposition \eqref{eq:XH}), we have, in the real coordinates $(q_j,p_j)$,
$$
L_X w:=\sum_{j = -K}^K X^{j}_p\frac{\partial w}{\partial p_j}-X^{j}_q\frac{\partial w}{\partial q_j},
$$
and the exponential is defined in a formal way by
$$
e^{\varepsilon L_X}w:=\sum_{k\geq 0}\frac{1}{k!}\varepsilon^kL^k_Xw\ .
$$
In this formalism \text{e} qref{sp.2} reads
$$
e^{L_{A_0}}e^{\varepsilon L_P}w=e^{L_{Z(\varepsilon)}}w.
$$
Differentiating with respect to $\varepsilon$, one gets (by working on the power
series)
\begin{equation}
\label{sp.5}
e^{L_{A_0}}e^{\varepsilon L_P} L_Pw=e^{L_{Z(\varepsilon)}}L_{Q(\varepsilon)}w,
\end{equation}
where
\begin{equation}
\label{sp.6}
Q(\varepsilon):= \sum_{k\geq 0} \frac{1}{(k+1)!}\operatorname{ad}^k_{Z(\varepsilon)}Z'(\varepsilon)\quad \mbox{with}\quad \operatorname{ad}_Z X:= \left[ Z, X\right],
\end{equation}
where $[\,\cdot\,,\,\cdot\,]$ denotes the Lie bracket of two vector fields.
Finally \eqref{sp.5} leads to
the equation $Q(\varepsilon)=P$, from which we are going to construct
$Z(\varepsilon)$. The construction goes as follows: first one remarks that the
r.h.s.\ of \eqref{sp.6} has the formal aspect of an operator applied to
$Z'(\varepsilon)$, so the idea is first of all to invert this operator. We
remark that the power series defining the operator in question is
$\sum_{k\geq 0}x^k/(k+1)!=(e^x-1)/x$, so that one expects its
inverse to be $x/(e^x-1)\equiv \sum_{k\geq0} (B_k/k!)\,x^k$, where the $B_k$
are the Bernoulli numbers and the power series is convergent
provided $|x|<2\pi$. So one is tempted to rewrite $Q(\varepsilon)=P$ in the
form
\begin{equation}
\label{Ehomo2}
\forall\, |\varepsilon|\leq \tau, \quad Z'(\varepsilon) = \sum_{k \geq 0} \frac{B_k}{k!} \operatorname{ad}^k_{Z(\varepsilon)} P.
\end{equation}
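The numerical size of the coefficients $B_k/k!$ is easy to check. The following sketch is an illustration added here (not taken from the paper): it computes the Bernoulli numbers from the standard recurrence, with the convention $B_1=-1/2$ used in the generating series $x/(e^x-1)$, and verifies that the truncated series indeed inverts $(e^x-1)/x$ for a few values $|x|<2\pi$.
\begin{verbatim}
import math
from fractions import Fraction

def bernoulli_numbers(N):
    """B_0,...,B_N with B_1 = -1/2, i.e. x/(e^x-1) = sum_k B_k x^k / k!."""
    B = [Fraction(0)] * (N + 1)
    B[0] = Fraction(1)
    for m in range(1, N + 1):
        # recurrence: sum_{j=0}^{m} C(m+1, j) B_j = 0
        s = sum(Fraction(math.comb(m + 1, j)) * B[j] for j in range(m))
        B[m] = -s / (m + 1)
    return B

N = 40
B = bernoulli_numbers(N)
coeffs = [float(B[k]) / math.factorial(k) for k in range(N + 1)]

def inv_series(x):
    # truncated series for x/(e^x - 1)
    return sum(c * x ** k for k, c in enumerate(coeffs))

for x in [0.1, 1.0, 3.0]:                  # convergence requires |x| < 2*pi
    print(x, inv_series(x) * (math.exp(x) - 1.0) / x)   # should be close to 1
\end{verbatim}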
Plugging the Ansatz expansion $Z(\varepsilon) = \sum_{\ell \geq 0} \varepsilon^\ell Z_\ell$
into this equation, we get, for $n \geq 0$, the recursive equations
\begin{equation}
\label{Erec}
(n+1) Z_{n+1} = \sum_{k \geq 0 }\frac{B_k}{k!} A_k^{(n)}, \quad
\mbox{with}\quad
A_k^{(n)}:= \sum_{\ell_1 + \cdots + \ell_k = n} \operatorname{ad}_{Z_{\ell_1}} \cdots \operatorname{ad}_{Z_{\ell_k}} P.
\end{equation}
\begin{remark}
The analysis made to obtain this recursive equation is formal. To
obtain our main result, we will verify that some of the series we manipulate
are in fact convergent, while the others will be truncated in
order to get meaningful expressions.
\end{remark}
\begin{remark}
\label{order}
Assume that $P$ is a polynomial of degree $r_0$ (in our case $r_0=3$), and that $(Z_\ell)_\ell$ is a collection of vector fields satisfying the previous relation;
then for all $n$, $Z_n$ is a polynomial of degree $(n-1)(r_0-1)+r_0$.
\end{remark}
\begin{remark}
\label{ham}
If the vector fields $P$ and $A_0$ are Hamiltonian, then the same is
true for the vector fields $Z_n$. This is an immediate consequence of
the fact that the whole construction involves only Lie brackets, which
are operations preserving the Hamiltonian nature of the vector
fields.
\end{remark}
\subsection{Analytic estimates}\label{analytic} We first introduce a
suitable norm for measuring the size of the polynomials. In accordance
with the notation of the previous sections, we consider in the
following a fixed $\mu = (h,K,\tau) \in \Sigma$. Recall that the space
$V_\mu = V_{h,K}$ does not depend on $\tau$, nor does the norm
$\|\cdot\|_{\mu}$.
If $X$ is a vector field on $V_{\mu}$ which is a homogeneous
polynomial of degree $s$, we
can associate with it a symmetric multilinear form
$\widetilde{X}(\psi_1,\ldots,\psi_{s})$ such that $X(\psi)
= \widetilde{X}(\psi,\ldots,\psi)$. We put
$$
\|X\|_{\mu}:= \sup_{\substack{\|\psi_i\|_{\mu} = 1 \\ i = 1,\ldots,s}} \|\widetilde{X}(\psi_1,\ldots,\psi_{s})\|_{\mu}\,.
$$
We then extend this norm to a general polynomial vector field $X$ by
defining its norm as the sum of the norms of its homogeneous
components.
\begin{definition} We denote by $\mathcal{P}_s$ the space of the
polynomial vector fields of degree at most $s$ which furthermore have a finite norm
$\|\cdot\|_{\mu}$. \end{definition}
\begin{remark}
\label{rk.important}
With this definition, we note that the norm $\|P\|_{\mu}$ is
uniformly bounded with respect to $\mu$.
\end{remark}
\begin{lemma}
\label{ad.1}
Let $s_1 \geq 1$ and $s_2\geq 1$, and let $X\in \mathcal{P}_{s_1}$ and
$Y\in \mathcal{P}_{s_2}$. Then $[X,Y] \in \mathcal{P}_{s_1 + s_2 - 1}$, and
\begin{equation}
\label{sp.lem.1.1}
\|[X,Y]\|_{\mu} \leq (s_1 + s_2) \|X\|_{\mu}\,\|Y\|_{\mu}.
\end{equation}
\end{lemma}
\proof We give the proof in the case of homogeneous polynomials; the
general case immediately follows. Denote again by $\widetilde X$ and
$\widetilde Y$ the symmetric multilinear forms associated to $X$ and
$Y$; then one has
$$
[X,Y](\psi)=s_1\widetilde{X}(Y(\psi),\psi,\ldots,\psi)-s_2\widetilde{Y}(X(\psi),\psi,\ldots,\psi),
$$
from which the result immediately follows. \qed
\begin{lemma}
\label{ad.2}
For $h \leq \frac{1}{\sqrt{2}}$, the operator $A_0 = -\tau \Delta_h$ satisfies
\begin{equation}
\label{sp.lem.df}
\|A_0\|_{\mu}
\leq 3\,\frac{\tau}{h^2}.
\end{equation}
\end{lemma}
\proof
Let us first note that if $(u_j)_{j = -K}^K$ is in $V_\mu$, we have
\begin{equation}
\label{eq:discrSob}
\|u\|_{\mu}^2 = 2 h \sum_{j = -K}^K \frac{|u_{j+1} - u_j|^2}{h^2} + h \sum_{j = -K}^{K} | u_j|^2 \leq \Big(\frac{4}{h^2} + 1\Big) \Big(h \sum_{j = -K}^{K} | u_j|^2\Big).
\end{equation}
Note that $A_0=-\tau\Delta_h$ is homogeneous of degree one. Moreover, we can write
$$
(A_0\psi)_\ell = \tau\,\frac{\psi_{\ell+1}+\psi_{\ell-1}-2\psi_\ell}{h^2} = \frac{\tau}{h} (a_{\ell} - a_{\ell-1}),
$$
where $a_{\ell} = (\psi_{\ell +1} - \psi_{\ell})/h$. Using the discrete Sobolev inequality \eqref{eq:discrSob} and the Minkowski inequality, we get that
$$
\|A_0 \psi\|_{\mu} \leq 2 \sqrt{\frac{4}{h^2} + 1}\ \frac{\tau}{h}\Big(h \sum_{j = -K}^{K} | a_j|^2\Big)^{1/2}.
$$
We conclude by remarking that
$$
\Big(h \sum_{j = -K}^{K} | a_j|^2\Big) \leq \frac{1}{2} \|\psi\|_{\mu}^2.
$$
We deduce that
$$
\|A_0\psi\|_{\mu} \leq \frac{\tau}{h^2} \sqrt{8 + 2 h^2}\ \|\psi\|_{\mu}\,,
$$
which shows the result.
\qed
\begin{remark}
Lemmas \ref{ad.1} and \ref{ad.2} can be rephrased in a form suitable
for what follows by saying that, for $X\in \mathcal{P}_s$, the
operator
\begin{equation*}
\operatorname{ad}_X: \mathcal{P}_{s_1}\to \mathcal{P}_{s+s_1-1}
\end{equation*}
is bounded, and its norm (induced by the norm $\|\cdot\|_{\mu}$, for fixed $s$ and $s_1$) fulfills
\begin{equation}
\label{ad.3}
\|\operatorname{ad}_X\|_{\mu} \leq (s+s_1) \|X\|_{\mu}\,.
\end{equation}
In particular, using the previous result we have, for a given $s_1 \geq 2$,
\begin{equation}
\label{ad.4a}
\operatorname{ad}_{A_0}: \mathcal{P}_{s_1}\to \mathcal{P}_{s_1}
\quad \mbox{and}\quad
\|\operatorname{ad}_{A_0}\|_{\mu} \leq 3(s_1+1)\frac{\tau}{h^2}\,.
\end{equation}
\end{remark}
\begin{proposition}
\label{sp.imp}
Let $M$ be an integer satisfying
\begin{equation}
\label{boh}
(2M+3)\frac{\tau}{h^2}< \frac{2\pi}{3}\,.
\end{equation}
Then, for all $n \leq M$, $Z_n$ is well defined and
$Z_n\in \mathcal{P}_{r_n}$ with $r_n=2n+1$, and the norm of $Z_n$ is uniformly bounded with respect to $\mu$.
\end{proposition}
\proof We prove the proposition by induction. We set $Z_0 = A_0$. Assume that $Z_\ell \in \mathcal{P}_{r_\ell}$ has been constructed for all $\ell \leq n$, with $n \leq M-1$. Let us prove that \eqref{Erec} defines a term $Z_{n+1} \in \mathcal{P}_{r_{n+1}}$.
We rewrite \eqref{Erec}
by isolating the terms containing $Z_0=A_0$ and by replacing each
$\operatorname{ad}$ factor by the estimate of its norm. The advantage of
doing so is that the product of the estimates is commutative, while
the composition of the $\operatorname{ad}$ operators is not.
We first get
\begin{align*}
\|A^{(n)}_k\|_{\mu} &\leq \sum_{i=1}^{k} \|\operatorname{ad}_{A_0}\|_{\mu}^{k-i}
\frac{k!}{(k-i)!\,i!} \sum_{\substack{\ell_1+\cdots+\ell_i=n \\ \ell_j\geq
1}} \|\operatorname{ad}_{Z_{\ell_1}}\|_{\mu}\cdots \|\operatorname{ad}_{Z_{\ell_i}}\|_{\mu}\, \|P\|_{\mu}
\\
&\leq \sum_{i=1}^{n} \|\operatorname{ad}_{A_0}\|_{\mu}^{k-i}
\frac{k!}{(k-i)!\,i!} \sum_{\substack{\ell_1+\cdots+\ell_i=n \\ \ell_j\geq
1}} (2r_M)^n \|Z_{\ell_1}\|_{\mu}\cdots \|Z_{\ell_i}\|_{\mu}\, \|P\|_{\mu},
\end{align*}
where we used that, if $i>n$ and $\ell_j>0$, then $\ell_1+\cdots+\ell_i>n$, and the
fact that, since by hypothesis the involved polynomials have degrees
smaller than $r_M$, one has $\|\operatorname{ad}_{Z_\ell}\|_{\mu} \leq
2r_M\|Z_\ell\|_{\mu}$ for $\ell \leq n$.
Remarking that the result of the above sum with respect to
$\ell_1,\ldots,\ell_i$ does not depend on $k$, using \eqref{ad.4a} with $s_1=r_M$, and noticing that $\|P\|_{\mu}$ is uniformly bounded with respect to $\mu$, we get
\begin{align*}
\|Z_{n+1}\|_{\mu}&\leq \frac{1}{n+1} \sum_{k\geq
0}\frac{B_k}{k!} \sum_{i=0}^{n} \left(\frac{r_M\tau}{h^2}\right)^{k-i}
\frac{k!}{(k-i)!\,i!}\, C_n\\
&= \frac{C_n}{n+1} \left[ \sum_{i=0}^{n}\frac{\mathrm{d}^i}{\mathrm{d}x^i} \left( \sum_{k\geq
0}\frac{B_k}{k!}x^k\right) \right]_{x=\frac{r_M\tau}{h^2}},
\end{align*}
for some constant $C_n$ independent of $\mu$.
This shows that the series defining $Z_{n+1}$ is convergent, that $Z_{n+1}\in \mathcal{P}_{r_{n+1}}$ and that $\|Z_{n+1}\|_{\mu}$ is finite and uniformly bounded with respect to $\mu$. \qed
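For orientation, the restriction \eqref{boh} is a CFL-type condition linking the admissible truncation order $M$ to the ratio $\tau/h^2$. The following small sketch is an illustration added here (the sample values of $h$ and $\tau$ are arbitrary); it simply computes the largest integer $M$ satisfying \eqref{boh}.
\begin{verbatim}
import math

def max_truncation_order(h, tau):
    """Largest integer M with (2M+3)*tau/h**2 < 2*pi/3."""
    bound = (2.0 * math.pi * h ** 2 / (3.0 * tau) - 3.0) / 2.0
    M = math.floor(bound)
    if M == bound:            # the inequality is strict
        M -= 1
    return max(M, -1)         # -1 signals that not even M = 0 is admissible

for h, tau in [(0.1, 1e-4), (0.1, 1e-3), (0.05, 1e-4)]:
    print(h, tau, max_truncation_order(h, tau))
\end{verbatim}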
\subsection{Proof of Theorem \ref{prop.split}}
First remark that in our case all the vector fields are
Hamiltonian. Explicitly, by the Poincar\'e Lemma, the Hamiltonian
function of a Hamiltonian vector field $X$ is given by
\begin{equation}
\label{ham.x}
H_X(\psi):=\int_0^1 s\,\omega(X(s\psi),\psi)\,\mathrm{d}s\,,
\end{equation}
where $\omega$ is the symplectic form. In particular, this formula
shows that the Hamiltonian function of a smooth polynomial vector
field is also a smooth polynomial function. For $\varepsilon \leq \tau$, let us define
$$Z^{(M)}(\varepsilon):= \sum_{j=0}^{M}\varepsilon^jZ_j\,.$$
By construction $Z^{(M)}(\varepsilon)$ satisfies \eqref{Ehomo2} up to order $\varepsilon^{M}$ included, from which we deduce that it satisfies \eqref{sp.1} up to order $\varepsilon^{M}$ (see \cite{FG11}, Theorem 4.2, for details).
Therefore, defining for $\mu = (h,K,\tau)$
\begin{equation}
\label{Hamtau}
H_\mu:=\frac{1}{\tau} H_{Z^{(M)}(\tau)}= \sum_{j=0}^{M}\tau^{j-1}H_{Z_j}\,,
\end{equation}
estimate \eqref{sp.02} holds true with a constant independent of $\mu$.
It remains to compare the two Hamiltonians $H_\mu = H_{h,K,\tau}$ and $H_{h,K}$ in the $C^2$ norm on the ball centered at the origin and of arbitrary radius $R_0$ in $V_\mu$.\\
Let us define
$$
H^{(1)}_\mu =\frac{1}{\tau}\left( H_{Z_0}+\tau H_{Z_1}\right)
$$
and recall that $Z_0=A_0=\tau A$, and that by construction
\begin{equation}
\label{eq:Z1}
Z_1= \sum_{k\geq 0}\frac{\tau^k B_k}{k!} \operatorname{ad}_{A_0}^k P\,.
\end{equation}
Now we have
$$
H_\mu^{(1)}-H_\mu= -\sum_{j=2}^{M}\tau^{j-1}H_{Z_j}.
$$
But using \eqref{ham.x} and the fact that $Z_{j}$ is of degree $r_j$, we get, for all $\psi \in B_{\mu}(R_0)$,
\begin{equation}
\label{h1}
|H_\mu^{(1)}(\psi)-H_\mu(\psi)| \leq \sum_{j=2}^{M}\tau^{j-1} \|Z_j\|_{\mu}\, R_0^{r_{j}+1} \leq C\tau\,,
\end{equation}
for some constant $C$ independent of $h$, $K$ and $\tau \leq \tau_0$ sufficiently small.
To estimate $H_\mu^{(1)} - H_{h,K}$, we notice, using \eqref{eq:Z1}, that
\begin{equation}
X_{H^{(1)}_\mu}-X_{H_{h,K}}= Z_1-P
=\tau \left[ \sum_{k\geq 0}\frac{\tau^k B_{k+1}}{(k+1)!} \operatorname{ad}_{A_0}^k\right]\operatorname{ad}_{A_0} P.
\end{equation}
But in view of \eqref{sp.0cfl}, $\frac{3\tau}{h^2}< \pi$, and thus the operator $\left[ \sum_{k\geq 0}\frac{\tau^k B_{k+1}}{(k+1)!} \operatorname{ad}_{A_0}^k\right]$ is bounded on $\mathcal{P}_3$, uniformly with respect to $\mu$. Therefore, for $\psi \in B_\mu(R_0)$, we have
$$
|H^{(1)}_\mu(\psi)-H_{h,K}(\psi)| \leq C\, | H_{\operatorname{ad}_{A_0}P}(\psi)|
$$
for some constant $C$ independent of $\mu = (h,K,\tau)$.
Now we calculate explicitly that the Hamiltonian associated with $\operatorname{ad}_{A_0}P$ is given by
\begin{align*}
H_{\operatorname{ad}_{A_0}P}(\psi)=&\frac{i\tau}{h^2} \sum_{\ell=-K}^K \Big(( \overline{\psi_{\ell+1}}+ \overline{\psi_{\ell-1}}-2 \overline{\psi_\ell})|\psi_\ell|^2\psi_\ell -(\psi_{\ell+1}+\psi_{\ell-1}-2\psi_\ell)|\psi_\ell|^2 \overline{\psi_\ell}\Big)\\
=&\frac{\tau}{h^2} \sum_{\ell=-K}^K \Im\Big((\psi_{\ell+1}+\psi_{\ell-1}-2\psi_\ell)|\psi_\ell|^2 \overline{\psi_\ell}\Big).
\end{align*}
But we have
\begin{align*}
& \sum_{\ell = -K}^K (\psi_{\ell+1}+\psi_{\ell-1}-2\psi_\ell)|\psi_\ell|^2 \overline{\psi_\ell} \\
&= \sum_{\ell = -K}^K (\psi_{\ell+1} - \psi_\ell)|\psi_\ell|^2 \overline{\psi_\ell} - (\psi_{\ell} - \psi_{\ell-1})|\psi_\ell|^2 \overline{\psi_\ell} \\
& = \sum_{\ell = -K}^K (\psi_{\ell+1} - \psi_\ell)|\psi_\ell|^2 \overline{\psi_\ell} - \sum_{\ell = -K - 1}^{K-1} (\psi_{\ell+1} - \psi_{\ell})|\psi_{\ell+1}|^2 \overline{\psi_{\ell+1}}\\
&= \sum_{\ell = -K}^{K-1} (\psi_{\ell+1} - \psi_\ell)\big(|\psi_\ell|^2 \overline{\psi_\ell} - |\psi_{\ell+1}|^2 \overline{\psi_{\ell+1}}\big) - \psi_K|\psi_K|^2 \overline{\psi_K} - \psi_{-K}|\psi_{-K}|^2 \overline{\psi_{-K}},
\end{align*}
using the boundary conditions $\psi_{K+1} = \psi_{-K - 1} = 0$. The two boundary terms equal $-|\psi_{\pm K}|^4$ and are therefore real, so taking the imaginary part we obtain
$$
H_{\operatorname{ad}_{A_0}P}(\psi)
=\frac{\tau}{h^2} \sum_{\ell = -K}^{K-1} \Im\Big( (\psi_{\ell+1} - \psi_\ell)\big(|\psi_\ell|^2 \overline{\psi_\ell} - |\psi_{\ell+1}|^2 \overline{\psi_{\ell+1}}\big) \Big).
$$
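The summation-by-parts identity behind the last two displays is easy to sanity-check numerically. The following sketch is an illustration added here, with randomly chosen data and the boundary convention $\psi_{\pm(K+1)}=0$; it compares the imaginary part of the left-hand side with that of the telescoped sum.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
K = 20
psi = rng.standard_normal(2 * K + 1) + 1j * rng.standard_normal(2 * K + 1)
ext = np.concatenate(([0.0], psi, [0.0]))        # psi_{-K-1} = psi_{K+1} = 0

# left-hand side: sum_l (psi_{l+1} + psi_{l-1} - 2 psi_l) |psi_l|^2 conj(psi_l)
lhs = np.sum((ext[2:] + ext[:-2] - 2 * ext[1:-1]) * np.abs(psi) ** 2 * np.conj(psi))

# telescoped sum over l = -K,...,K-1 (the boundary terms are real and drop out)
f = np.abs(psi) ** 2 * np.conj(psi)
rhs = np.sum((psi[1:] - psi[:-1]) * (f[:-1] - f[1:]))

print(np.imag(lhs), np.imag(rhs))                # the two imaginary parts agree
\end{verbatim}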
But we have
$$
\left| \Im\Big( (\psi_{\ell+1} - \psi_\ell)\big(|\psi_\ell|^2 \overline{\psi_\ell} - |\psi_{\ell+1}|^2 \overline{\psi_{\ell+1}}\big) \Big) \right| \leq
5\, |\psi_{\ell+1} - \psi_\ell|^2 \big(|\psi_\ell|^2 + |\psi_{\ell+1}|^2\big).
$$
Then we use that
$$
|\psi_{\ell+1}- \psi_\ell|^2 \leq h\, \|\psi\|_{\mu}^2
$$
to obtain
$$
|H_{\operatorname{ad}_{A_0}P}(\psi)| \leq 2\,\frac{\tau}{h}\, \|\psi\|_{\mu}^4,
$$
and therefore, for $\psi\in B_\mu(R_0)$,
\begin{equation}
\label{h2}
|H^{(1)}_\mu(\psi)-H_{h,K}(\psi)| \leq C\,\frac{\tau}{h}\,.
\end{equation}
Combining \eqref{h1} and \eqref{h2} we get, for all $\psi\in B_\mu(R_0)$,
\begin{equation}
\label{h3}
|H_\mu(\psi)-H_{h,K}(\psi)| \leq C\,\frac{\tau}{h}\,.
\end{equation}
Furthermore, since both functionals are analytic in $\psi$ and the above estimate is uniform in $\psi\in B_\mu(R_0)$, we have similar estimates for the first and second derivatives of $\psi\mapsto H_\mu(\psi)-H_{h,K}(\psi)$.
\qed
\begin{thebibliography}{99}
\bibitem{Akrivis}
G. D. Akrivis, V. A. Dougalis and O. A. Karakashian,
\emph{On fully discrete Galerkin methods of second-order temporal accuracy for the nonlinear Schr\"odinger equation},
Numer. Math. 59 (1991) 31--53.
\bibitem{BP10}
D. Bambusi and T. Penati,
\emph{Continuous approximation of breathers in one and two dimensional DNLS lattices},
Nonlinearity 23 (2010), no. 1, 143--157.
\bibitem{BG94}
G. Benettin and A. Giorgilli,
\emph{On the Hamiltonian interpolation of near to the identity symplectic mappings with application to symplectic integration algorithms},
J. Statist. Phys. 74 (1994) 1117--1143.
\bibitem{Besse}
C. Besse,
\emph{A relaxation scheme for the nonlinear Schr\"odinger equation},
SIAM J. Numer. Anal. 42 (2004) 934--952.
\bibitem{Borgna08}
J. P. Borgna and D. F. Rial,
\emph{Orbital stability of numerical periodic nonlinear Schr\"odinger equation},
Commun. Math. Sci. 6 (2008) 149--169.
\bibitem{Ciarlet}
P. G. Ciarlet, B. Miara and J.-M. Thomas,
\emph{Introduction to numerical linear algebra and optimisation},
Cambridge University Press, 1989.
\bibitem{DFP81}
M. Delfour, M. Fortin and G. Payre,
\emph{Finite-difference solutions of a non-linear Schr\"odinger equation},
J. Comput. Phys. 44 (1981) 277--288.
\bibitem{Duran00}
A. Dur\'an and J. M. Sanz-Serna,
\emph{The numerical integration of relative equilibrium solutions. The nonlinear Schr\"odinger equation},
IMA J. Numer. Anal. 20 (2000) 235--261.
\bibitem{Fei95}
Z. Fei, V. M. P\'erez-Garc\'ia and L. V\'azquez,
\emph{Numerical simulation of nonlinear Schr\"odinger systems: A new conservative scheme},
Appl. Math. Comput. 71 (1995) 165--177.
\bibitem{F11}
E. Faou,
\emph{Geometric numerical integration and Schr\"odinger equations},
European Math. Soc., 2012.
\bibitem{FG11}
E. Faou and B. Gr\'ebert,
\emph{Hamiltonian interpolation of splitting approximations for nonlinear PDEs},
Found. Comput. Math. 11 (2011) 381--415.
\bibitem{Frohlich04}
J. Fr\"ohlich, S. Gustafson, L. Jonsson and I. M. Sigal,
\emph{Solitary wave dynamics in an external potential},
Comm. Math. Phys. 250 (2004) 613--642.
\bibitem{Grill87}
M. Grillakis, J. Shatah and W. Strauss,
\emph{Stability theory of solitary waves in the presence of symmetry. I},
J. Funct. Anal. 74 (1987) 160--197.
\bibitem{Grill90}
M. Grillakis, J. Shatah and W. Strauss,
\emph{Stability theory of solitary waves in the presence of symmetry. II},
J. Funct. Anal. 94 (1990) 308--348.
\bibitem{HLW}
E. Hairer, C. Lubich and G. Wanner,
\emph{Geometric Numerical Integration. Structure-Preserving Algorithms for Ordinary Differential Equations},
Second Edition, Springer, 2006.
\bibitem{Reich99}
S. Reich,
\emph{Backward error analysis for numerical integrators},
SIAM J. Numer. Anal. 36 (1999) 1549--1570.
\bibitem{SZ84}
J. M. Sanz-Serna,
\emph{Methods for the solution of the nonlinear Schr\"odinger equation},
Math. Comp. 43 (1984) 21--27.
\bibitem{SZ86}
J. M. Sanz-Serna and J. G. Verwer,
\emph{Conservative and nonconservative schemes for the solution of the nonlinear Schr\"odinger equation},
IMA J. Numer. Anal. 6 (1986) 25--42.
\bibitem{Weideman86}
J. A. C. Weideman and B. M. Herbst,
\emph{Split-step methods for the solution of the nonlinear Schr\"odinger equation},
SIAM J. Numer. Anal. 23 (1986) 485--507.
\bibitem{Weinstein85}
M. I. Weinstein,
\emph{Modulational stability of ground states of nonlinear Schr\"odinger equations},
SIAM J. Math. Anal. 16 (1985) 472--491.
\end{thebibliography}
\text{e} nd{document}
\begin{document}
\newcommand{\mathbb{Z}_3}{\mathbb{Z}_3}
\newcommand{\mathbb{Z}_9}{\mathbb{Z}_9}
\newcommand{\freefactor}{\mathfrak{L} \left (\mathbf{F} _\frac{11}{3}\right)}
\newcommand{\mathfrak{L} \left (\mathbf{F} _{t}\right)}{\mathfrak{L} \left (\mathbf{F} _{t}\right)}
\newcommand{\inff\otimes R}{\mathfrak{L} \left (\mathbf{F} _{t}\right)\otimes R}
\newcommand{\freefactor\otimes R_{0}}{\freefactor\otimes R_{0}}
\newcommand{\left (\tenpro\right )}{\left (\freefactor\otimes R_{0}\right )}
\newcommand{\left ( \tenpro\right )\rtimes_{\gamma}\gruppo}{\left ( \freefactor\otimes R_{0}\right )\rtimes_{\gamma}\mathbb{Z}_3}
\newcommand{\obstruction}{e^{\frac{2\pi i}{3}}}
\newcommand{\obstruconj}{e^{\frac{-2\pi i}{3}}}
\newcommand{\jonesinv}{e^{\frac{2\pi i}{9}}}
\newcommand{\jonesconj}{e^{\frac{-2\pi i}{9}}}
\newcommand{R_{-1}\rtimes _{\theta}\luna}{R_{-1}\rtimes _{\theta}\mathbb{Z}_9}
\newcommand{M_{9}(\mathbb{C})}{M_{9}(\mathbb{C})}
\newcommand{\bar{\delta}}{\bar{\delta}}
\newcommand{\mathcal {M}}{\mathcal {M}}
\newcommand{\operatorname{Ad\,}}{\operatorname{Ad\,}}
\newcommand{\{\operatorname{Ad} _{N}u\, |\, u\in N\text{ is fixed by }G\}}{\{\operatorname{Ad} _{N}u\, |\, u\in N\text{ is fixed by }G\}}
\newcommand{\overline{\mbox{Fint}}}{\overline{\mbox{Fint}}}
\newcommand{clos\{\Ad u\, |\, u \text{ is fixed by }\gruppo \}}{clos\{\operatorname{Ad\,} u\, |\, u \text{ is fixed by }\mathbb{Z}_3 \}}
\newcommand{Id\otimes (\Ad U_0 ^{*}\,\beta)}{Id\otimes (\operatorname{Ad\,} U_0 ^{*}\,\beta)}
\newcommand{\widehat{\mathbb{Z}}_{3}}{\widehat{\mathbb{Z}}_{3}}
\newcommand{\M\rtimes_{\widehat{\gamma}}\dualgr}{\mathcal {M}\rtimes_{\widehat{\gamma}}\widehat{\mathbb{Z}}_{3}}
\newcommand{$
\blacksquare$}{$
\blacksquare$}
\newcommand{\operatorname{Ad}}{\operatorname{Ad}}
\newcommand{\mathrm{Re}}{\mathrm{Re}}
\newcommand{\operatorname{Int}(M)}{\operatorname{Int}(M)}
\newcommand{\operatorname{Int}(N)}{\operatorname{Int}(N)}
\newcommand{\operatorname{Int}(R)}{\operatorname{Int}(R)}
\newcommand{\operatorname{Int}\left (\freefactor\right )}{\operatorname{Int}\left (\freefactor\right )}
\newcommand{\operatorname{Int}\pten}{\operatorname{Int}\left (\tenpro\right )}
\newcommand{\operatorname{Ct}(M)}{\operatorname{Ct}(M)}
\newcommand{\operatorname{Ct}(N)}{\operatorname{Ct}(N)}
\newcommand{\operatorname{Ct}(R_{0})}{\operatorname{Ct}(R_{0})}
\newcommand{\operatorname{Ct}\pten}{\operatorname{Ct}\left (\tenpro\right )}
\newcommand{\operatorname{Ct}\left (\pippo\right )}{\operatorname{Ct}\left (\inff\otimes R\right )}
\newcommand{\operatorname{Aut}(M)}{\operatorname{Aut}(M)}
\newcommand{\operatorname{Aut}(R)}{\operatorname{Aut}(R)}
\newcommand{\operatorname{Aut}\pten}{\operatorname{Aut}\left (\tenpro\right )}
\newcommand{\operatorname{Aut}\left (\freefactor\right )}{\operatorname{Aut}\left (\freefactor\right )}
\newcommand{\operatorname{Aut}(N)}{\operatorname{Aut}(N)}
\newcommand{\operatorname{Out}(M)}{\operatorname{Out}(M)}
\newcommand{\operatorname{Out}(N)}{\operatorname{Out}(N)}
\newcommand{\overline{\operatorname {Int}(M)}}{\overline{\operatorname {Int}(M)}}
\newcommand{\overline{\operatorname{Int}(N)}}{\overline{\operatorname{Int}(N)}}
\newcommand{\overline{\operatorname{Int}(R)}}{\overline{\operatorname{Int}(R)}}
\newcommand{\overline{\operatorname{Int}\left (\freefactor\right )}}{\overline{\operatorname{Int}\left (\freefactor\right )}}
\newcommand{\overline{\operatorname{Int}\pten}}{\overline{\operatorname{Int}\left (\tenpro\right )}}
\title[On a Subfactor Construction]{On a Subfactor Construction of a Factor Non-Antiisomorphic to Itself}
\author{by Maria Grazia Viola}
\address{\hskip-\parindent Department of Mathematics \\Texas A\&M University \\ College Station TX 77843, USA\\ Fax: (979) 845-3643.}
\email{[email protected]}
\subjclass[2000]{46L37, 46L40, 46L54}
\begin{abstract}
We define a $\mathbb{Z}_3$-kernel $\alpha$ on $\freefactor$ and a $\mathbb{Z}_3$-kernel $\beta$ on the
hyperfinite factor $R$, which have conjugate obstructions to lifting. Hence, $\alpha\otimes\beta$ can be perturbed by
an inner automorphism to produce an action $\gamma$ on $\freefactor\otimes R_{0}$. The aim of this paper is to show that the factor
$\mathcal {M}=\left ( \tenpro\right )\rtimes_{\gamma}\gruppo$, which is similar to Connes's example of a $II_{1}$ factor non-antiisomorphic to itself, is the enveloping algebra of an
inclusion of $II_{1}$ factors $A\subset B$. Here $A$ is a free group
factor and $B$ is isomorphic to the crossed product $A\rtimes_{\theta}\mathbb{Z}_9$,
where $\theta$ is a $\mathbb{Z}_3$-kernel of $A$\ with non-trivial obstruction to
lifting. By using an argument due to Connes, which involves the invariant $\chi (\mathcal {M})$, we
show that $\mathcal {M}$ is not anti-isomorphic to itself. Furthermore, we
prove that for one of the generators of $\chi (\mathcal {M})$, which we will denote by $\sigma$,
the Jones invariant $\varkappa (\sigma)$ is equal to $\jonesinv$.
\end{abstract}
\maketitle
\section{Introduction}
A von Neumann algebra $M$ is anti-isomorphic to itself if there exists a vector space isomorphism $\Phi :M\longrightarrow M$ with the properties $\Phi (x^{*})=\Phi (x)^*$ and $\Phi (xy)=\Phi (y)\Phi (x)$ for every $x,y\in M$. This is equivalent to saying that $M$ is isomorphic to its conjugate algebra $M^{c}$ (defined in Section 6).
With his example of a II$_{1}$ factor non-antiisomorphic to itself (cf. \cite{Connes6}), A. Connes gave in the '70s an answer to a crucial problem posed by F. Murray and J. von Neumann a few decades earlier, concerning the possibility of realizing every II$_{1}$ factor as the left regular representation of a discrete group. His example was obtained from the II$_{1}$ factor $\mathfrak{L} \left (\mathbf{F} _{4}\right)\otimes R$, where $R$ denotes the hyperfinite II$_{1}$ factor, using a crossed product construction with a $\mathbb{Z}_3$-action. After the innovative work on subfactors done by V. Jones in the '80s (see \cite{Jones3}), it was a natural question to ask whether Connes's factor could be obtained through a subfactor construction of finite index. Although extensive work (\cite{Connes1}, \cite{Connes6}, and \cite{Jones1}) has been done by both Connes and Jones on examples of II$_{1}$ and III$_{\lambda}$ factors non-antiisomorphic to themselves, it does not seem that this problem has been addressed before, and there is little literature on the subject.
In this paper we provide a positive answer to this question by giving an explicit subfactor construction for our example of a II$_{1}$ factor non-antiisomorphic to itself. Our model is a variation of Connes's example (\cite{Connes4} and
\cite{Connes6}), since we utilize in our approach the recently developed theory of interpolated free group factors (\cite{Radulescu1} and \cite{Dykema2}). The II$_{1}$ factor $\mathcal {M}$ we are going to study is constructed from the tensor product of the interpolated free group factor $\freefactor$ and the hyperfinite II$_{1}$ factor $R$. We use two $\mathbb{Z}_3$-kernels, $\alpha\in\operatorname{Aut}\left (\freefactor\right )$ and $\beta\in\operatorname{Aut}(R)$,
which have conjugate obstructions to lifting, to generate an action of $\mathbb{Z}_3$ on $\freefactor\otimes R$. The action is given, up to
an inner automorphism, by $\alpha\otimes\beta$, and the factor $\mathcal {M}$ is equal to the crossed product $\left (\freefactor\otimes R\right )\rtimes _{\gamma}\mathbb{Z}_3$ (cf. Section $4$).
The main result of this paper is Theorem \ref{main}, where we show that $\mathcal {M}$ is the enveloping algebra of an inclusion
$A\subset B$ of interpolated free group factors. Here $A$ is isomorphic to $\mathfrak{L}\left (\mathbf{F} _{\frac{35}{27}} \right)$ (Proposition \ref{proposition4.3}),
and $B$ is equal to the crossed product $A\rtimes _{\theta}\mathbb{Z}_9$, for a $\mathbb{Z}_9$-action $\theta$ of $A$ with outer period 3, and obstruction $\obstruction$ to lifting. The proof is based on Voiculescu's random matrix model for circular and semicircular elements introduced
in \cite{Vocu}. An analogous argument has been used by F. R\u{a}dulescu in \cite{Radulescu2} to prove that a variation of the example
given by Jones of a II$_{1}$ factor with Connes invariant equal to $\mathbb{Z}_{2}\otimes\mathbb{Z} _{2}$ has a subfactor construction.
We also show in Section 5 that the Connes invariant of our factor $\mathcal {M}$ is equal to $\mathbb{Z}_9$, a result announced by Connes in \cite{Connes4}. This invariant, which is defined for every factor $M$ with a separable predual, was introduced by Connes in \cite{Connes6}. It consists of an abelian subgroup of the group of outer automorphisms, and it is denoted by $\chi (M)$. It is an important tool for distinguishing factors, since it is an isomorphism invariant of the factor $M$. It is trivial for some of the most common II$_{1}$ factors, like the interpolated free group factors and the hyperfinite II$_{1}$ factor, as well as for any tensor product of these factors. However, a crossed product construction yields in general a non-trivial $\chi (M)$. To
compute the Connes invariant of the factor $\mathcal {M}=\left ( \tenpro\right )\rtimes_{\gamma}\gruppo$, we use the short exact sequence described by Connes in \cite{Connes6}.
In addition, we show that if $\sigma$ denotes the generator of $\chi (\mathcal {M})$ described in Remark \ref{add}, then the invariant $\varkappa (\sigma)$, introduced by Jones in \cite{Jones1}, is equal to $\jonesinv$. The Jones invariant is defined for any element $\theta$ of $\chi (M)$, where $M$ is a II$_{1}$ factor without non-trivial hypercentral sequences, and it consists of a complex number of modulus one. It is a finer invariant than $\chi (M)$ and it is constant on the conjugacy class of $\theta$ in the group of outer automorphisms. Moreover, it behaves nicely with respect to antiautomorphisms of $M$, in the sense that conjugation by an antiautomorphism changes $\varkappa (\theta )$ by complex conjugation (see \cite{Jones1} for details).
Lastly, in Section 6 we use an argument of Connes \cite{Connes4} to show that $\mathcal {M}=$\linebreak $\left ( \tenpro\right )\rtimes_{\gamma}\gruppo$ is not anti-isomorphic to itself. The two main ingredients of this argument are the uniqueness (up to inner automorphism) of the decomposition of $\gamma$ into the product of an approximately inner automorphism and a centrally trivial automorphism, and the fact that the unique subgroup of order $3$ in $\chi (\mathcal {M})$ is
generated by the dual action $\widehat{\gamma}$ on $\mathcal {M}=\left ( \tenpro\right )\rtimes_{\gamma}\gruppo$. Using this decomposition we obtain a canonical way to associate to the II$_{1}$ factor $\mathcal {M}$ a complex number, the obstruction to lifting of the approximately inner automorphism in the decomposition of $\gamma$, which is invariant under isomorphism, and distinguishes $\mathcal {M}$ from its conjugate algebra $\mathcal {M} ^{c}$.
\section{Definitions}
Let $M$ be a factor with separable predual. Denote by $\operatorname{Aut}(M)$ the group of automorphisms of M endowed with the $u$-topology, i.e., a sequence of automorphisms $\alpha _{n}$ converges to $\alpha$\, if and only if\, $\|\varphi\circ\alpha _{n}-\varphi\circ\alpha\|\rightarrow 0$ for all $\varphi\in M_{*}$.
The definition of the Connes invariant $\chi (M)$ involves three normal subgroups of the group of automorphisms $\operatorname{Aut}(M)$. For a unitary $u$ in $M$ denote by $\operatorname{Ad} _M (u)$ the inner automorphism of $M$ defined by $\operatorname{Ad} _M (u)(x)=uxu^{*}$, for any $x$ in $M$. Let $\operatorname{Int}(M)$ be the subgroup of $\operatorname{Aut}(M)$ formed by all inner automorphisms. Then $\operatorname{Int}(M)$ is normal in $\operatorname{Aut}(M)$ since $\alpha\operatorname{Ad} _M (u)\alpha ^{-1}=\operatorname{Ad} _M\alpha(u)$ for every $\alpha\in\operatorname{Aut}(M)$. Let $\overline{\operatorname {Int}(M)}$ denote the closure of the group $\operatorname{Int}(M)$ in the $u$-topology. The group $\operatorname{Int}(M)$ of inner automorphisms and the group $\overline{\operatorname {Int}(M)}$ of approximately inner automorphisms are two of the groups involved in the definition of the Connes invariant.
We restrict now our attention to II$_{1}$ factors. Recall that if
$\tau$ denotes the unique faithful trace on $M$, then $M$ inherits an $L^{2}$-norm from the inclusion $M\subset L^{2}(M)$, given by $\|x\|_{2}=\tau(x^{*}x)^{1/2}$ for all $x\in M$. Note also that for a II$_{1}$ factor $M$, the
$u$-topology on $\operatorname{Aut}(M)$ is equivalent to the topology for which a sequence of automorphisms $\alpha _{n}$ on $M$ converges to $\alpha$ iff $\displaystyle\lim _{n\rightarrow\infty} \|\alpha _{n}(x)-\alpha (x)\|_{2}\rightarrow 0$. Since our study of automorphisms is simplified in the II$_1$ case, we will always assume that our factors are II$_{1}$, unless otherwise specified.
The last group of automorphisms involved in the definition of the Connes invariant is formed by the centrally trivial automorphisms of M.
Given $a,b\in M$ set $[a,b]=ab-ba$. We say that a bounded sequence $(x_{n})_{n\geq 0}$ in $M$ is central if $\displaystyle\lim_{n \rightarrow \infty}{\|[x_n, y]\|_{2}}=0$ for every $y\in M$.
\begin{definition}
An automorphism $\alpha \in \operatorname{Aut}(M)$ is centrally trivial if for any central sequence $(x_n)$ in $M$ we have $\displaystyle\lim_{n \rightarrow \infty}{\| \alpha (x_n)-x_n \|_{2}}=0.$
\end{definition}
We denote by $\operatorname{Ct}(M)$ the set of centrally trivial automorphisms of $M$, which is a normal subgroup of $\operatorname{Aut}(M)$.
Let $\operatorname{Out}(M)=\displaystyle\frac{\operatorname{Aut}(M)}{\operatorname{Int}(M)}$ be the group of outer automorphisms of $M$, and denote by $\xi :\operatorname{Aut}(M)\to\operatorname{Out}(M)$ the quotient map. The Connes invariant was introduced by Connes in \cite{Connes6} (see also \cite{Connes4}).
\begin{definition}
Let $M$ be a II$_{1}$ factor with separable predual. The Connes invariant of $M$ is the abelian group
$$
\chi(M) =\frac{\operatorname{Ct}(M)\cap\overline{\operatorname {Int}(M)}}{\operatorname{Int}(M)}\subset\operatorname{Out}(M).
$$
\end{definition}
Note that the hyperfinite II$_{1}$ factor $R$ has trivial Connes invariant since $\operatorname{Ct}(R)=\operatorname{Int}(R)$.
Next we want to define a particular class of central sequences, the hypercentral sequences.
\begin{definition}
A central sequence $(x_{n})$ is hypercentral if
$\displaystyle \lim_{n\rightarrow\infty}\|[x_n , y_n]\|_{2}=0$ for every
central sequence $(y_{n})$ in $M$.
\end{definition}
Let $\omega$ be a free ultrafilter over $\mathbb{N}$ and $M$ a II$_{1}$ factor. Denote by $\ell ^{\infty}(\mathbb{N}, M)$ the algebra of bounded sequences in $M$, and by $C_{\omega}$ the subalgebra of bounded sequences $(x_{n})_{n\in\mathbb{N}}$ in $M$ with $\displaystyle\lim_{n\rightarrow\omega}\|[x_{n},y]\|_2 =0$ for all $y\in M$. Let $\mathfrak{I}_{\omega}$ be the subalgebra of $\ell ^{\infty}(\mathbb{N}, M)$ consisting of the sequences for which $\displaystyle\lim _{n\rightarrow\omega}\|x_{n}\|_{2}=0$. Set
\[M^{\omega}=\frac{\ell ^{\infty}(\mathbb{N}, M)}{\mathfrak{I}_{\omega}}\;\text{ and }\; M_{\omega}=\displaystyle\frac{C_{\omega}}{\mathfrak{I}_{\omega}\cap C_{\omega}}\]
Then, $M^{\omega}$ and $M_{\omega}$ are finite von Neumann algebras and
\begin{equation}
\label{purple}
M_{\omega}=M^{\omega}\cap M^{\prime}.
\end{equation}
\begin{remark}
\label{uno}
By \cite{McDuff} the existence of non-trivial hypercentral sequences is
equivalent to the non-triviality of the center of $M_{\omega}$ for some
(and then for all) free ultrafilter $\omega$.
\end{remark}
\begin{definition}
A $\mathbb{Z}_{k}$-kernel on a von Neumann algebra $M$ is an automorphism
$\alpha\in\operatorname{Aut}(M)$ such that there exists a unitary $U$ in $M$ with
the property $\alpha ^{k} =\operatorname{Ad} _M U$.
\end{definition}
Note that if $\alpha$ is a $\mathbb{Z}_{k}$-kernel then $\alpha (U)=\lambda U$,
for $\lambda$ a $k$-th root of unity. If $\lambda\neq 1$ we say that
$\alpha$ has obstruction $\lambda$ to lifting, meaning that the homomorphism $\varphi :\mathbb{Z}_{k}\longrightarrow \operatorname{Out}(M)$ defined by $\varphi (1)=[\alpha]$ cannot be lifted to a homomorphism $\Phi :\mathbb{Z}_{k}\longrightarrow\operatorname{Aut}(M)$ with $\Phi (1)=\alpha$.
We conclude this section by defining an invariant $\varkappa (\theta )$ for
any element $\theta$ in $\chi (M)$.
\begin{definition}
Let $M$ be a II$_1$ factor without non-trivial hypercentral sequences
and take $\theta\in\chi (M)$. Let $\phi$ be an automorphism in $\operatorname{Ct}(M)\cap\overline{\operatorname {Int}(M)}$ with
$\xi (\phi )=\theta$, and $u_{n}$ a sequence of unitaries such that $\phi=\displaystyle\lim _{n\rightarrow\infty}\operatorname{Ad\,} u_{n}$. Since the sequence $(u_{n}^{*}\phi (u_{n}))_{n\geq 0}$ is hypercentral, there exists a sequence of scalars $(\lambda _{n})_{n\geq 0}$ with the properties that $\displaystyle\lim_{n\rightarrow\infty}\| \phi (u_n )-\lambda _{n}u_{n}\|_{2}=0$, and $(\lambda _{n})_{n\geq 0}$ converges to some $\lambda _{\phi}\in\mathbb{T}$ (Lemmas 2.1 and 2.2 in \cite{Jones1}). Hence,
\[\lim_{n\rightarrow\infty}{\| \phi (u_n )-\lambda_{\phi }u_n\|}_{2}=0 \]
and the Jones invariant is $\varkappa (\theta)=\lambda_{\phi }$.
\end{definition}
Jones proved in \cite{Jones1} that this definition makes sense
(i.e. $\varkappa (\theta )$ does not depend on the choice of $\phi$ or $u_n$)
and that $\varkappa$ is a conjugacy invariant, meaning that if $\alpha,\beta$ belong to $\operatorname{Ct}(M)\cap\overline{\operatorname {Int}(M)}$ and there exists $\psi\in\operatorname{Aut}(M)$ such that
$\psi\alpha\psi ^{-1}=\beta$, then $\varkappa(\xi (\alpha))=\varkappa (\xi
(\beta))$.
\section{Preliminaries}
Let $M$ be a factor with separable predual. $M$ is said to be full if $\operatorname{Int}(M)$ is closed in $\operatorname{Aut}(M)$ with respect to the $u$-topology. Obviously
all type $I$ factors are full, while the hyperfinite factor $R$ provides an
example of a II$_{1}$ factor which is not full since $\overline{\operatorname{Int}(R)}=\operatorname{Aut}(R)\neq
\operatorname{Int}(R)$.
\begin{remark}
\label{due}
For an arbitrary factor, being full is equivalent to having no non-trivial
central sequence (see \cite{Connes2}).
\end{remark}
The following result, due to Connes, is an easy consequence of Lemma 4.3.3 in \cite{Sakai} and Corollary 3.6 in \cite{Connes1}. Some of the arguments used in the proof can be found in \cite{Jones2}.
\begin{lemma}
\label{second}
Let $G$ be a discrete group containing a non-abelian free group and let
$\tau$ be the usual trace on $\mathfrak{L}(G)$. Then $\mathfrak{L}(G)$ is full.
\end{lemma}
{\bf Proof} Set $E=G-\{e\}$, where $e$ denotes the identity element in $G$.
Let $g_{1},\, g_{2}$ be two generators of the free group and $F=\{g\in E\,
|\, g=g_{1}\tilde{g},\;\tilde{g}\in E\}$. Take $x\in \mathfrak{L}(G)$. Then
$x$ can be expressed as $x=\displaystyle\sum_{g\in G}{\lambda_g \delta_g}$ and the
function $f:G\to \mathbb{C}$ defined by $f(g)=\lambda_g$ belongs to $l^2(G)$.
For such $f$ we have that
\[ \sum_{g\in E}{|f(g)|^2}=\|x-\tau (x)\|_2 ^2\,\text{ and }\,
\sum_{g\in G}{|f(g_i g g_i ^{-1})-f(g)|^2} =\|[x,\delta_{g_i}
]\|_{2}^{2}.\]
Now if $(x_n )$ is a central sequence in $\mathfrak{L}(G)$ then
$\| [x_{n},\delta_{g_i}] \|_2\rightarrow 0$ as $n\rightarrow\infty$,
so we can apply Lemma 4.3.3 in \cite{Sakai} and conclude that
\begin{equation}
\label{puro}
\lim _{n\rightarrow\infty}\|x_n -\tau (x_n )\|_2 =0
\end{equation}
Let $\alpha$ be any automorphism in $\overline{\operatorname {Int}(\mathfrak{L}(G))}$ and choose a sequence of unitaries $(u_{n})_{n}$ such that $\alpha=\displaystyle\lim _{n\longrightarrow\infty} \operatorname{Ad\,} (u_{n})$. Since $(u_{n}^{*}u_{n+1})_{n\geq 0}$ is a central sequence in $\mathfrak{L}(G)$, by (\ref{puro}) there exists $\lambda _{n}\in\mathbb{T}$ such that
$$
\|u_{n}^{*}u_{n+1}-\lambda _{n}1\|_{2}<\frac{1}{2^n}.
$$
Set $\displaystyle v_{n}=\left (\prod _{i=1}^{n}{\lambda _{i}}\right )u_{n+1}$. Then $(v_{n})_{n\in\mathbb{N}}$ is a Cauchy sequence, so it converges to some $t$ in $\mathfrak{L}(G)$. Since
$$
\operatorname{Ad\,} (t)=\lim _{n\rightarrow\infty}\operatorname{Ad\,} (v_{n})=\lim _{n\rightarrow\infty}\operatorname{Ad\,} (u_{n+1})=\alpha
$$
we have that $\alpha\in\operatorname {Int}(\mathfrak{L}(G))$.
$
\blacksquare$
Using the following remark we can conclude that not only the free group factors are full, but also the interpolated ones.
\begin{remark}
If $N\subseteq M$ is an inclusion of II$_{1}$ factors and $p$ is a projection in $N$, then $p(N^{\prime}\cap M)p=pN^{\prime}p\cap pMp$.
\end{remark}
One inclusion of the previous remark is obvious. The other one is proved using the following argument due to S. Popa. Take any element $z\in pN^{\prime}p\cap pMp$, so that $z=x'p$ with $x^{\prime}\in N^{\prime}$. Let $q$ be a maximal projection in $N$ with the properties that $p\leq q\leq 1$ and $x^{\prime}q\in M$. To show that $q=1$, suppose that $1-q\neq 0$. Then $(1-q)Np\neq 0$, so using the polar decomposition of a non-zero element in $(1-q)Np$ we can find $0\neq v\in N$ such that $v^{*}v\leq p$ and $vv^{*}\leq 1-q$. Thus $x^{\prime}vv^{*}=vx^{\prime}v^{*}=vpx^{\prime}pv^{*}\in M$ and $x^{\prime}(q+vv^{*})\in M$, contradicting the maximality of $q$.
Taking $N=A$ and $M=A^{\omega}$ in the previous remark, and using (\ref{purple}) we obtain that the compression of a full factor is also full.
\begin{remark}
\label{remark3.4}
Let $A$ be a II$_{1}$ factor with separable predual and $p$ a projection in $A$. Then $A$ is full if and only if $pAp$ is full. In particular, any interpolated free group factor $\mathfrak{L} \left (\mathbf{F} _{t}\right)$, with $t>1$, is full.
\end{remark}
\begin{proposition}
\label{sixth}
Let $\mathfrak{L} \left (\mathbf{F} _{t}\right)$, for $t\in\mathbb{R}$ and $t>1$, be any interpolated free group factor, and denote by $R$ the hyperfinite II$_{1}$ factor. Then $\mathfrak{L}(F_{t})\otimes R$ has no non-trivial hypercentral sequences.
\end{proposition}
{\bf Proof}
We start by proving the result for $\mathfrak{L}(G)\otimes R$, where $G$ is a discrete group containing a non-abelian free group. First we want to
show that any central sequence in $\mathfrak{L}(G)\otimes R$ has the form
$(1\otimes x_{n})_{n\geq 0}$, for a central sequence $(x_{n})_{n\geq 0}$ in $R$.
Denote by $g_{i}$, for $i=1,2$ the generators of $F_{2}\subseteq G$. By the proof of Lemma \ref{second}, $\mathfrak{L}(G)$ satisfies the hypothesis
of Lemma 2.11 in \cite{Connes2} with $Q_1=\mathfrak{L}(G)$, $Q_2=R$ and $b_{i}
=\delta _{g_{i}}$. Therefore, we can
apply the above mentioned lemma to any central sequence $(X_n)_{n\geq 0}$ in $\mathfrak{L}(G)\otimes R$ to obtain that $\displaystyle\lim_{n\rightarrow\infty}\|X_n-(\tau\otimes 1)(X_n)\|_2=0$.
Since $(\tau\otimes 1)(X_n)\in\mathbb{C}\otimes R$, this implies that
$X_n=1\otimes x_n$ for a central sequence $(x_{n})_{n\geq 0}$ in $R$.
Now suppose $(Y_{n})_{n\geq 0}$ is a hypercentral sequence in $\mathfrak{L}(G)\otimes R$. Since
$(Y_{n})$ is central it has the form $Y_n=1\otimes y_n$, for a hypercentral
sequence $(y_{n})$ in $R$. So we need only to prove that $R$ has
no non-trivial hypercentral sequences.
This follows immediately from Remark \ref{uno} and Theorem 15.15 in \cite{Kawi}.
In the case of the factor $\inff\otimes R$, we can find an integer $k>1$ and a projection $p$ in $\mathfrak{L}(F_{k})\otimes R$ such that $\inff\otimes R\cong p(\mathfrak{L}(\mathbf{F} _{k})\otimes R)p$. Obviously $p$ belongs to $(\mathfrak{L}(\mathbf{F} _{k})\otimes R)' _{\omega}$. Therefore, by Remark \ref{uno} it is enough to show that given a II$_{1}$ factor $M$ and a projection $p\in M_{\omega}^{\prime}$, $(pMp)_{\omega}$ is a factor if and only if $M_{\omega}$ is a factor. This is an immediate consequence of the equality $(pMp)_{\omega}=pM_{\omega}p$.
$
\blacksquare$
\begin{lemma}
\label{ninth}
If $\alpha\in\operatorname{Ct}\left (\pippo\right )$ then $\alpha=\operatorname{Ad\,} z (\nu\otimes id)$, for some unitary
$z\in\inff\otimes R$ and an automorphism $\nu$ of $\mathfrak{L} \left (\mathbf{F} _{t}\right)$.
\end{lemma}
{\bf Proof}
Let $(K_n)_{n\in\mathbb{N}}$ be an increasing sequence of finite
dimensional subfactors of $R$ generating $R$, and denote by $R_n=K_n^{'}\cap R$ the
relative commutant of $K_n$ in $R$.
Set $L_n=1\otimes R_n\subset\inff\otimes R$. Then there exists an $n_0$ such that
for all $x\in L_{n_0}$ with $\|x\| \leq 1$ one has $\| \alpha(x)-x\|_2\leq
\frac{1}{2}$. Indeed, otherwise there would exist a uniformly bounded sequence
$(x_{n})$, with $x_{n}\in L_{n}$ and $\|x_n\|\leq 1$, such that $\| \alpha(x_n)-
x_n\|_2 >\frac{1}{2}$. But $(x_{n})$ is a central sequence in $\inff\otimes R$,
because for each $m$ and $n\geq m$, $x_n$ commutes with $\mathfrak{L} \left (\mathbf{F} _{t}\right)\otimes
K_m$, so we get a contradiction.
By Lemma 3.3 in \cite{Connes1}, up to an inner automorphism, $\alpha$ is of the
form $\alpha _1\otimes 1_{R_{n_0}}$, where $\alpha _1$ is an automorphism
of $\mathfrak{L} \left (\mathbf{F} _{t}\right)\otimes K_{n_0}$. Set $F=1\otimes K_{n_0}$. Then $F$ is a type
$I$ subfactor of $\mathfrak{L} \left (\mathbf{F} _{t}\right)\otimes K_{n_0}$.
Applying Lemma 3.11 in \cite{Connes1} to $\alpha_1$ we obtain that
$\alpha_1 |_{1\otimes K_{n_0}}=\operatorname{Ad\,} V |_{1\otimes K_{n_0}}$ for some unitary $V$. This implies
that $\alpha=\operatorname{Ad\,} z(\nu\otimes 1)$ for some automorphism $\nu$ of
$\mathfrak{L} \left (\mathbf{F} _{t}\right)$.
$
\blacksquare$
\section{The subfactor construction of interpolated free group factors}
In this section we use Voiculescu's random matrix model for free
group algebras \cite{Vocu}, to show that the crossed product $\left ( \tenpro\right )\rtimes_{\gamma}\gruppo$ can be
realized as the enveloping algebra of an inclusion of interpolated free group factors $A\subset B$.
For this purpose we first give an explicit construction of $\left ( \tenpro\right )\rtimes_{\gamma}\gruppo$,
by providing models for the II$_{1}$ factors $\freefactor$ and $R$.
Let $\{X_1, X_2, X_3\}$ be a free semicircular family and $u=\displaystyle\sum_{j=1}^{3}{e^{\frac{2\pi ij}{3}} e_j}$ a unitary whose spectral projections $\{e_{j}\}_{j=1} ^{3}$ have trace
$\frac{1}{3}$. Assume also that $\{u\}^{\prime\prime}$ is free with respect to
$\{X_1, X_2, X_3\}^{\prime\prime}$. Then $\freefactor$ can be thought of as the von Neumann
algebra generated by $\{X_1, X_2, X_3, u\}$ as in \cite{Vocu} and
\cite{Radulescu1}.
The model for $R$ is outlined in the following lemma. It is analogous to the
construction given in the case of $\mathbb{Z}_{2}$ by R\v{a}dulescu \cite{Radulescu2}. We include it here for the sake of completeness.
\begin{lemma}
\label{tenth}
Given a von Neumann algebra $M$, let $(U_k)_{k\in\mathbb{Z}}$ be a family of unitaries in $M$ of order $9$. Assume that each $U_{k}$ has a decomposition of the form $\displaystyle U_{k}=\sum _{j=1} ^{9}e^{\frac{2\pi i j}{9}}\, U_{k}^{(j)}$, where each spectral projection $U_{k}^{(j)}$ has trace $\frac{1}{9}$. Let $g=\displaystyle\sum_{j=1}^{3}{e^{\frac{2\pi i j}{3}}g_j}$ be a unitary in $M$ of order $3$ whose spectral projections $\{g_j\}_{j=1}^{3}$ have trace $\frac{1}{3}$. Suppose that the following relations hold between the $U_{k}$'s and $g$:
\begin{enumerate}
\item [(i)]
$U_k gU_k^* =\obstruconj\, g$ if $k=0,-1$, while $U_k gU_k ^* =g$ if $k\in\mathbb{Z}\backslash\{0,-1\}$,
\item [(ii)]
$U_{k} U_{k+1}U_{k}^{*}=\jonesinv\, U_{k+1}$, for $k\in\mathbb{Z}$,
\item [(iii)]
$U_{i} U_{j}=U_{j} U_{i}$ if $|i-j|\geq 2$.
\end{enumerate}
The algebra generated by the $U_{k}$'s and $g$ is endowed with a trace defined by $\tau (m)=0$, for each non-trivial monomial $m$ in these unitaries.
Set
$$
R_{-1}=\{gU_{0}^{3}, U_{1}, U_{2},\hdots\}^{\prime\prime}\subset\{g,U_{0}, U_{1}, U_{2},\hdots\}^{\prime\prime}=R_{0}.
$$
This defines an inclusion of type II$_{1}$ factors of index $9$, such that $R_{-1}'\cap R_{0}=\{g\}^{\prime\prime}$.
Let $\theta=\operatorname{Ad} _{R_{-1}}(U_0)$. Then $\theta$ is an outer automorphism of
$R_{-1}$ of order $9$ with outer invariant $(3,\obstruction )$. Moreover,
$R_0$ is equal to the crossed product $R_{-1}\rtimes _{\theta}\luna$.
Also, the Jones tower for the inclusion $R_{-1}\subset R_0$ is given by
$$
R_{-1}\subset R_{0}\subset R_{1}\subset\cdots\subset R_{k-1}\subset R_{k}\subset R_{k+1}\subset\cdots ,
$$
where $R_k =\{gU_{-1}^3\cdots U_{-k}^{3}, U_{-k}, U_{-k+1},\hdots\}^{\prime\prime}$ for $k\geq 1$.
\end{lemma}
{\bf Proof}
The properties of the family $(U_k)_{k\in\mathbb{Z}}$ and of the unitary
$g$ imply immediately that $U_{0}xU_{0}^{*}\in R_{-1}$ for every $x\in R_{-1}$. Therefore, $\theta=\operatorname{Ad} _{R_{-1}}(U_{0})$ defines an automorphism of $R_{-1}$. Since $g$ commutes with $R_{-1}$, $\theta ^{3}=\operatorname{Ad} _{R_{-1}}(U_0 ^3)=\operatorname{Ad} _{R_{-1}}(gU_0 ^3)$ belongs to $\operatorname{Int}(R_{-1})$. Moreover, $\theta (gU_{0}^{3})=\obstruconj gU_{0}^{3}$, so $\theta$ has outer invariant $(3,\obstruconj )$.
Obviously, any monomial in $R_{0}$ can be written using only one occurrence of $U_{0}$ to some power because of the relations between the generators of $R_{0}$. Moreover, by definition, the trace on the algebra generated by the $\{U_{k}\}_{k\in\mathbb{Z}}$ and $g$ (and therefore on its subalgebra $R_{0}$) is compatible with the usual trace defined on the crossed product $R_{-1}\rtimes _{\theta}\luna$, so that $R_{0}=(R_{-1}\cup\{U_{0}\})^{\prime\prime}=R_{-1}\rtimes _{\theta}\luna$.
To show that $R_{-1}^{\prime}\cap R_{0}\subset \{g\}^{\prime\prime}$, write any element $x\in R_{-1}^{\prime}\cap R_{0}$ as $\displaystyle x=\sum _{k=0}^{8}a_{k}U_{0}^{k}$. It is easy to check that $x$ belongs to $R_{-1}^{\prime}$ if and only if $a_{0}\in\mathbb{C}$, $a_{3}$ and $a_{6}$ are multiples of $g^{2}U_{0}^{6}$ and $gU_{0}^{3}$, respectively, and all the other $a_{k}$'s are zero. The other inclusion follows immediately from the relations verified by the $U_{k}$'s and $g$, thus $R_{-1}^{\prime}\cap R_{0}= \{g\}^{\prime\prime}$.
Note that $\operatorname{Ad} _{R_0}(U_{-1})(U_0)=\jonesinv U_0$, while $\operatorname{Ad} _{R_0}(U_{-1})(x)=x$ for all $x\in R_{-1}$. Hence, $\operatorname{Ad} _{R_0}(U_{-1})$ implements the dual action of $\mathbb{Z}_9$ on the crossed product $R_{-1}\rtimes _{\theta}\luna$, and the next step in the Jones tower for the inclusion $R_{-1}\subset R_0$ is given by
$$
R_1 =\{U_{-1},g,U_{0},U_{1},\hdots\}^{\prime\prime}.
$$
Similarly, the other steps in the Jones construction are obtained by adding the unitaries $U_{-2},\, U_{-3}, \hdots$, so that the $k$-th step is given by
\[R_{k} =\{U_{-k},U_{-k+1},\hdots ,U_{-1}, g, U_{0}, U_{1},\hdots\}^{\prime\prime}.\]
$
\blacksquare$
Observe that to construct unitaries with the properties in the statement it is enough to consider the Jones tower for an inclusion of the form $R\subset R\rtimes_{\beta}\mathbb{Z}_9$, where $\beta$ is an automorphism of order $9$ with outer invariant $(3,\obstruconj )$. We choose the unitary $g$ of order $3$ among the elements of the relative commutant. The unitaries implementing the crossed product in the successive steps of the basic construction will satisfy the desired relations.
The next step is to give a concrete realization of the crossed product \linebreak $\mathcal {M}=\left ( \tenpro\right )\rtimes_{\gamma}\gruppo$, with $\gamma$ a $\mathbb{Z}_3$--action on $\freefactor\otimes R_{0}$.
Using the model $\freefactor=\{X_1, X_2, X_3, u\}^{\prime\prime}$, where the $X_{i}$'s are semicircular elements, $u$ is a unitary of order $3$ and $\{X_{i},u \mid \, i=1\hdots 3\}$ is a free family, we define the automorphism $\alpha$ on $\freefactor$ by:
\begin{itemize}
\item []
$\alpha (X_i)=X_{i+1}$, for $i=1,2$
\item[]$\alpha(X_3)=uX_1 u^*$, and
\item []
$\alpha (u)=\obstruction u$.
\end{itemize}
Since $\alpha ^{3}=\operatorname{Ad\,} u$, $\alpha$ is a $\mathbb{Z}_3$--kernel with obstruction
$\obstruction$ to lifting.
For the automorphism $\beta$ on the hyperfinite II$_{1}$ factor we use the model of Lemma \ref{tenth}: $R\cong R_0=\{g, U_0, U_1,\hdots\}^{\prime\prime}$ and $\beta =\operatorname{Ad} _{R_0}(U_{-1})$, with $\beta ^3 =\operatorname{Ad} _{R_0}(g)$ and
$\beta (g)=\obstruconj g$.
Observe that $\alpha\otimes\beta\in\operatorname{Aut}\left (\tenpro\right )$ has outer period $3$ and obstruction to lifting $1$, so it can be perturbed by an inner automorphism to obtain a $\mathbb{Z}_3$-action on $\freefactor\otimes R_{0}$. This action is defined by
\begin{displaymath}
\gamma=\left (\operatorname{Ad} _{\left [\freefactor\otimes R_{0}\right ]} W\right)(\alpha\otimes
\beta ),
\end{displaymath}
where $W$\ is any cube root of $u^{*}\otimes g^{*}$ which is fixed by the automorphism
$\alpha\otimes\beta$. For example, if $\delta=\jonesinv$ and $\{e_{i}\}_{i=1} ^{3}$, $\{g_{j}\}_{j=1} ^{3}$ denote the spectral projections of $u$ and $g$, respectively, take
\begin{equation}
\label{proiezione}
W = \delta E_1+\delta ^{2} E_2+E_3,
\end{equation}
where
\begin{equation}
\label{Wproj}
E_{l}=\displaystyle\sum_{\substack{i,j=1,\hdots , 3,\\ i+j\equiv l\mod 3}}e_{i}\otimes g_{j}, \text{ for }l=1\hdots , 3.
\end{equation}
Note that $\alpha$ acts on the spectral projections of $u$ as $\alpha (e_{i})=e_{i-1}$ for $i=2,3$ and $\alpha (e_{1})=e_{3}$, while $\beta$ acts on the spectral projections of $g$ as $\beta (g_{j})=g_{j+1}$ for $j=1,2$ and $\beta (g_{3})=g_{1}$. Hence, $\alpha\otimes\beta$ fixes $W$.
\begin{observation}
\label{observ}
Note that $W$ belongs to the center of the fixed point algebra of $\alpha
\otimes\beta$ since for any element $z$ in the fixed point algebra we
have
\begin{equation*}
z\,(u\otimes g)=(\alpha\otimes\beta )^{3}(z\,(u\otimes g))=\operatorname{Ad\,} (u\otimes g)
(z\,(u\otimes g))=(u\otimes g)\,z.
\end{equation*}
\end{observation}
We can now prove our main theorem, using an argument similar to the one used by R\u{a}dulescu in \cite{Radulescu2}. We show that $\mathcal {M}=\left ( \tenpro\right )\rtimes_{\gamma}\gruppo$ is the enveloping algebra of an inclusion $A\subset B$ of interpolated free group factors. We divide the proof into two parts, first proving that $A$ is isomorphic to the interpolated free group factor $\mathfrak{L}\left (\mathbf{F} _{\frac{35}{27}}\right )$.
\begin{proposition}
\label{proposition4.3}
Let $v$ be the unitary implementing the crossed product $\mathcal {M}=\left ( \tenpro\right )\rtimes_{\gamma}\gruppo$. Consider the von Neumann subalgebra
$$A=\{X_{i}\otimes 1, u\otimes 1, 1\otimes g, v| i=1,\hdots ,3\}^{\prime\prime}\subset \mathcal {M},$$
endowed with a trace with respect to which $\{X_{i}\otimes 1, u\otimes 1| i=1,\hdots ,3\}$ is a free family, $\{X_{i}\otimes 1\}_{i=1}^{3}$ are semicircular elements, and $u\otimes 1, 1\otimes g, v$ are unitaries of order 3 with spectral projections of trace $\frac{1}{3}$.
Moreover, assume that the following relations are satisfied by the elements of $A$
\begin{itemize}
\item[i)] $[1\otimes g,X_{i}\otimes 1]=0$ for $i=1,\hdots , 3$, and $[1\otimes g,u\otimes 1]=0$.
\item[ii)] $v(u\otimes 1)=\obstruction\, (u\otimes 1)v$
\item[iii)] $v(1\otimes g)=\obstruconj\, (1\otimes g)v$
\item[iv)] $\operatorname{Ad\,} v(X_{i}\otimes 1)=\operatorname{Ad\,} W \circ\alpha(X_{i}\otimes 1)$, where $W=\jonesinv E_{1}+e^{\frac{4\pi i}{9}}E_{2}+E_{3}$ as in (\ref{proiezione}).
\end{itemize}
For any monomial $m$ in the variables $\{X_{i}\otimes 1, u\otimes 1, 1\otimes g| i=1,\hdots , 3\}$, suppose that the trace $\tau$ on the algebra $A$ verifies the following properties:
\begin{enumerate}
\item [(v)]
$\tau(mv^{k})=0$ for $k=1,2$,
\item [(vi)]
the trace of $m$ in $A$ coincides with its trace as element of the von Neumann algebra $\{X_{i}, u| i=1, \hdots ,3\}^{\prime\prime}\otimes\{g\}^{\prime\prime}$.
\end{enumerate}
Under these conditions $A$ is isomorphic to $\mathfrak{L}\left (\mathbf{F} _{\frac{35}{27}}\right )$.
\end{proposition}
{\bf Proof}
First we realize the algebra $A$ in terms of random matrices \cite{Vocu} and then use Voiculescu's free probability theory to show that $A$ is an interpolated free group factor. The random matrix model we give is a subalgebra of the algebra of $9\times 9$ matrices with entries in a von Neumann algebra.
Let $D$ be a von Neumann algebra with a finite trace $\widetilde{\tau}$ which contains a family of free elements $\{a_i\}_{i=1}^{18}$, with the property that the elements $\{a_{i}\}_{i=1} ^{9}$ are semicircular, while the other ones are circular. Denote by $(e_{i j})_{i,j=1,\hdots ,9}$ the canonical system of matrix units in
$M_{9}(\mathbb{C})$.
Set $\epsilon=\obstruction$. Using the same notation as before for the spectral projections of $u$ and $g$, we set
$$
e_{i}\otimes 1=\displaystyle \sum _{\substack{j=1,\hdots ,9,\\j\equiv i\mod 3}}e_{jj}\in M_{9}(\mathbb{C})\subset D\otimes M_{9}(\mathbb{C}),\text{ for } i=1,\hdots ,3,
$$
and
\begin{align*}
1\otimes g_{1}&=e_{11}+e_{55}+e_{99},\\
1\otimes g_{2}&=e_{33}+e_{44}+e_{88}, \\
1\otimes g_{3}&=e_{22}+e_{66}+e_{77}.
\end{align*}
Therefore, $u\otimes 1=\epsilon\, (e_{1}\otimes1)+\epsilon ^{2}\, (e_{2}\otimes 1)+(e_{3}\otimes 1)$ and $1\otimes g=\epsilon\, (1\otimes g_{1})+\epsilon ^{2}\, (1\otimes g_{2})+1\otimes g_{3}$ can be written in matrix notation as
\begin{equation*}
u\otimes 1=\begin{pmatrix}
\epsilon & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & \epsilon ^2 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & \epsilon & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & \epsilon ^2 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & \epsilon & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & \epsilon ^2 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1
\end{pmatrix}
\end{equation*}
\addvspace{\baselineskip}
\begin{equation*}
1\otimes g=\begin{pmatrix}
\epsilon & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & \epsilon ^2 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & \epsilon ^2 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & \epsilon & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & \epsilon ^2 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & \epsilon
\end{pmatrix}
\end{equation*}
\addvspace{\baselineskip}
Moreover, set
$$
v=(e_{1 2}+e_{2 3}+e_{3 1})+(e_{4 5}+e_{5 6}+e_{6 4})+(e_{7 8}+e_{8 9}+e_{9 7}),$$
i.e.,
\begin{equation*}
v=\begin{pmatrix}
0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 \\
1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 \\
0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 \\
0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0
\end{pmatrix}.
\end{equation*}
Note that the unitaries $u\otimes 1$, $1\otimes g$, and $v$ generate a copy of $M_3 (\mathbb{C})\oplus M_3 (\mathbb{C})\oplus M_3
(\mathbb{C})\subseteq M_{9}(\mathbb{C})$.
In addition, with this choice of $u\otimes 1$, $1\otimes g$ and $v$ we obtain that
\begin{equation*}
W=\delta ^2Id\oplus Id\oplus\delta\, Id,
\end{equation*}
where $\delta=\jonesinv$ and $Id$ denotes the identity of $M_3 (\mathbb{C})$. In matrix
notation
\begin{equation*}
W=\begin{pmatrix}
\delta ^2 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & \delta ^2 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & \delta^2 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & \delta & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & \delta & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & \delta
\end{pmatrix}
\end{equation*}
Furthermore, $u\otimes 1$, $1\otimes g$ and $v$ satisfy the required conditions
\begin{align*}
(u\otimes 1)(1\otimes g) &=(1\otimes g)(u\otimes 1),\\
v(u\otimes 1)&=\epsilon \, (u\otimes 1) v,\\
v(1\otimes g)&=\overline{\epsilon}\, (1\otimes g)v.
\end{align*}
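These relations can also be checked numerically from the explicit matrices above. The following short sketch is an illustration only (it is not part of the argument) and assumes $\epsilon=e^{2\pi i/3}$:
\begin{verbatim}
# Numerical check of the relations among u x 1, 1 x g and v (illustration only;
# epsilon = exp(2*pi*i/3) is an assumption made here).
import numpy as np

eps = np.exp(2j * np.pi / 3)

u = np.diag([eps, eps**2, 1, eps, eps**2, 1, eps, eps**2, 1])   # u (x) 1
g = np.diag([eps, 1, eps**2, eps**2, eps, 1, 1, eps**2, eps])   # 1 (x) g

v = np.zeros((9, 9), dtype=complex)                              # v = sum of e_{ij}
for (i, j) in [(1, 2), (2, 3), (3, 1), (4, 5), (5, 6), (6, 4),
               (7, 8), (8, 9), (9, 7)]:
    v[i - 1, j - 1] = 1

assert np.allclose(u @ g, g @ u)                    # (u x 1)(1 x g) = (1 x g)(u x 1)
assert np.allclose(v @ u, eps * (u @ v))            # v(u x 1) = eps (u x 1)v
assert np.allclose(v @ g, np.conj(eps) * (g @ v))   # v(1 x g) = conj(eps)(1 x g)v
\end{verbatim}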
Set $\mathrm{Re}(a)=\frac{1}{2}(a+a^* )$ for $a\in D\otimes M_{9}(\mathbb{C})$.
We set:
\begin{align*}
X_{1}\otimes 1= & a_1\otimes e_{1 1}+a_2\otimes e_{22}+a_3\otimes e_{33}+a_{4}\otimes
e_{44}+a_{5}\otimes e_{55}+a_{6}\otimes e_{66} \\
& +a_{7}\otimes e_{77}+a_{8}\otimes e_{8 8}+a_{9}\otimes e_{99}+2\mathrm{Re}(a_{10}\otimes e_{15})+2\mathrm{Re}(a_{11}\otimes e_{19}) \\
& +2\mathrm{Re}(a_{12}\otimes e_{26})+2\mathrm{Re}(a_{13}\otimes e_{27})+2\mathrm{Re}(a_{14}\otimes e_{3 4})+2\mathrm{Re}(a_{15}\otimes e_{38}) \\
& +2\mathrm{Re}(a_{16}\otimes e_{48})+2\mathrm{Re}(a_{17}\otimes e_{59})+2\mathrm{Re}(a_{18}\otimes e_{67}).
\end{align*}
To find $X_2$ and $X_3$, use the relations:
\begin{equation*}
X_2\otimes 1=\operatorname{Ad\,} (W^* v)(X_1\otimes 1)\quad\text{ and }\quad X_3\otimes 1=\operatorname{Ad\,} (W^* v)(X_2\otimes 1)
\end{equation*}
Thus, using matrix notation we have:
\begin{equation*}
X_1\otimes 1=\begin{pmatrix}
a_{1} & 0 & 0 & 0 & a_{10} & 0 & 0 & 0 & a_{11} \\
0 & a_{2} & 0 & 0 & 0 & a_{12} & a_{13} & 0 & 0 \\
0 & 0 & a_{3} & a_{14} & 0 & 0 & 0 & a_{15} & 0 \\
0 & 0 & a_{14}^* & a_{4} & 0 & 0 & 0 & a_{16} & 0 \\
a_{10}^* & 0 & 0 & 0 & a_{5} & 0 & 0 & 0 & a_{17} \\
0 & a_{12}^* & 0 & 0 & 0 & a_{6} & a_{18} & 0 & 0 \\
0 & a_{13}^* & 0 & 0 & 0 & a_{18}^* & a_{7} & 0 & 0 \\
0 & 0 & a_{15}^* & a_{16}^* & 0 & 0 & 0 & a_{8} & 0 \\
a_{11}^* & 0 & 0 & 0 & a_{17}^* & 0 & 0 & 0 & a_{9}
\end{pmatrix}
\end{equation*}
\begin{equation*}
X_2\otimes 1=\begin{pmatrix}
a_{2} & 0 & 0 & 0 & \bar{\delta} ^{2} a_{12} & 0 & 0 & 0 & \bar{\delta} a_{13} \\
0 & a_{3} & 0 & 0 & 0 & \bar{\delta} ^{2} a_{14} & \bar{\delta} a_{15} & 0 & 0 \\
0 & 0 & a_{1} & \bar{\delta} ^{2} a_{10} & 0 & 0 & 0 & \bar{\delta} a_{11} & 0 \\
0 & 0 & \delta ^{2} a_{10}^{*} & a_{5} & 0 & 0 & 0 & \delta a_{17} & 0 \\
\delta ^{2} a_{12}^{*} & 0 & 0 & 0 & a_{6} & 0 & 0 & 0 & \delta a_{18} \\
0 & \delta ^{2} a_{14}^{*} & 0 & 0 & 0 & a_{4} & \delta a_{16} & 0 & 0 \\
0 & \delta a_{15}^{*} & 0 & 0 & 0 & \bar{\delta} a_{16}^{*} & a_{8} & 0 & 0 \\
0 & 0 & \delta a_{11}^{*} & \bar{\delta} a_{17}^{*} & 0 & 0 & 0 & a_{9} & 0 \\
\delta a_{13}^{*} & 0 & 0 & 0 & \bar{\delta} a_{18}^{*} & 0 & 0 & 0 & a_{7}
\end{pmatrix}
\end{equation*}
\begin{equation*}
X_3\otimes 1=\begin{pmatrix}
a_{3} & 0 & 0 & 0 & \bar{\delta} ^{4} a_{14} & 0 & 0 & 0 & \bar{\delta} ^{2} a_{15} \\
0 & a_{1} & 0 & 0 & 0 & \bar{\delta} ^{4} a_{10} & \bar{\delta} ^{2} a_{11} & 0 & 0 \\
0 & 0 & a_{2} & \bar{\delta} ^{4} a_{12} & 0 & 0 & 0 & \bar{\delta} ^{2}a_{13} & 0 \\
0 & 0 & \delta ^{4}a_{12}^{*} & a_{6} & 0 & 0 & 0 & \delta ^{2} a_{18} & 0 \\
\delta ^{4}a_{14}^{*} & 0 & 0 & 0 & a_{4} & 0 & 0 & 0 & \delta ^{2} a_{16} \\
0 & \delta ^{4}a_{10}^{*} & 0 & 0 & 0 & a_{5} & \delta ^{2} a_{17} & 0 & 0 \\
0 & \delta ^{2} a_{11}^{*} & 0 & 0 & 0 & \bar{\delta} ^{2} a_{17}^{*} & a_{9} & 0 & 0 \\
0 & 0 & \delta ^{2} a_{13}^{*} & \bar{\delta} ^{2} a_{18}^{*} & 0 & 0 & 0 & a_{7} & 0 \\
\delta ^{2} a_{15}^{*} & 0 & 0 & 0 & \bar{\delta} ^{2} a_{16}^{*} & 0 & 0 & 0 & a_{8}
\end{pmatrix}
\end{equation*}
\addvspace{1.3\baselineskip}
Obviously $X_1\otimes 1, X_2\otimes 1, X_3\otimes 1$ commute with $1\otimes g$. Moreover, the assumption that the family $\{a_i\}_{i=1,\hdots ,18}$ is free implies that $\{X_{i}\otimes 1, u\otimes 1 |i=1,\hdots ,3\}$ is a free family with respect to the unique normalized trace on $D\otimes M_{9}(\mathbb{C})$. In
addition, $\{X_{i}\otimes 1, u\otimes 1|i=1,\hdots ,3\}^{\prime\prime}$ and $\{g\}^{\prime\prime}$ are independent with respect to this trace.
To show that the normalized trace of $D\otimes M_{9}(\mathbb{C})$ has the properties (v) and (vi) of the statement, let $m$ be any monomial in the variables $\{X_{i}\otimes 1, u\otimes 1, 1\otimes g| i=1,\hdots ,3\}$, and consider the product $v^k m$ with $k=1,2$. Since $m$ commutes with $1\otimes g$, it
must be of the form
\begin{equation*}
m=\begin{pmatrix}
* & 0 & 0 & 0 & * & 0 & 0 & 0 & * \\
0 & * & 0 & 0 & 0 & * & * & 0 & 0 \\
0 & 0 & * & * & 0 & 0 & 0 & * & 0 \\
0 & 0 & * & * & 0 & 0 & 0 & * & 0 \\
* & 0 & 0 & 0 & * & 0 & 0 & 0 & * \\
0 & * & 0 & 0 & 0 & * & * & 0 & 0 \\
0 & * & 0 & 0 & 0 & * & * & 0 & 0 \\
0 & 0 & * & * & 0 & 0 & 0 & * & 0 \\
* & 0 & 0 & 0 & * & 0 & 0 & 0 & *
\end{pmatrix}
\end{equation*}
If we multiply $m$ by $v$ or $v^2$, we obtain a matrix with zero on
the diagonal and a few non-zero entries outside the diagonal. This implies
that $v^k m$ has zero trace for $k=1,2$. Thus, we have built a matrix model
for $A$ which satisfies the conditions of the statement.
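The vanishing of the trace of $v^{k}m$ can be illustrated numerically as well: replacing the free elements $a_{i}$ by scalars does not change the support pattern of the matrices involved, so the block-structure argument above can be tested directly. The scalar stand-ins below are an illustration only and carry no free-probabilistic meaning:
\begin{verbatim}
# Illustration of property (v): any word m commuting with 1 x g has the displayed
# block pattern, hence tr(v m) = tr(v^2 m) = 0.  Scalars replace the a_i here, which
# only tests the support pattern, not freeness; epsilon = exp(2*pi*i/3) is assumed.
import numpy as np

rng = np.random.default_rng(0)
eps = np.exp(2j * np.pi / 3)

u = np.diag([eps, eps**2, 1, eps, eps**2, 1, eps, eps**2, 1])   # u (x) 1
g = np.diag([eps, 1, eps**2, eps**2, eps, 1, 1, eps**2, eps])   # 1 (x) g
v = np.zeros((9, 9), dtype=complex)
for (i, j) in [(1, 2), (2, 3), (3, 1), (4, 5), (5, 6), (6, 4),
               (7, 8), (8, 9), (9, 7)]:
    v[i - 1, j - 1] = 1

# Support pattern of X_1 (x) 1 (X_2 (x) 1 and X_3 (x) 1 have the same pattern).
X = np.diag(rng.normal(size=9)).astype(complex)
for (i, j) in [(1, 5), (1, 9), (2, 6), (2, 7), (3, 4),
               (3, 8), (4, 8), (5, 9), (6, 7)]:
    c = rng.normal() + 1j * rng.normal()
    X[i - 1, j - 1] = c
    X[j - 1, i - 1] = np.conj(c)

m = X @ u @ X @ g @ u @ X          # a sample word in the generators
assert np.allclose(m @ g, g @ m)   # m commutes with 1 x g
assert abs(np.trace(v @ m)) < 1e-10 and abs(np.trace(v @ v @ m)) < 1e-10
\end{verbatim}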
One can easily check that $A$ is a factor. In order to prove that $A$ is an interpolated free group factor we reduce $A$ by one of the spectral projections of $g$, and show that the new factor we obtain is an interpolated free group factor. Reduce $A$ by $g_3 =1\otimes e_{22}+1\otimes e_{66}+1\otimes e_{77}$, which has trace $\frac{1}{3}$.
Then $g_3 A g_3$ is generated by:
\begin{enumerate}
\item [(i)]
$a_{2}\otimes e_{22}+2\mathrm{Re}(a_{12}\otimes e_{26})+2\mathrm{Re}(a_{13}\otimes
e_{27})+a_{6}\otimes e_{66}+ a_{7}\otimes e_{77} \\
+2\mathrm{Re}(a_{18}\otimes e_{67})$
\item [(ii)]
$a_{3}\otimes e_{22}+2\mathrm{Re}(\bar{\delta} ^{2}a_{14}\otimes e_{26})+
2\mathrm{Re}(\bar{\delta} a_{15}\otimes e_{27})+a_{4}\otimes e_{66}+a_{8}\otimes e_{77} \\
+2\mathrm{Re}(\delta a_{16}\otimes e_{67})$
\item [(iii)]
$a_{1}\otimes e_{22}+2\mathrm{Re}(\bar{\delta} ^{4}a_{10}\otimes e_{26})+
2\mathrm{Re}(\bar{\delta} ^{2} a_{11}\otimes e_{27})+a_{5}\otimes e_{66}+a_{9}\otimes e_{77} \\
+2\mathrm{Re}(\delta ^{2} a_{17}\otimes e_{67})$
\item [(iv)]
$\epsilon ^{2}\otimes e_{22}+1\otimes e_{66}+\epsilon\otimes e_{77}$.
\end{enumerate}
Thus, by Voiculescu's random matrix model \cite{VoDy}, $g_3 A g_3$ is isomorphic
to the free group factor $\mathfrak{L}(F_{3}*\mathbb{Z}_{3})\cong\freefactor$.
This implies that $A$ is also an interpolated free group factor, and using the well-known
formula for reduced factors (\cite{Dykema1}, \cite{Dykema2} or \cite{Radulescu1})
we get that $A=\mathfrak{L}\left (\mathbf{F} _{\frac{35}{27}}\right )$.
$
\blacksquare$
\begin{theorem}
\label{main}
Set
$$
A=\{X_{i}\otimes 1,u\otimes 1, 1\otimes g,v\mid i=1,\hdots ,3\}^{\prime\prime}\subset B=(A\cup \{1\otimes U_{0}\})^{\prime\prime}
$$
in $\mathcal {M}=\left ( \tenpro\right )\rtimes_{\gamma}\gruppo$. Then $A$ is isomorphic to the interpolated free group factor $\mathfrak{L}\left (\mathbf{F}_{\frac{35}{27}}\right )$, and $B$
is the crossed product of $A$ by a $\mathbb{Z}_9$-action $\theta$ on $A$ with outer invariant $(3, \obstruction )$. Furthermore, $\mathcal {M}$ is
the enveloping algebra in the basic construction for the inclusion $A\subset B$.
\end{theorem}
{\bf Proof} First we want to describe how $\operatorname{Ad\,} (1\otimes U_0)$ acts on the subalgebra $A$ of $\mathcal {M}$.
Obviously
\begin{equation*}
\operatorname{Ad\,} (1\otimes U_{0})(X_{i}\otimes 1)=X_{i}\otimes 1\quad\text{ for }i=1,\hdots ,3,
\end{equation*}
\begin{equation*}
\operatorname{Ad\,} (1\otimes U_{0})(u\otimes 1)=u\otimes 1
\end{equation*}
and
\begin{equation*}
\operatorname{Ad\,} (1\otimes U_{0})(1\otimes g)=\obstruconj (1\otimes g).
\end{equation*}
In addition, if $\{E_{i}\}_{i=1} ^{3}$ are the projections defined in (\ref{Wproj}), then
\begin{equation}
\label{E}
\operatorname{Ad\,} (1\otimes U_{0})(E_{i})=E_{i+1}\text{ for } i=1,\hdots , 3,
\end{equation}
where $i+1$ is taken mod 3. Using (\ref{proiezione}) and (\ref{E}), together with the relation $\gamma (1\otimes U_{0})=\delta \operatorname{Ad\,} W(1\otimes U_{0})$, for $\delta=\jonesinv$, we obtain
\begin{align*}
\operatorname{Ad\,} (1\otimes U_{0})(v) & =(1\otimes U_{0})(v(1\otimes U_{0}^{*})v^* )v=\bar{\delta}\,\operatorname{Ad\,} (1\otimes U_0)(W)W^{*} v \\
& =\bar{\delta}\,(E_{1}+\delta E_{2}+\delta ^{2}E_{3})W^{*}v
=\bar{\delta ^{2}}\, (E_1 +E_2 +\obstruction E_3) v.
\end{align*}
It follows that $\operatorname{Ad\,} (1\otimes U_0)$ leaves $A$ invariant. Furthermore,
$\operatorname{Ad} _{A}(1\otimes U_{0})^{3}$ acts as the identity on $\{X_{i}\otimes 1, u\otimes 1, 1\otimes g| i=1,\hdots , 3\}''$,
while $\operatorname{Ad} _{A}(1\otimes U_{0})^{3} (v)=\obstruconj\, v$.
Set $\theta=\operatorname{Ad} _{A}(1\otimes U_{0})$. Then, using the fact that
$\operatorname{Ad} _{A}(1\otimes g^*)$ acts as the identity on $\{X_{i}\otimes 1, u\otimes 1, 1\otimes g| i=1,\hdots ,3\}^{\prime\prime}$, and $\operatorname{Ad\,} v(1\otimes g)=\obstruconj (1\otimes g)$, we conclude
that
$$
\theta ^{3}=\operatorname{Ad} _{A}(1\otimes g^*)\quad\text{ and }\quad\theta(1\otimes g^*)=
\obstruction\, (1\otimes g^*) .
$$
Thus $\theta$ is a $\mathbb{Z}_3$-kernel on $A$ with obstruction $\obstruction$
to lifting.
To complete the proof that $B=A\rtimes_{\theta}\mathbb{Z}_9$ we need to check that
any monomial $m$ in the variables $\{X_{i}\otimes 1, u\otimes 1, 1\otimes g, 1\otimes U_0 , v| i=1,\hdots ,3\}$ can be rewritten so that it contains
only one occurrence of a power of $1\otimes U_0$. We also need to
verify that any monomial containing $1\otimes U_0$ has zero trace.
Since the crossed product $\left ( \tenpro\right )\rtimes_{\gamma}\gruppo$ is implemented by the unitary $v$,
any monomial in the variables $\{X_{i}\otimes 1, u\otimes 1,1\otimes g, 1\otimes U_0 , v| i=1,\hdots ,3\}$
has the form $v^{k} m$, where $m$ is an element in $\freefactor\otimes R_{0}$ and
$k=0,1,2$. Because $1\otimes U_{0}$ commutes with the elements of $\freefactor\otimes 1$ and
$\operatorname{Ad\,} (1\otimes U_{0})(1\otimes g)=\obstruconj\, (1\otimes g)$, it follows that $1\otimes U_{0}$ can appear at most once in $m$ with some power. Thus,
any monomial in $B$ can be written using only one occurrence of
$1\otimes U_0$.
Furthermore, by the definition of the trace on the crossed product, any monomial of the form $v^k m$, for $k=1,2$, has zero trace
in $B$. Therefore, it is enough to compute the trace of any monomial $m$ in $\freefactor\otimes R_{0}$ containing $1\otimes U_{0}$. Because of the definition
of the trace on $R_{0}$ (Lemma \ref{tenth}) and hence on $\freefactor\otimes R_{0}$, we can conclude immediately that the trace of $m$ is zero. Therefore
$B=A\rtimes_{\theta}\mathbb{Z}_9$.
In addition, using an argument similar to the one used to build the model
for $R$ (Lemma \ref{tenth}), one can easily verify that the unitaries $1\otimes U_1,
1\otimes U_2,\hdots$\, implement the consecutive terms in the iterated basic
construction of $A\subset A\rtimes_{\theta}\mathbb{Z}_9$. This implies that
$\left ( \tenpro\right )\rtimes_{\gamma}\gruppo$\ is the enveloping algebra of the above inclusion of
factors.
$
\blacksquare$
\section{The Connes invariant of the crossed product $\left ( \tenpro\right )\rtimes_{\gamma}\gruppo$}
The arguments in this section are analogous to the ones used by Jones in
\cite{Jones1} to compute the Connes invariant for his example of a factor
which is anti-isomorphic to itself but has no involutory antiautomorphism. The definitions of $K$, $K^{\bot}$ and $L$ given below are due to
Connes (see \cite{Connes6}).
Let $N$\ be a II$_{1}$ factor without non-trivial hypercentral sequences and
$G$ a finite subgroup of $\operatorname{Aut}(N)$ such that $G \cap\overline{\operatorname{Int}(N)}=\{Id\}$.
Set $K=G\cap\operatorname{Ct}(N)$ and \linebreak
$K^{\bot}=\{f:G\to\mathbb{T}\, | \text{ f is a homomorphism and } f|_{K}\equiv
1\}$.
Let $\xi :\operatorname{Aut}(N)\to\operatorname{Out}(N)$ be the usual quotient map and Fint be
the subgroup $\{\operatorname{Ad} _{N}u\, |\, u\in N\text{ is fixed by }G\}\subseteq\operatorname{Aut}(N)$. Denote by $\overline{\mbox{Fint}}$ its closure in
$\operatorname{Aut}(N)$ with respect to the pointwise weak topology, and by $G\vee\operatorname{Ct}(N)$ the subgroup of $\operatorname{Aut}(N)$ generated by $G\cup\operatorname{Ct}(N)$. Set $L=\xi ((G\vee\operatorname{Ct}(N))\cap\overline{\mbox{Fint}})\subseteq\operatorname{Out}(N)$.
Connes showed in \cite{Connes6} that there exists an exact sequence
\begin{equation*}
0\rightarrow K^{\bot}\overset{\partial}{\rightarrow}\chi (W^{*}(N,G))
\overset{\Pi}{\rightarrow} L\rightarrow 0
\end{equation*}
where $M=W^{*}(N,G)$ denotes the crossed product implemented by the action of $G$ on $N$.
We briefly describe the maps $\partial$ and $\Pi$ in the exact sequence above. Given an element $x$ in $M$, write it as
$\displaystyle\sum_{g\in G}{a_{g}u_g }$, with $a_{g}\in N$ and $\operatorname{Ad} _{M}u_{g}|_{N}=g$.
For each $\eta :G \rightarrow \mathbb{T}$ in $K^{\bot}$, define the map $\Delta (\eta):M\longrightarrow M$ by
\begin{displaymath}
\Delta (\eta )\left (\sum_{g\in G}{a_g u_g}\right )= \sum_{g\in G}{\eta (g)
a_{g}u_{g}}.
\end{displaymath}
Then $\Delta (\eta )$ belongs to $\operatorname{Ct}(M)\cap \overline{\operatorname {Int}(M)}$, and $\partial=\xi\circ\Delta$ is the desired map.
To see how $\Pi$ acts on $\chi (M)$, for any element $\sigma\in\chi (M)$ choose an automorphism $\alpha\in\operatorname{Ct}(M)\cap\overline{\operatorname {Int}(M)}$ such that $\xi (\alpha )=\sigma$.
The hypothesis $G\cap\overline{\operatorname{Int}(N)}=\{Id\}$ implies that there exists a sequence of unitaries $(u_{n})_{n\geq 0}$ in $N$, which are fixed by $G$, and a unitary $z$ in $M$ such that $\alpha=\displaystyle \operatorname{Ad\,} z\lim _{n\rightarrow\infty}{\operatorname{Ad\,} u_n}$ (Corollary 6 and Lemma 2 in \cite{Jones2}, or Lemma 15.42 in \cite{Kawi}). Set $\psi _{\sigma}=\operatorname{Ad\,} (z^*)\alpha|_{N}\in\operatorname{Aut}(N)$.
One can show that $\psi_{\sigma}\in \overline{\mbox{Fint}}\cap (G\vee\operatorname{Ct}(N))$, and that the class $\xi (\psi _{\sigma})$ does not depend on the choice of $\alpha$, but only on the class $\sigma=\xi (\alpha)$. Therefore, the map \,$\Pi :\chi(M)\to L$\, given by $\Pi(\sigma )=\xi (\psi_{\sigma })$ is well defined.
To show that $\Pi$ is surjective, let $\mu$ be any element in $L$ and denote by $\alpha_{\mu}\in\overline{\mbox{Fint}}\cap (G\vee\operatorname{Ct}(N))$ a representative of $\mu$, i.e., $\xi (\alpha_{\mu})=\mu$. The automorphism $\alpha_{\mu}$ commutes with $G$ since it is
the limit of automorphisms with this property. Hence, the map $\beta_{\mu}$ defined by
\begin{equation}
\label{lifting}
\beta_{\mu}\left (\sum_{g\in G}{a_{g}u_g }\right )=\sum_{g\in G}{\alpha_{\mu}(a_g)u_g}
\end{equation}
is an automorphism of $M$. In addition, we have that $\beta_{\mu}\in\operatorname{Ct}(M)\cap\overline{\operatorname {Int}(M)}$ and $\Pi (\xi (\beta_{\mu}))=\mu$.
\addvspace{\baselineskip}
\begin{remark}[Jones]
\label{tre}
Note that if $u_{n}$ is a sequence of unitaries left invariant by $G$,
with the property $\alpha_{\mu}=\displaystyle\lim_{n\rightarrow\infty}{\operatorname{Ad\,} u_{n}}$ in $\operatorname{Aut}(N)$, and $\beta_{\mu}$ is the map defined in (\ref{lifting}), then
$\beta_{\mu}=\displaystyle\lim_{n\rightarrow\infty} {\operatorname{Ad\,} u_{n}}$ in
$\operatorname{Aut}(M)$. Hence,
\begin{equation*}
\varkappa (\varepsilon (\beta_{\mu}))=\lim_{n\rightarrow\infty}u_{n}^{*}
\beta_{\mu}(u_{n})=\lim_{n\rightarrow\infty}{u_{n}^{*}\alpha_{\mu}
(u_{n})}.
\end{equation*}
\addvspace{\baselineskip}
\end{remark}
Our next goal is to show that if $N=\freefactor\otimes R_{0}$ and $G$ is the subgroup of $\operatorname{Aut}\pten$ generated by $\gamma =\operatorname{Ad\,} W (\alpha\otimes\beta),$ then $\chi (\mathcal {M})\cong\mathbb{Z}_9$, as was observed by Connes in \cite{Connes4}.
For all the rest of this paper we will identify $G=\langle\gamma\rangle$ with
$\mathbb{Z}_3$. Note that by Proposition \ref{sixth}, $\freefactor\otimes R_{0}$ has no non-trivial
hypercentral sequences, so in order to use the exactness of the Connes
sequence described above, we only need to show that $\mathbb{Z}_3\cap\overline{\operatorname{Int}\pten} =
\{Id \}$. Obviously it is enough to check that $\gamma\not\in\overline{\operatorname{Int}\pten}$. This is equivalent to showing that $\alpha\otimes\beta\not\in\overline{\operatorname{Int}\pten}$.
By \cite[Corollary 3.3]{Connes3} if $\alpha\otimes\beta\in\overline{\operatorname{Int}\pten}$ then $\alpha\in\overline{\operatorname{Int}\left (\freefactor\right )}$ and $\beta\in\overline{\operatorname{Int}(R)}$. But $\freefactor$ is
full (Remark \ref{remark3.4}) and $\alpha\not\in\operatorname{Int}\left (\freefactor\right )$, thus we can conclude that $\mathbb{Z}_3\cap\overline{\operatorname{Int}\pten} =\{Id\}$.
Hence, for the factor $\mathcal {M}=\left ( \tenpro\right )\rtimes_{\gamma}\gruppo$ the sequence
$$
0 \rightarrow K^{\bot}\overset{\partial}{\rightarrow}\chi (\mathcal {M})\overset{\Pi}
{\rightarrow} L\rightarrow 0
$$
is exact (\cite{Connes6}). In order to compute $\chi (\mathcal {M})$ we first show that
$$
K^{\bot}=\mathbb{Z}_3\quad\text{and}\quad L=\mathbb{Z}_3.
$$
\begin{lemma}
The group $K=\mathbb{Z}_3\cap\operatorname{Ct}\pten$ is trivial.
\end{lemma}
{\bf Proof}
Obviously it is enough to show that the automorphism $\gamma=\operatorname{Ad\,} W
(\alpha\otimes\beta )$ is not in
$\operatorname{Ct}\pten$. We have already shown in the proof of Lemma \ref{sixth}
that any central sequence in $\freefactor\otimes R_{0}$ has the form
$(1\otimes x_n )_{n\in\mathbb{N}}$ for a central sequence $(x_{n}
)_{n\in\mathbb{N}}$ in $R_{0}$. It follows that $\gamma\in\operatorname{Ct}\pten$ if and only
if $\beta\in\operatorname{Ct}(R_{0})$. Since for $\epsilon=\obstruction$, $\beta$ is outer
conjugate to the automorphism $s_{3}^{\bar{\epsilon}}$ described by Connes
in \cite{Connes5}, and $s_{3}^{\bar{\epsilon}}$ does not belong to $\operatorname{Ct}(R_{0})$ \cite[Proposition 1.6]{Connes5}, we conclude that $\beta\not\in\operatorname{Ct}(R_{0})$.
$
\blacksquare$
\begin{lemma}
\label{eleventh}
The group $L$ is isomorphic to $\mathbb{Z}_3$, and a generator of $L$ is given by
\[ \mu=\xi\left (Id\otimes (\Ad U_0 ^{*}\,\beta)\right ), \]
where $U_0$\ is the unitary in $R_{0}$ defined in Lemma \ref{tenth}, such that $\beta (U_{0})=\jonesinv U_{0}$.
\end{lemma}
{\bf Proof}
We need to show that the automorphism $Id\otimes (\Ad U_0 ^{*}\,\beta)$ belongs to \linebreak $\left (\mathbb{Z}_3\vee\operatorname{Ct}\pten\right )\cap\overline{\mbox{Fint}}$.
To prove that it is in $\mathbb{Z}_3\vee\operatorname{Ct}\pten$, multiply $Id\otimes (\Ad U_0 ^{*}\,\beta)$ by
$\gamma^{-1}=(\operatorname{Ad\,} W (\alpha\otimes\beta))^{-1}$ to obtain the automorphism
\[ \operatorname{Ad\,}\left (W^* (1\otimes U_{0}^{*} )\right )(\alpha^{-1}\otimes
Id), \]
which is in $\operatorname{Ct}\pten$, since the central sequences in $\freefactor\otimes R_{0}$ have the form $1\otimes x_{n}$, with $x_{n}$ central in $R_{0}$. Thus
$Id\otimes (\Ad U_0 ^{*}\,\beta)\in\mathbb{Z}_3\vee\operatorname{Ct}\pten$.
Next we want to show that $Id\otimes (\Ad U_0 ^{*}\,\beta)\in\overline{\mbox{Fint}}$. To this end, we need to exhibit a
sequence $(\tilde{u} _{n})$ of unitaries in $\freefactor\otimes R_{0}$ that are invariant with respect to
$\gamma$ and satisfy $Id\otimes (\Ad U_0 ^{*}\,\beta) =\displaystyle\lim_{n\rightarrow\infty}{\operatorname{Ad\,}\tilde{u} _n}$.
Observe that the sequence $(x_n)_{n\in\mathbb{N}}$ of unitaries in $R_{0}$ given by
\begin{equation*}
x_n=\begin{cases}
U_{0}U_{1}^{*}U_{2}U_{3}^{*}\hdots U_{n}^{*},&\text{ if $n$ is odd}, \\
U_{0}U_{1}^{*}U_{2}U_{3}^{*}\hdots U_{n},& \text{ if $n$ is even} \\
\end{cases}
\end{equation*}
has the properties
\begin{equation*}
\beta =\lim_{n\rightarrow\infty}{\operatorname{Ad\,} x_n}\quad\text{ and }\quad\beta
(x_n)=\jonesinv\, x_n.
\end{equation*}
Define $u_{n}=U_{0}^{*} x_n$. Obviously
\begin{equation*}
Id\otimes (\operatorname{Ad\,} U_{0}^{*}\beta) =\lim_{n\rightarrow\infty}\operatorname{Ad\,} (1\otimes u_n)\quad\text{ and
}\quad\beta (u_n)=u_n
\end{equation*}
so that
\begin{equation}
\label{quarta}
(\alpha\otimes\beta)(1\otimes u_n)=1\otimes u_n.
\end{equation}
In addition, from Observation \ref{observ} it follows that
\[ \gamma(1\otimes u_{n})=\operatorname{Ad\,} W (\alpha\otimes\beta)(1\otimes
u_{n})=1\otimes u_{n}. \]
Thus,
\[Id\otimes (\Ad U_0 ^{*}\,\beta)\in\left (\mathbb{Z}_3\vee\operatorname{Ct}\pten\right )\cap\overline{\mbox{Fint}}.\]
Lastly we want to prove that the order 3 element $\mu =\xi\left (Id\otimes (\Ad U_0 ^{*}\,\beta)\right )$ generates
$L$. Thus we need to show that any element $\varphi$ in $\left (\mathbb{Z}_3\vee\operatorname{Ct}\pten\right )\cap\overline{\mbox{Fint}}$ is of the form $\mu ^{n}\operatorname{Ad\,} w$, for some unitary $w$ in $\freefactor\otimes R_{0}$ and $n=0,\hdots ,2$. Since $\varphi\in\mathbb{Z}_3\vee\operatorname{Ct}\pten$, there exists $k\in\{0,1,2\}$ such that $\varphi\gamma ^{k}$ is
centrally trivial. By Proposition 3.6, it follows that there exists a unitary $z\in\freefactor\otimes R_{0}$ and an automorphism $\nu\in\operatorname{Aut}\left (\freefactor\right )$ such that
$\varphi\gamma ^{k}=\operatorname{Ad\,} z(\nu\otimes Id)$. Therefore,
\[ \varphi=\operatorname{Ad\,} x(\nu\alpha^{-k}\otimes\beta^{-k}), \]
with $x=z(\nu\otimes Id)(W^{k})^{*}\in\freefactor\otimes R_{0}$.
Since $\varphi\in\overline{\mbox{Fint}}\subseteq\overline{\operatorname{Int}\pten}$ and $\freefactor$
is full (Remark \ref{remark3.4}), by \cite[Corollary 3.3]{Connes3} there exists a unitary $w$ in $\freefactor$ such
that $\nu\alpha^{-k}=\operatorname{Ad\,} w$. This implies that
$\varphi=\operatorname{Ad\,} x' (Id\otimes\beta ^{-k})$, where $x'=x\,(w\otimes 1)$. Thus
$\varphi$ differs from a power of $Id\otimes (\Ad U_0 ^{*}\,\beta)$ only by an inner automorphism. Hence
$Id\otimes (\Ad U_0 ^{*}\,\beta)$ generates $L$.
$
\blacksquare$
\addvspace{\baselineskip}
Note that if $u_{n}$ is the sequence of unitaries defined in the previous lemma, then
\[ (Id\otimes (\Ad U_0 ^{*}\,\beta))(1\otimes u_{n})=1\otimes U_{0}^{*}u_{n}U_0. \]
But $\displaystyle\lim_{n\rightarrow\infty}{\operatorname{Ad\,} u_n }=\operatorname{Ad\,} U_{0}^*\,\beta$ and
$\beta(U_{0})=\jonesinv\, U_{0}$, so
\[ \lim_{n\rightarrow\infty}{u_{n}^{*}U_{0}^{*}u_{n}}=\operatorname{Ad\,} U_{0}(\beta^{-1}(U_{0}^{*}))=
\jonesinv\, U_{0}^{*} \]
and
\begin{equation*}
\lim_{n\rightarrow\infty}{(1\otimes u_{n}^{*})(Id\otimes (\Ad U_0 ^{*}\,\beta) )(1\otimes u_n )}=\jonesinv.
\end{equation*}
Thus, in view of Remark \ref{tre} we obtain the following:
\begin{remark}
\label{add}
Let $\mu=\xi\left (Id\otimes (\Ad U_0 ^{*}\,\beta)\right )$ be the generator of $L$ as in the previous lemma. If $\beta_{\mu}\in\operatorname{Ct}(M)\cap\overline{\operatorname {Int}(M)}$ is the automorphism described in
equation (\ref{lifting}), and $\sigma=\varepsilon (\beta_{\mu})$, then
$\varkappa (\sigma)=\jonesinv$.
\addvspace{\baselineskip}
\end{remark}
\begin{theorem}
\label{twelve}
Let $\mathcal {M}$ denote the crossed product $\left ( \tenpro\right )\rtimes_{\gamma}\gruppo$. Then $\chi (\mathcal {M})=\mathbb{Z}_9$.
\end{theorem}
{\bf Proof}
By Connes \cite{Connes6} the sequence
\begin{displaymath}
0 \rightarrow\mathbb{Z}_3\rightarrow\chi (\mathcal {M})\rightarrow\mathbb{Z}_3\rightarrow 0
\end{displaymath}
is exact. Moreover, according to (\ref{lifting}), $\mu=\xi\left (Id\otimes (\Ad U_0 ^{*}\,\beta)\right )$ lifts to the element
$\sigma =\xi (\beta _{\mu})$ of $\chi (M)$, where
\begin{displaymath}
\beta _{\mu}\left (\sum_{k=0}^{2}{a_{k} v^{k}}\right )=\sum_{k=0}^{2}{(Id\otimes (\Ad U_0 ^{*}\,\beta))(a_{k})
v^{k}}.
\end{displaymath}
Since the only possibilities for $\chi (M)$ are $\mathbb{Z}_9$ and $\mathbb{Z}_3\oplus\mathbb{Z}_3$, it is enough to show that
$\sigma ^{3}\neq 1$, i.e. $\beta _{\mu} ^{3} \not\in\operatorname{Int}(M)$.
From the relations
$$
(\alpha\otimes\beta )(1\otimes (U_{0}^{*})^{3}g)=\obstruction\, (1
\otimes (U_{0}^{*})^{3}g)
$$
and
$$
\operatorname{Ad\,} W (1\otimes (U_{0}^{*})^{3} g)=1\otimes (U_{0}^{*})^{3}g
$$
we obtain that
\begin{equation*}
\gamma (1\otimes (U_{0}^{*})^{3}g)= \obstruction (1\otimes (U_{0}^{*})^{3}g).
\end{equation*}
Since $\operatorname{Ad\,} v=\gamma$\, on $\freefactor\otimes R_{0}$, the last equality can be rewritten as
\linebreak $\operatorname{Ad\,} v(1\otimes(U_{0}^{*})^{3}g)=\obstruction (1\otimes (U_{0}^{*})^{3}\,
g)$ or
\begin{equation}
\label{vact}
\operatorname{Ad\,} (1\otimes (U_{0}^{*})^{3}\, g)(v)=\obstruconj v.
\end{equation}
Thus, using the definition of $\beta _{\mu}$, the relation $\beta ^{3}=\operatorname{Ad\,} g$, and (\ref{vact}) we obtain
\begin{displaymath}
\begin{split}
\beta _{\mu} ^{3}\left (\sum_{k=0}^{2}{a_k\, v^{k}}\right )& =\sum_{k=0}^{2}
{\left (Id\otimes(\operatorname{Ad\,} (U_{0}^{*})^{3}\, \beta ^{3})\right )(a_{k})v^{k}}=\sum_{k=0}^{2}{\operatorname{Ad} (1\otimes (U_{0}^{*})^{3}g)(a_{k})
v^{k}} \\
&=\operatorname{Ad\,} (1\otimes (U_{0}^{*})^{3}g)\left (\sum_{k=0}^{2}e^{\frac{2\pi
i k}{3}}a_{k}\, v^{k}\right ),
\end{split}
\end{displaymath}
which implies that up to an inner automorphism $\beta _{\mu}^{3}$ is a dual action, so it is outer and $\chi (\mathcal {M})\cong\mathbb{Z}_9$.
$
\blacksquare$
\section{$\left ( \tenpro\right )\rtimes_{\gamma}\gruppo$ is not anti-isomorphic to itself.}
In this section we are going to show that $\mathcal {M}=\left ( \tenpro\right )\rtimes_{\gamma}\gruppo$ is not
anti-isomorphic to itself by using the dual action $\widehat{\mathbb{Z}}_{3}\to\operatorname{Aut}(M)$,
which gives rise to the only subgroup of order $3$ in $\chi (\mathcal {M})$. This
argument has been described by Connes in \cite{Connes4} and \cite{Connes6}.
First of all note that the action $\gamma$ can be decomposed as
$$
\gamma=\operatorname{Ad} W\,\gamma_{1}\gamma_{2},
$$
where $\gamma_{1}\in\operatorname{Ct}\pten$ and
$\gamma_{2}\in\overline{\operatorname{Int}\pten}$ and $W$ is a unitary in $\freefactor\otimes R_{0}$.
In fact, $\gamma=\operatorname{Ad\,} W(\alpha\otimes Id)(Id\otimes\beta)$ and
$\gamma _{1}=\alpha\otimes Id$ is centrally trivial, since any central
sequence in $\freefactor\otimes R_{0}$ has the form $(1\otimes x_{n})$, for a central sequence
$(x_n)$ in $R_{0}$. Furthermore in the proof of Lemma \ref{eleventh} we showed
that $\beta=\displaystyle\lim_{n\rightarrow\infty}{\operatorname{Ad\,} x_{n}}$ so that
$\gamma _{2}=Id\otimes\beta=\lim_{n\rightarrow\infty}{\operatorname{Ad\,} (1\otimes x_{n})}$
belongs to $\overline{\operatorname{Int}\pten}$.
Note also that this decomposition of $\gamma$ into an approximately inner
automorphism and a centrally trivial automorphism is unique up to inner
automorphisms, since $\chi\left (\tenpro\right )=1$.
Let $M$ be an arbitrary von Neumann algebra. Define the conjugate $M^{c}$
of $M$ as the algebra whose underlying vector space is the conjugate of $M$
(i.e. for $\lambda\in\mathbb{C}$ and $x\in M$ the product of $\lambda$ by $x$ in $M^{c}$
is equal to $\bar{\lambda}x$) and whose ring structure is the same as in $M$.
The opposite $M^{o}$ of $M$ is by definition the algebra whose underlying
vector space is the same as for $M$ while the product of $x$ by $y$ is equal
to $yx$ instead of $xy$. $M^{c}$ and $M^{o}$ are clearly isomorphic through the
map $x\rightarrow x^{*}$. For $\psi\in\operatorname{Aut}(M)$ we denote by $\psi ^{c}$ the
automorphism of $M^{c}$ induced by $\psi$.
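As a trivial matrix illustration (not needed for the argument below), the adjoint map reverses products, which is the mechanism behind the identification $M^{c}\cong M^{o}$:
\begin{verbatim}
# The adjoint map x -> x* reverses products: (xy)* = y* x*.
import numpy as np

rng = np.random.default_rng(1)
x = rng.normal(size=(3, 3)) + 1j * rng.normal(size=(3, 3))
y = rng.normal(size=(3, 3)) + 1j * rng.normal(size=(3, 3))

adj = lambda a: a.conj().T
assert np.allclose(adj(x @ y), adj(y) @ adj(x))
\end{verbatim}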
For the convenience of the reader we detail here Connes's argument showing that the factor $\left ( \tenpro\right )\rtimes_{\gamma}\gruppo$ is not anti-isomorphic to itself (see \cite{Connes4} and
\cite{Connes6}).
\begin{theorem}
$\mathcal {M}=\left ( \tenpro\right )\rtimes_{\gamma}\gruppo$ is not anti-isomorphic to itself.
\end{theorem}
{\bf Proof}
We proved in the previous section (Theorem \ref{twelve}) that
$\chi (\mathcal {M})\cong\mathbb{Z}_9$ and that the dual action $\widehat{\gamma}:\widehat{\mathbb{Z}}_{3}\to\operatorname{Aut}(M)$
produces the only subgroup of order $3$ in $\chi (\mathcal {M})$, namely $\langle\sigma
^{3}\rangle =\langle\xi (\beta _{\mu} ^{3})\rangle$.
Since $\chi (\mathcal {M})$ is an invariant of the von Neumann algebra $\mathcal {M}$, it follows
that $\widehat{\mathbb{Z}}_{3}$ is an invariant of $\mathcal {M}$. This implies that
$\M\rtimes_{\widehat{\gamma}}\dualgr$ is an invariant of $\mathcal {M}$, as is the dual action $\widetilde{\gamma}:\mathbb{Z}_3\longrightarrow\mbox{Aut}(\M\rtimes_{\widehat{\gamma}}\dualgr )$ of $\widehat{\gamma}$.
Since, by Takesaki's duality theory \cite[Theorem 4.5]{Take},
\[ \M\rtimes_{\widehat{\gamma}}\dualgr \cong\left (\tenpro\right )\otimes B(\ell ^{2}(\mathbb{Z}_3 )), \]
and in this identification the dual action $\widetilde{\gamma}$ of $\widehat{\gamma}$
corresponds to the action \linebreak $\gamma\otimes\operatorname{Ad\,} (\lambda (1)^{*})$, where
$\lambda$ is the left regular representation of $\mathbb{Z}_3$ on
$\ell ^{2}(\mathbb{Z}_3 )$, we conclude that $\gamma$ is an invariant of $\mathcal {M}$.
Consequently, the centrally trivial automorphism $\gamma _{1}$ and the approximately inner automorphism $\gamma_{2}$, which
appear in the decomposition of $\gamma$, are also invariants of $\mathcal {M}$.
Note that
\[ \gamma _{1}^{3}=\operatorname{Ad\,} (u\otimes 1)\quad\text{and}\quad\gamma_{1}(u\otimes
1)=\obstruction\, (u\otimes 1), \]
so that $\gamma_{1}$ is a $\mathbb{Z}_3$-kernel of $\freefactor\otimes R_{0}$ with obstruction
$\obstruction$ to lifting.
Observe also that in the above argument we have only used the abstract
group $\widehat{\mathbb{Z}}_{3}$ and the dual action defined on $\M\rtimes_{\widehat{\gamma}}\dualgr$. Hence we have
found a canonical way to associate to the von Neumann algebra $\mathcal {M}$ a
scalar, equal to $\obstruction$ in our case, which is invariant under
isomorphisms.
Now if $\mathcal {M}=\left ( \tenpro\right )\rtimes_{\gamma}\gruppo$ were anti-isomorphic to itself, then $\mathcal {M}$ and
$\mathcal {M} ^{c}$ would be isomorphic. But the obstruction
associated to $\gamma _{1}^{c}$, and therefore to $\mathcal {M} ^{c}$, is equal to
$\obstruconj$.
$
\blacksquare$
\section*{Acknowledgments}
We thank our advisor, Professor F. R\u{a}dulescu for his help and support,
Professor D. Bisch and Professor D. Shlyakhtenko for useful discussions,
and Dr. M. M\"{u}ger for his suggestions regarding this manuscript.
\end{document}
\begin{document}
\makeatletter \providecommand\@dotsep{5} \makeatother
\title[Risk-Sensitive Dividend Problems]{Risk-Sensitive Dividend Problems}
\author[N. \smash{B\"auerle}]{Nicole B\"auerle${}^*$}
\address[N. B\"auerle]{Department of Mathematics,
Karlsruhe Institute of Technology, D-76128 Karlsruhe, Germany}
\email{\href{mailto:[email protected]}
{[email protected]}}
\author[A. \smash{Ja\'{s}kiewicz}]{Anna Ja\'{s}kiewicz $^\ddag$}
\address[A. Ja\'{s}kiewicz]{Institute of Mathematics and Computer Science,
Wroc{\l}aw University of Technology, PL-50-370 Wroc{\l}aw, Poland}
\email{\href{mailto:[email protected]} {[email protected]}}
\thanks{${}^\ddag$ The second author is grateful to the Alexander von Humboldt Foundation for supporting her
research stay at the Institute of Stochastic at the KIT. This work was also partially supported
by the National Science Centre [Grant DEC-2011/03/B/ST1/00325].}
\begin{abstract}
We consider a discrete time version of the popular optimal dividend payout problem in risk theory.
The novel aspect of our approach is that we allow for a risk averse insurer, i.e., instead of maximising the expected discounted
dividends until ruin we maximise the expected {\em utility} of discounted dividends until ruin.
This task has been proposed as an open problem in \cite{gershiu}.
The model in a continuous-time Brownian motion setting with the exponential utility
function has been analysed in \cite{schach}. Nevertheless, a complete solution has not been provided.
In this work, we instead solve the problem in a discrete-time setup for the exponential and the power utility functions and give the structure
of optimal history-dependent dividend policies.
We make use of certain ideas studied earlier in \cite{br_mor}, where Markov decision processes with general utility functions
were treated. Our analysis, however, includes new aspects, since the reward functions in this case are not bounded.
\end{abstract}
\maketitle
\begin{minipage}{14cm}
{\small
\begin{description}
\item[\rm \textsc{ Key words} ]
{\small Markov decision process, Dividend payout, Risk aversion, History-dependent policy, Fixed point problem}
\item[\rm \textsc{AMS subject classifications} ]
{\small 90C40, 91B30 }
\end{description}
}
\end{minipage}
\section{Introduction}\label{sec:intro}
The dividend payout problem in risk theory was introduced by de Finetti \cite{deFinetti} and has since
been investigated under various extensions; see, for instance, \cite{turkey, schach}.
The task is to find, in a given model for the free surplus process of an insurance company,
a dividend payout strategy that maximises the expected discounted dividends until ruin.
Typical models for the surplus process are compound Poisson processes, diffusion processes, general renewal processes
or discrete time processes. The reader is referred to \cite{albrthonh} and \cite{avanzi}, where an excellent overview of recent results
is provided.
In \cite{gershiu} the authors propose the problem of maximising the expected {\em utility} of discounted dividends until ruin instead of
maximising the expected discounted dividends until ruin.
This means that an insurance company is equipped with some utility function that helps it to measure the accumulated dividends paid to
the shareholders. If this utility is increasing and concave, the company is risk averse (see Remark 2.2).
To the best of our knowledge, there is only one work \cite{schach},
in which this idea was taken up. More precisely, the authors in \cite{schach} consider a linear Brownian motion model
for the free surplus process and apply the exponential utility function to evaluate the discounted dividends until ruin. It turns out that
the mathematics involved in the analysis of this problem is quite different from the one used in the risk neutral case and
only partial results could be obtained. In contrast to the same problem with a risk neutral insurance company,
where the optimal dividend payout strategy is a barrier strategy (see e.g., \cite{at}), the authors in \cite{schach}
are not able to identify the structure of the optimal dividend policy rigorously. Imposing
some further assumptions, they show that there is a time-dependent optimal barrier.
We study the same problem but with a discrete time surplus process.
The risk neutral problem within such a framework can be found in Section 9.2 in \cite{br} or in Section 1.2 in \cite{s}.
By making use of the dynamic programming approach the authors in \cite{br} and \cite{s} prove that the optimal dividend payout
policy is a stationary band-strategy. In \cite{abt}, on the other hand, the authors consider a discrete time model
that is formulated with the aid of a general L\'evy surplus process but the dividend payouts are allowed
only at random discrete time points. This version can again be solved by the dynamic programming arguments.
However, the problem with a general utility function is more demanding.
Like in the continuous time setting \cite{schach}, it requires a sophisticated analysis.
It is worth mentioning that Markov decision processes with
general utility functions
have been already studied in \cite{br_mor,kkm}. Moreover, there are also some papers, where the specific utility functions
are considered. For example, Jaquette \cite{j1,j2} and Chung and Sobel \cite{cs} were among the first to examine
discounted payoffs in Markov decision processes with a decision maker that is equipped with constant risk aversion, i.e.,
who grades her random payoffs with the help of the exponential utility function.
The common feature of all the aforementioned papers is the fact that they deal with bounded rewards or costs.
Therefore, their results cannot be directly applied to our case, where the payoffs are unbounded.
We make use of the special structure of the underlying problem and show that the optimal dividend payout policy
is a time dependent band-strategy. The value function itself can be characterised as a solution
to a certain optimality equation.
Furthermore, we also study the dividend payout model with the power utility function.
As noted in \cite{br_mor}, the original Markov decision process can then be viewed as a Markov decision process
defined on the extended state space.
We employ these techniques to solve our model, but only in the first step, where we use an approximation of the value
function in the infinite time horizon by value functions in the finite time horizons.
In contrast to the exponential utility case, we can only partly identify the structure of the optimal dividend payout policy.
However, we are able to show that there is a barrier such that when the surplus is above the barrier,
it is always optimal to pay down to a state below the barrier.
The value function is again characterised as a solution to some optimality equation.
Summing up, the optimal dividend payout problem with the exponential utility function
can be solved completely in the discrete time case, in contrast to the continuous-time problem in \cite{schach},
whilst for the case with the power
utility function we are at least able to identify the important global structure of the optimal policy.
The paper is organised as follows. In the next section we introduce the model together with mild assumptions
and general history-dependent policies. Section 3 is devoted to a study of the exponential utility case.
We show first that the value function $J$ for discounted payoffs satisfies an optimality
equation and give a lower and an upper bound for $J$. Then, we identify properties of the minimiser of the right-hand side of
the optimality equation.
This enables us to show that the minimiser indeed defines an optimal policy, which is
a non-stationary band-policy. The non-stationarity is based only on the time-dependence.
The power utility case is treated in Section 4.
We pursue here a slightly different approach, but it also leads to an optimality equation.
The policies obtained in this setting are really
history-dependent. Nonetheless, we are still able to show that the optimal policy is of a barrier-type.
In Section 5 we provide the policy improvement algorithm for the model with the exponential utility.
Finally, Section 6 is devoted to concluding remarks and open issues.
\section{The Model}\label{sec:mod}
We consider the financial situation of an insurance company at discrete times,
say $n \in\mathbb{N}_0:=\{0, 1, 2,\ldots\}.$
Assume there is an initial surplus $x_0 = x\in X:=\mathbb{Z}$ with $x_0\ge 0.$
The surplus $x_{n+1}$ at time $n+1$ evolves according to the following equation
\begin{equation}
\label{maineq}
x_{n+1} = x_n-a_n+
Z_{n+1},\; \mbox{if }x_n\ge 0\quad\mbox{and}\quad
x_{n+1} = x_n,\; \mbox{if }x_n< 0.
\end{equation}
Here $a_n\in A(x_n):=\{0,\ldots,x_n\}$ denotes the dividends paid to the shareholders at time $n,$ and $Z_{n+1}$ represents the income
(possibly negative) of the company during the time interval from $n$ to $n+1.$
More precisely, $Z_{n+1}$ is the
difference between premium and claim sizes in the $(n+1)$-st time interval.
Further, we assume that
$Z_1, Z_2,\ldots$ are independent and identically distributed integer-valued random variables with distribution
$(q_k)_{k\in \mathbb{Z}}$, i.e., $\mathbb{P}(Z_n=k)=q_k,$ $k\in \mathbb{Z}.$
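For illustration, the dynamics (\ref{maineq}) are straightforward to simulate. The sketch below assumes a two-point income distribution and an ad hoc barrier policy (neither choice is taken from this paper) and produces Monte Carlo samples of the discounted dividend stream $\sum_{k=0}^{\tau-1}\beta^{k}a_{k}$:
\begin{verbatim}
# Illustrative simulation of the surplus dynamics under a barrier policy.
# The income distribution and the barrier are ad hoc choices for demonstration.
import random

def discounted_dividends(x0, barrier, beta, p_up, horizon=10000):
    """One sample path: pay everything above `barrier`, stop at ruin."""
    x, total, disc = x0, 0.0, 1.0
    for _ in range(horizon):
        if x < 0:                                 # ruin: no further dividends
            break
        a = max(x - barrier, 0)                   # dividend a_n in {0, ..., x_n}
        total += disc * a
        z = 1 if random.random() < p_up else -1   # two-point income Z_{n+1}
        x = x - a + z
        disc *= beta
    return total

random.seed(0)
samples = [discounted_dividends(x0=5, barrier=3, beta=0.95, p_up=0.6)
           for _ in range(10000)]
print(sum(samples) / len(samples))   # Monte Carlo estimate of E[sum beta^k a_k]
\end{verbatim}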
A dividend payout problem in risk theory can be viewed as a Markov decision process with the state space $X,$
the set of actions $A(x)$ available in state $x$ (for completeness, we put $A(x)=\{0\}$ for $x<0$)
and the transition probability $q(\cdot|x,a)$ of
the next state, when $x$ is the current state and $a$ is the amount of dividend paid to the shareholders.
Note that the dynamics of equation (\ref{maineq}) implies that
$q(y|x,a)=q_{y-x+a}$ for $x\ge 0$ and $q(x|x,a)=1$ if $x<0.$
For the set of admissible pairs $D:=\{(x,a): \;x\in X,\;a\in A(x)\}$ we define the function
$r: D\to \mathbb{R}$ as $r(x,a)=a$ for $(x,a)\in D.$
The feasible history spaces are defined as follows: $\Omega_0=X,$ $\Omega_k=D^k\times X$ and
$\Omega_\infty=D^\infty.$ A {\it policy} $\pi = (\pi_{k})_{k\in \mathbb{N}_0}$
is a sequence of mappings from $\Omega_k$ to $A$ such that
$\pi_k(\omega_k)\in A(x_k),$ where $\omega_k=(x_0,a_0,\ldots,x_k)\in \Omega_k.$
Let $\Gamma$ be the class of all functions $g:X\to A$ such that $g(x)\in A(x).$
A Markov policy is $\pi=(g_k)_{k\in\mathbb{N}_0},$ where each $g_k\in \Gamma.$
By $\Pi$ and $\Pi^M$ we denote the set of all history-dependent and Markov policies, respectively.
By the Ionescu-Tulcea theorem \cite{n},
for each policy $\pi$ and each initial state $x_0=x,$ a probability measure $\mathbb{P}_x^\pi$
and a stochastic process $(x_k,a_k)_{k\in\mathbb{N}_0}$ are defined on $\Omega_\infty$
in a canonical way, where $x_k$ and $a_k$ describe the state and the decision at stage $k,$
respectively.
By $\mathbb{E}_x^\pi$ we denote the expectation operator with respect to the probability measure $\mathbb{P}_x^\pi.$
Ruin occurs as soon as the surplus gets negative.
The epoch $\tau$ of ruin is defined as the smallest integer $n$
such that $x_n <0.$
The question arises as to how the risk-sensitive insurance company, equipped with some
utility function, will choose its dividend strategy. More precisely, we shall consider
the following optimisation problem
$$\sup_{\pi\in\Pi} \mathbb{E}_x^\pi U_\gamma\left(\sum_{k=0}^\infty \beta^k r(x_k,a_k)\right) =
\sup_{\pi\in\Pi} \mathbb{E}_x^\pi U_\gamma\left(\sum_{k=0}^{\tau-1}\beta^k a_k\right),\quad x\ge 0,$$
where $\beta\in (0,1)$ is a discount factor and either
\begin{itemize}
\item[(1)] $U_\gamma$ is the exponential utility function,
i.e., $U_\gamma(x)=\frac 1{\gamma} e^{\gamma x}$ with $\gamma<0,$ or
\item[(2)] $U_\gamma$ is the power utility function, i.e., $U_\gamma(x)=x^{\gamma}$ with $\gamma\in (0,1).$
\end{itemize}
Let $Z$ be a random variable with the same distribution as $Z_1.$
Throughout the paper the following assumptions will be supposed to hold true.
\begin{itemize}
\item[(A1)] $\mathbb{E} Z^+<+\infty,$ where $Z^+=\max\{Z,0\};$
\item[(A2)] $\mathbb{P}(Z<0)>0.$
\end{itemize}
Assumption (A2) allows us to avoid the trivial case in which ruin never occurs
under any policy $\pi\in \Pi.$
\begin{remark}
In our study, we assume that the random variables $\{Z_n\}$ only take integer values and the initial capital is
also integer. From the proof of Lemma 1.9 in \cite{s}, it follows that in our problem
we can restrict ourselves without loss of generality to integer dividend payments. \end{remark}
\begin{remark}
If the function $U_\gamma$ is strictly concave and increasing as in our case, then the quantity $U_\gamma^{-1} \big( \mathbb{E}[U_\gamma(X)]\big)$
is called a {\em certainty equivalent} of the random variable $X.$ From the optimisation point of view it does not matter which value
$U_\gamma^{-1} \big( \mathbb{E}[U_\gamma(X)]\big)$ or $ \mathbb{E}[U_\gamma(X)]$ we study, because
the inverse function $U_\gamma^{-1}$ is monotonic. However, the certainty equivalent has an important meaning. If we apply the Taylor
expansion, then the certainty equivalent can be written as follows
$$ U^{-1}_\gamma\Big(\mathbb{E}\big[U_\gamma(X)\big]\Big)\approx \mathbb{E} X - \frac12 l(\mathbb{E} X) \operatorname{Var}[X],$$
where $$ l(y) = -\frac{U''_\gamma(y)}{U'_\gamma(y)}$$ is called the {\em Arrow-Pratt} function of absolute risk aversion. Hence,
the second term accounts for the variability of $X$ (for a discussion see \cite{bpli}). If $U_\gamma$ is concave like in our case,
then $l(\cdot)\ge 0$ which means that the variance is subtracted. This fact implies that the decision maker is risk averse.
\end{remark}
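A small numerical illustration of the certainty equivalent under the exponential utility (the payoff distribution below is an arbitrary example):
\begin{verbatim}
# Certainty equivalent under U(x) = (1/gamma) * exp(gamma * x), gamma < 0,
# for a two-point payoff; the numbers are arbitrary and serve as illustration only.
import math

gamma = -0.5
payoffs, probs = [0.0, 10.0], [0.5, 0.5]

mean = sum(p * x for p, x in zip(probs, payoffs))
eu = sum(p * math.exp(gamma * x) / gamma for p, x in zip(probs, payoffs))
ce = math.log(gamma * eu) / gamma     # U^{-1}(E[U(X)])

print(mean, ce)   # the certainty equivalent lies below the mean: risk aversion
\end{verbatim}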
\section{The Exponential Utility Function}
In this section we assume that the insurer is risk averse and grades her random payoffs
by taking the expectations
of the exponential utility function of these random rewards. More precisely,
we assume that the decision maker is
equipped with the constant risk coefficient $\gamma<0.$
The objective of the risk averse insurer is to maximise
the expected discounted payoff function:
$$ \widetilde{J}_\pi(x)= \mathbb{E}_x^\pi U_\gamma\left(\sum_{k=0}^{\infty}\beta^k r(x_k,a_k)\right)$$
and to find a policy $\pi^*\in\Pi$ (if it exists) such that
\begin{equation}
\label{optpp}
\widetilde{J}(x):=\widetilde{J}_{\pi^*}(x)=\sup_{\pi\in\Pi} \widetilde{J}_\pi(x)
\end{equation}
for all $x\in X.$ It is obvious that the optimal policy $\pi^*$ would depend on $\gamma$ and $\beta.$
Clearly, $\widetilde{J}(x)=\frac1\gamma$ for every $x<0.$
\subsection{Optimality equation and the properties of its largest minimiser}
Our discounted model with the exponential utility function
reveals some kind of non-stationarity
that is implied by the discount factor. Therefore, one can extend the state space in the following way:
$\widetilde{X}:=X\times I,$ with $I:=[\gamma,0)$ (cf. also \cite{ds1}, \cite{br_mor}).
If the process is in the state $(x,\theta)$ and the
insurer selects an action
$a\in A(x),$ then the probability
of moving to a next state $(x',\theta')$ is $q(x'|x,a),$ if $\theta'=\theta\beta$ and is $0,$ if
$\theta'\not=\theta\beta.$
The second component of the state space keeps track of the discount factor
that changes over time in a deterministic way.
Furthermore, we can define an {\it extended history-dependent policy}
$\sigma = (\sigma_{k})_{k\in \mathbb{N}_0},$ where
$\sigma_{k}$ is a mapping from the set of extended feasible histories up to the $k$th day
to the action set $A$ such that
$\sigma_k(h_k) \in A(x_k)$ with
$h_k=(x_0,\theta, a_0,\ldots,a_{k-1},x_k,\theta\beta^k),$ $k\ge 1$ and $h_0=(x_0,\theta).$
Recall that $(x_m,a_m)\in D$ for $m\in\mathbb{N}_0.$
Let $\Xi$ be the set of all extended history-dependent policies.
Note that for any $\sigma\in\Xi$ ($\theta\in I$ is fixed), there exists a policy $\pi\in\Pi$ that is
equivalent to $\sigma$ in the following sense:
$$\pi_k(\cdot|\omega_k):=\sigma_k(\cdot|h_k),\quad k\in \mathbb{N}_0.$$
Obviously, $\pi$ depends on $\theta\in I.$
Therefore, for simplicity of notation we shall still use the original policies $\pi\in\Pi,$ and the
expectation operator $\mathbb{E}_x^\pi,$
where $x$ is the first component of the initial state.
The dependence on $\theta\in I$ will be denoted by adding
the second variable to the value function.
For any initial state $(x,\theta)\in \widetilde{X}$ we define
$$J(x,\theta):=\inf_{\pi\in\Pi} J_\pi(x,\theta),\quad\mbox{where}\quad
J_\pi(x,\theta):=\mathbb{E}_x^\pi \left(\exp\left\{\theta\sum_{k=0}^{\infty}
\beta^k r(x_k,a_k)\right\}\right)$$
for $\pi\in\Pi.$
Obviously, $0\le J_\pi(x,\theta)\le 1$ for all $x\in X,$ $\theta\in I$ and $\pi\in \Pi.$
Observe next that our optimisation problem (\ref{optpp}) is equivalent to the problem of
minimising
$J_\pi(x,\gamma)$ over $\pi\in\Pi.$
By $C(\widetilde{X})$ we denote the space of bounded continuous real-valued functions on
$\widetilde{X}.$\\
\begin{theorem}\label{theo:fixed_point}
For every $(x,\theta) \in \widetilde{X}$ the function $J$ is a solution
to the following discounted optimality equation
\begin{eqnarray}\label{eq:fixedpoint}J(x,\theta)&=&\min_{a\in A(x)}\left[e^{\theta r(x,a)}
\sum_{x'\in X} J(x',\theta\beta)q(x'|x,a)\right]\\
\nonumber &=& \min_{a\in \{0,\ldots,x\}}\left[e^{\theta a}
\left(\sum_{k=a-x}^\infty J(x-a+k,\theta\beta)q_k+ \sum_{k=-\infty}^{a-x-1} q_k\right)\right]
\end{eqnarray}
\end{theorem}
\begin{proof}
Clearly, $J(x,\theta)=1$ for $x<0$ and all $\theta\in I.$
Consider the truncated payoff functions
$r_m(x,a)=\min\{m,r(x,a)\}$ with $m\in \mathbb{N}.$
From Proposition 3.1 in \cite{ds1} there is a unique function $w_m\in C(\widetilde{X})$ such that
\begin{equation}
\label{oen}
w_m(x,\theta)=\min_{a\in A(x)}\left[e^{\theta r_m(x,a)}\sum_{x'\in X}
w_m(x',\theta\beta)q(x'|x,a)\right]
\end{equation}
and $w_m(x,\theta)=J_m(x,\theta)$ for all $(x,\theta)\in \widetilde{X}.$
Here, $J_m(x,\theta)$ denotes the optimal discounted payoff function with $r$ replaced by $r_m$ in the definition
of $J(x,\theta).$
Clearly, the sequence $(w_m(x,\theta))_{m\in \mathbb{N}}$ is non-increasing for each $(x,\theta)\in \widetilde{X}.$
Therefore, $\lim_{m\to \infty} w_m(x,\theta)=:w(x,\theta)$ exists.
It is obvious that
$$ w_m(x,\theta)=J_m(x,\theta)\ge J(x,\theta),\quad \mbox{ for } (x,\theta)\in \widetilde{X}.$$
Hence,
\begin{equation}
\label{o1}
w(x,\theta)\ge J(x,\theta), \quad \mbox{ for } (x,\theta)\in \widetilde{X}.
\end{equation}
On the other hand, letting $m\to\infty$ in (\ref{oen}), making use of the dominated convergence theorem
and the fact that $A(x)$ is finite for each $x\in X,$ we infer that
\begin{eqnarray}
\label{oeq}
\nonumber
w(x,\theta)&=&\lim_{m\to\infty}\min_{a\in A(x)}\left[e^{\theta r_m(x,a)}\sum_{x'\in X}
w_m(x',\theta\beta)q(x'|x,a)\right]\\
&=&\min_{a\in A(x)}\left[e^{\theta r(x,a)}\sum_{x'\in X}
w(x',\theta\beta)q(x'|x,a)\right]
\end{eqnarray}
for $(x,\theta)\in \widetilde{X}.$ Hence, for any $a\in A(x)$
$$w(x,\theta)\le e^{\theta r(x,a)}\sum_{x'\in X}
w(x',\theta\beta)q(x'|x,a).$$
Iterating this inequality $(n-1)$ times along an arbitrary policy $\pi\in\Pi$
we conclude that
$$w(x,\theta)\le \mathbb{E}_x^{\pi} \left(\exp\left\{\theta\sum_{k=0}^{n-1}\beta^k r(x_k,a_k)\right\}
w(x_n,\theta\beta^n)\right).$$
Since $w\le w_n\le 1,$ we have that
$$w(x,\theta)\le \mathbb{E}_x^{\pi} \left(\exp\left\{\theta\sum_{k=0}^{n-1}\beta^k r(x_k,a_k)\right\}\right).
$$
Letting $n\to\infty$ and applying the dominated convergence theorem
we have that
$w(x,\theta)\le J_\pi(x,\theta)$ for
$(x,\theta)\in \widetilde{X}.$ Since the policy $\pi$ was chosen arbitrarily, we get that
\begin{equation}
\label{o2}
w(x,\theta)\le J(x,\theta), \quad \mbox{ for } (x,\theta)\in \widetilde{X}.
\end{equation}
Now the assertion follows from (\ref{o1}) and (\ref{o2}).
\end{proof}
\begin{remark}
Among all functions $w$ which satisfy equation \eqref{eq:fixedpoint} and have the property
that $w(x,\theta)=1$ for $x<0$ and $w(x,\theta)\in (0,1]$ for all $(x,\theta)\in \widetilde{X},$
the value function $J$ is the largest solution. This fact follows from the last part of the proof.
\end{remark}
\begin{remark}
Theorem \ref{theo:fixed_point} was proved in the literature for the general state space, weakly continuous transition probabilities
and {\it bounded} costs or rewards \cite{br_mor, ds1}. However, we deal with unbounded payoffs;
therefore, we have to truncate them at the level $m$
and then let $m$ tend to infinity. Such a procedure may also be useful from the numerical point of view.
\end{remark}
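For instance, since $\theta\beta^{j}\to 0^{-}$, equation (\ref{eq:fixedpoint}) links the level $\theta_{j}=\gamma\beta^{j}$ only to the level $\theta_{j+1}=\gamma\beta^{j+1}$, so $J$ can be approximated by a finite backward recursion starting from $J(\cdot,\theta_{N})\approx 1$ for large $N$. The sketch below uses an illustrative two-point income distribution, a truncated surplus grid and ad hoc parameters; none of these choices is taken from the paper:
\begin{verbatim}
# Approximate solution of J(x,theta) = min_a e^{theta a} sum_k J(x-a+k, theta beta) q_k
# via a backward pass over theta_j = gamma * beta^j, j = N,...,0, with J ~ 1 at level N.
# Income Z in {+1,-1}; the surplus is truncated at X_MAX (states above are clamped).
import math

GAMMA, BETA, P_UP = -0.2, 0.9, 0.6
X_MAX, N_LEVELS = 60, 200
Q = {1: P_UP, -1: 1.0 - P_UP}

def apply_operator(J_next, theta):
    """One application of the optimality operator at level theta."""
    J = [1.0] * (X_MAX + 1)
    for x in range(X_MAX + 1):
        best = float("inf")
        for a in range(x + 1):                       # a in A(x) = {0, ..., x}
            s = 0.0
            for k, q in Q.items():
                y = x - a + k
                s += q * (1.0 if y < 0 else J_next[min(y, X_MAX)])  # J = 1 after ruin
            best = min(best, math.exp(theta * a) * s)
        J[x] = best
    return J

J = [1.0] * (X_MAX + 1)                              # level N: theta ~ 0, so J ~ 1
for j in range(N_LEVELS - 1, -1, -1):                # levels N-1, ..., 0
    J = apply_operator(J, GAMMA * BETA ** j)

print([round(val, 4) for val in J[:8]])              # approximation of J(x,gamma), x = 0..7
\end{verbatim}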
Let us now consider the policy $\pi^+=(g,g,\ldots),$ where $g(x)=x^+$
for every $x\in X.$ Hence, this policy asks the insurer to pay out
everything at each time point until ruin occurs.
Denote by $\vec{\pi}$ the ``1-shifted'' policy for $\pi=(\pi_k)_{k\in\mathbb{N}_0}$, that is,
$\vec{\pi}=(\vec{\pi}_k)_{k\in\mathbb{N}_0},$ where
\begin{equation}
\label{shift_p}
\vec{\pi}_k(\cdot|\omega_k)=\pi_{k+1}(\cdot|x_0,a_0,\omega_k)\quad\mbox{for}\quad \omega_k\in\Omega_k
\quad\mbox{and}\quad k\in \mathbb{N}_0.
\end{equation}
\begin{lemma}\label{lem:bounds}
For any $x\ge 0$ and $\theta\in I$ the following inequalities hold
$$ e^{\theta x} \underline{h}(\theta)\le J(x,\theta)\le e^{\theta x} \overline{h}(\theta),$$
with
$$\underline{h}(\theta):= \prod_{k=1}^\infty \mathbb{E}\left(\exp\{\theta\beta^k Z_k^+\}\right)
\quad\mbox{and}\quad \overline{h}(\theta):=
\sum_{m=-\infty}^\infty
\mathbb{E}_m^{\pi^+}\left(\exp\left\{\theta\sum_{k=1}^{\tau-1} \beta^k
x_k\right\}\right)q_m,
$$
where $x_1=m$ and the empty sum is $0.$
\end{lemma}
\begin{proof} We start with the upper bound. Since $x\ge 0,$ we have $\tau\ge 1.$
For the policy $\pi^+$ we have that
\begin{eqnarray*}
J(x,\theta)&\le& J_{\pi^+}(x,\theta)=\mathbb{E}_x^{\pi^+} \left(\exp\left\{\theta\sum_{k=0}^{\tau-1} \beta^k
x_k\right\}\right)
=e^{\theta x}\mathbb{E}_x^{\pi^+} \left(\exp\left\{\theta\sum_{k=1}^{\tau-1} \beta^k
x_k\right\}\right)\\
&=& e^{\theta x} \mathbb{E}_x^{\pi^+}\left[ \mathbb{E}_x^{\pi^+} \left(\exp\left\{\theta\sum_{k=1}^{\tau-1} \beta^k
x_k\right\}\Big|a_0,x_1\right)\right]\quad(\mbox{under $\pi^+$ we have that $x_1=Z_1$})\\
&=& e^{\theta x}\mathbb{E}_{x}^{\pi^+} \left[\mathbb{E}_{Z_1}^{\vec{\pi}^+}
\left(\exp\left\{\theta\sum_{k=1}^{\tau-1} \beta^k
x_k\right\}\right)\right]\\
&=& e^{\theta x}
\left(\sum_{m=-\infty}^\infty \mathbb{E}_{m}^{\pi^+} \left(\exp\left\{\theta\sum_{k=1}^{\tau-1} \beta^k
x_k\right\}\right)q_m\right),
\end{eqnarray*}
where in the last equality we make use of the fact that $\mathbb{P}(Z_1=m)=q_m$ and $\vec{\pi}^+=\pi^+.$
On the other hand, the lower bound can be obtained as follows. First, we claim that for $\pi\in\Pi$ and
$(x,\theta)\in\widetilde{X}$ with $x\ge 0$
\begin{equation}
\label{ind}
e^{\theta x} \prod_{k=1}^{n-1} \mathbb{E}\left(\exp\{\theta\beta^k Z_k^+\}\right)
\le
\mathbb{E}_{x}^{\pi}\left(\exp\left\{\theta\sum_{k=0}^{n-1}\beta^k r(x_k,a_k)\right\}\right).
\end{equation}
We proceed by induction. Clearly, $e^{\theta x}\le \min_{a\in A(x)} e^{\theta r(x,a)}.$
Assume now that (\ref{ind}) holds for some $n\ge 1$ and every $x\ge 0$ and $\theta\in I.$
Let $\pi=(\pi_n)_{n\in\mathbb{N}_{0}}$ be any policy. Then, it follows that
\begin{eqnarray*}
\lefteqn{
\mathbb{E}_{x}^{\pi}\left(\exp\left\{\theta\sum_{k=0}^{n}\beta^k r(x_k,a_k)\right\}\right)
\ge e^{\theta r(x,\pi_0(x))}\mathbb{E}_x^{\pi}\left[
\mathbb{E}_x^{\pi}\left(\exp\left\{\theta\sum_{k=1}^{n}\beta^k r(x_k,a_k)\right\}\Big|a_0,x_1\right) \right] }\\
&\ge&
\min_{a\in A(x)}e^{\theta r(x,a)}\mathbb{E}_{x}^{\pi}\left[ 1[x_1\ge 0]\mathbb{E}_{x_1}^{\vec{\pi}}
\left(\exp\left\{\theta\sum_{k=1}^{n}\beta^k r(x_k,a_k)\right\}\right)+1[x_1< 0]\right]\\
&\ge& \min_{a\in A(x)}e^{\theta a}\mathbb{E}_{x}^{\pi}\left[1[x_1\ge 0]e^{\theta\beta x_1}
\prod_{k=2}^{n} \mathbb{E}\left(\exp\{\theta\beta^{k} Z_k^+\}\right)+1[x_1< 0]\right]
\quad\mbox{(by (\ref{ind}))}\\
&\ge& \min_{a\in A(x)}e^{\theta a}\mathbb{E}_{x}^{\pi}\left[1[x_1\ge 0]e^{\theta\beta x_1}+1[x_1< 0]\right]
\prod_{k=2}^{n} \mathbb{E}\left(\exp\{\theta\beta^{k} Z_k^+\}\right).
\end{eqnarray*}
Furthermore, we have that
\begin{eqnarray*}
\lefteqn{\min_{a\in A(x)}e^{\theta a}\mathbb{E}_{x}^{\pi}\left[1[x_1\ge 0]e^{\theta\beta x_1}+1[x_1< 0]\right]}\\
&\ge&
\min_{a\in A(x)} e^{\theta a} \Big( \sum_{k=a-x}^\infty e^{\theta\beta (x-a+k)}
q_k+ \sum_{k=-\infty}^{a-x-1}q_k\Big) \\
&\ge & \min_{a\in A(x)} e^{\theta a} \Big( \sum_{k=1}^\infty e^{\theta\beta (x-a+k)} q_k+
\sum_{k=-\infty}^{0}e^{\theta\beta (x-a)} q_k\Big) \\
&= & \min_{a\in A(x)} e^{\theta a(1-\beta)} \Big( \sum_{k=1}^\infty e^{\theta\beta (x+k)} q_k+
\sum_{k=-\infty}^{0}e^{ \theta \beta x}q_k\Big) \\
&=& e^{\theta x(1-\beta)} e^{\theta\beta x} \mathbb{E} e^{\theta\beta Z_1^+}=
e^{\theta x} \mathbb{E} e^{\theta\beta Z_1^+}.
\end{eqnarray*}
Hence, we conclude that
\begin{eqnarray*}
\mathbb{E}_{x}^{\pi}\left(\exp\left\{\theta\sum_{k=0}^{n}\beta^k r(x_k,a_k)\right\}\right) &\ge&
e^{\theta x} \mathbb{E} e^{\theta\beta Z_1^+}
\prod_{k=2}^{n} \mathbb{E}\left(\exp\{\theta\beta^{k} Z_k^+\}\right)\\&=& e^{\theta x}
\prod_{k=1}^{n} \mathbb{E}\left(\exp\{\theta\beta^{k} Z_k^+\}\right).
\end{eqnarray*}
Therefore, (\ref{ind}) holds for every $n\in \mathbb{N},$ $x\ge 0,$ $\theta\in I$ and $\pi\in\Pi.$
Now letting $n\to\infty$ in (\ref{ind}) and making use
of the dominated convergence theorem we obtain
the lower bound for $J(x,\theta).$
\end{proof}
\begin{remark}
Note that since $\beta<1,$ we obtain by Jensen's inequality and assumption (A1) that
$$ \underline{h}(\theta) \ge \exp\left(\theta \sum_{k=1}^\infty \beta^k \mathbb{E} Z_k^+\right) > 0,\quad \theta\in I.$$
This observation is essential in Theorem \ref{theo:xi_finite}, where we have to take the logarithm of $\underline{h}$.
Also note that $\overline{h}(\theta)\le 1$ since $\theta \in I$.
\end{remark}
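For illustration, when the income distribution has finite support the factor $\underline{h}(\theta)$ can be approximated by truncating the infinite product, whose factors tend to $1$. The following sketch is only an illustration; the truncation level, the representation of the income distribution as a Python dictionary and the function name are our own choices, not part of the model.
\begin{verbatim}
import numpy as np

def h_lower(theta, beta, q, n_terms=200):
    """Approximate underline{h}(theta) = prod_{k>=1} E exp(theta*beta^k*Z_k^+)
    by keeping the first n_terms factors (truncation is our own choice)."""
    val = 1.0
    for k in range(1, n_terms + 1):
        # E exp(theta*beta^k*Z^+) for a finitely supported income law q = {z: prob}
        val *= sum(p * np.exp(theta * beta**k * max(z, 0)) for z, p in q.items())
    return val

# example: h_lower(-0.5, 0.9, {1: 0.6, -1: 0.4})
\end{verbatim}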
Let $x\ge 0.$ For any $\theta \in I$ let us denote
$$ G(x,\theta) := \sum_{k=-x}^\infty J(x+k,\theta) q_k+\sum_{k=-\infty}^{-x-1} q_k,$$
then
\begin{equation}
\label{optg}
J(x,\theta) = \min_{a\in A(x)} \left[e^{\theta a} G(x-a,\theta\beta)\right],\quad x\ge 0.
\end{equation}
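For illustration only, the recursion behind \eqref{optg} can be approximated by a finite-horizon value iteration on the grid $\theta_k=\gamma\beta^k$. The sketch below is ours and not part of the model: it assumes a finitely supported income distribution, truncates the state space at \texttt{x\_max}, and leaves the value at the smallest grid parameter equal to $1$.
\begin{verbatim}
import numpy as np

def value_iteration_exponential(gamma, beta, q, x_max, n_steps):
    """Finite-horizon approximation of J(x, gamma*beta**k) (gamma < 0) and of the
    greedy dividend rule; q = {z: prob} is a finitely supported income law."""
    thetas = [gamma * beta**k for k in range(n_steps + 1)]
    V = np.ones((n_steps + 1, x_max + 1))          # horizon 0: empty sum, exp(0) = 1
    policy = np.zeros((n_steps + 1, x_max + 1), dtype=int)
    for _ in range(n_steps):                       # extend the horizon step by step
        V_new = np.ones((n_steps + 1, x_max + 1))  # row n_steps stays 1 (theta ~ 0)
        for k in range(n_steps):                   # theta_k uses the table at theta_{k+1}
            for x in range(x_max + 1):
                best, best_a = np.inf, 0
                for a in range(x + 1):             # A(x) = {0, 1, ..., x}
                    cont = sum(p * (V[k + 1, min(x - a + z, x_max)]
                                    if x - a + z >= 0 else 1.0)
                               for z, p in q.items())
                    val = np.exp(thetas[k] * a) * cont
                    if val < best:
                        best, best_a = val, a
                V_new[k, x], policy[k, x] = best, best_a
        V = V_new
    return V, policy
\end{verbatim}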
\begin{lemma}\label{lem:w_decreasing}
The function $J(\cdot,\theta)$ is decreasing for each $\theta\in I$ and
$$ J(x,\theta) \le e^{\theta (x-v)} J(v,\theta)$$ for $ x\ge v\ge 0.$
\end{lemma}
\begin{proof}
Suppose $0\le v< x.$ Then, it follows that
\begin{eqnarray*}
J(x,\theta)&=&\min\left\{ G(x,\theta\beta), \ldots, e^{\theta(x-v-1)}
G(v+1,\theta\beta),\min\left\{e^{\theta (x-v)} G(v,\theta\beta),
\ldots ,e^{\theta (x-0)} G(0,\theta\beta)\right\}\right\}\\
& = & \min\left\{ G(x,\theta\beta), \ldots,
e^{\theta(x-v-1)} G(v+1,\theta\beta),e^{\theta (x-v)} J(v,\theta)\right\}\\
& \le & e^{\theta (x-v)} J(v,\theta).
\end{eqnarray*}
Observe that for $v=x-1$ we obtain from the above inequality that $$J(x,\theta)\le e^\theta J(x-1,\theta)
< J(x-1,\theta).$$
This fact finishes the proof.
\end{proof}
Now let $f^*:\widetilde{X}\mapsto A$ be the largest minimiser of the right-hand side in
(\ref{optg}) for $x\ge 0$ and let $f^*(x,\theta)=0$ for $x<0$ and all $\theta\in I.$
Then, $f^*(\cdot,\theta)\in \Gamma.$
\begin{lemma}\label{lem:no_dividend_after_payout}
For $x\ge 0$ it holds that $f^*(x-f^*(x,\theta),\theta)=0$ and
$$ J(x,\theta) = e^{\theta f^*(x,\theta)} J(x-f^*(x,\theta),\theta).$$
\end{lemma}
\begin{proof}
By (\ref{optg}) we have that $J(x-f^*(x,\theta),\theta) \le G(x-f^*(x,\theta),\theta\beta).$
By Lemma \ref{lem:w_decreasing} (set $v:= x-f^*(x,\theta)$) it follows that
\begin{eqnarray*}
J(x,\theta) &=& e^{\theta f^*(x,\theta)} G(x-f^*(x,\theta),\theta\beta)
\ge e^{\theta f^*(x,\theta)} J(x-f^*(x,\theta),\theta)\\
&\ge & J(x,\theta).
\end{eqnarray*}
Thus we have equality and, in particular, $J(x-f^*(x,\theta),\theta)=G(x-f^*(x,\theta),\theta\beta),$
which implies that $a=0$ minimises the expression $e^{\theta a}G(x-f^*(x,\theta)-a,\theta\beta).$
We claim that $a=0$ is the only minimiser of the above expression. Note that, if $f^*(x,\theta)=x,$
then $J(x-f^*(x,\theta),\theta)=J(0,\theta)$ and the result holds true. If, on the other hand,
$f^*(x,\theta)<x,$ then
$$J(x,\theta)=e^{\theta f^*(x,\theta)}G(x-f^*(x,\theta),\theta\beta)<
e^{\theta (f^*(x,\theta)+a)}G(x-f^*(x,\theta)-a,\theta\beta)$$
for $a=1,\ldots,x-f^*(x,\theta).$ This fact implies that $a=0$ is indeed the only minimiser
and, consequently,
$f^*(x-f^*(x,\theta),\theta)=0$.
\end{proof}
\begin{theorem}\label{theo:xi_finite}
Let $\xi(\theta) := \sup\{x\in\mathbb{N}_0 : \;f^*(x,\theta)=0\}$. Then
$\xi^*:=\sup_{\theta\in I}\xi(\theta)<\infty$ and
$$ f^*(x,\theta) = x-\xi(\theta),\quad \mbox{for all } \;x> \xi(\theta).$$
\end{theorem}
\begin{proof} Fix $\theta\in I$ and let $x\ge 0$ be such that $f^*(x,\theta)=0.$ Note that
such $x$ exists for each $\theta\in I$, because $f^*(0,\theta)=0$ for all $\theta\in I.$
From (\ref{optg}) we obtain that
\begin{equation}
\label{ab}
J(x,\theta)=\sum_{k=-x}^{\infty}J(x+k,\theta\beta)q_k+
\sum_{k=-\infty}^{-x-1}q_k.
\end{equation}
Furthermore, by (\ref{ab}), Lemmas \ref{lem:bounds} and \ref{lem:w_decreasing} we have that
\begin{eqnarray*}
J(x,\theta)&\ge& \sum_{k=1}^\infty J(x+k,\theta\beta)q_k+ \sum_{k=-\infty}^{0} J(x,\theta\beta)q_k\\
&\ge& \sum_{k=1}^\infty e^{\theta\beta (x+k)}\underline{h}(\theta\beta)q_k+\sum_{k=-\infty}^{0}
e^{\theta\beta x}\underline{h}(\theta\beta)q_k\\
&=& e^{\theta\beta x} \underline{h}(\theta\beta)\left(\sum_{k=1}^\infty e^{\theta\beta k}q_k+
\sum_{k=-\infty}^{0}q_k\right)= e^{\theta\beta x} \underline{h}(\theta).
\end{eqnarray*}
Hence,
$$e^{\theta\beta x} \underline{h}(\theta)\le J(x,\theta)\le e^{\theta x}\overline{h}(\theta),$$
which implies that
$$x\le \frac{\ln\overline{h}(\theta)-\ln\underline{h}(\theta)}{\theta(\beta-1)}=:s(\theta).$$
The function $s(\cdot)$ is continuous on $I$ and is finite for each $\theta\in I.$
Additionally,
\begin{eqnarray*}
\lim_{\theta\to 0^-} s(\theta)&=&\lim_{\theta\to 0^-} \left(\frac{\overline{h}'(\theta)}
{\overline{h}(\theta)}-\frac{\underline{h}'(\theta)}{\underline{h}(\theta)} \right)/(\beta-1)\\
&=&
\left(\sum_{k=1}^\infty \beta^k\mathbb{E} (Z_k^+) -
\sum_{m=0}^\infty \mathbb{E}_m^{\pi^+}\left(\sum_{k=1}^{\tau-1}\beta^k x_k\right)q_m\right)/(1-\beta)<+\infty,
\end{eqnarray*}
which follows by assumption (A1).
Thus, we have shown that $\sup_{\theta\in I}\xi(\theta)<+\infty$. Now let $x> \xi(\theta)$.
We know from Lemma \ref{lem:no_dividend_after_payout} that $f^*(x-f^*(x,\theta),\theta)=0,$
which implies by definition of $\xi(\theta)$ that $f^*(x,\theta)\ge x-\xi(\theta).$
On the other hand, by \eqref{optg} we obtain that
\begin{eqnarray*}
J(\xi(\theta),\theta) &\le & e^{\theta (f^*(x,\theta)-(x-\xi(\theta)))}
G\Big(\xi(\theta)-f^*(x,\theta)+\big(x-\xi(\theta)\big),\theta\beta\Big) \\
&=& J(x,\theta) e^{-\theta (x-\xi(\theta))} \le J(\xi(\theta),\theta),
\end{eqnarray*}
where the last inequality follows from Lemma \ref{lem:w_decreasing}.
Thus, because $f^*$ is the largest minimiser of (\ref{optg}), we obtain
$$0=f^*(\xi(\theta),\theta) \ge f^*(x,\theta)-(x-\xi(\theta))\ge 0,$$
which implies that $f^*(x,\theta)=x-\xi(\theta).$
\end{proof}
\begin{lemma}\label{lem:band1}
Let $x_0\ge 0$. If $f^*(x_0,\theta)=a_0$ and $f^*(x_0+1,\theta)>0$, then $f^*(x_0+1,\theta)=a_0+1.$
\end{lemma}
\begin{proof}
By definition of $f^*$ we have that
\begin{eqnarray*}
J(x_0,\theta) &=& e^{\theta a_0} G(x_0-a_0,\theta\beta) \left\{ \begin{array}{l}
\le e^{\theta a} G(x_0-a,\theta\beta), \;\mbox{for}\; a=0,\ldots, a_0\\
< e^{\theta a} G(x_0-a,\theta\beta), \;\mbox{for}\; a=a_0+1,\ldots, x_0,
\end{array}\right.
\end{eqnarray*}
which further yields that
\begin{eqnarray*}
e^{\theta (a_0+1)} G(x_0-a_0,\theta\beta) \left\{ \begin{array}{l}
\le e^{\theta (a+1)} G(x_0-a,\theta\beta), \;\mbox{for}\; a=0,\ldots, a_0\\
< e^{\theta (a+1)} G(x_0-a,\theta\beta), \;\mbox{for}\; a=a_0+1,\ldots, x_0.
\end{array}\right.
\end{eqnarray*}
Again from the definition of $f^*$ we obtain that
\begin{eqnarray*}
J(x_0+1,\theta) &=& \min_{a\in A(x_0+1)} e^{\theta a} G(x_0+1-a,\theta\beta) \\
&= & \min\Big\{ G(x_0+1,\theta \beta), \min_{a\in A(x_0)} e^{\theta (a+1)} G(x_0-a,\theta\beta) \Big\}.
\end{eqnarray*}
Since $f^*(x_0+1,\theta)>0,$ it holds that
$$ G(x_0+1,\theta\beta) \ge \min_{a\in A(x_0)} e^{\theta (a+1)} G(x_0-a,\theta\beta).$$
This fact, in turn, together with the previous observation yields by shifting the index that
\begin{eqnarray*}
e^{\theta (a_0+1)} G(x_0-a_0,\theta\beta) \left\{ \begin{array}{l}
\le e^{\theta a} G(x_0+1-a,\theta\beta), \;\mbox{for}\; a=0,\ldots, a_0+1\\
< e^{\theta a} G(x_0+1-a,\theta\beta), \;\mbox{for}\; a=a_0+2,\ldots, x_0+1.
\end{array}\right.
\end{eqnarray*}
Thus, it follows that $f^*(x_0+1,\theta)=a_0+1$.
\end{proof}
\subsection{Optimal policy and its structure }
Recall now that $\gamma$ is a constant risk averse coefficient of the insurer.
Consider the following policy $\pi^*:= (\widetilde{g}_{0},\widetilde{g}_{1},\ldots),$
where $\widetilde{g}_{n}(\cdot):=f^*(\cdot,\gamma\beta^{n}).$ We note that $\pi^*\in\Pi^M.$
Clearly, since $\gamma\in I,$ we have $\gamma\beta^n\in I$ for all $n\in\mathbb{N}_0.$
\begin{corollary}\label{ruin_0} Under policy $\pi^*$ the ruin occurs with probability 1, i.e.,
$\mathbb{P}^{\pi^*}_x(\tau <+\infty)=1$ for every $x\in X.$
\end{corollary}
\begin{proof}
Assume that the surplus process equals $x_0\in X.$
If $x_0\ge 0,$ then either $x_0\le \xi(\gamma)$ or $x_0>\xi(\gamma).$
However, from Theorem \ref{theo:xi_finite} we know that in both cases the risk reserve (surplus)
just after dividend payment is
always less or equal to $\xi(\gamma)\le\xi^*.$ Therefore, the ruin will occur, if
there appears a sequence of length $\xi^*+1$ of negative incomes. But the probability
that such a sequence appears, equals
\begin{equation}
\label{compr}
\mathbb{P}(Z_{1}<0,\ldots,Z_{\xi^*+1}<0)=\left(\sum_{m=-\infty}^{-1}q_m\right)^{\xi^*+1},
\end{equation}
which is positive by (A2). If the ruin has not occurred up to the $l$th day, where $l:=\xi^*+1,$
then again $x_l\le \xi(\gamma\beta^l)$ or $x_l>\xi(\gamma\beta^l).$
But from Theorem \ref{theo:xi_finite}
in both cases the risk reserve just after dividend payment is
always less or equal to $\xi(\gamma\beta^l)\le\xi^*.$ The probability that there exists
a sequence of length $\xi^*+1$ of negative incomes is $\left(\sum_{m=-\infty}^{-1}q_m\right)^{\xi^*+1}.$
Thus, considering the states $x_{k(\xi^*+1)},$ $k\in\mathbb{N}_0,$ we may
define the following events
$$A_k=\{Z_{k(\xi^*+1)+1}<0,Z_{k(\xi^*+1)+2}<0,\ldots,Z_{(k+1)(\xi^*+1)}<0\},
\quad k\in\mathbb{N}_0.$$
By the second Borel--Cantelli lemma $\mathbb{P}(A_k \;i.o.)=1.$ Therefore, the ruin must occur.
\end{proof}
\begin{theorem}\label{theo:optimal}
The Markov policy $\pi^*$ is optimal, i.e.,
$$\widetilde{J}(x)=\widetilde{J}_{\pi^*}(x)=\frac 1\gamma J_{\pi^*}(x,\gamma)=\frac1\gamma J(x,\gamma)$$
for $x\in X.$
\end{theorem}
\begin{proof} From Theorem \ref{theo:fixed_point} and the definition of $\pi^*$ we obtain
for every $x\in X$ that
\begin{eqnarray*}
J(x,\gamma)&=&\min_{a\in A(x)}\left[e^{\gamma r(x,a)}\sum_{x'\in X} J(x',\gamma\beta)q(x'|x,a)\right]\\
&=&e^{\gamma \widetilde{g}_0(x)}\sum_{x'\in X} J(x',\gamma\beta)q(x'|x,\widetilde{g}_0(x)).
\end{eqnarray*}
Assume that $x\ge 0.$
Iterating the last equality $n$ times under the Markov policy $\pi^*,$ we obtain that
\begin{equation}
\label{opt_pol}
J(x,\gamma) =
\mathbb{E}_x^{\pi^*}\left(\exp\left\{\gamma\sum_{k=0}^{(\tau-1)\wedge (n-1)} \beta^k a_k\right\}
\left(J(x_n,\gamma\beta^n) 1[\tau\ge n] + 1[\tau < n] \right)\right).
\end{equation}
Observe now that
$$0\le \mathbb{E}_x^{\pi^*}\left(\exp\left\{\gamma\sum_{k=0}^{(\tau-1)\wedge (n-1)} \beta^k a_k\right\}
J(x_n,\gamma\beta^n) 1[\tau\ge n]\right) \le \mathbb{E}_x^{\pi^*}1[\tau\ge n]= \mathbb{P}_x^{\pi^*}(\tau\ge n).$$
But by Corollary \ref{ruin_0}, $\mathbb{P}_x^{\pi^*}(\tau\ge n)\to 0$ as $n\to\infty.$ Hence,
letting $n\to\infty$ in (\ref{opt_pol}) and making use of the dominated convergence theorem we obtain that
$$\inf_{\pi\in\Pi}J_{\pi}(x,\gamma)=J(x,\gamma) =
\mathbb{E}_x^{\pi^*}\left(\exp\left\{\gamma\sum_{k=0}^{\tau-1} \beta^k a_k\right\}
\right)=J_{\pi^*}(x,\gamma)$$
for $x\in X.$ The conclusion follows by multiplying the above display by the number $1/\gamma.$
\end{proof}
\begin{definition}
A function $g\in \Gamma$ is called a {\em band-function}, if there exist numbers $n\in\mathbb{N}_0$
and $c_0,\ldots,c_n,d_1,\ldots,d_n\in\mathbb{N}_0$ such that $d_k-c_{k-1}\ge 2$ for $k=1,\ldots,n,$
$0\le c_0\le d_1\le c_1\le d_2\le\ldots\le d_n\le c_n$ and
$$g(x)= \left\{ \begin{array}{l}
0, \mbox{ if } x\le c_0\\
x-c_k, \mbox{ if } c_k<x< d_{k+1}\\
0, \mbox{ if } d_k\le x\le c_k\\
x-c_n, \mbox{ if } x> c_{n}.
\end{array}\right.
$$
A Markov policy $\pi=(g_m)_{m\in \mathbb{N}_0}$ is called a band-policy, if $g_m$ is a band-function for every
$m\in \mathbb{N}_0.$
\end{definition}
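For illustration, a band-function with levels $c_0,\ldots,c_n$ and $d_1,\ldots,d_n$ can be evaluated as in the following sketch; the representation of the levels as Python lists is our own convention.
\begin{verbatim}
def band_function(x, c, d):
    """Band-function determined by c = [c_0, ..., c_n] and d = [d_1, ..., d_n]."""
    n = len(c) - 1
    if x <= c[0]:
        return 0
    for k in range(1, n + 1):
        if d[k - 1] <= x <= c[k]:      # plateau [d_k, c_k]: pay nothing
            return 0
        if c[k - 1] < x < d[k - 1]:    # between c_{k-1} and d_k: pay down to c_{k-1}
            return x - c[k - 1]
    return x - c[n]                    # above the top level: pay down to c_n

# example: band_function(6, [2, 5], [4]) == 1 and band_function(4, [2, 5], [4]) == 0
\end{verbatim}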
\begin{theorem}\label{theo:band1}
The optimal Markov policy $\pi^*$ is a band-policy.
\end{theorem}
\begin{proof}
Recall that $\widetilde{g}_n(\cdot)=f^*(\cdot,\gamma\beta^n)$ for $n\in\mathbb{N}_0.$
By Theorem \ref{theo:xi_finite} we have $\widetilde{g}_n(x)= x-\xi(\gamma\beta^n)$ for all
$x> \xi(\gamma\beta^n)$. For $x\le\xi(\gamma\beta^n)$ we have to distinguish different cases. If
$\widetilde{g}_n(x)=0$ for all $x=0,\ldots ,\xi(\gamma\beta^n)$, then clearly $\widetilde{g}_n$ is a
band-function. If there exists an $0<x_0\le \xi(\gamma\beta^n)$ such that $\widetilde{g}_n(x)=0$
for $x=0,\ldots ,x_0-1$ and $\widetilde{g}_n(x_0)>0$, then by Lemma
\ref{lem:band1}
\widetilde{g}_n(x_0)=1$. If further $\widetilde{g}_n(x_0+m)>0$ for
$m=1,\ldots ,\xi(\gamma\beta^n)-x_0-1$, then by induction
$$ \widetilde{g}_n(x_0+m)=\widetilde{g}_n(x_0+m-1)+1=\ldots = \widetilde{g}_n(x_0)+m =m+1.$$
If $\widetilde{g}_n(x_0+1)=0$ we either have $\widetilde{g}_n(x)=0$ for $x=x_0+1,\ldots,\xi(\gamma\beta^n)$
or there exists an
$x_1$ such that $x_0<x_1\le \xi(\gamma\beta^n)$ and
$\widetilde{g}_n(x_0+m)=0$ for $m=1,\ldots,x_1-x_0-1$ and $\widetilde{g}_n(x_1)>0$.
Now we proceed in the same way as with $x_0$. After a finite number
of steps we reach $\xi(\gamma\beta^n)$. In any case $\widetilde{g}_n$ is a
band-function.
\end{proof}
\begin{remark}
In the risk neutral dividend payout problem, the optimal policy is a stationary band-policy, i.e.,
it consists of the same band-function at all time points. The risk neutral problem can formally
be obtained as a limit $\lim_{\gamma \to 0} \frac1\gamma (e^{\gamma x}-1)$. Hence, the exponential utility function
only implies some kind of non-stationarity of the optimal policy and
thus does not really make it necessary to consider history-dependent policies.
\end{remark}
\section{The Power Utility Function}
In this section we assume that the insurer is equipped with the power utility function
$U_\gamma(x)= x^\gamma,$ where $\gamma $ is a fixed number from the interval $(0,1).$
The decision maker wishes to maximise the following expected discounted payoff
$$ \widehat{J}_\pi(x):= \mathbb{E}_x^\pi U_\gamma\left(\sum_{k=0}^{\infty}\beta^k r(x_k,a_k)\right)$$
and to find a policy $\pi^*\in\Pi$ (if one exists) such that
\begin{equation}
\label{oppp}
\widehat{J}(x):=\sup_{\pi\in\Pi} \widehat{J}_\pi(x)=\widehat{J}_{\pi^*}(x)
\end{equation}
for all $x\in X.$ Clearly, $\widehat{J}(x)=0$ for $x<0.$ In Lemma \ref{lem:pbounds}
we show that under our assumptions
$\widehat{J}(x)<+\infty$ for each $x\ge 0.$ Moreover, note that for $x\ge 0$
$$ \widehat{J}(x)= \sup_{\pi\in\Pi} \mathbb{E}_x^\pi U_\gamma\left(\sum_{k=0}^{\tau-1}\beta^k a_k\right).$$
\subsection{Optimality equation and the properties of its largest maximiser} In contrast to the exponential utility function,
the power utility function leads to a certain non-separability: the utility of a sum of payoffs does not factor over time under the (linear)
expectation operator. Therefore, we again extend the state space
by defining the new state space $\widehat{X}:=X\times [0,\infty)$ (cf. \cite{br_mor}). In this case, the second component
keeps track of the payoffs accumulated so far.
If the process is in the state $(x,y)$ and the insurer selects an action $a\in A(x),$
then the probability of moving to a next state $(x',y')$ is $q(x'|x,a),$
if $y' = \frac{y+a}\beta$ and is $0,$
if $y'\not=\frac{y+a}\beta.$ Hence, we can observe that the second component is again established in a
deterministic way, but it differs from the previous case, since $y'$ depends on the action chosen by the
insurer.
Let us define the feasible extended histories of the process up to the $k$th day as follows
\footnote{We use the same symbol $h_k$ as in the previous section to denote an extended feasible history
of the process up to the $k$th day. But there is no confusion, since $h_k$
in this subsection refers only to the power
utility case. The same remark applies to the policy $\sigma,$ the set $\Xi$ and the functions $J$ and $J_\pi$ defined below.}
$$h_0=(x_0,y_0)\quad\mbox{and}\quad h_k=(x_0,y_0, a_0,x_1,y_1,\ldots,a_{k-1},x_k,y_k),\;\;k\ge 1,$$
where $(x_m,a_m)\in D$ for each $m\in\mathbb{N}_0$ and with $y_{m+1}$ given by the recurrence equation
$$y_{m+1}:=\frac{y_{m}+a_{m}}\beta,\quad m\in \mathbb{N}_0.$$
Then, we can define, as usual, an {\it extended history-dependent policy}
$\sigma = (\sigma_{k})_{k\in \mathbb{N}_0},$ where
$\sigma_{k}$ is a mapping from the set of feasible extended histories
up to the $k$th day to the action set $A$ such that
$\sigma_k(h_k) \in A(x_k)$ with $h_k$ defined above.
Let $\Xi$ be the set of all such policies.
Note that for any $\sigma\in\Xi$ ($y\ge 0$ is fixed), there exists a policy $\pi\in\Pi$ that is
equivalent to $\sigma$ in the following sense:
$$\pi_k(\cdot|\omega_k):=\sigma_k(\cdot|h_k),\quad\omega_k\in\Omega_k,\quad k\in \mathbb{N}_0.$$
Obviously, $\pi$ must depend on $y.$
Thus again, for simplicity of notation we shall still use the original set of policies $\Pi,$
and the expectation operator $\mathbb{E}_x^\pi,$
where $x$ is the first component of the initial state.
The dependence on $y\ge 0$ of a policy will be indicated by writing
the second variable to the value function.
In what follows, we put for $n\in \mathbb{N},$ $\pi\in\Pi,$ $x\ge 0$ and $y\ge 0$
$$
J_{n,\pi}(x,y) := \mathbb{E}_x^\pi \left(\sum_{k=0}^{(n-1)\wedge(\tau-1)}\beta^k r(x_k,a_k)+y\right)^\gamma=
\mathbb{E}_x^\pi \left(\sum_{k=0}^{(n-1)\wedge(\tau-1)}\beta^k a_k+y\right)^\gamma$$
and
$$ J_{n}(x,y) := \sup_{\pi\in\Pi} J_{n,\pi}(x,y).$$
Moreover, for $\pi\in\Pi,$ $x\ge 0$ and $y\ge 0$ we set
$$J(x,y)=\sup_{\pi\in\Pi}J_\pi(x,y), \quad\mbox{where}\quad J_\pi(x,y)=
\mathbb{E}_x^\pi \left(\sum_{k=0}^{\tau-1}\beta^k a_k+y\right)^\gamma.$$
If $x<0,$ then $J(x,y)=y^\gamma$ for $y\ge 0.$
Obviously, $ J(x,0) = \widehat{J}(x).$
Before we formulate our first result, we introduce a specific subset of policies $\widehat{\Pi}\subset\Pi.$
Let $F$ be the set of functions $f:\widehat{X}\mapsto A$ such that
$f(x,y)\in A(x)$ for all $y\ge 0$ and let
$(f_k)_{k\in\mathbb{N}_0}$ be a sequence of functions with $f_k\in F.$
Then, $\widehat{\Pi}$ is the set of all policies $\pi=(\pi_k)_{k\in\mathbb{N}_0}$ defined in the following way
$$ \pi_k(\omega_k):=f_k(x_k,y_k),\quad \omega_k\in\Omega_k,\quad k\in\mathbb{N}_0,
$$
where $y_k:=\frac{y_{k-1}+a_{k-1}}\beta,$ $k\in\mathbb{N},$ and $y_0:=y\ge 0$ is a fixed number.
Furthermore, we shall identify a policy $\pi\in\widehat{\Pi}$ with the sequence $(f_k)_{k\in\mathbb{N}_0}$ by writing
$\pi=(f_k)_{k\in\mathbb{N}_0}.$
Next for any function $h:\widehat{X}\mapsto \mathbb{R}$ we define an operator $T$ as follows
\begin{equation}
\label{T_op}
Th(x,y):=\beta^\gamma \max_{a\in A(x)} \left[ \sum_{x'\in X} h\left(x',\frac{a+y}\beta\right) q(x'|x,a) \right].
\end{equation}
Let $f\in F$ be the maximiser of the right-hand side in (\ref{T_op}), i.e., $f(x,y)$ attains the maximum on the right-hand side
of (\ref{T_op}) for all $(x,y)\in\widehat{X}$. We also set
$$T_{f}h(x,y):=\beta^\gamma \sum_{x'\in X} h\left(x',\frac{f(x,y)+y}\beta\right) q(x'|x,f(x,y)).$$
Note that $T_{f}h=Th.$
\begin{theorem}\label{theo:Jprecursive}
For each $n\in\mathbb{N}_0$ the value function $J_n$ satisfies the equation
\begin{equation}
\label{recur}
J_{n+1} = TJ_{n}
\end{equation}
with $J_n(x,y)=y^\gamma$ for $x<0$ and $J_0(x,y)= y^\gamma$.
Let $\bar{f}_{l}\in F$ be such that $J_{l+1} =T_{\bar{f}_{l}}J_{l}$ for $l=0,\ldots,n.$
Then, $\bar{\pi}=(\bar{f}_{n},\ldots,\bar{f}_{0})$
is optimal for $J_{n+1},$ i.e., $J_{n+1}=J_{n+1,\bar{\pi}}.$
\end{theorem}
\begin{proof}
Let $n=1.$ Then, by the definition of $J_1$ we have that
$$J_1(x,y)=\sup_{\pi\in\Pi}\mathbb{E}_x^\pi(\beta^0 a_0+y)^\gamma=\max_{a\in A(x)}(a+y)^\gamma=(x+y)^\gamma.$$
On the other hand,
$$TJ_0(x,y)=\beta^\gamma \max_{a\in A(x)}\left(\frac{a+y}\beta\right)^\gamma
=(x+y)^\gamma=T_{\bar{f}_{0}}J_{0}(x,y)$$ with $\bar{f}_0(x,y)=x.$
Assume now that $J_l=TJ_{l-1}=T_{\bar{f}_{l-1}}J_{l-1}$ with $\bar{f}_{l-1}\in F$ for all $l=1,\ldots,n$
and let $(\bar{f}_{n-1},\ldots, \bar{f}_{0})$
be an optimal policy for $J_n.$
We show that $J_{n+1}=TJ_{n}=T_{\bar{f}_{n}}J_{n}$ and $(\bar{f}_{n},\ldots, \bar{f}_{0})$ is optimal for
$J_{n+1}.$
We have that
$$J_{n+1}(x,y)=\sup_{\pi\in\Pi} \mathbb{E}_x^\pi \left(\sum_{k=1}^{n\wedge(\tau-1)}\beta^k
a_k+a_0+y\right)^\gamma$$
with the convention that the empty sum equals $0$ (when $\tau=1$).
Recall that $\vec{\pi}$ denotes a ``1-shifted'' policy, see (\ref{shift_p}).
We further get the following
\begin{eqnarray}
\label{nr1}
J_{n+1}(x,y)
&=&\beta^\gamma\sup_{\pi\in\Pi} \mathbb{E}_x^\pi \left(\sum_{k=1}^{n\wedge(\tau-1)}\beta^{k-1} a_k+
\frac{a_0+y}\beta\right)^\gamma\\\nonumber
&=&\beta^\gamma\sup_{\pi\in\Pi} \mathbb{E}_x^\pi \left[
\mathbb{E}_x^\pi \left(\left(\sum_{k=1}^{n\wedge(\tau-1)}\beta^{k-1} a_k+
\frac{a_0+y}\beta\right)^\gamma\Big|a_0,x_1\right)\right]\\\nonumber
&\le&\beta^\gamma\sup_{a_0\in A(x)}\left[ \sum_{m=-\infty}^\infty
\sup_{\vec{\pi}\in\Pi}\mathbb{E}_{x-a_0+m}^{\vec{\pi}} \left(\sum_{k=1}^{n\wedge(\tau-1)}\beta^{k-1} a_k+
\frac{a_0+y}\beta\right)^\gamma q_m\right]\\\nonumber
&=& \beta^\gamma\max_{a\in A(x)}\left[\sum_{m=-\infty}^\infty
J_{n}\left(x-a+m,\frac{a+y}\beta\right) q_m\right]=TJ_n(x,y).
\end{eqnarray}
Let $\bar{f}_n$ be such that $TJ_n(x,y)=T_{\bar{f}_n}J_n(x,y).$
Put $\bar{\pi}=(\bar{f}_n,\bar{f}_{n-1},\ldots,\bar{f}_0).$ By induction
assumption
$\vec{\bar{\pi}}=(\bar{f}_{n-1},\ldots,\bar{f}_0)$ is optimal for $J_n.$
Hence, it follows that
\begin{eqnarray}
\label{nr2}
TJ_n(x,y)&=&\beta^\gamma \max_{a\in A(x)} \left[ \sum_{m=-\infty}^\infty J_{n,\vec{\bar{\pi}}}\left(x-a+m,
\frac{a+y}\beta\right) q_m \right]\\ \nonumber
&=&\max_{a\in A(x)} \beta^\gamma\sum_{m=-\infty}^\infty
\mathbb{E}_{x-a+m}^{\vec{\bar{\pi}}}\left(\sum_{k=0}^{(n-1)\wedge(\tau-1)}\beta^ka_{k+1}+
\frac{y+a}\beta\right)^\gamma q_m\\\nonumber
&=& \max_{a\in A(x)}\sum_{m=-\infty}^\infty
\mathbb{E}_{x-a+m}^{\vec{\bar{\pi}}}\left(\sum_{k=0}^{(n-1)\wedge(\tau-1)}\beta^{k+1}a_{k+1}+y+a\right)^\gamma
q_m\\\nonumber
&\le&
\sup_{\pi\in\Pi}\mathbb{E}_{x}^{\pi}\left(\sum_{k=0}^{n\wedge(\tau-1)}\beta^{k}a_{k}+y\right)^\gamma=J_{n+1}(x,y).
\end{eqnarray}
Thus, (\ref{nr1}) and (\ref{nr2}) yield that $J_{n+1}=TJ_n.$
The fact that $\bar{\pi}$ defined above is optimal for $J_{n+1}$
follows from repeating the calculations in (\ref{nr2}) applied to $T_{\bar{f}_n}J_n.$
\end{proof}
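For illustration, when the income distribution has finite support the recursion $J_{n+1}=TJ_n$ can be evaluated directly; the following sketch (the function names and the memoisation are our own choices, not part of the model) mirrors \eqref{T_op} and \eqref{recur} with $J_0(x,y)=y^\gamma$.
\begin{verbatim}
from functools import lru_cache

def make_Jn(q, beta, gamma):
    """Return a function J(n, x, y) computing the n-th value-iteration step;
    q = {z: prob} is a finitely supported income distribution."""
    items = tuple(q.items())
    @lru_cache(maxsize=None)
    def J(n, x, y):
        if x < 0 or n == 0:                       # ruin or empty horizon
            return y ** gamma
        best = 0.0
        for a in range(x + 1):                    # A(x) = {0, 1, ..., x}
            y_next = (y + a) / beta
            total = sum(p * J(n - 1, x - a + z, y_next) for z, p in items)
            best = max(best, beta ** gamma * total)
        return best
    return J

# example: J = make_Jn({1: 0.6, -1: 0.4}, beta=0.9, gamma=0.5); J(3, 2, 0.0)
\end{verbatim}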
The next result can be concluded from Theorem \ref{theo:Jprecursive}.
\begin{theorem}\label{theo:Jfixedpoint}
The function $J$ satisfies the following equation
\begin{eqnarray}
\label{optp}
J(x,y) &=& \beta^\gamma \max_{a\in A(x)}
\left[ \sum_{x'\in X} J\left(x', \frac{a+y}\beta\right) q(x'|x,a) \right]\\\nonumber
&=&\beta^\gamma \max_{a\in A(x)}
\left[ \sum_{k=-\infty}^\infty J\left(x-a+k, \frac{a+y}\beta\right) q_k \right]
\end{eqnarray}
for $x\in\mathbb{N}_0$ and $ y \ge 0.$
\end{theorem}
\begin{proof}
It is obvious that the sequence of functions $(J_n(x,y))_{n\in\mathbb{N}_0}$ is increasing for each $(x,y)\in\widehat{X}.$
Hence, $w(x,y):=\lim_{n\to\infty} J_n(x,y)$
exists for every $(x,y)\in \widehat{X}.$ Since $J_n\le J,$ it follows that $w\le J.$ On the other hand,
for any policy $\pi\in\Pi$ we obtain that
$J_{n,\pi}\le J_n.$ Letting $n\to\infty,$ making use of the monotone convergence theorem
and taking the supremum over $\pi\in\Pi,$ it is easily seen that
$$J(x,y)=\sup_{\pi\in\Pi} \mathbb{E}_x^\pi\left(\sum_{k=0}^{\tau-1}\beta^ka_k+y\right)^\gamma
\le w(x,y).$$ Consequently, $w=J.$ Equation (\ref{optp}) follows from (\ref{recur}) by letting
$n\to\infty$ and interchanging the limit with the maximum.
\end{proof}
\begin{remark}
The counterpart of Theorem \ref{theo:Jfixedpoint} is Theorem 4.1(a) in \cite{br_mor}. However, again as in the exponential case
this theorem was proved for general state space, weakly continuous transitions and {\it bounded} costs.
\end{remark}
The next lemma provides the following bounds for the function $J.$
\begin{lemma}\label{lem:pbounds}
For any $x\in\mathbb{N}_0$ and $y\ge 0$ it follows that
$$ (x+y)^\gamma \le J(x,y) \le \left(x+\beta \frac{\mathbb{E} Z^+}{1-\beta}+y\right)^\gamma.$$
\end{lemma}
\begin{proof} Let $\pi$ be a policy such that $\pi_0(\cdot|\omega_0)=x$ and
$\pi_k(\cdot|\omega_k)=0$ for $k\ge 1.$ Then, $J(x,y)\ge J_{\pi}(x,y)=(x+y)^\gamma.$
The upper bound for the function $J$ is due to the Jensen inequality and Theorem 9.2.3(a)
in \cite{br} that gives the upper bound for the risk neutral setting.
\end{proof}
For simplicity for any $x\in\mathbb{N}_0$ and $y\ge 0$ we define
$$ G(x,y) := \sum_{k=-\infty}^\infty J(x+k, y) q_k.$$
From (\ref{optp}) we obtain that
\begin{equation}
\label{optpg}
J(x,y) = \beta^\gamma \max_{a\in A(x)}
G\left(x-a, \frac{a+y}\beta\right), \quad x\in \mathbb{N}_0,\;\; y\ge 0.
\end{equation}
\begin{lemma}\label{theo:propJ}
For all $0\le v \le x$ we have that $J(x,y) \ge J\left(x-v,y+v\right)$.
\end{lemma}
\begin{proof}
Suppose that $0< v \le x.$ Observe that
\begin{eqnarray*}
J(x,y) &=& \max\left\{\left\{\beta^\gamma G\left(x,\frac{y}\beta\right), \ldots,
\beta^\gamma G\left(x-v+1,\frac{y+v-1}\beta\right)\right\},\right.\\
&&\left.
\max\left\{\beta^{\gamma} G\left(x-v,\frac{y+v}\beta\right),\ldots,\beta^{\gamma}
G\left(0,\frac{y+x}\beta\right) \right\} \right\}\\
&=& \max\left\{\left\{\beta^\gamma G\left(x,\frac{y}\beta\right), \ldots,
\beta^\gamma G\left(x-v+1,\frac{y+v-1}\beta\right)\right\},\right.\\
&&\left. \beta^\gamma\max_{a\in A(x-v)}G\left(x-v-a,\frac{y+v+a}\beta\right) \right\}\ge
J\left(x-v,y+v\right),
\end{eqnarray*}
where the last inequality is due to (\ref{optpg}).
\end{proof}
In what follows let $f^*\in F$ be the largest maximiser of the right-hand side in (\ref{optpg}).
For completeness, set $f^*(x,y)=0$ for $x<0.$
\begin{lemma}\label{lem:paydown}
For $x\in\mathbb{N}_0$ and $y\ge 0$ it follows that $f^*\big( x-f^*(x,y),y+f^*(x,y)\big)=0$.
\end{lemma}
\begin{proof}
By (\ref{optpg}) we obtain $J(x,y) \ge \beta^\gamma G\left(x,\frac{y}\beta\right),$ which implies that
$$
J(x,y) = \beta^\gamma G\left(x-f^*(x,y), \frac{y+f^*(x,y)}{\beta}\right)
\le J\left( x-f^*(x,y),y+f^*(x,y)\right)\le J(x,y),
$$
where the second inequality follows from Lemma \ref{theo:propJ}. Hence,
$$\beta^\gamma G\left(x-f^*(x,y), \frac{y+f^*(x,y)}{\beta}\right)
= J\left( x-f^*(x,y),y+f^*(x,y)\right),$$
which implies that $a=0$ maximises the expression
$\beta^{\gamma}G(x-f^*(x,y)-a,\frac{y+f^*(x,y)+a}\beta).$
We claim that $a=0$ is the only maximiser of this expression. Obviously, if $f^*(x,y)=x,$
then the result follows. If, on the other hand,
$f^*(x,y)<x,$ then
$$J(x,y)=\beta^{\gamma}G(x-f^*(x,y),\frac{y+f^*(x,y)}\beta)>
\beta^{\gamma}G(x-f^*(x,y)-a,\frac{y+f^*(x,y)+a}\beta)$$
for $a=1,\ldots,x-f^*(x,y).$ This fact, in turn, implies that $a=0$ is the only maximiser,
which concludes the proof.
\end{proof}
\begin{lemma}\label{lem:pzero}
Let $\xi(y) := \sup\{ x\in\mathbb{N}_0 : f^*(x,y) =0\}$. Then $\xi^*:=\sup_{y\ge 0} \xi(y) < \infty.$
\end{lemma}
\begin{proof} Fix $y\ge 0.$ Let $x\in \mathbb{N}_0$ be such that
$f^*(x,y)=0.$ Clearly, such $x\in\mathbb{N}_0$ exists. From (\ref{optpg}) we have that
\begin{eqnarray*}
J(x,y) &=& \beta^\gamma\left( \sum_{k=-x}^\infty J\big(x+k, \frac{y}\beta\big) q_k +
\Big(\frac{y}\beta\Big)^\gamma \sum_{k=-\infty}^{-x-1} q_k \right)\\
&\le& \beta^\gamma\left( \sum_{k=-x}^\infty \big(x+k +\frac{\beta}{1-\beta} \mathbb{E} Z^+ +
\frac{y}\beta\big)^\gamma q_k + \Big(\frac{y}\beta\Big)^\gamma \sum_{k=-\infty}^{-x-1} q_k\right) \quad
\mbox{(by Lemma \ref{lem:pbounds})}\\
&=& \sum_{k=-x}^\infty \big(\beta x+\beta k +\frac{\beta^2}{1-\beta} \mathbb{E} Z^+ + y\big)^\gamma q_k +
{y}^\gamma \sum_{k=-\infty}^{-x-1} q_k \\
&\le& \left( \sum_{k=-x}^\infty \big(\beta x+\beta k +\frac{\beta^2}{1-\beta} \mathbb{E} Z^+ + y\big) q_k +
{y} \sum_{k=-\infty}^{-x-1} q_k\right)^\gamma \quad\mbox{(by the Jensen inequality)}\\
&\le& \left( \beta x+\beta \mathbb{E} Z^+ +\frac{\beta^2}{1-\beta} \mathbb{E} Z^+ + y\right)^\gamma
= \left( \beta x+\frac{\beta}{1-\beta} \mathbb{E} Z^+ +{y}\right)^\gamma.
\end{eqnarray*}
On the other hand, making use again of Lemma \ref{lem:pbounds} we have that
$J(x,y) \ge (x+y)^\gamma$ and, consequently,
$$ (x+y)^\gamma \le \left( \beta x+\frac{\beta}{1-\beta} \mathbb{E} Z^+ +y\right)^\gamma$$ if and only if $x\le
\frac{\beta}{(1-\beta)^2} \mathbb{E} Z^+,$ which is independent of $y$ and implies the result.
\end{proof}
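The last inequality of the proof gives the explicit bound $\xi^*\le \beta\,\mathbb{E} Z^+/(1-\beta)^2$, which is easy to evaluate numerically; a minimal sketch (assuming a finitely supported income distribution, represented as a Python dictionary of our own choosing):
\begin{verbatim}
def xi_star_bound(beta, q):
    """Upper bound beta*E[Z^+]/(1-beta)**2 for xi* (q = {z: prob})."""
    EZplus = sum(p * max(z, 0) for z, p in q.items())
    return beta * EZplus / (1 - beta) ** 2

# example: xi_star_bound(0.9, {1: 0.6, -1: 0.4})  ->  approximately 54
\end{verbatim}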
The next result is a counterpart of Lemma \ref{lem:band1} and provides further properties of the function $f^*\in F.$
\begin{lemma}\label{lem:band2}
Let $x_0\in\mathbb{N}_0$ and $y_0\ge 1$. If $f^*(x_0,y_0)=a_0$ and $f^*(x_0+1,y_0-1)>0$, then
$f^*(x_0+1,y_0-1)=a_0+1.$
\end{lemma}
\begin{proof}
By definition of $f^*$ and (\ref{optp}) we have that
\begin{eqnarray*}
J(x_0,y_0) &=& \beta^{\gamma} G\left(x_0-a_0,\frac{y_0+a_0}\beta\right) \left\{ \begin{array}{l}
\ge \beta^{\gamma} G\left(x_0-a,\frac{y_0+a}\beta\right), \;\mbox{for}\; a=0,\ldots, a_0\\
> \beta^{\gamma} G\left(x_0-a,\frac{y_0+a}\beta\right), \;\mbox{for}\;
a=a_0+1,\ldots, x_0.
\end{array}\right.
\end{eqnarray*}
The above display implies that
\begin{eqnarray}
\label{shin}
\beta^{\gamma} G\left(x_0-a_0,\frac{y_0+a_0}\beta\right) \left\{ \begin{array}{l}
\ge \beta^{\gamma} G\left(x_0-(a-1),\frac{y_0+a-1}\beta\right), \;\mbox{for}\; a=1,\ldots, a_0+1\\
> \beta^{\gamma} G\left(x_0-(a-1),\frac{y_0+a-1}\beta\right), \;\mbox{for}\;
a=a_0+2,\ldots, x_0+1.
\end{array}\right.
\end{eqnarray}
On the other hand, we also obtain that
\begin{eqnarray*}
J(x_0+1,y_0-1) &=& \max_{a\in A(x_0+1)} \beta^{\gamma} G\left(x_0+1-a,\frac{y_0-1+a}\beta\right) \\
&= & \max\left\{ \beta^{\gamma} G\left(x_0+1,\frac{y_0-1}\beta\right),
\max_{a\in A(x_0)} \beta^{\gamma} G\left(x_0-a,\frac{y_0+a}\beta\right) \right\}.
\end{eqnarray*}
Since $f^*(x_0+1,y_0-1)>0,$ we infer that
$$ \beta^{\gamma} G\left(x_0+1,\frac{y_0-1}\beta\right) \le
\max_{a\in A(x_0)} \beta^{\gamma}G\left(x_0-a,\frac{y_0+a}\beta\right).$$
This fact and (\ref{shin}) yield that
$$ \beta^{\gamma} G\left(x_0+1-(a_0+1),\frac{y_0-1+(a_0+1)}\beta\right)
\ge \beta^{\gamma} G\left(x_0+1-a,\frac{y_0-1+a}\beta\right),
$$ for $a=0,\ldots, a_0+1$ and
$$\beta^{\gamma} G\left(x_0+1-(a_0+1),\frac{y_0-1+(a_0+1)}\beta\right) >
\beta^{\gamma} G\left(x_0+1-a,\frac{y_0-1+a}\beta\right),$$
for $a=a_0+2,\ldots, x_0+1.$
Thus, it follows that $f^*(x_0+1,y_0-1)=a_0+1$.
\end{proof}
\subsection{Optimal policy}
Let $y_0\ge 0$ be fixed and let $x_0\in X$ be the initial state. Consider the following
policy $\pi^* := (\pi^*_k)_{k\in\mathbb{N}_0}$ generated by $f^*$ in the following way
\begin{equation}
\label{s_pol}
\pi^*_k(\omega_k):=f^*(x_k,y_k),\quad \omega_k\in\Omega_k\quad
k\in\mathbb{N}_0,
\end{equation}
where
\begin{equation}
\label{s_pol_y}
y_k:=\frac{y_{k-1}+f^*(x_{k-1},y_{k-1})}\beta=\frac{y_0+\sum_{m=0}^{k-1}\beta^m f^*(x_m,y_m)}{\beta^k},
\quad k\in\mathbb{N}.
\end{equation}
Obviously, $\pi^*\in\widehat{\Pi}.$
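For illustration, a trajectory of the surplus under a decision rule of this type, together with the variable $y_k$ from \eqref{s_pol_y} and the realised utility, can be simulated as follows. The sampling routine, the step limit and all names are our own choices for this sketch.
\begin{verbatim}
import numpy as np

def simulate_payout(f, x0, beta, gamma, sample_z, y0=0.0, max_steps=10000, rng=None):
    """Simulate one path under a rule f(x, y) -> dividend; sample_z(rng) draws one
    income. Returns the realised utility (y0 + sum of discounted dividends)**gamma."""
    rng = rng or np.random.default_rng()
    x, y, paid = x0, y0, 0.0
    for k in range(max_steps):
        if x < 0:                          # ruin: no further dividends are paid
            break
        a = f(x, y)
        paid += beta ** k * a
        y = (y + a) / beta                 # bookkeeping variable y_k
        x = x - a + sample_z(rng)
    return (y0 + paid) ** gamma

# example rule paying out everything above a fixed level 3:
# f = lambda x, y: max(x - 3, 0)
\end{verbatim}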
\begin{corollary}\label{ruin_p}
Under policy $\pi^*$ ruin occurs with probability 1.
\end{corollary}
\begin{proof}
We proceed along similar lines as in the proof of Corollary \ref{ruin_0}. Let $(x_0,y_0)\in\widehat{X}$ with $x_0\ge 0.$
Then, either $x_0\le\xi^*$ or $x_0>\xi^*.$ Observe that in both cases the risk reserve just after the dividend payment
is less or equal to $\xi^*.$ The first case is obvious. In the second case, we deduce from Lemma \ref{lem:paydown} that
$f^*(x_0-f^*(x_0,y_0),y_0+f^*(x_0,y_0))=0,$ which means by Lemma \ref{lem:pzero}
that $x_0-f^*(x_0,y_0)\le\xi(y_0+f^*(x_0,y_0))\le\xi^*.$
Hence, the ruin occurs, if there appears a sequence of length $\xi^*+1$ of negative incomes. The probability
of such event equals $\left(\sum_{m=-\infty}^{-1}q_m\right)^{\xi^*+1},$ see also (\ref{compr}). If the ruin has not
occurred up to the $l$th day with $l=\xi^*+1,$ then either $x_l\le\xi^*$ or $x_l>\xi^*.$ Now the remaining part follows
from the proof of Corollary \ref{ruin_0}.
\end{proof}
\begin{theorem}\label{optimalp}
For every $(x,y)\in\widehat{ X}$ it holds that $J(x,y) = J_{\pi^*}(x,y).$\end{theorem}
\begin{proof}
From Theorem \ref{theo:Jfixedpoint} and the definition of $\pi^*$ given in (\ref{s_pol}) we have that
$$ J(x,y)=\beta^\gamma\sum_{x_1\in X} J(x_1,y_1)q(x_1|x,f^*(x,y))$$
for $x\in\mathbb{N}_0$ and $y\ge 0$ with $y_1$ defined in (\ref{s_pol_y}) and $y_0:=y.$
Hence,
\begin{eqnarray}
\label{pomoc}
J(x,y)&=&\beta^\gamma\sum_{x_1\ge 0} J(x_1,y_1)q(x_1|x,f^*(x,y))
+\beta^\gamma\sum_{x_1<0} (y_1)^\gamma q(x_1|x,f^*(x,y))\\\nonumber
&=&\beta^\gamma\sum_{x_1\ge 0} J(x_1,y_1)q(x_1|x,f^*(x,y))+\sum_{x_1<0} (y+f^*(x,y))^\gamma
q(x_1|x,f^*(x,y)).
\end{eqnarray}
Iterating (\ref{pomoc}) $(n-1)$ times and making use of the policy $\pi^*$ we arrive at the following
equation
\begin{eqnarray}
\label{eq1}
\lefteqn{ J(x,y) =(\beta^\gamma)^n \mathbb{E}_x^{\pi^*}\left(J(x_n,y_n)1[\tau>n]\right)+}\\ \nonumber
&&
\mathbb{E}_x^{\pi^*}\left[ \left(y+\sum_{k=0}^{(n-1) \wedge (\tau-1)}\beta^k f^*(x_k,y_k)\right)^\gamma
1[\tau \le n]\right].
\end{eqnarray}
From Lemma \ref{lem:pbounds} and the concavity of $x\mapsto x^\gamma,$
we obtain the following bound for the first term in (\ref{eq1})
\begin{eqnarray}
\label{eq2} \nonumber
\lefteqn{
(\beta^\gamma)^n\mathbb{E}_x^{\pi^*}\left(J(x_n,y_n)1[\tau>n]\right)\le(\beta^\gamma)^n
\mathbb{E}_x^{\pi^*}\left(\left(x_n+y_n+\frac\beta{1-\beta}\mathbb{E} Z^+\right)^\gamma1[\tau>n]\right)}\\
&\le& (\beta^\gamma)^n\mathbb{E}_x^{\pi^*}\left(x_n^\gamma1[\tau>n]\right) +(\beta^\gamma)^n
\left(\frac\beta{1-\beta}\mathbb{E} Z^+\right)^\gamma
+(\beta^\gamma)^n\mathbb{E}_x^{\pi^*}\left((y_n)^\gamma 1[\tau>n]\right).
\end{eqnarray}
By (\ref{s_pol_y}) the third term in (\ref{eq2}) can be written as follows
\begin{equation}
\label{eq3}
(\beta^\gamma)^n\mathbb{E}_x^{\pi^*}\left((y_n)^\gamma 1[\tau>n]\right)=\mathbb{E}_x^{\pi^*}
\left[ \left(y+\sum_{k=0}^{n-1}\beta^k f^*(x_k,y_k)\right)^\gamma
1[\tau> n]\right].
\end{equation}
Next by (\ref{maineq}) we get for the first term in (\ref{eq2}) the following
\begin{eqnarray}
\label{eq4}
0&\le& (\beta^\gamma)^n\mathbb{E}_x^{\pi^*}\left(x_n^\gamma1[\tau>n]\right)
\le\left(\beta^n\mathbb{E}_x^{\pi^*}(x_n1[\tau>n])\right)^\gamma \\ \nonumber
&\le& \left(\beta^n\mathbb{E}_x^{\pi^*}\left[\left(x-\sum_{k=0}^{n-1}f^*(x_k,y_k)
+\sum_{k=1}^{n}Z_k^+\right)1[\tau>n]\right]\right)^\gamma \\ \nonumber
&\le& \left(\beta^n x +\beta^n n\mathbb{E} Z^+\right)^\gamma.
\end{eqnarray}
By our assumption (A1) and (\ref{eq4}) the first term in (\ref{eq2}) converges to $0$ as $n\to\infty.$
Observe that the same
remark also applies for the second term in (\ref{eq2}). Summing up, from (\ref{eq1}), (\ref{eq2}) and
(\ref{eq3}) we obtain that
$$J(x,y) \le \lim_{n\to\infty} \mathbb{E}_x^{\pi^*}\left(y+\sum_{k=0}^{(n-1) \wedge (\tau-1)}\beta^k f^*(x_k,y_k)\right)^\gamma.$$
Now the monotone convergence theorem yields that
$J(x,y)\le J_{\pi^*}(x,y).$
\end{proof}
We close this section with a conclusion for our original model.
\begin{corollary}\label{optimal_policy_1}
Let $y_0:=0.$ Then, $\pi^*$ is optimal for the original
optimisation problem, i.e.,
$\widehat{J}(x) = \widehat{J}_{\pi^*}(x).$
\end{corollary}
\begin{remark}
Note that in the case of a power utility, the optimal policy is history-dependent,
but depends on the history only through the accumulated discounted dividends given by $(y_k)$ in \eqref{s_pol_y}.
\end{remark}
\begin{remark}
It is well-known that the logarithmic utility function $U(x)=\log(x)$ can be obtained as a limit from the power utility since
$$ \lim_{\gamma\to 0} \frac1\gamma \big(x^\gamma-1\big)=\log(x).$$
Indeed, the problem can then be treated for the logarithmic utility in a similar way. The optimality equation is given by
$$ J(x,y) = \log(\beta) + \max_{a\in A(x)}
\left[ \sum_{k=-\infty}^\infty J\left(x-a+k, \frac{a+y}\beta\right) q_k \right]$$
and we can follow the same line of analysis. It is worth mentioning that the power and logarithmic utility functions
are examples of the so-called HARA ({\it hyperbolic absolute risk aversion}) utilities, whereas the exponential utility function belongs to
the CARA ({\it constant absolute risk aversion}) class of utilities. The reader is referred, for instance, to \cite{br,br_mor, turkey,fs}
and references cited therein, for
further properties of the aforementioned functions.
\end{remark}
\section{Howard's Policy Improvement}
In this section we provide one numerical tool to solve these problems which is known under the name Howard policy improvement. We restrict the presentation here to the exponential utility function: Start with an arbitrary policy of the form $\pi = (f(\cdot,\gamma), f(\cdot,\gamma\beta),\ldots)$ induced by a decision rule $f$ where we assume that $f$ is such that $f(x,\theta) \ge x-s^*$ for all $x>s^*$ and all $\theta$. Note that we define $s^* := \sup_{\theta \in [\gamma,0)}s(\theta)$ (see Theorem \ref{theo:xi_finite}). Take e.g. $f(x,\theta)=x^+$. We write $J_f:= J_\pi$. Next we compute the largest minimizer $h(x,\theta)$ of the expression
$$ a \mapsto e^{\theta a} G_f(x-a,\theta \beta),\; a\in\{0,1,\ldots ,x\}$$
where $$G_f(x,\theta) := \sum_{k=-x}^\infty J_f(x+k,\theta) q_k + \sum_{k=-\infty}^{-x-1} q_k.$$
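For illustration, one improvement step is straightforward to carry out numerically once $J_f$ has been tabulated on a truncated grid $\theta_k=\gamma\beta^k$, $k=0,\ldots,K-1$, and $x=0,\ldots,x_{\max}$; the truncations, the tie-breaking towards the largest action and all names in the following sketch are our own choices, not part of the algorithm's description.
\begin{verbatim}
import numpy as np

def improve(J_f, q, gamma, beta, x_max, K):
    """One Howard improvement step: return the largest minimiser h(x, theta_k) of
    a -> exp(theta_k*a) * G_f(x-a, theta_k*beta); J_f[k, x] ~ J_f(x, gamma*beta**k)."""
    h = np.zeros((K, x_max + 1), dtype=int)
    for k in range(K):
        theta = gamma * beta ** k
        k_next = min(k + 1, K - 1)                  # crude truncation of the theta-grid
        def G_f(s):                                 # G_f(s, theta*beta) on the grid
            return sum(p * (J_f[k_next, min(s + z, x_max)] if s + z >= 0 else 1.0)
                       for z, p in q.items())
        for x in range(x_max + 1):
            vals = [np.exp(theta * a) * G_f(x - a) for a in range(x + 1)]
            m = min(vals)
            h[k, x] = max(a for a, v in enumerate(vals) if v == m)
    return h
\end{verbatim}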
We now claim the following.
\begin{lemma}
The new decision rule $h$ and the corresponding value function $J_h$ have the following properties:
\begin{itemize}
\item[a)] $h\big(x-h(x,\theta),\theta\big)=0$ for all $x$ and $\theta$.
\item[b)] $h(x,\theta) \ge x-s^*$ for all $x>s^*$ and all $\theta$.
\item[c)] $e^{\theta x} \underline{h}(\theta)\le J_h \le J_f\le e^{\theta x} \overline{h}(\theta).$
\end{itemize}
\end{lemma}
\begin{proof}
\begin{itemize}
\item[a)] If $h(x,\theta)=0$ or $h(x,\theta)=x$ the statement is true. Now let $0<h(x,\theta)<x$ and suppose that $h\big(x-h(x,\theta),\theta\big)>0$, i.e. there is an $a^*>0$ s.t.
$$ e^{\theta a^*} G_f\big(x-h(x,\theta)-a^*,\theta \beta\big) \le G_f\big(x-h(x,\theta),\theta \beta\big).$$
On the other hand by the definition of $h$ we have for all $a>h(x,\theta)$:
$$ e^{\theta h(x,\theta)} G_f\big(x-h(x,\theta),\theta \beta\big) < e^{\theta a} G_f\big(x-a,\theta \beta\big).$$
Combining these inequalities leads to (note that $x-h(x,\theta)-a^*\ge 0$)
\begin{eqnarray*}
e^{\theta h(x,\theta)} G_f\big(x-h(x,\theta),\theta \beta\big) & <& e^{\theta (h(x,\theta)+a^*)} G_f\big(x-h(x,\theta)-a^*,\theta \beta\big) \\
&\le & e^{\theta h(x,\theta)} G_f\big(x-h(x,\theta),\theta \beta\big)
\end{eqnarray*}
which is a contradiction. Thus, the statement is shown.
\item[b)] We show first for $x>s^*$ and arbitrary $\theta$ that $h(x,\theta)>0$. In order to do this, consider the expression
$ e^{\theta a} G_f(x-a,\theta \beta)$ for $a=0$ and $a=f(x,\theta)$. By definition we obtain for $a=f(x,\theta)$ that
$$ e^{\theta f(x,\theta)} G_f\big(x- f(x,\theta),\theta \beta\big) =J_f(x,\theta) \le e^{\theta x} \bar{h}(\theta).$$
For $a=0$ we obtain that
\begin{eqnarray*}
G_f(x,\theta \beta) &=& \sum_{k=-x}^\infty J_f(x+k,\theta\beta) q_k + \sum_{k=-\infty}^{-x-1} q_k \\
&\ge & e^{\theta \beta x} \underline{h}(\theta\beta) \Big( \sum_{k=-x}^\infty e^{\theta\beta k} q_k + \sum_{k=-\infty}^{-x-1} q_k\Big)\\
&\ge& e^{\theta \beta x} \underline{h}(\theta\beta) \mathbb{E} e^{\theta \beta Z^+} = e^{\theta \beta x} \underline{h}(\theta).
\end{eqnarray*}
Furthermore, observe that
$$ e^{\theta \beta x} \underline{h}(\theta) \ge e^{\theta x} \bar{h}(\theta) \quad \Leftrightarrow \quad x>s(\theta).$$
Thus, the inequality holds, in particular, if $x \ge s^* = \sup_{\theta \in [\gamma,0)}s(\theta)$. This implies that $0$ cannot be a minimiser,
so $h(x,\theta)>0$ for all $x>s^*$ and all $\theta$.
This fact and point (a) imply the conclusion.
\item[c)] From the definition of $h$ we obtain:
$$ J_f(x,\theta) = e^{\theta f(x,\theta)} G_f\big(x- f(x,\theta),\theta \beta\big) \ge e^{\theta h(x,\theta)} G_f\big(x- h(x,\theta),\theta \beta\big).$$ Iterating this inequality yields
$$ J_f(x,\theta) \ge \mathbb{E}_x^h \Big[ \exp\Big(\theta \sum_{k=0}^{(\tau-1)\wedge (n-1)} \beta^k a_k\Big) (J_f(x_n,\theta \beta^n)
1[\tau \ge n] +1[\tau < n])\Big].$$ The property of $h$ shown in b) now implies that ruin occurs with probability 1 under $h$ and thus as in the proof of Theorem \ref{theo:optimal} we obtain with $n\to\infty$ that $J_f \ge J_h$.
\end{itemize}
\end{proof}
From the proof it follows that in case $f\neq h$, the inequality $J_h(x,\theta) \le J_f(x,\theta)$ is strict for at least one $(x,\theta)$.
Now suppose no improvement is possible, i.e. $h=f$. Hence $J_f$ is another solution of \eqref{eq:fixedpoint}. By
Remark 3.2 $J_f \le J$. On the other hand by the definition of $J$ we have $J_f \ge J$ which implies $J=J_f$.
Finally if the iteration does not stop we obtain a non-increasing sequence $J_{f_0} \ge J_{f_1}\ge \ldots \ge J$.
Denote $\underline{J} := \lim_{n\to\infty} J_{f_n}$. Obviously $\underline{J} \ge J$.
Next from the definition of an improvement:
$$ J_{f_{k+1}}(x,\theta) \le \min_{a \in A(x)}\Big[ e^{\theta a} G_{f_k}(x-a,\theta \beta)\Big] \le J_{f_k}(x,\theta).$$
Letting $k\to\infty$ we obtain (note that $\lim$ and $\min$ can be interchanged since $A(x)$ is always finite):
$$\underline{J}(x,\theta) \le \min_{a\in A(x)}\left[e^{\theta a}
\left(\sum_{k=a-x}^\infty \underline{J}(x-a+k,\theta\beta)q_k+ \sum_{k=-\infty}^{a-x-1} q_k\right)\right]
\le \underline{J}(x,\theta)$$ hence $\underline{J}$ is another solution of \eqref{eq:fixedpoint} which implies that $\underline{J}=J$.
\section {Concluding remarks}
In this paper, we study the discrete time problem, suggested by Gerber and Shiu \cite{gershiu}, of maximising
the {\it expected utility} of discounted dividends until ruin. We restrict our attention to the integer-valued surplus process
and to integer payments. To the best of our knowledge, the only paper that examines a similar issue (with the exponential utility)
is \cite{schach}, where the wealth of the insurance company is driven by a Brownian motion with drift. However, the authors
were not able to solve the problem in full rigour. Namely, assuming that a certain integral equation for the barrier function $b(t)$
has a desirable solution
(see Standing Assumption in \cite{schach}), they prove that $b(t)$ is indeed the barrier they search for
(a barrier function is a band function with $n=0$
in Definition 3.1).
Moreover, the numerical experiments provided in Section 1.4 in \cite{schach} are given without their convergence proofs.
This fact and the lack of a complete solution in continuous time and any solution in discrete time to Gerber and Shiu's suggestion
since 2004 indicate that the problem is not straightforward from the mathematical point of view.
Firstly, as in \cite{br_mor,schach}, we note that the optimal strategy is time-dependent in a certain way. In order to get rid of
this non-stationarity we extend the state space to a two-dimensional one. Within such a new framework our problem becomes stationary.
Secondly, since our dividend payments can be unbounded we cannot directly apply the results from \cite{br_mor, ds1} to deduce that the
value function satisfies the corresponding Bellman equation for the exponential and power utility functions. Nonetheless,
we are able to show that in both cases the value iteration algorithm works (see Theorem 3.1 and Theorem 4.1) and in the exponential
utility case Howard's policy improvement algorithm works (see Section 5). These facts, in turn,
may have a significant meaning, when one thinks of
numerical examples. Moreover, we are also able to describe the structure of optimal strategies for both utility cases
and to prove for the exponential function case that the optimal policy is a band-policy.
Numerical experiments are difficult. Let us first recall that
the maximisation of the expectation of discounted dividends in the
model given by (\ref{maineq}) with $\mathbb{P}(Z_1=1)=p=1-\mathbb{P}(Z_1=-N),$ where $p\in (0,1)$ and $N\in \mathbb{N},$
was a challenging analytical problem. The reader may find the complete solution, for instance, in \cite{morill},
where it was shown that the optimal
policy is of barrier type.
Our problem, as already mentioned, is non-stationary and non-separable.
Therefore, the methods that solved analytically the risk neutral problem are useless here. Moreover, as was noted by
Gerber and Shiu \cite{gershiu},
in contrast to the risk neutral problem one can expect that in the model with the exponential utility function the optimal policy is not of barrier type.
This fact does not make potential calculations any easier.
Obviously, since obtaining an analytical solution is a challenge, one can think of numerical methods used in dynamic programming such as
value iteration, policy improvement or others, see \cite{powell}.
However, our surplus process proceeds on the space $X=\mathbb{Z}$ and even the simple aforementioned case ($\mathbb{P}(Z_1=1)=p=1-\mathbb{P}(Z_1=-1)$)
requires some truncation of the state space to a finite one in order to obtain numerical results.
In addition, in this model we meet one more obstacle that
has not been treated so far, namely the exponential and power utility functions, which imply non-stationarity and non-separability.
Therefore, the problem of calculating numerically optimal strategies and value functions
for models with these or other utility functions is left open.
\small
\end{document}
\begin{document}
\noindent{\small Published version:}\\
\noindent{\emph{Probability and Mathematical Statistics (Wroclav), }}\\
\noindent{\emph{ Vol. 37, Fasc. 1 (2017), pp.~101-118 (open access)}}\\
\noindent{\emph{ doi: 10.19195/0208-4147.37.1.4 }}\\
\begin{center}
{\sf \LARGE Cram\'{e}r Type Large Deviations for Trimmed L-statistics}\footnote{Research partially
supported by the Russian Foundation for Basic Research (grant RFBR no. SS-2504.2014.1).}
\vspace*{7mm}
{\large Nadezhda Gribkova}\footnote{E-mail: [email protected]; [email protected]}
\textit{Faculty of Mathematics and Mechanics, St.\,Petersburg State University,\\ St.\,Petersburg 199034, Russia}
\end{center}
\begin{quote}
\noindent{\bf Abstract.} {\small
In this paper, we propose a~new approach to the investigation of asymptotic properties of trimmed
L-statistics and we apply it to the Cram\'{e}r type large deviation problem.
Our results can be compared with those in Callaert et~al.~(1982) -- the first and, as far as we know, the only
article where some results on probabilities of large deviations
for the trimmed $L$-statistics were obtained, albeit under some strict and unnatural conditions.
Our approach is to approximate the trimmed $L$-statistic by a~non-trimmed $L$-statistic (with smooth weight function) based on Winsorized random variables. Using this method, we establish the Cram\'{e}r type large deviation results for the trimmed $L$-statistics under quite mild and natural conditions.}
\noindent{\bf Keywords:} trimmed $L$-statistics, central limit theorem, large deviations, moderate deviations.
\noindent{\bf MSC:} Primary: 62G30, 62E20; Secondary: 60F05, 60F10.
\end{quote}
\section{Introduction and main results}
\label{imtro}
Consider a~sequence $X_1,X_2,\dots $ of independent identically
distributed real-valued random variables with distribution function $F$, and let
$X_{1:n}\le \dots \le X_{n:n}$ denote the
order statistics corresponding to the first $n$ observations.
Define the trimmed L-statistic by
\begin{equation}
\label{tn}
L_n=n^{-1}\sum_{i=k_n+1}^{n-m_n}c_{i,n}X_{i:n},
\end{equation}
where $c_{i,n}\in \mathbb{R}$, \ $k_n$, $m_n$ are two sequences of integers such that $0\le k_n<n-m_n\le n$. Put
$\alpha_n=k_n/n$, \ $\beta_n=m_n/n$. Throughout this paper, we suppose that
$\alpha_n \to \alpha$, $\beta_n\to \beta$, as $n\to\infty$, where $0<\alpha<1-\beta<1$, i.e. we focus on the case of heavy trimmed $L$-statistic.
In this paper we investigate Cram\'{e}r type large deviations, i.e. relative errors in the central limit theorem
for $L_{n}$. First we note that in the case of non-trimmed $L$-statistic ($k_n=m_n=0$) with the coefficients $c_{i,n}$ generated by a smooth weight function the
Cram\'{e}r type large and moderate deviations were studied in a~number of papers (see Vandemaele and Veraverbeke~\cite{vv82}, Bentkus and Zitikis~\cite{bz90}, Aleskeviciene~\cite{al91}). In contrast, to the best of our knowledge, there exists a~sole paper -- Callaert et al.~\cite{cvv82} -- devoted to the large deviations for the trimmed $L$-statistics.
However, the result in~\cite{cvv82} was obtained under some restrictive and unnatural conditions
imposed on the underlying distribution $F$ and the weights. The method of proof in Callaert et al.~\cite{cvv82} is based on the following two well-known facts:
1. The joint distribution
of $X_{i:n}$ coincides with the joint distribution of $F^{-1}(G(Z_{i:n}))$, $i=1,\dots,n$, where $G$ is the distribution function of the standard exponential distribution, $Z_{i:n}$ are the order statistics corresponding to a~sample of $n$ independent random variables from the distribution $G$.
2. The order statistics $Z_{i:n}$ are distributed as $\sum_{k=1}^iZ_k/(n-k+1)$, where the $Z_k$ are independent standard exponential random variables.
These two facts and the Taylor expansion together enable one to get an~approximation of $L_{n}$ by a~sum of weighted i.i.d. random variables for which some suitable known result on Cram\'{e}r type large deviations can be applied. This approach was first implemented by Bjerve~\cite{bj77} to prove a~Berry-Esseen type result for the $L$-statistics. However, use of this method requires excessive smoothness conditions imposed on $F$ and leads to the unnatural and complicated normalization of the $L$-statistic (cf.~Callaert et al.~\cite{cvv82}).
In this article, we propose another approach to the investigation of asymptotic properties of the trimmed $L$-statistics
different from that used in Bjerve~\cite{bj77} and Callaert et al.~\cite{cvv82}.
Our idea is to approximate the trimmed $L$-statistic by a~non-trimmed $L$-statistic with weights generated by a~smooth weight function, where the approximating $L$-statistic is based on the order statistics corresponding to a~sample of $n$ i.i.d. Winsorized random variables. The asymptotic properties that we are interested in are often
well studied in the case of $L$-statistics with a~smooth weight function and bounded observations; this allows us to obtain a~desired result for the trimmed $L$-statistic by applying a~result of the corresponding type to the approximating non-trimmed $L$-statistic, so it remains only to evaluate the remainder in the approximation.
Here, we apply our method to obtain a~result on probabilities of large
deviations for the trimmed $L$-statistics, and we establish it under mild and natural conditions. Our result on large deviations can thus be viewed as a~strengthening of the result from Callaert et al.~\cite{cvv82}.
To conclude this introduction, we give a~brief review of the relevant literature.
The class of $L$-statistics is one of the most commonly used classes in statistical inferences. We refer to
monographs by David and Nagaraja~\cite{david_2003}, Serfling~\cite{serfling_90}, Shorack and Wellner~\cite{shorack}, van der Vaart~\cite{vanderv} for an~introduction to the theory and applications of $L$-statistics.
There is a~vast literature on asymptotic properties of $L$-statistics.
Since we focus on the case of heavy trimmed $L$-statistics, we will mention mainly sources appropriate to our case. The most significant contribution to the establishment of the central limit theorem for (trimmed) $L$-statistics was made by Shorack ~\cite{shor69}-\cite{shor72} and Stigler~\cite{s69}-\cite{s74}. Mason and Shorack~\cite{mas_shor_90} obtained the necessary and sufficient conditions for the asymptotic normality of the trimmed $L$-statistics. The Berry -- Esseen type bounds under different sets of conditions were obtained by Bjerve~\cite{bj77}, Helmers~\cite{helm81}-\cite{helm_e2}, Gribkova \cite{gri}. A~great contribution to the research of second order asymptotic properties for $L$-statistic was done by Helmers~\cite{helm80}-\cite{helm_e2}, who established the Edgeworth expansions for the (trimmed) L-statistics.
In papers by Bentkus et al.~\cite{bgz}, Friedrich~\cite{friedrich}, Putter and van Zwet~\cite{pz} and van Zwet~\cite{zwet}, the Berry--Esseen type bounds and Edgeworth expansions for $L$-statistics were derived as the consequences of the very general results for symmetric statistics established in these papers.
Some interesting results on Chernoff's type large deviations
(for non-trimmed $L$-statistics with smooth weight function) were obtained by Boistard~\cite{boi}. Recently, Gao and Zhao~\cite{gao_zhao} proposed a~general delta method in the theory of Chernoff's type large deviations and illustrated it by many examples including M-estimators and L-statistics.
A~survey on the $L$-statistics and some modern applications of them in the economy and theory of actuarial risks can be found in Greselin~et~al.~\cite{zit_2009}.
We will now proceed to the statement of our results. Define the left-continuous inverse of $F$:
$F^{-1}(u)= \inf \{ x: F(x) \ge u \}$, \ $0<u\le 1$, \
$F^{-1}(0)=F^{-1}(0^+)$, and let $F_n$, $F_n^{-1}$ denote the
empirical distribution function and its inverse respectively.
Let $J$ be a~function defined in an~open set $I$ such that $[\alpha,1-\beta]\subset I\subseteq(0,1)$.
We will also consider the trimmed $L$-statistics with coefficients generated by the weight function $J$
\begin{equation}
\label{tn0}
L_{n}^0=n^{-1}\sum_{i=k_n+1}^{n-m_n}c_{i,n}^0X_{i:n}=\int_{\alpha_n}^{1-\beta_n}J(u)F_n^{-1}(u)\,du,
\end{equation}
where $c_{i,n}^0=n\int_{(i-1)/n}^{i/n}J(u)\,du$.
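For illustration, $L_n$ and $L_{n}^0$ are straightforward to compute for a given weight function; in the following sketch the coefficients $c_{i,n}^0$ of \eqref{tn0} are obtained by numerical quadrature (the quadrature routine and the function names are our own choices).
\begin{verbatim}
import numpy as np
from scipy.integrate import quad

def trimmed_L0(x, k, m, J):
    """L_n^0 with c^0_{i,n} = n * int_{(i-1)/n}^{i/n} J(u) du, i = k+1, ..., n-m."""
    xs = np.sort(np.asarray(x, dtype=float))
    n = len(xs)
    total = 0.0
    for i in range(k + 1, n - m + 1):
        c = n * quad(J, (i - 1) / n, i / n)[0]
        total += c * xs[i - 1]                  # xs[i-1] is the i-th order statistic
    return total / n
\end{verbatim}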
To state our results, we will need the following set of assumptions.
\noindent{\bf (i)} {\it $J$ is Lipschitz in $I$, i.e. there exists a~constant $C\ge 0$ such that}
\begin{equation}
\label{LipJ}
|J(u)-J(v)|\le C|u-v|,\quad \forall \ \ u,\,v\in I.
\end{equation}
\noindent{\bf (ii)} {\it $F^{-1}$ satisfies
a~H\"{o}lder condition of order $0<\varepsilon\le 1$ in some neighborhoods $U_{\alphaha}$ and $U_{1-\betata}$
of $\alphaha$ and $1-\betata$.}
\noindent{\bf (iii)} {\it $\max(|\alphaha_n-\alphaha|,\,|\betata_n-\betata|)=O\bigl( n^{-\frac 1{2+\varepsilon}}\bigr)$, where $\varepsilon$ is the
H\"{o}lder index from condition}~{\bf (ii)}.
\noindent{\bf (iv)} {\it with $\varepsilon$ from conditions} {\bf (ii)}-{\bf (iii)}
$$
\sum_{i=k_n+1}^{n-m_n}|c_{i,n}-c_{i,n}^0|=O(n^{\frac 1{2+\varepsilon}}).
$$
Define a~sequence of centering constants
\begin{equation}
\label{mun}
\mu_n=\int_{\alpha_n}^{1-\beta_n}J(u)F^{-1}(u)\,du.
\end{equation}
Since $\alpha_n\to\alpha$, $\beta_n\to\beta$ as $n \to \infty$, both variables $L_{n}^0$ and $\mu_n$ are well defined for all sufficiently large $n$.
It is well known (cf.,~e.g.,~\cite{mas_shor_90},~\cite{s74},~\cite{vanderv}) that when the inverse $F^{-1}$ is continuous at two points $\alpha$ and $1-\beta$, smoothness condition~\eqref{LipJ} implies the weak convergence to the normal law: $\sqrt{n}(L_{n}^0-\mu_n)\Rightarrow N(0,\sigma^2)$, where
\begin{equation}
\label{sigma}
\sigma^2=\sigma^2(J,F)=\int_{\alpha}^{1-\beta}\int_{\alpha}^{1-\beta} J(u)J(v)(u\wedge v-uv)\, dF^{-1}(u)\,dF^{-1}(v),
\end{equation}
and $u\wedge v=\min(u,v)$. Here and in the sequel, we use the convention that $\int_a^b=\int_{[a,b)}$ when integrating with respect to the left
continuous integrator $F^{-1}$. Throughout the article, we assume $\sigma>0$.
Define the distribution functions of the normalized $L_{n}$ and $L_{n}^0$, respectively:
\begin{equation}
\label{dfs}
F_{L_{n}}(x) =\textbf{P}\{\sqrt{n}(L_{n}-\mu_n)/\sigma\le x\},\quad F_{L_{n}^0}(x) =\textbf{P}\{\sqrt{n}(L_{n}^0-\mu_n)/\sigma\le x\}.
\end{equation}
Let $\Phi$ denote the standard normal distribution function. Here is our first result on Cram\'{e}r type large deviations for $L_{n}$.
\begin{theorem}
\label{thm1}
Suppose that $F^{-1}$ satisfies condition {\bf (ii)} for some $0<\varepsilon\le 1$ and the sequences $\alpha_n$ and $\beta_n$ satisfy {\bf (iii)}. In addition, assume that the weights $c_{i,n}$ satisfy {\bf (iv)} for some function $J$ satisfying condition {\bf (i)}.
Then for every sequence $a_n\to 0$ and each $A>0$
\begin{equation}
\label{thm_1}
\begin{split}
1- F_{L_{n}}(x) &= [1-\Phi(x)](1+o(1)),\\
F_{L_{n}}(-x)&=\Phi(-x)(1+o(1)),
\end{split}
\end{equation}
as \,$n \to \infty$, uniformly in the range $-A\le x\le a_n n^{\varepsilon/(2(2+\varepsilon))}$.
\end{theorem}
The proof of our main results is relegated to Section~3. Theorem~\ref{thm1} directly implies the following two corollaries.
\begin{corollary}
\label{cor1}
Suppose that the conditions of Theorem~\ref{thm1} are satisfied with $\varepsilon=1$, i.e. $F^{-1}$ is Lipschitz in
some neighborhoods $U_{\alpha}$ and $U_{1-\beta}$
of $\alpha$ and $1-\beta$. Then for every sequence $a_n\to 0$ and each $A>0$ relations~\eqref{thm_1} hold true, uniformly in the range $-A\le x\le a_n n^{1/6}$.
\end{corollary}
\begin{corollary}
\label{cor2}
Let $c_{i,n}=c_{i,n}^0=n\int_{(i-1)/n}^{i/n}J(u)\,du$ \,$(k_n+1 \le i\le n-m_n)$, where $J$ is a~function satisfying
{\bf (i)}. Furthermore, assume that conditions {\bf (ii)} and {\bf (iii)} hold for some $0<\varepsilon\le 1$.
Then relations~\eqref{thm_1} with $L_{n}=L_{n}^0$ hold true for every sequence $a_n\to 0$ and each $A>0$, uniformly in the range $-A\le x\le a_n n^{\varepsilon/(2(2+\varepsilon))}$.
\end{corollary}
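Let us illustrate condition {\bf (ii)} with a simple sufficient condition. If $F$ has a~positive continuous density $f$ in some neighborhoods of the quantiles $\xi_{\alpha}=F^{-1}(\alpha)$ and $\xi_{1-\beta}=F^{-1}(1-\beta)$, then in some neighborhoods of $\alpha$ and $1-\beta$
\begin{equation*}
\bigl(F^{-1}\bigr)'(u)=\frac 1{f(F^{-1}(u))}
\end{equation*}
is bounded, so that condition {\bf (ii)} holds with $\varepsilon=1$ and Corollary~\ref{cor1} applies.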
Theorem~\ref{thm1} can be compared with the~result by~Callaert et al.~\cite{cvv82}, where it was assumed that the derivative $H'=(F^{-1}\circ G)'$ exists and satisfies a~H\"{o}lder condition of order $0<\varepsilon\le 1$ in some
open set containing $[G^{-1}(\alpha),G^{-1}(1-\beta)]$, where $G$ is the standard exponential distribution function. Moreover, some unnatural condition was imposed on the weights and $H'$
(cf.\ conditions~(A2) and~(B) in Callaert et al.~\cite{cvv82}). In contrast, we use the natural scale parameter $\sigma$ -- the square root of the asymptotic variance
of $L_{n}$ -- for the normalization, and our smoothness condition {\bf (ii)} for $F^{-1}$ is much weaker than the one in Callaert et al.~\cite{cvv82}.
Our Theorem~\ref{thm1} is also related to previous results by Vandemaele and Veraverbeke~\cite{vv82} and Bentkus and Zitikis~\cite{bz90} on Cram\'{e}r type large deviations for non-trimmed $L$-statistics with smooth weight function. The method of proof in the first of these articles was based on Helmers's~\cite{helm81}-\cite{helm_e2} $U$-statistic approximation, whereas in the second one an~$\omega^2$-von Mises statistic type approximation was applied.
We approximate our trimmed $L$-statistic by $L$-statistics with smooth weight function.
Moreover, we apply the results from the papers mentioned to our approximating non-trimmed $L$-statistic when proving Theorem~\ref{thm1}. Note also that Cram\'{e}r's moment conditions
for the underlying distribution assumed in the cited papers are not needed in the case of the trimmed $L$-statistics, whereas the smoothness of $F^{-1}$ near $\alpha$ and $1-\beta$ becomes essential for the Cram\'{e}r type large deviations results.
Finally, we state a~version of Theorem~\ref{thm1} in which the scale factor $\sigma/n^{1/2}$ is replaced by $\sqrt{{\text{\em Var}}(L_{n})}$; it is parallel to Theorem~2~(ii) by Vandemaele and Veraverbeke~\cite{vv82}, but now for the trimmed $L$-statistics.
We will need the following two somewhat stronger versions of conditions {\bf (iii)} and {\bf (iv)}.
\noindent{\bf (iii')} \ {\it $\max(|\alpha_n-\alpha|,\,|\beta_n-\beta|)=O\Bigl( n^{-\frac{1}{2+\varepsilon}\left[ 1 + \frac{\varepsilon(1-\varepsilon)}{2}\right]}(\log n)^{-\frac{\varepsilon}{2}}\Bigr)$, where $\varepsilon$ is the
H\"{o}lder index from condition}~{\bf (ii)}.
\noindent{\bf (iv')} {\it with $\varepsilon$ from conditions} {\bf (ii)}-{\bf (iii')}
$$
\sum_{i=k_n+1}^{n-m_n}|c_{i,n}-c_{i,n}^0|=O\Bigl( n^{\frac 1{2+\varepsilon}\left[1-\frac{\varepsilon}{2}\right]}\Bigr).
$$
\begin{theorem}
\label{thm2}
Suppose that the conditions of Theorem~\ref{thm1} are satisfied, where {\bf (iii)} and {\bf (iv)} are replaced by {\bf (iii')} and {\bf (iv')}, respectively. In addition, assume that
${\text{\em Var}}(L_{n})<\infty$ for all sufficiently large $n$. Then
\begin{equation}
\label{thm_2}
n \sigma^{-2}{\text{\em Var}}(L_{n})=1 + O\bigl(n^{-\frac{\varepsilon}{2+\varepsilon}}\bigr).
\end{equation}
Furthermore, relations~\eqref{thm_1}, where $\sigma/n^{1/2}$ is replaced by $\sqrt{{\text{\em Var}}(L_{n})}$, hold true for every sequence $a_n\to 0$ and each $A>0$
as \,$n \to \infty$, uniformly in the range $-A\le x\le a_n n^{\varepsilon/(2(2+\varepsilon))}$.
\end{theorem}
Note that in the case of heavy trimmed $L$-statistics the condition $\textbf{E}|X_1|^{\gamma}<\infty$ (for some $\gamma>0$) is sufficient for the finiteness of ${\text{\em Var}}(L_{n})$ when $n$ gets large.
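For the reader's convenience we sketch why this is so. Writing $X_{i:n}=F^{-1}(U_{i:n})$, where $U_{i:n}$ are the order statistics of a uniform $(0,1)$ sample, we have, for $k_n<i\le n-m_n$,
$$
\textbf{E}X_{i:n}^2=\frac{n!}{(i-1)!\,(n-i)!}\int_0^1 \bigl(F^{-1}(u)\bigr)^2u^{i-1}(1-u)^{n-i}\,du,
$$
and $\textbf{E}|X_1|^{\gamma}<\infty$ implies, by Markov's inequality, that $|F^{-1}(u)|\le \bigl(\textbf{E}|X_1|^{\gamma}\bigr)^{1/\gamma}\bigl(u\wedge(1-u)\bigr)^{-1/\gamma}$, $0<u<1$. Since $i-1\ge k_n\sim \alpha n$ and $n-i\ge m_n\sim \beta n$, the integrals above are finite for all sufficiently large $n$, and hence so is ${\text{\em Var}}(L_{n})$.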
\section{Our method (representation for $L_{n}^0$ by a~non-trimmed L-statistic)}
\label{lemmas}
Let $\xi_{\nu}=F^{-1}(\nu)$, $0<\nu<1$, be the $\nu$-th quantile of $F$, and let $W_i$ denote $X_i$ Winsorized outside of $(\xi_{\alpha},\xi_{1-\beta}]$. In other words,
\begin{equation}
\label{2_2}
W_i=\left\{
\begin{array}{ll}
\xi_{\alpha},& X_i\le \xi_{\alpha}, \\
X_i,& \xi_{\alpha} < X_i \le \xi_{1-\beta},\\
\xi_{1-\beta},& \xi_{1-\beta} < X_i .
\end{array}
\right.
\end{equation}
Let $W_{i:n}$ denote the order statistics corresponding to the sample $W_1,\dots,W_n$ of $n$ i.i.d. auxiliary random variables.
Define the distribution function $G(x)=\textbf{P}\{W_i\le x\}$ of $W_i$, the corresponding quantile function is equal to
$G^{-1}(u)= \xi_{\alpha} \vee (F^{-1}(u) \wedge \xi_{1-\beta})$. Here and further on, $(a\vee b)=\max(a,b)$. Let $G_n$ and $G_n^{-1}$ denote the corresponding empirical distribution function and its inverse, respectively.
We will approximate $L_{n}$ by a~linear combination of the order statistics $W_{i:n}$ with coefficients generated by the weight function
\begin{equation}
\label{2_3}
J_w(u)=\left\{
\begin{array}{ll}
J(\alpha),& u\le \alpha, \\
J(u),& \alpha < u \le 1-\beta,\\
J(1-\beta),& 1-\beta < u,
\end{array}
\right.
\end{equation}
which is defined in $[0,1]$. It is obvious that if $J$ is Lipschitz in $I$, i.e. satisfies condition \eqref{LipJ} with some positive constant~$C$, then the function $J_w$ is Lipschitz in $[0,1]$ with some constant $C_w\le C$.
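Indeed, in the least obvious configuration $u\le\alpha<v\le 1-\beta$ one has
\begin{equation*}
|J_w(u)-J_w(v)|=|J(\alpha)-J(v)|\le C|\alpha-v|\le C|u-v|,
\end{equation*}
and the remaining configurations of $u,v\in[0,1]$ are handled in the same way.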
Consider the auxiliary non-truncated $L$-statistic given by
\begin{equation}
\label{Ln}
\widetilde{L}_n=n^{-1}\sum_{i=1}^{n}\widetilde{c}_{i,n}W_{i:n}=\int_0^1 J_w(u)G_n^{-1}(u)\,du,
\end{equation}
where $\widetilde{c}_{i,n}=n\int_{(i-1)/n}^{i/n}J_w(u)\,du$. Define the centering constant
\begin{equation}
\label{muL}
\mu_{\widetilde{L}_n}=\int_0^1 J_w(u)G^{-1}(u)\,du.
\end{equation}
Since $W_i$ has finite moments of all orders and because $J_w$ is Lipschitz, the distribution of the normalized $\widetilde{L}_n$ tends to the standard normal law (see, e.g.,~\cite{s74}):
\begin{equation*}
\label{Ln_to}
\sqrt{n}(\widetilde{L}_n-\mu_{\widetilde{L}_n})/\sigma(J_w,G) \Rightarrow N(0,1),
\end{equation*}
where the asymptotic variance is
\begin{equation}
\label{sigma_w}
\sigma^2(J_w,G)=\int_0^1\int_0^1 J_w(u)J_w(v)(u\wedge v-uv)\, dG^{-1}(u)\,dG^{-1}(v).
\end{equation}
Observe that for $u\in(\alpha,1-\beta]$ we have $J_w(u)=J(u)$, $G^{-1}(u)=F^{-1}(u)$, and that $dG^{-1}(u)\equiv 0$ for $u\notin(\alpha,1-\beta]$. This yields the equality of the asymptotic variances
\begin{equation}
\label{sigma_eq}
\sigma^2(J_w,G)=\sigma^2(J,F)=\sigma^2
\end{equation}
of the truncated $L$-statistic $L_{n}^0$ and the non-truncated $L$-statistic $\widetilde{L}_n$ based on the Winsorized random variables.
Define the binomial random variable $N_{\nu}= \sharp \{i : X_i \le
\xi_{\nu} \}$, where $0<\nu <1$. Our representation for $L_{n}^0$ is based on the following simple observation:
\begin{equation}
\label{observ}
W_{i:n}=\left\{
\begin{array}{ll}
\xi_{\alpha},& i\le N_{\alpha}, \\
X_{i:n},& N_{\alpha} < i \le N_{1-\beta},\\
\xi_{1-\beta},& i> N_{1-\beta}.
\end{array}
\right.
\end{equation}
Put $A_n=N_{\alpha}/n$, \ $B_n=(n-N_{1-\beta})/n$.
The following lemma provides a~useful representation which is crucial in the proof of our main results.
\begin{lemma}
\label{lem_2.1}
\begin{equation}
\label{lem_2.1_1}
L_{n}^0-\mu_n=\widetilde{L}_n-\mu_{\widetilde{L}_n}+R_n,
\end{equation}
where $R_n=R_n^{(1)}+R_n^{(2)}$,
\begin{equation}
\label{lem_2.1_2}
R_n^{(1)}=\int_{\alpha}^{A_n} J_w(u)[F_n^{-1}(u)-\xi_{\alpha}]\,du-\int_{1-\beta}^{1-B_n} J_w(u)[F_n^{-1}(u)-\xi_{1-\beta}]\,du
\end{equation}
and
\begin{equation}
\label{lem_2.1_3}
R_n^{(2)}=\int_{\alpha_n}^{\alpha} J(u)[F_n^{-1}(u)-F^{-1}(u)]\,du-\int_{1-\beta_n}^{1-\beta} J(u)[F_n^{-1}(u)-F^{-1}(u)]\,du.
\end{equation}
\end{lemma}
\noindent{\bf Proof.} \
First, consider the difference between the centering constants. We obtain
\begin{equation}
\label{ptf_lem_1}
\begin{split}
\mu_{\widetilde{L}_n}-\mu_n=&\int_0^1 J_w(u)G^{-1}(u)\,du - \int_{\alpha_n}^{1-\beta_n}J(u)F^{-1}(u)\,du=\alpha J(\alpha)\xi_{\alpha} \\
&+\beta J(1-\beta)\xi_{1-\beta} -\int_{\alpha_n}^{\alpha} J(u)F^{-1}(u)\,du + \int_{1-\beta_n}^{1-\beta} J(u)F^{-1}(u)\,du .
\end{split}
\end{equation}
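Here we used that $G^{-1}\equiv\xi_{\alpha}$ on $(0,\alpha]$, $G^{-1}\equiv\xi_{1-\beta}$ on $(1-\beta,1)$, $G^{-1}=F^{-1}$ on $(\alpha,1-\beta]$, and that $J_w=J(\alpha)$ on $[0,\alpha]$, $J_w=J(1-\beta)$ on $(1-\beta,1]$ and $J_w=J$ on $(\alpha,1-\beta]$, so that
\begin{equation*}
\int_0^{\alpha} J_w(u)G^{-1}(u)\,du=\alpha J(\alpha)\xi_{\alpha},\qquad \int_{1-\beta}^{1} J_w(u)G^{-1}(u)\,du=\beta J(1-\beta)\xi_{1-\beta}.
\end{equation*}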
For the difference between $L_{n}^0$ and $\widetilde{L}_n$, after some simple computations we get
\begin{equation}
\label{ptf_lem_2}
\begin{split}
L_{n}^0-\widetilde{L}_n=&\int_{\alpha}^{1-\beta}J(u)[F_n^{-1}(u)-G_n^{-1}(u)]\,du\\
&+ \int_{\alpha_n}^{\alpha} J(u)F_n^{-1}(u)\,du- \int_{1-\beta_n}^{1-\beta} J(u)F_n^{-1}(u)\,du\\ &-J(\alpha)\int_0^{\alpha}G_n^{-1}(u)\,du
-J(1-\beta)\int_{1-\beta}^1 G_n^{-1}(u)\,du.
\end{split}
\end{equation}
Relations~\eqref{ptf_lem_1} and \eqref{ptf_lem_2} together imply
\begin{equation}
\label{ptf_lem_3}
L_{n}^0-\widetilde{L}_n +(\mu_{\widetilde{L}_n}-\mu_n)= D_n+ R_n^{(2)},
\end{equation}
where
\begin{equation*}
\label{ptf_lem_4}
\begin{split}
D_n := &\int_{\alpha}^{1-\beta}J(u)[F_n^{-1}(u)-G_n^{-1}(u)]\,du+ \\
&J(\alpha)\left[ \alpha\,\xi_{\alpha} - \int_0^{\alpha}G_n^{-1}(u)\,du\right] + J(1-\beta) \left[ \beta\,\xi_{1-\beta}- \int_{1-\beta}^1 G_n^{-1}(u)\,du\right].
\end{split}
\end{equation*}
It remains to show that $D_n=R_n^{(1)}$. Let us consider three of the six possible cases (the treatment of the three other cases is similar and therefore omitted). We use the fact that $F_n^{-1}(u)=G_n^{-1}(u)$ for $A_n<u\le 1-B_n$, \ $G_n^{-1}(u)=\xi_{\alpha}$ for $u \le A_n$ and $G_n^{-1}(u)=\xi_{1-\beta}$ \ for $u>1-B_n$.\\
\noindent{\bf Case 1}. $\alpha\le A_n \le 1-B_n<1-\beta$. In this case the second and third terms of $D_n$ are equal to zero, and the first one yields
\begin{equation}
\label{ptf_lem_5}
\begin{split}
D_n =\int_{\alpha}^{A_n}J(u)[F_n^{-1}(u)-\xi_{\alpha}]\,du+ \int_{1-B_n}^{1-\beta} J(u) [F_n^{-1}(u)-\xi_{1-\beta}]\,du,
\end{split}
\end{equation}
and since $J(u)=J_w(u)$ for $\alpha<u\le 1-\beta$, we obtain the desired equality.\\
\noindent{\bf Case 2}. $\alpha\le A_n\le 1-\beta < 1-B_n$. In this case we have
\begin{equation}
\label{ptf_lem_6}
\begin{split}
D_n =& \int_{\alpha}^{A_n}J(u)[F_n^{-1}(u)-\xi_{\alpha}]\,du\\
+ &J(1-\beta) \left[ \beta\,\xi_{1-\beta}- \int_{1-\beta}^{1-B_n} F_n^{-1}(u)\,du -B_n\,\xi_{1-\beta}\right]\\
=&\int_{\alpha}^{A_n}J(u)[F_n^{-1}(u)-\xi_{\alpha}]\,du- \int_{1-\beta}^{1-B_n}J(1-\beta) [F_n^{-1}(u)-\xi_{1-\beta}]\,du,
\end{split}
\end{equation}
and since $J(u)=J_w(u)$ for $\alpha<u\le A_n$ and $J(1-\beta)=J_w(u)$ for $u>1-\beta$, the expression on the r.h.s. in \eqref{ptf_lem_6} is equal to $R_n^{(1)}$. \\
\noindent{\bf Case 3}. $1-\beta\le A_n\le 1-B_n$. In this case $D_n$ can be written as
\begin{equation}
\label{ptf_lem_7}
\begin{split}
& \int_{\alpha}^{A_n}J_w(u)[F_n^{-1}(u)-\xi_{\alpha}]\,du\\
&- J(1-\beta)\int_{1-\beta}^{A_n} F_n^{-1}(u)\,du + J(1-\beta)\xi_{\alpha}(A_n-(1-\beta))\\
&+J(1-\beta) \left[ \beta\,\xi_{1-\beta}-\xi_{\alpha}(A_n-(1-\beta))- \int_{A_n}^{1-B_n} F_n^{-1}(u)\,du -B_n\,\xi_{1-\beta}\right]\\
=&\int_{\alpha}^{A_n}J_w(u)[F_n^{-1}(u)-\xi_{\alpha}]\,du- \int_{1-\beta}^{1-B_n}J(1-\beta) [F_n^{-1}(u)-\xi_{1-\beta}]\,du =R_n^{(1)}.
\end{split}
\end{equation}
This completes the proof of representation~\eqref{lem_2.1_1}. The lemma is proved. \qed
In conclusion of this section, we note that the idea of the $L$-statistic approximation emerged from the observation that the asymptotic variances of $L_{n}^0$ and of the non-trimmed $L$-statistic $\widetilde{L}_n$ based on the Winsorized random variables coincide.
This idea of $L$-statistic approximation can also be regarded as an~extension of the one used in Gribkova and Helmers~\cite{gh2006}-\cite{gh2007} and~\cite{gh2014} (where the second order asymptotic properties -- the Berry--Esseen bounds and Edgeworth type expansions -- were established for (slightly) trimmed means and their studentized versions) to the case of trimmed $L$-statistics. In the papers mentioned, we constructed the $U$-statistic type approximations for (slightly) trimmed means using sums of i.i.d. Winsorized observations as the linear $U$-statistic terms; in order to get the quadratic terms, we applied some special Bahadur--Kiefer representations of von Mises statistic type for (intermediate) sample quantiles (cf.~Gribkova and Helmers~\cite{gh2011}).
\section{Proof of Theorems~\ref{thm1} and~\ref{thm2} }
\label{proof}
\noindent{\bf Proof of Theorem~\ref{thm1}}.
Obviously, it suffices to prove the first of relations~\eqref{thm_1}. Set
\begin{equation}
\label{2_1}
V_n=L_{n}-L_{n}^0=n^{-1}\sum_{i=k_n+1}^{n-m_n}(c_{i,n} -c_{i,n}^0)X_{i:n}.
\end{equation}
Lemma~\ref{lem_2.1} and relation~\eqref{2_1} together yield
\begin{equation}
\label{proof_1}
L_{n}-\mu_n=\widetilde{L}_n-\mu_{\widetilde{L}_n}+R_n +V_n.
\end{equation}
In view of the classical Slutsky argument applied to~\eqref{proof_1}, $1- F_{L_{n}}(x)$ is bounded above and below by
\begin{equation}
\label{proof_2}
\textbf{P}\{\sqrt{n}(\widetilde{L}_n-\mu_{\widetilde{L}_n})/\sigma> x-2\delta \}+\textbf{P}\{\sqrt{n}|R_n|/\sigma> \delta\}+\textbf{P}\{\sqrt{n}|V_n|/\sigma> \delta\}
\end{equation}
and
\begin{equation}
\label{proof_3}
\textbf{P}\{\sqrt{n}(\widetilde{L}_n-\mu_{\widetilde{L}_n})/\sigma> x+2\delta \}-\textbf{P}\{\sqrt{n}|R_n|/\sigma> \delta\}-\textbf{P}\{\sqrt{n}|V_n|/\sigma> \delta\}
\end{equation}
respectively, for each $\delta>0$.
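Indeed, by \eqref{proof_1}, the upper bound \eqref{proof_2} follows from the inclusion
\begin{equation*}
\bigl\{\sqrt{n}(L_{n}-\mu_n)/\sigma> x\bigr\}\subseteq \bigl\{\sqrt{n}(\widetilde{L}_n-\mu_{\widetilde{L}_n})/\sigma> x-2\delta\bigr\}\cup\bigl\{\sqrt{n}|R_n|/\sigma> \delta\bigr\}\cup\bigl\{\sqrt{n}|V_n|/\sigma> \delta\bigr\},
\end{equation*}
and \eqref{proof_3} follows similarly from $\{\sqrt{n}(\widetilde{L}_n-\mu_{\widetilde{L}_n})/\sigma> x+2\delta\}\subseteq\{\sqrt{n}(L_{n}-\mu_n)/\sigma> x\}\cup\{\sqrt{n}|R_n|/\sigma> \delta\}\cup\{\sqrt{n}|V_n|/\sigma> \delta\}$.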
Let $z_n=n^{\varepsilon/(2(2+\varepsilon))}$. Fix an~arbitrary sequence $a_n\to 0$ and $A>0$. Without loss of generality we may assume that $a_n\ge 1/\log(1+n)$ (otherwise, we may replace $a_n$ by the~new sequence $a_n'=\max(a_n ,\, 1/\log(1+n))\ge a_n$ without affecting the result). Set $\delta=\delta_n=a_n^{-1/2}/z_n$.
From~\eqref{proof_2} and~\eqref{proof_3} it immediately follows that to prove our theorem it suffices to show that
\begin{equation}
\label{proof_4}
\textbf{P}\{\sqrt{n}(\widetilde{L}_n-\mu_{\widetilde{L}_n})/\sigma> x \pm 2\delta\}=[1-\Phi(x)](1+o(1)) ,
\end{equation}
\begin{equation}
\label{proof_5}
\quad \quad \ \ \,\textbf{P}\{\sqrt{n}|R_n|/\sigma> \delta\}=[1-\Phi(x)]o(1) ,
\end{equation}
\begin{equation}
\label{proof_6}
\quad \quad \ \ \, \textbf{P}\{\sqrt{n}|V_n|/\sigma> \delta\}=[1-\Phi(x)]o(1) ,
\end{equation}
uniformly in the range $-A\le x\le a_n z_n$.
\noindent{\bf Proof of \eqref{proof_4}}. Since $\widetilde{L}_n$ is a~non-truncated linear combination of order statistics corresponding to the sample $W_1,\dots,W_n$ of i.i.d. bounded random variables and because its weight function $J_w$ is Lipschitz in $[0,1]$, we can apply the results on probabilities of large deviations by Vandemaele and Veraverbeke~\cite{vv82} and by Bentkus and Zitikis~\cite{bz90}. Set $B=A+2\sup_{n\ge 1}\delta_n$ and $b_n=a_n+2\delta_n$. Since $a_n\ge 1/\log(1+n)$, the number $B$ is finite, and $b_n\to 0$. Then, by Theorem~2~({\em i}) of Vandemaele and Veraverbeke~\cite{vv82} for $x$ with $-B\le x \pm 2\delta <0$, and by Theorem~1.1 of Bentkus and Zitikis~\cite{bz90} for $x$ with $0 \le x \pm 2\delta \le b_n n^{1/6}$, we obtain
\begin{equation}
\label{proof_7}
\textbf{P}\{\sqrt{n}(\widetilde{L}_n-\mu_{\widetilde{L}_n})/\sigma> x \pm 2\delta \}=[1-\Phi(x \pm 2\delta)](1+o(1)) ,
\end{equation}
uniformly with respect to $x$ such that $-B\le x \pm 2\delta \le b_n n^{1/6}$. In particular, relation~\eqref{proof_7} holds true
uniformly in the range $-A\le x\le a_n n^{1/6}$. To prove~\eqref{proof_4}, it remains to note that
since $2\,\delta a_n z_n =2\sqrt{a_n}\to 0$, Lemma~A.1 from Vandemaele and Veraverbeke~\cite{vv82} now yields
\begin{equation}
\label{proof_8}
1-\Phi(x \pm 2\delta)=[1-\Phi(x)](1+o(1)),
\end{equation}
as $n \to \infty$, uniformly in the range $-A\le x\le a_n z_n$.
\noindent{\bf Proof of \eqref{proof_5}}. Let $I_1^{(j)}$ and $I_2^{(j)}$ denote the first and the second terms of $R^{(j)}_n$ (cf.~\eqref{lem_2.1_2}--\eqref{lem_2.1_3}) respectively, $j=1,2$. In this notation, $R_n=I_1^{(1)}-I_2^{(1)}+I_1^{(2)}-I_2^{(2)}$ and
\begin{equation}
\label{proof_9}
\textbf{P}\{\sqrt{n}|R_n|/\sigma> \delta\}\le \sum_{k=1}^2 \textbf{P}\{\sqrt{n}|I^{(1)}_k|/\sigma> \delta/4\}
+\sum_{k=1}^2 \textbf{P}\{\sqrt{n}|I^{(2)}_k|/\sigma> \delta/4\} .
\end{equation}
Thus, it suffices to show that for each positive $C$ (in particular, for $C=\sigma/4$),
\begin{equation}
\label{proof_10}
\textbf{P}\{\sqrt{n}|I^{(j)}_k|>C \delta \}=[1-\Phi(x)]o(1) , \text{\ \ $k,j=1,2,$}
\end{equation}
as $n \to \infty$, uniformly in the range $-A\le x\le a_n z_n$. We will prove~\eqref{proof_10} for $I^{(1)}_1$ and $I^{(2)}_1$ (the treatment of $I^{(1)}_2$ and $I^{(2)}_2$ is similar and therefore omitted).
Consider $I^{(1)}_1$. First, note that if $\alpha < A_n$, then $\max_{u\in (\alpha,A_n)} |F_n^{-1}(u)-\xi_{\alpha}|=\xi_{\alpha} -X_{[n\alpha]+1:n}\le \xi_{\alpha} -X_{[n\alpha]:n}$, as $F_n^{-1}$ is monotonic. Here and in what follows, $[x]$ denotes the greatest integer function. Similarly we find that if $A_n \le \alpha$, then $\max_{u\in (A_n,\alpha)} |F_n^{-1}(u)-\xi_{\alpha}|=X_{[n\alpha]:n}-\xi_{\alpha}$. Furthermore, by the Lipschitz condition for $J$, there exists a~positive $K$ such that $\max_{u\in [0,1]}|J_w(u)|\le \sup_{u\in I}|J(u)|\le K$. This yields
\begin{equation}
\label{proof_11}
|I^{(1)}_1| =\left| \int_{\alpha}^{A_n} J_w(u)[ F_n^{-1}(u)-\xi_{\alpha}]\, du \right| \le K |A_n-\alpha| |X_{[n\alpha]:n}-\xi_{\alpha}|.
\end{equation}
Define a~sequence of intervals $\Gamma_n=[\alpha\wedge\alpha_n,\alpha\vee\alpha_n+1/n)$; then we obtain
\begin{equation}
\label{proof_11_}
|I^{(2)}_1| =\left| \int_{\alpha_n}^{\alpha}J(u) [F_n^{-1}(u)-F^{-1}(u)]\, du \right| \le K |\alpha_n-\alpha| \emph{D}_n,
\end{equation}
where $\emph{D}_n= \max_{i:\,i/n\in \Gamma_n}|X_{i:n}-F^{-1}(i/n)|\vee |X_{i:n}-F^{-1}((i-1)/n)|$.
Let $U_1,\dots,U_n$ be a~sample of independent $(0,1)$-uniformly distributed random variables and $U_{i:n}$ the corresponding order statistics. Set $M_{\alpha}=\sharp \{i : U_i \le \alpha \}$. Since the joint
distribution of $X_{i:n}$ and $N_{\alpha}$ coincides with the joint distribution of $F^{-1}(U_{i:n})$ and $M_{\alpha}$, $i=1,\dots,n$, in order to prove~\eqref{proof_10}, it suffices to show that
\begin{equation}
\label{proof_12}
\begin{split}
\textbf{P}\{|M_{\alpha}-n\alpha| |U_{[n \alpha ]:n}-\alpha|^{\varepsilon} > C\sqrt{n}\,\delta \}&=[1-\Phi(x)]o(1) ,\\
\textbf{P}\{\sqrt{n}|\alpha_n-\alpha| \emph{D}_{n,u}^{\varepsilon} > C\,\delta \}&=[1-\Phi(x)]o(1) ,\\
\textbf{P}\Bigl( \bigcup_{i:\,i/n\in \Gamma_n}\bigl\{ U_{i:n} \notin U_{\alpha}\bigr\}\Bigr) &=[1-\Phi(x)]o(1) ,
\end{split}
\end{equation}
as $n \to \infty$, uniformly in the range $-A\le x\le a_n z_n$. Here $U_{\alpha}$ is the~neighborhood of $\alpha$ in which $F^{-1}$ satisfies a~H\"{o}lder condition of order $\varepsilon$ (cf.~condition~{\bf (ii)}),
\begin{equation}
\label{proof_12a}
\emph{D}_{n,u}^{\varepsilon}=\max_{i:\,i/n\in \Gamma_n}|U_{i:n}-i/n|^{\varepsilon}\vee |U_{i:n}-(i-1)/n|^{\varepsilon},
\end{equation}
and $C$ stands for a~positive constant independent of $n$, which may change its value from line to line.
To shorten notation, let $k=[n\alpha]$. Consider the probability on the l.h.s. in the first line of~\eqref{proof_12}. It is equal to
\begin{equation}
\label{proof_13}
\textbf{P}\{|M_{\alpha}-n\alpha| |U_{k:n}-\alpha|^{\varepsilon}> C a_n^{-\frac 12} n^{\frac 1{2+\varepsilon}} \}
\le \textbf{P}_1+\textbf{P}_2,
\end{equation}
where
\begin{equation*}
\begin{split}
&\textbf{P}_1:=\textbf{P}\{|M_{\alpha}-n\alpha| > C_1 a_n^{-\frac 1{2(1+\varepsilon)}} n^{\frac{1+\varepsilon}{2+\varepsilon}} \}, \\
&\textbf{P}_2:=\textbf{P}\{|U_{k:n}-\alpha|^{\varepsilon}> C_2 a_n^{-\frac {\varepsilon}{2(1+\varepsilon)}} n^{-\frac{\varepsilon}{2+\varepsilon}} \},
\end{split}
\end{equation*}
$C_1$, $C_2$ are any positive constants such that $C_1C_2=C$. Let us estimate $\textbf{P}_1$ and $\textbf{P}_2$. Set $h=C_1a_n^{-\frac 1{2(1+\varepsilon)}} n^{\frac{1+\varepsilon}{2+\varepsilon}-1}$. Since $h<1-\alpha$ for all sufficiently large $n$ (because $a_n\ge 1/\log(1+n)$), by Theorem~1 of Hoeffding~\cite{ho} we have
\begin{equation}
\label{proof_14}
\textbf{P}_1 = \textbf{P}\{|M_{\alpha}-n\alpha| > nh\}\le 2\exp(-2nh^2)=2\exp(-2C_1^2n^{\frac {\varepsilon}{2+\varepsilon}}a_n^{-\frac 1{1+\varepsilon}}).
\end{equation}
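For the reader's convenience, the last exponent is obtained from
\begin{equation*}
nh^2=C_1^2\, a_n^{-\frac 1{1+\varepsilon}}\, n^{2\frac{1+\varepsilon}{2+\varepsilon}-1}=C_1^2\, a_n^{-\frac 1{1+\varepsilon}}\, n^{\frac{\varepsilon}{2+\varepsilon}},
\end{equation*}
since $2\frac{1+\varepsilon}{2+\varepsilon}-1=\frac{\varepsilon}{2+\varepsilon}$.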
Next, we evaluate $1/(1-\Phi(x))$. Let $\phi=\Phi'$. Since $1-\Phi(x) \sim \phi(x)/x$ as $x\to\infty$, for $x$ such that $-A\le x\le a_n z_n$ we have
\begin{equation}
\label{proof_phi}
\frac 1{1-\Phi(x)}\le \frac 1{1-\Phi(a_n z_n)} \sim \frac {a_n z_n}{\phi(a_n z_n)} = \sqrt{2\pi}\, a_n \, n^{\frac{\varepsilon}{2(2+\varepsilon)}} \exp\left( a_n^2 n^{\frac{\varepsilon}{2+\varepsilon}}/2\right),
\end{equation}
and combining \eqref{proof_14} and \eqref{proof_phi}, we obtain that
\begin{equation}
\label{proof_15}
\textbf{P}_1 = [1-\Phi(x)]o(1), \ \text{ as} \ n\to\infty,
\end{equation}
uniformly in the range $-A\le x\le a_n z_n$.
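Indeed, multiplying the bounds in \eqref{proof_14} and \eqref{proof_phi} gives, uniformly in this range,
\begin{equation*}
\frac{\textbf{P}_1}{1-\Phi(x)}\le 2\sqrt{2\pi}\, a_n z_n\exp\Bigl( n^{\frac{\varepsilon}{2+\varepsilon}}\Bigl[\frac{a_n^2}{2}-2C_1^2 a_n^{-\frac 1{1+\varepsilon}}\Bigr]\Bigr)\longrightarrow 0,
\end{equation*}
since $a_n\to 0$ forces the expression in the square brackets to tend to $-\infty$, so that the exponential factor decays faster than any power of $n$.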
Set $p_k=k/({n+1})$, and note that $0< \alpha-p_k<n^{-1}$. Then for $\textbf{P}_2$ we have
\begin{equation}
\label{proof_16}
\begin{split}
\textbf{P}_2 \le &\textbf{P}\{|U_{k:n}-p_k|> C_2^{1/\varepsilon} a_n^{-\frac 1{2(1+\varepsilon)}} n^{-\frac 1{2+\varepsilon}} - n^{-1}\}\\
=&\textbf{P}\{\sqrt{n}|U_{k:n}-p_k|> C_2^{1/\varepsilon} a_n^{-\frac 1{2(1+\varepsilon)}} n^{\frac {\varepsilon}{2(2+\varepsilon)}}-n^{-1/2} \}.
\end{split}
\end{equation}
Note that the term $n^{-1/2}$ on the r.h.s. in~\eqref{proof_16} is of negligible order and therefore we may omit it.
Set $\lambda:=C_2^{1/\varepsilon} a_n^{-\frac 1{2(1+\varepsilon)}} n^{\frac {\varepsilon}{2(2+\varepsilon)}} $.
We observe that $\lambda /\sqrt{n}= C_2^{1/\varepsilon} a_n^{-\frac 1{2(1+\varepsilon)}} n^{-\frac{1}{2+\varepsilon}}$, the latter quantity tends to zero, because $a_n \ge 1/\log(1+n)$, and so we can apply Inequality~1 and Proposition~1 (relation~(12)) given on pages~453 and~455 respectively in Shorack and Wellner~\cite{shorack}. Then we obtain
\begin{equation}
\label{proof_17}
\begin{split}
\textbf{P}_2&\leq 2\exp\Bigl( -\frac {\lambda^2}{2p_k}\,\frac 1{1+2\lambda/(3p_k \sqrt{n})}\Bigr)\\
& = 2 \exp\Bigl( -\frac {1}{2p_k} C_2^{2/\varepsilon} a_n^{-\frac 1{1+\varepsilon}} n^{\frac {\varepsilon}{2+\varepsilon}} [1+o(1)]\Bigr).
\end{split}
\end{equation}
From~\eqref{proof_phi} and~\eqref{proof_17} it follows that
\begin{equation}
\label{proof_18}
\textbf{P}_2 = [1-\Phi(x)]o(1), \ \text{ as} \ n\to\infty,
\end{equation}
uniformly in the range $-A\le x\le a_n z_n$. So, the first relation in~\eqref{proof_12} follows directly from~\eqref{proof_13}, \eqref{proof_15} and~\eqref{proof_18}.
In the next step, we prove the second relation in~\eqref{proof_12}. We have
\begin{equation}
\label{proof_19}
\begin{split}
&\textbf{P}\{\sqrt{n}|\alpha_n-\alpha| \emph{D}_{n,u}^{\varepsilon} > C\,\delta \}\\
\leq & \sum_{i:\,i/n\in \Gamma_n} \textbf{P}\{\sqrt{n}|\alpha_n-\alpha| \bigl(|U_{i:n}-i/n|^{\varepsilon}\vee |U_{i:n}-(i-1)/n|^{\varepsilon}\bigr) > C\,\delta \} .
\end{split}
\end{equation}
By condition {\bf (iii)}, there exists $M>0$ such that $|\alpha_n-\alpha|\le M n^{-1/(2+\varepsilon)}$ for all sufficiently large $n$; hence each term of the sum on the r.h.s. in~\eqref{proof_19} does not exceed
\begin{equation}
\label{proof_19a}
\textbf{P}\{\sqrt{n} |U_{i:n}-i/n| >\lambda \} + \textbf{P}\{\sqrt{n} |U_{i:n}-(i-1)/n| > \lambda \}, \quad i/n\in \Gamma_n,
\end{equation}
where $\lambda=C_{\varepsilon} a_n^{-1/(2\varepsilon)} n^{\frac {\varepsilon}{2(2+\varepsilon)}}$ and $C_{\varepsilon}=(C/M)^{1/\varepsilon}$. Obviously (cf.~\eqref{proof_16}-\eqref{proof_17}), it suffices to prove the desired bound for the first of two probabilities in~\eqref{proof_19a}. Applying once more the exponential Inequality~1 for uniform order statistics (cf. Shorack and Wellner~\cite{shorack}, pp.~453,~455) and the fact that $|i/n-\alpha|\leq Mn^{-1/(2+\varepsilon)}$ for all sufficiently large $n$, we obtain
\begin{equation*}
\textbf{P}\{\sqrt{n} |U_{i:n}-i/n| >\lambda \} \leq 2\exp\left( -\frac {1}{2\alpha} C_{\varepsilon}^2 a_n^{-\frac 1{\varepsilon}} n^{\frac {\varepsilon}{2+\varepsilon}} \Bigl[1+O(n^{-1/(2+\varepsilon)})\Bigr]\right).
\end{equation*}
Since the number of terms in the sum on the r.h.s. in \eqref{proof_19} does not exceed $n|\alpha-\alpha_n|+1=O(n^{\frac{1+\varepsilon}{2+\varepsilon}})$, the latter bound implies that the quantity on the r.h.s. in \eqref{proof_19} is of the order
\begin{equation*}
\label{proof_20_}
n^{\frac{1+\varepsilon}{2+\varepsilon}} \exp\Bigl( -\frac {1}{2\alpha} C_{\varepsilon}^2 a_n^{-\frac 1{\varepsilon}} n^{\frac {\varepsilon}{2+\varepsilon}} \Bigl[1+o(1)\Bigr]\Bigr).
\end{equation*}
This, together with~\eqref{proof_phi}, implies the required relation.
It remains to prove the last relation in~\eqref{proof_12}. Fix some $\gamma>0$ such that $[\alpha-\gamma,\alpha+\gamma]\subseteq U_{\alpha}$, and set $r_n=k\wedge k_n$, $s_n=k\vee k_n+1$, where $k_n= n\alpha_n$ (cf.~\eqref{tn}). Then
\begin{equation}
\label{proof_20}
\textbf{P}\Bigl( \bigcup_{i:\,i/n\in \Gamma_n}\bigl\{ U_{i:n} \notin U_{\alpha}\bigr\}\Bigr)\leq \textbf{P}(U_{r_n:n} < \alpha -\gamma) +
\textbf{P}(U_{s_n:n} > \alpha+\gamma).
\end{equation}
Observe that both sequences $r_n/n$ and $s_n/n$ satisfy condition~{\bf (iii)}, along with the sequence $\alpha_n=k_n/n$.
Let us estimate the first probability on the r.h.s. in~\eqref{proof_20} (the treatment of
the second one is similar).
Define a~binomial random variable $S_n=\sharp \{i : U_i < \alpha-\gamma \}$, then the first term on the r.h.s. in~\eqref{proof_20} is equal to
\begin{equation}
\label{proof_12_2}
\begin{split}
\textbf{P}(S_n \geq r_n) = &\textbf{P}\bigl(S_n-\textbf{E}S_n \geq r_n -n\alpha +\gamma n\bigr)\\
= &\textbf{P}\bigl(n^{-1}(S_n-\textbf{E}S_n)\ge \gamma+ o(1)\bigr)
\end{split}
\end{equation}
and by the~classical Hoeffding~\cite{ho} inequality, the latter quantity is no greater than
$\exp(-2n(\gamma+ o(1))^2)$, which is $[1-\Phi(x)]o(1)$, uniformly in the range $-A \le x \le a_n n^{1/2}$,
and the last relation in~\eqref{proof_12} follows.
Relations~\eqref{proof_11}-\eqref{proof_11_} and~\eqref{proof_12} directly imply~\eqref{proof_10}, which yields~\eqref{proof_5}.
\noindent{\bf Proof of \eqref{proof_6}}. \ By condition~{\bf (iv)}, there exists $b>0$ such that
\begin{equation*}
\label{proof_21}
\sqrt{n}|V_n|\leq bn^{-\varepsilon/(2(2+\varepsilon))}(|X_{(k_n+1):n}|\vee |X_{(n-m_n):n}|),
\end{equation*}
for all sufficiently large $n$. Thus,
\begin{equation*}
\label{proof_22}
\textbf{P}\left( \sqrt{n}|V_n|/\sigma >\delta \right) \leq \textbf{P}\left( |X_{(k_n+1):n}|\vee |X_{(n-m_n):n}|>\sigma a_n^{-1/2}\right)\leq \textbf{P}_{3}+\textbf{P}_{4},
\end{equation*}
where $\textbf{P}_{3}=\textbf{P}\bigl(|X_{(k_n+1):n}|>\sigma a_n^{-1/2}\bigr) $, \
$\textbf{P}_{4}=\textbf{P}\bigl(|X_{(n-m_n):n}|>\sigma a_n^{-1/2}\bigr) $. Let us estimate $\textbf{P}_{3}$ (the treatment of $\textbf{P}_{4}$ is the same and therefore omitted). We have
\begin{equation}
\label{proof_23}
\begin{split}
\textbf{P}_{3}&=\textbf{P}\left( \left| F^{-1}(U_{(k_n+1):n})\right| >\sigma a_n^{-1/2}\right) \\
&\leq \textbf{P}\left( \left| F^{-1}(U_{(k_n+1):n})- F^{-1}(\alpha)\right| +\left| F^{-1}(\alpha)\right| >\sigma a_n^{-1/2}\right)\\
&\leq \textbf{P}\left( \left| U_{(k_n+1):n}- \alpha\right|^{\varepsilon} >\sigma a_n^{-1/2} (1+o(1))\right) + \textbf{P}(U_{(k_n+1):n} \notin U_{\alpha}).
\end{split}
\end{equation}
Observe that the first term on the r.h.s. in~\eqref{proof_23} is equal to zero for all sufficiently large $n$, and the second one
is $[1-\Phi(x)]o(1)$, uniformly in the range $-A\le x\le a_n z_n$. This completes the proof of~\eqref{proof_6} and the theorem. \qed
\noindent{\bf Proof of Theorem~\ref{thm2}}. Let us first prove relation~\eqref{thm_2}. By Lemma~\ref{lem_2.1} and relation~\eqref{proof_1}, we have
\begin{equation*}
\label{proof t21}
\text{\em Var}(L_{n})=\text{\em Var}(\widetilde{L}_n)+\text{\em Var}(R_n+V_n)+2 \text{\em cov}(\widetilde{L}_n,R_n+V_n).
\end{equation*}
Since the $W_i$ are bounded, all conditions of Theorem~2\,(ii) of~\cite{vv82} are satisfied, and hence
\begin{equation*}
\label{proof t22}
\sigma^{-1} n^{1/2}\sqrt{\text{\em Var}(\widetilde{L}_n)}=1+O(n^{-1/2})
\end{equation*}
(cf. \cite{vv82}, p.~431). \ Furthermore, we have
\begin{equation*}
\label{proof t21a}
\begin{split}
n|\text{\em cov}(\widetilde{L}_n,R_n+V_n)|&\leq n[\text{\em Var}(\widetilde{L}_n)\text{\em Var}(R_n+V_n)]^{1/2}\\
&= \sigma[n\text{\em Var}(R_n+V_n)]^{1/2}(1+O(n^{-1/2})).
\end{split}
\end{equation*}
The latter three relations imply that in order to prove~\eqref{thm_2}, it suffices to show that
\begin{equation}
\label{proof t23}
n \text{\em Var}(R_n+V_n)=O\bigl(n^{-\frac{2\varepsilon}{2+\varepsilon}}\bigr).
\end{equation}
We have
\begin{equation}
\label{proof t24}
n \text{\em Var}(R_n+V_n)\leq n \textbf{E}(R_n+V_n)^2\leq 5n\Bigl[\sum_{i,j=1}^{2} \textbf{E}\bigl(I_j^{(i)}\bigr)^2 + \textbf{E}V_n^2 \,\Bigr],
\end{equation}
where $I_j^{(i)}$ are as in \eqref{proof_9}-\eqref{proof_10}. We will show that
\begin{equation}
\label{proof t25}
n \textbf{E}\bigl(I_j^{(1)}\bigr)^2 =O(n^{-\varepsilon})=o\bigl(n^{-\frac{2\varepsilon}{2+\varepsilon}}\bigr), \ \ n \textbf{E}\bigl(I_j^{(2)}\bigr)^2 =O(n^{-\frac{2\varepsilon}{2+\varepsilon}}),\ j=1,2,
\end{equation}
and that
\begin{equation}
\label{proof t26}
n \textbf{E}V_n^2 =O(n^{-\frac{2\varepsilon}{2+\varepsilon}}).
\end{equation}
Relations \eqref{proof t24}-\eqref{proof t26} imply the desired bound~\eqref{proof t23}.
We first prove~\eqref{proof t25}, and consider in detail only the case $j=1$ (the treatment in the case $j=2$ is the same and therefore omitted). Let, as before, $k=[\alpha n]$ and $k_n=\alpha_n n$. By \eqref{proof_11} and the Schwarz inequality, we have
\begin{equation*}
\label{proof t27}
\begin{split}
\textbf{E}\bigl(I_1^{(1)}\bigr)^2 \leq & K^2 [\textbf{E}(A_n-\alpha)^4\,\textbf{E}(X_{k:n}-\xi_{\alpha})^4]^{1/2}\\
= & K^2 n^{-2}[\textbf{E}(N_{\alpha}-\alpha n)^4\,\textbf{E}(X_{k:n}-\xi_{\alpha})^4]^{1/2}.
\end{split}
\end{equation*}
By the well-known formula for the fourth central moment of a~binomial random variable, we have $\textbf{E}(N_{\alpha}-\alpha n)^4=3\alpha^2(1-\alpha)^2n^2(1+o(1))$. Thus, there exists a~positive constant $C$ independent of $n$ such that
\begin{equation}
\label{proof t28}
n\textbf{E}\bigl(I_1^{(1)}\bigr)^2 \leq C [\textbf{E}(X_{k:n}-\xi_{\alpha})^4]^{1/2}
\end{equation}
for all sufficiently large $n$. We have
\begin{equation}
\label{proof t29}
\begin{split}
&\textbf{E}(X_{k:n}-\xi_{\alpha})^4= \textbf{E}(F^{-1}(U_{k:n})-F^{-1}(\alpha))^4\\
=&\textbf{E}[(F^{-1}(U_{k:n})-F^{-1}(\alpha))^4 \textbf{1}_{\{U_{k:n}\in U_{\alpha}\}}]\\
+& \textbf{E}[(F^{-1}(U_{k:n})-F^{-1}(\alpha))^4 \textbf{1}_{\{U_{k:n}\notin U_{\alpha}\}}]\\
\leq & C_H^4 \textbf{E}|U_{k:n}-\alpha|^{4\varepsilon}+ \bigl[\textbf{E}(X_{k:n}-\xi_{\alpha})^6\bigr]^{2/3}\,[\textbf{P}(U_{k:n}\notin U_{\alpha})]^{1/3},
\end{split}
\end{equation}
where $C_H$ is a~constant from the H\"{o}lder condition {\bf (ii)}. Note that if $\varepsilon>1/2$, then, by Jensen's inequality,
$\textbf{E}|U_{k:n}-\alpha|^{4\varepsilon}\leq (\textbf{E}|U_{k:n}-\alpha|^{4})^{\varepsilon}=O(n^{-2\varepsilon})$, since $\textbf{E}|U_{k:n}-\alpha|^{4}=O(n^{-2})$, and if $\varepsilon \leq 1/2$,
then, again by Jensen's inequality, $\textbf{E}|U_{k:n}-\alpha|^{4\varepsilon}\leq (\textbf{E}|U_{k:n}-\alpha|^{2})^{2\varepsilon}=O(n^{-2 \varepsilon})$; thus in both cases $\textbf{E}|U_{k:n}-\alpha|^{4\varepsilon}=O(n^{-2\varepsilon})$.
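The moment bounds $\textbf{E}|U_{k:n}-\alpha|^{2}=O(n^{-1})$ and $\textbf{E}|U_{k:n}-\alpha|^{4}=O(n^{-2})$ used here are standard properties of central uniform order statistics (cf., e.g., David and Nagaraja~\cite{david_2003}): $U_{k:n}$ has the Beta$(k,\,n+1-k)$ distribution, for which
\begin{equation*}
\textbf{E}U_{k:n}=\frac{k}{n+1},\qquad {\text{\em Var}}(U_{k:n})=\frac{k(n+1-k)}{(n+1)^2(n+2)}\le\frac 1{4(n+2)},
\end{equation*}
the fourth central moment is of the order ${\text{\em Var}}^2(U_{k:n})$, and $|k/(n+1)-\alpha|=O(n^{-1})$ for $k=[n\alpha]$.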
Since moments of any order of $X_{k:n}$ are finite for all sufficiently large $n$ and because $\textbf{P}(U_{k:n}\notin U_{\alpha})=O(\exp(-cn))$ with some $c>0$ (cf.~\eqref{proof_20}-\eqref{proof_12_2}), the latter bounds and relations~\eqref{proof t28}-\eqref{proof t29} imply the first of relations~\eqref{proof t25}.
Consider $I_1^{(2)}$. By condition {\bf (iii')}, there exists $d>0$ such that $(\alpha_n-\alpha)^2\leq d\, n^{-(2+\varepsilon-\varepsilon^2)/(2+\varepsilon)}(\log n)^{-\varepsilon}$ for all sufficiently large $n$. Then in view of~\eqref{proof_11_} we obtain
\begin{equation}
\label{proof t210}
n\textbf{E}\bigl(I_1^{(2)}\bigr)^2 \leq n K^2 (\alpha_n-\alpha)^2 \textbf{E} \emph{D}_n^2 \leq d\,K^2n^{\frac{\varepsilon^2}{2+\varepsilon}}(\log n)^{-\varepsilon}\textbf{E} \emph{D}_n^2.
\end{equation}
Hence, to get the second bound in~\eqref{proof t25}, it suffices to show that
\begin{equation}
\label{proof t211}
\textbf{E}\emph{D}_n^2=O\left( (\log n)^{\varepsilon}n^{-\varepsilon}\right).
\end{equation}
For all sufficiently large $n$, \ $\alpha_n \in U_{\alpha}$ and
\begin{equation*}
\label{proof t212}
\textbf{E}\emph{D}_n^2\leq C_H^2 \textbf{E}\left(\emph{D}_{n,u}^{\varepsilon}\right)^2=
C_H^2\textbf{E}\left( \max_{i:\,i/n\in \Gamma_n}|U_{i:n}-i/n|^{2\varepsilon}\vee |U_{i:n}-(i-1)/n|^{2\varepsilon}\right),
\end{equation*}
where $\emph{D}_{n,u}^{\varepsilon}$ is as in~\eqref{proof_12a}. The latter quantity does not exceed
\begin{equation}
\label{proof t213}
\begin{split}
&t^{\varepsilon}C_H^2 (\log n)^{\varepsilon}n^{-\varepsilon} \\
+ &\textbf{P}
\left( \bigcup_{i:\,i/n\in \Gamma_n} \Bigl\{ |U_{i:n}-i/n|\vee |U_{i:n}-(i-1)/n|> \sqrt{t\frac{\log n}n}\Bigr\}\right) \\
\leq & t^{\varepsilon}C_H^2 (\log n)^{\varepsilon}n^{-\varepsilon}+(|\alpha n-k_n|+1)\left( \textbf{P}_1+\textbf{P}_2\right),
\end{split}
\end{equation}
where $t$ is a constant which will be chosen later, and
\begin{equation*}
\label{proof t214}
\textbf{P}_1= \textbf{P}\left( |U_{i:n}-i/n|> \sqrt{t\frac{\log n}n}\right),\ \ \textbf{P}_2=\textbf{P}\left( |U_{i:n}-(i-1)/n|> \sqrt{t\frac{\log n}n}\right).
\end{equation*}
It is obvious that both $\textbf{P}_1$ and $\textbf{P}_2$ are of the same order of magnitude, so it suffices to estimate $\textbf{P}_1$, to which we can apply once more Inequality~1 from Shorack and Wellner~\cite{shorack}. We have
\begin{equation*}
\label{proof t215}
\textbf{P}_1= \textbf{P}\left( \sqrt{n}|U_{i:n}-i/n|> \sqrt{t\log n}\right)\leq 2\exp\left(-\frac{t\log n}{2\alpha}(1+O(|\alpha_n-\alpha|))\right),
\end{equation*}
hence if we choose $t\ge 4\alpha$, we obtain $\textbf{P}_1+\textbf{P}_2=O(n^{-2})$, and the second term on
the r.h.s. in~\eqref{proof t213} becomes negligible in order relative to the first one.
This proves~\eqref{proof t211} and the second relation in~\eqref{proof t25}.
We now turn to the proof of~\eqref{proof t26}. By condition {\bf (iv')}, there exists a~constant $C>0$, not depending on
$n$, such that $\sum_{i=k_n+1}^{n-m_n}|c_{i,n} -c_{i,n}^0|\leq Cn^{\frac{2-\varepsilon}{2(2+\varepsilon)}}$, for all sufficiently large $n$, and
\begin{equation*}
\begin{split}
n\textbf{E}V_n^2 &\leq n^{-1} \left( \sum_{i=k_n+1}^{n-m_n}|c_{i,n} -c_{i,n}^0|\right)^2\textbf{E}\bigl(X^2_{k_n+1:n}\vee X^2_{n-m_n:n}\bigr)\\
&\leq C^2n^{-1} n^{\frac{2-\varepsilon}{2+\varepsilon}} \textbf{E}\bigl(X^2_{k_n+1:n}\vee X^2_{n-m_n:n}\bigr)=O\bigl( n^{-\frac{2\varepsilon}{2+\varepsilon}}\bigr),
\end{split}
\end{equation*}
and~\eqref{proof t26} follows.
Thus, relation~\eqref{thm_2} is proved, and we are now in a~position to prove that relations~\eqref{thm_1} hold true if we replace $\sigma/n^{1/2}$ by $\sqrt{{\text{\em Var}}(L_{n})}$. We prove the first of relations~\eqref{thm_1}; the second one will then follow from the first if we replace $c_{i,n}$ by $-c_{i,n}$.
Fix an~arbitrary sequence $a_n\to 0$ and $A>0$, set $\lambda_n=\sigma^{-1}n^{1/2} \sqrt{{\text{\em Var}}(L_{n})}$ and write
\begin{equation}
\label{proof t217}
\frac{\textbf{P}\bigl((L_{n}-\mu_n)/ \sqrt{{\text{\em Var}}(L_{n})} >x\bigr)}{1-\Phi(x) }=\frac {1-F_{L_{n}}(\lambda_nx)}{1-\Phi(\lambda_n x)} \, \,\frac{1-\Phi(\lambda_n x)}{1-\Phi(x)}.
\end{equation}
Set $B=A\sup_{n\in \mathbb{N}}\lambda_n$ and $b_n=\lambda_n a_n$. Since $\lambda_n \to 1$, the number $B$ is finite and $b_n \to 0$. Hence, by Theorem~\ref{thm1}, the first ratio on the r.h.s. in~\eqref{proof t217} tends to $1$ as $n \to \infty$, uniformly in $x$ such that $-B \leq \lambda_n x \leq b_n z_n$, where $z_n=n^{\varepsilon/(2(2+\varepsilon))}$, in particular, uniformly in the range $-A \leq x \leq a_n z_n$. Furthermore, we see that $|\lambda_n-1|^{1/2} a_nz_n \to 0$, which is due to the fact that, by \eqref{thm_2}, $|\lambda_n-1|\le|\lambda_n^2-1|=O\bigl(n^{-\frac {\varepsilon}{2+\varepsilon}}\bigr)$, so that $|\lambda_n-1|^{1/2}=O\bigl(n^{-\frac {\varepsilon}{2(2+\varepsilon)}}\bigr)$. Hence, by Lemma~A.1 from Vandemaele and Veraverbeke~\cite{vv82}, the second ratio on the r.h.s. in~\eqref{proof t217} also tends to~$1$, uniformly in the range $-A \leq x \leq a_n z_n$. The theorem is proved. \qed
{\bf Acknowledgments.} \ The author is grateful to the~referee for his valuable remarks and suggestions that led to improvement of the article.
\begin{thebibliography}{99}
\bibitem{al91}
{A.~Aleskeviciene}, {\it Large and moderate deviations for L-statistics}, Lithuanian Math.~J.~31~(1991), pp.~145--156.
\bibitem{bgz}
{V.~Bentkus, F.~G\"otze and W.\,R.~van Zwet}, {\it An
Edgeworth expansion for symmetric statistics}, Ann.\ Statist.,~25~(1997), pp.~851--896.
\bibitem{bz90}
{V.~Bentkus and R.~Zitikis}, {\it Probabilities of large deviations for L-statistics}, Lithuanian Math.~J.,~30~(1990), pp.~215--222.
\bibitem{bj77}
{S.~Bjerve}, {\it Error bound for linear combinations of order statistics}, Ann. Statist.,~5~(1977), pp.~357--369.
\bibitem{boi}
{H.~Boistard}, {\it Large deviations for L-statistics}, Statistics \& Decisions,~25~(2007), pp.~89--125.
\bibitem{cvv82}
{H.~Callaert, M.~Vandemaele and N.~Veraverbeke}, {\it A~Cram\'{e}r type large deviations theorem for trimmed linear combinations of order statistics}, Comm. Statist. Th. Meth.,~11~(1982), pp.~2689--2698.
\bibitem{david_2003}
{H.\,A.~David and H.\,N.~Nagaraja}, {\it Order Statistics, 3rd ed.}, Wiley, New York 2003.
\bibitem{friedrich}
{K.\,O.~Friedrich}, {\it A Berry--Esseen bound for
functions of independent random variables}, Ann.\ Statist.,~17~(1989), pp.~170--183.
\bibitem{gao_zhao}
{F.~Gao and X.~Zhao}, {\it Delta method in large deviations and moderate deviations for estimators},
Ann. Statist.,~39~(2011), pp.~1211--1240.
\bibitem{zit_2009}
{F.~Greselin, M.\,L.~Puri and R.~Zitikis}, {\it L-functions, processes, and statistics in measuring
economic inequality and actuarial risks}, Stat.~Interface,~2~(2009), pp.~227--245.
\bibitem{gri}
{N.\,V.~Gribkova}, {\it On analogues of Berry--Esseen inequality for truncated linear combinations of order statistics}, Theory Probab. Appl.,~38~(1993), pp.~142--149.
\bibitem{gri:2016}
{N.~Gribkova}, {\it
Cram\'{e}r type moderate deviations for trimmed L-statistics}, Math. Methods Statist.,~25, no.~4~(2016), pp.~313--322.
\bibitem{gri:2017b}
{N.~Gribkova}, {\it
Cram\'{e}r type moderate deviations for intermediate trimmed means}, Comm. Statist. Th. Meth.,~46, no.~23~(2017), pp.~11918--11932.
\bibitem{gh2006}
{N.~Gribkova and R.~Helmers}, {\it The empirical
Edgeworth expansion for a Studentized trimmed mean}, Math.~Methods~Statist.,~15~(2006), pp.~61--87.
\bibitem{gh2007}
{N.~Gribkova and R.~Helmers}, {\it On the Edgeworth
expansion and the $M$ out of $N$ bootstrap accuracy for a
Studentized trimmed mean}, Math.~Methods~Statist.,~16~(2007), pp.~142--176.
\bibitem{gh2011}
{N.~Gribkova and R.~Helmers}, {\it On a Bahadur--Kiefer representation of von Mises statistic type for intermediate sample quantiles}, Probab.~Math.~Statist.,~32~(2012), pp.~255--279.
\bibitem{gh2014}
{N.\,V.~Gribkova and R.~Helmers}, {\it Second order approximations for slightly trimmed means}, Theory Probab.~Appl.,~58~(2014), pp.~383--412.
\bibitem{helm80}
{R.~Helmers}, {\it Edgeworth expansions for linear combinations of order statistics with smooth weight functions}, Ann. Statist.,~8~(1980), pp.~1361--1374.
\bibitem{helm_e1}
{R.~Helmers}, {\it Edgeworth expansions for trimmed linear
combinations of order statistics}, In: Proc.~2nd~Prague~Symp. on
Asymptotic Statistics (P.~Mandl and M.~Hu\v{s}kov\'a, Eds.), pp.~221--232, North-Holland, Amsterdam 1979.
\bibitem{helm81}
{R.~Helmers}, {\it A Berry -- Esseen theorem for linear combinations of order statistics}, Ann. Probab.,~9~(1981), pp.~342--347.
\bibitem{helm_e2}
{R.~Helmers}, {\it Edgeworth expansions for linear combinations of order statistics},
Mathematical Centre Tracts~105, CWI, Amsterdam 1982.
\bibitem{ho}
{W.~Hoeffding}, {\it Probability inequalities for sums of
bounded random variables}, J.~Amer.~Statist.~Assoc.,~58~(1963), pp.~13--30.
\bibitem{mas_shor_90}
{D.M.~Mason and G.K.~Shorack}, {\it Necessary and sufficient conditions for asymptotic normality of trimmed $L$-statistics}, J.~Statist.~Plan.~Inference,~25~(1990), pp.~111--139.
\bibitem{pz}
{H.~Putter and W.\,R.~van Zwet}, {\it Empirical Edgeworth
expansions for symmetric statistics}, Ann.\ Statist.,~26~(1998), pp.~1540--1569.
\bibitem{serfling_90}
{R.\,J.~Serfling}, {\it Approximation theorems of mathematical statistics}, Wiley, New York 1980.
\bibitem{shor69}
{G.R.~Shorack}, {\it Asymptotic normality of linear combinations of functions of order statistics}, Ann. Math. Statist.,~40~(1969), pp.~2041--2050.
\bibitem{shor72}
{G.R.~Shorack}, {\it Functions of order statistics}, Ann. Math. Statist.,~43~(1972), pp.~412--427.
\bibitem{shorack}
{G.R.~Shorack and J.A.~Wellner}, {\it Empirical
processes with applications to statistics}, Wiley, New York 1986.
\bibitem{s69}
{S.M.~Stigler}, {\it Linear functions of order statistics}, Ann. Math. Statist.,~40~(1969), pp.~770--788.
\bibitem{s74}
{S.M.~Stigler}, {\it Linear functions of order statistics with smooth weight functions}, Ann. Statist.,~2~(1974), pp.~676--693.
\bibitem{vv82}
{M.~Vandemaele and N.~Veraverbeke}, {\it Cram\'{e}r type large deviations for linear combinations of order statistics}, Ann. Probab.,~10~(1982), pp.~423--434.
\bibitem{vanderv}
{A.W.~van der Vaart}, {\it Asymptotic statistics}, Cambridge Series in Statistical and Probabilistic Mathematics,~3, Cambridge Univ.~Press, Cambridge 1998.
\bibitem{zwet}
{W.\,R.~van Zwet}, {\it A Berry--Esseen bound for symmetric
statistics}, Z. Wahrsch. Verw. Gebiete,~66~(1984), pp.~425--440.
\end{thebibliography}
\end{document}
\begin{document}
\newcommand{\into}[0]{\ensuremath{\hookrightarrow}}
\newcommand{\onto}[0]{\ensuremath{\twoheadrightarrow}}
\newcommand{\eps}[0]{\varepsilon}
\newcommand{\Pfin}[0]{\mathcal{P}_{\mathrm{fin}}}
\newcommand{\mdim}[0]{\ensuremath{\mathrm{mdim}}}
\newcommand{\mesh}[0]{\ensuremath{\mathrm{mesh}}}
\newcommand{\widim}[0]{\ensuremath{\mathrm{widim}}}
\newtheorem{satz}{Satz}[section]
\newaliascnt{corCT}{satz}
\newtheorem{cor}[corCT]{Corollary}
\aliascntresetthe{corCT}
\providecommand*{\corCTautorefname}{Corollary}
\newaliascnt{lemmaCT}{satz}
\newtheorem{lemma}[lemmaCT]{Lemma}
\aliascntresetthe{lemmaCT}
\providecommand*{\lemmaCTautorefname}{Lemma}
\newaliascnt{propCT}{satz}
\newtheorem{prop}[propCT]{Proposition}
\aliascntresetthe{propCT}
\providecommand*{\propCTautorefname}{Proposition}
\newaliascnt{theoremCT}{satz}
\newtheorem{theorem}[theoremCT]{Theorem}
\aliascntresetthe{theoremCT}
\providecommand*{\theoremCTautorefname}{Theorem}
\newtheorem*{theoreme}{Theorem}
\theoremstyle{definition}
\newaliascnt{conjectureCT}{satz}
\newtheorem{conjecture}[conjectureCT]{Conjecture}
\aliascntresetthe{conjectureCT}
\providecommand*{\conjectureCTautorefname}{Conjecture}
\newaliascnt{defiCT}{satz}
\newtheorem{defi}[defiCT]{Definition}
\aliascntresetthe{defiCT}
\providecommand*{\defiCTautorefname}{Definition}
\newaliascnt{remCT}{satz}
\newtheorem{rem}[remCT]{Remark}
\aliascntresetthe{remCT}
\providecommand*{\remCTautorefname}{Remark}
\newaliascnt{exampleCT}{satz}
\newtheorem{example}[exampleCT]{Example}
\aliascntresetthe{exampleCT}
\providecommand*{\exampleCTautorefname}{Example}
\begin{abstract}
For a countable amenable group $G$ and a fixed dimension $m\geq 1$, we investigate when it is possible to embed a $G$-space $X$ into the $m$-dimensional cubical shift $([0,1]^m)^G$.
We focus our attention on systems that arise as an extension of an almost finite $G$-action on a totally disconnected space $Y$, in the sense of Matui and Kerr.
We show that if such a $G$-space $X$ has mean dimension less than $m/2$, then $X$ embeds into the $(m+1)$-dimensional cubical shift.
If the distinguished factor $G$-space $Y$ is assumed to be a subshift of finite type, then this can be improved to an embedding into the $m$-dimensional cubical shift.
This result ought to be viewed as the generalization of a theorem by Gutman--Tsukamoto for $G=\mathbb Z$ to actions of all amenable groups, and represents the first result supporting the Lindenstrauss--Tsukamoto conjecture for actions of groups other than $G=\mathbb{Z}^k$.
\end{abstract}
\maketitle
\section*{Introduction}
It is a ubiquitous phenomenon in mathematics that if one deals with a category of objects with any kind of rich structure, there are often some natural distinguished examples that are \emph{large} enough to study conditions under which a general object embeds into the distinguished examples.
Depending on the precise context, the solution to such a problem can reveal an a priori surprising hierarchy present in the objects under consideration, or in the best case scenario give rise to new invariants that may have applications far beyond the original embedding problem.
In order to illustrate the historical importance of embedding problems, one need not look further than geometry and/or topology.
The Whitney embedding theorem, asserting that every $m$-dimensional smooth manifold embeds smoothly as a submanifold of $\mathbb R^{2m}$, was not only impactful for its statement, but introduced various concepts in its proof that remain fundamental in the area of differential geometry to this day.
The Menger--Nöbeling theorem, asserting that every compact metrizable space with covering dimension $m$ embeds continuously into the cube $[0,1]^{2m+1}$, not only generalizes this kind of phenomenon from the geometric context, but shows that covering dimension introduces a level of hierarchy among spaces having consequences beyond simply acting as a simple-minded obstruction to embeddings.
This short article aims to make progress on a similar embedding problem for topological dynamical systems, i.e., countably infinite discrete groups $G$ acting via homeomorphisms on compact metrizable spaces.
In this case the distinguished examples are given by so-called cubical shifts.
That is, given a natural number $m\geq 1$, we may consider $G$ acting on the space $([0,1]^m)^G$ by sending $g\in G$ to the homeomorphism $[(x_h)_{h\in G}\mapsto (x_{g^{-1}h})_{h\in G}]$; we refer to this as the \emph{$m$-dimensional cubical shift}.
A priori, it is not at all clear how to determine when a given action $\alpha: G\curvearrowright X$ embeds into such an example, or even how to show that it does not.
Given that $X$ embeds into the Hilbert cube as a consequence of Urysohn--Tietze, say via $\iota: X\into [0,1]^{\mathbb N}$, it is a triviality to obtain the equivariant embedding into the analogous Hilbert cube shift $([0,1]^{\mathbb N})^G$ via $x\mapsto (\iota(\alpha_g(x)))_{g\in G}$.
Since $([0,1]^{\mathbb N})^G$ is actually homeomorphic to $([0,1]^{m})^G$, this raises the question of how much of a hierarchy there really is between cubical shifts of different dimensions regarding the class of dynamical systems that embed into them.
The first really substantial embedding result was in the PhD thesis of Jaworski, who showed that every aperiodic homeomorphism on a finite-dimensional space (which we view as a free $\mathbb Z$-system) embeds equivariantly into $[0,1]^{\mathbb Z}$.
Later on this led to the question by Auslander, asking whether this holds for arbitrary aperiodic homeomorphisms on any space.
This problem remained open for over a decade before it was settled in the negative by the introduction of mean topological dimension, the ideas of which initially appeared in Gromov's work \cite{Gromov99} and were subsequently fleshed out by Lindenstrauss--Weiss \cite{LindenstraussWeiss00}.
Under the assumption that $G$ is amenable, every topological dynamical system $\alpha: G\curvearrowright X$ can be assigned its mean dimension $\mdim(X,\alpha)\in [0,\infty]$, which respects embeddings. (Although one should perhaps mention that mean dimension has since been extended to sofic groups \cite{Li13}, the methods in this paper reveal nothing new beyond the amenable case, hence we shall ignore sofic mean dimension here.)
In a nutshell, mean dimension is a dimensional analog of entropy and is designed to be useful for distinguishing systems of infinite topological entropy.
The conceptual difference between these notions can be summarized by the slogan that entropy measures the number of bits per second needed to describe points in a system, whereas mean dimension measures the number of real parameters per second.
From this intuitive perspective, it is not surprising that the mean dimension of every $m$-dimensional cubical shift is equal to $m$.
By the mere existence of free minimal actions with arbitrarily large mean dimension --- see \cite[§3]{LindenstraussWeiss00} and \cite{Krieger09} --- one gets plenty of examples that cannot embed into the $m$-dimensional cubical shift.
In a surprising twist at the time, Lindenstrauss in \cite{Lindenstrauss99} proved that (extensions of) minimal homeomorphisms with mean dimension less than $m/36$ do embed, however.
This has triggered the search for the optimal embedding result that can be seen as the dynamical generalization of the Menger--Nöbeling theorem.
Although the situation for completely general systems is rather subtle and unsolved even for $G=\mathbb Z$, there has been amazing progress for aperiodic or even minimal homeomorphisms.
Building on various substantial precursor results \cite{LindenstraussTsukamoto14, GutmanTsukamoto14, Gutman15, Gutman17}, the optimal embedding result was recently proved by Gutman--Tsukamoto \cite{GutmanTsukamoto20} for minimal homeomorphisms:\ Every minimal homeomorphism with mean dimension less than $m/2$ embeds into the $m$-dimensional cubical shift.
A generalization of this result for $\mathbb Z^k$-actions was successfully pursued in \cite{Gutman11, GutmanQiaoSzabo18, GutmanLindenstraussTsukamoto16, GutmanQiaoTsukamoto19}, the final approach of which involves extremely sophisticated tools from signal analysis to take advantage of the surrounding geometry for these groups.
As was noted in the introduction of \cite{GutmanTsukamoto20}, ``the generalization
to non-commutative groups seems to require substantially new ideas''.
Indeed there has been no progress on the embedding problem for dynamical systems over nonabelian groups to the best of the authors' knowledge, and this article aims to change that.
Our main result (\autoref{thm:embedding-result}+\autoref{cor:optimal-embedding}) asserts:
\begin{theoreme}
Suppose $G$ is a countable amenable group.
Let $\beta: G\curvearrowright Y$ be an almost finite action on a compact totally disconnected metrizable space.
Let $\alpha: G\curvearrowright X$ be an action on a compact metrizable space that arises as an extension of $\beta$.
Let $m \geq 1$ be a natural number and suppose that $\mdim(X, \alpha) < \frac{m}{2}$.
Then there exists an embedding of $G$-spaces $X\into ([0,1]^{m+1})^G$.
If $(Y,\beta)$ is assumed to be a subshift of finite type, then there exists an embedding of $G$-spaces $X\into ([0,1]^{m})^G$.
\end{theoreme}
In the context of the above theorem, we remark that the concept of almost finiteness for actions, introduced in \cite{Matui12, Kerr20} with a motivation towards $\mathrm{C}^*$-algebraic applications, is a kind of freeness property that is designed as a topological version of the Ornstein--Weiss lemma \cite{OrnsteinWeiss87} for free probability measure preserving actions.
Since it is by now known for a large class of groups that almost finiteness for $\beta$ follows if $\beta$ is assumed to be free (see \autoref{rem:almost-finite-actions}), our main result should be viewed as a generalization of Gutman--Tsukamoto's approach from \cite{GutmanTsukamoto14} to the setting of amenable groups.
This is indeed reflected not just in the similarity of the main result, but at the level of our proof.
More specifically, there are clear parallels between \autoref{lem:dense-eps-embeddings}, \autoref{thm:embedding-result} and \autoref{cor:optimal-embedding} on the one hand, and \cite[Proposition 3.1, Theorem 1.5, Corollary 1.8]{GutmanTsukamoto14} on the other hand.
In a nutshell, almost finiteness of $\beta$ in our proof acts as the correct substitute of the well-known clopen Rokhlin lemma for aperiodic homeomorphisms on the Cantor set.
We further point out that, to the best of our knowledge, this provides the first application of almost finiteness to prove a new result in topological dynamics that is entirely unrelated to questions about crossed product $\mathrm{C}^*$-algebras.
The problem whether the above result is true for all free actions $\alpha: G\curvearrowright X$, regardless of whether it admits well-behaved factor systems, remains open.
In light of the technical difficulties already present in the state-of-the-art for $\mathbb Z^k$, however, we expect this challenge to be rather difficult to tackle without ideas that go substantially beyond our present work.
\section{Preliminaries}
We start with some basic remarks on notation and terminology.
Throughout the article we fix a countable amenable group $G$.
We write $F\Subset G$ to mean that $F$ is a finite subset of $G$.
Given $K\Subset G$ and a constant $\delta>0$, we say that a non-empty set $F\Subset G$ is \emph{$(K,\delta)$-invariant}, if $|KF\setminus F|\leq\delta|F|$.
We will freely use the well-known characterization of amenability via the F{\o}lner criterion, i.e., $G$ is amenable precisely when every pair $(K,\delta)$ admits some $(K,\delta)$-invariant finite subset in $G$.
If $G$ is countable, we call a sequence $(F_n)_{n\in\mathbb N}$ with $F_n\Subset G$ a \emph{F{\o}lner sequence}, if for every pair $(K,\delta)$, there is some $n_0\in\mathbb N$ such that $F_n$ is $(K,\delta)$-invariant for all $n\geq n_0$.
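For instance, for $G=\mathbb Z$ the intervals $F_n=\{0,1,\dots,n-1\}$ form a F{\o}lner sequence: if $K\Subset\mathbb Z$ and $R=\max_{k\in K}|k|$, then
\[
KF_n\setminus F_n\subseteq [-R,\,n-1+R]\setminus[0,\,n-1],
\qquad\text{so}\qquad
\frac{|KF_n\setminus F_n|}{|F_n|}\le\frac{2R}{n}\longrightarrow 0.
\]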
The letters $X$ and $Y$ will always be reserved to denote compact metrizable spaces.
Under a \emph{topological dynamical system} (over $G$) or {\emph{$G$-space}} we understand a pair $(X,\alpha)$, where $X$ is a compact metrizable space and $\alpha: G\curvearrowright X$ is an action by homeomorphisms.
In some cases when there is no ambiguity on what action is considered on $X$, we sometimes just talk of the $G$-space $X$ to lighten notation.
An action $\alpha$ is called \emph{free} if for every point $x\in X$, its orbit map $[g\mapsto\alpha_g(x)]$ is injective.
Given another action $\beta: G\curvearrowright Y$, we say that a continuous map $\phi: X\to Y$ is \emph{equivariant (w.r.t.\ $\alpha$ and $\beta$)}, if $\phi\circ\alpha_g=\beta_g\circ\phi$ for all $g\in G$, in which case we indicate this by writing $\phi: (X,\alpha)\to (Y,\beta)$.
Using the alternate arrow $\onto$ means that the map is surjective, whereas using $\into$ means that the map is injective, in which case we also speak of an embedding.
If we are given an equivariant surjective map $\pi: (X,\alpha)\onto (Y,\beta)$, then one calls $(Y,\beta)$ a \emph{factor} of $(X,\alpha)$ and refers to $\pi$ as the \emph{factor map}.
On the flip side, one says that $(X,\alpha)$ is an \emph{extension} of $(Y,\beta)$.
Of particular importance for this work is the example given by cubical shifts over a group $G$.
That is, given a natural number $m\geq 1$, the \emph{$m$-dimensional cubical shift} is the action $\sigma: G\curvearrowright ([0,1]^m)^G$ given by $\sigma_g\big( (x_h)_{h\in G} \big)=(x_{g^{-1}h})_{h\in G}$.
Let us now introduce the concepts underpinning this article, as well as some known results from the literature.
\subsection{Almost finiteness}
\begin{defi}
Let $\alpha: G\curvearrowright X$ be an action.
\begin{enumerate}[leftmargin=*,label=$\bullet$]
\item A {\it tower} is a pair $(V,S)$ consisting of a subset $V$ of $X$ and a finite subset $S$ of $G$ such that the sets $\alpha_s(V)$ for $s\in S$ are pairwise disjoint.
\item Given such a tower, the set $V$ is the {\it base} of the tower, the set $S$ is the {\it shape} of the tower, and the sets $\alpha_s(V)$ for $s\in S$ are the {\it levels} of the tower.
\item The tower $(V,S)$ is {\it open} if $V$ is open.
It is called {\it clopen} if $V$ is clopen.
\item A {\it castle} is a finite collection of towers $\{ (V_i , S_i) \}_{i\in I}$
such that for all $i,j\in I$ and $s\in S_i$, $t\in S_j$, we have that $\alpha_{s}(V_i)\cap\alpha_{t}(V_j)=\emptyset$ if $i\neq j$ or $s\neq t$.
\item The castle is {\it open} if each of the towers is open, and {\it clopen} if each of the towers is clopen.
\end{enumerate}
\end{defi}
The following definition originates in \cite[Definition 6.2]{Matui12} for principal ample groupoids, which was then adapted in \cite[Definition 8.2]{Kerr20} for actions of amenable groups on arbitrary spaces.
Although not trivially identical to the general version, the definition below is known to be an equivalent one in our setting due to \cite[Theorem 10.2]{Kerr20}.
\begin{defi}
Let $\beta: G\curvearrowright Y$ be an action on a totally disconnected space.
We say that $\beta$ is \emph{almost finite}, if for every $K\Subset G$ and $\delta>0$, there exists a clopen castle $\{ (W_i , S_i) \}_{i\in I}$ such that $Y=\bigsqcup_{i\in I} \bigsqcup_{s\in S_i} \beta_s(W_i)$ and for every $i\in I$, the shape $S_i$ is $(K,\delta)$-invariant.
\end{defi}
\begin{rem} \label{rem:almost-finite-actions}
One of the possible ways to view almost finiteness is as a strong topological variant of the Ornstein--Weiss tower lemma \cite[Theorem 5]{OrnsteinWeiss87} that characterizes freeness of probability measure preserving actions in ergodic theory, which was recently strengthened in \cite{CJKMSTD18}.
Conjecturally, every free action $\beta: G\curvearrowright Y$ on a totally disconnected space is almost finite.\footnote{We note that the converse is not true for all groups $G$. In general one can only conclude from almost finiteness that the action is \emph{essentially free}, i.e., sets of the form $\{y\in Y\mid \beta_g(y)=y\}$ vanish under all $\beta$-invariant Borel probability measures. Examples of almost finite but non-free actions are found among generalized Odometers; see \cite{OrtegaScarparo20}.}
This is not so hard to see for $G=\mathbb Z$, as almost finiteness just boils down to the well-known clopen Rokhlin tower lemma for aperiodic homeomorphisms; see for example \cite[Proposition 3]{BezuglyiDooleyMedynets05}.
Although the general case is still open, the following partial results are by now known:
\begin{enumerate}[leftmargin=*,label=$\bullet$]
\item For any amenable group $G$, almost finite actions on the Cantor set are generic among all free minimal $G$-actions; see \cite[Theorem 4.2]{CJKMSTD18}.
\item The conjecture holds when $G$ has local subexponential growth, i.e., given any $F\Subset G$, one has $\lim_{n\to\infty}\frac{|F^{n+1}|}{|F^n|}=1$.
This was shown in \cite{KerrSzabo18} as a consequence of \cite{DownarowiczZhang17}.
\item Let $H\leq G$ be a normal subgroup so that the above conjecture holds for $H$-actions.
If $G/H$ is finite or cyclic, then the conjecture holds for all $G$-actions; see \cite{KerrNaryshkin21}.
In particular, the conjecture is verified for all elementary amenable groups.
\end{enumerate}
\end{rem}
\subsection{Mean dimension}
\begin{defi} \label{def:open-covers}
Given a finite open cover $\mathcal U$ of a topological space $X$, we define its \emph{order} as the minimal number $n\geq 0$ such that every point $x\in X$ is an element of at most $n+1$ members of $\mathcal U$.
If $X$ is equipped with a metric $d$, then $\mesh_d(\mathcal U)$ is defined as the maximal diameter of a member of $\mathcal U$.
Given a constant $\eps>0$, one defines
\[
\widim_\eps(X,d) = \min\{ \operatorname{ord}(\mathcal U) \mid \mathcal U \text{ is an open cover with } \mesh_d(\mathcal U)\leq\eps\}.
\]
\end{defi}
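To illustrate this quantity, consider the unit cube $[0,1]^m$ equipped with the metric induced by $\|\cdot\|_\infty$. By Lebesgue's covering theorem, every open cover of sufficiently small mesh has order at least $m$, while covers of order exactly $m$ and arbitrarily small mesh exist because the covering dimension of $[0,1]^m$ is $m$; hence
\[
\widim_\eps\bigl([0,1]^m,\|\cdot\|_\infty\bigr)=m \qquad\text{for all sufficiently small } \eps>0.
\]
This is what makes $\widim_\eps$ a sensible $\eps$-scale substitute for the covering dimension.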
Before we can define mean dimension, we recall the following technical result, which is a non-trivial consequence of the Ornstein--Weiss quasitiling machinery.
\begin{theorem}[see {\cite[Theorem 6.1]{LindenstraussWeiss00}}] \label{thm:subadditive-convergence}
Let $G$ be a countable amenable group.
Denote by $\Pfin(G)$ the set of all non-empty finite subsets of $G$.
Suppose we are given a function $\phi: \Pfin(G)\to [0,\infty)$ satisfying the following conditions:
\begin{enumerate}[leftmargin=*,label=$\bullet$]
\item $\phi(F_1) \leq \phi(F_2)$ whenever $F_1 \subseteq F_2$;
\item $\phi(Fg) = \phi(F)$ for all $F \Subset G$ and $g \in G$;
\item $\phi(F_1 \cup F_2) \leq \phi(F_1) + \phi(F_2)$ for all $F_1, F_2\Subset G$.
\end{enumerate}
Then there exists $b\geq 0$ such that for every $\eps > 0$ there exists $K\Subset G$ and $\delta > 0$ such that $\big| b - \frac{\phi(F)}{|F|} \big| \leq \eps$ for every $(K, \delta)$-invariant set $F\Subset G$.
\end{theorem}
\begin{prop}[see {\cite[Proposition 10.4.1]{Coornaert}}]
Let $G$ be a countable amenable group and $\alpha: G\curvearrowright X$ a topological dynamical system.
For a compatible metric $d$ on $X$ and $F\Subset G$, define the metric $d^\alpha_F$ via
\[
d^\alpha_F(x,y)=\max_{g\in F} \ d(\alpha_g(x),\alpha_g(y)).
\]
Let $\eps>0$ be a constant.
Then the map
\[
\Pfin(G)\ \ni \ F\mapsto \widim_\eps(X,d^\alpha_F)
\]
has the properties as required by \autoref{thm:subadditive-convergence}.
Consequently, if $F_n\Subset G$ is a F{\o}lner sequence, then the limit
\[
\mdim_\eps(X,\alpha,d)=\lim_{n\to\infty} |F_n|^{-1}\widim_\eps(X,d^\alpha_{F_n}) \ \in \ [0,\infty]
\]
exists and is independent of the choice of $(F_n)_n$.
\end{prop}
\begin{defi} \label{def:mdim}
Let $G$ be a countable amenable group and $\alpha: G\curvearrowright X$ a topological dynamical system.
The \emph{mean dimension} of $(X, \alpha)$ is defined as
\[
\mdim(X,\alpha) = \sup_{\eps>0} \ \mdim_\eps(X,\alpha,d) \ \in \ [0,\infty],
\]
where $d$ is some compatible metric on $X$.\footnote{This definition contains the implicit claim that this supremum does not depend on the chosen metric. This is not completely trivial, but it is well-known; see \cite[Theorem 10.4.2]{Coornaert}.}
In the cases where the choice of the action $\alpha$ is implicitly clear from context, we just write $\mdim(X)$.
\end{defi}
\begin{example}[see {\cite[Proposition 3.3]{LindenstraussWeiss00}}]
For every natural number $m\geq 1$, we can consider the $m$-dimensional cubical shift $\sigma: G\curvearrowright ([0,1]^m)^G$ as defined before.
Then $\mdim(([0,1]^m)^G)=m$.
\end{example}
\begin{rem}
It is an easy consequence of its definition that mean dimension respects inclusions.
That is, given an equivariant inclusion $X_1\into X_2$ of $G$-spaces, one has the inequality $\mdim(X_1)\leq\mdim(X_2)$.
In light of the above, it follows immediately that mean dimension provides an obstruction to the embeddability of a $G$-space $X$ into the $m$-dimensional cubical shift.
\end{rem}
\section{The embedding result}
\begin{defi}
Let $(X,d)$ be a compact metric space and $\eps>0$ a constant.
A continuous map $f: X\to Z$ into another topological space is called an \emph{$\eps$-embedding}, if $\operatorname{diam}(f^{-1}(z))<\eps$ for all $z\in Z$.
\end{defi}
The following lemma by Gutman--Tsukamoto plays the same role in our proof of the main result as it did in theirs.
\begin{lemma}[{\cite[Lemma 2.1]{GutmanTsukamoto14}}] \label{lem:widim}
Let $(X,d)$ be a compact metric space, $m\geq 1$ a natural number and $f_0 :X\to [0,1]^m$ a continuous map.
Suppose that the numbers $\delta,\eps>0$ satisfy the implication
\[
d(x,y)<\eps\quad\implies\quad\|f_0(x)-f_0(y)\|_\infty <\delta.
\]
If $\widim_\eps(X,d) < m/2$, then there exists an $\eps$-embedding $f: X\to [0,1]^m$ satisfying
\[
\|f-f_0\|_\infty:=\max_{x\in X} \|f(x)-f_0(x)\|_\infty <\delta.
\]
\end{lemma}
\begin{defi}
Let $(X,\alpha)$ be a topological dynamical system, $m\geq 1$ a natural number and $f: X\to [0,1]^m$ a continuous map.
We then define a continuous equivariant map $I_f: X\to ([0,1]^m)^G$ via $I_f(x)=(f(\alpha_g(x)))_{g\in G}$.
\end{defi}
\begin{lemma} \label{lem:dense-eps-embeddings}
Let $\beta: G\curvearrowright Y$ be an almost finite action on a compact totally disconnected space.
Let $\alpha: G\curvearrowright X$ be an action on a compact metrizable space that arises as an extension of $\beta$ via the factor map $\pi: (X,\alpha)\onto (Y,\beta)$.
Let $m \geq 1$ be a natural number and suppose that $\mdim(X, \alpha) < \frac{m}{2}$.
Choose a compatible metric $d$ on $X$.
Then for any $\eta > 0$ the set of functions
\[
A_{\eta} = \{ f \in \mathcal C(X,[0,1]^m) \mid I_f \times \pi \text{ is an $\eta$-embedding} \}
\]
is dense in $\mathcal C(X,[0,1]^m)$ with respect to $\|\cdot\|_\infty$.
\end{lemma}
\begin{proof}
Let $f_0: X\to [0,1]^m$ be a continuous map and let $\eta, \delta>0$.
We shall argue that there exists $f\in A_\eta$ with $\|f-f_0\|_\infty<\delta$.
Since $f_0$ is uniformly continuous, we can find some $0<\eps\leq\eta$ that fits into the implication
\[
d(x,y)<\eps \quad\implies\quad\|f_0(x)-f_0(y)\|_\infty <\delta.
\]
By assumption, we have $\mdim_\eps(X,\alpha,d)\leq\mdim(X,\alpha)<m/2$.
Since $\mdim_\eps(X,\alpha,d)$ arises as a limit in the sense of \autoref{thm:subadditive-convergence}, we can find a constant $\gamma>0$ and $K\Subset G$ such that for every $(K,\gamma)$-invariant set $S\Subset G$, we have $\widim_\eps(X,d^\alpha_S)<|S|m/2$.
Since we assumed $\beta$ to be almost finite, we may find a clopen castle $\{(W_i,S_i)\}_{i\in I}$ with $(K,\gamma)$-invariant shapes and $Y=\bigsqcup_{i\in I}\bigsqcup_{s\in S_i} \beta_s(W_i)$.
By defining the pullbacks $Z_i=\pi^{-1}(W_i)$ for $i\in I$, we obtain the clopen castle $\{(Z_i,S_i)\}_{i\in I}$ partitioning $X$.
Given $i\in I$, we have in particular that $\widim_\eps(X,d^\alpha_{S_i})< |S_i|m/2$.
Consider the continuous map $F^0_i: X\to [0,1]^{|S_i|m}\cong ([0,1]^m)^{S_i}$ given by $F_i^0(x)=(f_0(\alpha_s(x)))_{s\in S_i}$.
Note that by design, we have the implication
\[
d^\alpha_{S_i}(x,y)<\eps \quad\implies\quad \|F^0_i(x)-F^0_i(y)\|_\infty <\delta.
\]
Using \autoref{lem:widim}, we may choose a continuous $\eps$-embedding $F_i: X\to ([0,1]^m)^{S_i}$ with respect to the metric $d^{\alpha}_{S_i}$ such that $\|F_i-F_i^0\|_\infty<\delta$.
We now define the continuous function $f: X\to [0,1]^m$ as follows.
If $x\in X$ is a point, choose the unique index $i\in I$ and $s\in S_i$ with $x\in\alpha_s(Z_i)$, and set $f(x)=F_i(\alpha_s^{-1}(x))(s)$.
Since this assignment is clearly continuous on each clopen set belonging to a partition of $X$, $f$ is indeed a well-defined continuous map.
We claim that $\|f-f_0\|_\infty<\delta$ and $f\in A_\eta$.
The first of these properties holds because given $x\in X$ as above, we see that
\[
f(x)=F_i(\alpha_s^{-1}(x))(s) \approx_\delta F_i^0(\alpha_s^{-1}(x))(s)=f_0(\alpha_s(\alpha_s^{-1}(x)))=f_0(x).
\]
So let us argue $f\in A_\eta$.
Suppose $x,y\in X$ are two points such that $(I_f\times\pi)(x)=(I_f\times\pi)(y)$.
Then certainly $\pi(x)=\pi(y)$.
Since the clopen partition $X=\bigsqcup_{i\in I}\bigsqcup_{s\in S_i} \alpha_s(Z_i)$ is the pullback from a clopen partition of $Y$, we see that there is a unique index $i\in I$ and $s\in S_i$ with $x,y\in\alpha_s(Z_i)$.
Since $x,y\in\alpha_s(Z_i)$, we have $\alpha_{ts^{-1}}(x), \alpha_{ts^{-1}}(y)\in \alpha_t(Z_i)$ for all $t\in S_i$. Since $I_f(x)=I_f(y)$, or in other words $f(\alpha_g(x))=f(\alpha_g(y))$ for all $g\in G$, it follows for all $t\in S_i$ that
\[
F_i(\alpha^{-1}_s(x))(t)= f(\alpha_{ts^{-1}}(x))=f(\alpha_{ts^{-1}}(y))=F_i(\alpha^{-1}_s(y))(t).
\]
Since $t\in S_i$ is arbitrary, it follows that $F_i(\alpha_s^{-1}(x))=F_i(\alpha_s^{-1}(y))$.
Since $F_i$ was an $\eps$-embedding with respect to the metric $d^\alpha_{S_i}$ and $s\in S_i$, we may finally conclude $d(x,y)<\eps\leq\eta$.
This finishes the proof.
\end{proof}
\begin{theorem} \label{thm:embedding-result}
Let $\beta: G\curvearrowright Y$ be an almost finite action on a compact totally disconnected metrizable space.
Let $\alpha: G\curvearrowright X$ be an action on a compact metrizable space that arises as an extension of $\beta$ via the factor map $\pi: (X,\alpha)\onto (Y,\beta)$.
Let $m \geq 1$ be a natural number and suppose that $\mdim(X, \alpha) < \frac{m}{2}$.
Then the set of functions $f\in\mathcal C(X,[0,1]^m)$ for which
\[
I_f\times\pi: (X,\alpha)\to \big( ([0,1]^m)^G\times Y, \sigma\times\beta \big)
\]
is an embedding, is dense with respect to $\|\cdot\|_\infty$.
Consequently, there exists an embedding of $G$-spaces $X\into ([0,1]^{m+1})^G$.
\end{theorem}
\begin{proof}
Let us first explain the last sentence of the claim.
Since $Y$ is totally disconnected, it can be embedded into $[0,1]$, say via a continuous map $\psi$.
This implies that $\bar{\psi}: Y\to [0,1]^G$ given by $y\mapsto(\psi(\beta_g(y)))_{g\in G}$ is an equivariant embedding.
So assuming the rest of the claim holds, we obtain a chain of embeddings of $G$-spaces
\[
X\stackrel{I_f\times\pi}{\longrightarrow} ([0,1]^m)^G\times Y \stackrel{\operatorname{id}\times\bar{\psi}}{\longrightarrow} ([0,1]^{m})^G\times ([0,1])^G \cong ([0,1]^{m+1})^G.
\]
If we adopt the notation from \autoref{lem:dense-eps-embeddings}, it is clear that the set of functions in question is equal to the intersection $\bigcap_{n\geq 1} A_{1/n}$.
In light of the fact that $\mathcal C(X,[0,1]^m)$ is a closed subset of the Banach space $\mathcal C(X,\mathbb R^m)$ with respect to $\|\cdot\|_\infty$, the claim follows immediately from the Baire category theorem if we show that the sets $A_\eta$ are open for all $\eta>0$.
So let us briefly argue that this is the case.
Recall that we have chosen a compatible metric $d$ on $X$.
Let $f\in A_\eta$.
Given an infinite tuple $(c_g)_{g\in G}$ of strictly positive numbers with $\sum_{g\in G} c_g=1$, we define the constant $\delta$ via
\[
2\delta = \inf\Big\{ \sum_{g\in G} c_g\|f(\alpha_g(x))-f(\alpha_g(y)) \|_\infty \Big| x,y\in X, \pi(x)=\pi(y), d(x,y)\geq\eta \Big\}.
\]
Keep in mind that the assignment
\[
\big( (z^{(1)}_g)_{g\in G}, (z^{(2)}_g)_{g\in G} \big) \mapsto\sum_{g\in G} c_g\|z^{(1)}_g-z^{(2)}_g\|_\infty
\]
defines a compatible metric on $([0,1]^m)^G$.
Since $I_f$ is continuous, $I_f\times\pi$ is an $\eta$-embedding and $X$ is compact, it follows that $\delta>0$.
We claim that the open $\delta$-ball around $f$ is contained in $A_\eta$.
Indeed, let $f_0\in\mathcal C(X,[0,1]^m)$ with $\|f-f_0\|_\infty<\delta$.
Suppose that $x,y\in X$ satisfy $(I_{f_0}\times\pi)(x)=(I_{f_0}\times\pi)(y)$.
Then $\pi(x)=\pi(y)$ and it follows from the triangle inequality that
\[
\sum_{g\in G} c_g\|f(\alpha_g(x))-f(\alpha_g(y))\|_\infty < \sum_{g\in G} c_g(2\delta+\|f_0(\alpha_g(x))-f_0(\alpha_g(y))\|_\infty) = 2\delta.
\]
By the definition of $\delta$, it follows that $d(x,y)<\eta$.
Since $x$ and $y$ were arbitrary, we conclude $f_0\in A_\eta$ and the proof is finished.
\end{proof}
We also record an improved version of the embedding result, which is an immediate consequence of the above if we assume more about the system $(Y,\beta)$.
\begin{cor} \label{cor:optimal-embedding}
Let $\beta: G\curvearrowright Y$ be an almost finite action on a compact totally disconnected metrizable space.
Suppose that $\beta$ is conjugate to a subshift over a finite alphabet, i.e., there exists some natural number $\ell\geq 2$ and an embedding $Y\into\{1,\dots,\ell\}^G$ of $G$-spaces.
Let $\alpha: G\curvearrowright X$ be an action on a compact metrizable space that arises as an extension of $\beta$.
Let $m \geq 1$ be a natural number and suppose that $\mdim(X, \alpha) < \frac{m}{2}$.
Then there exists an embedding of $G$-spaces $X\into ([0,1]^{m})^G$.
\end{cor}
\begin{proof}
Find some embedding $\varphi: [0,1]\times\{1,\dots,\ell\}\into [0,1]$, which gives rise to an equivariant embedding
\[
\bar{\varphi}: [0,1]^G\times\{1,\dots,\ell\}^G \cong \big([0,1]\times\{1,\dots,\ell\}\big)^G \into [0,1]^G
\]
by applying $\varphi$ componentwise.
This allows us to proceed exactly as in the last part of \autoref{thm:embedding-result}, except that we may appeal to the embedding
\[
\begin{array}{ccl}
([0,1]^m)^G\times Y &\into& ([0,1]^m)^G\times\{1,\dots,\ell\}^G \\
&\cong& ([0,1]^{m-1})^G\times \big([0,1]\times\{1,\dots,\ell\}\big)^G \\
&\stackrel{\operatorname{id}\times\bar{\varphi}}{\longrightarrow}& ([0,1]^{m-1})^G\times ([0,1])^G\ \cong \ ([0,1]^{m})^G.
\end{array}
\]
\end{proof}
\begin{rem}
In light of the fact that almost finiteness is a concept that can be defined for actions on arbitrary spaces, one might wonder how far the main result of this note can be generalized.
Suppose $\gamma: G\curvearrowright Z$ is an almost finite action on a not necessarily disconnected space.
It is then well-known that $\gamma$ has the small boundary property and therefore also $\mdim(Z,\gamma)=0$; see \cite[Theorem 5.6]{KerrSzabo18} and \cite[Theorem 5.4]{LindenstraussWeiss00}.\footnote{Note that a priori, this reference in \cite{KerrSzabo18} assumes freeness of the action. However, the statement in question is an ``if and only if'', and we only need the ``if'' part, which does not require freeness of the involved action in any way.}
Can one prove directly that $(Z,\gamma)$ embeds into the 1-dimensional cubical shift?
If so, is the statement of \autoref{thm:embedding-result} true if we replace $\beta: G\curvearrowright Y$ by $\gamma: G\curvearrowright Z$?
Although this would seem plausible, the proof does by no means generalize in any obvious way to this more general case.
The first named author has proved a partial result in this direction in his master thesis \cite{Lanckriet21}, namely under the assumption that $Z$ has finite covering dimension $d$.
In that case, a version of \autoref{thm:embedding-result} is true, where the conclusion is weakened to obtain an embedding into the $(m(d+2)+1)$-dimensional cubical shift.
Since this dimensional upper bound is far from what we expect to be optimal, and since it does not actually recover \autoref{thm:embedding-result} as a special case, we decided not to include this generalized approach in this note.
\end{rem}
\textbf{Acknowledgements.}
The second named author has been supported by research project C14/19/088 funded by the research council of KU Leuven, and the project G085020N funded by the Research Foundation Flanders (FWO).
\end{document}
\begin{document}
\title{Disentangling the structure of ecological bipartite networks from observation processes}
\section*{Abstract}
The structure of a bipartite interaction network can
be described by providing a clustering for each of the two types of nodes.
Such clusterings are outputted by fitting a Latent Block Model (LBM) on an observed network
that comes from a sampling of species interactions in the field.
However, the sampling is limited and possibly uneven. This
may jeopardize the fit of the LBM, and hence the description of the
structure of the network, by detecting structures which result from the sampling rather than from
actual underlying ecological phenomena.
If the observed interaction network consists of a weighted bipartite network where the
number of observed interactions between two species is available, the sampling efforts for
all species can be estimated and used to correct the LBM fit. We propose
to combine an observation model that accounts for sampling and an LBM for
describing the structure of underlying possible ecological interactions.
We develop an original inference procedure for this model, the efficiency of
which is demonstrated in simulation studies.
The practical interest of our model in ecology is highlighted
on a large dataset of plant-pollinator networks.
\textbf{Keywords:} Latent Block Model, Sampling Effect, Stochastic Expectation Maximization, Nestedness, Modularity
\section{Introduction}
\paragraph{Networks in ecology}
Analysing the structure of ecological networks has proven very enlightening to understand the functioning and response to perturbation of ecological communities, leading to a strong growth in the number of publications in the field \citep{ings_review_2009}.
Networks have been widely used to study food webs, before being used for other types of ecological interactions requiring the study of bipartite networks \citep{ings_review_2009}, including mutualistic relationships such as between plants and pollinators \citep{Lara-Romero-2019,kaiser-bunbury_robustness_2010} or plants and ants \citep{bluthgen2004bottom}, and antagonistic relationships such as between hosts and parasites \citep{hadfield_tale_2014}.
Several metrics are used to study the structure of bipartite ecological networks as a whole, such as nestedness \citep{Lara-Romero-2019,de_manincor_how_2020,terry_finding_2020,fortuna_nestedness_2010} or modularity \citep{de_manincor_how_2020,terry_finding_2020,fortuna_nestedness_2010,guimera_missing_2009}.
Another possibility to deal with a bipartite network is to use a latent block model (LBM) \citep{govaert2010latent}, as done in some studies on ecological networks \citep{terry_finding_2020,sander_what_2015,leger_clustering_2015,Kefi}. Whenever an LBM is fitted on a bipartite network, it produces two clusterings, one for each set of species. These clusterings can be seen as groups of interacting species. In this model, the probability that two species are in interaction depends on which groups the species belong to. Multiple methods exist to estimate the parameters of an LBM \citep{brault_estimation_2014,kuhn_properties_2020}.
\paragraph{Sampling issues}
In ecology, sampling a plant pollinator network is labor-intensive and many datasets only reveal a subset of the existing interactions \citep{chacoff_evaluating_2012,jordano_sampling_2016}. The network can be sampled with different methods, such as timed observations \citep{Lara-Romero-2019,kaiser-bunbury_robustness_2010}, transects \citep{de_manincor_how_2020,magrach_plantpollinator_nodate} or study of pollen \citep{de_manincor_how_2020}. These methods have an asymmetrical focus on either insects or plants, and therefore only allow an asymmetrical sampling effort control that could bias the observed network. \cite{de_manincor_how_2020} have shown that different types of sampling on the same network can lead to different structures. Differences in networks could also be caused by the observed abundance of the different species or by the duration of the study \citep{ings_review_2009}.
Completeness of sampling for a species is defined as the proportion of its observed interactions over the total number of its actual interactions.
Some propositions to evaluate this completeness rely either on accumulation curves, which model the rate of new interaction observations over time \citep{rivera2012effects},
or on external data assessing the abundance of the different interacting species \citep{bluthgen2004bottom}.
The sampling process can induce enormous biases in the statistical analyses of the networks that have not been taken into account in most analyses of ecological network structure. This raises doubts regarding the current understanding of the structure of pollination networks \citep{bluthgen2008interaction}.
In particular, recent studies debate whether the observed nestedness in interaction networks simply results from sampling effects \citep{staniczenko2013ghost,krishna_neutral-niche_2008}.
Moreover, the impact of sampling completeness on many metrics such as modularity or nestedness has been highlighted \citep{rivera2012effects}.
We demonstrate in the following motivating example how a credible scenario of plant-pollinator interaction sampling effects leads to discovering an artificial structure in the data.
\paragraph{Motivating example} This example is illustrated in \cref{fig0}. All the following elements are properly defined later in the paper. Let us consider a binary bipartite interaction network that can be represented by its incidence matrix (black dots for interaction and white dots otherwise). A simple probabilistic model to generate such a network is based on the Erdős–Rényi model \citep{erdosrenyi1959}
where the probability of connection between two species is constant and the connections are drawn independently. The Erdős–Rényi model is considered unstructured and is equivalent to the LBM with a unique block per set of species.
If the interactions are sub-sampled uniformly, the inference of an LBM on this network data leads to finding a unique block with a lower estimated probability of connection. However, if the sub-sampling is not uniform, a structure may be detected. In the simulation setting in \cref{fig0}, the intensity of the sampling depends on each species, for example due to differences in species abundances or unequal sampling effort among species. Even if most of the interactions have been sampled ($\sim 70\%$), a structure is found with two blocks when inferring an LBM (\cref{fig0}C). This structure, which is caused only by the sampling effort distribution, could be misinterpreted as a true ecological phenomenon. Most studies using the LBM do not take into account the potential missing interactions in the inference of the structure.
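A minimal Python sketch reproducing this kind of scenario is given below; the parameter values are purely illustrative and are not those used to produce \cref{fig0}.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
n1, n2 = 60, 80      # numbers of row / column species (illustrative)
p = 0.3              # constant connection probability: unstructured network
G = 50               # global sampling effort

# True, unstructured binary network M (Erdos-Renyi / one-block LBM)
M = rng.binomial(1, p, size=(n1, n2))

# Uneven species-specific sampling efforts in (0, 1]
lam = rng.beta(0.3, 1.5, size=n1); lam /= lam.max()
mu = rng.beta(0.3, 1.5, size=n2); mu /= mu.max()

# Observed counts R = M * N with N ~ Poisson(lam_i * mu_j * G)
N = rng.poisson(np.outer(lam, mu) * G)
R = M * N
V = (R > 0).astype(int)   # observed binary incidence matrix

print("fraction of true interactions observed:", V.sum() / M.sum())
# Fitting an LBM on V typically detects several blocks
# even though M itself is unstructured.
\end{verbatim}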
\begin{figure}
\caption{Example of a situation where an unstructured matrix which has been sub-sampled in a non-uniform way leads to a seemingly nested LBM. \textbf{A)}
\label{fig0}
\end{figure}
\paragraph{Our contribution}
Both existing literature and the motivating example highlight the need for taking the observation process into account.
We focus on cases where the data at hand consists of a weighted network where the counts of interaction between pairs of species are reported. No external data such as species abundance or reports of time of observation are available.
This kind of situation is rather common among the available datasets.
Our underlying assumption is that the observed weighted network is a result of two phenomena: an ecological phenomenon that could be considered as a binary network providing the possible or impossible interactions and an observation phenomenon which is related to the relative abundances of species and their involvement in pollination interactions
at the sampling time, and also related to the sampling protocol.
The latter phenomenon results in smaller or larger counts and possibly to zero counts for actual but not observed interactions.
Our goal is then to disentangle the two phenomena to recover in particular the structure of the binary network which accounts for the
possible ecological interactions.
In order to achieve this goal, we developed a new latent variable probabilistic model called the Corrected Observation Process for Latent Block Model (CoOP-LBM). This model assumes that the observed data matrix is the entrywise product of a binary LBM, which encodes which interactions are possible and which are not, with a matrix of counts following Poisson distributions that represents the sampling scheme. In this setting, unobserved interactions (a $0$ in the incidence matrix) can have two sources: they can come from the binary network (impossible interaction) or they can come from the sampling scheme (missed interaction), but we cannot distinguish impossible interactions from missed interactions.
We prove the identifiability of the model and we propose an algorithm to estimate the parameters of this model based on a Stochastic version of the Expectation-Maximisation (SEM) algorithm \citep{celeux1992stochastic}. Our SEM alternates between maximisation steps, where parameters are updated, and two simulation steps, during which the location of missing interactions and the block belongings are simulated. An integrated complete likelihood criterion \citep{biernacki2000} is proposed to select the number of groups for the clustering. A simulation study is performed where we show the performance of our method to recover the clusters and to detect missing interactions.
We also show how the imputation of missing interactions can correct the estimation of some metrics commonly used in ecology.
We applied our method on a large dataset \citep{dore_relative_2021}
that contains 70 networks with counting data. We show the goodness of fit of our model to the data and show that the detected structure is rather different when using a CoOP-LBM compared to an LBM on binarised networks.
\paragraph{Related work}
Several studies have started to include missing data within SBM, but often in cases where the missing interactions are properly labelled.
Estimation of the unipartite stochastic block model (SBM) with missing data has been studied for the binary case by \cite{tabouy_variational_2019}, adapting a variational expectation maximization algorithm, and for the weighted case by \cite{Aicher_2014} using a Bayesian approach. Contrary to our study, both have access to where the data is missing.
A few recent studies in ecology also aimed to estimate missing interactions. \cite{terry_finding_2020} provide and compare different methods of estimation and imputation of missing interactions. To estimate the number of missing interactions, \cite{terry_finding_2020} and \cite{macgregor_estimating_2017} use the Chao estimator \citep{chao_coverage-based_2012}, which will be compared to our method. Our model differs from the degree-corrected LBM \citep{zhao_variational_2022}, which does not take into account the possibility of missing an interaction.
\cite{tabouy_variational_2019} also demonstrated how the inference of the Stochastic Block Model is jeopardized by the observation process.
\paragraph{Outline of the paper}
We give the mathematical definition of the model and prove its identifiability in \cref{Definition}. Then, \cref{inference} details the inference algorithm and the model selection procedure. The simulation study is presented in \cref{simulation}. All the simulations are reproducible and available on \url{https://anakokemre.github.io/CoOP-LBM/index.html}.
Eventually, the application to 70 plant-pollinator networks is done and discussed in \cref{secappdata}.
\section{Definition of the model}\label{Definition}
The available data are given as a matrix $R = (R_{i,j})_{ i = 1, \dots, n_1;\ j = 1, \dots, n_2}$ where the $n_1$
rows correspond to one type of species (e.g.\ plants) and the $n_2$ columns correspond to the other type of species in interaction (e.g.\ pollinators). For all $i = 1, \dots, n_1$ and $j = 1, \dots, n_2$, the elements $R_{i,j}\in\mathbb{N}$ denote the
counts of observed interactions between species $i$ and $j$.
We assume that $R = M \odot N $ is the Hadamard product of two matrices,
with $M$ being a realization of an LBM and the elements of $N$ following Poisson distributions independently of $M$. More precisely, for $M$,
we assume that latent variables $\mathbf{Z}^1 = (Z^1_{ik} ;\ i = 1,\dots, n_1;\ k =1,\dots,Q_1)$ and $\mathbf{Z}^2 = (Z^2_{jl}; \ j = 1,\dots, n_2;\ l =1,\dots,Q_2)$
provide respectively a partition of the species in rows and of the species in columns.
The variable $Z^1_{ik}$ is a binary indicator such that $Z^1_{ik}=1$ if species $i$ (in row) belongs to block $k$, and $0$ otherwise. Similarly, $Z^2_{jl}=1$ if species $j$ (in column) belongs to block $l$, and $0$ otherwise.
We assume that these latent variables are independent and that
$\mathbb{P}(Z^1_{ik}=1)=\alpha_k$ for all $1\le i\le n_1$, $1\le k\le Q_1$ and $\mathbb{P}(Z^2_{jl}=1)=\beta_l$ for all $1\le j\le n_2$, $1\le l\le Q_2$.
Given the latent variables, the distribution of the elements of $M$ is given by
$$\mathbb{P}(M_{i,j} = m | Z^1_{ik}=1, Z^2_{jl}=1) = \pi_{kl}^m (1-\pi_{kl})^{1-m}, \quad m\in\{0,1\}.$$
Moreover, it is supposed for all $1\le i\le n_1,\ 1\le j\le n_2$ that $N_{i,j} \sim \mathcal{P}(\lambda_i\mu_jG)$ with $\lambda_i \in (0,1]$, $\mu_j \in (0,1]$, with $\max_{i=1,\dots,n_1}\lambda_i=1$ ,$\max_{j=1,\dots,n_2}\mu_j=1$ and $G>0$. The parameter $\lambda_i$ represents the relative sampling effort of row species $i$, $\mu_j$ represents the relative sampling effort of column species $j$, and $G$ is a constant representing the global sampling effort of the network.
Therefore, for all $1\le i\le n_1,\ 1\le j\le n_2$, the probability distribution of $R_{i,j} = M_{i,j} N_{i,j}$ given the latent variables $\mathbf{Z}^1,\mathbf{Z}^2$ is
$$
\mathbb{P}(R_{i,j} = r| Z^1_{ik}=1,Z^2_{jl}=1;\boldsymbol{\theta}) = \left\{
\begin{array}{ll}
\ \pi_{kl} \frac{(\lambda_i\mu_jG)^{r}}{r!}e^{-\lambda_i\mu_jG}& \mbox{if } r>0 \\
\\
1 - \pi_{kl} ( 1 - e^{-\lambda_i\mu_jG})& \mbox{if } r=0
\end{array}
\right.
$$
where $\boldsymbol{\theta} = (\boldsymbol{\alpha},\boldsymbol{\beta},\boldsymbol{\pi},\boldsymbol{\lambda},\boldsymbol{\mu},G)$ with
$\boldsymbol{\alpha}=(\alpha_k)_{1\le k\le Q_1}\in [0,1]^{Q_1}$, $\boldsymbol{\beta}=(\beta_l)_{1\le l\le Q_2}\in [0,1]^{Q_2}$, $\boldsymbol{\pi}=(\pi_{kl})_{1\le k\le Q_1,1\le l\le Q_2}\in[0,1]^{Q_1\times Q_2}$, $\boldsymbol{\lambda}=(\lambda_i)_{1\le i\le n_1}\in(0,1]^{n_1}$, ${\boldsymbol{\mu}=(\mu_j)_{1\le j\le n_2}\in(0,1]^{n_2}}$, $G>0$,
denotes all the parameters to be estimated.
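For concreteness, the generative mechanism can be sketched in a few lines of Python; the parameter values below mirror those of the simulation study in \cref{simulation} and are given only as an illustration, not as part of the model definition.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(1)
n1, n2, Q1, Q2 = 100, 100, 3, 3
alpha = np.full(Q1, 1 / Q1)
beta = np.full(Q2, 1 / Q2)
pi = np.array([[0.95, 0.75, 0.50],
               [0.75, 0.50, 0.50],
               [0.50, 0.50, 0.05]])
G = 200

# Latent block memberships Z1 and Z2
Z1 = rng.choice(Q1, size=n1, p=alpha)
Z2 = rng.choice(Q2, size=n2, p=beta)

# Binary layer M: possible / impossible interactions (LBM)
M = rng.binomial(1, pi[np.ix_(Z1, Z2)])

# Sampling layer N with relative efforts lambda, mu and global effort G
lam = rng.beta(0.3, 1.5, size=n1); lam /= lam.max()
mu = rng.beta(0.3, 1.5, size=n2); mu /= mu.max()
N = rng.poisson(np.outer(lam, mu) * G)

# Observed count matrix R (Hadamard product)
R = M * N
\end{verbatim}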
The full log-likelihood is then
$$\log\mathcal{L}(R,\mathbf{Z}^1, \mathbf{Z}^2;\boldsymbol{\theta}) = \log\mathcal{L}(\mathbf{Z}^1;\boldsymbol{\alpha}) + \log\mathcal{L}( \mathbf{Z}^2;\boldsymbol{\beta}) + \log\mathcal{L}(R|\mathbf{Z}^1, \mathbf{Z}^2;\boldsymbol{\pi},\boldsymbol{\lambda},\boldsymbol{\mu},G) $$
with
\begin{align}
\log\mathcal{L}(\mathbf{Z}^1;\boldsymbol{\alpha})=& \sum_{i=1}^{n_1}\sum_{k=1}^{Q_1} Z^1_{ik} \log(\alpha_k), \quad
\log\mathcal{L}(\mathbf{Z}^2;\boldsymbol{\beta}) =\sum_{j=1}^{n_2}\sum_{l=1}^{Q_2} Z^2_{jl} \log(\beta_l), \notag\\
\log\mathcal{L}(R|\mathbf{Z}^1, \mathbf{Z}^2;\boldsymbol{\pi},\boldsymbol{\lambda},\boldsymbol{\mu},G) =& \sum_{\substack{i,j\\R_{i,j}>0}}
\sum_{k=1}^{Q_1}\sum_{l=1}^{Q_2}
Z^1_{ik} Z^2_{jl}(\log(\pi_{kl}) + R_{i,j}\log(\lambda_i\mu_j G) - \lambda_i\mu_j G-\log(R_{i,j}!)) \notag\\
&+ \sum_{\substack{i,j\\R_{i,j}=0}}\sum_{k=1}^{Q_1}\sum_{l=1}^{Q_2} Z^1_{ik} Z^2_{jl}\log\bigl(1-\pi_{kl}(1-e^{-\lambda_i\mu_j G})\bigr)\notag\,.
\end{align}
Since the variables $\mathbf{Z}^1$ and $\mathbf{Z}^2$ are latent, they are integrated out in order to obtain the observed log-likelihood:
\begin{equation}\label{eqobslik}
\log \mathcal{L}(R;\boldsymbol{\theta}) = \log \sum_{(\mathbf{Z}^1,\mathbf{Z}^2) \in \{1,\ldots,Q_1\}^{n_1}\times \{1,\ldots,Q_2\}^{n_2}}\mathcal{L}(R,\mathbf{Z}^1, \mathbf{Z}^2;\boldsymbol{\theta})\,.
\end{equation}
This model is identifiable according to the following theorem.
\begin{theorem}
{Sufficient condition for identifiability.
Under the following assumptions on the parameters in $\boldsymbol{\theta}$ and the size of the network:
\begin{itemize}
\item (A1) for all $1 \leq k \leq Q_1,\alpha_k > 0$ and the coordinates of vector $\tau = \boldsymbol{\pi} \boldsymbol{\beta}$ are distinct,
\item (A2) for all $1 \leq l \leq Q_2,\ \beta_l > 0$ and the coordinates of vector $\sigma= \boldsymbol{\alpha}^\top \boldsymbol{\pi} $ are distinct (where $\boldsymbol{\alpha}^\top$ is the transpose of $\boldsymbol{\alpha}$),
\item (A3) $n_1 \geq 2Q_2 -1$ and $n_2 \geq 2Q_1-1$ ,
\item (A4) $G>0$, $\lambda_i \in ]0,1]$, $\mu_j \in ]0,1]$, with $\max_{i=1,\dots,n_1}\lambda_i=1$ ,$\max_{j=1,\dots,n_2}\mu_j=1$,
\end{itemize}
}
then CoOP-LBM{} is identifiable.
\end{theorem}
\begin{proof}{}
The proof is given in the appendix and is in two parts: the first part deals with the identifiability of $(\boldsymbol{\lambda},\boldsymbol{\mu},G)$, the second part, adapted from the proof of \cite{celisse_consistency_2012} and \cite{brault_estimation_2014}, deals with the identifiability of $(\boldsymbol{\alpha},\boldsymbol{\beta},\boldsymbol{\pi})$.
\end{proof}
\paragraph{Ecological justification of the CoOP-LBM.}
The CoOP-LBM connects well with the way species interaction networks are viewed in ecology because ecological studies make a clear distinction between the "true" network describing all possible interactions among species and the observed network resulting from sampling process and the relative abundance of the species at a given location. For instance, for pollination networks, unobserved interactions have been categorized as either forbidden interactions (i.e. the interaction cannot occur because species do not co-occur or have mismatching traits) or missing interactions (i.e. the interaction exists but would require additional sampling to be detected) \citep{olesen_missing_2011,jordano_sampling_2016}. Our model of interaction sampling process also relates well with classical assumptions and results on species interactions in ecology. Abundances of plants and pollinators, which could relate in our model with species relative sampling effort, are known to greatly correlate with interaction frequency in pollination networks \citep{fort_abundance_2016}.
The probability of interaction among species is moreover often assumed to be directly proportional to the product of species relative abundances in ecological networks under the mass action hypothesis \citep[e.g.][]{staniczenko2013ghost}. It should be noted however that our model assumes no species preference, which could also affect species interaction probability in ecological networks \citep{ staniczenko2013ghost}.
\section{Inference and model selection}\label{inference}
From the observation of an interaction network where the counts of occurring interactions are recorded, our goal is to infer our CoOP-LBM.
This inference can be separated into two parts: inferring the parameters when the numbers of blocks in rows ($Q_1$) and columns ($Q_2$) are known, and selecting these numbers of blocks. The former part cannot be dealt with by directly maximising the observed likelihood given in Equation \eqref{eqobslik} since the sum over the set $\{1,\ldots,Q_1\}^{n_1}\times\{1,\ldots,Q_2\}^{n_2}$ quickly becomes intractable as $Q_1$, $Q_2$, $n_1$ or $n_2$ grow.
A classical solution in mixture models with latent variables is to make recourse to an Expectation-Maximisation (EM) algorithm \citep{dempster1977maximum}. However, due to dependencies between the latent variables when conditioned to the observed data, the EM algorithm is not practicable in the SBM or the LBM.
Several alternatives have been proposed such as a variational approximation of the EM (VEM) algorithm \citep{daudin_mixture_2008,govaert2008block} or stochastic version of the EM algorithm \citep{brault_estimation_2014,kuhn_properties_2020}.
In Section \ref{ssecInf},
for a given $Q_1$ and $Q_2$, we provide our inference procedure which is inspired by a version of a stochastic EM algorithm \citep{brault_estimation_2014}.
In Section \ref{ssecMod}, we derive a penalised likelihood criterion to
select the numbers of blocks $Q_1$ and $Q_2$. This criterion is an Integrated Classification Likelihood (ICL) which has proven its practical efficiency in many blockmodels \citep{daudin_mixture_2008,brault_estimation_2014}.
\subsection{Estimation of parameters}\label{ssecInf}
The estimation of the parameters given $Q_1$ and $Q_2$ follows the scheme described in Algorithm \ref{algo:vem:nmar}. The steps are detailed in the text hereafter.
\begin{algorithm}[h]
\SetSideCommentLeft
\DontPrintSemicolon
\KwSty{Initialisation:} Provide $\mathbf{Z}^1_{(0)},\mathbf{Z}^2_{(0)}, \boldsymbol{\pi}_{(0)},\Tilde{M}_{(0)}$.\;
\Repeat{Convergence or max number of iterations reached.}{
\begin{enumerate}
\item M-step a) : update $\boldsymbol{\alpha}_{(n+1)},\boldsymbol{\beta}_{(n+1)}|\mathbf{Z}^1_{(n)}, \mathbf{Z}^2_{(n)}$,
\item
M-step b) : update $\boldsymbol{\lambda}_{(n+1)},\boldsymbol{\mu}_{(n+1)},G_{(n+1)}|\Tilde{M}_{(n)}$,
\item
S-step a): simulate $ \Tilde{M}_{(n+1)} |\mathbf{Z}^1_{(n)}, \mathbf{Z}^2_{(n)} ,\boldsymbol{\pi}_{(n)},\boldsymbol{\lambda}_{(n+1)},\boldsymbol{\mu}_{(n+1)},G_{(n+1)}$,
\item
M-step c) : update $\boldsymbol{\pi} _{(n+1)} |\Tilde{M}_{(n+1)},\mathbf{Z}^1_{(n)}, \mathbf{Z}^2_{(n)}$ ,
\item
S-step b) : simulate $\mathbf{Z}^1_{(n+1)}, \mathbf{Z}^2_{(n+1)} |\boldsymbol{\alpha}_{(n+1)},\boldsymbol{\beta}_{(n+1)},\boldsymbol{\pi}_{(n+1)},\Tilde{M}_{(n+1)}$.
\end{enumerate}
}
\caption{Stochastic EM for CoOP-LBM{} inference}
\label{algo:vem:nmar}
\end{algorithm}
The initial clusterings of nodes $\mathbf{Z}^1_{(0)}$ and $\mathbf{Z}^2_{(0)}$ can be obtained with hierarchical, spectral or k-means clustering algorithms. The initial matrix of probabilities of connection $\boldsymbol{\pi}_{(0)}$ is computed as the maximum likelihood estimator with $\mathbf{Z}^1_{(0)}$ and $\mathbf{Z}^2_{(0)}$ given, and $\Tilde{M}_{(0)}$ is initialized with the matrix $(\mathbbm{1}_{\{R_{i,j}>0\}})_{1\le i\le n_1,1\le j\le n_2}$.
After the burn-in period, the algorithm ends when $||\hat{\boldsymbol{\theta}}_{(n)}-\hat{\boldsymbol{\theta}}_{(n+1)}||_2< \epsilon $ with $\hat{\boldsymbol{\theta}}_{(n)} = \frac{1}{n} \sum_{i=1}^n {\boldsymbol{\theta}}_{(i)}$ or when the maximum number of iterations is reached.
Convergence of similar stochastic EM algorithms to a local maximum has been proved by \cite{delyon_convergence_1999}.
\paragraph{M-Step a)}
To update $\boldsymbol{\alpha},\boldsymbol{\beta}$ given $\mathbf{Z}^1_{(n)}, \mathbf{Z}^2_{(n)}$, we use the
maximum likelihood estimators:
$$\forall 1\le k\le Q_1,\ 1\le l\le Q_2,\quad \alpha_k= \frac{1}{n_1}\sum_{i=1}^{n_1}
Z^1_{ik},\quad \beta_l =\frac{1}{n_2} \sum_{j=1}^{n_2}
Z^2_{jl}. $$
\paragraph{M-Step b)}
The vector $\boldsymbol{\lambda}$ is updated with the following fixed point algorithm:
$$\lambda_k \propto \frac{\sum_{j=1}^{n_2}R_{k,j}}{\sum_{j=1}^{n_2}m_{k,j}\frac{\sum_{i=1}^{n_1}R_{i,j}}{\sum_{i=1}^{n_1}m_{i,j}\lambda_{i}}},\qquad 1\le k\le n_1. $$
As $\max_i \lambda_i =1$, a solution $\boldsymbol{\lambda}$ found by the fixed point algorithm is divided by its maximum coordinate in order to obtain the estimate of $\boldsymbol{\lambda}$. Once $\boldsymbol{\lambda}$ is calculated, we estimate $$G\mu_l= \frac{\sum_{i=1}^{n_1}R_{i,l}}{\sum_{i=1}^{n_1}m_{i,l}\lambda_{i}},\qquad 1\le l\le n_2.$$ Finally, we deduce $G$ and $\boldsymbol{\mu}$ by fixing $\max_j \mu_j = 1$.
As the matrix $M$ is not observed, it is replaced by a simulated version of it $\Tilde{M}_{(n)}$.
Additional details are given in the supplementary material \ref{M-Steb b}.
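As an illustration, this update can be written as the following Python sketch (hypothetical array names, not the reference implementation whose link is given in \cref{simulation}); \texttt{M\_tilde} plays the role of the simulated support $\Tilde{M}_{(n)}$ introduced above.
\begin{verbatim}
import numpy as np

def m_step_b(R, M_tilde, n_iter=100, tol=1e-8):
    """Fixed-point updates of lambda, mu and G given the completed support M_tilde."""
    n1, n2 = R.shape
    lam = np.ones(n1)
    row_sums = R.sum(axis=1)          # sum_j R_{k,j}
    col_sums = R.sum(axis=0)          # sum_i R_{i,j}
    for _ in range(n_iter):
        g_mu = col_sums / (M_tilde.T @ lam)   # current estimate of G * mu_j
        new_lam = row_sums / (M_tilde @ g_mu)
        new_lam /= new_lam.max()              # impose max_i lambda_i = 1
        converged = np.max(np.abs(new_lam - lam)) < tol
        lam = new_lam
        if converged:
            break
    g_mu = col_sums / (M_tilde.T @ lam)
    G = g_mu.max()                            # impose max_j mu_j = 1
    mu = g_mu / G
    return lam, mu, G
\end{verbatim}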
\paragraph{S-Step a)}
The purpose of $\Tilde{M}_{(n)}$ is to distinguish the zeros that come from the $M$ matrix (impossible interaction) from those that come from the $N$ matrix (missing interaction), by completing all zeros of the matrix $(\mathbbm{1}_{\{R_{i,j}>0\}})_{i,j}$ with a Bernoulli variable of probability $\mathbb{P}(M_{i,j}=1|R_{i,j}=0,\lambda_i, \mu_j,G,\pi, Z^1,Z^2)$.
\begin{align}
&\mathbb{P} (M_{i,j}=1|R_{i,j}=0,\lambda_i, \mu_j,G,\pi, Z^1_{ik}=1,Z^2_{jl}=1)\notag\\
& \notag\\
= &\frac{\mathbb{P}(M_{i,j}=1 , R_{i,j}=0 |\lambda_i, \mu_j,G,\pi, Z^1_{ik}=1,Z^2_{jl}=1)}{\mathbb{P}(R_{i,j}=0 |\lambda_i, \mu_j,G,\pi, Z^1_{ik}=1,Z^2_{jl}=1)}\notag\\
=&\frac{\pi_{kl}e^{-\lambda_i \mu_j G}}{1-\pi_{kl}(1-e^{-\lambda_i\mu_jG})}\notag.
\end{align}
$\Tilde{M}_{(n+1)}$ is simulated with the following scheme:
$
\Tilde{M}_{i,j} =
\begin{cases}
1 ,& \text{if } r_{i,j}> 0\\
Y_{i,j},& \text{if } r_{i,j}= 0
\end{cases}
$
where $Y_{i,j}$ follows a Bernoulli distribution with parameter
$\sum_{(k,l)}Z^1_{i,k}Z^2_{j,l}\frac{\pi_{kl}e^{-\lambda_i \mu_j G}}
{1-\pi_{kl}(1-e^{-\lambda_i\mu_j G})}\,.$
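A Python sketch of this simulation step, under the same conventions as the sketch above (hypothetical names, not the reference implementation), could be:
\begin{verbatim}
import numpy as np

def s_step_a(R, Z1, Z2, pi, lam, mu, G, rng):
    """Simulate M_tilde: observed cells stay 1, zero cells are filled with
    Bernoulli draws of probability P(M_ij = 1 | R_ij = 0)."""
    prob_connect = pi[np.ix_(Z1, Z2)]           # pi_{kl} for each cell (i, j)
    expo = np.exp(-np.outer(lam, mu) * G)       # exp(-lambda_i mu_j G)
    p_missing = prob_connect * expo / (1 - prob_connect * (1 - expo))
    M_tilde = (R > 0).astype(int)
    zeros = (R == 0)
    M_tilde[zeros] = rng.binomial(1, p_missing[zeros])
    return M_tilde
\end{verbatim}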
\paragraph{M-Step c)}
Update $\boldsymbol{\pi}_{(n+1)}$ with the maximum likelihood estimator given $M$, where $M$ is replaced by $\Tilde{M}$
$$\forall 1\le k\le Q_1,\ 1\le l\le Q_2,\quad \pi_{kl}= \frac{\sum_{i,j}Z^1_{ik}Z^2_{jl}\Tilde{M}_{i,j}}{\sum_{i,j}Z^1_{ik}Z^2_{jl}}\,. $$
\paragraph{S-Step b)}
The latent variables $\mathbf{Z}^1_{(n+1)}$ given $\mathbf{Z}^2_{(n)}$ and the parameters are simulated first; then the latent variables $\mathbf{Z}^2_{(n+1)}$ given $\mathbf{Z}^1_{(n+1)}$ and the parameters are simulated.
Then for all $1\le k \le Q_1$, $1\le i\le n_1$:
\begin{align}
&\mathbb{P}(Z_{ik}^1=1|R,\boldsymbol{\theta},\mathbf{Z}^2)\notag\\ &\propto \mathbb{P}(R |\boldsymbol{\theta},Z^1_{ik}=1,\mathbf{Z}^2)\mathbb{P}(Z^1_{ik}=1) \notag\\
&\propto \alpha_k\prod_{j=1}^{n_2}\prod_{l=1}^{Q_2} \left( \left(\pi_{kl} \frac{(\lambda_i\mu_jG)^{R_{i,j}}}{R_{i,j}!}e^{-\lambda_i\mu_jG} \right)^{\mathbbm{1}_{R_{i,j}>0}}\left(1-\pi_{kl}(1-e^{-\lambda_i\mu_jG}) \right)^{\mathbbm{1}_{R_{i,j}=0}}\right)^{Z^2_{jl}}.
\end{align}
The simulation of $\mathbf{Z}^2_{(n+1)}$ is done in a symmetrical way with $\mathbf{Z}^1_{(n+1)}$ being given.
\paragraph{Algorithm output}
At the end, all the iterations following a ``burn-in'' phase $\hat{\boldsymbol{\theta}}_{(n)}=(\boldsymbol{\alpha}_{(n)},\boldsymbol{\beta}_{(n)},\boldsymbol{\pi}_{(n)},\boldsymbol{\lambda}_{(n)},\boldsymbol{\mu}_{(n)},G_{(n)})$ are averaged as $\hat{\boldsymbol{\theta}} = \frac{1}{n} \sum_n \hat{\boldsymbol{\theta}}_{(n)}$. Even if the methods allow us to give the probabilities of belonging to each block, a final hard clustering is determined with the majority rule.
\subsection{Model selection}\label{ssecMod}
In most cases, the number of groups $Q_1$ and $Q_2$ are not known.
Then, in order to choose the number of blocks in a CoOP-LBM{}, we propose to use an ICL criterion similar to the ICL criteria developed in \cite{daudin_mixture_2008} or \cite{govaert2008block}. The parameter $\boldsymbol{\theta} = (\boldsymbol{\alpha},\boldsymbol{\beta},\boldsymbol{\pi},\boldsymbol{\lambda},\boldsymbol{\mu},G)$ lies in $\Theta = A \times B \times \Pi$ where $A$ is the $Q_1$-dimensional simplex, $B$ is the $Q_2$-dimensional simplex, and $\Pi = ]0,1[^{Q_1 \times Q_2} \times \{\boldsymbol{\lambda}\in]0,1] ^{n_1} \mid \max_i\lambda_i=1 \} \times \{\boldsymbol{\mu}\in]0,1] ^{n_2} \mid \max_j\mu_j=1 \} \times \mathbb{R}_{+}^\star$. Let $g(\boldsymbol{\theta} |m_{Q_1,Q_2})$ be the prior distribution of the parameters for a model $m_{Q_1,Q_2}$ with $Q_1$ and $Q_2$ classes. The ICL
criterion is an approximation of the complete-data integrated
likelihood defined as
$$\mathcal{L}(R,\mathbf{Z}^1,\mathbf{Z}^2|m_{Q_1,Q_2}) = \int_{\Theta} \mathcal{L}(R,\mathbf{Z}^1,\mathbf{Z}^2|\boldsymbol{\theta},m_{Q_1,Q_2})g(\boldsymbol{\theta},m_{Q_1,Q_2})d\boldsymbol{\theta}\,.$$
\begin{theorem}
For a model $m_{Q_1,Q_2}$ with $Q_1$ and $Q_2$ blocks the corresponding ICL criterion is:
\begin{align}
ICL(m_{Q_1,Q_2}) =& \notag \max_{\boldsymbol{\theta}}\mathcal{L}(R,\widehat{\mathbf{Z}^1}, \widehat{\mathbf{Z}^2} |\boldsymbol{\theta}, m_{Q_1,Q_2})\\
&-\frac{Q_1-1}{2}\log(n_1) - \frac{Q_2-1}{2}\log(n_2) - \frac{Q_1Q_2 + n_1 +n_2-1}{2}\log(n_1n_2),
\end{align}
with $\widehat{\mathbf{Z}^1}, \widehat{\mathbf{Z}^2}$ the hard clusterings outputted by the inference algorithm.
\end{theorem}
\begin{proof}
The proof is similar to the proof in \cite{daudin_mixture_2008}, and is given in the supplementary material \ref{ICL}.
\end{proof}
The algorithm starts with $Q_1=Q_2=1$ and fits the CoOP-LBM independently for increasing values of $Q_1$ and $Q_2$, each time initializing with a clustering algorithm such as spectral, hierarchical or k-means clustering. The ICL is calculated for each model. If the ICL keeps decreasing as $Q_1$ or $Q_2$ increases, the algorithm can either stop, or deepen the exploration with a split-merge procedure. In the second case, the algorithm is initialized again by either merging different combinations of the previously estimated clusterings, or by splitting them. At the end, the algorithm returns a list containing the estimated model for each value of $Q_1$ and $Q_2$, with the corresponding ICL, and provides the user with the pair $(Q_1,Q_2)$ which has the largest ICL value.
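This exploration can be sketched as follows, where \texttt{fit\_coop\_lbm} denotes a hypothetical function running Algorithm \ref{algo:vem:nmar} for a fixed pair $(Q_1,Q_2)$ and returning a fitted model object carrying its ICL value.
\begin{verbatim}
def select_model(R, q1_max, q2_max, fit_coop_lbm):
    """Grid exploration of (Q1, Q2); returns the fit with the largest ICL."""
    results = {}
    for q1 in range(1, q1_max + 1):
        for q2 in range(1, q2_max + 1):
            # fit_coop_lbm is assumed to run the stochastic EM (Algorithm 1),
            # initialised e.g. with a hierarchical clustering, and to return
            # an object exposing an `icl` attribute.
            results[(q1, q2)] = fit_coop_lbm(R, q1, q2)
    best = max(results, key=lambda q: results[q].icl)
    return best, results[best], results
\end{verbatim}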
\section{Simulations}\label{simulation}
\subsection{Settings}
The following simulation study is fully reproducible and can be performed with any set of chosen parameters. The code is available on \url{https://anakokemre.github.io/CoOP-LBM/index.html}.
In this simulation study, we consider that $n_1 = n_2 = 100, Q_1 = Q_2 = 3, \boldsymbol{\alpha} = \boldsymbol{\beta} = (\frac{1}{3},\frac{1}{3},\frac{1}{3})$ and
$$\boldsymbol{\pi}= \begin{bmatrix}
0.95 & 0.75 & 0.50 \\
0.75 & 0.50 & 0.50 \\
0.50 & 0.50 & 0.05
\end{bmatrix} .$$ Moreover, we assumed that $G$ takes values in $\{25,100,200,\dots,600\}$ and that $\boldsymbol{\lambda} =\boldsymbol{\lambda}^*/\max(\boldsymbol{\lambda}^*)$ with $ \boldsymbol{\lambda}^* \overset{iid}{\sim} Beta(0.3,1.5)$, $\boldsymbol{\mu}=\boldsymbol{\mu}^*/\max(\boldsymbol{\mu}^*)$ with $ \boldsymbol{\mu}^* \overset{iid}{\sim} Beta(0.3,1.5)$.
The matrices $M,\ N$ and $R$ are simulated according to the CoOP-LBM{} with the parameters given above.
The rows and columns of $R$ and $M$ where there is no observation (i.e., rows or columns of $R$ filled entirely with 0) are discarded. The observed support is $V_{i,j} = \mathbbm{1}\{R_{i,j}>0\}$.
For each value of $G$, $10$ simulations of $R$ are performed. On each simulation, an LBM is fitted on the corresponding binary matrix $V$ using a VEM algorithm, and our proposed algorithm is fitted on $R$.
$\widehat{Z^1}$ and $\widehat{Z^2}$ are initialized with a hierarchical clustering. Then, the algorithm runs for an initial burn-in of 50 iterations, and an additional 50 iterations are used to provide the estimator.
The $Beta(0.3,1.5)$ distribution is skewed, so there will be many values of $\lambda_i$ and $\mu_j$ close to $0$ and few close to $1$. In practice, this is what is observed in pollination networks: some species are very well sampled ($\lambda_i$ close to 1) and many of them are much less sampled ($\lambda_i$ close to 0). Consequently, the observed matrix will have a lot of missing data. For $G = 25$, at least $2/3$ of the support is missing in comparison with the full support, and for $G= 600$, almost half the support is missing.
\subsection{Results}
We verify that our algorithm is able to correctly estimate the parameters, and we evaluate the ability of our model to recover the clusters, to detect missing interactions and to retrieve the values of several metrics computed with respect to the actual matrix $M$ (which is not observed in real situations), for varying sampling intensity $G$.
\subsubsection{Estimation of $\boldsymbol{\lambda},\boldsymbol{\mu}, G$}
Our simulations show that CoOP-LBM{} is able to correctly estimate $\boldsymbol{\lambda}$, $\boldsymbol{\mu}$ and $G$. For different values of $G$, an RMSE has been calculated for $\boldsymbol{\lambda}$ and $\boldsymbol{\mu}$ with the following formulas:
$$RMSE_{\boldsymbol{\lambda}} = \sqrt{ \frac{1}{n_1}\sum_{i=1}^{n_1}(\lambda_i - \hat{\lambda}_i)^2}, \quad RMSE_{\boldsymbol{\mu}} =\sqrt{ \frac{1}{n_2}\sum_{j=1}^{n_2}(\mu_j - \hat{\mu}_j)^2 } .$$
In the supplementary material, we show that the parameters are well retrieved by our estimation procedure.
\subsubsection{Recovery of clusterings}
The clusterings on rows and columns estimated by the CoOP-LBM are assessed by computing the Adjusted Rand Index (ARI) \citep{rand} scores with respect to the original blocks that were used to simulate $M$, and compared with the results estimated by the LBM. When the ARI score is equal to one, the two clusterings are exactly the same (up to label switching). When the ARI score is close to 0, the two clusterings are totally different. An ARI score can be computed even if the two clusterings do not have the same number of groups; the score is then necessarily lower than $1$.
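For instance, assuming the true and estimated hard clusterings are stored as integer label vectors, the ARI can be computed with scikit-learn:
\begin{verbatim}
from sklearn.metrics import adjusted_rand_score

# Toy example: true vs. estimated row labels (label switching is irrelevant)
z_true = [0, 0, 1, 1, 2, 2]
z_hat = [1, 1, 0, 0, 2, 2]
print(adjusted_rand_score(z_true, z_hat))   # 1.0: identical partitions
\end{verbatim}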
\begin{figure}
\caption{ARI scores for rows and columns blocks when the number of groups is unknown}
\label{fig5}
\end{figure}
As we can see in \cref{fig5}, the CoOP-LBM{} systematically has a better ARI score in this setting than the LBM, which tends to overestimate the number of groups compared to the CoOP-LBM, as we explained in the introduction.
The LBM struggles to reach an ARI of $0.5$ for all values of $G$, whereas the CoOP-LBM{} has an ARI score larger than $0.5$ when $G$ is larger than $250$. The LBM performs even worse when the number of groups is given (see supplementary material \ref{secsupplfig}), which shows that the structure retrieved by the LBM is clearly different from the one from $M$.
Even if the LBM estimates the correct number of groups, its ARI score is very low because the estimated structure is different. The CoOP-LBM{} can have a better estimation of the clustering even if more than half of the interactions are missing. For both cases, as $G$ is increasing, the ARI score increases because $\mathbb{P}(N_{i,j}=0)$ decreases. Additional results can be found in the supplementary material.
\subsubsection{Recovery of the support}
The recovery of missing interactions is first assessed by calculating the Area Under the Curve (AUC) of the ROC curve for missing interactions. The result will be compared with the probabilities given by the LBM \citep{terry_finding_2020}.
We recall that with the CoOP-LBM{}, the probability that an interaction is missing
when $R_{i,j}=0$ is $$\mathbb{P}(M_{i,j}=1|R_{i,j}=0,Z_{i,k}^1=1,Z^2_{j,l}=1)=\frac{\pi_{kl}e^{-\lambda_i \mu_j G}}
{1-\pi_{kl}(1-e^{-\lambda_i\mu_j G})}$$
and can be estimated by replacing $\mathbf{Z}^1,\mathbf{Z}^2$ by the obtained clusterings $\widehat{\mathbf{Z}}^1,\widehat{\mathbf{Z}}^2$ and $\boldsymbol{\pi}, \boldsymbol{\lambda},\boldsymbol{\mu} , G$ by their estimates.
This estimated probability tends to 0 when $\lambda_i\mu_j G$ increases, whereas it tends to $\sum_{(k,l)}Z^1_{ik}Z^2_{jl}\pi_{kl}$ when $\lambda_i\mu_j G$ decreases. This latter limit is the estimator of the probability of a missing interaction
given by \cite{terry_finding_2020} in the case of the classical LBM. In order to compare the results, we calculated the AUC of the ROC curve, which measures how well the models are able to distinguish missing interactions from truly impossible ones. An AUC close to 1 describes a perfect separation, whereas an AUC close to 0.5 describes a model where the distinction is no better than a random guess.
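Concretely, restricting attention to the cells with $R_{i,j}=0$, the AUC can be computed from the estimated probabilities of a missing interaction, for example with scikit-learn (hypothetical array names):
\begin{verbatim}
import numpy as np
from sklearn.metrics import roc_auc_score

def auc_missing(M, R, p_missing):
    """AUC for detecting missing interactions among the unobserved cells.

    M: true binary matrix, R: observed counts,
    p_missing: estimated P(M_ij = 1 | R_ij = 0) for every cell."""
    zeros = (R == 0)
    y_true = M[zeros]          # 1 = missing interaction, 0 = impossible one
    y_score = p_missing[zeros]
    return roc_auc_score(y_true, y_score)
\end{verbatim}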
\begin{figure}
\caption{AUC of the ROC curves for the prediction of missing data}
\label{fig6}
\end{figure}
It can be seen in \cref{fig6} that, on average, the CoOP-LBM{} gives a better estimation in our simulation than the LBM. The CoOP-LBM{} even reached an AUC of 0.9 for $G=475$, and its AUC is on average equal to 0.824, whereas the maximum AUC reached by the LBM was equal to 0.7, with an average of 0.62. In \cref{fig7}, an example of the estimated probabilities can be seen. In the LBM that has been fitted on the observed matrix, we can see large white bands in the middle where a lot of missing data has not been detected, while the same areas are reddish in the CoOP-LBM{}. On the contrary, the area in the bottom right has more red in the LBM than in the CoOP-LBM{}, which identifies more accurately where the interactions are missing and where they are not.
\begin{figure}
\caption{
Estimated probabilities of missing interaction. The figure on the left is an observed matrix $V$ for $G=600$, the missing interaction are in red, and the observed interactions ($R_{ij}
\label{fig7}
\end{figure}
\subsubsection{Coverage estimator and connectivity}
In order to estimate the sampling completeness, a coverage estimator can be used. The purpose of a coverage estimator is to estimate the proportion of interactions that have been sampled out of the total number of interactions. The Chao coverage estimator \citep{chao_coverage-based_2012} is defined by
$$\hat{C}=1-\frac{f_1}{n}\frac{f_1(n-1)}{f_1(n-1)+2(f_2+1)}$$
where $f_1$ is the number of interactions observed exactly once, $f_2$ is the number of interactions observed exactly twice, and $n$ is the total number of interactions. If $\hat{C}$ is close to $1$, then almost all the interactions have been sampled; if $\hat{C}$ is close to $0$, then a lot of interactions are missing in the sampling. Knowing the coverage of the sampling can help estimate the true connectivity of the network. This coverage estimator is used in \cite{terry_finding_2020} in order to estimate the number of missing interactions for each row.
In this simulation study, we wish to compare the estimation of
total connectivity given by the Chao estimator and the estimation given by the CoOP-LBM{}.
The connectivity of the completed network is estimated as $$\mathrm{Connectivity}_{chao} = \frac{\sum_{i=1}^{n_1}\sum_{j=1}^{n_2}V_{i,j}}{n_1 n_2\hat{C}}\quad \text{and } \quad \mathrm{Connectivity}_{CoOP} = \frac{1}{n_1 n_2}\sum_{i=1}^{n_1}\sum_{j=1}^{n_2}\mathbb{P}(M_{i,j}=1|R_{i,j})$$ where $\hat{C}$ is the Chao coverage estimator applied to the whole matrix.
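With the formula above for $\hat C$, the two estimators compared here can be sketched in Python as follows (hypothetical array names; \texttt{p\_post} denotes the estimated probabilities $\mathbb P(M_{i,j}=1|R_{i,j})$ from the CoOP-LBM fit):
\begin{verbatim}
import numpy as np

def chao_coverage(R):
    """Chao coverage estimator computed on the whole count matrix R."""
    n = R.sum()              # total number of recorded interaction events
    f1 = np.sum(R == 1)      # interactions observed exactly once
    f2 = np.sum(R == 2)      # interactions observed exactly twice
    return 1 - (f1 / n) * (f1 * (n - 1)) / (f1 * (n - 1) + 2 * (f2 + 1))

def connectivity_chao(R):
    V = (R > 0)
    n1, n2 = R.shape
    return V.sum() / (n1 * n2 * chao_coverage(R))

def connectivity_coop(p_post):
    """p_post[i, j] is the estimated P(M_ij = 1 | R_ij); it equals 1 on
    observed cells and the missing-interaction probability on zero cells."""
    return p_post.mean()
\end{verbatim}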
\begin{figure}
\caption{Estimation of connectivity}
\label{fig11}
\end{figure}
As $M$ is an LBM, we know its theoretical average connectivity, which is given by $\alpha^\top \pi \beta$. In practice, the value of the connectivity fluctuates slightly around this average. In \cref{fig11} the average connectivity of $M$ is represented with a solid line, while the dotted lines delimit the 0.05-quantile and 0.95-quantile of the connectivities measured on $M$. We can see that the CoOP-LBM{} recovers the connectivity much better than the Chao estimator in this case. This is due to the fact that the size of the observed matrix $V$ does not increase as much with $G$ as the size of the sampling does. Consequently, the Chao estimator quickly converges to $1$ as $G$ increases, and does not adequately correct the connectivity.
\subsubsection{Nestedness, modularity}
Several metrics are used in the ecology community to describe networks, such as nestedness \citep{terry_finding_2020,Lara-Romero-2019,de_manincor_how_2020,fortuna_nestedness_2010}, or modularity \citep{terry_finding_2020,de_manincor_how_2020,guimera_missing_2009,fortuna_nestedness_2010}.
In ecological terminology, a specialist is a species that has very few interactions, in contrast to a generalist, which has many more.
The purpose of nestedness is to describe how much, in an interaction network, specialists interact with proper subsets of the species that generalists interact with. Several metrics have been developed to define or estimate this nestedness. Here we will use the NODF metric from \cite{NODF}. A network is perfectly nested if its nestedness is close to $1$, and is not nested if its nestedness is close to $0$. Another tool to study the structure of a network is the modularity, which measures the strength of division of a network into modules. Networks with high modularity have dense connections between the species within modules but sparse connections between species from different modules. Modularity also ranges between 0 and 1.
The nestedness and modularity will be applied on the initial matrix $M$ and on the observed matrix $V$. Then, the matrix $V$ will be completed with three different methods to see if an imputation can correct these metrics. Let $n_{miss}\in \mathbb{N}$.
\begin{enumerate}
\item Among the coordinates $(i,j)$ where $V_{i,j}=0$, sample without replacement $n_{miss}$ coordinates with probability proportional to $\sum_{k,l}\widehat{Z^1_{ik}}\widehat{Z^2_{jl}}\widehat{\pi_{kl}}$ (probability of connection in the cluster) which has been estimated by the LBM. Substitute the $0$ at these sampled coordinates with $1$.
\item Draw a uniform random variable $U_{i,j}$ for all $(i,j)$ such that $V_{i,j}=0$. If $U_{i,j}$ is smaller than ${\mathbb{P}(M_{i,j}=1|R_{i,j}=0)}$, which has been estimated by the CoOP-LBM{}, then set $V_{i,j}=1$.
\item Among the coordinates $(i,j)$ where $V_{i,j}=0$, sample uniformly without replacement $n_{miss}$ coordinates. Substitute the $0$ at these sampled coordinates with $1$.
\end{enumerate}
$n_{miss}$ is an estimate of the number of missed interactions. It could be provided by external experts or other similar studies. To give a slight advantage to methods 1 and 3, we suppose that we have access to the exact value ${n_{miss} = \sum_{i,j}M_{i,j} - \sum_{i,j}V_{i,j}}$; the corresponding estimator for the LBM will be called the oracle LBM. The nestedness (NODF) and modularity are both calculated with the \texttt{bipartite} package. A sketch of the three completion schemes is given below.
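A minimal sketch of the three completion schemes (illustrative only; \texttt{p\_lbm} stands for the LBM connection probabilities of the cluster pairs and \texttt{p\_coop} for the CoOP-LBM estimates of $\mathbb{P}(M_{i,j}=1|R_{i,j}=0)$, both assumed to be given by the fitted models):
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)

def complete_lbm(V, p_lbm, n_miss):           # method 1: weighted draw of n_miss zeros
    zeros = np.argwhere(V == 0)
    w = p_lbm[V == 0]
    picked = rng.choice(len(zeros), size=n_miss, replace=False, p=w / w.sum())
    out = V.copy()
    out[tuple(zeros[picked].T)] = 1
    return out

def complete_coop(V, p_coop):                 # method 2: one Bernoulli draw per zero cell
    out = V.copy()
    draw = rng.uniform(size=V.shape) < p_coop
    out[(V == 0) & draw] = 1
    return out

def complete_random(V, n_miss):               # method 3: uniform draw of n_miss zeros
    zeros = np.argwhere(V == 0)
    picked = rng.choice(len(zeros), size=n_miss, replace=False)
    out = V.copy()
    out[tuple(zeros[picked].T)] = 1
    return out
\end{verbatim}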
\begin{figure}
\caption{Estimation of Nestedness (left) and modularity (right)}
\label{fig12}
\end{figure}
As we can see in \cref{fig12}, the CoOP-LBM{} is able to correct the estimation of nestedness and modularity without knowing the precise number of missing interactions. Meanwhile, the LBM performs even worse than the random completion when estimating the nestedness, and does no better than the random completion when estimating the modularity.
\section{Application on real data}
\label{secappdata}
\subsection{Application on the network from Olesen \textit{et al.}, 2002}
Looking at the set of 70 quantitative pollination networks from \cite{dore_relative_2021}, two networks from \cite{olesen_invasion_2002} are the best sampled according to the sampling-effort criterion, which is calculated as the number of person-hours spent sampling divided by the number of all possible interactions. The network from \cite{olesen_invasion_2002} was sampled on the Mauritian Ile aux Aigrettes, where 14 species of plants and 13 species of insects were observed. The network contains 42 different interactions, sampled 1395 times in total. Both the LBM and the CoOP-LBM have been fitted on this network. As we can see in \cref{Olesen_graph}, both methods categorize the plants into the same group, and the insects into two groups, which can be interpreted as highly and weakly connected. Unlike the LBM, the CoOP-LBM considers that the butterfly species \textit{Lycaenidae pirithous} is among the highly connected insects. This could be due to the fact that, even though \textit{Lycaenidae pirithous} has been observed only 7 times, it was present on 5 different plant species. According to the CoOP-LBM, only half of the interactions of \textit{Lycaenidae pirithous} have been sampled, whereas all the other species of plants and insects have a coverage close to 100\%. A longer observation of \textit{Lycaenidae pirithous} could lead to discovering new interactions, which would justify placing it in the highly connected group.
\begin{figure}
\caption{Difference between the estimated LBM and CoOP-LBM for the network of \cite{olesen_invasion_2002}
\label{Olesen_graph}
\end{figure}
To validate the calculated probabilities and coverage, we decided to sub-sample all interactions as done in \cite{terry_finding_2020}, treating the empirical interaction frequencies as the true network defining a multinomial distribution. We subsampled the empirical network 30 times, each time keeping between 60\% and 90\% of the original sample size, i.e., of the total number of observations.
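A minimal sketch of this multinomial sub-sampling (illustrative only), where \texttt{R} holds the empirical count matrix and \texttt{keep} the fraction of the original sample size that is retained:
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(1)

def multinomial_subsample(R, keep):
    probs = (R / R.sum()).ravel()             # empirical interaction frequencies
    counts = rng.multinomial(int(keep * R.sum()), probs)
    return counts.reshape(R.shape)

R = np.array([[10, 0, 2], [0, 5, 1], [3, 0, 0]])
print(multinomial_subsample(R, keep=0.75))
\end{verbatim}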
\begin{figure}
\caption{AUC of the estimation of probabilities of missing interactions for the network of \cite{olesen_invasion_2002}
\label{fig14}
\end{figure}
As we can see in \cref{fig14}, the LBM struggles to achieve an AUC higher than 0.5 in this situation. This is similar to the simulation of \cref{fig7}, where the LBM considers that the probability of a missing interaction is higher in the group that has been observed as highly connected.
\subsection{Application on the data set of Doré, Fontaine \& Thébault, 2021}
\begin{figure}
\caption{Connectivity, nestedness and modularity on the observed matrix, and on the CoOP-LBM corrected matrix, estimated on the network set from \cite{dore_relative_2021}
\label{fig15}
\end{figure}
\begin{table}
\caption{Estimated number of groups for plants (left) and insects (right) on the set of networks from \cite{dore_relative_2021}. For example, the $18$ in the left table should be read as "there are 18 networks where the LBM estimated the number of groups for plants as 2, while the CoOP-LBM estimated it as 1".\label{table2}
}
\scalebox{0.8}{
\begin{tabular}{cc|ccccc|c}
\cline{3-7}
\textbf{} &
&
\multicolumn{5}{c|}{LBM} &
\\ \cline{2-8}
& \multicolumn{1}{|c|}{\cellcolor[HTML]{FFFFC7}n of groups} &
\multicolumn{1}{c|}{\cellcolor[HTML]{FFFFC7}1} &
\multicolumn{1}{c|}{\cellcolor[HTML]{FFFFC7}2} &
\multicolumn{1}{c|}{\cellcolor[HTML]{FFFFC7}3} &
\multicolumn{1}{c|}{\cellcolor[HTML]{FFFFC7}4} &
\cellcolor[HTML]{FFFFC7}5 &
\multicolumn{1}{c|}{Total} \\ \hline
\multicolumn{1}{|c|}{} &
\cellcolor[HTML]{FFFFC7}{\color[HTML]{333333} 1} &
\multicolumn{1}{c|}{2} &
\multicolumn{1}{c|}{18} &
\multicolumn{1}{c|}{9} &
\multicolumn{1}{c|}{1} &
0 &
\multicolumn{1}{c|}{30} \\ \cline{2-8}
\multicolumn{1}{|c|}{} &
\cellcolor[HTML]{FFFFC7}{\color[HTML]{333333} 2} &
\multicolumn{1}{c|}{0} &
\multicolumn{1}{c|}{16} &
\multicolumn{1}{c|}{14} &
\multicolumn{1}{c|}{3} &
0 &
\multicolumn{1}{c|}{33} \\ \cline{2-8}
\multicolumn{1}{|c|}{} &
\cellcolor[HTML]{FFFFC7}{\color[HTML]{333333} 3} &
\multicolumn{1}{c|}{0} &
\multicolumn{1}{c|}{0} &
\multicolumn{1}{c|}{3} &
\multicolumn{1}{c|}{2} &
0 &
\multicolumn{1}{c|}{5} \\ \cline{2-8}
\multicolumn{1}{|c|}{} &
\cellcolor[HTML]{FFFFC7}{\color[HTML]{333333} 4} &
\multicolumn{1}{c|}{0} &
\multicolumn{1}{c|}{0} &
\multicolumn{1}{c|}{0} &
\multicolumn{1}{c|}{0} &
0 &
\multicolumn{1}{c|}{0} \\ \cline{2-8}
\multicolumn{1}{|c|}{\multirow{-5}{*}{\parbox{1cm}{CoOP-LBM}}} &
\cellcolor[HTML]{FFFFC7}{\color[HTML]{333333} 5} &
\multicolumn{1}{c|}{0} &
\multicolumn{1}{c|}{0} &
\multicolumn{1}{c|}{0} &
\multicolumn{1}{c|}{1} &
1 &
\multicolumn{1}{c|}{2} \\ \hline
\multicolumn{1}{c|}{} &
Total &
\multicolumn{1}{c|}{2} &
\multicolumn{1}{c|}{34} &
\multicolumn{1}{c|}{26} &
\multicolumn{1}{c|}{7} &
1 &
\multicolumn{1}{c|}{} \\ \cline{2-8}
\end{tabular}
}
\scalebox{0.8}{
\begin{tabular}{cc|ccccc|c}
\cline{3-7}
\textbf{} &
&
\multicolumn{5}{c|}{LBM} &
\\ \cline{2-8}
&
\multicolumn{1}{|c|}{\cellcolor[HTML]{FFFFC7}n of groups}&
\multicolumn{1}{c|}{\cellcolor[HTML]{FFFFC7}1} &
\multicolumn{1}{c|}{\cellcolor[HTML]{FFFFC7}2} &
\multicolumn{1}{c|}{\cellcolor[HTML]{FFFFC7}3} &
\multicolumn{1}{c|}{\cellcolor[HTML]{FFFFC7}4} &
\cellcolor[HTML]{FFFFC7}5 &
\multicolumn{1}{c|}{Total} \\ \hline
\multicolumn{1}{|c|}{} &
\cellcolor[HTML]{FFFFC7}{\color[HTML]{333333} 1} &
\multicolumn{1}{c|}{18} &
\multicolumn{1}{c|}{24} &
\multicolumn{1}{c|}{5} &
\multicolumn{1}{c|}{0} &
1 &
\multicolumn{1}{c|}{48} \\ \cline{2-8}
\multicolumn{1}{|c|}{\parbox{1cm}{CoOP-LBM}} &
\cellcolor[HTML]{FFFFC7}{\color[HTML]{333333} 2} &
\multicolumn{1}{c|}{0} &
\multicolumn{1}{c|}{15} &
\multicolumn{1}{c|}{5} &
\multicolumn{1}{c|}{1} &
0 &
\multicolumn{1}{c|}{21} \\ \cline{2-8}
\multicolumn{1}{|c|}{} &
\cellcolor[HTML]{FFFFC7}{\color[HTML]{333333} 3} &
\multicolumn{1}{c|}{0} &
\multicolumn{1}{c|}{1} &
\multicolumn{1}{c|}{0} &
\multicolumn{1}{c|}{0} &
0 &
\multicolumn{1}{c|}{1} \\ \hline
\multicolumn{1}{c|}{} &
Total &
\multicolumn{1}{c|}{18} &
\multicolumn{1}{c|}{40} &
\multicolumn{1}{c|}{10} &
\multicolumn{1}{c|}{1} &
1 &
\multicolumn{1}{c|}{} \\ \cline{2-8}
\end{tabular}}
\end{table}
We fit the LBM and the CoOP-LBM on the 70 weighted networks from \citep{dore_relative_2021}. It is difficult to validate whether our method can retrieve the missing interactions, because the validation process would have to assume that the observed matrix before sub-sampling is the full interaction matrix, which is probably not the case. The method will discover interactions that have not been observed, and these will penalize the AUC score as false positives. However, once the parameters are estimated, it is possible to compute the probability of observing an interaction, $p_{i,j}=\mathbb{P}(R_{i,j}>0 | Z^1_{ik}=1,Z^2_{jl}=1;\theta)=\pi_{kl}\left(1-e^{-\lambda_i\mu_j G}\right)$, and use this value to compute the AUC score.
To compare the LBM and the CoOP-LBM, the following experiment has been performed: for each network, two sub-samplings A) and B) are generated in the following way: for each interaction count $R_{i,j}$, a binomial random variable $\mathcal{B}(R_{i,j},p)$ is drawn, with $p$ being the proportion of observations kept. Sub-samplings A) and B) are independent. The LBM and the CoOP-LBM are fitted on sub-sampling A), and we then try to find the missing interactions of A) that are present in B); a sketch of this thinning procedure is given below. The AUC score is computed. The probability of observing missing interactions on empirical data is better estimated by the CoOP-LBM than by the LBM, AUC scores being 4\% higher in the former case than in the latter (\cref{fig16}).
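A minimal sketch of the A)/B) thinning step (illustrative only; the model fits and the AUC computation are omitted):
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(2)

def thin(R, p):                               # independent binomial thinning of counts
    return rng.binomial(R, p)

R = np.array([[10, 0, 2], [0, 5, 1], [3, 0, 0]])
A, B = thin(R, 0.7), thin(R, 0.7)             # two independent sub-samplings
targets = (A == 0) & (B > 0)                  # "missing in A) but found in B)"
print(A, B, targets, sep="\n")
\end{verbatim}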
The number of groups estimated by the CoOP-LBM is lower than for the LBM (Table~\ref{table2}), as expected, because it merges the groups that originate from the sampling itself.
This suggests that the CoOP-LBM can provide a good indication of the probability of observing missing interactions and that it is a better fit to the data at hand.
As the structure appears different when the CoOP-LBM is fitted, metrics describing the structure of the network need to be corrected. We correct the estimation of the metrics as in the previous section, by completing the interaction matrix with Bernoulli variables with probabilities ${\mathbb{P}(M_{i,j}=1|R_{i,j}=0)}$.
As we can see in \cref{fig15}, correcting the estimation increases the connectivity and the nestedness, but decreases the modularity.
\begin{figure}
\caption{AUC for finding missing interactions}
\label{fig16}
\end{figure}
\section{Discussion}
In this study, we presented the CoOP-LBM, a model able to take into account the counting data of species interactions in order to correct the estimation of the structure of an ecological network. We also provide a proof of identifiability of this model, and an original algorithm that estimates the parameters by simulating the missing interactions in the network. Our method performs better than the LBM on simulated and real data for finding the structure of an under-sampled network. The CoOP-LBM is able to correct the network accordingly, showing that many of the groups observed in ecological networks could be a consequence of the sampling itself. Further experiments have been performed on simulated data to test the robustness of our algorithm in different settings, for example with a negative binomial distribution instead of a Poisson distribution, or in cases where the sampling efforts $(\lambda_i,\mu_j,G)$ depend on some latent blocks. These experiments are available in the example vignettes of the R package CoOP-LBM at \url{https://anakokemre.github.io/CoOP-LBM/index.html}, and show that our algorithm is still able to recover the true underlying structure well.
As a byproduct, our methodology calculates the probability of having a missing interaction when no interaction has been observed. This allows correcting the connectivity of the model, and simulating the missing links to correct other metrics. Our method shows higher AUC values than the LBM. To further confirm the efficiency of our algorithm, the missing interactions estimated as highly probable by our method could be confirmed in laboratory tests, as done with genetic interaction networks.
However, there is still room for improvement: for example, recording the time at which interactions are observed may be helpful to determine the sampling progress. Timed observations can be used to build an accumulation curve of new interactions, which should allow estimating the asymptotic number of interactions that could be observed. This estimation could provide additional information on the number of missing interactions in the network.
This study also neglects the effect of preferences and supposes that the counting data only come from a product rule between the sampling efforts of pollinators and plants. Traits such as flower shape or insect body mass, which are important for plant-pollinator interactions \citep{chamberlain2014traits}, could also be used to further improve the estimation of clusters by using them as covariates in the model. These covariates could influence both the probabilities of connection and the Poisson distribution of frequencies. Another possibility to improve the algorithm is to take phylogeny into account. Two closely related species could have a higher probability of sharing the same interactions \citep{chamberlain2014traits}, or of being in the same group. All this exterior knowledge from experts could lead to a better reconstruction of the network.
\section{Acknowledgments}
This work was partially supported
by the grant ANR-18-CE02-0010-01 of the French National Research Agency ANR (project EcoNet).
Emre Anakok was funded by the MathNum department of INRAE.
The authors thank Thomas Cortier whose Master thesis was a preliminary exploration for this article.
\appendix
\section{Proof of CoOP-LBM identifiability}
\label{secproof}
\subsection{Introduction}
Let $R = (R_{i,j},\ i = 1, \dots, n_1;\ j = 1, \dots, n_2)$ be the data matrix, where $R_{i,j}\in \mathbb{N}$. It is supposed that $R = M \odot N$, with $M$ a binary LBM and $N$ following a Poisson distribution independent of $M$. More precisely, it is assumed that there exist partitions $Z^1 = (Z^1_{i,k};\ i = 1,\dots, n_1;\ k \in \{1,\dots,Q_1\})$ and $Z^2 = (Z^2_{jl};\ j = 1,\dots, n_2;\ l \in \{1,\dots,Q_2\})$, binary indicators of row $i$ (resp. column $j$) belonging to row cluster $k$ (resp. column
cluster $l$), such that the random variables $M_{i,j}$ are conditionally independent given $Z^1$ and $Z^2$
with parameterized density
$$\mathbb{P}(M_{i,j} = m | Z^1_{ik}=1, Z^2_{jl}=1) = \pi_{kl}^m (1-\pi_{kl})^{1-m}.$$
The conditional density of $M$ knowing $Z^1$ and $Z^2$ is
$$f_M (m|Z^1,Z^2;\theta_M) = \prod_{k,l} \prod_{i,j} \left( \pi_{kl}^{m_{i,j}}(1-\pi_{kl})^{1-m_{i,j}} \right)^{Z^1_{ik} Z^2_{jl}}.$$
It is also supposed that the row and column labels are independent, meaning that $\mathbb{P}(Z^1 , Z^2) =\mathbb{P}(Z^1 )\mathbb{P}( Z^2)$, with $\mathbb{P}(Z^1) = \prod_{i,k} {\alpha_k}^{Z^1_{ik}}$ and $\mathbb{P}(Z^2) = \prod_{j,l} {\beta_l}^{Z^2_{jl}}$, where $\alpha_k = \mathbb{P}(Z^1_{i,k}=1)$, $k = 1,\dots,Q_1$, and $\beta_l = {\mathbb{P}(Z^2_{j,l}=1)}$, ${l = 1,\dots,Q_2}$.
The density is thus equal to
$$f_M (m, \theta_M) = \sum_{(Z^1,Z^2) \in (\mathcal{Z}^1,\mathcal{Z}^2 )}\prod_{i,k} {\alpha_k}^{Z^1_{ik}} \prod_{j,l} {\beta_l}^{Z^2_{jl}} \prod_{k,l} \prod_{i,j} \left( \pi_{kl}^{m_{i,j}}(1-\pi_{kl})^{1-m_{i,j}} \right)^{Z^1_{ik} Z^2_{jl}} $$
where $\mathcal{Z}^1$ and $\mathcal{Z}^2$ denote the sets of possible labels $Z^1$ and $Z^2$.
The density of $m$ is parameterized by ${\theta_M = (\alpha,\beta,\pi)}$ with $\alpha = ( \alpha_1 , \dots,\alpha_{Q_1})$, $\beta = ( \beta_1 , \dots,\beta_{Q_2})$, and $\pi = (\pi_{k,l};\ k=1,\dots,Q_1;\ l=1,\dots,Q_2)$.
\\
Moreover, it is supposed that $N_{i,j} \sim \mathcal{P}(\lambda_i\mu_jG)$ with $\lambda_i \in ]0,1]$, $\mu_j \in ]0,1]$, $\max_{i=1,\dots,n_1}\lambda_i=1$, $\max_{j=1,\dots,n_2}\mu_j=1$ and $G>0$. The density of $N$ is then equal to
$$f_N(n,\theta_N) = \prod_{i,j} \frac{(\lambda_i\mu_jG)^{n_{i,j}}}{n_{i,j}!}e^{-\lambda_{i}\mu_{j}G}.$$
The density is parameterized by $\theta_N = (\lambda,\mu,G)$ with $\lambda = (\lambda_1,\dots,\lambda_{n_1})$, $\mu=(\mu_1,\dots,\mu_{n_2})$, and $G>0$.
\\
The density of $R_{i,j} = M_{i,j} N_{i,j}$ depends both on the values of $Z^1,Z^2$ and on $\lambda_i, \mu_j$:
$$
\mathbb{P}(R_{i,j} = r| Z^1_{ik}=1,Z^2_{jl}=1;\theta) = \left\{
\begin{array}{ll}
\ \pi_{kl} \frac{(\lambda_i\mu_jG)^{r}}{r!}e^{-\lambda_i\mu_jG}& \mbox{if } r>0 \\
\\
1 - \pi_{kl} ( 1 - e^{-\lambda_i\mu_jG})& \mbox{if } r=0
\end{array}
\right.
$$
where $\theta = (\theta_M ,\theta_N) = (\alpha,\beta,\pi,\lambda,\mu,G)$. The density of $R$ is then
\begin{align*}
f_R (r, \theta) = \sum_{(Z^1,Z^2) \in (\mathcal{Z}^1,\mathcal{Z}^2 )}\prod_{i,k} {\alpha_k}^{Z^1_{ik}} \prod_{j,l} {\beta_l}^{Z^2_{jl}} \prod_{k,l}\Bigg(& \prod_{\substack{i,j\\r_{i,j}>0}} \left(\pi_{kl} \frac{(\lambda_i\mu_jG)^{r_{i,j}}}{r_{i,j}!}e^{-\lambda_i\mu_jG}\right)^{Z^1_{ik} Z^2_{jl}}\\
&\cdot \prod_{\substack{i,j\\r_{i,j}=0}} \left(1 - \pi_{kl} ( 1 - e^{-\lambda_i\mu_jG})\right)^{Z^1_{ik} Z^2_{jl}}
\Bigg)\,.
\end{align*}
\begin{theorem}
Let $\pi$ be the matrix of Bernoulli coefficients, $\alpha$ and $\beta$ the row and column
mixing proportions of the mixture, $\lambda_i \in ]0,1]$ and $\mu_j \in ]0,1]$ with $\max_{i=1,\dots,n_1}\lambda_i=1$, $\max_{j=1,\dots,n_2}\mu_j=1$, and $G>0$. If $n_1 \geq 2Q_2 -1$, $n_2 \geq 2Q_1-1$, and
\begin{itemize}
\item (H1) for all $1 \leq k \leq Q_1$, $\alpha_k > 0$ and the coordinates of the vector $\tau = \pi \beta$ are distinct,
\item (H2) for all $1 \leq l \leq Q_2$, $\beta_l > 0$ and the coordinates of the vector $\sigma= \alpha' \pi$ are distinct (where
$\alpha'$ is the transpose of $\alpha$),
\end{itemize}
then the CoOP-LBM is identifiable.
\end{theorem}
\subsection{Proof}
The goal of the proof is to show that, under the assumptions of Theorem 1, there exists a unique parameter $\theta = (\alpha,\beta,\pi,\lambda,\mu,G)$, up to a permutation of row and column labels,
corresponding to $\mathbb{P}_\theta(r)$, the probability distribution of a matrix $r$ having at least $2Q_2 - 1$ rows and $2Q_1 - 1$ columns. The proof is in two parts: the first part deals with the identifiability of $(\lambda,\mu,G)$; the second part, adapted from the proof of Brault \textit{et al.} \cite{brault_estimation_2014}, deals with the identifiability of $(\alpha,\beta,\pi)$.
\subsubsection{Identifiability of $(\lambda,\mu,G)$}
Let $\theta =(\alpha,\beta,\pi,\lambda,\mu,G)$ and $\theta ' = (\alpha',\beta',\pi',\lambda',\mu',G')$ be two sets of parameters such that $\mathbb{P}_{\theta}=\mathbb{P}_{\theta'}$.
Our first goal is to show that $(\lambda,\mu) = (\lambda',\mu')$.
As a reminder, $R_{i,j} = M_{i,j} N_{i,j}$. Since $M$ and $N$ are independent, we deduce that $\mathbb{E}_\theta [R_{i,j}] = \mathbb{E}_\theta [M_{i,j}]\mathbb{E}_\theta [N_{i,j}]$.
Let $\gamma = \mathbb{E}_\theta [M_{i,j}] = \sum_{k,l} \alpha_k \beta_l \pi_{kl}$ and $\gamma' = \mathbb{E}_{\theta'} [M_{i,j}]$; we have
$$\mathbb{E}_\theta [R_{i,j}] = \lambda_i \mu_jG \gamma \quad = \quad \mathbb{E}_{\theta'} [R_{i,j}] = \lambda_i' \mu_j'G' \gamma'$$
\paragraph{Identifiability of $(\lambda,\mu)$}\mbox{}\\
Let
$\lambda = (\lambda_1,\dots,\lambda_{n_1})$ with $\lambda_i \in ]0,1]$ and $\max_i\lambda_i = 1$. Without loss of generality we suppose that $\max_i\lambda_i = \lambda_1 = 1$.
\\
Let
$\lambda' = (\lambda_1',\dots,\lambda_{n_1}')$ with $\lambda'_i \in ]0,1]$ and $\max_i\lambda'_i = 1$. Let $v \in \{1,\dots,n_1\}$ be such that $\max_i\lambda'_i = \lambda'_v$, and consider the quantities:
\begin{align}
&\mathbb{E}_\theta [R_{1,1}] = \lambda_1 \mu_1G \gamma \quad \quad \mathbb{E}_{\theta'} [R_{1,1}] = \lambda_1' \mu_1'G' \gamma' \\
&\mathbb{E}_\theta [R_{v,1}] = \lambda_v \mu_1G \gamma \quad \quad \mathbb{E}_{\theta'} [R_{v,1}] = \lambda_v' \mu_1'G' \gamma'
\end{align}
Since $\lambda_1 = 1$ and $\lambda'_v=1$, and since $\mathbb{P}_{\theta}=\mathbb{P}_{\theta'}$, we have $\mathbb{E}_{\theta} [R_{1,1}] = \mathbb{E}_{\theta'} [R_{1,1}]$ and $\mathbb{E}_{\theta} [R_{v,1}] = \mathbb{E}_{\theta'} [R_{v,1}]$. From this we can see that $$\mathbb{E}_{\theta'} [R_{1,1}] = \lambda'_1 \mu'_1G' \gamma' = \lambda'_1 \mathbb{E}_{\theta'} [R_{v,1}] = \lambda'_1\mathbb{E}_{\theta} [R_{v,1}] = \lambda_1'\lambda_v\mathbb{E}_{\theta} [R_{1,1}]. $$
As $\mathbb{E}_{\theta} [R_{1,1}] = \mathbb{E}_{\theta'} [R_{1,1}]$, we conclude that $\lambda_v \lambda_1'=1$, which implies that $\lambda'_1 = 1$ because both $\lambda_v\in ]0,1]$ and $\lambda'_1\in ]0,1]$.
Knowing that $\lambda'_1 = 1 = \lambda_1$, we conclude by noting that $$\lambda'_i = \frac{\mathbb{E}_{\theta'} [R_{i,1}]}{\mathbb{E}_{\theta'} [R_{1,1}]} = \frac{\mathbb{E}_{\theta} [R_{i,1}]}{\mathbb{E}_{\theta} [R_{1,1}]} = \lambda_i,$$ which proves that $\lambda = \lambda'$.
Identifiability of $\mu$ is done in a similar way.
\paragraph{Identifiability of $G$}\mbox{}\\
Without loss of generality we suppose that $\max_i\lambda_i = \lambda_1 = 1$ and $\max_j\mu_j = \mu_1 = 1$; thanks to the previous part we know that $\lambda = \lambda'$ and $\mu=\mu'$. Consider the quantities
$$\frac{\mathbb{E}_{\theta} [R_{1,1}]}{\mathbb{P}_{\theta}(R_{1,1}>0)} \quad \text{and} \quad \frac{\mathbb{E}_{\theta'} [R_{1,1}]}{\mathbb{P}_{\theta'}(R_{1,1}>0)},$$ which are equal because $\mathbb{P}_{\theta} = \mathbb{P}_{\theta'}$. We can compute $$\frac{\mathbb{E}_{\theta} [R_{1,1}]}{\mathbb{P}_{\theta}(R_{1,1}>0)} = \frac{\lambda_1\mu_1G\gamma}{\gamma(1-e^{-\lambda_1\mu_1G})}= \frac{G}{1-e^{-G}}.$$ Since $x\mapsto \frac{x}{1-e^{-x}}$ is an injective function, $G = G'$.
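Although only injectivity is needed for the proof, this step also suggests how $G$ can be recovered in practice: the map $x\mapsto x/(1-e^{-x})$ can be inverted numerically. A minimal sketch with a hypothetical value of $G$:
\begin{verbatim}
import numpy as np
from scipy.optimize import brentq

def ratio(G):                                  # G / (1 - exp(-G)), strictly increasing
    return G / (1.0 - np.exp(-G))

G_true = 3.7                                   # hypothetical value
r = ratio(G_true)                              # the observable ratio E[R_11] / P(R_11 > 0)
G_hat = brentq(lambda G: ratio(G) - r, 1e-8, 1e6)
print(G_hat)                                   # ~ 3.7
\end{verbatim}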
\subsubsection{Identifiability of $(\alpha,\beta,\pi)$}
In the previous part we showed that, given $\mathbb{P}_{\theta}$, the parameters $(\lambda,\mu,G)$ are defined in a unique manner, hence allowing us
to determine $(\lambda,\mu,G)$. The following part is based on \cite{brault_estimation_2014} and deals with the identifiability of $(\alpha,\beta,\pi)$.
\paragraph{Identifiability of $\alpha$}\mbox{}\\
Let $$\tau_k = (\pi \beta)_k = \sum_{l=1}^{Q_2} \pi_{k,l}\beta_l $$
(H1) ensures that the $(\tau_k)$ are distinct; therefore the $Q_1\times Q_1$ matrix $T$ defined by $T_{i,k} = (\tau_k)^{i-1}$ for $1\leq i\leq Q_1$ and $1\leq k\leq Q_1$
is a Vandermonde matrix, and hence invertible. Consider now $u_p$, the
probability of observing a positive value in the first $p$ cells of the first row of $r$.
\begin{align}
u_p & = \mathbb{P}(r_{1,1}>0,\dots,r_{1,p}>0)\\
&= \sum_{k, l_1,\dots,l_p}\mathbb{P}(Z^1_{1k}=1)\prod_{j=1}^p\left[\mathbb{P}(r_{1,j}>0|Z^1_{1k}=1, Z^2_{jl_j}=1)\mathbb{P}(Z^2_{jl_j}=1) \right]\\
&= \sum_{k=1}^{Q_1}\mathbb{P}(Z^1_{1k}=1)\prod_{j=1}^p\left[\sum_{l_j=1}^{Q_2}
\mathbb{P}(r_{1,j}>0|Z^1_{1k}=1, Z^2_{jl_j}=1)\mathbb{P}(Z^2_{jl_j}=1) \right]\\
&=\sum_{k=1}^{Q_1}\mathbb{P}(Z^1_{1k}=1)\prod_{j=1}^p\left[\sum_{l_j=1}^{Q_2}\beta_{l_j}
\pi_{k,l_j}(1-e^{-\lambda_1\mu_jG})\right]\\
&=\sum_{k=1}^{Q_1}\mathbb{P}(Z^1_{1k}=1)\prod_{j=1}^p\left[\sum_{l_j=1}^{Q_2}\beta_{l_j}
\pi_{k,l_j}\right] \prod_{j=1}^p \left[(1-e^{-\lambda_1\mu_jG})\right]\\
&=\prod_{j=1}^p \left[(1-e^{-\lambda_1\mu_jG})\right]\sum_{k=1}^{Q_1}\mathbb{P}(Z^1_{1k}=1)\left[\sum_{l=1}^{Q_2}\beta_{l}
\pi_{k,l}\right]^p \\
&=\prod_{j=1}^p (1-e^{-\lambda_1\mu_jG})\sum_{k=1}^{Q_1}\alpha_k(\tau_k)^p
\end{align}
Given $\mathbb{P}(r)$, the values $u_1,\dots,u_{2Q_1-1}$ are known, and $K_p = \prod_{j=1}^p (1-e^{-\lambda_1\mu_jG}) >0$ is also known thanks to the identifiability of $(\lambda,\mu,G)$. We set $u_0 =1$ and $K_0 = 1$, and let $U$ be the $(Q_1+1) \times Q_1$ matrix defined by
$$U_{i,j} = \frac{u_{i+j-2}}{K_{i+j-2}} = \sum_{k=1}^{Q_1}(\tau_k)^{i-1}\alpha_k(\tau_k)^{j-1},$$ and let $U_i$ be
the square matrix obtained by removing row $i$ from $U$. Writing $A = \mathrm{diag}(\alpha)$, we can see that
$$U_{Q_1+1} = TAT' .$$
$T$ is unknown at this stage, but can be recovered by noticing that the coefficients $\tau_k$ are the roots of the following polynomial \citep{celisse_consistency_2012}:
$$G(x)=\sum_{k=0}^{Q_1}(-1)^{k+Q_1}D_{k+1} x^{k},$$
where $D_{k+1} = \det U_{k+1}$ and $D_{Q_1+1}\neq 0$ because $U_{Q_1+1}$ is a product of invertible matrices. Consequently it is possible to determine $\tau$ and $T$. As $T$ is invertible, we conclude that $A = T^{-1}U_{Q_1+1}T'^{-1}$ is defined in a unique manner.
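The recovery of $(\alpha,\tau)$ from the moments $u_p/K_p$ can be checked numerically. A minimal sketch with hypothetical values of $\alpha$ and $\tau$ (here $Q_1=3$):
\begin{verbatim}
import numpy as np

alpha = np.array([0.5, 0.3, 0.2])
tau = np.array([0.2, 0.5, 0.8])              # distinct coordinates, as required by (H1)
Q1 = len(alpha)

# u_p / K_p = sum_k alpha_k * tau_k^p for p = 0, ..., 2*Q1 - 1
mom = np.array([np.sum(alpha * tau**p) for p in range(2 * Q1)])

# U is the (Q1+1) x Q1 matrix with entries mom[i + j] (0-indexed)
U = np.array([[mom[i + j] for j in range(Q1)] for i in range(Q1 + 1)])

# D_{k+1} = det of U with row k removed; the tau_k are the roots of
# sum_k (-1)^(k+Q1) D_{k+1} x^k
D = [np.linalg.det(np.delete(U, k, axis=0)) for k in range(Q1 + 1)]
coeffs = [(-1) ** (k + Q1) * D[k] for k in range(Q1 + 1)]
tau_hat = np.sort(np.roots(coeffs[::-1]).real)   # recovers tau, up to group relabelling

# With tau known, T is Vandermonde and A = diag(alpha) = T^{-1} U_{Q1+1} (T')^{-1}
T = np.vander(tau_hat, N=Q1, increasing=True).T  # T[i, k] = tau_hat[k] ** i
alpha_hat = np.diag(np.linalg.inv(T) @ U[:Q1, :] @ np.linalg.inv(T.T))
print(tau_hat, alpha_hat)                        # ~ [0.2 0.5 0.8], ~ [0.5 0.3 0.2]
\end{verbatim}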
\paragraph{Identifiability of $\beta$}\mbox{}\\
The identifiability of $\beta$ is obtained in the same way as that of $\alpha$, by considering hypothesis (H2) and
$$\sigma_l = \sum_{k=1}^{Q_1} \pi_{k,l}\alpha_k,\quad S_{j,l} = (\sigma_l)^{j-1}
$$
$$v_q = \mathbb{P}(r_{1,1}>0,\dots,r_{q,1}>0), \quad H_q = \prod_{i=1}^q (1-e^{-\lambda_i\mu_1G}) >0 $$
$$V_{i,j} = \frac{v_{i+j-2}}{H_{i+j-2}},\quad B=\mathrm{diag}(\beta),$$ and showing that $$B = S^{-1}V_{Q_2+1}S'^{-1}.$$
\paragraph{Identifiability of $\pi$}\mbox{}\\
Consider now $w_{pq}$, the
probability of observing a positive value in the first $p$ cells of the first row of $r$ and in the first $q$ cells of the first column of $r$, with $1\leq p \leq Q_1$ and $1\leq q \leq Q_2$:
\begin{align}
w_{pq} &= \mathbb{P}(r_{1,1}>0,\dots,r_{1,p}>0,r_{2,1}>0,\dots r_{q,1}>0)\notag\\
&= \sum_{k=1}^{Q_1}\sum_{l=1}^{Q_2}\alpha_k\beta_l \pi_{kl}(1-e^{-\lambda_1\mu_1G})
\prod_{j=2}^p\left[\sum_{l_j=1}^{Q_2}\beta_{l_j}
\pi_{k,l_j}(1-e^{-\lambda_1\mu_jG})\right]
\prod_{i=2}^q\left[\sum_{k_i=1}^{Q_1}\alpha_{k_i}
\pi_{k_i,l}(1-e^{-\lambda_i\mu_1G})\right]\notag\\
&= \frac{K_p}{K_1} \frac{H_q}{H_1}(1-e^{-\lambda_1\mu_1G}) \sum_{k=1}^{Q_1}\sum_{l=1}^{Q_2}\alpha_k (\tau_k)^{p-1} \pi_{kl}
\beta_l (\sigma_l)^{q-1}\notag\\
&= \psi_{pq} \sum_{k=1}^{Q_1}\sum_{l=1}^{Q_2}\alpha_k (\tau_k)^{p-1} \pi_{kl}
\beta_l (\sigma_l)^{q-1}\notag
\end{align}
where $\psi_{pq} = \frac{K_p}{K_1} \frac{H_q}{H_1}(1-e^{-\lambda_1\mu_1G}) >0$ is known thanks to the identifiability of $(\lambda,\mu,G)$.\\
Letting $W_{p,q} = \frac{w_{pq}}{\psi_{pq}}$, we see that
$$W = TA\pi BS',$$ thus
$$\pi = A^{-1}T^{-1}WS'^{-1}B^{-1}$$ is defined in a unique manner.
\section{Details on the EM algorithm}
\label{M-Steb b}
\paragraph{M-Step b)}
The likelihood of $R$ given $M$ can be written as
$$ \mathcal{L}(R|M) = \prod_{i,j}\left( e^{-\lambda_i \mu_j G} \frac{(\lambda_i \mu_j G)^{R_{i,j}}}{R_{i,j}!}\right)^{m_{i,j}} \underbrace{\left( \mathbbm{1}_{\{R_{i,j}=0\}}\right)^{1-m_{i,j}}}_{=1}.$$
The right part of the product is always equal to one because $m_{i,j}=0$ implies that $R_{i,j}=0$. Then,
$$\log \mathcal{L}(R|M) = \sum_{i,j} m_{i,j} \left[ -\lambda_i \mu_j G + R_{i,j}(\log(\lambda_i)+\log(\mu_j )+\log(G)) - \log(R_{i,j}!) \right] .$$
The derivative of this log-likelihood with respect to the $k^{th}$ coordinate of $\lambda$ gives:
$$\frac{\partial\log\mathcal{L}}{\partial\lambda_k}= -\sum_{j=1}^{n_2}m_{k,j}\mu_{j}G+ \sum_{j=1}^{n_2}m_{k,j}\frac{R_{k,j}}{\lambda_k}\quad \text{thus} \quad
\frac{\partial\log\mathcal{L}}{\partial\lambda_k} = 0 \text{ if } \lambda_k= \frac{\sum_{j=1}^{n_2}m_{k,j}R_{k,j}}{\sum_{j=1}^{n_2}m_{k,j}\mu_{j}G}\,.$$
Note that because $m_{i,j}=0$ implies $R_{i,j}=0$, we have $\sum_{j=1}^{n_2}m_{k,j}R_{k,j}= \sum_{j=1}^{n_2}R_{k,j}$. With the same reasoning, $$\frac{\partial\log\mathcal{L}}{\partial\mu_l} = 0 \text{ if } \mu_l= \frac{\sum_{i=1}^{n_1}R_{i,l}}{\sum_{i=1}^{n_1}m_{i,l}\lambda_{i}G}.$$
By replacing $\mu_j$ in the first derivative by the expression found in the second, we have
$$\frac{\partial\log\mathcal{L}}{\partial\lambda_k} = 0 \text{ if } \lambda_k= \frac{\sum_{j=1}^{n_2}R_{k,j}}{\sum_{j=1}^{n_2}m_{k,j}\frac{\sum_{i=1}^{n_1}R_{i,j}}{\sum_{i=1}^{n_1}m_{i,j}\lambda_{i}G}G} = \frac{\sum_{j=1}^{n_2}R_{k,j}}{\sum_{j=1}^{n_2}m_{k,j}\frac{\sum_{i=1}^{n_1}R_{i,j}}{\sum_{i=1}^{n_1}m_{i,j}\lambda_{i}}}.$$ We can then find $\lambda$ with a fixed-point iteration. However, the fixed point of this equation is not unique: indeed, if $\lambda$ is a fixed point, then $a \lambda$ with $a>0$ is also a fixed point. Because it is supposed that $\max_i \lambda_i =1$, a solution $\lambda$ found by the fixed-point algorithm can be divided by its maximum coordinate in order to obtain the estimate of $\lambda$. Once $\lambda$ is calculated, $G\mu_l= \frac{\sum_{i=1}^{n_1}R_{i,l}}{\sum_{i=1}^{n_1}m_{i,l}\lambda_{i}}$ can be obtained. Finally, we deduce $G$ and $\mu$ by fixing $\max_j \mu_j = 1$.
As the matrix $M$ is not observed, it is replaced by a simulated version of it $\Tilde{M}_{(n)}$.
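A minimal sketch of this step (illustrative only, not the package implementation), taking the observed counts \texttt{R} and the current simulated matrix \texttt{M} as inputs, and assuming that every row and column of \texttt{M} contains at least one simulated interaction:
\begin{verbatim}
import numpy as np

def m_step_b(R, M, n_iter=200):
    row_sums = R.sum(axis=1)             # sum_j R_kj
    col_sums = R.sum(axis=0)             # sum_i R_ij
    lam = np.ones(R.shape[0])
    for _ in range(n_iter):              # fixed-point updates for lambda
        ratio = col_sums / (M.T @ lam)   # sum_i R_ij / sum_i m_ij lambda_i, per column j
        lam = row_sums / (M @ ratio)     # the fixed-point equation above
        lam = lam / lam.max()            # impose max_i lambda_i = 1
    G_mu = col_sums / (M.T @ lam)        # G * mu_l for each column l
    G = G_mu.max()                       # impose max_j mu_j = 1
    return lam, G_mu / G, G
\end{verbatim}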
\section{Proof of ICL formula}
\label{ICL}
The proof is similar to the proof given in \cite{daudin_mixture_2008}: the prior distribution $g$ can be decomposed as $g(\boldsymbol{\theta}|m_{Q_1,Q_2}) = g(\boldsymbol{\alpha}|m_{Q_1,Q_2}) \times g(\boldsymbol{\beta}|m_{Q_1,Q_2})\times g(\boldsymbol{\pi},\boldsymbol{\lambda},\boldsymbol{\mu},G|m_{Q_1,Q_2})$, which allows us to write
$$\log\mathcal{L}(R,\mathbf{Z}^1, \mathbf{Z}^2,m_{Q_1,Q_2}) = \log\mathcal{L}(\mathbf{Z}^1|m_{Q_1,Q_2}) + \log\mathcal{L}( \mathbf{Z}^2|m_{Q_1,Q_2}) + \log\mathcal{L}(R|\mathbf{Z}^1, \mathbf{Z}^2,m_{Q_1,Q_2}) .$$
Using a non-informative Jeffreys prior and replacing the missing data $\mathbf{Z}^1$ and $\mathbf{Z}^2$ by $\widehat{\mathbf{Z}^1}$ and $\widehat{\mathbf{Z}^2}$, we can approximate the first two terms by \begin{align}
&\log\mathcal{L}(\mathbf{Z}^1|m_{Q_1,Q_2}) + \log\mathcal{L}( \mathbf{Z}^2|m_{Q_1,Q_2})\approx\notag\\ &\underset{\boldsymbol{\alpha}}{\max}\log\mathcal{L}(\widehat{\mathbf{Z}^1}|\boldsymbol{\alpha},m_{Q_1,Q_2})-\frac{Q_1-1}{2}\log(n_1) + \underset{\boldsymbol{\beta}}{\max}\log\mathcal{L}(\widehat{\mathbf{Z}^2}|\boldsymbol{\beta},m_{Q_1,Q_2}) - \frac{Q_2-1}{2}\log(n_2).
\end{align}
For the third term, we can consider that $R$ is made of $n_1\times n_2$ random variables, depending on the labels and on $\boldsymbol{\pi},\boldsymbol{\lambda},\boldsymbol{\mu},G$. The term $\log\mathcal{L}(R|\mathbf{Z}^1, \mathbf{Z}^2, m_{Q_1,Q_2})$ can be calculated using a BIC approximation:
\begin{align}
&\log\mathcal{L}(R|\mathbf{Z}^1, \mathbf{Z}^2,m_{Q_1,Q_2})\approx\notag\\ &\underset{\boldsymbol{\pi},\boldsymbol{\lambda},\boldsymbol{\mu},G}{\max}\log\mathcal{L}(R|\widehat{\mathbf{Z}^1}, \widehat{\mathbf{Z}^2},\boldsymbol{\pi},\boldsymbol{\lambda},\boldsymbol{\mu},G,m_{Q_1,Q_2}) - \frac{Q_1Q_2 + n_1 +n_2-1}{2}\log(n_1n_2).
\end{align}
The sum of all the terms yields the final result.
\section{Supplementary figures}
\label{secsupplfig}
\begin{figure}
\caption{ARI scores for rows and columns blocks when the number of blocks is known.}
\label{fig5.2}
\end{figure}
\begin{figure}
\caption{Estimated $Q_1$ (left) and $Q_2$ (right) with LBM for different value of $G$}
\label{fig_Q0}
\end{figure}
\begin{figure}
\caption{Estimated $Q_1$ (left) and $Q_2$ (right) with CoOP-LBM for different value of $G$}
\label{fig_Q}
\end{figure}
\end{document}
\begin{document}
\title{Stabilizing qubit coherence via tracking-control}
\author{Daniel A. Lidar and Sara Schneider\footnote{Current address:
Atel Trading AG, Switzerland}}
\affiliation{Chemical Physics Theory Group, Chemistry Department, and Center for Quantum
Information and Quantum Control, University of Toronto, 80 St. George
Street, Toronto, Ontario M5S 3H6, Canada }
\begin{abstract}
We consider the problem of stabilizing the coherence of a single qubit
subject to Markovian decoherence, via the application of a control
Hamiltonian, without any additional resources. In this case neither quantum
error correction/avoidance, nor dynamical decoupling applies. We show that
using tracking-control, i.e., the conditioning of the control field on the
state of the qubit, it is possible to maintain coherence for finite time
durations, until the control field diverges.
\end{abstract}
\maketitle
\section{Introduction}
Protecting quantum coherence in the presence of decoherence due to coupling
to an uncontrollable environment is an important goal of quantum control,
with applications in, e.g., quantum information processing \cite{Nielsen:book}, and coherent control \cite{Brumer:book}. Various methods
have been developed for this purpose, e.g., quantum error correcting codes
\cite{Steane:99}, decoherence-free subspaces \cite{LidarWhaley:03},
and dynamical decoupling \cite{Viola:01a}, and combinations thereof
\cite{ByrdWuLidar:review}. However, none of these methods is applicable in
the simplest possible case of interest, of a \emph{single} qubit subject to
\emph{Markovian} decoherence: both quantum error correcting codes and
decoherence-free subspaces rely on an encoding of the state of the qubit
into the state of \emph{several} qubits, whereas dynamical decoupling is
\emph{inapplicable} in the fully Markovian regime, since it is effectively
equivalent to the quantum Zeno effect \cite{Facchi:03}, i.e., requires in an
essential manner that the bath retains some memory of its interaction
with the system.
In this work we show that \emph{tracking-control} is capable of stabilizing the
\emph{coherence} of a single qubit subject to Markovian decoherence.
Tracking-control has a rich history in classical control theory
\cite{Hirschorn:79,Hirschorn:81,Hirschorn:88,Jakubczyk:93,Retchkiman:95}.
It was extensively studied by
Rabitz and co-workers in the context of closed quantum systems
(specifically, molecular systems)
\cite{Chen:95,Lu:95,Gross:93,Zhu:99,Zhu:98,Zhu:03}. Here it
refers to the instantaneous adjustment of the control field based on a
continuous measurement of the state of the qubit. This is, of course, a very
strong assumption, which may be appropriate for classical control, but
cannot be satisfied in principle if non-commuting quantum observables are
involved. It turns out that we are able to overcome this obstacle by
choosing a specific form for our control fields, as explained in detail
below. In general, one can envision performing quantum tracking control
based on the incomplete information gleaned in real-time from measuring an
as large as possible set of commuting observables. It is an open problem to
estimate the quality of the (partial) tracking one can thus attain.
The tracking solution we find in our case becomes singular after a finite
time (i.e., the control fields diverge). Such singularities are a well-known
feature of tracking control \cite{Chen:95,Lu:95,Gross:93,Zhu:99}, and can in
some cases be removed \cite{Zhu:99}. We analyze the nature of the
singularity occurring in our case, finding that it is unavoidable.
A related control problem was recently addressed by Belavkin and co-workers
in Ref.~\cite{Bouten:04}, using the quantum filtering (or stochastic
master) equation \cite{Belavkin:80}. In
their work the control objective is to take a qubit from an unknown initial
state to the $|0\rangle $ state. In this approach one naturally accounts for
the unavailability of complete information from the measurement process
(using filtering), but the resulting quantum Bellman-Hamilton-Jacobi
equation is very hard to solve even for a qubit, unless one makes drastic
simplifying assumptions about the control field.
Purely unitary control in the presence of
decoherence has also been addressed by Recht {\it et al}. in Ref.~\cite{Recht:02}. It was shown there that in the case of
so-called relaxing semigroups (quantum dynamical semigroups \cite
{Alicki:87} with a unique fixed point), one can control the equilibrium
state of the dynamics. Relaxing semigroups are in fact a case that is
orthogonal to the case we study here, as they involve non-unital channels,
while we study only unital channels. These issues are clarified below.
Finally, the problem of coherent control of a qubit subject to Markovian
dynamics has been studied in detail by Altafini, using a Lie algebraic
framework \cite{Altafini:03,Altafini:04}. This work has elucidated the
corresponding conditions for accessibility, small- and finite-time
controllability, using the coherence vector representation \cite{Alicki:87}
and employing classical control-theory notions.
The structure of the present paper is as follows. In the next Section we
define the model of a single qubit subject to Markovian decoherence, and
define our control objectives in terms of the purity and coherence. In Sec.~\ref{tracking} we set up the coherence tracking problem and solve it
explicitly for a qubit subject to pure dephasing. In Sec.~\ref{equiv} we
discuss the generality of this result by defining equivalence classes of the
pure dephasing channel to which our coherence-tracking solution also
applies. In Sec.~\ref{singularity} we discuss the nature of the singularity
of our control fields, and show that this singularity cannot be avoided. We
conclude in Sec.~\ref{conclusions} with a brief summary and a list of open
questions inspired by this work.
\section{Model and Objectives}
\subsection{Purity}
\label{purity}
We consider a single qubit subject to Markovian decoherence and controlled
via a control Hamiltonian $H$. Then the system dynamics is governed by a
master equation of the form
\begin{equation}
\frac{\partial \rho }{\partial t}=-\frac{i}{\hbar }[H,\rho ]+{\mathcal{L}}
(\rho ), \label{eq:Lind}
\end{equation}
where the Lindbladian is
\begin{equation}
{\mathcal{L}}(\rho )=\frac{1}{2}\sum_{i,j}a_{ij}\left( [F_{i},\rho
F_{j}^{\dagger }]+[F_{i}\rho ,F_{j}^{\dagger }]\right) , \label{lind}
\end{equation}
where the matrix $A=(a_{ij})$ is positive semidefinite [ensuring complete
positivity of the mapping ${\mathcal{L}}(\rho )$], and where the Lindblad
operators $\{F_{i}\}$ are the coupling operators of the system to the bath
\cite{Alicki:87}. One can always diagonalize $A$ using a unitary transformation
$W=(w_{ij})$ and define new Lindblad operators $G_{i}=\sum w_{ij}F_{j}$ such
that
\begin{equation}
{\mathcal{L}}(\rho )=\frac{1}{2}\sum_{i}\gamma _{i}\left( [G_{i},\rho
G_{i}^{\dagger }]+[G_{i}\rho ,G_{i}^{\dagger }]\right) , \label{eq:lind2}
\end{equation}
where $\gamma _{i}\geq 0$ are the eigenvalues of $A$.
In such a Markovian system \emph{it is impossible to control the purity}
\begin{equation}
p\equiv \mathrm{Tr}(\rho ^{2})
\end{equation}
of a state $\rho $ in such a way so as to maintain it at its initial value
\cite{Ketterle:92,Tannor:99}. To see this note that the time derivative of $
p $ is given by
\begin{eqnarray}
\frac{\partial p}{\partial t} &=&\frac{\partial \mathrm{Tr}(\rho ^{2})}{
\partial t}=2\mathrm{Tr}(\rho \dot{\rho}) \notag \\
&=&-\frac{2i}{\hbar }\underbrace{\mathrm{Tr}(\rho \lbrack H,\rho ])}_{=0}+2
\mathrm{Tr}(\rho {\mathcal{L}}(\rho )),
\end{eqnarray}
where we used cyclic invariance under the trace. Thus the Hamiltonian
control term cannot change the first derivative of the purity, and hence
cannot keep it at its initial value. This is also known as the
\textquotedblleft no-cooling principle\textquotedblright .
Moreover, for certain Lindbladians purity is a strictly decreasing function
under the Markovian semigroup dynamics. This is the case for all unital
Lindbladians, i.e., those for which ${\mathcal{L}}(I)=0$ (e.g., \cite{Altafini:04}). In this case one finds from Eq.~(\ref{eq:lind2}) that $
\sum_{i}\gamma _{i}\left( G_{i}G_{i}^{\dagger }-G_{i}^{\dagger }G_{i}\right)
=0$. Thus a sufficient condition for unitality is that the $G_{i}$ are \emph{
normal} (e.g., unitary):\ $G_{i}G_{i}^{\dagger }=G_{i}^{\dagger }G_{i}$.
Subject to normality it is possible to give a simple proof of the monotonic
decrease of $p$:
\begin{eqnarray*}
\frac{\partial p}{\partial t} &=&2\mathrm{Tr}(\rho {\mathcal{L}}(\rho )) \\
&=&\sum_{i}\gamma _{i}\mathrm{Tr}\{\rho \left( \lbrack G_{i},\rho
G_{i}^{\dagger }]+[G_{i}\rho ,G_{i}^{\dagger }]\right) \} \\
&=&2\sum_{i}\gamma _{i}\{\mathrm{Tr}(X_{i}Y_{i})-\frac{1}{2}[\mathrm{Tr}
(X_{i}X_{i}^{\dagger })+\mathrm{Tr}(Y_{i}Y_{i}^{\dagger })]\},
\end{eqnarray*}
where $X_{i}=\rho G_{i}$ and $Y_{i}=\rho G_{i}^{\dagger }$, and we used $
\rho =\rho ^{\dag }$. Now apply the arithmetic-geometric mean inequality for
matrices, $\left\Vert XY\right\Vert \leq \frac{1}{2}(\left\Vert X^{\dagger
}X+YY^{\dagger }\right\Vert )$, where $\left\Vert \cdot \right\Vert $ is any
unitarily-invariant norm (such as $\mathrm{Tr}$) \cite[p.~263]{Bhatia:book}.
Also, $\left\Vert X^{\dagger }X+YY^{\dagger }\right\Vert \leq \left\Vert
X^{\dagger }X\right\Vert +\left\Vert YY^{\dagger }\right\Vert $. Then:
\begin{eqnarray}
\mathrm{Tr}(X_{i}Y_{i})-\frac{1}{2}[\mathrm{Tr}(X_{i}X_{i}^{\dagger })+
\mathrm{Tr}(Y_{i}Y_{i}^{\dagger })] &\leq &\frac{1}{2}[\mathrm{Tr}
(X_{i}^{\dagger }X_{i}+Y_{i}Y_{i}^{\dagger })]-\frac{1}{2}[\mathrm{Tr}
(X_{i}X_{i}^{\dagger })+\mathrm{Tr}(Y_{i}Y_{i}^{\dagger })] \notag \\
&\leq &0.
\end{eqnarray}
Thus, using $\gamma _{i}\geq 0$, we have $\frac{\partial p}{\partial t}\leq 0
$. Purity in the case of unital Lindbladians is, therefore, a strictly
decaying function under Hamiltonian control. This conclusion is unchanged
even with feedback, namely, even if the Hamiltonian includes dependence on
the qubit state, i.e., if $H=H[\rho ]$. The situation is different for
non-unital Lindbladians \cite{Recht:02,Altafini:04}. These channels can increase the
purity even without active control. E.g., under spontaneous emission an
arbitrary qubit mixed-state is gradually purified to $|0\rangle $. In this
work we consider only unital Lindbladians.
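This monotonic decrease can be checked numerically. The following minimal sketch (illustrative only; the control Hamiltonian, the parameter values and the initial state are arbitrary choices, with $\hbar=1$) integrates a pure-dephasing (unital) Lindbladian under a time-dependent control and verifies that $\mathrm{Tr}(\rho^2)$ never increases:
\begin{verbatim}
import numpy as np

sx = np.array([[0, 1], [1, 0]], dtype=complex)
sy = np.array([[0, -1j], [1j, 0]], dtype=complex)
sz = np.array([[1, 0], [0, -1]], dtype=complex)

gamma = 0.1
def H(t):                       # an arbitrary (hypothetical) control Hamiltonian
    return 0.5 * (4.0 * sz + 2.0 * np.cos(3 * t) * sx - 1.5 * np.sin(t) * sy)

def rhs(t, rho):                # dephasing Lindbladian plus Hamiltonian control
    return -1j * (H(t) @ rho - rho @ H(t)) + 0.5 * gamma * (sz @ rho @ sz - rho)

vx = vy = np.sqrt(0.3 / 2)      # initial Bloch vector with vx^2 + vy^2 + vz^2 = 0.8
vz = np.sqrt(0.5)
rho = 0.5 * (np.eye(2) + vx * sx + vy * sy + vz * sz)

dt, purities = 1e-3, []
for n in range(5000):           # fourth-order Runge-Kutta integration
    t = n * dt
    k1 = rhs(t, rho); k2 = rhs(t + dt/2, rho + dt/2 * k1)
    k3 = rhs(t + dt/2, rho + dt/2 * k2); k4 = rhs(t + dt, rho + dt * k3)
    rho = rho + dt / 6 * (k1 + 2*k2 + 2*k3 + k4)
    purities.append(np.real(np.trace(rho @ rho)))

print(np.max(np.diff(purities)))   # <= 0 up to numerical error: purity never increases
\end{verbatim}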
\subsection{Coherence}
Another quantum quantity of relevance is \emph{coherence}, i.e., the
off-diagonal elements of $\rho $. This is, of course, a basis-dependent
quantity (it is not invariant under unitary transformations), so that in
what follows we assume that one has fixed a basis for physical reasons
(e.g., there is a magnetic field pointing in the $z$ direction, or there is
a pure dephasing type coupling to the bath, represented by a $\sigma _{z}$
-Lindblad operator). We will show that tracking control is capable of
stabilizing the coherence of a single qubit. A qubit is completely
characterized by its density matrix
\begin{equation}
\rho =\left(
\begin{array}{cc}
\rho _{00} & \rho _{01} \\
\rho _{10} & \rho _{11}
\end{array}
\right) ,
\end{equation}
with the additional constraints $\mathrm{Tr}(\rho )=1$, $\rho _{10}=\rho
_{01}^{\ast }$ and $\mathrm{Tr}(\rho ^{2})\leq 1$. We follow the approach in
\cite{Alicki:87} and parametrize $\rho $ using the Bloch
vector $\vec{v}$ with the \emph{real} components
\begin{equation}
v_{\alpha }=\frac{1}{2}\mathrm{Tr}(\rho \sigma _{\alpha }),
\end{equation}
where $\sigma _{\alpha }$, $\alpha \in \{x,y,z\}$, are the Pauli matrices,
and we identify the Lindblad operators as $F_{1}=\sigma _{x},F_{2}=\sigma
_{y},F_{3}=\sigma _{z}$. We then have
\begin{equation}
\rho =\frac{1}{2}{(I}+\vec{v}\cdot \vec{\sigma}). \label{eq:rho-v}
\end{equation}
The purity $p$ is then given by the \emph{Bloch sphere radius},
\begin{equation}
p=v_{x}^{2}+v_{y}^{2}+v_{z}^{2},
\end{equation}
while the coherence $c$ between levels $|0\rangle $ and $|1\rangle $ is
given by the \emph{radius in the }$x-y$\emph{\ plane},
\begin{equation}
c=v_{x}^{2}+v_{y}^{2}.
\end{equation}
\emph{Our objective is to have }$c$\emph{\ be constant during the evolution}
. Thus we impose the following constraint:
\begin{eqnarray}
\frac{1}{2}\frac{\partial c}{\partial t} &=&v_{x}\dot{v}_{x}+v_{y}\dot{v}_{y}
\notag \\
&=&0. \label{cohconst}
\end{eqnarray}
\section{Tracking Control of Coherence}
\label{tracking}
\subsection{Tracking equation}
Noticing that the traceful part (reference energy) of the control
Hamiltonian drops out of the commutator $[H,\rho ]$ we expand $H$ in the
traceless Pauli basis:
\begin{equation}
H=\frac{\hbar }{2}(\omega _{0}(t)\sigma _{z}+\omega _{1}(t)\sigma
_{x}-\omega _{2}(t)\sigma _{y})
\end{equation}
We wish to solve for the control fields $\omega _{i}(t)$ so that the
coherence constraint (\ref{cohconst}) is satisfied. Note that we are
assuming that the intrinsic system Hamiltonian either vanishes, or that we
are working in the rotating frame with respect to this Hamiltonian.
Substituting this and the expansion (\ref{eq:rho-v}) into the Lindblad
equation (\ref{eq:Lind}), and using trace-orthonormality of the Pauli
matrices, we obtain the generalized Bloch equations
\begin{eqnarray}
\dot{v}_{x}(t) &=&-\gamma _{3}v_{x}(t)+(\alpha -\omega
_{0}(t))v_{y}(t)+(\beta -\omega _{2}(t))v_{z}(t)-2\lambda , \label{vx} \\
\dot{v}_{y}(t) &=&(\alpha +\omega _{0}(t))v_{x}(t)-\gamma
_{2}v_{y}(t)+(\delta -\omega _{1}(t))v_{z}(t)-2\mu , \label{vy} \\
\dot{v}_{z}(t) &=&(\beta +\omega _{2}(t))v_{x}(t)+(\delta +\omega
_{1}(t))v_{y}(t)-\gamma _{1}v_{z}(t)-2\nu . \label{vz}
\end{eqnarray}
Here we identify the parameters from Eqs.~(\ref{vx})-(\ref{vz}) with the $
a_{ij}$ from Eq.~(\ref{lind}) as follows:
\begin{eqnarray}
\gamma _{1} &=&2(a_{22}+a_{33}),\quad \gamma _{2}=2(a_{11}+a_{33}),\quad
\gamma _{3}=2(a_{11}+a_{22}) \\
\alpha &=&2\mathrm{Re}(a_{12}),\quad \beta =2\mathrm{Re}(a_{13}),\quad
\delta =2\mathrm{Re}(a_{23}) \\
\lambda &=&\mathrm{Im}(a_{23}),\quad \mu =-\mathrm{Im}(a_{13}),\quad \nu =
\mathrm{Im}(a_{12})
\end{eqnarray}
Note from Eqs.~(\ref{vx}-\ref{vz}) that the $\gamma _{i}$ can be interpreted
as damping coefficients, while $\alpha ,\beta $ and $\delta $ play the role
of Lamb shifts (modify the control fields), and $\lambda ,\mu $ and $\nu $
are the coordinates of an affine shift of the Bloch vector (which plays a
role, e.g., in spontaneous emission). Positive semi-definiteness of $
A=(a_{ij})$ imposes various conditions on these parameters \cite{Alicki:87}.
In matrix form Eqs.~(\ref{vx}-\ref{vz}) can be written as an affine linear
transformation
\begin{equation}
\dot{\vec{v}}(t)=(M_{0}+M(t))\vec{v}(t)+\vec{k}, \label{eq:vdot1}
\end{equation}
where the decoherence is effected by
\begin{eqnarray}
M_{0} &=&\left(
\begin{array}{ccc}
-\gamma _{3} & \alpha & \beta \\
\alpha & -\gamma _{2} & \delta \\
\beta & \delta & -\gamma _{1}
\end{array}
\right) , \notag \\
\vec{k} &=&-2(\lambda ,\mu ,\nu )^{t}, \label{eq:M0k}
\end{eqnarray}
(superscript $t$ denotes transpose) and the control Hamiltonian is represented by the real, antisymmetric matrix
\begin{eqnarray}
M(t) &=&\left(
\begin{array}{ccc}
0 & -\omega _{0}(t) & -\omega _{2}(t) \\
\omega _{0}(t) & 0 & -\omega _{1}(t) \\
\omega _{2}(t) & \omega _{1}(t) & 0
\end{array}
\right) \notag \\
&=&\sum_{j=0}^{2}\omega _{j}(t)\Lambda _{j}, \label{eq:M(t)}
\end{eqnarray}
where the matrices
\begin{equation}
\Lambda _{0}=\left(
\begin{array}{ccc}
0 & -1 & 0 \\
1 & 0 & 0 \\
0 & 0 & 0
\end{array}
\right) ,\quad \Lambda _{1}=\left(
\begin{array}{ccc}
0 & 0 & 0 \\
0 & 0 & -1 \\
0 & 1 & 0
\end{array}
\right) ,\quad \Lambda _{2}=\left(
\begin{array}{ccc}
0 & 0 & -1 \\
0 & 0 & 0 \\
1 & 0 & 0
\end{array}
\right) \label{eq:lambdas}
\end{equation}
close as an $\mathrm{so}(3)$ subalgebra of $\mathrm{su}(3)$ (we use a
slightly different convention for the signs than the standard one \cite{Cornwell:84II}):
\begin{equation}
\lbrack \Lambda _{0},\Lambda _{1}]=-\Lambda _{2},\quad \lbrack \Lambda
_{1},\Lambda _{2}]=-\Lambda _{0},\quad \lbrack \Lambda _{2},\Lambda
_{0}]=-\Lambda _{1}.
\end{equation}
Solving Eq.~(\ref{vy}) for $\omega _{1}(t)$ and Eq.~(\ref{vx}) for $\omega
_{2}(t)$ we obtain
\begin{eqnarray}
\omega _{1}(t) &=&\frac{(\alpha +\omega _{0}(t))v_{x}(t)-\gamma
_{2}v_{y}(t)+\delta v_{z}(t)-2\mu -\dot{v}_{y}(t)}{v_{z}(t)}, \label{om1} \\
\omega _{2}(t) &=&\frac{-\gamma _{3}v_{x}(t)+(\alpha -\omega
_{0}(t))v_{y}(t)+\beta v_{z}(t)-2\lambda -\dot{v}_{x}(t)}{v_{z}(t)}.
\label{om2}
\end{eqnarray}
Substituting this into Eq.~(\ref{vz}) and using the imposed constraint of
constant coherence, $\dot{c}=0$, we find a non-linear first-order
differential equation for $v_{z}(t)$ given by
\begin{equation}
\dot{v}_{z}(t)=F(t)+\frac{G(t)}{v_{z}(t)}-\gamma _{1}v_{z}(t),
\label{eq:track}
\end{equation}
where we have defined
\begin{eqnarray}
F(t) &\equiv &2\beta v_{x}(t)+2\delta v_{y}(t)-2\nu , \label{eq:F} \\
G(t) &\equiv &-\gamma _{3}v_{x}^{2}(t)-\gamma _{2}v_{y}^{2}(t)+2\alpha
v_{x}(t)v_{y}(t)-2\lambda v_{x}(t)-2\mu v_{y}(t). \label{eq:G}
\end{eqnarray}
Eq.~(\ref{eq:track}) is our tracking equation.
\subsection{Solution of the tracking equation in the case of pure dephasing}
We first consider the relatively simple case of diagonal $A$ (which implies
that $\alpha =\beta =\delta =\lambda =\mu =\nu =0$), with equal damping
along the $x$ and $y$ axes ($\gamma _{2}=\gamma _{3}\equiv \gamma $), and $
\gamma _{1}=0$. This is the case known as pure dephasing (see below), and is
generalized in Section~\ref{equiv}. The Bloch equations then become:
\begin{eqnarray}
\frac{\partial \vec{v}}{\partial t} &=&\left(
\begin{array}{ccc}
-\gamma & -\omega _{0}(t) & -\omega _{2}(t) \\
\omega _{0}(t) & -\gamma & -\omega _{1}(t) \\
\omega _{2}(t) & \omega _{1}(t) & 0
\end{array}
\right) \vec{v} \notag \\
&=&\vec{\Omega}(t)\times \vec{v}(t)+\vec{\Gamma}\cdot \vec{v}(t),
\label{eq:Bloch-simp}
\end{eqnarray}
where
\begin{eqnarray}
\vec{\Omega}(t) &=&(\omega _{1}(t),-\omega _{2}(t),\omega _{0}(t)), \notag
\\
\vec{\Gamma} &=&(-\gamma ,-\gamma ,0).
\end{eqnarray}
The vector $\vec{\Omega}$ acts as an effective time-dependent magnetic field
and rotates the coherence vector in a manner designed (see below) to keep
the coherence constant for as long as possible.
\subsubsection{Uncontrolled (free) dynamics}
Under free evolution (uncontrolled scenario: $H=0$) the system dynamics is
governed by Markovian decoherence, subject to a master equation of the form
\begin{equation}
\frac{\partial \rho }{\partial t}=-\frac{i}{\hbar }[H,\rho ]+\frac{\gamma }{2
}\left( \sigma _{z}\rho \sigma _{z}-\rho \right) , \label{eq:mastereq}
\end{equation}
or, equivalently, to the following Bloch equations [Eq.~(\ref{eq:Bloch-simp}
)]:
\begin{equation}
\dot{v}_{x}(t)=-\gamma v_{x}(t),~\dot{v}_{y}(t)=-\gamma v_{y}(t),~\dot{v}
_{z}(t)=0.
\end{equation}
The solution is
\begin{eqnarray}
v_{x}(t) &=&\exp [-\gamma t]v_{x}(0), \notag \\
v_{y}(t) &=&\exp [-\gamma t]v_{y}(0), \notag \\
v_{z}(t) &=&v_{z}(0). \label{eq:free}
\end{eqnarray}
This is known as pure dephasing, or a phase-flip channel \cite{Nielsen:book}
, since in the corresponding Kraus operator-sum representation
\begin{eqnarray}
\rho (t) &=&\left(
\begin{array}{cc}
\rho _{00} & e^{-\gamma t}\rho _{01} \\
e^{-\gamma t}\rho _{01}^{\ast } & 1-\rho _{00}
\end{array}
\right) \notag \\
&=&(1-a)I\rho I+a\sigma _{z}\rho \sigma _{z},
\end{eqnarray}
the qubit undergoes a phase flip with probability $a=(1-e^{-\gamma t})/2$ ($
I $ is the identity matrix).
\subsubsection{Controlled dynamics}
Eqs.~(\ref{eq:track})-(\ref{eq:G}) simplify considerably in the pure
dephasing case, and using the expression $c=v_{x}^{2}+v_{y}^{2}$ (which is
constant) for the coherence we find from Eq.~(\ref{eq:track}):
\begin{equation}
\dot{v}_{z}(t)=\frac{-\gamma c}{v_{z}(t)}.
\end{equation}
Multiplying by $v_{z}(t)$ and integrating $\frac{1}{2}d(v_{z}(t))^{2}/dt$,
the solution is
\begin{equation}
v_{z}(t)=\left( \pm \right) \sqrt{-2\gamma ct+v_{z}^{2}(0)}.
\label{eq:vz(t)}
\end{equation}
It is clear that $v_{z}(t)$ does not stay real for times $t>t_{b}$ where
\begin{equation}
t_{b}=\frac{v_{z}^{2}(0)}{2\gamma c}. \label{eq:tb}
\end{equation}
What happens is that $v_{z}(t_{b})=0$, so that purity equals coherence.
Since the control fields trade decrease in purity in return for
stabilization of coherence, at the breakdown point this trade-off becomes
impossible. Mathematically, the constraint $\dot{c}=0$ forces the
control fields to diverge. We further investigate the implications of this
breakdown below.
We can solve for the corresponding control fields from Eqs.~(\ref{om1}),(\ref{om2}), yielding
\begin{eqnarray}
\omega _{2}(t) &=&\left( \pm \right) \frac{-\gamma v_{x}(t)-\omega
_{0}(t)v_{y}(t)-\dot{v}_{x}}{\sqrt{-2\gamma ct+v_{z}^{2}(0)}}, \label{cont1}
\\
\omega _{1}(t) &=&\left( \pm \right) \frac{\omega _{0}(t)v_{x}(t)-\gamma
v_{y}(t)-\dot{v}_{y}(t)}{\sqrt{-2\gamma ct+v_{z}^{2}(0)}}. \label{cont2}
\end{eqnarray}
Note that the field $\omega _{0}(t)$ can be chosen arbitrarily. Also note
that the fields $\omega _{1}(t),\omega _{2}(t)$ depend on $v_{x}(t)$ and $
v_{y}(t)$. This is why the method is called \textquotedblleft tracking
control\textquotedblright :\ the control strategy depends on the
instantaneous state of the system we desire to control. This can be
technically highly demanding, since it implies the ability to make very fast
measurements (i.e., much faster than the decoherence time-scale), combined
with classical processing to solve for the control fields and real-time
feedback. Nevertheless, recent cavity-QED experiments have demonstrated the
possibility of such real-time feedback control \cite{Steck:04}. For a
quantum-mechanical system such as a qubit, tracking control involves an
intrinsically undesirable feature: simultaneous knowledge of $v_{x}(t)$ and $
v_{y}(t)$ is impossible since $\sigma _{x}$ and $\sigma _{y}$ are
non-commuting observables. However, there is a simple fix for this problem,
once we realize that the time dependence of $v_{x}(t)$ and $v_{y}(t)$ is
itself induced~by the control fields. Thus, we can use fields that fix $
v_{x}(t)=v_{x}(0)$ and $v_{y}(t)=v_{y}(0)$, which is, of course, just a
particular way of keeping the coherence $c=v_{x}^{2}+v_{y}^{2}$ constant,
\emph{via linearization of the control objective} (we have taken a quadratic
control objective and replaced it by two linear objectives). We then find
that the required fields have the form
\begin{eqnarray}
\omega _{2}(t) &=&\left( \pm \right) \frac{-\gamma v_{x}(0)-\omega
_{0}(t)v_{y}(0)}{\sqrt{v_{z}^{2}(0)-2\gamma ct}}, \label{eq:om2-fin} \\
\omega _{1}(t) &=&\left( \pm \right) \frac{-\gamma v_{y}(0)+\omega
_{0}(t)v_{x}(0)}{\sqrt{v_{z}^{2}(0)-2\gamma ct}}. \label{eq:om1-fin}
\end{eqnarray}
\emph{At the breakdown time }$t_{b}$\emph{\ the control fields diverge}, as
can be seen already from Eqs.~(\ref{cont1}),(\ref{cont2}). Thus, decay of
the coherence can be prevented for $t<t_{b}$.
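A minimal numerical sketch of this controlled evolution (illustrative only; the parameter values match those used in the figures below) integrates the Bloch equations (\ref{eq:Bloch-simp}) with the fields of Eqs.~(\ref{eq:om2-fin}),(\ref{eq:om1-fin}) and confirms that the coherence stays at its initial value while the fields grow as $t\rightarrow t_{b}$:
\begin{verbatim}
import numpy as np

gamma, omega0 = 0.1, 4.0                    # example values, as in the figures below
vx0 = vy0 = np.sqrt(0.3 / 2)                # c = 0.3
vz0 = np.sqrt(0.5)                          # p = 0.8, so vz(0) = sqrt(p - c)
c = vx0**2 + vy0**2
t_b = vz0**2 / (2 * gamma * c)              # breakdown time, Eq. (eq:tb)

def fields(t):                              # Eqs. (eq:om2-fin), (eq:om1-fin), "+" branch
    root = np.sqrt(vz0**2 - 2 * gamma * c * t)
    return ((-gamma * vy0 + omega0 * vx0) / root,    # omega_1(t)
            (-gamma * vx0 - omega0 * vy0) / root)    # omega_2(t)

def rhs(t, v):                              # controlled Bloch equations (pure dephasing)
    w1, w2 = fields(t)
    vx, vy, vz = v
    return np.array([-gamma * vx - omega0 * vy - w2 * vz,
                     omega0 * vx - gamma * vy - w1 * vz,
                     w2 * vx + w1 * vy])

v, dt = np.array([vx0, vy0, vz0]), 1e-4
for n in range(int(0.95 * t_b / dt)):       # integrate up to 0.95 * t_b (RK4)
    t = n * dt
    k1 = rhs(t, v); k2 = rhs(t + dt/2, v + dt/2 * k1)
    k3 = rhs(t + dt/2, v + dt/2 * k2); k4 = rhs(t + dt, v + dt * k3)
    v = v + dt / 6 * (k1 + 2*k2 + 2*k3 + k4)

print(t_b)                                  # ~ 8.33 for these parameters
print(v[0]**2 + v[1]**2)                    # coherence still ~ 0.3
print(fields(0.95 * t_b))                   # field amplitudes have grown substantially
\end{verbatim}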
\subsection{Analysis}
\label{analysis}
The breakdown time $t_{b}=v_{z}^{2}(0)/(2\gamma c)$ is inversely
proportional to the desired constant initial coherence value $c$: the higher
the initial coherence we wish to maintain, the less time this can be done
for. The breakdown time also depends on $v_{z}^{2}(0)=p(0)-c$, where $p$ is
the purity: If $v_{z}(0)=0$ (coherence$=$purity) the coherence will start to
decay immediately and no control is possible. Note that $v_{z}(0)=0$ is the
state of maximum coherence, as can be seen from the condition $p(0)\leq 1$
(the radius in the $x-y$ plane equals the Bloch sphere radius). Thus,
coherence control is, clearly, strongly state-dependent.
\begin{figure}
\caption{The time for which the coherence of a qubit can be kept constant,
as a function of coherence $c$ and purity $p$, for $\protect\gamma =0.1$.}
\label{fig1}
\end{figure}
In Fig.~\ref{fig1} we plot the breakdown time $t_{b}$ as a function of
coherence $c$ and purity $p$, for $\gamma =0.1$. The plot reflects the
constraint $c\leq p$, and shows the tradeoff between coherence and the time
for which it can be maintained.
It is interesting to compare the controlled and uncontrolled dynamics. We
again set $\gamma =0.1$, and in the controlled scenario choose an initial
state which lies in a region of Fig.~\ref{fig1} where there is some
coherence to be preserved: $c=0.3$ and $p=0.8$. This implies $v_{z}(0)=\sqrt{
p(0)-c}=1/\sqrt{2}$. We set $v_{x}(0)=v_{y}(0)$, so that $v_{x}(0)=\sqrt{c/2}
=\sqrt{0.3/2}\approx 0.39$. A comparison of the two cases is plotted in
Fig.~\ref{fig2}. In the uncontrolled scenario of pure dephasing $v_{z}$ is
a constant of motion, while $v_{x}$ decays monotonically. In contrast, in
the controlled case these roles are reversed in spite of the dephasing:\ the
control fixes $v_{x}$ while $v_{z}$ is allowed to decay.
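For this choice of initial state the breakdown time evaluates to
\begin{equation}
t_{b}=\frac{v_{z}^{2}(0)}{2\gamma c}=\frac{0.5}{2\times 0.1\times 0.3}\approx 8.3,
\end{equation}
so the coherence can be held at $c=0.3$ only for $t<t_{b}\approx 8.3$.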
\begin{figure}
\caption{Comparison between the controlled (thick lines) and the free (thin
lines) evolution for $v_{z}$ and $v_{x}$.}
\label{fig2}
\end{figure}
The control fields necessary to keep the coherence constant are plotted in
Fig.~\ref{fig3}, where we have chosen $\omega _{0}(t)\equiv \omega _{0}$
to account for an energy difference between the two qubit states. For the
plot we set $\omega _{0}=4.0$. All other parameters and the initial state
are as in Fig.~\ref{fig2}. The divergence of the control fields at $t_{b}$
is clearly visible.
\begin{figure}
\caption{Control fields $\protect\omega _{1}$ and $\protect\omega _{2}$
required to keep the coherence constant.}
\label{fig3}
\end{figure}
Geometrically, the uncontrolled phase-flip channel maps the Bloch sphere
to an ellipsoid with the $z$-axis as major axis and minor axis in the $x-y$ plane.
As is clear from Eqs.~(\ref{eq:free}), the major axis is invariant under
the uncontrolled dynamics, while the minor axis (the coherence) is
contracted. On the other hand, in the controlled scenario, the minor axis is
invariant up until the breakdown time ($c$ is constant), while the major
axis is contracted [Eq.~(\ref{eq:vz(t)})]. The control field is thus able to
trade the contraction in the $x-y$ plane for one along the $z$-axis. The
geometric interpretation of this process is the following:\ \emph{the
control field attempts to rotate the ellipsoid so that the minor axis
becomes as aligned as possible with the }$z$\emph{-axis, where it would
experience no contraction}. The rotation takes the minor axis to an
invariant point, which requires ever-growing control field amplitude, until
the contraction is so strong that the control field is no longer capable of
sustaining the required rotation, and diverges. The effectiveness of this
process depends on the desired value of coherence that is to be maintained,
and the initial purity. This rotational interpretation follows from Eq.~(\ref
{eq:Bloch-simp}):\ we have, in the simplest case of constant $\omega _{0}(t)$
, $\vec{\Omega}(t)=(\omega _{1}(t),-\omega _{2}(t),\omega _{0})$ [a force
vector whose length in the $x-y$ plane is growing monotonically for $0\leq
t\leq t_{b}$ ($|\vec{\Omega}(t)|^{2}=\frac{(\gamma ^{2}+\omega _{0}^{2})c}{
v_{z}^{2}(0)-2\gamma ct}+\omega _{0}^{2}$)], multiplying via the vector
cross-product the vector $\vec{v}(t)=(v_{x}(0),v_{y}(0),\sqrt{
v_{z}^{2}(0)-2\gamma ct})$ [a vector with fixed coordinates in the $x-y$
plane whose magnitude is shrinking monotonically], thus producing a rate of
change of $\vec{v}(t)$ pointing in the plane orthogonal to $\vec{\Omega}(t)$
and $\vec{v}(t)$.
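For the parameter values used in Figs.~\ref{fig2} and \ref{fig3} ($\gamma =0.1$, $\omega _{0}=4.0$, $c=0.3$, $v_{z}^{2}(0)=0.5$), for instance, the initial magnitude of the rotation vector is
\begin{equation}
|\vec{\Omega}(0)|=\sqrt{\frac{(\gamma ^{2}+\omega _{0}^{2})c}{v_{z}^{2}(0)}+\omega _{0}^{2}}\approx 5.1,
\end{equation}
and $|\vec{\Omega}(t)|$ diverges as $t\rightarrow t_{b}$, in accordance with the divergence of the control fields.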
\section{Decoherence equivalence classes}
\label{equiv}
From the geometric interpretation given above it is clear that there are
other decoherence models where the Bloch sphere experiences a similar
deformation, but the contraction happens in a different plane. Then the
question arises which decoherence models are \textquotedblleft
equivalent\textquotedblright\ in the sense that what we have learned from
the above phase flip channel can be applied directly to another decoherence
model.
Suppose we apply a global unitary transformation $U$ to each Lindblad
operator in the master equation (\ref{eq:Lind}), as well as to the control
Hamiltonian $H$. \emph{We will say that two decoherence problems which are
thus related are in the same unitary equivalence class}. Under this
transformation the master equation becomes
\begin{equation}
\frac{\partial \rho }{\partial t}=-\frac{i}{\hbar }[UHU^{\dagger },\rho ]+
\frac{1}{2}\sum_{i,j}a_{ij}\left( [UF_{i}U^{\dagger }\rho ,UF_{j}^{\dagger
}U^{\dagger }]+[UF_{i}U^{\dagger },\rho UF_{j}^{\dagger }U^{\dagger
}]\right) .
\end{equation}
This is the \textquotedblleft Heisenberg picture\textquotedblright . We can
transform to the \textquotedblleft Schr\"{o}dinger picture\textquotedblright\ by
multiplying the latter master equation by $U$ from the right and $U^{\dagger
}$ from the left:
\begin{equation}
\frac{\partial \rho ^{\prime }}{\partial t}=-\frac{i}{\hbar }[H,\rho
^{\prime }]+\frac{1}{2}\sum_{ij}a_{ij}\left( [F_{i}\rho ^{\prime
},F_{j}]+[F_{i},\rho ^{\prime }F_{j}]\right) , \label{eq:rho'}
\end{equation}
where $\rho ^{\prime }=U^{\dagger }\rho U$. This is the same as the original
master equation, but for a transformed $\rho $. E.g., the phase flip channel
($\sigma _{z}$-decoherence) is unitarily related to the bit flip channel ($
\sigma _{x}$-decoherence) via the Hadamard matrix $U=\frac{1}{\sqrt{2}}
\left(
\begin{array}{cc}
1 & 1 \\
1 & -1
\end{array}
\right) $.
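Indeed, a direct computation confirms that conjugation by this (Hermitian and unitary) matrix maps $\sigma _{z}$ to $\sigma _{x}$:
\begin{equation}
U\sigma _{z}U^{\dagger }=\frac{1}{2}\left(
\begin{array}{cc}
1 & 1 \\
1 & -1
\end{array}
\right) \left(
\begin{array}{cc}
1 & 0 \\
0 & -1
\end{array}
\right) \left(
\begin{array}{cc}
1 & 1 \\
1 & -1
\end{array}
\right) =\left(
\begin{array}{cc}
0 & 1 \\
1 & 0
\end{array}
\right) =\sigma _{x}.
\end{equation}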
In the coherence vector representation the unitary transformation $U$
becomes a real rotation matrix $R$ of the coherence vector via the
appropriate adjoint representation \cite{Cornwell:84II}; e.g., for the qubit
problem $U\in \mathrm{SU}(2)$ while $R\in \mathrm{SO}(3)$. The analog of
Eq.~(\ref{eq:rho'}) [i.e., the transformed Eq.~(\ref{eq:vdot1})] is
\begin{equation}
\dot{\vec{v}}^{\prime }=(M_{0}+M(t))\vec{v}^{\prime }+\vec{k}
\label{eq:vdot}
\end{equation}
where $M_{0}$ and $\vec{k}$ are the effects of decoherence in the coherence
vector representation \cite{Alicki:87}, $\vec{v}^{\prime }=R\vec{v}$, and $
M(t)$ represents the Hamiltonian control [recall Eq.~(\ref{eq:M(t)})].
Thus, two decoherence problems which are in the same unitary equivalence
class differ, in the \textquotedblleft Schr\"{o}dinger picture\textquotedblright
, by a fixed rotation of the coherence vector $\vec{v}$. In terms of the
control problem at hand, the stabilization of the coherence $c$, it is then
clear that the control solution for all decoherence problems which are in
the unitary equivalence class of pure dephasing is still given by the result
derived above, i.e., by Eqs.~(\ref{eq:om2-fin}),(\ref{eq:om1-fin}). The
difference between the decoherence problems in this equivalence class lies
only in the initial values of $v_{x}(0)$, $v_{y}(0)$ and $v_{z}(0)$, which
enter into the explicit form of the control fields through the expressions~(
\ref{eq:om2-fin}),(\ref{eq:om1-fin}). As a very simple example, consider the
case of transforming from the phase-flip channel to the unitarily equivalent
$-$(phase-flip channel), i.e., $\sigma _{z}\mapsto -\sigma _{z}$. There
should, on physical grounds, be no essential difference between these two
cases. Indeed, in this case $U=\exp (i\pi \sigma _{y}/2)$, and the adjoint $
\mathrm{SO}(3)$ matrix is a rotation by $\pi $ about the $y$-axis, i.e., $R=
\mathrm{diag}\left( -1,1,-1\right) $. Thus $\vec{v}^{\prime }=R\vec{v}
=(-v_{x},v_{y},-v_{z})$, and both the coherence $c=v_{x}^{2}+v_{y}^{2}$ and $
v_{z}^{2}$ are invariant, so that the transformed control fields [compare to
Eqs.~(\ref{eq:om2-fin}),(\ref{eq:om1-fin})], given in terms of the
untransformed coherence vector,
\begin{eqnarray}
\omega _{2}^{\prime }(t) &=&\left( \pm \right) \frac{+\gamma v_{x}(0)-\omega
_{0}(t)v_{y}(0)}{\sqrt{v_{z}^{2}(0)-2\gamma ct}}, \\
\omega _{1}^{\prime }(t) &=&\left( \pm \right) \frac{-\gamma v_{y}(0)-\omega
_{0}(t)v_{x}(0)}{\sqrt{v_{z}^{2}(0)-2\gamma ct}},
\end{eqnarray}
have exactly the same divergence as before. The fact that there is a
difference at all is a reflection of the fact that the mapping from $
\mathrm{SU}(2)$ to $\mathrm{SO}(3)$ is a two-to-one homomorphism
\cite{Cornwell:84II}; indeed, the master equation~(\ref{eq:mastereq})
is invariant
under the transformation $\sigma _{z}\mapsto -\sigma _{z}$, but this is not
the case in the coherence vector representation [the corresponding $M_{0}$
in Eq.~(\ref{eq:vdot})\ is not invariant].
It is worth emphasizing that the equivalence class of pure dephasing is the
entire group $\mathrm{SU}(2)$, but it excludes important processes such as
spontaneous emission (represented by $\sigma _{-}\notin \mathrm{SU}(2)$).
More generally, linear combinations of Pauli matrices, corresponding
to affine shifts in the coherence vector representation, are not in the
equivalence class of pure dephasing. The solution of the control problem for
such processes is deferred to a future publication; it involves solving the
non-linear tracking equation~(\ref{eq:track}) for these cases.
\section{Singularities}
\label{singularity}
It is insightful to reformulate the above control problem in terms of the tracking
control framework of Ref.~\cite{Zhu:99}, which allows one to study the
nature of the control field singularity. We once again linearize the
quadratic control objective $c(t)=c(0)$ into a two-dimensional form where we
wish to separately control $v_{x}(t)$ and $v_{y}(t)$ (in particular, keep
them constant), using the two control fields $\omega _{1}(t)$ and $\omega
_{2}(t)$. Following the notation of Ref.~\cite{Zhu:99} as closely as
possible we have [recall Eqs.~(\ref{eq:vdot1}),(\ref{eq:M(t)})]
\begin{equation}
\dot{\vec{v}}=(M_{0}+\sum_{j=0}^{2}\omega _{j}(t)\Lambda _{j})\vec{v}+\vec{k}
, \label{eq:vdot2}
\end{equation}
and the control objectives $S_{1}=v_{x}^{2}$ and $S_{2}=v_{y}^{2}$ (keeping these constant is equivalent to keeping $v_{x}$ and $v_{y}$ constant) are
\begin{equation}
S_{i}(t)=\vec{v}^{t}(t)\cdot O_{i}\cdot \vec{v}(t)~~[=S_{i}(0)],
\label{eq:Si}
\end{equation}
with
\begin{equation}
O_{1}=\left(
\begin{array}{ccc}
1 & 0 & 0 \\
0 & 0 & 0 \\
0 & 0 & 0
\end{array}
\right) ,\quad O_{2}=\left(
\begin{array}{ccc}
0 & 0 & 0 \\
0 & 1 & 0 \\
0 & 0 & 0
\end{array}
\right) .
\end{equation}
The matrices $\Lambda _{j}$, defined in Eq.~(\ref{eq:lambdas}), satisfy
\begin{equation}
\lbrack O_{1},\Lambda _{1}]=[O_{2},\Lambda _{2}]=0.
\end{equation}
Using this fact, Eq.~(\ref{eq:vdot2}), and the observation $\Lambda
_{j}^{t}=-\Lambda _{j}$, it is simple to show by explicit differentiation of
Eq.~(\ref{eq:Si}) that
\begin{eqnarray}
\omega _{1}(t) &=&\frac{\dot{S}_{2}+\omega _{0}(t)\vec{v}^{t}[\Lambda
_{0},O_{2}]\vec{v}-\vec{v}^{t}\{M_{0},O_{2}\}\vec{v}-\vec{k}^{t}O_{2}\vec{v}-
\vec{v}^{t}O_{2}\vec{k}}{\vec{v}^{t}[\Lambda _{1},O_{2}]\vec{v}},
\label{eq:om1} \\
\omega _{2}(t) &=&\frac{\dot{S}_{1}+\omega _{0}(t)\vec{v}^{t}[\Lambda
_{0},O_{1}]\vec{v}-\vec{v}^{t}\{M_{0},O_{1}\}\vec{v}-\vec{k}^{t}O_{1}\vec{v}-
\vec{v}^{t}O_{1}\vec{k}}{\vec{v}^{t}[\Lambda _{2},O_{1}]\vec{v}},
\label{eq:om2}
\end{eqnarray}
where $\{,\}$ denotes the anti-commutator and its appearance in the
expression involving the decoherence matrix $M_{0}$, rather than a
commutator, is a manifestation of the associated damping. These equations
are equivalent to Eqs.~(\ref{om1}),(\ref{om2}), and can be further
simplified by using the constraint of constant coherence ($\dot{S}_{1,2}=0$
), and the explicit forms of the various vectors and matrices appearing in
them.
Clearly, a singularity arises when the denominators in Eqs.~(\ref{eq:om1}),(
\ref{eq:om2}) vanish:
\begin{eqnarray}
\vec{v}^{t}(t)[\Lambda _{1},O_{2}]\vec{v}(t) &=&0, \label{eq:zero1} \\
\vec{v}^{t}(t)[\Lambda _{2},O_{1}]\vec{v}(t) &=&0. \label{eq:zero2}
\end{eqnarray}
Ref.~\cite{Zhu:99} distinguishes between several types of singularities:\
(i) A trivial singularity is the case when the denominators are zero over a
continuous time domain; (ii)\ a non-trivial singularity is the case when the
denominators are zero at isolated points. A trivial singularity can be
removed by taking higher-order time-derivatives of Eq.~(\ref{eq:Si}) under
the conditions~(\ref{eq:zero1}),(\ref{eq:zero2}), until a non-zero
denominator is found. If
derivatives of all orders result in a trivial singularity, the system is
uncontrollable.
In our case, using
\begin{equation}
\lbrack \Lambda _{1},O_{2}]=\left(
\begin{array}{ccc}
0 & 0 & 0 \\
0 & 0 & 1 \\
0 & 1 & 0
\end{array}
\right) ,\quad \lbrack \Lambda _{2},O_{1}]=\left(
\begin{array}{ccc}
0 & 0 & 1 \\
0 & 0 & 0 \\
1 & 0 & 0
\end{array}
\right) ,
\end{equation}
we have
\begin{eqnarray}
\vec{v}^{t}(t)[\Lambda _{1},O_{2}]\vec{v}(t) &=&2v_{y}(t)v_{z}(t), \\
\vec{v}^{t}(t)[\Lambda _{2},O_{1}]\vec{v}(t) &=&2v_{x}(t)v_{z}(t).
\end{eqnarray}
Since our control fields keep $v_{x,y}$ fixed, the singularity arises at the
isolated point $v_{z}(t_{b})=0$. We are thus dealing with a non-trivial
singularity. There are now two subcases:\ a) The singularity is of the form $
\omega _{j}(t)=\alpha /0$ with $\alpha \neq 0$; b) The singularity is of the
form $\omega _{j}(t)=0/0$. In case b) one can apply L'Hospital's rule and
(sometimes) overcome the singularity. In our case, the numerators in Eqs.~(
\ref{eq:om1}),(\ref{eq:om2}) involve the decoherence matrix $M_{0}$ (and the
affine shift $\vec{k}$), while the denominators involve only the controls,
so that \emph{generically} one cannot expect a cancellation as in case b).
Ref.~\cite{Zhu:99} concludes that in case a) there is no solution for the
field and the system is uncontrollable at the singular point. This
conclusion agrees with our geometric interpretation of Sec.~\ref{analysis}.
\section{Conclusions and Open Questions}
\label{conclusions}
In this work we have considered the problem of controlling the coherence of
a single qubit under circumstances where none of the encoding or dynamical
decoupling methods recently developed in quantum information science apply.
Instead we have employed a version of tracking control, where a control
field is continuously adjusted in order to satisfy the objective of constant
coherence. This is possible up to a finite time, which depends on the
initial coherence, at which the control field diverges.
There are various open questions suggested by these results.
i) While our original goal was to track the coherence $c=v_{x}^{2}+v_{y}^{2}$
, we in fact solved the more restrictive problem of separately controlling $
v_{x}$ and $v_{y}$. It would be interesting to consider the case where these
components are allowed to vary while truly trying to fix only $c$. In
particular, it would be interesting to see if this enables the extension of
the breakdown time.
ii) Expansion of the Hilbert space by including additional levels:
Controllability could improve if instead of having a two-level system we
were to use an $n$-level system, where we coherently control all the $n$
levels, but use just two for the qubit. Within this larger Hilbert space it
is possible that interference effects could be used profitably to maintain
the coherence of the two qubit levels, as e.g., in electromagnetically
induced transparency \cite{Harris:97}, or control of vibrational wavepackets
\cite{Brif:01}.
iii) Periodic or continuous update of the control objective so as to reduce
the desired value of coherence: In this manner the singularity of the control
fields can be avoided for arbitrarily long times. It would be interesting to
formulate this as an optimal control problem, with the objective being,
e.g., the time-integral of coherence.
iv) As discussed above, the control fields depend crucially on the knowledge of the
initial state whose coherence we wish to maintain. For certain states [with $
v_{z}(0)=0$] no such control is possible. Additionally we need to know $
v_{x}(0)$ and $v_{y}(0)$ in order to correctly apply the control fields. The
full tracking control problem requires knowing even more: the full coherence
vector $\vec{v}(t)$, an impossible task due to the non-commutativity of the
observables involved. In general, one can envision performing quantum
tracking control based on the incomplete information gleaned in real time
from measuring an as large as possible set of \emph{commuting} observables.
It is an open problem to estimate the quality of the (partial) tracking one
can thus attain.
v) As mentioned in Sec.~\ref{purity}, the purity for non-unital decoherence
channels can actually increase without control. Unitary, open-loop control
in this case was studied in Ref.~\cite{Recht:02}. It would be interesting to
explore coherence tracking control for this class of channels.
vi) Finally, an important extension of the results reported here would
be to problems involving more than one qubit, e.g., in order to
preserve entanglement.
\begin{acknowledgments}
Financial support from the DARPA-QuIST program (managed by AFOSR under
agreement No. F49620-01-1-0468), the Sloan Foundation, and PREA (to D.A.L.)
is gratefully acknowledged.
\end{acknowledgments}
\end{document}
\begin{document}
\title{Alternatives for the $q$-matroid axioms of independent spaces, bases, and spanning spaces}
\begin{abstract}
It is well known that in $q$-matroids, axioms for independent spaces, bases, and spanning spaces differ from the classical case of matroids, since the straightforward $q$-analogue of the classical axioms does not give a $q$-matroid. For this reason, a fourth axiom has been proposed. In this paper we show how we can describe these spaces with only three axioms, providing two alternative ways to do that. As an application, we show direct cryptomorphisms between independent spaces and circuits and between independent spaces and bases.
\end{abstract}
\section{Introduction}
In the classical case of matroids, independent sets are defined by three axioms. For $q$-matroids, the straightforward $q$-analogue of these three axioms is not strong enough to give a $q$-matroid with a semimodular rank function. Therefore, a fourth axiom was added (see \cite{JP18}). \\
This unexpected fourth axiom raises some questions. Why would we need this extra axiom for independent spaces, bases, and spanning spaces, but not for notions such as dependent spaces, circuits, flats, and hyperplanes? Can we find a better way to describe the axioms for independent spaces, bases, and spanning spaces, using only three axioms? \\
In this paper we present a positive answer to this question. We propose two ways to define independent spaces, bases, and spanning spaces with only three axioms. The first is to remove the third axiom, because it is implied by the fourth one (where, for independent spaces, we have to be a bit careful to pick the right variation of the fourth axiom). The second is an alternative for the third axiom that is still a $q$-analogue of the classical case, but that eliminates the need for the fourth axiom. \\
As an application of this restriction of the number of axioms, we prove two cryptomorphisms. The first one is a direct cryptomorphism between independent sets and circuits that was not shown before. The second one is a cryptomorphism between independent spaces and bases. This was done in \cite{JP18}, but we believe there was a gap in the proof that we will fix here.
\section{Preliminaries}
Throughout this paper, $n$ denotes a fixed positive integer and $E$ a fixed $n$-dimensional vector space over an arbitrary field $\mathbb{F}$. The notation $\mathcal{L}(E)$ indicates the \emph{lattice of subspaces} of $E$. For any $A,B\in\mathcal{L}(E)$ with $A\subseteq B$ we denote by $[A,B]$ the interval between $A$ and $B$, that is, the lattice of all subspaces $X$ with $A\subseteq X\subseteq B$. For $A\subseteq E$ we use the notation $\mathcal{L}(A)$ to denote the interval $[\{0\},A]$. For more background on lattices, see for example Birkhoff \cite{birkhoff}. \\
We use the following definition of a $q$-matroid.
\begin{Definition}\label{rankfunction}
A $q$-matroid $M$ is a pair $(E,r)$ where $r$ is an integer-valued function defined on the subspaces of $E$ with the following properties:
\begin{itemize}
\item[(R1)] For every subspace $A\in \mathcal{L}(E)$, $0\leq r(A) \leq \dim A$.
\item[(R2)] For all subspaces $A\subseteq B \in \mathcal{L}(E)$, $r(A)\leq r(B)$.
\item[(R3)] For all $A,B\in \mathcal{L}(E)$, $r(A+ B)+r(A\cap B)\leq r(A)+r(B)$.
\end{itemize}
The function $r$ is called the \emph{rank function} of the $q$-matroid.
\end{Definition}
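A basic example, often called the \emph{uniform $q$-matroid} and denoted $U_{k,n}$, is obtained by fixing an integer $0\leq k\leq n$ and setting
\[ r(A)=\min\{k,\dim A\} \quad\text{for all } A\in\mathcal{L}(E). \]
Axioms (R1) and (R2) are immediate, and (R3) follows from the modularity of the dimension function, $\dim(A+B)+\dim(A\cap B)=\dim A+\dim B$, by distinguishing whether or not $\dim A$ and $\dim B$ exceed $k$. We will use this example for illustration throughout this section.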
In order to make notation more compact, we use the following ways to describe families of subspaces.
\begin{Definition}\label{def:families}
Let ${\mathcal{A}} \subseteq \mathcal{L}(E)$. We define the following family of subspaces of $E$:
\begin{align*}
\max({\mathcal{A}})&:=\{ X \in {\mathcal{A}} : X \nsubseteq A \text{ for any } A \in {\mathcal{A}}, A \neq X \}, \\
\min({\mathcal{A}})&:=\{ X \in {\mathcal{A}} : A \nsubseteq X \text{ for any } A \in {\mathcal{A}}, A \neq X \}.
\end{align*}
For any subspace $X \in \mathcal{L}(E)$, we define then the collection of \emph{maximal subspaces
of $X$ in ${\mathcal{A}}$} to be the collection of subspaces
\[
\max(X,{\mathcal{A}}):=\{ A \in {\mathcal{A}} : A \subseteq X \text{ and } B \subset X, B \in {\mathcal{A}} \implies \dim(B) \leq \dim(A) \}.
\]
In other words, $\max(X,{\mathcal{A}})$ is the set of subspaces of $X$ in ${\mathcal{A}}$ that have maximal dimension over all such choices of subspaces. Similarly, we define the \emph{minimal subspaces containing $X$ in ${\mathcal{A}}$} to be the collection of subspaces
\[
\min(X,{\mathcal{A}}):=\{ A \in {\mathcal{A}} : X \subseteq A \text{ and } X \subset B, B \in {\mathcal{A}} \implies \dim(B) \geq \dim(A) \}.
\]
Finally, by slight abuse of notation, we write
\[ X\cap{\mathcal{A}} := \{X\cap A:A\in{\mathcal{A}}\}. \]
\end{Definition}
We define several specific subspaces in a $q$-matroid.
\begin{Definition}\label{independentspaces}
Let $M=(E,r)$ be a $q$-matroid.
A subspace $A$ of $E$ is called an \emph{independent} space of $M$ if $r(A)=\dim A$. A subspace that is not an independent space is called a \emph{dependent space}. A minimal dependent space (w.r.t. inclusion) is called a \emph{circuit}. A maximal independent space (w.r.t. inclusion) is called a \emph{basis} of $M$. A \emph{spanning space} of $M$ is a subspace $S$ such that $r(S)=r(E)$. A \emph{loop} of $M$ is a $1$-dimensional subspace $\ell\subseteq E$ such that $r(\ell)=0$.
\end{Definition}
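For instance, in the uniform $q$-matroid $U_{k,n}$ with $0<k\leq n$ the independent spaces are exactly the subspaces of dimension at most $k$, the circuits are the $(k+1)$-dimensional subspaces (so circuits exist only when $k<n$), the spanning spaces are the subspaces of dimension at least $k$, and there are no loops.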
A $q$-matroid can be equivalently defined by its independent spaces, bases, spanning spaces and circuits. See \cite{bcj} for an overview of these cryptomorphic definitions and many others.
\begin{Definition}\label{independence-axioms}
Let ${\mathcal{I}} \subseteq \mathcal{L}(E)$. We define the following \emph{independence axioms}.
\begin{itemize}
\item[(I1)] ${\mathcal{I}}\neq\emptyset$.
\item[(I2)] For all $I,J \in \mathcal{L}(E)$, if $J\in{\mathcal{I}}$ and $I\subseteq J$, then $I\in{\mathcal{I}}$.
\item[(I3)] For all $I,J\in{\mathcal{I}}$ satisfying $\dim I<\dim J$, there exists a $1$-dimensional subspace $x\subseteq J$, $x\not\subseteq I$ such that $I+x\in{\mathcal{I}}$.
\item[(I4)] For all $A,B \in \mathcal{L}(E)$ and $I,J \in \mathcal{L}(E)$ such that
$I \in \max({\mathcal{I}} \cap \mathcal{L}(A))$ and $J \in \max({\mathcal{I}} \cap \mathcal{L}(B))$,
there exists $K\in \max({\mathcal{I}} \cap \mathcal{L}(A+B))$ such that $K \subseteq I+J$.
\end{itemize}
If ${\mathcal{I}}$ satisfies the independence axioms (I1)-(I4) we say that $(E,{\mathcal{I}})$ is a collection of \emph{independent spaces}.
\end{Definition}
\begin{Definition}\label{indep-bases}
Let $\mathcal{B} \subseteq \mathcal{L}(E)$.
We define the following \emph{basis axioms}.
\begin{itemize}
\item[(B1)] $\mathcal{B}\neq\emptyset$.
\item[(B2)] For all $B_1,B_2\in\mathcal{B}$, if $B_1\subseteq B_2$, then $B_1=B_2$.
\item[(B3)] For all $B_1,B_2\in\mathcal{B}$ and for every subspace $A$ of codimension 1 in $B_1$ satisfying $B_1\cap B_2\subseteq A$, there is a $1$-dimensional subspace $y$ of $B_2$ such that $A+y\in\mathcal{B}$.
\item[(B4)]
For all $A,B\in \mathcal{L}(E)$ and $I,J\in\mathcal{L}(E)$ such that $I\in\max(E,A\cap{\mathcal{B}})$ and $J\in\max(E,B\cap{\mathcal{B}})$, there exists $K\in\max(E,(A+B)\cap{\mathcal{B}})$ such that $K\subseteq I+J$.
\end{itemize}
If $\mathcal{B}$ satisfies the basis axioms (B1)-(B4) we say that $(E,\mathcal{B})$ is a collection of \emph{bases}.
\end{Definition}
\begin{Definition}\label{spanning-axioms}
Let ${\mathcal{S}} \subseteq \mathcal{L}(E)$. We define the following \emph{spanning space axioms}.
\begin{itemize}
\item[(S1)] $E \in {\mathcal{S}}$.
\item[(S2)] For all $I,J \in \mathcal{L}(E)$, if $J\in{\mathcal{S}}$ and $J \subseteq I$, then $I\in{\mathcal{S}}$.
\item[(S3)] For all $I,J\in{\mathcal{S}}$ such that $\dim J<\dim I$, there exists some $X \in \mathcal{L}(E)$ of codimension $1$ in $E$ satisfying $J \subseteq X$, $I \nsubseteq X$, and $I\cap X\in{\mathcal{S}}$.
\item[(S4)] For all $A,B\in\mathcal{L}(E)$ and $I,J\in \mathcal{L}(E)$ such that $I\in\min({\mathcal{S}}\cap[A,E])$ and $J\in\min({\mathcal{S}}\cap[B,E])$, there exists $K\in\min({\mathcal{S}}\cap [A\cap B,E])$ such that $I\cap J\subseteq K$.
\end{itemize}
If ${\mathcal{S}}$ satisfies the spanning space axioms (S1)-(S4) we say that $(E,{\mathcal{S}})$ is a collection of \emph{spanning spaces}.
\end{Definition}
\begin{Definition}\label{circuit-axioms}
Let $\mathcal{C}\subseteq\mathcal{L}(E)$. We
define the following \emph{circuit axioms}.
\begin{itemize}
\item[(C1)] $\{0\}\notin\mathcal{C}$.
\item[(C2)] For all $C_1,C_2\in\mathcal{C}$, if $C_1\subseteq C_2$, then $C_1=C_2$.
\item[(C3)] For distinct $C_1,C_2 \in \mathcal{C}$ and any $X\in \mathcal{L}(E)$ of codimension $1$ there is a circuit $C_3 \in \mathcal{C}$ such that $C_3 \subseteq (C_1+C_2)\cap X$.
\end{itemize}
If $\mathcal{C}$ satisfies the circuit axioms (C1)-(C3), we say that $(E,\mathcal{C})$ is a collection of \emph{circuits}.
\end{Definition}
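As a quick check, these axioms hold for the circuits of the uniform $q$-matroid $U_{k,n}$ with $k<n$, which are the $(k+1)$-dimensional subspaces: (C1) and (C2) are clear, and for (C3) note that distinct circuits satisfy $\dim(C_1+C_2)\geq k+2$, so that for every $X\in\mathcal{L}(E)$ of codimension $1$
\[ \dim\bigl((C_1+C_2)\cap X\bigr)\geq \dim(C_1+C_2)-1\geq k+1, \]
and hence $(C_1+C_2)\cap X$ contains a $(k+1)$-dimensional subspace, that is, a circuit $C_3$.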
Note that the axiom (C3) listed here is different from the axiom (C3) as defined in \cite[Theorem 64]{JP18}. An explanation of this can be found in \cite[Section 11]{bcj}.
A lattice isomorphism between a pair of lattices $(\mathcal{L}_1,\leq_1,\vee_1,\wedge_1)$ and $(\mathcal{L}_2,\leq_2,\vee_2,\wedge_2)$ is a bijective function $\varphi:\mathcal{L}_1\longrightarrow\mathcal{L}_2$ that is order-preserving and preserves the meet and join, that is, for all $x,y\in\mathcal{L}_1$ we have that $\varphi(x\wedge_1 y)=\varphi(x)\wedge_2\varphi(y)$ and $\varphi(x\vee_1 y)=\varphi(x)\vee_2\varphi(y)$. A lattice anti-isomorphism between a pair of lattices is a bijective function $\psi:\mathcal{L}_1\longrightarrow\mathcal{L}_2$ that is order-reversing and interchanges the meet and join, that is, for all $x,y\in\mathcal{L}_1$ we have that $\psi(x\wedge_1 y)=\psi(x)\vee_2\psi(y)$ and $\psi(x\vee_1 y)=\psi(x)\wedge_2\psi(y)$.
We can hence define a notion of equivalence and duality between $q$-matroids.
\begin{Definition}
Let $E_1,E_2$ be vector spaces over the same field $\mathbb{F}$. Let $M_1=(E_1,r_1)$ and $M_2=(E_2,r_2)$ be $q$-matroids. We say that $M_1$ and $M_2$ are \emph{lattice-equivalent} or \emph{isomorphic} if there exists a lattice isomorphism $\varphi:\mathcal{L}(E_1)\longrightarrow \mathcal{L}(E_2)$ such that $r_1(A)=r_2(\varphi(A))$ for all $A\subseteq E_1$. In this case we write $M_1 \cong M_2$.
\end{Definition}
Fix an anti-isomorphism $\perp:\mathcal{L}(E)\longrightarrow\mathcal{L}(E)$ that is an involution. For any subspace $X \in \mathcal{L}(E)$ we denote by $X^\perp$ the \emph{dual} of $X$ in $E$ with respect to $\perp$.
Important operations on $q$-matroids are restriction, contraction and duality. We give a short summary here and refer to \cite{BCIR21,JP18} for details.
\begin{Definition}\label{defdual}
Let $M=(E,r)$ be a $q$-matroid. Then $M^*=(E,r^*)$ is also a $q$-matroid, called the \emph{dual $q$-matroid}, with rank function
\[ r^*(A)=\dim(A)-r(E)+r(A^\perp). \]
The subspace $B$ is a basis of $M$ if and only if $B^\perp$ is a basis of $M^*$.
\end{Definition}
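For example, for the uniform $q$-matroid $U_{k,n}$ one computes, using $\dim(A^{\perp})=n-\dim A$,
\[ r^*(A)=\dim A-k+\min\{k,\,n-\dim A\}=\min\{n-k,\dim A\}, \]
so the dual of $U_{k,n}$ is $U_{n-k,n}$, in analogy with the classical case.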
\begin{Definition}\label{restr}
Let $M=(E,r)$ be a $q$-matroid. The \emph{restriction} of $M$ to a subspace $X$ is the $q$-matroid $M|_X$ with ground space $X$ and rank function $r_{M|_X}(A)=r_M(A)$ for all $A\subseteq X$. \\
The \emph{contraction} of $M$ by a subspace $X$ is the $q$-matroid $M/X$ with ground space $E/X$ and rank function $r_{M/X}(A/X)=r_M(A)-r_M(X)$ for all $X\subseteq A\subseteq E$.
\end{Definition}
\section{Redundancy in the axiom systems}\label{redundancy}
In this section we show that (I3), (B3) and (S3) are implied by the other axioms. For the independence axioms this is actually a bit subtle, since the exact statement of (I4) has a somewhat vague history.
\subsection{A discussion on variations of (I4)}
The axiom (I4) was first stated in \cite{JP18}. It was formulated using the ambiguous term ``maximal independent space inside $A$'' for some $A\subseteq E$. It was not clarified if this maximality was taken with respect to inclusion or dimension. However, if one carefully reads the proofs in \cite{JP18}, especially Proposition 15, it becomes clear that maximality is taken with respect to dimension. Intuitively this also follows from the fact that for the cryptomorphism between independence and rank the following rank function in terms of independence is defined:
\[ r_\mathcal{I}(A)=\max\{\dim I:I\in\mathcal{I},I\subseteq A\}. \]
In subsequent papers, notably \cite{bcj}, the ambiguity in (I4) was resolved by assuming that maximality was taken with respect to inclusion. This did not lead to any problems, since by (I3) both notions are equivalent. It is only in cases where (I3) is not assumed (or proven) that the difference in maximality matters.
In this section we discuss the relations between the following variations of the axiom (I4).
\begin{itemize}
\item[(oI4)] For all $A,B \in \mathcal{L}(E)$ and $I,J \in \mathcal{L}(E)$ such that $I \in \max(A, \mathcal{I} )$ and $J \in \max(B, \mathcal{I})$, there exists $K\in \max(A+B, \mathcal{I})$ such that $K \subseteq I+J$.
\item[(I4)] For all $A,B \in \mathcal{L}(E)$ and $I,J \in \mathcal{L}(E)$ such that $I \in \max({\mathcal{I}} \cap \mathcal{L}(A))$ and $J \in \max({\mathcal{I}} \cap \mathcal{L}(B))$, there exists $K\in \max({\mathcal{I}} \cap \mathcal{L}(A+B))$ such that $K \subseteq I+J$.
\item[(I4')] Let $A \in \mathcal{L}(E)$ and let $I \in \max(A,{\mathcal{I}})$. Let $B \in \mathcal{L}(E)$.
Then there exists $J \in \max(A+B,{\mathcal{I}})$ such that $J \subseteq I+B$.
\item[(I4'')] Let $A \in \mathcal{L}(E)$ and let $I \in \max(A,{\mathcal{I}})$.
Let $x \in \mathcal{L}(E)$ be a $1$-dimensional space. Then there exists $J \in \max(x+A,{\mathcal{I}})$ such that $J \subseteq x+I$.
\end{itemize}
We use the notation (oI4) for the version of (I4) as implied in \cite{JP18} and we reserve (I4) for the version that appears in other papers. The following relations among these variations of (I4) have been proven.
\begin{Theorem}[Theorem 26 of \cite{bcj}]\label{oldI4s}
Let $\mathcal{I}$ be a collection of subspaces satisfying (I1)-(I3).
Then the axiom systems (I1)-(I4), (I1)-(I4') and (I1)-(I4'') are pairwise equivalent.
\end{Theorem}
The next result is a variation of this theorem, involving (oI4) instead of (I4) and not depending on (I3). In fact, this proof is already implicit in the proof of \cite[Theorem 26]{bcj}. This result will be of use in the next section.
\begin{Proposition}\label{newI4s}
Let $\mathcal{I}$ be a collection of subspaces satisfying (I1) and (I2). Then the axiom systems (I1), (I2), (oI4); (I1), (I2), (I4'); and (I1), (I2), (I4'') are pairwise equivalent.
\end{Proposition}
\begin{proof}
In \cite[Theorem 26]{bcj} it is proven that $\mathcal{I}$ satisfies (I4') if and only if it satisfies (I4''): the axiom (I3) is not used here. It is straightforward that if $\mathcal{I}$ satisfies (oI4) then it satisfies (I4'). For the implication in the other direction, exactly the same proof as in \cite[Theorem 26]{bcj} holds and this does not use (I3). In fact, what is proven there is that if $\mathcal{I}$ satisfies (I4'), then it satisfies (oI4).
\end{proof}
\begin{Remark}\label{I4isDifferent}
By the previous discussion, it is now clear that the axiom (I4) is weaker than the other variations.
Indeed, the axioms (oI4), (I4') and (I4'') can be shown to be pairwise equivalent without using (I3).
Things change if we want to prove their equivalence with (I4): in that proof (I3) becomes crucial. This will be made clearer in the next section.
\end{Remark}
\subsection{Redundancy of (I3)}
In this section we prove that the axiom (I3) is redundant, provided we use any variant of the fourth independence axiom that is not (I4). We do this by showing that given (I1) and (I2), the axioms (I3) and (I4) are equivalent to the axiom (I4'') (or (I4') or (oI4)).
\begin{Theorem}
Let $(E,\mathcal{I})$ be a $q$-matroid. Then, for the set $\mathcal{I}$, the axiom (oI4) holds true.
\end{Theorem}
\begin{proof}
Since $\mathcal{I}$ satisfies (I1)-(I4), it satisfies (I4') by Theorem \ref{oldI4s}, and hence it satisfies (oI4) by Proposition \ref{newI4s}.
\end{proof}
\begin{Theorem}\label{I3isImplByI4}
Let $E$ be a vector space and let $\tilde{\mathcal{I}}$ be a collection of subspaces satisfying the axioms (I1), (I2) and (oI4). Then $(E,\tilde{\mathcal{I}})$ is a $q$-matroid.
\end{Theorem}
\begin{proof}
We have to show that $\tilde{\mathcal{I}}$ satisfies the axioms (I1), (I2), (I3) and (I4). The first two axioms are satisfied by definition, and once (I3) is established, (I4) follows from Proposition \ref{newI4s} and Theorem \ref{oldI4s}. So it is left to prove (I3). \\
Let $I,J\in \tilde{\mathcal{I}}$ with $\dim I<\dim J$. Assume, towards a contradiction, that for all $1$-dimensional spaces $x\subseteq J$, $x\not\subseteq I$ we have that $I+x\notin\tilde{\mathcal{I}}$. Let $x_i$ be $1$-dimensional spaces in $J$ such that we can write $I+J=I\oplus x_1\oplus\cdots\oplus x_h$. Note that, by (I2), $x_i\in\tilde{\mathcal{I}}$ for all $i$. \\
By Proposition \ref{newI4s}, $\tilde{\mathcal{I}}$ satisfies (I4''). We apply (I4'') to $I$ and $x_1$: there is a maximal member of $\tilde{\mathcal{I}}$ (w.r.t. dimension) contained in $I+x_1$. By assumption, $I+x_1\notin \tilde{\mathcal{I}}$, so $I$ is such a maximal member of $\tilde{\mathcal{I}}$ in $I+x_1$. Next, we apply (I4'') to $I+x_1$ and $x_2$: there is a maximal member of $\tilde{\mathcal{I}}$ in $I+x_1+x_2$ contained in $I+x_2$. Again by assumption, $I+x_2\notin \tilde{\mathcal{I}}$ so $I$ is such a maximal member of $\tilde{\mathcal{I}}$. Continuing like this, we find that $I$ is a maximal member of $\tilde{\mathcal{I}}$ in $I+x_1+\cdots+x_h=I+J$. However, this is a contradiction, since $J\subseteq I+J$, $J \in \tilde{\mathcal{I}}$, and $\dim I<\dim J$. We conclude that (I3) has to hold and thus $(E,\tilde{\mathcal{I}})$ is a $q$-matroid.
\end{proof}
The proof of (I3) is similar to Proposition 6 of \cite{JP18}, only written in terms of independence instead of rank. Also, note that the last part of this proof does not hold with (I4) instead of (I4''). This supports Remark \ref{I4isDifferent}.
\subsection{Redundancy of (B3) and (S3)}
For the basis axioms we have a similar result. Even though it is not specified, it is implied that in (B4) a maximal intersection of a space with a basis is an intersection of maximal dimension. Therefore, the subtleties we had with (I4) and (oI4), distinguishing between maximality with respect to inclusion and with respect to dimension, do not appear for bases.
\begin{Theorem}\label{B3isImplByB4}
Let ${\mathcal{B}}$ be a family of subspaces of $E$ that satisfies the axioms (B1), (B2) and (B4). Then ${\mathcal{B}}$ satisfies (B3).
\end{Theorem}
\begin{proof}
Let $B_1,B_2\in {\mathcal{B}}$ and let $A\subseteq B_1$ be a codimension $1$ subspace such that $B_1\cap B_2\subseteq A$. Assume, towards a contradiction, that for all $1$-dimensional spaces $x\subseteq B_2$ we have that $A+x\notin{\mathcal{B}}$. Since $B_1\cap B_2\subseteq A$, we will never have that $A+x=B_1$.
Let $x_i$ be $1$-dimensional spaces in $B_2$ such that we can write $A+B_2=A\oplus x_1\oplus\cdots\oplus x_h$. Note that $\max(E,x_i\cap{\mathcal{B}})=\{x_i\}$, since $x_i\subseteq B_2$. \\
Now apply (B4) to $A$ and $x_1$: there is a $J_1\in\max(E,(A+x_1)\cap{\mathcal{B}})$ such that $J_1\subseteq A+x_1$.
Since by assumption, $A+x_1\notin{\mathcal{B}}$, we can take $J_1=A$.
Next, we apply (B4) to $A+x_1$ and $x_2$: there is a $J_2\in\max(E,(A+x_1+x_2)\cap{\mathcal{B}})$ such that $J_2\subseteq A+x_2$. Again by assumption, $A+x_2\notin{\mathcal{B}}$ so we can take $J_2=A$. Continuing like this, we find that $A\in\max(E,(A+x_1+\cdots+x_h)\cap{\mathcal{B}})=\max(E,(A+B_2)\cap{\mathcal{B}})$. However, this is a contradiction, since $B_2\subseteq A+B_2$ and $\dim A<\dim B_2$. We conclude that (B3) has to hold.
\end{proof}
We finish with the result for spanning spaces, which can be proven by dualizing the arguments used for independent spaces.
\begin{Theorem}\label{S3isImplByS4}
Let ${\mathcal{S}}$ be a family of subspaces of $E$. Define the following spanning axiom.
\begin{itemize}
\item[(oS4)] For all $A,B\in\mathcal{L}(E)$ and $I,J\in \mathcal{L}(E)$ such that $I\in\min(A,{\mathcal{S}} )$ and $J\in\min(B, {\mathcal{S}})$, there exists $K\in\min(A\cap B, {\mathcal{S}})$ such that $I\cap J\subseteq K$.
\end{itemize}
If ${\mathcal{S}}$ satisfies the axioms (S1), (S2) and (oS4), then ${\mathcal{S}}$ satisfies (S3).
\end{Theorem}
\section{An alternative for the axiom (I3)}
In this section we propose a new version of the axiom (I3), which we will call (nI3), and we prove that it subsumes both the (I3) and the (I4) axioms for a $q$-matroid.
\subsection{Motivation}
Before we state the axiom (nI3), we will give some motivation for it. Let us look at a small example. Let $E=\mathbb{F}_2^2$ and let $x$, $y$ and $z$ be the three $1$-dimensional subspaces of $E$. If we let $\mathcal{I}=\{x,0\}$, we have a family satisfying the axioms (I1), (I2) and (I3), but not (I4). The latter can be seen by applying (I4) to $y$ and $z$: neither of them is in $\mathcal{I}$, so a maximal member of $\mathcal{I}$ in $y+z=E$ should be contained in $0+0=0$. However, this is a contradiction because $x\subseteq y+z$ and $x\in\mathcal{I}$. \\
Define a rank function for all $A\subseteq E$ as $r(A)=\max\{\dim I:I\in\mathcal{I},I\subseteq A\}$. Then the rank function in our example is not semimodular, i.e., does not satisfy axiom (R3):
\[ r(y+z)+r(y\cap z)=r(E)+r(0)=1+0>r(y)+r(z)=0. \]
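Explicitly, the rank values in this example are
\[ r(0)=0,\qquad r(x)=1,\qquad r(y)=r(z)=0,\qquad r(E)=1, \]
so (R1) and (R2) hold, while (R3) fails for the pair $y,z$ as displayed above.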
We want the rank function to be that of a $q$-matroid. How can we achieve this with little change to $\mathcal{I}$? Note that we can still have $z\notin\mathcal{I}$, provided we let $y\in\mathcal{I}$. This gives a mixed diamond, as explained in Appendix A.3 of \cite{ceriajurrius2021}.
\begin{Lemma}\label{LoopsInSameSubs}
If a $q$-matroid $M$ has loops, then they are exactly all $1$-dimensional subspaces of a subspace $L\subseteq E$.
\end{Lemma}
\begin{proof}
This is a direct consequence of Lemma 11 in \cite{JP18}, which says that if $x$ and $y$ are loops, then $x+y$ has rank $0$. Applying this iteratively, we find that the sum of any number of loops has rank $0$. Then axiom (R2) implies that all $1$-dimensional subspaces of this sum of loops have rank $0$, hence are loops themselves.
\end{proof}
\begin{Definition}\label{Loopspace}
The subspace of $E$ whose $1$-dimensional subspaces are exactly the loops of $M$ is called the \emph{loop space} of $M$. We usually denote it by $L$.
\end{Definition}
What goes wrong in our small example has to do with the loop space. If there is one $1$-dimensional space that is independent, then all other $1$-dimensional spaces that are not in the loop space $L$ have to be independent. This applies to other dimensions as well, by applying contraction. \\
Let $I,J\in\mathcal{I}$ with $\dim I<\dim J$. Then (I3) tells us that there is a $1$-dimensional space $x\subseteq J$, $x\not\subseteq I$ such that $I+x\in\mathcal{I}$. If we consider $M/I$, we find that $(I+x)/I$ is a $1$-dimensional independent space in $M/I$. So, outside the loop space $L$ of $M/I$, all $1$-dimensional spaces in $M/I$ have to be independent. Since not every $1$-dimensional space in $M/I$ is a loop, the space $L$ has at least codimension $1$ in $E/I$. \\
Now we will translate this to $M$. The independent $1$-dimensional spaces $(I+x)/I$ in $M/I$ correspond to $1$-dimensional spaces $x$ outside the space $L\oplus I:=X$ of codimension $1$ in $E$, and for all of them, $I+x$ has to be independent. We summarise this in a proposed new axiom (nI3).
\begin{Definition}\label{nuovoI3}
Let $E$ be a vector space and $\mathcal{I}$ a family of subspaces. We define the following property (axiom) of $\mathcal{I}$.
\begin{itemize}
\item[(nI3)] For all $I,J\in\mathcal{I}$ satisfying $\dim I<\dim J$, there exists a codimension $1$ subspace $X\subseteq E$ with $I\subseteq X$, $J\not\subseteq X$ such that $I+x\in\mathcal{I}$ for all $1$-dimensional $x\subseteq E$, $x\not\subseteq X$.
\end{itemize}
\end{Definition}
\begin{Remark}\label{vnI3impliesI3}
Because $J\not\subseteq X$, there is an $x\subseteq J$ such that $x\not\subseteq X$ and thus $I+x\in\mathcal{I}$. This shows that (nI3) implies (I3). Also, (nI3) becomes (I3) in the classical case, since there is only one element $x$ outside $X$ that, by construction, is in $J$.
\end{Remark}
Looking back at the small example we started with, we see that letting $x\in\mathcal{I}$ would imply, by applying (nI3) to $0$ and $x$, that also $y\in\mathcal{I}$.
\subsection{The independence axioms are equivalent to (I1), (I2), (nI3)}
In this section we prove that the axiom system (I1), (I2), (I3), (I4) is equivalent to the axiom system (I1), (I2), (nI3). First we show that the axioms (I1), (I2), (I3) and (I4), together, imply the new axiom (nI3).
\begin{Theorem}
Let $(E, \mathcal{I})$ be a $q$-matroid. Then, for the set $\mathcal{I}$, the axiom (nI3) holds true.
\end{Theorem}
\begin{proof}
Let $I, J\in \mathcal{I}$, $\dim(I)<\dim(J)$. Consider all $1$-spaces $y$ not in $I$ such that $I$ is a maximal independent space in $I+y$. Let $A$ be the sum of all such $I+y$. We claim that $I$ is a maximal independent space in $A$. This can be seen by applying (I4) multiple times. Let $I+y_1$ and $I+y_2$ have $I$ as a maximal independent subspace. Then, by (I4), $(I+y_1)+(I+y_2)$ has $I$ as a maximal independent subspace. Iterating this argument shows that $I$ is a maximal independent subspace in $A$ and that, moreover, all $I+y$ with $y\subseteq A$, $y\not\subseteq I$ are not in $\mathcal{I}$. \\
On the other hand, for all $1$-spaces $z\subseteq E$, $z\not\subseteq A$ we have that $I+z\in\mathcal{I}$. We know from (I3) that there is at least one such $z$, namely the $x\subseteq J$, $x\not\subseteq I$ such that $I+x\in\mathcal{I}$. This means that $\dim A\leq\dim E-1=n-1$. Now take a space $X$ of codimension $1$ in $E$ with $A\subseteq X$ and $x\not\subseteq X$. Then for all $z\subseteq E$, $z\not\subseteq X$ we have that $I+z\in\mathcal{I}$ and this proves (nI3).
\end{proof}
Next we show that the axiom (nI3), together with (I1) and (I2), implies the axioms (I3) and (I4). Before doing that, we prove a small lemma about restriction.
\begin{Lemma}\label{restrict}
Let $E$ be a vector space and let $ \tilde{\mathcal{I}}$ be a collection of subspaces of $E$ satisfying the axioms (I1), (I2) and (nI3). Let $F\subseteq E$ and let $\tilde{\mathcal{I}}|_F=\{I\in\tilde{\mathcal{I}}:I\subseteq F\}$. Then $\tilde{\mathcal{I}}|_F$ satisfies the axioms (I1), (I2) and (nI3).
\end{Lemma}
\begin{proof}
It is clear that $\tilde{\mathcal{I}}|_F$ satisfies (I1) (because $0\in\tilde{\mathcal{I}}$) and (I2). Let $I,J\in\tilde{\mathcal{I}}|_F$ with $\dim I<\dim J$. Let $X$ be the codimension $1$ space in $E$ defined by axiom (nI3). Then $F\not\subseteq X$, because $J\not\subseteq X$ and $J\subseteq F$. Therefore, $X\cap F$ is a codimension $1$ space in $F$ that satisfies (nI3).
\end{proof}
\begin{Theorem}\label{verynewI3thm}
Let $E$ be a vector space and let $\tilde{\mathcal{I}}$ be a collection of subspaces satisfying the axioms (I1), (I2) and (nI3). Then $(E,\tilde{\mathcal{I}})$ is a $q$-matroid.
\end{Theorem}
\begin{proof}
We have to show that $\tilde{\mathcal{I}}$ satisfies the axioms (I1), (I2), (I3) and (I4). The first two axioms are satisfied by definition and (nI3) implies (I3) as was noted in Remark \ref{vnI3impliesI3}, so it is left to prove (I4). By Theorem \ref{oldI4s} it is enough to prove the axiom (I4''). \\
Thanks to Lemma \ref{restrict} we can let $n=\dim(A+x)$ and restrict to $A+x$. (I4'') is direct if $x\subseteq A$ or if $I\in\max(A+x,\tilde{\mathcal{I}})$, so suppose neither is the case. Then there is a $J\in\max(A+x,\tilde{\mathcal{I}})$ with $\dim J>\dim I$. Moreover, $J\not\subseteq A$ because that would contradict the maximality of $I$. By (nI3), there is a codimension $1$ space $X$ in $A+x$ such that $I\subseteq X$, $J\not\subseteq X$ and for all $y\not\subseteq X$ we have $I+y\in\tilde{\mathcal{I}}$. We now claim that $X=A$. If not, there would be a $y\not\subseteq X$, $y\subseteq A$ such that $I+y\in\tilde{\mathcal{I}}$. Since $I+y\subseteq A$, this contradicts the maximality of $I$. So, $X=A$ and by (nI3) we have that $I+x\in\tilde{\mathcal{I}}$. Moreover, $I+x\in\max(A+x,\tilde{\mathcal{I}})$ because if there is a member of $\tilde{\mathcal{I}}$ of bigger dimension in $A+x$, its intersection with $A$ would have dimension strictly bigger than $\dim I$, which contradicts, again, $I\in\max(A,\tilde{\mathcal{I}})$. That proves (I4'') and shows that $(E,\tilde{\mathcal{I}})$ is a $q$-matroid.
\end{proof}
\section{A new bases axiom}
As a consequence of our introduction of (nI3), we can define a new basis axiom (nB3), which again removes the need for the fourth axiom (B4).
\begin{Definition}
Let $E$ be a vector space and $\mathcal{B}$ a family of subspaces. We define the following property (axiom) of $\mathcal{B}$.
\begin{itemize}
\item[(nB3)] For all $B_1,B_2\in{\mathcal{B}}$ and for each subspace $A$ of codimension $1$ in $B_1$ containing $B_1 \cap B_2$, there exists $X\subseteq E$ of codimension $1$ in $E$ such that $X \supseteq A$, $X\not \supseteq B_2$ and $A+x \in \mathcal{B}$ for all $1$-dimensional $x\subseteq E$, $x\not\subseteq X$.
\end{itemize}
\end{Definition}
\begin{Remark}\label{basisclassical2}
Because $B_2\not\subseteq X$, there is an $x\subseteq B_2$ such that $x\not\subseteq X$ and thus $A+x\in{\mathcal{B}}$. This shows that (nB3) implies (B3). Also, (nB3) becomes (B3) in the classical case, since there is only one element $x$ outside $X$ that, by construction, is in $B_2$.
\end{Remark}
To show that (nB3) holds, we use a similar approach to what was done for independent spaces. We prove that the axiom system (B1), (B2), (B4) is equivalent to the axiom system (B1), (B2), (nB3). Since we showed in Theorem \ref{B3isImplByB4} that (B4) implies (B3), we can freely use (B3) within the proofs for convenience. First we show that the axioms (B1), (B2) and (B4), together, imply the new axiom (nB3).
\begin{Theorem}\label{nB3holds}
Let $(E, {\mathcal{B}})$ be a $q$-matroid. Then, for the set ${\mathcal{B}}$, the axiom (nB3) holds true.
\end{Theorem}
\begin{proof}
Let $B_1, B_2\in {\mathcal{B}}$ and let $A\subseteq B_1$ be of codimension $1$ such that $B_1\cap B_2\subseteq A$. Consider all $1$-spaces $y$ not in $A$ such that $A \in \max(E,(A+y)\cap {\mathcal{B}})$. Let $C$ be the sum of all such $A+y$. We claim that $A \in \max(E,C\cap {\mathcal{B}})$.
This can be seen by applying (B4) multiple times. Since $A\in\max(E,(A+y_1)\cap{\mathcal{B}})$ and $A\in\max(E,(A+y_2)\cap{\mathcal{B}})$, by (B4) $A\in\max(E,((A+y_1)+(A+y_2))\cap{\mathcal{B}})$. Iterating this argument shows that $A\in\max(E,C\cap{\mathcal{B}})$ and moreover, all $A+y$ with $y\subseteq C$, $y\not\subseteq A$ are not in ${\mathcal{B}}$. \\
On the other hand, for all $1$-spaces $z\subseteq E$, $z\not\subseteq C$ we have that $A+z\in{\mathcal{B}}$. We know from (B3) that there is at least one such $z$, namely the $x\subseteq B_2$, $x\not\subseteq B_1$ such that $A+x\in{\mathcal{B}}$. This means that $\dim C\leq\dim E-1=n-1$. Now take a space $X$ of codimension $1$ in $E$ with $C\subseteq X$ and $x\not\subseteq X$. Then for all $z\subseteq E$, $z\not\subseteq X$ we have that $A+z\in{\mathcal{B}}$ and this proves (nB3).
\end{proof}
Now we work towards the converse of this theorem. For this we will use the following two variations of the axiom (B4).
\begin{itemize}
\item[(B4')] Let $A,B\subseteq E$ and $I\in\max(E,A\cap{\mathcal{B}})$. Then there exists $J\in\max(E,(A+B)\cap{\mathcal{B}})$ such that $J\subseteq I+B$.
\item[(B4'')] Let $A\subseteq E$ and $I\in\max(E,A\cap{\mathcal{B}})$. Let $x \subseteq E$ be a one-dimensional space. Then, there exists $J\in\max(E,(A+x)\cap{\mathcal{B}})$ such that $J\subseteq x+I$.
\end{itemize}
The next result shows that we can in fact take these axioms to define a $q$-matroid. It is the statement of Proposition \ref{newI4s} but for bases instead of independent spaces, and the proof is similar to the proof for the independence axioms in \cite[Theorem 26]{bcj}.
\begin{Theorem}\label{B4Variations}
Let ${\mathcal{B}}$ be a collection of subspaces satisfying (B1) and (B2).
Then the axiom systems (B1), (B2), (B4); (B1), (B2), (B4') and (B1), (B2), (B4'') are pairwise equivalent.
\end{Theorem}
\begin{proof}
First, we assume that (B4) holds for the collection ${\mathcal{B}}$ and we show that this implies that (B4') and (B4'') also hold.
Let $A,B\subseteq E$ and let $I\in\max(E,A\cap{\mathcal{B}})$ and $J\in\max(E,B\cap{\mathcal{B}})$. By (B4) there is a $K\in\max(E,(A+B)\cap{\mathcal{B}})$ with $K \subseteq I+J$. Since $J\subseteq B$, $K$ is also contained in $I+B$. This shows (B4'). We get (B4'') by taking $B=x$.
Suppose that (B4'') holds. We will show that (B4') holds, by induction on $\dim(B)$; the case $\dim(B)=0$ is trivial. Let $A ,B\subseteq E$ and let $I\in\max(E,A\cap{\mathcal{B}})$.
Suppose that (B4') holds for all subspaces $B'$ of dimension less than $\dim(B)$. Let $C$ be a subspace of codimension 1 in $B$ and write $B=x+C$ for a $1$-dimensional space $x$. By the induction hypothesis, there exists $J\in\max(E,(A+C)\cap{\mathcal{B}})$ such that $J \subseteq I+C$. By (B4'') there exists
$J'\in\max(E,(A+C+x)\cap{\mathcal{B}}) = \max(E,(A+B)\cap{\mathcal{B}})$ such that $J' \subseteq J + x \subseteq I+C+x=I+B$. This proves (B4').
Now suppose that (B4') holds. Let $A,B\subseteq E$ and let
$I\in\max(E,A\cap{\mathcal{B}})$ and $J\in\max(E,B\cap{\mathcal{B}})$. We claim there is a $K\in\max(E,(A+B)\cap{\mathcal{B}})$ with $K \subseteq I+J$.
Since $J\in\max(E,B\cap{\mathcal{B}})$, applying (B4') to $B$ and $I$ gives that there exists $N\in\max(E,(I+B)\cap{\mathcal{B}})$ such that $N \subseteq I+J$.
Again by (B4'), there exists
$M\in\max(E,(A+B)\cap{\mathcal{B}})$ such that $M \subseteq I+B$. But then $M\in\max(E,(I+B)\cap{\mathcal{B}})$ and hence $M$ and $N$ have the same dimension.
It follows that $N\in\max(E,(A+B)\cap{\mathcal{B}})$ and $N \subseteq I+J$ and so (B4') implies (B4). The result follows.
\end{proof}
We now prove the converse of Theorem \ref{nB3holds}.
\begin{Theorem}\label{verynewB3thm}
Let $E$ be a vector space and let $\tilde{{\mathcal{B}}}$ be a collection of subspaces satisfying the axioms (B1), (B2) and (nB3). Then $(E,\tilde{{\mathcal{B}}})$ is a $q$-matroid.
\end{Theorem}
\begin{proof}
We have to show that $\tilde{{\mathcal{B}}}$ satisfies the axioms (B1), (B2), and (B4), since then (B3) is implied by Theorem \ref{B3isImplByB4}. The first two axioms are satisfied by definition, so it is left to prove (B4). By Theorem \ref{B4Variations}, it is enough to prove the axiom (B4'').
Let $A\subseteq E$ and
$I\in\max(E,A\cap \tilde{{\mathcal{B}}})$. Let $x\subseteq E$ be a $1$-dimensional space. (B4'') is direct if $x\subseteq A$ or if
$I\in\max(E,(A+x)\cap \tilde{{\mathcal{B}}})$, so suppose both are not the case. Then there is a $J\in\max(E,(A+x)\cap \tilde{{\mathcal{B}}})$ with $\dim J>\dim I$. Moreover, $J\not\subseteq A$ because that would contradict the maximality of $I$.
Since $I$ and $J$ are intersections of members of $\tilde{{\mathcal{B}}}$ with $A$ and $A+x$, respectively, we can find $B_1,B_2\in\tilde{{\mathcal{B}}}$ such that $I=B_1\cap A$ and $J=B_2\cap(A+x)$. Moreover, there is a codimension $1$ subspace $C\subseteq B_1$ such that $C\cap(A+x)=I$ and $B_1\cap B_2\subseteq C$. Now we apply (nB3) to $B_1,B_2$ and $C$. This gives a codimension $1$ space $X\subseteq E$ such that $C\subseteq X$, $B_2\not\subseteq X$ and $C+y\in \tilde{{\mathcal{B}}}$ for all $1$-dimensional $y\subseteq E$, $y\not\subseteq X$.
We now claim that $X\cap(A+x)=A$. If not, there would be a $z\not\subseteq X$, $z\subseteq A$ such that $C+z\in\tilde{{\mathcal{B}}}$. Then $(C+z)\cap(A+x)=I+z$ and this contradicts the maximality of $I$. So, $X\cap(A+x)=A$ and in particular $x\not\subseteq X$, so by (nB3) we have that $C+x\in\tilde{{\mathcal{B}}}$. Moreover, $I+x\in\max(E,(A+x)\cap \tilde{{\mathcal{B}}})$ because if there is a bigger intersection with a member of $\tilde{{\mathcal{B}}}$ in $A+x$, its intersection with $A$ would have dimension strictly bigger than $\dim I$, which contradicts, again, that $I\in\max(E,A\cap \tilde{{\mathcal{B}}})$. That proves (B4'') and shows that $(E,\tilde{{\mathcal{B}}})$ is a $q$-matroid.
\end{proof}
\section{A new spanning spaces axiom}
In this section we show that it is possible to define spanning spaces with three axioms; this is an easy consequence of what we did for independent spaces.
\begin{Definition}
Let $E$ be a vector space and $\mathcal{S}$ a family of subspaces. We define the following property (axiom) of $\mathcal{S}$.
\begin{itemize}
\item[(nS3)] For all $S_1,S_2\in\mathcal{S}$ satisfying $\dim S_2<\dim S_1$, there exists a $1$-dimensional subspace $x\subseteq S_1$, $x\not\subseteq S_2$ such that for all codimension-one $X\subseteq E$ with $X\not\supseteq x$ we have $X\cap S_1\in{\mathcal{S}}$.
\end{itemize}
\end{Definition}
\begin{Theorem}
Let $E$ be a vector space and let ${\mathcal{S}}$ be a family of subspaces satisfying (S1), (S2) and (nS3). Then ${\mathcal{S}}$ is the family of spanning spaces of a $q$-matroid.
\end{Theorem}
\begin{proof}
This follows directly from the fact that spanning spaces are the dual spaces of the independent spaces of the dual $q$-matroid, and the axiom (nS3) is the dual statement of (nI3).
\end{proof}
\section{Two new \emph{q}-cryptomorphisms}
In this section we apply our results on new axioms for independent spaces and bases to derive new cryptomorphisms: between circuits and independent spaces, and between independent spaces and bases. The latter was already done in \cite{JP18}, but as we will discuss, we believe there is a gap in that proof.
We will see that while (nI3) might feel like a natural generalisation of the third axiom for matroids, in practice (oI4) or (I4'') is often more convenient to work with in proofs.
\subsection{Circuits and independent spaces}
Here we prove that the axioms (C1), (C2) and (C3) are equivalent to the axioms (I1), (I2) and (I4'').
\begin{Theorem}\label{FromItoC}
Let $(E,\mathcal{I})$ be a $q$-matroid. Define
\[ \mathcal{C}_\mathcal{I}=\{C\subseteq E:C\notin\mathcal{I},I\in\mathcal{I}\text{ for all } I\subsetneq C\}. \]
Then $\mathcal{C}_\mathcal{I}$ is a family of circuits, that is, it satisfies the axioms (C1), (C2) and (C3).
\end{Theorem}
\begin{proof}
The axioms (C1) and (C2) follow directly from the definition of $\mathcal{C}_\mathcal{I}$. We will prove (C3) by making use of the independence axiom (oI4).
Let $C_1,C_2 \in \mathcal{C}_\mathcal{I}$, $C_1\neq C_2$ and let $X \subseteq E$ be a codimension one space.
Suppose, towards a contradiction, that $(C_1+C_2)\cap X$ does not contain any circuit, which makes it an independent space.
Let $I_1 \subseteq C_1$ and $I_2 \subseteq C_2$ be of codimension $1$ such that $C_1\cap C_2=I_1\cap I_2$. Note that $I_1$ and $I_2$ are independent and, since they have codimension $1$, they are maximal independent subspaces of $C_1$ and $C_2$, respectively, with respect to both dimension and inclusion.
By (oI4) there is a maximal (w.r.t. dimension) independent subspace $I$ of $C_1+C_2$ contained in $I_1+I_2$.
Let $F\subseteq E$ be a codimension $1$ space containing $C_1+I_2$ but not $C_2$, and let $G\subseteq E$ be a codimension $1$ space containing $C_2+I_1$ but not $C_1$. Clearly $F \neq G$, so $\dim((C_1+C_2)\cap F \cap G)= \dim(C_1+C_2)-2$.
Now, $I \subseteq ((C_1+C_2)\cap F \cap G)$, so
\[\dim(I) \leq \dim(C_1+C_2)-2 < \dim((C_1+C_2)\cap X). \]
However, by assumption $(C_1+C_2)\cap X$ is an independent space, which contradicts the maximality of $I$.
We conclude that (C3) needs to hold.
\end{proof}
\begin{Theorem}\label{FromCtoI}
Let $(E,\mathcal{C})$ be a $q$-matroid. Define
\[ \mathcal{I}_\mathcal{C}=\{I\subseteq E:C\not\subseteq I\text{ for all }C\in\mathcal{C}\}. \]
Then $\mathcal{I}_\mathcal{C}$ is a family of independent spaces, that is, it satisfies the axioms (I1), (I2) and (I4'').
\end{Theorem}
\begin{proof}
The axioms (I1) and (I2) follow directly from the definition of $\mathcal{I}_\mathcal{C}$; for brevity we write $\mathcal{I}=\mathcal{I}_\mathcal{C}$ in the rest of the proof. For (I4''), let $A\subseteq E$ and let $I\subseteq A$ be a maximal independent subspace. (Throughout this proof, maximality is always taken with respect to dimension.) Let $x\subseteq E$ be a $1$-dimensional space. If $x\subseteq A$, (I4'') clearly holds. If $I=A$, we also get that (I4'') holds: either $I$ is a maximal independent space in $A+x$, or $I+x=A+x$ is independent itself. So assume $x\not\subseteq A$ and $I\neq A$.
Towards a contradiction, suppose (I4'') does not hold for $A$ and $x$. Let $J$ be a maximal independent subspace of $A+x$. If $\dim J=\dim I$, then $I$ is also a maximal independent space of $A+x$, contradicting the assumption that $I+x$ contains no maximal independent subspace of $A+x$. So we have $\dim J>\dim I$. In fact, $\dim J=\dim I+1$: otherwise $J\cap A$, which is independent and has dimension at least $\dim J-1$, would contradict the maximality of $I$ in $A$.
There might be several choices for $J$: pick one such that $I\cap J$ is maximal. We claim that $I$ cannot be contained in $J$. If this were the case, we could write $J=I+y\in\mathcal{I}$, while $I+x\notin\mathcal{I}$ by construction. This implies $x\not\subseteq J$, hence $J+x\notin\mathcal{I}$. Thus $I+x$ and $J+x$ both contain a circuit. Apply (C3) to these circuits with a codimension $1$ space $Y$ such that $A\subseteq Y$, $A+y\not\subseteq Y$. This yields a circuit inside $J\cap A$, which is a contradiction because $J\cap A$ is independent by (I2). So, $I\not\subseteq J$.
We pick a codimension $1$ space $X\subseteq E$ and a $1$-dimensional space $e$ such that $J\subseteq X$, $I\not\subseteq X$, $e\subseteq I$ and $e\not\subseteq X$. Note that then $(J+e)\cap X=J$. For any codimension $1$ space $F\subseteq E$ with $I\subseteq F$, $J\not\subseteq F$ we can now construct the following. Define $T_F=(J+e)\cap F$. Since $e\not\subseteq J$ and $J\not\subseteq F$, we have $\dim T_F=\dim J$. However, $\dim(T_F\cap I)>\dim(J\cap I)$, so by the assumption on our choice of $J$ the space $T_F$ is not independent, hence contains a circuit $C_F$. We cannot have $C_F\subseteq X$, because that would imply $C_F\subseteq (J+e)\cap F\cap X = J\cap F\subseteq J$ and the latter is independent.
Let $G,H\subseteq E$ be two distinct codimension $1$ spaces with $I\subseteq G,H$ and $J\not\subseteq G,H$. These exist, because $\dim J-\dim(J\cap I) \geq \dim J-(\dim I-1)=2$. We can assume $J-G\neq J-H$, so $C_G\neq C_H$. Now apply (C3) to $C_G, C_H$ and $X$. This gives a circuit $C\subseteq (C_G+C_H)\cap X$. Note that since $C_G,C_H\not \subseteq X$, we have $C\neq C_G,C_H$. Now $C_G+C_H\subseteq (J+e)\cap G + (J+e)\cap H\subseteq J+e$, so $C \subseteq (C_G+C_H)\cap X\subseteq (J+e)\cap X=J$. This is a contradiction because $J$ is independent. We conclude that (I4'') must hold.
\end{proof}
\begin{Corollary}\label{WeHaveCrypt}
Let $(E,\mathcal{I})$ be a collection of independent spaces and let $(E,\mathcal{C})$ be a collection of circuits.
\begin{enumerate}
\item $(E,\mathcal{I})$ determines a $q$-matroid with collection of independent spaces $\mathcal{I}$ and collection of circuits $\mathcal{C}_\mathcal{I}$.
\item $(E,\mathcal{C})$ determines a $q$-matroid with collection of circuits $\mathcal{C}$ and collection of independent spaces $\mathcal{I}_\mathcal{C}$.
\end{enumerate}
\end{Corollary}
\begin{proof}
It was shown in \cite{bcj} that $(E,\mathcal{I})$ determines a $q$-matroid with collection of independent spaces $\mathcal{I}$ and that $(E,\mathcal{C})$ determines a $q$-matroid with collection of circuits $\mathcal{C}$.
The statements now follow from Theorems \ref{FromItoC} and \ref{FromCtoI} and the straightforward result that $\mathcal{I}_{\mathcal{C}_\mathcal{I}}=\mathcal{I}$ and $\mathcal{C}_{\mathcal{I}_\mathcal{C}}=\mathcal{C}$.
\end{proof}
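As a sanity check, the following short computational sketch (included for illustration only; it uses Python as a convenient calculator and plays no role in the proofs) verifies the conclusion of Corollary \ref{WeHaveCrypt} on a small toy example, namely the uniform $q$-matroid on $E=\mathbb{F}_2^3$ in which a subspace is independent precisely when its dimension is at most $1$.
\begin{verbatim}
# Illustration only (not needed for any proof): for the uniform q-matroid
# on E = GF(2)^3 in which a subspace is independent iff dim <= k, we check
# that C_I consists exactly of the (k+1)-dimensional subspaces and that
# I_{C_I} = I, as in the corollary above.
from itertools import combinations, product

n, k = 3, 1
vectors = [v for v in product(range(2), repeat=n) if any(v)]

def span(gens):
    """Subspace of GF(2)^n spanned by gens, as a frozenset of tuples."""
    S = {(0,) * n}
    for g in gens:
        S |= {tuple((a + b) % 2 for a, b in zip(s, g)) for s in S}
    return frozenset(S)

def dim(S):
    return len(S).bit_length() - 1        # |S| = 2^dim(S)

subspaces = {span(g) for r in range(n + 1) for g in combinations(vectors, r)}
indep = {S for S in subspaces if dim(S) <= k}                  # the family I

# C_I: dependent spaces all of whose proper subspaces are independent
circuits = {C for C in subspaces
            if C not in indep and all(S in indep for S in subspaces if S < C)}
# I_{C_I}: spaces containing no circuit
indep_back = {S for S in subspaces if not any(C <= S for C in circuits)}

assert circuits == {S for S in subspaces if dim(S) == k + 1}
assert indep_back == indep
print(len(subspaces), "subspaces,", len(circuits), "circuits")
\end{verbatim}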
\subsection{Bases and independent spaces}
A cryptomorphism between independent spaces and bases was proven in \cite[Theorem 37]{JP18}. However, we believe that there is a gap in that proof. In \cite[Theorem 37]{JP18}, one of the steps is to assume a collection of bases, define $\mathcal{I}_{\mathcal{B}}=\{I\subseteq B:B\in{\mathcal{B}}\}$, and prove that $\mathcal{I}_{\mathcal{B}}$ satisfies the axioms (I1)-(I4). In the proof of (I3), truncation is used. It was proven earlier in \cite{JP18} that the truncation of a $q$-matroid, defined by its rank function, is again a $q$-matroid. However, when one starts from a collection of bases, this result cannot yet be applied: it only becomes available once a cryptomorphism between bases and the rank function is established. This is not yet the case -- in fact, a cryptomorphism between bases and the rank function would be a corollary of the cryptomorphism between bases and independent spaces, which is the goal of \cite[Theorem 37]{JP18}.
In order to fix this issue, we can use our results from Section \ref{redundancy} that show the redundancy of the axioms (I3) and (B3). As was also mentioned in \cite{JP18}, the axioms (I4) and (B4) are easily related to each other. The next lemma makes this precise.
\begin{Lemma}\label{I4B4TheSame}
\begin{enumerate}
\item Let ${\mathcal{B}}$ be a collection of
subspaces of $E$ satisfying (B1) and (B2). Define $\mathcal{I}_{\mathcal{B}}=\{I\subseteq B:B\in{\mathcal{B}}\}$. Then for all $A\subseteq E$, $\max(E,A \cap {\mathcal{B}})= \max(A,\mathcal{I}_{\mathcal{B}})$.
\item Let $\mathcal{I}$ be a collection of subspaces of $E$ satisfying (I1) and (I2). Define ${\mathcal{B}}_\mathcal{I}=\max(\mathcal{I})$. Then for all $A\subseteq E$, $\max(A,\mathcal{I})=\max(E,A\cap{\mathcal{B}}_\mathcal{I})$.
\end{enumerate}
\end{Lemma}
\begin{proof}
\begin{enumerate}
\item Let $I\in\max(E,A \cap {\mathcal{B}})$. Then by definition, $I\in\mathcal{I}_{\mathcal{B}}$. Suppose there is an $I'\in\max(A,\mathcal{I}_{\mathcal{B}})$ with $\dim(I)<\dim(I')$. Then there would be a $B\in{\mathcal{B}}$ such that $I'\subseteq B$, hence $I'\in\max(E, A\cap {\mathcal{B}})$, contradicting the maximality of $I$.
For the reverse inclusion, let $I\in\max(A,\mathcal{I}_{\mathcal{B}})$. Then there is a $B\in{\mathcal{B}}$ such that $I=B\cap A$. Suppose there is a $B'\in{\mathcal{B}}$ such that $\dim(B' \cap A) > \dim(I)$. Then $B' \cap A\in \mathcal{I}_{\mathcal{B}}$, contradicting the maximality of $I$. This proves that $\max(E,A \cap {\mathcal{B}})= \max(A,\mathcal{I}_{\mathcal{B}})$.
\item It was proven in \cite[Theorem 37]{JP18} that ${\mathcal{B}}_\mathcal{I}$ satisfies the axioms (B1) and (B2). Also, it was shown that ${\mathcal{B}}_{\mathcal{I}_{\mathcal{B}}}={\mathcal{B}}$ and that $\mathcal{I}_{{\mathcal{B}}_\mathcal{I}}=\mathcal{I}$. Applying the first part of this lemma to ${\mathcal{B}}_\mathcal{I}$ gives that $\max(E,A \cap {\mathcal{B}}_\mathcal{I})= \max(A,\mathcal{I}_{{\mathcal{B}}_\mathcal{I}})=\max(A,\mathcal{I})$. \qedhere
\end{enumerate}
\end{proof}
\begin{Corollary}\label{WeHaveCrypt2}
Let $(E,\mathcal{I})$ be a collection of independent spaces and let $(E,{\mathcal{B}})$ be a collection of bases.
\begin{enumerate}
\item $(E,\mathcal{I})$ determines a $q$-matroid with collection of independent spaces $\mathcal{I}$ and collection of bases ${\mathcal{B}}_\mathcal{I}$.
\item $(E,{\mathcal{B}})$ determines a $q$-matroid with collection of bases ${\mathcal{B}}$ and collection of independent spaces $\mathcal{I}_{\mathcal{B}}$.
\end{enumerate}
\end{Corollary}
\begin{proof}
By Theorem \ref{I3isImplByI4}, a collection of independent spaces is completely determined by the axioms (I1), (I2) and (oI4) and moreover, $(E,\mathcal{I})$ defines a $q$-matroid. By Theorems \ref{B3isImplByB4} and \ref{B4Variations}, a collection of bases is completely determined by the axioms (B1), (B2) and (B4).
Assume $\mathcal{I}$ satisfies (I1), (I2), (I4). By \cite[Theorem 37]{JP18}, ${\mathcal{B}}_\mathcal{I}$ satisfies (B1) and (B2) and by Lemma \ref{I4B4TheSame} it satisfies (B4). For the converse, assume ${\mathcal{B}}$ satisfies (B1), (B2) and (B4). Again by \cite{JP18}, $\mathcal{I}_{\mathcal{B}}$ satisfies (I1) and (I2) and by Lemma \ref{I4B4TheSame} it satisfies (oI4). Finally, ${\mathcal{B}}_{\mathcal{I}_{\mathcal{B}}}={\mathcal{B}}$ and $\mathcal{I}_{{\mathcal{B}}_\mathcal{I}}=\mathcal{I}$ is also proven in \cite[Theorem 37]{JP18}.
\end{proof}
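A companion sketch (again for illustration only, and written so that it runs on its own) checks the content of Corollary \ref{WeHaveCrypt2} on the same toy example: the bases obtained from the independent spaces are the maximal independent spaces, and passing to $\mathcal{I}_{\mathcal{B}}$ and back recovers the original collections.
\begin{verbatim}
# Illustration only: on the same toy example (uniform q-matroid on GF(2)^3
# with independent spaces of dimension <= k) we check that B_I = max(I),
# that I_B = {subspaces of some basis} equals I, and that the two
# assignments recover each other, as in the corollary above.
from itertools import combinations, product

n, k = 3, 1
vectors = [v for v in product(range(2), repeat=n) if any(v)]

def span(gens):
    S = {(0,) * n}
    for g in gens:
        S |= {tuple((a + b) % 2 for a, b in zip(s, g)) for s in S}
    return frozenset(S)

def dim(S):
    return len(S).bit_length() - 1

subspaces = {span(g) for r in range(n + 1) for g in combinations(vectors, r)}
indep = {S for S in subspaces if dim(S) <= k}

top = max(dim(S) for S in indep)
bases = {S for S in indep if dim(S) == top}                           # B_I
indep_from_bases = {S for S in subspaces if any(S <= B for B in bases)}  # I_B

assert indep_from_bases == indep                                      # I_{B_I} = I
assert {S for S in indep_from_bases
        if dim(S) == max(dim(T) for T in indep_from_bases)} == bases  # B_{I_B} = B
print(len(bases), "bases of dimension", top)
\end{verbatim}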
\end{document}
\begin{document}
\title{Mixed Frobenius Structure and local A-model}
\begin{abstract}
We define the notion of
mixed Frobenius structure which is a generalization
of the structure of a Frobenius manifold.
We construct a mixed Frobenius structure on the cohomology of
weak Fano toric surfaces and that of the three dimensional projective space
using local Gromov--Witten invariants.
This is an analogue of the Frobenius manifold
associated to the quantum cohomology in the local
Calabi--Yau setting.
\end{abstract}
\section{Introduction}
The purpose of this paper is to introduce the notion of
mixed Frobenius structure. It is a generalization of the
structure of a Frobenius manifold,
which plays an important role in the study of mirror
symmetry. Our motivation for introducing it
comes from local mirror symmetry.
The main results, Theorems \ref{toric3} and \ref{prop-P3-2},
show that local Gromov--Witten invariants in
the local A-model (i.e. the A-model side of local mirror symmetry)
give rise to mixed Frobenius structures.
In the rest of the introduction, we first explain the contents of the paper.
Then we explain the motivation from local mirror symmetry.
Throughout the paper, an algebra is an associative
commutative algebra with unit over $\mathbb{C}$
of finite dimension.
We denote by $\circ$ the multiplication of an algebra.
\subsection{A generalization of Frobenius algebra}
Recall that a Frobenius algebra is
a pair of an algebra $A$
and a nondegenerate bilinear form $\langle~,~\rangle:
A\times A\to\mathbb{C}$
satisfying the compatibility condition
\begin{equation}\label{A1}
\langle x \circ y, z\rangle=\langle x,y\circ z \rangle\quad
(x,y,z\in A)~.
\end{equation}
For example, the even part of the
cohomology ring of a compact oriented manifold, together with
the intersection form, is a Frobenius algebra (see, e.g. \cite{Kock}).
We generalize this notion as follows
(\S \ref{seciton:Frobenius-filtration}).
Let $A$ be an algebra. A Frobenius filtration on $A$
is a pair of an increasing sequence of ideals
$\cdots \subset I_k\subset I_{k+1}\subset \cdots$ in $A$
and a set of nondegenerate symmetric bilinear forms $(~,~)_k$
on graded quotients $I_{k}/I_{k-1}$ satisfying
the condition similar to \eqref{A1}:
\begin{equation}\nonumber
( x,a\circ y)_k=( a\circ x,y)_k
\qquad (x,y\in I_k/I_{k-1},~a\in A/I_{k-1})~.
\end{equation}
Given a Frobenius algebra $A$ with a nilpotent element $n$,
there are constructions of Frobenius filtrations on
$A$ and on the quotient algebra $A/(n)$, where $(n)$ is the ideal
generated by $n$ (\S \ref{section:nilpotent-quotient-constructions}).
We call them the nilpotent construction and the quotient construction respectively.
To illustrate these constructions, we give two types of examples.
The first examples are the cohomology rings
of compact K\"ahler manifolds (\S \ref{example-cohomology}).
The second examples are Chen--Ruan's cohomology rings
of some non-compact orbifolds (\S \ref{example-orbifold}).
\subsection{The mixed Frobenius structure}
Recall that
a Frobenius structure
on a manifold $M$ consists of
a structure of the Frobenius algebra on the tangent bundle $TM$
and a vector field $E$ on $M$ called the Euler vector field
which satisfy certain compatibility conditions
(see Definition \ref{def:Frobenius-structure}).
It was defined by Dubrovin \cite{Dubrovin},
but before that
K. Saito had found this structure in singularity theory \cite{Saito} (see also \cite{Saito-Takahashi}).
An example of the Frobenius structure is the quantum cohomology ring
(i.e. the cohomology with the quantum cup product)
of a compact symplectic manifold.
For other examples, see e.g. \cite{Manin}
and references therein.
We generalize this notion and define the mixed Frobenius structure (\S \ref{section:MHS}).
A mixed Frobenius structure (MFS)
on a complex manifold $M$
consists of
a structure of an algebra with a Frobenius filtration,
a torsion-free flat connection $\nabla$ on $TM$,
and an Euler vector field $E$
which satisfy compatibility conditions
(see Definition \ref{def:MFS}).
We extend the nilpotent and the quotient constructions
to the MFS
(\S \ref{section:nilpotent-quotient-construction2}).
Given a Frobenius manifold $M$ with a vector field $n$
which is nilpotent with respect to the multiplication
(and satisfies certain compatibility conditions with the metric and
the Euler vector field),
we obtain a MFS
on $M$ and one on a certain submanifold
(see Theorem \ref{thm:nilpotent2} and Corollary \ref{quotient-construction2}).
Finally,
we apply the quotient construction to
the quantum cohomology ring of
the projective compactification
$\mathbb{P}(K_S\oplus \mathcal{O}_S)$
where $S$ is a weak Fano toric surface
and $K_S$ is its canonical bundle,
and obtain a MFS on $H^*(S, \mathbb{C})$
(\S \ref{example-toricsurface}).
We also apply the quotient construction to
$\mathbb{P}(K_{\mathbb{P}^3}\oplus \mathcal{O}_{\mathbb{P}^3})$
and obtain a MFS on $H^*(\mathbb{P}^3, \mathbb{C})$
(\S \ref{example-localP3}).
In the appendix (\S \ref{section:deformed-connection}),
we give a definition of the deformed connection
and show that it is flat. We also write down the
deformed flat coordinates of the above MFS's.
\subsection{Motivation from local mirror symmetry}
For a Calabi--Yau threefold $X$,
two Frobenius structures on $H^*(X, \mathbb{C})$ are known.
One is given by
the quantum cohomology ring
and the intersection form (the A-model).
The other is constructed by Barannikov--Kontsevich \cite{BK}
which is closely related to the variation of Hodge structures
on $H^3(X, \mathbb{C})$ (the B-model).
If Calabi--Yau threefolds $X$ and $Y$ are mirror partners,
it is conjectured that the former Frobenius structure on $H^*(X, \mathbb{C})$
is isomorphic to the latter Frobenius structure on $H^*(Y, \mathbb{C})$.
This conjecture was proved when $X$ is a complete intersection
in a projective space \cite{Barannikov}.
In \cite{CKYZ}, local mirror symmetry for
weak Fano toric surfaces $S$
was derived from
mirror symmetry of toric Calabi--Yau hypersurfaces
containing $S$ as a smooth divisor.
Therefore, looking at the A-model side,
it is expected that
$H^*(S, \mathbb{C})$ should inherit
a Frobenius structure from the quantum
cohomology of
the corresponding Calabi--Yau threefold.
However, the above expectation turns out to be too naive
because it seems that there is no natural way to obtain a nondegenerate
bilinear form on $H^*(S, \mathbb{C})$ from the intersection form on
the Calabi--Yau threefold.
Therefore we have to abandon the nondegenerate pairing
and have to generalize the notion of Frobenius algebra.
Hints on how to do this come from looking at the B-model side.
In mirror symmetry, the B-model for Calabi--Yau manifolds
is about the Hodge structure
whereas in local mirror symmetry it is about the mixed Hodge structure,
which has one extra datum called the weight filtration.
This leads us to introduce the notion of Frobenius filtration.
We believe that the MFS on $H^*(S, \mathbb{C})$
constructed in
\S \ref{example-toricsurface}
is what would be obtained from
the Frobenius structure of the corresponding Calabi--Yau threefold
for the following two reasons.
The first reason is that
in the case $S=\mathbb{P}^2$, if the
quotient construction is applied to the
quantum cohomology of the
corresponding Calabi--Yau threefold $X$,
the resulting MFS is the same as this one.
The second reason is that the Frobenius filtration on $H^*(S, \mathbb{C})$
agrees with the weight filtration on the local B-model side
(see Remark \ref{rem:localB}).
It would be very interesting if we can construct
a MFS on the local B-model side and see whether it is
isomorphic to the one constructed here.
These are left as future problems.
\subsection*{Note added in 2018}
The original version of this paper was written in 2012.
Since then, there has been some progress on the study of mixed Frobenius structures.
In a sequel paper \cite{KonishiMinabe14}, the authors refined the definition of
mixed Frobenius structures. The new definition requires a stronger condition
on the potentiality of the product. See \cite[Remark 4.7]{KonishiMinabe14}.
It is shown in \cite{KonishiMinabe14} that the main results of this
paper remain true under this new definition.
We also remark that a mixed Frobenius structure on the local B-model side was
constructed by Shamoto \cite{Shamoto}.
\section{Frobenius ideal and Frobenius filtration }
\label{seciton:Frobenius-filtration}
\begin{definition}
Let $A$ be an algebra.
A {\it Frobenius ideal} of $A$ is a pair $(I,~(~,~))$
of an ideal $I$ of $A$ and a nondegenerate symmetric bilinear form on $I$
which satisfies the condition
\begin{equation}\label{I1}
( x,a\circ y)=( a\circ x,y)
\qquad (x,y\in I,~a\in A)~.
\end{equation}
\end{definition}
\begin{definition}
An increasing filtration of an algebra $A$ by ideals
\begin{equation}\nonumber
I_{\bullet}: 0\subset \cdots \subset~ I_k\subset~ I_{k+1}~\subset \cdots \subset A
\qquad (k\in \mathbb{Z})
\end{equation}
together with bilinear forms $(~,~)_k$ on $I_k/I_{k-1}$
is a {\it Frobenius filtration}
if $I_{\bullet}$ is exhaustive\footnote{
An increasing filtration $I_{\bullet}$ on
a finite dimensional vector space
$A$ is exhaustive if
there exist $k, l$ such that
$I_k=\{0\}$ and $I_l=A$.
}
and
$(I_k/I_{k-1},~(~,~)_k)$ is a Frobenius ideal of $A/I_{k-1}$.
\end{definition}
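To fix ideas, here is a minimal example of the definition (it is not used later): for $A=\mathbb{C}[n]/(n^2)$, the filtration $0=I_{-1}\subset I_0=(n)\subset I_1=A$, together with the pairings $(n,n)_0:=1$ on $I_0$ and $(\bar{1},\bar{1})_1:=1$ on $I_1/I_0$, is a Frobenius filtration: for $x\in I_0$ and $a=a_0+a_1n\in A$ one has $a\circ x=a_0x$, so the condition \eqref{I1} holds trivially on $I_0$, and the condition on $I_1/I_0$ is immediate since $A/I_0\cong\mathbb{C}$.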
For later use, we state the following
\begin{lemma}\label{lem:quotient}
If $(I_{\bullet},~(~,~)_{\bullet})$ is a Frobenius filtration
on an algebra $A$, then $(I_{\bullet}/I_k,~(~,~)_{\bullet})$
is a Frobenius filtration on the quotient algebra $A/I_k$
for any $k\in \mathbb{Z}$.
\end{lemma}
\section{Constructions of Frobenius filtration by a nilpotent element}
\label{section:nilpotent-quotient-constructions}
\subsection{Frobenius filtration defined by a nilpotent element}
\label{nilpotent-construction}
Let $(A,~~ \langle~~,~~\rangle)$ be a Frobenius algebra.
Assume that there exists a nilpotent element $n \in A$ of order $d$
(i.e. $n^d=0$, $n^{d-1}\neq 0$).
For $0\leq k \leq d$, we set $J_k:=\{ x \in A \mid x\circ n^k=0\}$.
Then we have a filtration
$$J_0=0\subset J_1 \subset \cdots \subset J_{d-1} \subset J_d=A$$
of ideals on $A$. Let $I=(n)$ be the ideal generated by $n$, and let
$I_k:=I+J_k$ ($0\leq k \leq d$). Then we have a filtration
\begin{equation}\label{nilp1}
I_{-1}:=0\subset I_0=I \subset I_1 \subset \cdots \subset I_{d-1} \subset I_d=A
\end{equation}
of ideals in $A$.
Next
we define a pairing $(~~,~~)_k$ on each graded quotient $I_k/I_{k-1}$.
Note that $I_k/I_{k-1}\cong J_k/ (J_{k-1}+I\cap J_k)$ for $k>0$.
\begin{definition}\label{nilp2}
(i) For $x, y \in I_0$, there are $\tilde{x}, \tilde{y}\in A$ such that
$x=n\circ \tilde{x}$ and $y=n\circ \tilde{y}$. We define the pairing $(~~,~~)_0$ on $I_0$ by
$$(x, y)_0=\langle \tilde{x}, \tilde{y}\circ n \rangle~.$$
(ii)
For $x, y\in I_k/I_{k-1}$ ($k>0$), we take representatives
$\tilde{x},~\tilde{y} \in J_k$ and set
$(x,y)_k:=\langle \tilde{x}, \tilde{y}\circ n^{k-1} \rangle$.
\end{definition}
It is easy to check that the pairing $(~~,~~)_k$ is well-defined.
\begin{lemma}
(i) The pairing $(~~,~~)_k$ on $I_k/I_{k-1}$ is symmetric and
satisfies $$(a\circ x, y)_k=(x, a\circ y)_k$$
for any $x, y \in I_k/I_{k-1}$ and $a\in A/I_{k-1}$.\\
(ii) The pairing $(~~,~~)_k$ is nondegenerate.
\end{lemma}
\begin{proof}
The first statement follows from the Frobenius property \eqref{A1}
of $\langle ~~, ~~\rangle$.
For the second, we first consider the case $k=0$.
The pairing $(~~,~~)_0$ is nondegenerate since
\begin{eqnarray*}
(x,y)_0=0\quad \forall y\in I_0 \quad
\Longleftrightarrow \langle \tilde{x}, n\circ \tilde{y}\rangle=0\quad \forall \tilde{y}\in A
&\Longleftrightarrow& x=n\circ \tilde{x}=0~.
\end{eqnarray*}
Next we consider the case $k>0$.
Consider the pairing $\langle~~,~~\rangle_k$ on $A$ defined by
$\langle a,b\rangle_k:=\langle a,b\circ n^{k-1}\rangle$.
Let $I^{\perp}:=\{a\in A \mid \langle a, b\rangle_k=0,~~\forall b\in I\}$.
Then it follows that $I^{\perp}=J_k$, since
\begin{eqnarray*}
\langle a, b\circ n^{k-1} \rangle=0, \quad \forall b \in I \quad
\Longleftrightarrow \quad
\langle a, \tilde{b}\circ n^{k} \rangle=0, \quad \forall \tilde{b} \in A \quad
\Longleftrightarrow \quad
a \in J_k~.
\end{eqnarray*}
The pairing $\langle~~,~~\rangle_k$ is degenerate precisely along
$J_{k-1}$. Then the orthogonal of $I/(I\cap J_{k-1})$ in $A/J_{k-1}$ with respect to this
pairing is $J_k/J_{k-1}$. Therefore the orthogonal $K_k$ of $J_k$ in $A$ is
$K_k=I+J_{k-1}$. It follows that $\langle~~,~~\rangle_k$ induces
a nondegenerate pairing on $J_k/(K_k\cap J_k)=J_k/(J_{k-1}+I\cap J_k)$. It is clear that
the induced pairing coincides with $(~~,~~)_k$ defined above. Thus
the pairing $(~~,~~)_k$ on $I_k/I_{k-1}$ is nondegenerate.
\end{proof}
To summarize, we have obtained the following
\begin{proposition}\label{prop-nilp}
If $(A,\langle~,~\rangle)$ is a Frobenius algebra with a nilpotent element $n$,
$\left(I_{\bullet}\, ,~ (~~,~~)_{\bullet}\right)$
defined in \eqref{nilp1} and Definition \ref{nilp2}
is a Frobenius filtration on $A$.
\end{proposition}
\begin{remark}\label{rem:mochizuki}
If a linear endomorphism $n$ on a vector space $A$ is nilpotent
of order $d$,
then we have a filtration on $A$ called the monodromy weight filtration
\begin{equation}\label{MWF}
0\subset W_0\subset W_1\subset\cdots\subset W_{2d-3}\subset W_{2d-2}=A~
\end{equation}
uniquely determined by the conditions \cite[p.93]{Cattani}:
\begin{equation}\nonumber
\begin{split}
&n(W_l)~~\subset~~ W_{l-2}~,\\
&n^j~:~W_{d+j-1}/W_{d+j-2}~\stackrel{\sim}{\rightarrow}~
W_{d-j-1}/W_{d-j-2} \quad (0\leq j<d)~.
\end{split}
\end{equation}
The filtration \eqref{nilp1} is related to the
monodromy weight filtration by
\begin{equation}\nonumber
\begin{split}
I_0&=\mathrm{Im}\,n~,
\\
I_k&
=\mathrm{Im}\,n+W_{k+d-2}
\quad (0<k\leq d)~.
\end{split}
\end{equation}
This filtration agrees with the filtration $(N_*A)_{\bullet}$
defined in \cite[\S 3.4]{Kashiwara86} for the filtration
$A_{\bullet}: 0=A_0\subset A_1=A$.
\end{remark}
\subsection{Frobenius filtration by quotient construction}
\label{quotient-construction}
As in \S \ref{nilpotent-construction},
let $(A,~\langle~,~\rangle)$ be a Frobenius algebra, $n\in A$
a nilpotent element of order $d$.
Let $J_k=\{x\in A\mid x\circ n^k=0\}$
and $I=(n)$.
By Lemma \ref{lem:quotient}, the Frobenius filtration
$(I_{\bullet},~(~,~)_{\bullet})$
constructed in \S \ref{nilpotent-construction}
induces a Frobenius filtration on the quotient algebra
$A'=A/I$. Explicitly, the induced sequence is
\begin{equation}\label{quot-filtration}
I_{\bullet}':0~\subset~ I_1'\subset\cdots \subset~ I_d'~=~A'~
\qquad (I_k':=(I+J_k)/I)~.
\end{equation}
The induced bilinear forms $( ~,~)'_k$ on $I_k'/I_{k-1}'$
are given by
\begin{equation}\label{quot-bilinear}
( a',b')_k'=( a,b )_k
\end{equation}
where $a,b\in J_k$ are elements whose images under
$J_k\hookrightarrow J_k+I\rightarrow I_k'$
are $a',b'\in I_k'$.
\begin{corollary}\label{prop-quotient}
If $(A,\langle~,~\rangle)$ is a Frobenius algebra with a nilpotent element
$n$,
$(I_{\bullet}'\,,~(~,~)_{\bullet}')$ defined in \eqref{quot-filtration},
\eqref{quot-bilinear}
is a Frobenius filtration on $A/I$.
\end{corollary}
\section{Examples I: cohomology of compact K\"ahler manifolds}
\label{example-cohomology}
\subsection{Examples}
Let $X$ be a compact K\"ahler manifold of
complex dimension $d-1$. Let $A=\oplus_{j=0}^{d-1}H^{2j}(X, \mathbb{C})$
be the even part of the cohomology of $X$. Then $A$ is a Frobenius algebra with
respect to the cup product $\cup$ and the usual intersection pairing
$$
\langle x , y \rangle =
\int_{{X}}x \cup y
\qquad (x, y \in H^*({X}, \mathbb{C})).
$$
Let $n\in H^2(X, \mathbb{C})$ be a class such that
either $n$ or $-n$ is a K\"ahler class.
Then $n$ is nilpotent of order $d$.
By the construction in \S \ref{nilpotent-construction},
we get a Frobenius filtration
$$0\subset I_0 \subset I_1 \subset \cdots \subset I_{d-1} \subset I_d=A~.$$
\begin{example}\label{example-Pn}
Let $X=\mathbb{P}^{d-1}$, $L=\mathcal{O}_{\mathbb{P}^{d-1}}(m)$
($m\in \mathbb{Z}$, $m\neq 0$) and $n=c_1(L)$.
Then $A:=H^*(X,\mathbb{C})\cong \mathbb{C}[n]/(n^d)$
and the intersection form is given by
$\langle n^i,n^j\rangle=m^{i+j}\delta_{i+j,d-1}$.
The Frobenius filtration on $A$
defined by $n$ is given as follows. The filtration is
$$I_0=\bigoplus_{i=1}^{d-1} \mathbb{C}n^i,\qquad I_k=I_0 \quad (1\leq k \leq d-1),\qquad I_d=I_0\oplus \mathbb{C}1~.$$
If we set $e_i:=n^i/m^i$ ($1\leq i \leq d-1$) and $e_{d}:=1$,
the pairings are
$$(e_i, e_j)_0=\frac{1}{m}\delta_{i+j, d}~,\qquad (e_d, e_d)_d=m^{d-1}~.$$
\end{example}
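The following short numerical sketch (included for illustration only; Python with NumPy is used merely as a calculator) verifies the pairings of this example for $d=5$ and $m=3$ directly from Definition \ref{nilp2}.
\begin{verbatim}
# Illustration only: a numerical check of the pairings in this example.
# Here A = C[n]/(n^d) with <n^i, n^j> = m^{i+j} delta_{i+j,d-1}, and the
# nilpotent element is n.
import numpy as np

d, m = 5, 3.0                                   # e.g. X = P^4, L = O(3)

def mult(a, b):
    """Product in C[n]/(n^d) on coefficient vectors w.r.t. 1, n, ..., n^{d-1}."""
    return np.convolve(a, b)[:d]

def pairing(a, b):
    """<a, b> = m^{d-1} * (coefficient of n^{d-1} in a*b)."""
    return m ** (d - 1) * mult(a, b)[d - 1]

def npow(i):
    v = np.zeros(d)
    v[i] = 1.0                                  # the element n^i
    return v

def pair0(xt, yt):
    """(x, y)_0 for x = n o xt, y = n o yt, namely <xt, yt o n>."""
    return pairing(xt, mult(yt, npow(1)))

for i in range(1, d):
    for j in range(1, d):
        lift_i = npow(i - 1) / m ** i           # e_i = n^i/m^i = n o lift_i
        lift_j = npow(j - 1) / m ** j
        expected = 1.0 / m if i + j == d else 0.0
        assert np.isclose(pair0(lift_i, lift_j), expected)

# (e_d, e_d)_d = <1, 1 o n^{d-1}> = m^{d-1}
assert np.isclose(pairing(npow(0), npow(d - 1)), m ** (d - 1))
print("pairings verified for d =", d, "and m =", m)
\end{verbatim}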
\begin{example}\label{example-surface}
Let $(X, L)$ be a polarized algebraic surface and $n=c_1(L)$.
Then by the Lefschetz decomposition, we have
$$H^2(X, \mathbb{C})=H^2_{\rm prim}(X, \mathbb{C}) \oplus \mathbb{C}n~,$$
where $H^2_{\rm prim}(X, \mathbb{C})$ is the kernel of
$n\, \cup : H^2(X,\mathbb{C}) \to H^4(X, \mathbb{C})$.
The Frobenius filtration on $A=H^{\rm even} (X, \mathbb{C})$
defined by $n$ is given as follows. The filtration is:
\begin{eqnarray*}
I_0&=&\mathbb{C}n\oplus \mathbb{C}n^2~,
\\
I_1&=&I_0\oplus H^2_{\rm prim}(X, \mathbb{C})~,
\\
I_2&=&I_1~,
\\
I_3&=&I_1\oplus \mathbb{C}1~.
\end{eqnarray*}
The pairings are:
$$(n, n^2)_0=K,\qquad (n,n)_0=(n^2,n^2)_0=0~,$$
$$(x ,y)_1=\int_Xx \cup y~,$$
$$(1,1)_3=K~,$$
where $K=\int_X n^2$.
\end{example}
\subsection{Remarks on a mixed Hodge structure for Example \ref{example-surface}}
\label{sec:MHS}
In this subsection, we explain that the filtration
$I_{\bullet}$ in Example \ref{example-surface}
can be regarded as a weight filtration of an $\mathbb{R}$-mixed Hodge structure.
See e.g. \cite[\S 3.1]{PetersSteenbrink} for a definition of the $\mathbb{R}$-mixed Hodge structure.
Although the result of this subsection is not used in the rest of the paper,
it is relevant to local mirror symmetry. See Remark \ref{rem:localB}.
Let $c\in \sqrt{-1}\mathbb{R}$ be a nonzero purely imaginary number.
Let $X$ and $n$ be as in Example \ref{example-surface}.
We set
\begin{equation}\nonumber
\begin{split}
A_{\mathbb{R}}&=\{x\cup e^{ cn}\mid x\in H^{even}(X,\mathbb{R}) \}
\\
&=\mathbb{R}\Big(1+cn+\frac{c^2}{2}n^2\Big)\oplus
\mathbb{R}(n+cn^2)\oplus H_{\mathrm{prim}}^2(X,\mathbb{R})
\oplus \mathbb{R}n^2~.
\end{split}
\end{equation}
We consider the increasing filtration
$\mathcal{W}_{\bullet}$ on $A_{\mathbb{R}}$ given by
\begin{equation}\nonumber
0\subset
\mathcal{W}_1=\mathbb{R}(n+cn^2)\oplus \mathbb{R}n^2\subset
\mathcal{W}_2=\mathcal{W}_1\oplus H^2_{\mathrm{prim}}(X,\mathbb{R})
=\mathcal{W}_3
\subset
\mathcal{W}_4=A_{\mathbb{R}}~.
\end{equation}
Notice that $\mathcal{W}_{k}\otimes \mathbb{C}=I_{k-1}$ holds.
We also consider the decreasing filtration $\mathcal{F}^{\bullet}$
on $A_{\mathbb{R}}\otimes \mathbb{C}=A$ given by
the degree of the cohomology:
\begin{equation}\nonumber
0\subset \mathcal{F}^{2}=H^0(X,\mathbb{C})\subset
\mathcal{F}^1=H^0(X,\mathbb{C})\oplus H^2(X,\mathbb{C})
\subset
\mathcal{F}^0=A~.
\end{equation}
Then it is not difficult to check the following
\begin{proposition}
(i) $(\mathcal{W}_{\bullet},\mathcal{F}^{\bullet})$
is an $\mathbb{R}$-mixed Hodge structure on $A_{\mathbb{R}}$.
\\
(ii) If we set
$$A^{p,q}=\mathcal{F}^p({\rm Gr}_k^{\mathcal{W}}A)
\cap \overline{\mathcal{F}^q({\rm Gr}_k^{\mathcal{W}}A)} \quad (p+q=k),$$
then the Hodge decomposition $A\cong \oplus_{1\leq k \leq 4} \oplus_{p+q=k} A^{p,q}$
is given by the following table.
\begin{equation}\nonumber
\begin{array}{|r|r|c|c|}\hline
A^{p,k-p} &p=2&1 &0\\\hline
k=1& &\mathbb{C}n &\mathbb{C}(n^2+\frac{1}{2c}n)\\
2 & 0&H^2_{\rm prim}&0\\
3 &0&0&0\\
4 & \mathbb{C}1&0&0\\\hline
\end{array}~~~~
\end{equation}
\end{proposition}
Next we explain that the pairings $(~~,~~)_{\bullet}$ induce
a polarization on the above $\mathbb{R}$-mixed Hodge structure.
We assume the following conditions:
\begin{equation}\label{eq:conditions}
\begin{cases}
h^{2,0}(X)=h^{0,2}(X)=0, \\
b_1(X) ~~\mathrm{is~~even}, \\
\int_X n^2=K>0.
\end{cases}
\end{equation}
Then the pairing $\langle~~,~~\rangle$ is negative definite on $H^2_{\rm prim}(X, \mathbb{R})$.
See e.g. \cite[\S IV, Corollary 2.15]{BHPV}.
For simplicity, we set $c=\sqrt{-1}$.
Let us define the $(-1)^k$-symmetric bilinear form
$Q_k: {\rm Gr}_k^{\mathcal{W}}A_{\mathbb{R}} \times {\rm Gr}_k^{\mathcal{W}}A_{\mathbb{R}} \to \mathbb{R}$ by
$$
Q_k(x,y)=\frac{(\sqrt{-1})^k}{2}\left\{
(Cx\, ,\, y)_{k-1}+(-1)^k(x\, ,\, Cy)_{k-1}
\right\},
$$
where $C|_{A^{p,q}}=(\sqrt{-1})^{p-q}$ is the Weil operator.
Explicitly, $Q_k$ is given as follows:
$$
Q_1(n^2\, ,\, n+\sqrt{-1}n^2)=K,\qquad Q_1(n+\sqrt{-1}n^2\,, \, n+\sqrt{-1}n^2)=Q_1(n^2\, ,\, n^2)=0,
$$
$$
Q_2(x,y)=-\int_X x\cup y~,
$$
$$
Q_4(1,1)=K~.
$$
Then the Hermitian forms $H_k(x,y):=Q_k(Cx,\overline{y})$ are positive definite, and we have
the following
\begin{proposition}
Under the conditions \eqref{eq:conditions},
$(\mathcal{W}_{\bullet}, \mathcal{F}^{\bullet}, Q_{\bullet})$ is a graded polarized
$\mathbb{R}$-mixed Hodge structure on $A_{\mathbb{R}}$.
\end{proposition}
\section{Examples II: cohomology of some non-compact orbifolds}
\label{example-orbifold}
Let $\mathcal{X}$ be an algebraic orbifold.
We denote by $H^*_{\rm orb}(\mathcal{X}, \mathbb{C})$
the orbifold cohomology ring of $\mathcal{X}$ introduced by Chen--Ruan \cite{CR}.
If $\mathcal{X}$ is compact, $H^*_{\rm orb}(\mathcal{X}, \mathbb{C})$
has the Poincar\'e duality pairing $\langle~,~\rangle$
given by
$$
\langle x , y \rangle =
\int_{\mathcal{X}}x \cup_{\rm orb} y
\qquad (x, y \in H^*_{\rm orb}(\mathcal{X}, \mathbb{C})),
$$
where $\cup_{\rm orb}$ denotes Chen--Ruan's product.
It makes $H^*_{\rm orb}(\mathcal{X}, \mathbb{C})$ into a Frobenius algebra.
See \cite[Theorem 4.6.6]{CR}.
The following examples can be computed by the results of \cite{BCS, Jiang, Mann}.
\begin{example}
Let $\mathcal{X}=[\mathbb{C}^d/\mathbb{Z}_d]$ be the quotient orbifold
of type $\frac{1}{d}(1,\ldots, 1)$ (in the notation of \cite{Reid})
and $\overline{\mathcal{X}}=\mathbb{P} (1,\ldots, 1, d)$
be the $d$-dimensional weighted projective space of weight $(1,\ldots, 1, d)$.
The latter is a compactification of the former. The divisor
$D=\overline{\mathcal{X}}\setminus \mathcal{X}$ is
Poincar\'e dual to $dH \in H^2_{\rm orb} (\overline{\mathcal{X}}, \mathbb{C})$,
where $H:=c_1(\mathcal{O}_{\mathbb{P} (1,\ldots, 1, d)}(1))$.
If we denote by $E \in H^2_{\rm orb}(\overline{\mathcal{X}}, \mathbb{C})$
the image of the unit class on the twisted sector $\mathbb{P}(d)$ of age $1$
of the inertia orbifold of $\overline{\mathcal{X}}$,
then $H^*_{\rm orb}(\overline{\mathcal{X}}, \mathbb{C})$
is isomorphic to $\mathbb{C}[H, E] / (H^d-E^d, HE)$
as an algebra. The pairings on $H^*_{\rm orb}(\overline{\mathcal{X}}, \mathbb{C})$ are
determined by
\begin{equation}\nonumber
\begin{split}
\langle H^i , H^j\rangle
&= \langle E^i , E^j \rangle
=\frac{1}{d}\delta_{i+j,d}\quad(0\leq i,j\leq d)~,\\
\langle H^i,E^j\rangle&=0\quad (1\leq i,j\leq d)~.
\end{split}\end{equation}
(i) First, we apply the nilpotent construction
to $H^*_{\rm orb}(\overline{\mathcal{X}}, \mathbb{C})$
with the nilpotent element $n:=dH$ of order $d+1$. Then
we obtain the following Frobenius filtration on $H^*_{\rm orb}(\overline{\mathcal{X}}, \mathbb{C})$.
The filtration $I_{\bullet}$ of ideals is
$$I_0=(H) \subset I_1=\cdots =I_d=(H ,E) \subset I_{d+1}=H^*_{\rm orb}(\overline{\mathcal{X}}, \mathbb{C})~.$$
The pairings $(~~,~~)_{\bullet}$ on graded quotients are
$$
(H^i,H^j)_0=\frac{1}{d^2}\delta_{i+j,d+1}, \quad
(E^i, E^j)_1=\frac{1}{d}\delta_{i+j,d}, \quad
(1,1)_{d+1}=d^{d-1}.
$$
(ii) Next, we take the quotient of $H^*_{\rm orb}(\overline{\mathcal{X}}, \mathbb{C})$
by the ideal $I_0=(n)$. The quotient algebra is isomorphic to
$H^*_{\rm orb}({\mathcal{X}}, \mathbb{C})\cong \mathbb{C}[E]/(E^d)$.
The induced Frobenius filtration $\left(I_{\bullet}', (~~,~~)'_{\bullet} \right)$ on the quotient is
identical to the one on
$H^*(\mathbb{P}^{d-1})$ given in Example \ref{example-Pn}
with $m=d$.
\end{example}
\begin{example}
Let $\mathcal{X}=[\mathbb{C}^3/\mathbb{Z}_4]$ be the quotient orbifold
of type $\frac{1}{4}(1,1, 2)$
and $\overline{\mathcal{X}}=\mathbb{P} (1, 1, 2, 4)$.
The latter is a compactification of the former. The divisor
$D=\overline{\mathcal{X}}\setminus \mathcal{X}$ is
Poincar\'e dual to $4H \in H^2_{\rm orb} (\overline{\mathcal{X}}, \mathbb{C})$,
where $H:=c_1(\mathcal{O}_{\mathbb{P} (1,1, 2, 4)}(1))$.
If we denote by $E_1$ (resp. $E_2$) $\in H^2_{\rm orb}(\overline{\mathcal{X}}, \mathbb{C})$
the image of the unit class on the twisted sector $\mathbb{P}(4)$
(resp. $\mathbb{P}(2,4)$) of age $1$ of the inertia orbifold of $\overline{\mathcal{X}}$,
then $H^*_{\rm orb}(\overline{\mathcal{X}}, \mathbb{C})$
is isomorphic to $\mathbb{C}[H, E_1, E_2] / (H^2-E_2^2,\, HE_1,\, E_1^2-2HE_2)$
as an algebra. The pairings on $H^*_{\rm orb}(\overline{\mathcal{X}}, \mathbb{C})$
are determined by
$$
\langle H^i , H^j\rangle =\frac{1}{8}\delta_{i+j,3}
$$
together with the relations in the algebra and the Frobenius property \eqref{A1}.\\
(i) Applying the nilpotent construction
to $H^*_{\rm orb}(\overline{\mathcal{X}}, \mathbb{C})$
with the nilpotent element $n:=4H$ of order $4$, we obtain the
following Frobenius filtration on $H^*_{\rm orb}(\overline{\mathcal{X}}, \mathbb{C})$.
The filtration $I_{\bullet}$ of ideals is
$$I_0=(H) \subset I_1=(H ,E_1) \subset
I_2=I_3=(H, E_1, E_2)\subset
I_4=H^*_{\rm orb}(\overline{\mathcal{X}}, \mathbb{C})~.$$
The pairings $(~~,~~)_{\bullet}$ on graded quotients are
$$
(H^i,H^j)_0=\frac{1}{32}\delta_{i+j,4}, \quad
(E_1, E_1E_2)_1=\frac{1}{4}, \quad
(E_2, E_2)_2=\frac{1}{2}, \quad
(1,1)_{4}=8.
$$
(ii) Taking the quotient of $H^*_{\rm orb}(\overline{\mathcal{X}}, \mathbb{C})$
by the ideal $I_0=(n)$, we obtain the following Frobenius filtration on
$H^*_{\rm orb}({\mathcal{X}}, \mathbb{C})\cong \mathbb{C}[E_1, E_2]/(E_1^2, E_2^2)$:
$$
I_1'=\mathbb{C}E_1\oplus \mathbb{C}E_1E_2 \subset
I_2'=I_3'=I_1'\oplus \mathbb{C}E_2\subset
I_4'=H^*_{\rm orb}({\mathcal{X}}, \mathbb{C}).
$$
\end{example}
\section{The mixed Frobenius structure}
\label{section:MHS}
In this section, a manifold is a complex manifold.
The holomorphic tangent bundle (resp. the holomorphic cotangent bundle)
of a manifold $M$ is denoted by $TM$ (resp. $T^*M$).
All vector bundles on $M$ are assumed to be holomorphic.
The space of holomorphic sections on an open set $U\subset M$
of a vector bundle $\mathbb{E}\to M$ is
denoted by $\Gamma(U,\mathbb{E})$.
By $\Gamma(\mathbb{E})$ we mean $\Gamma(U,\mathbb{E})$ for some open subset
$U\subset M$.
The dual vector bundle of $\mathbb{E}$ is denoted by $\mathbb{E}^{\vee}$.
\subsection{Preliminaries}
We say that a subbundle $I\subset TM$ is {\it $E$-closed}
for a given global vector field $E$ on $M$
if
\begin{equation}
[E,x]~\in~\Gamma(I)~\quad (x~\in~\Gamma(I))~.
\end{equation}
Notice that when this holds, the Lie bracket
$[E,\ast]$ induces a derivation on
the quotient bundle $TM/I$.
We say that a subbundle $I\subset TM$ is {\it $\nabla$-closed}
for an affine connection $\nabla$ if
\begin{equation}
\nabla_z \,x~\in~\Gamma(I)~\quad
( x~\in \Gamma(I)~,
~~z~\in~\Gamma(TM))~.
\end{equation}
Notice that when this holds, $\nabla$ induces a connection on $TM/I$.
\subsection{The Mixed Frobenius Structure}
Recall the definition of the Frobenius structure \cite{Dubrovin}.
\begin{definition}\label{def:Frobenius-structure}
A Frobenius structure of charge $D\in \mathbb{C}$ on a manifold $M$
consists of a structure of the Frobenius algebra
$(A_t,\langle~,~\rangle_t)$
on each tangent space $T_t M$ ($t\in M$) depending complex analytically on $t$,
and a globally defined vector field $E$ on $M$ called the Euler vector field
satisfying the following conditions.
\begin{itemize}
\item The Levi--Civita connection $\nabla$ of the metric $\langle~,~\rangle$ is flat
and the unit vector field $e$ is $\nabla$-flat (i.e. $\nabla e=0$).
\item
The bundle homomorphism $c: TM^{\otimes 3}\to \mathcal{O}_M$
defined by $c(x,y,z)=\langle x,y\circ z\rangle$ satisfies
\begin{equation}\label{c1}
(\nabla_w c) (x,y,z)=(\nabla_z c)(x,y,w)~,
\end{equation}
where $\nabla$ is the induced connection on $T^*M^{\otimes 3}$.\footnote{
This condition together with the Frobenius property \eqref{A1}
implies that $\nabla c: TM^{\otimes 4} \to \mathcal{O}_M$ is symmetric.}
\item The Euler vector field $E$ satisfies $\nabla\nabla E=0$
(here the leftmost $\nabla$ is the induced connection on $\mathrm{End}\,(TM)$)
and
\begin{eqnarray}
\label{E1}
&[E,x\circ y]-[E,x]\circ y-x\circ[E,y]=x\circ y \quad
(x,y\in \Gamma(TM))~,
\\
\label{E3}
&E\langle x,y\rangle-
\langle\, [E,x],y\rangle-
\langle x,[E,y]\,\rangle
=(2-D)\langle x,y\rangle~\quad
(x,y\in \Gamma(TM))~.
\end{eqnarray}
\end{itemize}
\end{definition}
Now we generalize the Frobenius structure
to incorporate the Frobenius filtration.
\begin{definition}\label{def:MFS}
A mixed Frobenius structure $(\nabla,~E,~\circ, ~I_{\bullet},~(~,~)_{\bullet})$
on a manifold $M$ of reference charge $D\in \mathbb{C}$
consists of
\begin{itemize}
\item a torsion free, flat connection $\nabla$ on the tangent bundle $TM$,
\item a global vector field $E$ satisfying $\nabla\nabla E=0$
called the Euler vector field,
\item
a fiberwise multiplication on every tangent space $T_t M$
depending complex analytically
on $t$ such that the unit vector field $e$ is $\nabla$-flat,
\item
a Frobenius filtration
$(I_{\bullet,t}~,~~ (~,~)_{\bullet,t})$
on every tangent space $T_t M$ depending complex analytically on $t$
such that $I_k$ are $\nabla$-closed and
$E$-closed subbundles of $TM$.
\end{itemize}
These must
satisfy the following
compatibility conditions.
Let $\pi_k:TM\to TM/I_{k-1}$ be the quotient map and let
$\circ_k$ be the induced multiplication on the quotient bundle $TM/I_{k-1}$.
Let $[E,\ast]_k$ and $\nabla^{(k)}$
be the derivation and the connection on $TM/I_{k-1}$ induced from $[E,\ast]$
and $\nabla$.
\begin{itemize}
\item The connection $\nabla$ and the bilinear forms $(~,~)_k$ must be
compatible in the sense that
\begin{equation}\label{eta-const}
z( x,y)_k=(\nabla^{(k)}_z x,y)_k+
( x,\nabla^{(k)}_z y )_k~\quad
(x,y\,\in\,\Gamma(I_k/I_{k-1}),~
z\,\in\, \Gamma(TM))~.
\end{equation}
\item
The vector bundle homomorphism $c_k: I_k/I_{k-1} \otimes I_k/I_{k-1}\otimes
TM \rightarrow \mathcal{O}_M$
defined by
\begin{equation}
c_k(x,y,z):=( x, ~\pi_k(z)\circ_k y )_k~,
\end{equation}
must satisfy
\begin{equation}\label{c-symmetry}
(\nabla^{(k)}_w c_k)(x,y,z)=(\nabla^{(k)}_z c_k)(x,y,w)~\quad
(x,y\in \Gamma(I_k/I_{k-1}),
z,w\in \Gamma(TM))~.
\end{equation}
Here $\nabla^{(k)}$ is the induced connection on
$(I_k/I_{k-1})^{\vee}\otimes (I_k/I_{k-1})^{\vee}\otimes T^*M$.
\item The Euler vector field $E$ must satisfy
\begin{eqnarray}
\label{E-multiplication}
&[E, x\circ_k \pi_k(z)]_k-x\circ_k\pi_k([E,z])
-[E,x]_k\circ_k\pi_k(z)=x\circ_k\pi_k(z)~,
\\
\label{E-metric}
&E (x,y)_k-( [E,x]_{k},y)_k
-( x,[E,y]_{k})_k=(2-D+k)( x,y)_k~
\\
&\nonumber\qquad
(x,y\,\in\,\Gamma(I_k/I_{k-1}),~
z\,\in\, \Gamma(TM))~.
\end{eqnarray}
\end{itemize}
\end{definition}
\subsection{Flat coordinates}
We write the conditions for the mixed Frobenius structure
in a local coordinate expression.
Let $m_k$ ($k\in\mathbb{Z}$) be the rank of $I_k/I_{k-1}$.
The flatness, the
torsion-free condition for $\nabla$
and the $\nabla$-closedness of $I_k$'s imply that
there exists on $M$ a system of local coordinates
$t^{ka}$ ($k\in \mathbb{Z}$, $1\leq a\leq m_k$)
which satisfies the following two conditions:
$$
\nabla \frac{\partial}{\partial t^{ka}}=0 \qquad
(k\in \mathbb{Z},~1\leq a\leq m_k)~,
$$
\begin{equation}\label{flat-frame}
\left \{
\frac{\partial}{\partial t^{la}} ~{\Big |}~ l\leq k, 1\leq a\leq m_l
\right\}\quad
\text{is a local frame of $I_k$.}
\end{equation}
Now assume that one such system of flat coordinates is fixed.
Then we can naturally regard $
\{\frac{\partial}{\partial t^{ka}} \}_{1\leq a\leq m_k}
$ as
a local frame of
$I_k/I_{k-1}$.
We sometimes use the shorthand notation $\partial_{ka}=\frac{\partial}{\partial t^{ka}}$.
Let $\eta^{(k)}$ be the matrix representation of $(~,~)_k$:
\begin{equation}\nonumber
\eta_{ka,kb}=\Big( \frac{\partial}{\partial t^{ka}}\, ,\,
\frac{\partial}{\partial t^{kb}}\Big)_k
\qquad (1\leq a,b\leq m_k)~,\quad
\eta^{(k)}:=(\eta_{ka,kb})~.
\end{equation}
Let $C_{{ka},{lb}}^{jc}$ be the structure constant of the multiplication:
\begin{equation}\nonumber
\frac{\partial}{\partial t^{ka}}\circ
\frac{\partial}{\partial t^{lb}}=
\sum_{j\in \mathbb{Z},\,1\leq c\leq m_j}
C_{{ka},{lb}}^{jc}\,\frac{\partial}{\partial t^{jc}}~.
\end{equation}
Let us write the Euler vector field $E$ as
\begin{equation}\label{defEka}
E=\sum_{k\in\mathbb{Z},\,1\leq a\leq m_k}
E^{ka}\, \frac{\partial}{\partial t^{ka}}~.
\end{equation}
Then the conditions are summarized as follows.
(We omit the associativity and the commutativity conditions.)
\begin{itemize}
\item
$\eta^{(k)}$ is a symmetric invertible matrix
since $(~,~)_k$ is symmetric
and nondegenerate. It is a constant matrix by (\ref{eta-const}).
\item
The condition that $I_k$ is an ideal is equivalent to
$C_{ka,lb}^{jc}=0$ if $k<j$ or $l<j$.
The compatibility of $(~,~)_k$ and the multiplication (see \eqref{I1})
is equivalent
to
\begin{equation}
\sum_{1\leq d\leq m_k}C_{lc,kb}^{kd}\eta_{kd,ka}
=\sum_{1\leq d\leq m_k}C_{lc,ka}^{kd}\eta_{kd,kb}~.
\end{equation}
\item The condition \eqref{c-symmetry} (with the nondegeneracy of $\eta^{(k)}$)
is equivalent to
\begin{equation}\label{c-symmetry2}
\begin{split}
&\frac{\partial}{\partial {t^{jd}}}C_{ka,lc}^{kb}=
\frac{\partial}{\partial {t^{lc}}}C_{ka,jd}^{kb}
\qquad(l,j\geq k)~,
\\
&\frac{\partial}{\partial {t^{jd}}}C_{ka,lc}^{kb}=0
\qquad(j<k)~.
\end{split}
\end{equation}
\item
$\nabla\nabla E=0$ and $E$-closedness of $I_k$'s imply
\begin{equation}\label{E-linear}
\frac{\partial^2}{\partial t^{jc} \partial t^{lb}}\,
E^{ka}=0\quad ({}^{\forall} l,j)~,\quad
\frac{\partial}{\partial t^{lb}}\, E^{ka}=0 \quad (l<k)~.
\end{equation}
\item
Eq.\eqref{E-multiplication} together with \eqref{c-symmetry2}, \eqref{E-linear}
is equivalent to
\begin{equation}\label{E-multiplication2}
\sum_{\begin{subarray}{c}j\geq k,\\1\leq d\leq m_j\end{subarray}}
\partial_{la}(E^{jd}C_{jd,kb}^{kc})
-\sum_{1\leq d\leq m_k}C_{la,kb}^{kd}(\partial_{kd}E^{kc})
+\sum_{1\leq d\leq m_k}C_{la,kd}^{kc}(\partial_{kb}E^{kd})
=C_{la,kb}^{kc}~.
\end{equation}
\item
Eq.\eqref{E-metric} is equivalent to
\begin{equation}\label{E-metric2}
\sum_{1\leq c\leq m_k} \big( \eta_{kb,kc}
(\partial_{ka}E^{kc})+\eta_{ka,kc}(\partial_{kb}E^{kc})\big)
=(2-D+k) \, \eta_{ka,kb}~.
\end{equation}
\end{itemize}
\subsection{Mixed Frobenius structure on a transversal slice.}
\label{sec:transversal}
Let $(\nabla,~E,~\circ, ~I_{\bullet},~(~,~)_{\bullet})$ be a
mixed Frobenius structure of reference charge $D$ on $M$.
Let $m_k$ be the rank of $I_k/I_{k-1}$.
Fix a system of flat coordinates $t^{ka}$
($k\in \mathbb{Z}$, $1\leq a\leq m_k$) on $M$
(see \eqref{flat-frame}).
Since each subbundle $I_k\subset TM$ is involutive,
it defines a foliation on $M$. Let $M_k$ be a leaf, i.e.
in a neighborhood $U\subset M$
where the coordinates $t^{ka}$ are well-defined,
\begin{equation}\label{eq:leaf}
M_k\cap U=\{t^{la}=\text{constant}\mid l>k\}~.
\end{equation}
Let $$
M^{(k+1)}=\{t^{la}=\text{constant}~\mid~ l\leq k,~1\leq a\leq m_l \} \subset
U~.
$$
This is transversal to the leaf $M_k$.
Then, locally on $U$,
we have
the direct sum decomposition $TM=TM^{(k+1)}\oplus I_k$
and
obtain
the isomorphism $TM^{(k+1)}\cong TM/I_k$.
Let $E^{(k+1)}$ be the vector field on $M^{(k+1)}$ induced from
the Euler vector field $E$: if we use the flat coordinate expression
in \eqref{defEka},
$$
E^{(k+1)}=\sum_{\begin{subarray}{c}l\geq k+1,\\
1\leq a\leq m_l\end{subarray}}
E^{la}\, \frac{\partial}{\partial t^{la}}~
\quad \Big(\text{just drop the terms $\frac{\partial}{\partial t^{ma}}$ for $m\leq k$ in $E$} \Big).
$$
This is a well-defined vector field on $M^{(k+1)}$ since
$E^{la}$ ($l\geq k+1$) is independent of $t^{mb}$ ($m\leq k$), see \eqref{E-linear}.
\begin{lemma}\label{lem:quotient-construction2}
$(\nabla^{(k+1)},~E^{(k+1)},~\circ_{k+1}, ~I_{\bullet}/I_k,~(~,~)_{\bullet})$ is a
mixed Frobenius structure of reference charge $D$ on $M^{(k+1)}$.
\end{lemma}
\section{Construction by a nilpotent vector field}
\label{section:nilpotent-quotient-construction2}
\subsection{Construction by a nilpotent vector field}
\label{ffm}
Let $(\circ,~\langle~,~\rangle,~E)$ be a Frobenius structure on a
manifold $M$ of charge $D$. Let $\nabla$ be the Levi--Civita connection of
the metric $\langle ~,~\rangle$.
Assume that there exists
a global vector field $n$ satisfying the following conditions:
\begin{eqnarray}
\nonumber
&n^d=\underbrace{n\circ n\circ \cdots \circ n}_{\text{$d$ times}}= 0~,\quad n^{d-1}\neq 0~,\\
&\label{condition-E-n}
[E,n]=0~,
\\
&\label{condition-nabla-n}
\nabla(n\circ x)=n\circ \nabla x~\quad(x\in \Gamma(TM))~.
\end{eqnarray}
The condition \eqref{condition-nabla-n} implies that
the map $n\, \circ :TM\to TM$ is a flat bundle homomorphism.
So the ranks of the kernel and the image of
$n\, \circ$ are constant.
As in \eqref{nilp1},
we define
\begin{equation}\nonumber
I=\mathrm{Im}\,(n\, \circ)~, \quad
J_k=\mathrm{Ker}\,(n^k\, \circ) \quad (1\leq k\leq d)~.
\end{equation}
Let $I_0=I$, $I_k=I+J_k$.
\begin{lemma} $I,J_k,I_k$ are $\nabla$-closed and $E$-closed.
\end{lemma}
\begin{proof}
1. $I$ is $\nabla$-closed: for $x=n\circ \tilde{x}\in \Gamma(I)$
and any vector field $y$,
$$\nabla_y \,x= \nabla_y (n\circ \tilde{x})
\stackrel{\eqref{condition-nabla-n}}{=}n\circ \nabla_y \,\tilde{x} ~\in ~\Gamma(I)~.$$
2. $J_k$ is $\nabla$-closed:
$$
x\in \Gamma(J_k)~~\Leftrightarrow~~n^k\circ x=0~~\Rightarrow~~
n^k\circ \nabla_y\,x\stackrel{\eqref{condition-nabla-n}}{=}\nabla_y(n^k\circ x)=0
~~\Leftrightarrow~~\nabla_y\, x~\in~ \Gamma(J_k)~.
$$
3. $I$ is $E$-closed: for $x=n\circ \tilde{x}\in \Gamma(I)$, we have
$$
[E,x]=[E,n\circ \tilde{x}]\stackrel{\eqref{E1}}{=}
[E,n]\circ \tilde{x}+n\circ[E,\tilde{x}]+n\circ\tilde{x}
\stackrel{\eqref{condition-E-n}}{=}n\circ ([E,\tilde{x}]+\tilde{x})~\in ~\Gamma(I)~.
$$
4. $J_k$ is $E$-closed:
by \eqref{E1}, \eqref{condition-E-n} and the induction on $k$,
we can show that
\begin{equation} \label{E2}
[E,n^k]=(k-1)n^k~\qquad (1\leq k\leq d)~.
\end{equation}
We have
$$
x\in \Gamma(J_k)~~\Leftrightarrow~~n^k\circ x=0~~\Rightarrow~~
n^k\circ [E,x]\stackrel{\eqref{E1}}{=}[E,n^k\circ x]-[E,n^k]\circ x-n^k\circ x
\stackrel{\eqref{E2}}{=}
0~.
$$
\end{proof}
Define the bilinear form $(~,~)_k$ on $I_k/I_{k-1}$
as in Definition \ref{nilp2}. Then we have a Frobenius filtration of subbundles
$(I_{\bullet},\,(~,~)_{\bullet}\,)$ on $TM$.
\begin{theorem}\label{thm:nilpotent2}
If $(\circ,\langle~,~\rangle,E)$ is a Frobenius structure on $M$
of charge $D$ with a nilpotent global vector field $n$
satisfying \eqref{condition-E-n} and \eqref{condition-nabla-n},
then
the Levi--Civita connection $\nabla$,
the Euler vector field $E$, the multiplication $\circ$
and the Frobenius filtration $(I_{\bullet},\,(~,~)_{\bullet}\,)$
(defined in Eq.\eqref{nilp1} and Definition \ref{nilp2})
form
a mixed Frobenius structure on $M$ of reference charge $D+1$.
\end{theorem}
\begin{proof}
We have to check the conditions \eqref{eta-const}, \eqref{c-symmetry},
\eqref{E-multiplication} and \eqref{E-metric}.
We first show the case $k\geq 1$.
For $x,y\in \Gamma(I_k/I_{k-1})$ $(k\geq 1)$,
take representatives $\tilde{x},\tilde{y}\in \Gamma(J_k)$.
Let $z,w $ be vector fields on $M$.
Eq. \eqref{eta-const} :
\begin{equation}\nonumber
\begin{split}
z(x,y)_k=&z\langle \tilde{x},\tilde{y}\circ n^{k-1} \rangle
\\
=&\langle \nabla_z\,\tilde{x}, \tilde{y}\circ n^{k-1} \rangle
+\langle \tilde{x},\nabla_z (\tilde{y}\circ n^{k-1})\rangle
\\
\stackrel{\eqref{condition-nabla-n}}{=}&
\langle \nabla_z\,\tilde{x}, \tilde{y}\circ n^{k-1} \rangle
+\langle \tilde{x},(\nabla_z\, \tilde{y})\circ n^{k-1}\rangle
\\
=&(\nabla_z^{(k)}\, x,y)_k+(x,\nabla_z^{(k)}\,y)_k~.
\end{split}
\end{equation}
Eq. \eqref{c-symmetry} :
\begin{equation}\nonumber
\begin{split}
(\nabla_w c_k)(x,y,z)
=&w(x,~y\circ_k \pi_k(z))_k-(\nabla_w^{(k)}\,x,~y\circ_k\pi_k(z))_k
\\
&-(x,~\pi_k(z)\circ_k \nabla_w^{(k)}\,y)_k-(x,~y\circ_k \pi_k(\nabla_w \,z))_k
\\
=&w\langle \tilde{x},~\tilde{y}\circ z \circ n^{k-1}\rangle
-\langle \nabla_w\, \tilde{x},~\tilde{y}\circ n^{k-1}\circ z \rangle
\\
&-\langle \tilde{x},~z\circ n^{k-1}\circ
\nabla_w\,\tilde{y}\rangle
-\langle \tilde{x},~\tilde{y}\circ n^{k-1}\circ \nabla_w \,z \rangle
\\
\stackrel{\eqref{condition-nabla-n}}{=}&(\nabla_w \,c)(\tilde{x},n^{k-1}\circ \tilde{y},z)\stackrel{\eqref{c1}}{=}(\nabla_z \,c)(\tilde{x},n^{k-1}\circ \tilde{y},w)
\\
=&(\nabla_z c_k)(x,y,w)~.
\end{split}
\end{equation}
Eq. \eqref{E-multiplication}: by \eqref{E1}, we have
\begin{equation}\nonumber
[E,\tilde{x}\circ z]-\tilde{x}\circ [E,z]
-[E,\tilde{x}]\circ z=\tilde{x}\circ z~.
\end{equation}
Applying the projection $\pi_k: TM\to TM/I_{k-1}$ to both sides,
we obtain \eqref{E-multiplication}.
Eq. \eqref{E-metric} :
\begin{equation}\nonumber
\begin{split}
&E\langle \tilde{x},\tilde{y}\circ n^{k-1} \rangle-
\langle\, [E,\tilde{x}],\tilde{y}\circ n^{k-1}\rangle-
\langle x,[E,\tilde{y}]\circ n^{k-1}
\rangle
\\ \stackrel{\eqref{E1}}{=}&
E\langle \tilde{x},\tilde{y}\circ n^{k-1} \rangle-
\langle\, [E,\tilde{x}],\tilde{y}\circ n^{k-1}\rangle-
\langle \tilde{x},\,[E,n^{k-1}\circ \tilde{y}]-\tilde{y}\circ [E,n^{k-1}]
-\tilde{y}\circ n^{k-1}\rangle
\\
\stackrel{\eqref{E3}}{=}
&(2-D)\langle \tilde{x},\tilde{y}\circ n^{k-1}\rangle
+\langle \tilde{x},\tilde{y}\circ [E,n^{k-1}]\rangle
+\langle \tilde{x},\tilde{y}\circ n^{k-1}\rangle
\\
\stackrel{\eqref{E2}}{=}&
(1-D+k)\langle \tilde{x},\tilde{y}\circ n^{k-1}\rangle~.
\end{split}
\end{equation} This implies
\begin{equation}\nonumber
E(x,y)_k-
(\, [E,x]_k,y)_k-
( x,[E,y]_k)_k=(1-D+k)(x,y)_k~.
\end{equation}
Next we show the case $k=0$. Let $x=n\circ\tilde{x},~y\in \Gamma(I)$.
Recall that $(x,y)_0=\langle \tilde{x},y\rangle$.
Eq. \eqref{eta-const} : since $\nabla_z\, x=\nabla_z (n\circ \tilde{x})=n\circ \nabla_z\, \tilde{x} $ by \eqref{condition-nabla-n},
we have
\begin{equation}\nonumber
(\nabla^{(0)}_z\, x, y)_0+(x,\nabla^{(0)}_z \,y)_0
=\langle \nabla_z \,\tilde{x},y \rangle
+\langle \tilde{x},\nabla_z\,y\rangle
=z\langle \tilde{x},y\rangle=
z(x,y)_0~.
\end{equation}
Eq. \eqref{c-symmetry} : since $\nabla_z\, x=n\circ \nabla_z\, \tilde{x} $,
\begin{equation}\nonumber
\begin{split}
(\nabla_w c_0)(x,y,z)
=&w(x,~y\circ z)_0-(\nabla^{(0)}_w\,x,~y\circ z)_0
-(x, ~z\circ \nabla^{(0)}_w\,y)_0-(x,~y\circ \nabla^{(0)}_w \,z)_0
\\
\stackrel{\eqref{condition-nabla-n}}{=}
&w\langle \tilde{x}, ~y\circ z \rangle
-\langle \nabla_w\, \tilde{x}, ~y\circ z\rangle
-\langle \tilde{x},~z\circ \nabla_w\,y\rangle
-\langle \tilde{x},~y\circ \nabla_w \,z\rangle
\\
=&(\nabla_w \,c)(\tilde{x},y,z)
\stackrel{\eqref{c1}}{=}(\nabla_z \,c)(\tilde{x},y,w)
\\
=&(\nabla_z\, c_0)(x,y,w)~.
\end{split}
\end{equation}
Eq. \eqref{E-multiplication} follows immediately from \eqref{E1}.
Eq. \eqref{E-metric} :
notice that
$[E,x]=n\circ ([E,\tilde{x}]+\tilde{x})$ holds by \eqref{E1}.
Therefore
\begin{equation}\nonumber
\begin{split}
E( x,y)_0-( [E,x],y)_0-
( x,\,[E,y])_0
=&E\langle \tilde{x},y\rangle-\langle [E,\tilde{x}]+\tilde{x} ,y\rangle
-\langle \tilde{x},\,[E,y]\rangle
\\
\stackrel{\eqref{E3}}{=}&(2-D)\langle \tilde{x},y\rangle
-\langle \tilde{x},y\rangle
\\=&(1-D)( x,y)_0~.
\end{split}
\end{equation}
\end{proof}
\subsection{Quotient construction}
Next we apply Lemma \ref{lem:quotient-construction2} to the
mixed Frobenius structure obtained in Theorem \ref{thm:nilpotent2}
with $k=0$.
\begin{corollary}\label{quotient-construction2}
If $(\circ,\langle~,~\rangle,E)$ is a Frobenius structure on $M$
of charge $D$ with a nilpotent global vector field $n$
satisfying \eqref{condition-E-n} and \eqref{condition-nabla-n},
then
$(\nabla^{(1)},~E^{(1)},~\circ_1, ~I_{\bullet}/I_0,~(~,~)_{\bullet})$ is a
mixed Frobenius structure of reference charge $D+1$ on $M^{(1)}$.
\end{corollary}
Examples of this construction can be found in \S \ref{example-toricsurface} and \S \ref{example-localP3}.
\section{Local quantum cohomology of weak Fano toric surfaces}
\label{example-toricsurface}
Let $S$ be a weak Fano toric surface.
We define the local quantum cup product
on the cohomology $H^*(S,\mathbb{C})$
using genus zero
local Gromov--Witten invariants
and construct
a mixed Frobenius structure.
First we recall basic facts about the quantum cohomology
in \S \ref{QH}.
Then we state the results in \S \ref{toric-results}.
In \S \ref{H-V}
and \S \ref{Proof-prop-toric3}
we explain that they can be obtained
from the quantum cohomology of
the projective compactification $V$ of
the canonical bundle $K_S$.
\subsection{Quantum cohomology}
\label{QH}
Let $V$ be a nonsingular projective variety.
$\overline{M}_{g,n}(V,\beta)$
denotes the moduli stack of genus $g$, $n$-pointed stable maps
to $V$ of degree $\beta\in H_2(V,\mathbb{Z})$.
Its virtual dimension is
\begin{equation}\label{virt-dim}
(1-g)(\dim V-3)-\int_{\beta}c_1(K_V)+n~,
\end{equation}
where $K_V$ is the canonical bundle of $V$.
Let
$ev_i:\overline{M}_{g,n}(V,\beta)\to V$ be the evaluation map
at the $i$-th marked point.
Fix a basis of the even part $H^{\rm even}(V,\mathbb{Q})$
of the cohomology $H^*(V,\mathbb{Q})$:
\begin{equation}\label{basis-X}
\Gamma_0=1~,~\underbrace{ \Gamma_1,\ldots,\Gamma_r}_{H^2(V)}~,
~\underbrace{\Gamma_{r+1},\ldots,\Gamma_{s}}_{H^{\geq 4}(V)}~.
\end{equation}
The dual basis with respect to the intersection form
is denoted $\{\Gamma_i^{\vee}\}$, i.e.
$$
\int_V \Gamma_i\cup \Gamma_j^{\vee} =\delta_{i,j}~.
$$
Let $t^0,\ldots,t^s$ be the coordinates of
$H^{\rm even}(V,\mathbb{Q})$ associated to
\eqref{basis-X}.
\begin{definition}
The genus zero Gromov--Witten potential of $V$ is defined by
\begin{equation}\label{GW-potential}
\begin{split}
\Phi(t\,;q)&=
\sum_{n=0}^{\infty}\sum_{\beta\in H_2(V,\mathbb{Z})}
\frac{1}{n!}
\Big(\int_{[\overline{M}_{0,n}(V,\beta)]^{vir}}
\prod_{i=1}^n\, ev_i^* \mathbf{t}~\Big) q^{\beta},
\\
\mathbf{t}&=\sum_{i=0}^s t^i\Gamma_i~.
\end{split}
\end{equation}
Here $q$ is the parameter associated to $H_2(V,\mathbb{Z})$
and $[\overline{M}_{0,n}(V,\beta)]^{vir}$ is the virtual fundamental class.
\end{definition}
Recall that
the contribution $\Phi_{cl}$ from $\beta=0$ in $\Phi(t\,;q)$
is given by the triple intersection because of the point mapping axiom
(see \cite[\S 2]{KM}, also \cite[Chapter 8]{Cox-Katz}, for axioms of Gromov--Witten invariants):
\begin{equation}
\Phi_{cl}=\sum_{i,j,k=0}^s \frac{t^it^jt^k}{3!}\,\int_V
\Gamma_i\cup \Gamma_j\cup\Gamma_k~.
\label{GW-classical}
\end{equation}
\begin{definition}
The quantum cup product $\circ_t$ is defined by
\begin{equation}\label{quantum-cup}
\Gamma_i\circ_t\Gamma_j=\sum_{k=0}^s
\frac{\partial^3 \Phi}{\partial t^i\partial t^j\partial t^k}\,\Gamma_k^{\vee}~
\quad (0\leq i,j\leq s)~.
\end{equation}
We call $(H^{\rm even}(V),\circ_t)$ the quantum cohomology of $V$.\footnote{
Usually the quantum cohomology refers to a superalgebra structure
on $H^*(V)$
and the quantum cohomology considered here is
its subalgebra.
For our purpose, this is sufficient
because $H^*(V)=H^{\rm even}(V)$ for
nonsingular toric varieties $V$
which we deal with in \S \ref{example-toricsurface},
\S \ref{example-localP3}.
}
\end{definition}
The intersection form $\langle~,~\rangle$ is defined by
\begin{equation}\label{X-intersection}
\langle \Gamma_i,\Gamma_j\rangle=\int_V \Gamma_i\cup \Gamma_j~.
\end{equation}
Then $(H^{\rm even}(V),\circ_t,\langle~,~\rangle)$ is a Frobenius algebra.
Define the vector field $E$ on $M=H^*(V,\mathbb{C})$ by
\begin{equation}\label{QH-Euler}
E=\sum_{i=0}^s \frac{2-\mathrm{deg}\Gamma_i}{2}\, t^i
\frac{\partial}{\partial t^i}
+\sum_{i=1}^r
\xi_i \frac{\partial}{\partial t^i}~.
\end{equation}
Here the numbers $\xi_i$ are coefficients of $\Gamma_i$ in $-c_1(K_V)$:
$$
-c_1(K_V)=\sum_{i=1}^r \xi_i\Gamma_i~.
$$
\begin{theorem} \label{QH-Frob-str}
$(\circ,\langle~,~\rangle,E)$ defined in \eqref{quantum-cup},
\eqref{X-intersection} and \eqref{QH-Euler} is a Frobenius structure on
$M=H^{\mathrm{even}}(V,\mathbb{C})$
of charge $\dim V$
(\cite{KM}, see also \cite{Manin}).
\end{theorem}
\begin{remark}\label{rem:conv}
When $V$ is a smooth projective toric variety,
the convergence of the Gromov--Witten potential \eqref{GW-potential}
was proved by Iritani \cite[Theorem 1.3]{Iritani}. We only deal
with such cases in \S \ref{example-toricsurface} and
\S \ref{example-localP3}.
\end{remark}
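As a quick illustration of \eqref{GW-potential}, \eqref{quantum-cup} and \eqref{QH-Euler} (this example only records the well-known quantum cohomology of the projective line and is not used later), take $V=\mathbb{P}^1$ with $\Gamma_0=1$ and $\Gamma_1$ the point class. The only nonzero genus zero invariants come from degree one, and the divisor and fundamental class axioms give
\begin{equation}\nonumber
\Phi(t\,;q)=\frac{1}{2}(t^0)^2t^1+q\,e^{t^1}~,
\end{equation}
so that \eqref{quantum-cup} yields
\begin{equation}\nonumber
\Gamma_1\circ_t\Gamma_1=\frac{\partial^3 \Phi}{(\partial t^1)^3}\,\Gamma_1^{\vee}
=q\,e^{t^1}\,\Gamma_0
\end{equation}
(the term involving $\Gamma_0^{\vee}$ vanishes), while \eqref{QH-Euler} gives $E=t^0\frac{\partial}{\partial t^0}+2\frac{\partial}{\partial t^1}$ since $-c_1(K_{\mathbb{P}^1})=2\Gamma_1$; the charge is $D=\dim V=1$.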
\subsection{Results}
\label{toric-results}
Let $S$ be a weak Fano toric surface.
Let $\gamma_{r+1}\in H^4(S,\mathbb{Z})$ be the Poincar\'e dual of
the point class and $\gamma_0=1\in H^0(S)$.
Let $\gamma_1,\ldots,\gamma_r$ be a basis of $H^2(S,\mathbb{Z})$.
Define the integers $c_{ij},b_i,b_{i}^{\vee},\kappa$ ($1\leq i,j\leq r$) by
\begin{equation}\begin{split}\nonumber
&c_{ij}=\int_S \gamma_i\cup\gamma_j~,\quad
-c_1(K_S)=\sum_{i=1}^r b_i\gamma_i~,\\
&b_i^{\vee}=\sum_{j=1}^rb_jc_{ji}~,\quad
\kappa=\int_S c_1(K_S)^2=\sum_{i,j=1}^r b_ib_jc_{ij}=\sum_{i=1}^r b_ib_i^{\vee}~,
\end{split}
\end{equation}
where $c_1(K_S)$ is the first Chern class of the canonical bundle $K_S$.
Notice that the matrix $(c_{ij})$ is invertible.
Notice also that $\kappa> 0$ holds for weak Fano toric surfaces.
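For instance, if $S=\mathbb{P}^2$ (so $r=1$) and $\gamma_1$ is the hyperplane class, then $c_{11}=1$ and $-c_1(K_S)=3\gamma_1$, hence $b_1=b_1^{\vee}=3$ and $\kappa=9$; keeping this simple case in mind may help in tracking the constants below.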
Consider the filtration\footnote{When $S$ is Fano, \eqref{toric-filtration} and \eqref{toric-bilinear}
agree with those in Example \ref{example-surface}
with $n=K_S$ if the filtration is shifted by one.}
on $H^*(S,\mathbb{C})$ by subspaces
\begin{equation}\label{toric-filtration}
0= I_{0}\subset I_1=\mathbb{C}\,c_1(K_S)\oplus H^4(S)\subset I_2
=H^2(S)\oplus H^4(S)=I_3
\subset I_4=H^*(S)~,
\end{equation}
and the following bilinear forms on $I_k/I_{k-1}$:
\begin{equation}\label{toric-bilinear}
\begin{split}
&( c_1(K_S),\gamma_{r+1})_1=1~,\quad
( c_1(K_S),c_1(K_S))_1=( \gamma_{r+1},\gamma_{r+1})_1=0~,\\
&( \gamma_i,\gamma_j)_2=c_{ij}-\frac{b_i^{\vee}b_j^{\vee}}{\kappa}
\quad (1\leq i,j\leq r)~,\\
&(\gamma_0,\gamma_0)_4=\kappa~.
\end{split}
\end{equation}
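Note that the second line of \eqref{toric-bilinear} indeed induces a well-defined pairing on $I_2/I_1$: since
$$\sum_{i=1}^r b_i\Big(c_{ij}-\frac{b_i^{\vee}b_j^{\vee}}{\kappa}\Big)=b_j^{\vee}-\frac{\kappa\,b_j^{\vee}}{\kappa}=0~,$$
changing a representative by a multiple of $c_1(K_S)=-\sum_{i=1}^r b_i\gamma_i$ does not change the value of $(~,~)_2$.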
Next we define the local quantum cup product.
Let $\overline{M}_{g,n}(S,\beta)$
be the moduli stack of $n$-pointed genus $g$ stable maps to $S$
of degree $\beta\in H_2(S,\mathbb{Z})$,
$ev_i:\overline{M}_{g,n}(S,\beta)\to S$ be the evaluation map
at the $i$-th marked point,
$\mu:\overline{M}_{g,1}(S,\beta)\to \overline{M}_{g,0}(S,\beta)$
be the forgetful map.
\begin{definition}
For an effective class $\beta\in H_2(S,\mathbb{Z})$ satisfying
$-\int_\beta c_1(K_S)> 0$,
we define
\begin{equation}\label{local-GW}
N_{\beta}=\int_{[\overline{M}_{0,0}(S,\beta)]^{vir}} e(R^1\mu_*ev_1^*K_S)~.
\end{equation}
Here $e$ stands for the Euler class.
For other $\beta\in H_2(S,\mathbb{Z})$, we just set $N_{\beta}=0$.
We call $N_{\beta}$ the genus zero local Gromov--Witten invariant
of degree $\beta$ of $S$.
\end{definition}
These numbers can be found in \cite{CKYZ} for some $S$.
Let $\{C_1,\ldots,C_r\}$ be the basis of $H_2(S,\mathbb{Z})$
dual to $\gamma_1,\ldots,\gamma_r$.
\begin{definition}
The local quantum cup product on $H^*(S,\mathbb{C})$ is
the following family of multiplications $\circ_t$
parameterized by $t=(t^1,\ldots,t^r)$:
\begin{equation}\label{toric-multiplication}
\begin{split}
&\gamma_0\circ_t \gamma_i=\gamma_i~,\quad
\gamma_0\circ_t \gamma_{r+1}=\gamma_{r+1}~,\quad
\gamma_i\circ_t \gamma_{r+1}=\gamma_{r+1}\circ_t \gamma_{r+1}=0~,
\\
&\gamma_i\circ_t \gamma_j=\Big(c_{ij}-
\sum_{\begin{subarray}{c}
\beta=\sum_i \beta_iC_i\\
\end{subarray}}
\beta_i\,\beta_j (b\cdot \beta)\,N_{\beta}~
e^{\beta\cdot t}\Big)\, \gamma_{r+1}~
\quad (1\leq i,j\leq r)~,
\end{split}
\end{equation}
where $\beta\cdot t=\sum_{i=1}^r \beta_i t^i$
and $b\cdot \beta= \sum_{i=1}^r b_i \beta_i$.
\end{definition}
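For instance, for $S=\mathbb{P}^2$ (with $r=1$, $c_{11}=1$ and $b_1=3$ as above), the only product in \eqref{toric-multiplication} which differs from the cup product is
$$\gamma_1\circ_t\gamma_1=\Big(1-3\sum_{d\geq 1}d^3\,N_{dC_1}\,e^{d t^1}\Big)\gamma_2~,$$
where the $N_{dC_1}$ are the local Gromov--Witten invariants of $K_{\mathbb{P}^2}$ mentioned above.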
\begin{remark}
Although we defined the local quantum cup product by explicit formulas,
it is possible to define it in a uniform manner. One way is to use equivariant
localization with respect to the $\mathbb{C}^*$-action rotating the fiber of the
canonical bundle. See e.g. \cite{KonishiMinabe14}. Another way is to use the fact
that the evaluation map is proper in all the example in this paper. See
\cite[\S1.4]{BryanGraber}. The formulas given in \eqref{toric-multiplication}
coincide with these general definitions. The authors thank the referee for pointing out this fact.
\end{remark}
Let $M=H^*(S,\mathbb{C})$ and $t^0,t^1,\ldots,t^{r+1}$
be the coordinates associated to the basis
$\gamma_0,\gamma_1,\ldots,\gamma_{r+1}$.
Define a vector field $E$ on $M$ by
$$
E=t^0\frac{\partial}{\partial t^0}-t^{r+1}\frac{\partial}{\partial t^{r+1}}~.
$$
Regard the multiplication \eqref{toric-multiplication},
the filtration \eqref{toric-filtration}
and the bilinear forms \eqref{toric-bilinear}
as those on the tangent space $T_t M$ by
identifying $T_tM$ with $H^*(S,\mathbb{C})$.
\begin{theorem}\label{toric3}
The trivial connection, the above $E$,
the multiplication \eqref{toric-multiplication}
and the Frobenius filtration \eqref{toric-filtration}
and \eqref{toric-bilinear}
form a mixed Frobenius structure on $M=H^*(S,\mathbb{C})$
of reference charge four.
\end{theorem}
The proof of
Theorem \ref{toric3}
will be given in \S \ref{Proof-prop-toric3},
by applying the quotient construction (Corollary \ref{quotient-construction2})
to the quantum cohomology ring of the projective compactification
of $K_S$.
\begin{remark}\label{rem:localB}
In this example, the operator
$\mathcal{V}=\nabla E-\frac{2-D}{2}=-\frac{\text{deg}}{2}+2$
is diagonalizable and its eigenvalues are integers.
Therefore other than the Frobenius filtration $I_{\bullet}$,
we can consider the decreasing filtration $\mathcal{F}^{\bullet}$
on $H^*(S,\mathbb{C})$
defined by
\begin{equation}\nonumber
\mathcal{F}^{p}=\bigoplus_{p'\geq p}
\text{ the eigenspace of $\mathcal{V}$ with eigenvalue $p'$}~.
\end{equation}
This $\mathcal{F}^{\bullet}$ is the same as the one in \S \ref{sec:MHS}.
Moreover $I_k~(\text{here})=\mathcal{W}_k\otimes \mathbb{C}$
with $n=c_1(K_S)$.
They agree with the Hodge filtration and the weight filtration
of the mixed Hodge structure
for the corresponding local B-model
under an appropriate vector space isomorphism
(see \cite[Theorem 4.2]{KonishiMinabe08} and references therein).
\end{remark}
\subsection{Quantum cohomology of
$V=\mathbb{P}(\mathcal{O}_S\oplus K_S)$}\label{H-V}
Let $V=\mathbb{P}(\mathcal{O}_S\oplus K_S)$ be the projective
compactification of the canonical bundle $K_S$ of
a weak Fano toric surface $S$ and
$\mathrm{pr}:V\to S$ be the projection.
Let $\Gamma_i=\mathrm{pr}^*\gamma_i$ ($0\leq i\leq r+1$)
and let $\Delta_0$ be the Poincar\'e dual of
the infinity section.
Set $\Delta_i=\Gamma_i\cup \Delta_0$ ($1\leq i\leq r$).
We have the following basis of $H^*(V,\mathbb{C})$:
\begin{equation}\label{V-basis}
\underbrace{\Gamma_0}_{H^0(V)}\quad
\underbrace{\Gamma_1,~\ldots,\Gamma_r,~\Delta_0}_{H^2(V)}\quad
\underbrace{\Gamma_{r+1},~\Delta_1,\ldots,~\Delta_r}_{H^4(V)}\quad
\underbrace{\Delta_{r+1}}_{H^6(V)}~.
\end{equation}
Let $t_i,s_i$ ($0\leq i\leq r+1$) be the coordinates
of $H^{*}(V,\mathbb{C})$ associated to this basis.
The cup product $\cup$ can be calculated by the intersection theory of
toric varieties. The unit is $\Gamma_0$,
$\Gamma_i\cup \Delta_0=\Delta_i$ by definition ($1\leq i\leq r+1$)
and
\begin{equation}\label{cup-V}
\begin{split}
&\Gamma_i\cup\Gamma_j=c_{ij}\Gamma_{r+1}~,\quad
\Gamma_i\cup\Gamma_{r+1}=0~,\quad
\Gamma_i\cup\Delta_j=c_{ij}\,\Delta_{r+1},
\\
&\Delta_0^2=\sum_{i=1}^r b_i \Delta_i~,\quad
\Delta_0\cup \Delta_j=b_j^{\vee}\,\Delta_{r+1}~
\quad (1\leq i,j\leq r).
\end{split}
\end{equation}
The other products vanish for degree reasons.
The intersection form $\langle~,~\rangle$ can be
obtained from $\int_V \Delta_{r+1}=1$ and the above cup product.
Explicitly, the pairings which do not vanish are:
\begin{equation}\label{intersection-V}
\begin{split}
&\langle \Gamma_0,\Delta_{r+1}\rangle=1~, \quad
\langle \Delta_0,\Gamma_{r+1}\rangle=1~,\\
&\langle \Gamma_i,\Delta_j\rangle =c_{ij}~,\quad
\langle \Delta_0,\Delta_j\rangle =b_j^{\vee}~\quad (1\leq i,j\leq r)~.
\end{split}
\end{equation}
Let $A=(a_{ij})$ be the inverse of the matrix $(c_{ij})_{1\leq i,j\leq r}$.
The dual basis
of \eqref{V-basis} is given by the following.
\begin{equation}
\begin{split}
&\Gamma_0^{\vee}=\Delta_{r+1}~,\quad
\Gamma_{i}^{\vee}=\sum_{j=1}^ra_{ij}\Delta_j-b_i \Gamma_{r+1}~,
\quad
\Delta_0^{\vee}=\Gamma_{r+1}~,
\\
&\Gamma_{r+1}^{\vee}=\Delta_0-\sum_{k=1}^r b_k\Gamma_k~,
\quad
\Delta_i^{\vee}=\sum_{j=1}^r a_{ij}\Gamma_j~,
\quad
\Delta_{r+1}^{\vee}=\Gamma_0~.
\end{split}
\end{equation}
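As a consistency check, \eqref{cup-V} and \eqref{intersection-V} give for example
$$\langle \Gamma_i^{\vee},\Gamma_j\rangle=\sum_{k=1}^r a_{ik}c_{kj}=\delta_{ij}~,\qquad
\langle \Gamma_i^{\vee},\Delta_0\rangle=\sum_{k=1}^r a_{ik}b_k^{\vee}-b_i=0~,\qquad
\langle \Gamma_{r+1}^{\vee},\Gamma_{r+1}\rangle=\langle \Delta_0,\Gamma_{r+1}\rangle=1~,$$
where we used $\Gamma_j\cup\Gamma_{r+1}=0$ and $\sum_{j=1}^r b_jc_{jk}=b_k^{\vee}$.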
Now consider the Gromov--Witten potential $\Phi(t,s\,;q)$
(see \eqref{GW-potential})
and the quantum cup product \eqref{quantum-cup} of $V$.
To be concrete, let us fix a basis of $H_2(V,\mathbb{Z})$
as follows.
Let
$\iota:S\rightarrow V$ be the inclusion as the zero
section of $K_S\subset V$ and
$C_i'=\iota_*C_i$ ($1\leq i\leq r$).
Let $C_{0}'$ be the fiber class of $\mathrm{pr}:V\to S$.
Then $\{C_0',C_1',\ldots,C_r'\}$ is a basis of $H_2(V,\mathbb{Z})$
which is dual to the basis $\{\Delta_0,\Gamma_1,\ldots,\Gamma_{r}\}$
of $H^2(V,\mathbb{Z})$.
Let $q_i$ ($0\leq i\leq r$) be parameters
associated to $C_0',C_1',\ldots,C_{r}'$.
The parameter $q^{\beta}$ in \eqref{GW-potential} is written as
$$
q^{\beta}=\prod_{i=0}^r q_i^{\beta_i} \quad\text{ for }\quad
\beta=\sum_{i=0}^{r}\beta_iC_{i}'~.$$
\begin{lemma}\label{GW2}
We have
\begin{equation}
\begin{split}
\Phi(t,s\,;q)&=\Phi_{cl}+\Phi_0+\Phi_1~,\\
\Phi_{cl}
&=\frac{1}{2}\,t_0^2 s_{r+1}
+t_0\Big(\sum_{i,j=1}^r c_{ij}t_i s_j
+t_{r+1} s_0+\sum_{i=1}^r b_i^{\vee}s_0 s_i
\Big)
\\
&+\frac{1}{2}\sum_{i,j=1}^r c_{ij}t_it_js_0
+\frac{1}{2}\sum_{i=1}^{r}b_i^{\vee}t_i\, s_0^2~,
\\
\Phi_0&=
\sum_{\begin{subarray}{c}\beta=\beta_1C_1'+\cdots+\beta_r C_r'\neq 0
\end{subarray}}
N_{\beta}^V\,e^{\beta\cdot t} q^{\beta}~,\qquad
\mathrm{where}\quad N_{\beta}^V=\int_{[\overline{M}_{0,0}(V,\beta)]^{vir}}\,1,
\\
\Phi_1&= \mathcal{O}(q_{0})~.
\end{split}
\end{equation}
\end{lemma}
\begin{proof}
We decompose the Gromov--Witten potential $\Phi$ into
three parts: $\Phi_{cl}$ which is the contribution of $\beta=0$,
$\Phi_0$ which is the contribution of homology classes
$\beta\neq 0$ with $\beta_0=0$, and the remaining part $\Phi_{1}$
which is the contribution of $\beta$ with $\beta_0\neq 0$.
$\Phi_{cl}$ can be computed by \eqref{GW-classical}.
In $\Phi_1$, the terms with $\beta_0<0$ vanish because
such $\beta$ are not effective and the moduli stack
$\overline{M}_{0,n}(V,\beta)$ is empty.
Therefore $\Phi_1=\mathcal{O}(q_0)$.
Notice that since $-c_1(K_V)=2\Delta_0$,
the virtual dimension \eqref{virt-dim} of $\overline{M}_{0,n}(V,\beta)
$ is $2\beta_0+n$.
If $\beta_0=0$,
\begin{equation}\nonumber
\begin{split}
\int_{[\overline{M}_{0,n}(V,\beta)]^{vir}}
\prod_{i=1}^n \,ev^*_i\mathbf{t}
&=\int_{[\overline{M}_{0,n}(V,\beta)]^{vir}}
\prod_{i=1}^n \,ev^*_i\mathbf{t'}~
\quad\Big(\mathbf{t'}=\sum_{i=1}^r t_i\Gamma_i +s_0\Delta_0\Big)
\\
&=\Big(\sum_{i=1}^r \beta_it^i\Big)^n
\int_{[\overline{M}_{0,0}(V,\beta)]^{vir}}\,1~.
\end{split}
\end{equation}
Here
the first equality follows from degree considerations and
the fundamental class axiom,
and the second equality follows from the divisor axiom.
This proves the equation for $\Phi_0$.
\end{proof}
We consider
the specialization $q_{0}=0$, and set $q_1=\cdots=q_r=1$.\footnote{
Since $q_i$ ($1\leq i\leq r$)
appears in $\Phi_0$ always in the combination $e^{t^i}q_i$,
one can always recover $q_1,\ldots,q_r$ in $\Phi_0$.
}
Then by Lemma \ref{GW2},
we see that the quantum cup product \eqref{quantum-cup} reduces to
\begin{equation}\label{quantum-cup-V}
\begin{split}
\Gamma_i\circ_t\Gamma_j&= c_{ij}\Gamma_{r+1}
+\sum_{k=1}^r
\Big(\sum_{\begin{subarray}{c}
\beta=\beta_1C_1+\cdots+\beta_r C_r\\
\beta\neq 0\end{subarray}} \beta_i\,\beta_j\,\beta_k\, N^V_{\beta} \,
e^{\beta\cdot t}\Big)\,\Gamma_k^{\vee}
\quad (1\leq i,j\leq r)~,
\\
\Delta_i\circ_t *&=\Delta_i\cup *~.
\end{split}
\end{equation}
\begin{lemma}\label{FS-V}
The multiplication \eqref{quantum-cup-V},
the intersection form \eqref{intersection-V}
(regarded as the multiplication on $T_tH^*(V,\mathbb{C})$
and the metric by the canonical isomorphism $H^*(V)\stackrel{\sim}{\rightarrow}T_tH^*(V)$) and
the vector field
\begin{equation}\label{Euler-SV}
E=t^0\frac{\partial}{\partial t^0}+2\frac{\partial}{\partial s^0}
-t^{r+1}\frac{\partial}{\partial t^{r+1}}
-\sum_{i=1}^r s^i\frac{\partial}{\partial s^i}
-2s^{r+1}\frac{\partial}{\partial s^{r+1}}
\end{equation}
form a Frobenius structure of charge three
on $\tilde{M}=H^{*}(V,\mathbb{C})$.
\end{lemma}
\begin{proof}
The quantum cup product on $V$
is a power series in
$t^0$, $e^{t^i}q_i$ ($1\leq i\leq r$), $e^{s^0}q_0$,
$t^{r+1}$, $s^i$ ($1\leq i\leq r+1$), and it is convergent \cite{Iritani}.
By Lemma \ref{GW2}, it takes the following form.
\begin{equation}\nonumber
\begin{split}
\Gamma_i\circ_t\Gamma_j&= c_{ij}\Gamma_{r+1}
+\sum_{k=1}^r
\Big(\sum_{\begin{subarray}{c}
\beta=\beta_1C_1+\cdots+\beta_r C_r\\
\beta\neq 0\end{subarray}} \beta_i\,\beta_j\,\beta_k\, N^V_{\beta} \,
e^{\beta\cdot t}\Big)\,\Gamma_k^{\vee}+\mathcal{O}(e^{s^0}q_0)
\quad (1\leq i,j\leq r)~,
\\
\Delta_i\circ_t *&=\Delta_i\cup *~+\mathcal{O}(e^{s^0}q_0).
\end{split}\end{equation}
The multiplication \eqref{quantum-cup-V} above consists of
the terms of degree zero in $e^{s^0}q_0$ of this product.
Therefore the associativity and the commutativity of \eqref{quantum-cup-V}
follow from those of this quantum cup product.
The Euler vector field \eqref{QH-Euler} for the quantum cohomology of $V$
is given by \eqref{Euler-SV} since $-c_1(K_V)=2\Delta_0$.
The compatibility \eqref{E1} with the multiplication \eqref{quantum-cup-V}
follows from that of the quantum cohomology.
The symmetry \eqref{c1} of $\nabla c$ holds for the same reason.
\end{proof}
\subsection{Proof of Theorem \ref{toric3}}
\label{Proof-prop-toric3}
We first apply
Theorem \ref{thm:nilpotent2}
to the Frobenius structure on $\tilde{M}=H^{*}(V,\mathbb{C})$
obtained in Lemma \ref{FS-V}
with the nilpotent vector field $n=\frac{\partial}{\partial s^0}$
which satisfies the conditions \eqref{condition-E-n}
and \eqref{condition-nabla-n}.
Let us construct the Frobenius filtration.
Using the canonical isomorphism $T_t\tilde{M}\cong H^*(V,\mathbb{C})$,
we write it down as that of $H^*(V,\mathbb{C})$.
Let $I$ be the ideal generated by $\Delta_0$:
\begin{equation}\nonumber
I=\mathbb{C} \Delta_0\oplus \bigoplus_{i=1}^{r}\mathbb{C}\Delta_i
\oplus \mathbb{C}\Delta_{r+1}~.
\end{equation}
Then following the construction in \S \ref{nilpotent-construction}, we compute
$J_k=\mathrm{Ker}\,\Delta_0^k$:
\begin{equation}\nonumber
\begin{split}
J_1&=\mathbb{C}\Gamma_{r+1}^{\vee}\oplus
\bigoplus_{i=1}^{r}\mathbb{C}\Gamma_i^{\vee}
\oplus \mathbb{C}\Delta_{r+1}~,
\\
J_2&=\bigoplus_{i=1}^r \mathbb{C}(\Gamma_i-\frac{b_i^{\vee}}{\kappa}\Delta_0)
\oplus \mathbb{C}\Gamma_{r+1}\oplus
\bigoplus_{i=1}^{r}\mathbb{C}\Delta_i\oplus \mathbb{C}\Delta_{r+1}
~,
\\
J_3&=\bigoplus_{i=1}^{r} \mathbb{C}\Gamma_i\oplus \mathbb{C}\Delta_0
\oplus \mathbb{C}\Gamma_{r+1}
\oplus\bigoplus_{i=1}^{r}\mathbb{C}\Delta_i\oplus
\mathbb{C}\Delta_{r+1}=H^{\geq 2}(V)\supset I~,
\\
J_4&=\mathbb{C} \Gamma_0\oplus J_3=H^*(V)~.
\end{split}
\end{equation}
So $I_k=I+J_k$ ($k=1,2,3,4$) are
\begin{equation}\label{toric-filtration2}
\begin{split}
I_0&=I~,\\
I_1&=\mathbb{C}\Gamma_{r+1}^{\vee}\oplus \mathbb{C}\Delta_0
\oplus \mathbb{C}\Gamma_{r+1}
\oplus
\bigoplus_{i=1}^r \mathbb{C}\Delta_i\oplus \mathbb{C}\Delta_{r+1},\\
I_2&=I_3=H^{\geq 2}(V)~,\\
I_4&=H^*(V)~.
\end{split}
\end{equation}
The induced bilinear forms $(~,~)_k$ on $I_k/I_{k-1}$ are as follows.
\begin{equation}\label{toric-bilinear2}
\begin{split}
k=0\quad&
(\Delta_0,\Delta_{r+1})_0=\langle \Delta_0,\Gamma_{r+1}\rangle=1~,\\
&(\Delta_i,\Delta_j)_0=\langle\Delta_i,\Gamma_j\rangle=c_{ij}\quad (1\leq i,j\leq r)~,
\\
k=1\quad &
([\Gamma_{r+1}^{\vee}],[\Gamma_{r+1}])_1
=\langle \Gamma_{r+1}^{\vee},\Gamma_{r+1}\rangle
=1~,
\\
&([\Gamma_{r+1}^{\vee}],[\Gamma_{r+1}^{\vee}])_1=
([\Gamma_{r+1}],[\Gamma_{r+1}])_1=0~,
\\
k=2\quad &
([\Gamma_i],[\Gamma_j])_2
=
\Big\langle \Big(\Gamma_i-\frac{b_i^{\vee}}{\kappa}\Delta_0\Big)\circ_t
\Big(\Gamma_j-\frac{b_j^{\vee}}{\kappa}\Delta_0\Big),~ \Delta_0 \Big\rangle
\\&\qquad\quad\quad=c_{ij}-\frac{b_i^{\vee}b_j^{\vee}}{\kappa}~\quad (1\leq i,j\leq r),
\\
k=4\quad&
( [\Gamma_0],[\Gamma_0])_4=\langle \Gamma_0\cup \Gamma_0, \Delta_0^3\rangle
=\kappa~.
\end{split}
\end{equation}
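Note that in the computation of $([\Gamma_i],[\Gamma_j])_2$ above the quantum corrections appearing in \eqref{quantum-cup-V} do not contribute: they are multiples of the $\Gamma_k^{\vee}$, and $\langle \Gamma_k^{\vee},\Delta_0\rangle=\sum_{j=1}^r a_{kj}b_j^{\vee}-b_k=0$, so the pairing is computed by the cup product alone.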
Thus by Theorem \ref{thm:nilpotent2}, we have the following lemma.
\begin{lemma}\label{toric4}
The trivial connection, the vector field $E$ \eqref{Euler-SV},
the multiplication \eqref{quantum-cup-V},
the filtration \eqref{toric-filtration2}
and the bilinear forms \eqref{toric-bilinear2}
form a MFS of reference charge four on $\tilde{M}=H^*(V,\mathbb{C})$~.
\end{lemma}
Next we apply Corollary \ref{quotient-construction2}.
Since
$I$ is the kernel of
the pullback
$\iota^*: H^*(V,\mathbb{C})\to H^*(S,\mathbb{C})$
by the inclusion $\iota:S\hookrightarrow K_S\subset V$,
if we set $$
\tilde{M}^{(1)}=\{s^0=s^1=\cdots=s^{r+1}=0\}\subset \tilde{M}~,
$$
then it is naturally isomorphic to $H^*(S,\mathbb{C})$.
Theorem \ref{toric3} follows from
Corollary \ref{quotient-construction2} and Lemma \ref{FS-V}
together with the fact that $N^V_{\iota_*\beta}=N_{\beta}$
for $\beta \in H_2(S, \mathbb{Z})$ (see e.g. \cite[Proposition 2.2]{KonishiMinabe}).
\begin{remark}
It is not difficult to see that the argument in \S \ref{H-V} and \S \ref{Proof-prop-toric3} shows that the same
result as in Theorem \ref{toric3} holds for any smooth projective surface $S$ with nef
anticanonical bundle. (For such an $S$, the quantum cup products \eqref{toric-multiplication}
and \eqref {quantum-cup-V} make sense without any change as formal power series.)
So we have a formal MFS on the even part of
$H^*(S, \mathbb{C})$. If $S$ is toric, then we have a genuine
MFS since the quantum cup product is known to be convergent (cf. Remark \ref{rem:conv}).
This is the only point where we used the toric assumption on $S$.
\end{remark}
\section{Local quantum cohomology of $\mathbb{P}^3$}
\label{example-localP3}
In this section,
we construct a mixed Frobenius structure on the cohomology of the
projective space $\mathbb{P}^3$
similar to the one in \S \ref{example-toricsurface}.
\subsection{Results}
Take the following basis of the cohomology $H^*(\mathbb{P}^3,\mathbb{C})$:
$$
\gamma_0=1~,\quad
\gamma_1=c_1(\mathcal{O}_{\mathbb{P}^3}(1))~,\quad
\gamma_2=\gamma_1\cup\gamma_1~,\quad
\gamma_3=\gamma_1\cup \gamma_2~.
$$
Let $t^0,t^1,t^2,t^3$ be the associated coordinates.
We identify $H_2(\mathbb{P}^3,\mathbb{Z})$ with $\mathbb{Z}$
by $\beta \mapsto \int_\beta \gamma_1$.
We consider the following filtration\footnote{\eqref{P3-filtration} and \eqref{P3-bilinear}
agree with those in Example \ref{example-Pn} with $m=-4$
if the filtration is shifted by one.} on $H^*(\mathbb{P}^3)$.
\begin{equation}\label{P3-filtration}
\begin{split}
I_0&=0~,\\
I_1&=\cdots=I_{4}=H^{\geq 2}(\mathbb{P}^3)~,
\\
I_{5}&=H^*(\mathbb{P}^3)~.
\end{split}
\end{equation}
On the graded quotients, we consider the bilinear forms:
\begin{equation}\label{P3-bilinear}
\begin{split}
I_1/I_0~:~&\quad (\gamma_k,\gamma_l)_1=\begin{cases}
-\frac{1}{4}&(k+l=4)\\
0&(k+l\neq 4)~
\end{cases}~,
\\
I_{5}/I_{4}~:~&\quad
(1,1)_{5}=4^3~.
\end{split}
\end{equation}
Next we define the local quantum cup product.
Let $\overline{M}_{g,n}(\mathbb{P}^3,\beta)$
be the moduli stack of $n$-pointed genus $g$ stable maps to $\mathbb{P}^3$
of degree $\beta\in H_2(\mathbb{P}^3,\mathbb{Z})\cong \mathbb{Z}$,
$ev_i:\overline{M}_{g,n}(\mathbb{P}^3,\beta)\to \mathbb{P}^3$
be the evaluation map
at the $i$-th marked point and
$\mu:\overline{M}_{g,2}(\mathbb{P}^3,\beta)\to
\overline{M}_{g,1}(\mathbb{P}^3,\beta)$
be the forgetful map.
\begin{definition}
For $\beta\neq 0$, we define the
genus zero local Gromov--Witten invariant $N_{\beta}\in\mathbb{Q}$
of degree $\beta\in H_2(\mathbb{P}^3,\mathbb{Z})$ by
\begin{equation}\label{local-GW-P3}
N_{\beta}:=\int_{[\overline{M}_{0,1}(\mathbb{P}^3,\beta)]^{vir}}
ev_1^*\gamma_2\cup e(R^1 \mu_*ev_2^*\mathcal{O}_{\mathbb{P}^3}(-4))~.
\end{equation}
\end{definition}
These numbers are computed in \cite[Table 1]{KlemmPandharipande}.
\begin{definition}
Let
\begin{equation}\label{local-pot-P3}
\Phi_{qu}(t^1,t^2):=t^2 \sum_{\beta>0}N_{\beta}\, e^{\beta t^1}~.
\end{equation}
The local quantum cup product $\circ_t$ on $H^*(\mathbb{P}^3,\mathbb{C})$
is the family of multiplications given by
\begin{equation}\label{P3-multiplication1}
\gamma_i\circ_t \gamma_j=\gamma_i \cup \gamma_j
-4\sum_{k=1}^2\frac{\partial^3 \Phi_{qu}}{\partial t^i\partial t^j\partial t^k}\,\gamma_{4-k}~.
\end{equation}
\end{definition}
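Since $\Phi_{qu}$ in \eqref{local-pot-P3} is linear in $t^2$ and independent of $t^0,t^3$, its only nonvanishing third derivatives are
$$\frac{\partial^3\Phi_{qu}}{(\partial t^1)^3}=t^2\sum_{\beta>0}\beta^3 N_{\beta}\,e^{\beta t^1}~,\qquad
\frac{\partial^3\Phi_{qu}}{(\partial t^1)^2\,\partial t^2}=\sum_{\beta>0}\beta^2 N_{\beta}\,e^{\beta t^1}~.$$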
More explicitly, the local quantum cup product is given by
\begin{equation}\label{P3-multiplication2}
\begin{split}
\gamma_0\circ_t&=\gamma_0\cup~,\quad \gamma_3\circ_t=\gamma_3\cup~,\\
\gamma_1\circ_t\gamma_1&=
\gamma_2-4\Big(\sum_{\beta>0}\beta^2 N_{\beta}\,e^{\beta t^1}\Big)\gamma_2
-4\Big(t^2\sum_{\beta>0}\beta^3 N_{\beta}\,e^{\beta t^1}\Big)\gamma_3~,\\
\gamma_1\circ_t\gamma_2&=\gamma_3
-4\Big(\sum_{\beta>0}\beta^2 N_{\beta}\,e^{\beta t^1}\Big)\gamma_3~,\\
\gamma_2\circ_t\gamma_2&=0~.
\end{split}
\end{equation}
Regard $M=H^*(\mathbb{P}^3,\mathbb{C})$ as a manifold.
We identify each tangent space $T_t M$ with $H^*(\mathbb{P}^3,\mathbb{C})$
by $\frac{\partial}{\partial t^i}\mapsto \gamma_i$.
Let $\nabla$ be the trivial connection on $TM$
such that $\frac{\partial}{\partial t^i}$ are flat sections.
We set
\begin{equation}\label{Euler-localP3}
E=t^0\frac{\partial}{\partial t^0}-t^2\frac{\partial}{\partial t^2}-2
t^3\frac{\partial}{\partial t^3}.
\end{equation}
\begin{theorem}\label{prop-P3-2}
The trivial connection $\nabla$, the vector field $E$ \eqref{Euler-localP3},
the local quantum cup product \eqref{P3-multiplication2},
the filtration \eqref{P3-filtration} and the bilinear forms \eqref{P3-bilinear}
form a mixed Frobenius structure on $M$ of reference charge five.
\end{theorem}
A proof will be given in \S \ref{proof-prop-P3-2}.
\subsection{Quantum cohomology of $V=\mathbb{P}(\mathcal{O}_{\mathbb{P}^3}
\oplus\mathcal{O}_{\mathbb{P}^3}(-4))$ }
Let $V=\mathbb{P}(\mathcal{O}_{\mathbb{P}^3}
\oplus\mathcal{O}_{\mathbb{P}^3}(-4))$
be the projective compactification of the line bundle
$\mathcal{O}(-4)\to \mathbb{P}^3$.
Let $\mathrm{pr}: V\to \mathbb{P}^3$ be the projection
and let $\Gamma_i=\mathrm{pr}^*\gamma_i$ ($0\leq i\leq 3$).
Let $\Delta_0\in H^2(V,\mathbb{C})$ be the Poincar\'e dual of the
infinity section
and
$\Delta_i=\Delta_0\cup \Gamma_i\in H^{2(i+1)}(V,\mathbb{C})$.
The cohomology $H^*(V,\mathbb{C})$ is spanned by the basis
\begin{equation}\label{basisV2}
\Gamma_0=1~,\quad \underbrace{\Gamma_1~,\Delta_0}_{H^2(V)}~,
\quad
\underbrace{\Gamma_2~,\Delta_1}_{H^4(V)}~,
\quad
\underbrace{\Gamma_3~,\Delta_2}_{H^6(V)}~,
\quad
\underbrace{\Delta_3}_{H^8(V)}~.
\end{equation}
The cup product is given as follows.
\begin{equation}\label{intersectionV2}
\begin{split}
\Gamma_i\cup \Gamma_j&=\begin{cases}\Gamma_{i+j}&(i+j\leq 3)\\
0&(i+j>3)\end{cases}~,\quad
\\
\Delta_i\cup \Gamma_j&=\begin{cases} \Delta_{i+j}&(i+j\leq 3)\\
0&(i+j>3)\end{cases}~,\quad
\\
\Delta_i\cup\Delta_j&=\begin{cases}4\Delta_{i+j+1} &(i+j<3)\\
0&(i+j\geq 3)\end{cases}~.
\end{split}
\end{equation}
The intersection form $\langle~,~\rangle$ is computed from the cup product
and $\int_V \Delta_3=1$. Explicitly, the pairings which do not vanish are
\begin{equation}\label{intersection-V2}
\begin{split}
&\langle \Gamma_0,\Delta_3\rangle=1~,\quad
\langle \Gamma_k,\Delta_{3-k}\rangle=1~,
\\
&\langle \Delta_{k-1},\Delta_{3-k}\rangle=4~\quad
(1\leq k\leq 3)~.
\end{split}
\end{equation}
The dual basis of \eqref{basisV2} is given by
\begin{equation}\nonumber
\begin{split}
&\Gamma_0^{\vee}=\Delta_3~,\quad
\Delta_3^{\vee}=\Gamma_0~,
\\
&\Gamma_k^{\vee}=\Delta_{3-k}-4\Gamma_{4-k}~,\quad
{\Delta^{\vee}_{k-1}}=\Gamma_{4-k}\quad (1\leq k\leq 3)~.
\end{split}
\end{equation}
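One checks directly from \eqref{intersectionV2} and \eqref{intersection-V2} that this is indeed the dual basis; for instance, for $1\leq k\leq 3$,
$$\langle \Gamma_k^{\vee},\Gamma_k\rangle=\langle \Delta_{3-k},\Gamma_k\rangle-4\langle \Gamma_{4-k},\Gamma_k\rangle=1-0=1~,\qquad
\langle \Gamma_k^{\vee},\Delta_{k-1}\rangle=\langle \Delta_{3-k},\Delta_{k-1}\rangle-4\langle \Gamma_{4-k},\Delta_{k-1}\rangle=4-4=0~.$$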
Now we consider the quantum cup product.
First we fix a basis of $H_2(V,\mathbb{Z})$.
Let
$\iota:\mathbb{P}^3\rightarrow V$ be the inclusion as the zero
section of $\mathcal{O}_{\mathbb{P}^3}(-4)\subset V$ and
$C_1'=\iota_*\mathbb{P}^1$.
Let $C_{0}'$ be the fiber class of the $\mathbb{P}^1$-bundle
$\mathrm{pr}:V\to \mathbb{P}^3$.
Then $\{C_0',C_{1}'\}$ is a basis of $H_2(V,\mathbb{Z})$
which is dual to the basis $\{\Delta_0,\Gamma_1\}$
of $H^2(V,\mathbb{C})$.
Let $t^0,t^1,s^0,t^2,s^1,t^3,s^2,s^3$
be the coordinates of $H^*(V)$ associated with the basis \eqref{basisV2}.
Let $q_0,q_1$ be parameters
associated to $C_0',~C_{1}'$.
\begin{lemma}\label{lemP3}
The genus zero Gromov--Witten potential $\Phi(t,s\,;q)$
of $V$ becomes as follows.
\begin{equation}\nonumber
\begin{split}
\Phi(t,s\,;q)&=\Phi_{cl}+\Phi_{qu}+\Phi_1~,
\\
\Phi_{cl}(t,s)&=
\frac{1}{2}\sum_{\begin{subarray}{c} 0\leq k,l\leq 3,\\k+l\leq 3 \end{subarray}}
t^kt^ls^{3-k-l}
+\frac{4}{2}
\sum_{\begin{subarray}{c}0\leq k,l\leq 3,\\k+l\leq 2 \end{subarray}}
t^{2-k-l}s^ks^l~,
\\
\Phi_1&=\mathcal{O}(q_{0})~.
\end{split}
\end{equation}
Here $\Phi_{qu}$ is the function in $t^1,t^2$ defined in \eqref{local-pot-P3}.
\end{lemma}
\begin{proof}
Notice that the class $\beta\in H_2(V,\mathbb{Z})$ of an effective curve can be written as
$\beta=\beta_0C_0'+\beta_1C_1'\neq 0$ with
$\beta_0,\beta_1\geq 0$.
Since the moduli stack of stable maps $\overline{M}_{0,n}(V,\beta)$ ($\beta\neq 0$)
is empty if $\beta$ is not effective,
we can decompose the Gromov--Witten potential $\Phi$ into
three parts: $\Phi_{cl}$ which is the contribution of $\beta=0$,
$\Phi_0$ which is the contribution of homology classes
$\beta$ such that $\beta=\beta_1 C_1'$ ($\beta_1>0$),
and the remaining part $\Phi_{1}$
which is the contribution of $\beta=\beta_0 C_0'+\beta_1 C_1'$
($\beta_0,\beta_1\geq 0$)
with $\beta_0\neq 0$.
$\Phi_{cl}$ can be computed by the triple intersection
(see \eqref{GW-classical}).
We have $\Phi_1=\mathcal{O}(q_0)$.
Now we consider $\Phi_0$.
Notice that since $-c_1(K_V)=2\Delta_0$,
the virtual dimension \eqref{virt-dim} of $\overline{M}_{0,n}(V,\beta)
$
is $1+n$ if $\beta_0=0$.
Therefore we have
\begin{equation}\nonumber
\begin{split}
\int_{[\overline{M}_{0,n}(V,\beta)]^{vir}}
\prod_{i=1}^n \,ev^*_i\mathbf{t}
&=\sum_{j=1}^n \int_{[\overline{M}_{0,n}(V,\beta)]^{vir}}
ev^*_j \mathbf{t}''\,
\prod_{\begin{subarray}{c}1\leq i\leq n;\\i\neq j\end{subarray}}
\,ev^*_i\mathbf{t}'~
\\
&=n (\beta_1 t^1)^{n-1}
\int_{[\overline{M}_{0,1}(V,\beta)]^{vir}}ev^*_1 \mathbf{t}''
\\
&=n(\beta_1 t^1)^{n-1} t^2
\int_{[\overline{M}_{0,1}(V,\beta)]^{vir}}ev^*_1 \Gamma_2~
\\
&=n(\beta_1 t^1)^{n-1} t^2\,N_{\beta_1}~,
\\
&\Big(\mathbf{t}'=t^1\Gamma_1 +s^0\Delta_0~,\quad
\mathbf{t}''=t^2\Gamma_2+s^1 \Delta_1\Big)~.
\end{split}
\end{equation}
Here $N_{\beta_1}$
is the genus zero local Gromov--Witten invariant
of $\mathbb{P}^3 $ defined in \eqref{local-GW-P3}.
The first equality follows from degree considerations and
the fundamental class axiom,
and the second equality follows from the divisor axiom.
The third and fourth equalities follow from the next lemma.
This shows that $\Phi_0=\Phi_{qu}$.
\end{proof}
\begin{lemma}
For $\beta_1>0$,
\begin{equation}\begin{split}\nonumber
&
\int_{[\overline{M}_{0,1}(V,\,\beta_1 C_1')]^{vir}}ev^*_1 \Delta_1=0~,
\\
&\int_{[\overline{M}_{0,1}(V,\,\beta_1 C_1')]^{vir}}ev^*_1 \Gamma_2
=N_{\beta_1}~.
\end{split}
\end{equation}
\end{lemma}
\begin{proof}
Let us consider the $\mathbb{C}^*$-action in the fiber direction
of $V$ and do the
localization calculation \cite{GraPa}.
The fixed point locus is
$$
\overline{M}_{0,1}(V,\beta_1 C_1')^{\mathbb{C}^*}=
\iota (\overline{M}_{0,1}(\mathbb{P}^3,\beta_1\mathbb{P}^1))~.
$$
Here we use $\iota$ also as the map
$\overline{M}_{0,1}(\mathbb{P}^3,\beta_1\mathbb{P}^1)\to
\overline{M}_{0,1}(V,\beta_1 C_1')
$
induced from the inclusion $\iota:\mathbb{P}^3\to V$
as the zero section of $\mathcal{O}(-4)$.
Therefore we have
\begin{equation}\nonumber
\int_{[\overline{M}_{0,1}(V,\beta_1 C_1')]^{vir}}ev^*_1\Delta_1
=
\int_{[\overline{M}_{0,1}(\mathbb{P}^3,\beta_1\mathbb{P}^1)]^{vir}}
\iota^*(ev_1^*\Delta_1)\cup e(R^1 \mu_*ev_2^*\mathcal{O}_{\mathbb{P}^3}(-4))
~.
\end{equation}
Here $e(R^1 \mu_*ev_2^*\mathcal{O}_{\mathbb{P}^3}(-4))$
is the contribution of the normal bundle of the fixed loci
(see \cite[Proposition 2.2]{KonishiMinabe}).
Since the commutativity of the diagram
$$
\begin{CD}
\overline{M}_{0,1}(\mathbb{P}^3,\beta_1\mathbb{P}^1)@>\iota>>\overline{M}_{0,1}(V,\beta_1C_1')\\
@Vev_1VV @Vev_1VV\\
\mathbb{P}^3@>\iota>>V
\end{CD}
$$
implies $\iota^*ev_1^*\Delta_1=ev^*_1 \iota^*\Delta_1=0$,
we obtain the first statement.
The proof of the second statement is similar.
\end{proof}
Let us consider the specialization $q_0=0$, $q_1=1$.
Then by Lemma \ref{lemP3},
the quantum cup product reduces to the following:
\begin{equation}\label{quantum-cup-V2}
\begin{split}
\Gamma_k\circ\Gamma_l&=\Gamma_{k+l}
+\sum_{j=1}^{2}~\frac{\partial^3 \Phi_{qu}}{\partial t^k \partial t^l\partial t^j}~(\Gamma_{j})^{\vee}~,
\\
\Delta_i\,\circ ~~&=\Delta_i\,\cup~~~.
\end{split}
\end{equation}
\begin{lemma}\label{P3-V}
The multiplication \eqref{quantum-cup-V2},
the intersection form \eqref{intersection-V2}
(regarded as the multiplication on $T_t\tilde{M}$
and the metric by the canonical isomorphism $H^*(V)\stackrel{\sim}{\rightarrow}T_t\tilde{M}$) and
the vector field
\begin{equation}\label{Euler-P3V}
E=t^0\frac{\partial}{\partial t^0}
-\sum_{k=2}^3 \,(k-1)\Big(t^k\frac{\partial}{\partial t^k}
+s^{k-1}\frac{\partial}{\partial s^{k-1}}\Big)
-3s^3\frac{\partial}{\partial s^3}
+2\frac{\partial}{\partial s^0}~
\end{equation}
form a Frobenius structure of charge four on $\tilde{M}=H^{*}(V,\mathbb{C})$.
\end{lemma}
The proof of the lemma is similar to that of Lemma \ref{FS-V} and is omitted.
\subsection{Proof of Theorem \ref{prop-P3-2}}
\label{proof-prop-P3-2}
We first apply
Theorem \ref{thm:nilpotent2}
to the Frobenius structure on $\tilde{M}=H^*(V,\mathbb{C})$ in Lemma \ref{P3-V}
with the nilpotent vector field $\frac{\partial}{\partial s^0}$.
Let us construct the Frobenius filtration.
Using the canonical isomorphism $T_t\tilde{M}\cong H^*(V,\mathbb{C})$,
we write it down as that of $H^*(V,\mathbb{C})$.
Let $I$ be the ideal generated by $\Delta_0$:
$$
I=\mathbb{C}\Delta_0\oplus\mathbb{C}\Delta_1\oplus
\mathbb{C}\Delta_2\oplus \mathbb{C}\Delta_3~\subset~H^*(V).
$$
$J_k=\mathrm{Ker}\,(\Delta_0^k\,\cup)$ are as follows.
\begin{equation}
\begin{split}
J_1&=
\mathbb{C}\Gamma_3^{\vee}\oplus \mathbb{C}\Gamma_2^{\vee}\oplus
\mathbb{C}\Gamma_1^{\vee}
\oplus \mathbb{C}\Delta_3~,
\\
J_2&=J_1+ (\mathbb{C}\Delta_{2}\oplus \mathbb{C}\Gamma_3)
=\mathbb{C}\Gamma_3^{\vee}\oplus \mathbb{C}\Gamma_2^{\vee}
\oplus H^{\geq 6}(V)~,
\\
J_3&=\mathbb{C}\Gamma_{3}^{\vee}\oplus H^{\geq 4}(V)~,
\\
J_4&=H^{\geq 2}(V)~,
\\
J_5&=H^*(V)~.
\end{split}
\end{equation}
Therefore the filtration $I_{\bullet}$ defined in \eqref{nilp1} on
$H^*(V,\mathbb{C})$ is given by
\begin{equation}\label{filtration-V2}
I_0=I~,\quad
I_k=H^{\geq 2 }(V)~\quad (1\leq k\leq 4)~,\quad
I_{5}=H^*(V)~.
\end{equation}
The bilinear forms on $I_k/I_{k-1}$
(see Definition \eqref{nilp2}) are
\begin{equation}\label{bilinear-V2}
\begin{split}
k=0\quad&([\Delta_k],[\Delta_l])_0=
\langle \Delta_k,\Gamma_l\rangle=\delta_{k+l,3}~,
\\
k=1\quad &([\Gamma_k],[\Gamma_l])_1=\Big\langle
\Gamma_k-\frac{1}{4}\Delta_{k-1},
\Gamma_l-\frac{1}{4}\Delta_{l-1}
\Big\rangle
\\
&\quad\quad\quad\quad
=\begin{cases}
-\frac{1}{4}&(k+l=4)\\
0&(k+l\neq 4)
\end{cases},
\\
k=5\quad& (1,1)_{5}=\langle 1 ,\Delta_0^{4}\rangle=4^3~.
\end{split}
\end{equation}
By Theorem \ref{thm:nilpotent2}, we have the following lemma.
\begin{lemma}
The trivial connection, the vector field $E$ \eqref{Euler-P3V},
the multiplication \eqref{quantum-cup-V2},
the filtration \eqref{filtration-V2} and the bilinear forms \eqref{bilinear-V2}
form a MFS of reference charge five on $\tilde{M}=H^*(V,\mathbb{C})$~.
\end{lemma}
Next we apply Corollary \ref{quotient-construction2}.
Since
$I$ is the kernel of
the pullback
$\iota^*: H^*(V,\mathbb{C})\to H^*(\mathbb{P}^3,\mathbb{C})$
by the inclusion $\iota:\mathbb{P}^3 \hookrightarrow \mathcal{O}(-4)\subset V$,
if we set $$
\tilde{M}^{(1)}=\{s^0=s^1=s^2=s^3=0\}\subset \tilde{M}~,
$$
then it is naturally isomorphic to $H^*(\mathbb{P}^3,\mathbb{C})$.
Theorem \ref{prop-P3-2} follows from
Corollary \ref{quotient-construction2} and Lemma \ref{P3-V}.
\appendix
\section{Deformed connection}
\label{section:deformed-connection}
In this appendix, we define an analogue of the deformed connection
of the Frobenius structure \cite{Dubrovin2} for the MFS. Let
$
(\nabla,~E,~\circ,~I_{\bullet},~(~,~)_{\bullet})
$
be a MFS on $M$ of reference charge $D$
and let $t^{ka}$ $(k\in \mathbb{Z},~1\leq a\leq m_k)$
be a system of local flat coordinates satisfying \eqref{flat-frame}.
\subsection{Operators}
Define endomorphisms $\mathcal{U},\mathcal{V}:TM\rightarrow TM$ by
\begin{equation}
\mathcal{U}(x)=E\circ x~,\quad
\mathcal{V}(x)=\nabla_x E -\frac{2-D}{2}\,x~.
\end{equation}
\begin{lemma} If $x\in \Gamma(I_k)$, then
$\mathcal{U}(x)\, ,\, \mathcal{V}(x)\in \Gamma(I_k)$.
\end{lemma}
\begin{proof} If $x\in \Gamma(I_k)$,
$\mathcal{U}(x)=E\circ x\in \Gamma(I_k)$
since $I_k$ is an ideal.
If $x\in \Gamma(I_k)$, we have
$$
\nabla_x E=[x,E]+\nabla_E\, x~\in\Gamma(I_k) ~,
$$
by the torsion free condition for $\nabla$ and
the assumptions that $I_k$ is $E$-closed and $\nabla$-closed.
\end{proof}
The above lemma implies that $\mathcal{U}$, $\mathcal{V}$
induce endomorphisms $\mathcal{U}^{(k)}$, $\mathcal{V}^{(k)}$
on $I_k/I_{k-1}$.
In the local flat coordinate expression,
\begin{equation}\nonumber
\begin{split}
\mathcal{U}(\partial_{ka})
&=\sum_{\begin{subarray}{c}l\in \mathbb{Z},\\1\leq b\leq m_l\end{subarray}}
\sum_{\begin{subarray}{c}j\leq k,l,\\1\leq c\leq m_j\end{subarray}}
E^{lb}C_{ka,lb}^{jc} \partial_{jc}~,
\\
\mathcal{U}^{(k)}(\partial_{ka})
&=\sum_{\begin{subarray}{c}l \geq k,\\1\leq b\leq m_l\end{subarray}}
\sum_{1\leq c\leq m_k}
E^{lb}C_{ka,lb}^{kc} \partial_{kc}~,
\\
\mathcal{V}(\partial_{ka})&=
\sum_{\begin{subarray}{c}l\leq k ,\\1\leq b\leq m_l\end{subarray}}
(\partial_{ka}E^{lb})\partial_{lb}-\frac{2-D}{2}\partial_{ka}~,
\\
\mathcal{V}^{(k)}(\partial_{ka})&=
\sum_{1\leq b\leq m_k}
(\partial_{ka}E^{kb})\partial_{kb}-\frac{2-D}{2}\partial_{ka}~.
\end{split}
\end{equation}
\begin{remark}
The assumption $\nabla\nabla E=0$ implies $\nabla \mathcal{V}=0$.
In other words, the matrix representations of $\mathcal{V}$ and $\mathcal{V}^{(k)}$
with respect to the flat basis $\{\partial_{ka}\}$ are constant matrices.
Notice also that
the condition
\eqref{E-metric} is equivalent to
\begin{equation}
( \mathcal{V}^{(k)}(x),y)_k+
( x,\mathcal{V}^{(k)}(y))_k=k( x,y)_k~.
\end{equation}
\end{remark}
\subsection{Deformed connection}
Let $\Tilde{M}=M\times \mathbb{C}^*$ and
let $\hbar$ be the coordinate of $\mathbb{C}^*$.
For a holomorphic vector bundle $\mathbb{E}\to \Tilde{M}$,
$\Tilde{\Gamma}(\mathbb{E})$ denotes
the space of holomorphic sections of $\mathbb{E}$ on some open subset
$\Tilde{U}\subset \Tilde{M}$.
Recall that
$\pi_k:TM\to TM/I_{k-1}$ is the projection
and that $\nabla^{(k)}$ and $\circ_k$
are the connection and the multiplication on $TM/I_{k-1}$
induced from the connection $\nabla$ and the multiplication $\circ$~.
\begin{definition}
Define a connection
$\Tilde{\nabla}^{(k)}$ on $I_k/I_{k-1}\times T\mathbb{C}^*
\rightarrow \tilde{M}$ by
\begin{equation}
\begin{split}
&\Tilde{\nabla}^{(k)}_x y=\nabla^{(k)}_x\,y+\hbar\,\, \pi_k(x)\circ_k y
\quad (x\in\Tilde{\Gamma}(TM),~y\in \Tilde{\Gamma}(I_k/I_{k-1}))~,
\\
&\Tilde{\nabla}^{(k)}_{\hbar} y=\partial_{\hbar}\, y
+\mathcal{U}^{(k)}(y)+\frac{1}{\hbar}\,(\mathcal{V}^{(k)}(y)-\frac{k}{2}y\,)~,
\\
&\Tilde{\nabla}^{(k)}_x (\partial_{\hbar})=
\Tilde{\nabla}^{(k)}_{\hbar}(\partial_{\hbar})=0~.
\end{split}
\end{equation}
\end{definition}
We write $\Tilde{\nabla}_{la}^{(k)}$
for $\Tilde{\nabla}_{x}^{(k)}$ with $x=\partial_{la}$.
In the local flat coordinate expression,
\begin{equation}\label{deformed-conn1}
\begin{split}
\Tilde{\nabla}_{lb}^{(k)} (\partial_{ka})&=
\hbar\sum_{1\leq c\leq m_k}C_{ka,lb}^{kc} \,\partial_{kc}
\quad (\text{this is zero if $l<k$ by \eqref{c-symmetry2}})~,
\\
\Tilde{\nabla}^{(k)}_{\hbar}(\partial_{ka})
&=\sum_{\begin{subarray}{c}l\geq k,\\
1\leq b\leq m_l\end{subarray}}
\sum_{1\leq c\leq m_k}
(E^{lb}C_{ka,lb}^{kc})\partial_{kc}
\\
&+\frac{1}{\hbar}\,
\Big(\sum_{1\leq b\leq m_k}
(\partial_{ka}E^{kb})\partial_{kb}
-\frac{2-D+k}{2}\partial_{ka}\Big)~.
\end{split}
\end{equation}
\begin{proposition}
The deformed connection $\Tilde{\nabla}^{(k)}$ is flat.
\end{proposition}
\begin{proof}
Let
$
\tilde{\Omega}^{(k)}$
be the curvature of $\Tilde{\nabla}^{(k)}$.
We first show that $\Tilde{\Omega}^{(k)}(\partial_{la},\partial_{jb})=
\frac{1}{2}(\Tilde{\nabla}^{(k)}_{la}\Tilde{\nabla}^{(k)}_{jb}
-\Tilde{\nabla}^{(k)}_{jb}\Tilde{\nabla}^{(k)}_{la})
=0$.
By the first equation in \eqref{deformed-conn1},
it is immediate to check that
$\Tilde{\Omega}^{(k)}(\partial_{la},\partial_{jb})(\partial_{kc})=0
$ if $l,j<k$. If $l<k$ and $j\geq k$,
\begin{equation}\nonumber
\Tilde{\Omega}^{(k)}(\partial_{la},\partial_{jb})(\partial_{kc})=
\Tilde{\nabla}_{la}^{(k)}\Big(\sum_{1\leq d\leq m_k}
C_{jb,kc}^{kd}\partial_{kd}\Big)
=\sum_{1\leq d\leq m_k}
(\partial_{la}C_{jb,kc}^{kd}) \partial_{kd}
\stackrel{\eqref{c-symmetry2}}{=}0~.
\end{equation}
If $l,j\geq k$,
\begin{equation}\nonumber
\begin{split}
\Tilde{\Omega}^{(k)}(\partial_{la},\partial_{jb})(\partial_{kc})
&=\Tilde{\nabla}_{la}^{(k)}\Big(\sum_{1\leq d\leq m_k}
C_{jb,kc}^{kd}\partial_{kd}\Big)
-(la~\leftrightarrow~jb)
\\
&=\sum_{1\leq d\leq m_k}
(\partial_{la}C_{jb,kc}^{kd}-\partial_{jb}C_{la,kc}^{kd}) \partial_{kd}
\\&+
\hbar \sum_{1\leq d,f\leq m_k}(C_{jb,kc}^{kd}C_{la,kd}^{kf}-
C_{la,kc}^{kd}C_{jb,kd}^{kf})\,\partial_{kf}
=0~,
\end{split}
\end{equation}
by \eqref{c-symmetry2} and the associativity.
Next we show $\Tilde{\Omega}^{(k)}(\partial_{la},\partial_{\hbar})=
\frac{1}{2}(\Tilde{\nabla}^{(k)}_{la}\Tilde{\nabla}^{(k)}_{\hbar}
-\Tilde{\nabla}^{(k)}_{\hbar}\Tilde{\nabla}^{(k)}_{la})
=0$.
By the second equation in \eqref{deformed-conn1},
\eqref{E-linear} and \eqref{c-symmetry2},
if $l<k,$ we have
\begin{equation}\nonumber
\begin{split}
\Tilde{\Omega}^{(k)}(\partial_{la},\partial_{\hbar})(\partial_{kc})
&=\sum_{\begin{subarray}{c}j\geq k,\\
1\leq b\leq m_j\end{subarray}}
\sum_{1\leq d\leq m_k}
\big(\partial_{la}(E^{jb}C_{kc,jb}^{kd})\big)\partial_{kd}
=0~.
\end{split}
\end{equation}
If $l\geq k$, we have
\begin{equation}\nonumber
\begin{split}
\Tilde{\nabla}^{(k)}_{la}\Tilde{\nabla}^{(k)}_{\hbar}
(\partial_{kc})
&=
\sum_{1\leq d\leq m_k}
\Big(\sum_{\begin{subarray}{c}j\geq k,\\
1\leq b\leq m_j\end{subarray}}
\partial_{la}(E^{jb}C_{kc,jb}^{kd})
+\sum_{1\leq b\leq m_k}
C_{la,kb}^{kd}\partial_{kc}E^{kb}-\frac{2-D+k}{2}C_{la,kc}^{kd}
\Big)\,
\partial_{kd}\\
&+\hbar\,
\sum_{1\leq f\leq m_k}
\Big(\sum_{1\leq d\leq m_k}
\sum_{\begin{subarray}{c}j\geq k,\\
1\leq b\leq m_j\end{subarray}}
E^{jb}C_{kc,jb}^{kd}C_{la,kd}^{kf}\Big) \,\partial_{kf}
~,
\\
\Tilde{\nabla}^{(k)}_{\hbar}\Tilde{\nabla}^{(k)}_{la}
(\partial_{kc})
&=\sum_{1\leq d\leq m_k}
\Big(C_{la,kc}^{kd}
+\sum_{1\leq b\leq m_k}C_{la,kc}^{kb}\partial_{kb}E^{kd}
-\frac{2-D+k}{2}C_{la,kc}^{kd}
\Big)\,\partial_{kd}
\\
&+\hbar\,\sum_{1\leq f\leq m_k}
\Big(
\sum_{1\leq d\leq m_k}
\sum_{\begin{subarray}{c}j\geq k,\\
1\leq b\leq m_j\end{subarray}}
E^{jb} C_{la,kc}^{kd}C_{kd,jb}^{kf}
\Big)\,\partial_{kf} ~.
\end{split}
\end{equation}
Thus by \eqref{E-multiplication2} and the associativity, we obtain
\begin{equation}\nonumber
\begin{split}
\Tilde{\Omega}^{(k)}(\partial_{la},\partial_{\hbar})(\partial_{kc})
&=\Tilde{\nabla}^{(k)}_{la}\Tilde{\nabla}^{(k)}_{\hbar}
(\partial_{kc})
-\Tilde{\nabla}^{(k)}_{\hbar}\Tilde{\nabla}^{(k)}_{la}
(\partial_{kc})=0~.
\end{split}
\end{equation}
\end{proof}
\subsection{Deformed flat coordinates}
\begin{proposition}
There exist (local) holomorphic functions
$\tilde{t}^{ka}(t,\hbar) $ $(k\in \mathbb{Z}$, $1\leq a\leq m_k)$
on $\tilde{M}$
such that $\hbar$, $\tilde{t}^{ka}(t,\hbar)$ are
a system of local coordinates on $\tilde{M}$
satisfying the following conditions:
\begin{equation}\nonumber
\left \{
\frac{\partial}{\partial \tilde{t}^{la}} ~{\Big |}~ l\leq k, 1\leq a\leq m_l
\right\}\quad
\text{is a local frame of $I_k$,}
\end{equation}
\begin{equation}
\tilde{\nabla}^{(k)}\pi_k\Big(\frac{\partial}{\partial \tilde{t}^{ka}}\Big)
=0~.
\end{equation}
\end{proposition}
We call $\tilde{t}^{ka}(t,\hbar)$ deformed flat coordinates.
\begin{proof}
Let $\mathrm{Ann}_k\subset T^*M$ be the annihilator of $I_k$:
$$
\mathrm{Ann}_k:=\{x\in T^*M\mid x(y)=0~,
~~{}^{\forall}y\in I_k \}~.
$$
Its local frame is given by
$
dt^{la}~ (l>k).
$
Notice that
the dual bundle of $I_k/I_{k-1}$ is isomorphic to
$\mathrm{Ann}_{k-1}/\mathrm{Ann}_k$.
We use the same notation $\Tilde{\nabla}^{(k)}$ for the induced dual connection on
$\mathrm{Ann}_{k-1}/\mathrm{Ann}_k \times T^*\mathbb{C}^*\to \tilde{M}$.
The $\Tilde{\nabla}^{(k)}$-flatness condition $\Tilde{\nabla}^{(k)}\xi=0$ for
a section $\xi=\sum_{1\leq a\leq m_k}\xi_{ka}dt^{ka}\in
\tilde{\Gamma}(\mathrm{Ann}_{k-1}/\mathrm{Ann}_k)$ is equivalent to
\begin{equation}\label{flat-dual}
\begin{split}
\partial_{lb}(\xi_{ka})&=
\hbar \sum_{1\leq c\leq m_k}C_{lb,ka}^{kc}\,\xi_{kc}\qquad \text{ and }
\\
\partial_{\hbar}(\xi_{ka})
&=
\sum_{\begin{subarray}{c}
l\geq k,\\1\leq b\leq m_l
\end{subarray}}
\sum_{1\leq c\leq m_k}
E^{lb}C_{lb,ka}^{kc}\,\xi_{kc}
+
\frac{1}{\hbar}\Big(
\sum_{1\leq c\leq m_k}
(\partial_{ka}E^{kc})\xi_{kc}-\frac{2-D+k}{2}\,\xi_{ka}
\Big)~.
\end{split}
\end{equation}
The first equation in \eqref{flat-dual} implies that
$
\partial_{kb}\xi_{ka}
=\partial_{ka}\xi_{kb}$
and
$\partial_{lb}\xi_{ka}=0
$ if $l<k$.
Therefore if $\Tilde{\nabla}^{(k)}\xi=0$, there exists a local function
$\tilde{t}=\tilde{t}(t,\hbar)$ on $\tilde{M}$
satisfying
$
\partial_{ka}\tilde{t}=\xi_{ka}
$ and
$
\partial_{la}\tilde{t}=0
$ ($l<k$).
In other words, there exists $\tilde{t}(t,\hbar)$ such that
$$
d\tilde{t}=\xi+\text{ terms involving $dt^{lb}$ ($l>k$) and $d\hbar$}.
$$
Since $\Tilde{\nabla}^{(k)}$ is flat,
there exists a local frame $\{p^{ka}(t,\hbar)\mid 1\leq a\leq m_k\}$
of
$\mathrm{Ann}_{k-1}/\mathrm{Ann}_k \boxtimes T^*\mathbb{C}^*\to \tilde{M}$
such that
$
\tilde{\nabla}^{(k)}p^{ka}=0
$.
From the argument in the previous paragraph,
we see that there exist local functions
$\Tilde{t}^{ka}(t,\hbar)$ ($k\in \mathbb{Z}$, $1\leq a\leq m_k$)
satisfying the following two conditions:
\begin{equation}\nonumber
\begin{split}
&\{d\tilde{t}^{(la)}\mid l\geq k, 1\leq a\leq m_l\} \text{
is a local frame of $\mathrm{Ann}_{k-1}$},
\\
&p^{(ka)}=d\tilde{t}^{(ka)} \mod dt^{lb} ~~~(l>k),~d\hbar~.
\end{split}
\end{equation}
These $\Tilde{t}^{(ka)}(t,\hbar)$ satisfy the conditions
in the above proposition.
\end{proof}
\subsection{Deformed flat coordinates for
weak Fano toric surfaces}
\label{deformed-coord-toric}
The deformed flat coordinates for the MFS in Theorem \ref{toric3}
are written as follows.\footnote{We omit the details of the calculation for the following results.
They can be found in the first version of this paper on the arXiv.}
Assume that $b_r^{\vee}\neq 0$.
We take the following flat coordinates on $M=H^*(S,\mathbb{C})$
so that the condition
\eqref{flat-frame} is satisfied:
\begin{equation}\label{coord-S}
\begin{split}
&t^{r+1}~,\quad u^r=-\frac{1}{\kappa}\sum_{k=1}^r b_k^{\vee}\,t^k~,
\\
&u^k=\frac{1}{\kappa\cdot b_r^{\vee}}\Big\{
\sum_{\begin{subarray}{c}j\neq k,\\1\leq j\leq r\end{subarray}}
b_jb_j^{\vee}\,t^k
-b_k \sum_{\begin{subarray}{c}j\neq k,\\1\leq j\leq r\end{subarray}}
b_j^{\vee}\,t^j
\Big\}~\quad (1\leq k\leq r-1),\\
& t^0~.
\end{split}
\end{equation}
Solving the flatness equation for $\Tilde{\nabla}^{(k)}$
($1\leq k\leq 4$), we obtain
the following deformed flat coordinates.
\begin{equation}\label{flat-coord-toric}
\begin{split}
&\tilde{t}^{r+1}=e^{\hbar\, t^0}\Big\{
\frac{1}{\sqrt{\hbar}}t^{r+1}+\sqrt{\hbar}
\Big(\frac{\kappa}{2}(u^r)^2
-\sum_{\beta\neq 0}(b\cdot \beta)N_{\beta}\,e^{\beta\cdot t}
\Big)
\Big\}~,
\\
&
{\tilde{u}}^r=\sqrt{\hbar}\,e^{\hbar\, t^0}u^r~,
\\
&\tilde{u}^k=e^{\hbar\,t^0}u^k\quad (1\leq k\leq r-1)~,
\\
&\tilde{t}^0=e^{\hbar\, t^0}~.
\end{split}
\end{equation}
\subsection{Deformed flat coordinates for $\mathbb{P}^3$ }
\label{deformed-coord-P3}
The deformed flat coordinates for the MFS in Theorem \ref{prop-P3-2}
are written as follows.
\begin{equation}\label{deformed-coord-P3-result}
\begin{split}
\tilde{t}^0&=\frac{1}{\hbar}e^{\hbar\,t^0}~,
\\
\tilde{t}^1&=\hbar\, e^{\hbar\,t^0}t^1~,
\\
\tilde{t}^2&=e^{\hbar\,t^0}\Big\{
t^2+\hbar\Big(\frac{(t^1)^2}{2}
-4\sum_{\beta>0} N_{\beta}\,e^{\beta\,t^1}\Big)\Big\}~,
\\
\tilde{t}^3&=e^{\hbar\,t^0}\Big\{
\frac{t^3}{\hbar}+
t^1t^2-4t^2\sum_{\beta>0}\beta N_{\beta}\,e^{\beta\,t^1}
\\
&+\frac{\hbar}{2}
\Big[\frac{(t^1)^3}{3}
-8\sum_{\beta>0}\Big(t^1-\frac{1}{\beta}\Big)N_{\beta}\,e^{\beta\,t^1}
+16\sum_{\beta,\gamma>0} \frac{\beta\gamma}{\beta+\gamma}
N_{\beta}N_{\gamma}e^{(\beta+\gamma)t^1}\Big]
\Big\}~.
\end{split}
\end{equation}
\end{document}
\begin{document}
\maketitle
\begin{abstract}
In this paper we present some basic uniqueness results for evolutive equations under density constraints. First, we develop a rigorous proof of a well-known result (among specialists) in the case where the spontaneous velocity field satisfies a monotonicity assumption: we prove the uniqueness of a solution for first order systems modeling crowd motion with hard congestion effects, introduced recently by \emph{Maury et al.} The monotonicity of the velocity field implies that the $2$-Wasserstein distance along two solutions is $\lambda$-contractive, which in particular implies uniqueness. In the case of diffusive models, we prove the uniqueness of a solution passing through the dual equation, where we use some well-known parabolic estimates to conclude an $L^1$-contraction property. In this case, by the regularization effect of the non-degenerate diffusion, the result follows even if the given velocity field is only $L^\infty$ as in the standard Fokker-Planck equation.
\end{abstract}
\section{Introduction and preliminaries}
Recently, modeling crowd behavior has received a lot of attention in applied mathematics. These models actually are in the heart of many other ones coming from biology (for instance cell migration, tumor growth, pattern formations in animal populations, etc.), particle physics and economics (see for example the recently introduced models of Mean Field Games, \cite{lasry1,lasry2,lasry3}). For more details on these models we direct the reader to the non-exhaustive list of works \cite{Cha1, Col, Cos, CriPicTos, Helb1, Helb3, Hend, Hug1, Hug2}.
In all these models the question of {\it congestion} can play a crucial role. Indeed, from the modeling point of view one could have some `singularities' if individuals want to occupy the same spot. In this paper, we will work with equations which model some type of congestion effects in crowd motion models (for a more detailed description of these models we direct the reader to the references \cite{MauRouSan1, MauRouSanVen, MesSan}). These systems read as
$$
\begin{cases}
\partial_t\rho_t-\nu\Delta\rho_t+\nabla\cdot(\rho_t{\textbf{v}}_t)=0\\
\rho_t\le1,\ \rho|_{t=0}=\rho_0\\
{\textbf{v}}_t=P_{{\rm adm}(\rho_t)}[{\textbf{u}}_t].
\end{cases}
$$
In the above system $\rho_t$ represents the density of a crowd (at time $t$) that moves in $\Omega\subset{\mathbb R}^d$ for a time $T>0$ according to the prescribed velocity ${\textbf{u}}_t$, a given field that everyone would follow in the absence of the others. $\nu\in\{0,1\}$ is just a parameter: if $\nu=0$, one deals with first order systems (the density of the population is just transported by some vector field) introduced in \cite{MauRouSan1}, while for $\nu=1$ one has a second order system (in addition to the transport, the population is also affected by some randomness, modeled by a non-degenerate diffusion) studied in \cite{MesSan}. In order to preserve a density constraint (we suppose that $\rho_t$ does not exceed a given threshold, let us say $1$), at each moment one modifies ${\textbf{u}}_t$ to ${\textbf{v}}_t,$ a field that is the closest to ${\textbf{u}}_t$ (in the $L^2$ sense) among the admissible velocity fields; here the set ${\rm adm}(\rho_t)$ consists of the fields that do not increase the density on already saturated zones $\{\rho_t=1\}.$
In generic congested models a pressure is also acting (Darcy's law): this pressure prevents congestion and, according to various models, it is an increasing function of the density. In the usual \textit{soft} congestion models we have some power law of the type $p=\rho^m$ (porous medium equation). However this choice cannot prevent $\rho$ from being very high, while it is clear that a crowd of people cannot have a density higher than a certain threshold (studies say that the maximum density is $4.5$ people$/m^2$), which we put for convenience equal to $1$: we will refer to the constraint $\rho \leq 1$ as a \textit{hard} congestion effect. We shall present later how the pressure field appears in the models considered in this paper.
Introducing general multivalued monotone operators $\widetilde{\beta}$, the above systems can be written in a compact form as
\begin{equation}\label{eqn:main}
\begin{cases} \partial_t \rho_t + \nabla \cdot ( {\textbf{u}}_t \rho_t ) = \Delta p_t \quad & \text{ in }(0,T)\times\Omega\\ ({\textbf{u}}_t \rho_t - \nabla p) \cdot \hat{\mathbf{n}} = 0 & \text{ on } (0,T)\times\partial \Omega \\ p_t \in \widetilde{\beta} (\rho_t).
\end{cases}
\end{equation}
The two cases $\nu=0$ and $\nu=1$ correspond to two special operators $\widetilde{\beta}_1$ and $\widetilde{\beta}_2$, namely
$$ \widetilde{\beta}_1 (\rho ) = \begin{cases} 0 \quad & \text{ if }\rho <1 \\ [0,+\infty] & \text{ if }\rho=1 \\ +\infty & \text{ if } \rho>1, \end{cases} \qquad \quad
\widetilde{\beta}_2 (\rho ) = \begin{cases} \rho \quad & \text{ if }\rho <1 \\ [1,+\infty] & \text{ if }\rho=1 \\ +\infty & \text{ if } \rho>1; \end{cases} $$
notice in particular that $\widetilde{\beta}_2(\rho) = \widetilde{\beta}_1(\rho)+\rho$. The \textit{hard} congestion effect is due to the fact that $\widetilde{\beta}_{i}(\rho)= +\infty$ whenever $\rho >1$: in fact, this forces $\rho$ to stay less than or equal to $1$.
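Formally, on a region where $\rho_t<1$ the inclusion $p_t\in\widetilde{\beta}_1(\rho_t)$ forces $p_t=0$, so that \eqref{eqn:main} reduces there to the pure transport equation $\partial_t\rho_t+\nabla\cdot({\textbf{u}}_t\rho_t)=0$, while $p_t\in\widetilde{\beta}_2(\rho_t)$ forces $p_t=\rho_t$, so that \eqref{eqn:main} becomes the Fokker-Planck equation $\partial_t\rho_t-\Delta\rho_t+\nabla\cdot({\textbf{u}}_t\rho_t)=0$: the extra effect of the constraint is confined to the saturated zone $\{\rho_t=1\}$.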
It is worthwhile noticing that this problem has some features in common with the Hele-Shaw model and the Stefan problem (see for instance \cite{Igb2, IgbShiWit}), namely in both problems there is a degenerate monotone operator linking the density and the diffusive part. However there are also big differences: in our case we also treat the convection term, which may depend non-smoothly on the position, while usually in the Hele-Shaw models there is a source term for the mass, which we do not treat here since we want to model a crowd moving inside a domain $\Omega$. This modeling assumption is also the reason why we consider the no-flux Neumann boundary condition, since we want people neither to exit nor to enter the domain.
Moreover our equations can also be seen as a quasilinear elliptic-parabolic system with very degenerate nonlinearity (see \cite{Otto,Igb, Carrillo}), for which the issue of uniqueness is still an open problem when we add a driving vector field without imposing an entropy condition. In this context we may write our equations using the variable $u=p+\rho$ as
$$ \partial_t g( u ) + \nabla \cdot ( \Phi (t,x, u ))= \Delta b_i( u ), $$
where $b_1(u)=(u-1)_+$, $g(u)=u-b_1(u)$, $b_2(u)=u$ and $\Phi(t,x,u ) = {\textbf{u}}_t(x) g(u)$. In the case $i=1$ we have a double degeneracy since both $b_1$ and $g$ have a flat part, while in the case $i=2$ only $g(u)$ has a degenerate part. However again the (possibly) rough coefficient in $\Phi$ rules out results already present in the literature. For problems of this form we expect an $L^1$-contraction result, see for example \cite{Carrillo}; notice also that we do not need a concept of entropic solution since the equation reduces to a linear one in the joint variable $(\rho,p)$ (see the systems \eqref{main2} below). Thus the usual concept of weak solutions can be considered for our purposes.
In fact, we will always be dealing with the following reformulations of the systems, using the fact that $p \in \widetilde{\beta}(\rho)$ if and only if $\rho \leq 1$ and $p \in H^1_{\rho}(\Omega)$ (see its definition in \eqref{def:h1r}):
\begin{equation}\label{main2} \begin{cases} \partial_t \rho_t + \nabla \cdot ( {\textbf{u}}_t \rho_t ) = \Delta p_t \; & \text{ in }(0,T)\times\Omega\\ ({\textbf{u}}_t \rho_t - \nabla p) \cdot \hat{\mathbf{n}} = 0 & \text{ on } (0,T)\times\partial \Omega \\ \rho_t \leq 1 , \, p_t \in H_{\rho_t}^1(\Omega). \end{cases} \
\begin{cases} \partial_t \rho_t + \nabla \cdot ( {\textbf{u}}_t \rho_t ) = \Delta (p_t + \rho_t ) \quad & \text{ in }(0,T)\times\Omega\\ ({\textbf{u}}_t \rho_t - \nabla p - \nabla \rho_t) \cdot \hat{\mathbf{n}} = 0 & \text{ on } (0,T)\times\partial \Omega \\ \rho_t \leq 1 , \, p_t \in H_{\rho_t}^1(\Omega). \end{cases}
\end{equation}
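Let us also record how the formulation in the variable $u=p+\rho$ above matches \eqref{main2}: since $p\geq 0$ and $p(1-\rho)=0$, one has $b_1(u)=(p+\rho-1)_+=p$ and $g(u)=u-b_1(u)=\rho$, while $b_2(u)=u=p+\rho$; hence $\partial_t g(u)+\nabla\cdot(\Phi(t,x,u))=\Delta b_i(u)$ is exactly the first, respectively the second, equation in \eqref{main2}.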
In Subsection \ref{oneone} we will derive these systems in another way: that derivation also justifies the regularity assumption on $p_t$.
A very powerful tool to attack this type of macroscopic hard-congestion problems -- where we impose a constraint on the density of the population -- is the theory of optimal transport (see \cite{villani, OTAM}), as we can see in the recent works \cite{MauRouSan1, MauRouSan2, MauRouSanVen, MesSan, AleKimYao}. In this framework, the density of the agents satisfies a continuity or a Fokker-Planck equation (with a velocity field taking into account the congestion effects) and can be seen as a curve in the Wasserstein space.
Our aim in this paper is to prove some basic results on the uniqueness of solutions in this setting. As far as we are aware, this question has remained open in full generality in the models studied in \cite{MauRouSan1, MauRouSan2, MauRouSanVen, MesSan}. Let us remark that the uniqueness question is crucial if one wants to include this type of model into a larger system and aims to show existence results by fixed point methods, as is done for instance for Mean Field Games (see for example \cite{Por}).
We will treat two different cases, with very different approaches: in the first one we simply consider a crowd driven by a given velocity field and subject to a density constraint. In this case the assumption that the velocity field is monotone will be crucial in order to prove a $\lambda$-contraction result for the solutions, which will imply uniqueness. In the second case we add a diffusive term, which models some randomness in the crowd movement (see \cite{MesSan} for recent developments and existence results in this setting); in this case we prove a standard $L^1$-contraction property by passing to the dual problem and proving existence there for sufficiently generic data. Here a major role is played by the regularizing effect of the Laplacian, which allows us to prove the result even if the velocity field is merely bounded.
Let us underline that the core of both methods is classical in the literature.
\begin{itemize}
\item[(i)] On one hand we use the differentiation of the squared Wasserstein distance along two solutions of continuity-type equations, and then use a Gr\"onwall-type argument to show a contractive property and hence uniqueness of solutions for these evolution equations. This type of proof is very common in optimal transport theory; nevertheless our analysis requires some finer nontrivial new ideas, mainly because of the appearance of the new pressure variable, which we can formally identify as a term associated to the subdifferential of the indicator function of the (geodesically convex) set $\{ \rho \in {\mathscr P} (\Omega) \; : \; \rho \leq 1 \}$. Moreover, we believe that by the nature of our problem the machinery of the $L^1$-contraction through the doubling and re-doubling of the variables (successfully used for instance in \cite{Otto} and \cite{Carrillo}) would be very difficult and heavy to adapt to our situation, while the method proposed here is simple and elegant. In addition, in this context we give new proofs for some of the results from the theory of optimal transport that we use.
\item[(ii)] On the other hand, in the second order case (when we add a non-degenerate diffusion term to the model) the idea of passing to the dual equation to show uniqueness is very similar to the techniques developed in \cite{Cro} for instance; the method to obtain the $L^1$-contraction follows the same lines as \cite{Bou}. However the PDE studied here seems to be new in this context and the uniqueness result is interesting in its own right in the theory of crowd motion.
\end{itemize}
We underline moreover that the two methods used for the two models are mutually exclusive: the $W_2$ distance along two solutions of the second order model would be contractive if the vector field were monotone, but we do not require this assumption; the parabolic estimates used for the second model rely heavily on the fact that one has a non-degenerate diffusion in the system, which is clearly not the case for the first order model.
We remark also that we expect an $L^1$-contraction result in the first case as well, since it can be seen as a doubly degenerate quasilinear elliptic-parabolic equation \cite{Otto}. In Section \ref{ss:l1} we provide a sketch of this result in the case where the velocity field is monotone; we underline that there we use the uniqueness proved in Section \ref{two}.
\subsection{The density constraint: admissible velocities and pressures}\label{oneone}
In order to model crowd movement in the macroscopic setting with hard congestion, we work in a convex bounded domain $\Omega\subset{\mathbb R}^d$ such that $|\Omega|>1$. The evolution of the crowd is described through the evolution of its density, which is assumed to be a probability measure on $\Omega$. The condition we want to impose is a bound on the density of the crowd, which has to stay less than or equal to $1$ at all times. The set of admissible measures is therefore
$${\mathcal K}_1:=\{\rho\in{\mathscr P}(\Omega):\rho\le 1\ {\rm a.e.}\}$$
(here and in the sequel we identify $\rho$ with its density with respect to the Lebesgue measure $\mathscr{L}^d$).
As for the velocities, we need to impose that the density does not increase where it is saturated: informally, we would say that ${\textbf{v}}$ is an admissible velocity for the measure $\rho \in{\mathcal K}_1$ if $ \nabla\cdot {\textbf{v}} \geq 0$ on the set $\{ \rho=1\}$ and ${\textbf{v}} \cdot \hat{n} \leq 0$ on $\partial \Omega$, where $\hat{n}$ is the outward normal. In order to give a rigorous definition we have to introduce the set of pressures:
\begin{equation}\label{def:h1r}
H^1_{\rho}(\Omega) := \{ p \in H^{1}(\Omega) \; : \; p \geq 0 , \;\, p (1-\rho) = 0\ {\rm a.e.}\}.
\end{equation}
Then, using the integration by parts formula, for $p\in H^1_\rho(\Omega)$
and ${\textbf{v}}$ admissible one should formally have
$$0\leq \int_{\Omega} p \,\nabla\cdot {\textbf{v}} \,{\rm d} x - \int_{\partial \Omega} p\, {\textbf{v}} \cdot \hat{n} \,{\rm d}{\mathscr H}^{d-1}= - \int_{\Omega} {\textbf{v}} \cdot \nabla p \,{\rm d} x,$$ and so we can define
$$ {\rm adm} (\rho) :=\left\{ {\textbf{v}} \in L^2(\Omega; {\mathbb R}^d ) \; : \; \int_{\Omega} {\textbf{v}} \cdot \nabla p \,{\rm d} x \leq 0 \ \text{ for every } p \in H^1_{\rho}(\Omega) \right\}. $$
In the sequel we denote by $P_{{\rm adm}(\rho)}$ the $L^2(\Omega, \mathscr{L}^d; {\mathbb R}^d)$-projection onto ${\rm adm}(\rho)$, which is well defined since ${\rm adm}(\rho)$ is a nonempty closed convex cone.
Now, in order to preserve the constraint $\rho \leq 1$, we impose that the velocity always belongs to ${\rm adm} (\rho )$, so that a generic evolution equation with density constraint reads
$$ \begin{cases} \partial_t \rho_t + \nabla\cdot (\rho_t {\textbf{v}}_t) = 0 \\ \rho_t \leq 1, \, {\textbf{v}}_t \in {\rm adm} (\rho_t). \end{cases} $$
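Let us note that the admissibility condition is, at least formally, consistent with the constraint: on the saturated set $\{\rho_t=1\}$ one has
$$\partial_t\rho_t=-\nabla\cdot(\rho_t{\textbf{v}}_t)=-\nabla\cdot{\textbf{v}}_t\leq 0,$$
so the density cannot increase where it is already saturated.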
One of the simplest such models arises when we have a prescribed time-dependent velocity field ${\textbf{u}}_t$ and we impose that the velocity ${\textbf{v}}_t$ is, at each time, the closest possible to ${\textbf{u}}_t$. This describes a situation where the crowd wants to move with velocity ${\textbf{u}}_t$ but cannot, because of the density constraint, and so it adapts its velocity, deviating as little as possible; this results in a highly nonlocal and discontinuous effect. The first order problem hence reads
\begin{equation}\label{fp0}
\begin{cases}
\partial_t\rho_t+\nabla\cdot(\rho_t{\textbf{v}}_t)=0 \quad \text{ in }(0,T)\times\Omega\\
\rho_t\le1,\ \rho|_{t=0}=\rho_0\\
{\textbf{v}}_t=P_{{\rm adm}(\rho_t)}[{\textbf{u}}_t],
\end{cases}
\end{equation}
where the first equation is meant in the weak sense and the minimal hypothesis in order to have a well defined projection is ${\textbf{u}} \in L^2([0,T] \times \Omega)$. In the following lemma we characterize the projection of the velocity field:
\begin{lemma}\label{lem:projection}
Let $\rho \in {\mathscr P} (\Omega)$ be such that $\rho \leq 1$ a.e. and let ${\textbf{u}} \in L^2(\Omega; {\mathbb R}^d)$. Then there exists $p \in H^1_\rho(\Omega)$ such that $ P_{{\rm adm} (\rho)} [ {\textbf{u}} ] = {\textbf{u}} - \nabla p$. Furthermore $p$ is characterized by
\begin{itemize}
\item[(i)] $\displaystyle \int_{\Omega} \nabla p \cdot ( {\textbf{u}} - \nabla p ) \,{\rm d} x =0$;
\item[(ii)] $\displaystyle \int_{\Omega} \nabla q \cdot ({\textbf{u}} - \nabla p) \,{\rm d} x \leq 0$, for all $q \in H^1_\rho(\Omega)$.
\end{itemize}
\end{lemma}
\begin{proof} Let us set $K=\{ \nabla p \; : \; p \in H^1_\rho(\Omega) \}$. It is easy to see that $K$ is a closed convex cone in $L^2(\Omega;{\mathbb R}^d)$.
Let us recall that the polar cone of $K$ is defined as
$$K^o:=\left\{{\textbf{v}}\in L^2(\Omega;{\mathbb R}^d)\; : \; \int_\Omega {\textbf{v}}\cdot\nabla q\,{\rm d} x\le 0,\ \forall\ \nabla q\in K \right\}.$$
By the definition of the admissible velocities we have ${\rm adm} (\rho ) = K^o$. The Moreau decomposition applied to $K$ and $K^o$ gives
$$ {\textbf{u}} = P_K [{\textbf{u}}] + P_{K^o} [ {\textbf{u}} ] \qquad \forall\, {\textbf{u}} \in L^2(\Omega; {\mathbb R}^d).$$
Writing $P_K[{\textbf{u}}]=\nabla p$ for some $p\in H^1_\rho(\Omega)$, we get $P_{{\rm adm}(\rho)}[{\textbf{u}}]=P_{K^o}[{\textbf{u}}]={\textbf{u}}-\nabla p$; the claims (i) and (ii) are then precisely the variational characterization of the projection of ${\textbf{u}}$ onto the cone $K$.
\end{proof}
\begin{corollary}\label{cor:proj}
Lemma \ref{lem:projection} ${\rm{(i)}}$ implies that $\displaystyle \int_{\Omega} | {\textbf{u}}|^2\,{\rm d} x = \int_{\Omega} | \nabla p|^2\,{\rm d} x + \int_{\Omega} | {\textbf{u}} - \nabla p|^2\,{\rm d} x$, and so in particular we get
$$\int_\Omega|\nabla p|^2\,{\rm d} x\le\int_\Omega|{\textbf{u}}|^2\,{\rm d} x, \qquad \quad \int_\Omega|P_{{\rm adm} (\rho)} [ {\textbf{u}} ]|^2\,{\rm d} x\le\int_\Omega|{\textbf{u}}|^2\,{\rm d} x.$$
\end{corollary}
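As a simple one-dimensional illustration (which we include for the reader's convenience), let $\Omega=(0,2)$, $\rho=\mathbf{1}_{(0,1)}$ and ${\textbf{u}}\equiv-1$, so that the desired velocity pushes the saturated block against the wall $\{x=0\}$. Then $p(x)=(1-x)_+$ belongs to $H^1_\rho(\Omega)$ and satisfies (i) and (ii) of Lemma \ref{lem:projection}: indeed ${\textbf{u}}-p'=0$ on $(0,1)$, while every $q\in H^1_\rho(\Omega)$ has $q'=0$ a.e. on $(1,2)$, so that
$$\int_0^2 p'\,({\textbf{u}}-p')\,{\rm d} x=0, \qquad \int_0^2 q'\,({\textbf{u}}-p')\,{\rm d} x=0\leq 0.$$
Hence $P_{{\rm adm}(\rho)}[{\textbf{u}}]={\textbf{u}}-p'$ vanishes on the saturated block: the pressure freezes the crowd pressed against the wall.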
Using Lemma \ref{lem:projection} we get that if $(\rho, {\textbf{v}})$ is a solution to \eqref{fp0} then ${\textbf{v}}_t = {\textbf{u}}_t - \nabla p_t$ for some $p_t\in H^1_{\rho_t}(\Omega)$. Now, using that $\rho_t \nabla p_t = \nabla p_t$ (indeed $\nabla p_t=0$ a.e. on $\{p_t=0\}\supseteq\{\rho_t<1\}$, while $\rho_t=1$ a.e. on $\{p_t>0\}$), we have that $(\rho, p)$ is a solution to
\begin{equation}\label{fp1}
\begin{cases}
\partial_t\rho_t+\nabla\cdot(\rho_t{\textbf{u}}_t)=\Delta p_t, \quad & \text{in }(0,T)\times\Omega\\
\rho_t\le1, \, p_t \in H^1_{\rho_t}(\Omega).
\end{cases}
\end{equation}
Thus we recover equation \eqref{main2}, where we also imposed the no-flux boundary condition, which is the natural one when modeling a crowd in a closed room $\Omega$. We are now ready to give the precise definition of solution to our problem:
\begin{definition}\label{def:11} Let $ {\textbf{u}} \in L^2([0,T] \times \Omega)$ and let $\rho_0 \in{\mathcal K}_1$. We define a solution to \eqref{fp1} to be a pair $(\rho, p ) \in \bigl(L^{ \infty} ([0,T] \times \Omega )\cap AC([0,T];({\mathscr P}(\Omega),W_2))\bigr) \times L^2 ( [0,T]; H^1( \Omega )) $ with $\rho|_{t=0}=\rho_0$ such that:
\begin{itemize}
\item for all $\phi \in C_c^{\infty} ( {\mathbb R}^d)$ and for all $0\leq r<s \leq T$ we have
$$ \int_r^s \int_{\Omega} ({\textbf{u}}_t (x)- \nabla p_t (x)) \cdot \nabla \phi (x)\, \rho_t(x) \,{\rm d} x \,{\rm d} t = \int_{\Omega} \phi(x) \rho_s (x) \,{\rm d} x- \int_{\Omega} \phi(x) \rho_r (x) \,{\rm d} x; $$
\item we have $0 \leq \rho \leq 1$ for $\mathscr{L}^1\otimes\mathscr{L}^d$-a.e. $(t,x)\in[0,T]\times\Omega$ and $p_t \in H^1_{\rho_t} ( \Omega)$ for a.e. $t\in[0,T]$.
\end{itemize}
\end{definition}
\begin{remark}
We remark that the boundary condition encoded in this definition is the homogeneous Neumann (no-flux) one. Note that we also require the density $\rho$ to be an absolutely continuous curve in the space of probability measures equipped with the Wasserstein distance\footnote{see the subsection on optimal transport below and \cite{AmbGigSav} for further details.}; in this way the initial condition $\rho|_{t=0}=\rho_0$ is meaningful.
\end{remark}
In \cite{MauRouSanVen,MauRouSan1} and \cite{aude:phd} the existence of solutions has been proved under the following regularity hypotheses: either ${\textbf{u}}\in C^1$ or ${\textbf{u}}=-\nabla D$ for a $\lambda$-convex potential $D$, in both cases without dependence on time.
\subsection{The diffusive case}\label{diffusive}
Recently (see \cite{MesSan}) a second order model for crowd motion has been proposed. In a nutshell, it consists of adding a non-degenerate diffusion to the movement and imposing the density constraint. This leads to a modified Fokker-Planck equation and, with the notations previously introduced, it reads as
\begin{equation}\label{fokker1}
\begin{cases}
\partial_t\rho_t-\Delta\rho_t+\nabla\cdot(\rho_t{\textbf{v}}_t)=0\\
\rho_t\le1,\ \rho|_{t=0}=\rho_0\\
{\textbf{v}}_t=P_{{\rm adm}(\rho_t)}[{\textbf{u}}_t],
\end{cases}
\end{equation}
where ${\textbf{u}}_t$ is -- as before -- the given desired velocity field of the crowd. Introducing the pressure gradient from the characterization of the projection, one can write system \eqref{fokker1} as
\begin{equation}\label{fokker2}
\begin{cases}
\partial_t\rho_t+\nabla\cdot(\rho_t {\textbf{u}}_t ) =\Delta (p_t +\rho_t)\\
\rho_t\le1,\ \rho|_{t=0}=\rho_0, \, p_t \in H^1_{\rho_t}(\Omega),
\end{cases}
\end{equation}
where, as before, we equip the equation with the natural no-flux boundary condition on $\partial\Omega$; the rigorous definition of solution is analogous to Definition \ref{def:11}. We also notice that in particular $p_t+ \rho_t \in \widetilde{\beta}_2(\rho_t)$, so that we are in fact solving equation \eqref{eqn:main} with $\widetilde{\beta}=\widetilde{\beta}_2$.
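Let us point out that the passage from \eqref{fokker1} to \eqref{fokker2} follows exactly as in the first order case: one writes ${\textbf{v}}_t={\textbf{u}}_t-\nabla p_t$ by Lemma \ref{lem:projection} and uses the identity $\rho_t\nabla p_t=\nabla p_t$, so that $\nabla\cdot(\rho_t{\textbf{v}}_t)=\nabla\cdot(\rho_t{\textbf{u}}_t)-\Delta p_t$.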
Under the assumption that $\Omega$ is convex and ${\textbf{u}}\in L^\infty([0,T]\times\Omega)$ it has been shown (see \cite[Theorem 3.1]{MesSan}) that the system \eqref{fokker2} admits a solution $(\rho,p)\in L^\infty([0,T]\times\Omega)\times L^2([0,T];H^1(\Omega)).$ In addition, $[0,T]\ni t\mapsto\rho_t$ is an absolutely continuous curve in the 2-Wasserstein space (see Subsection \ref{OptT}). We direct the reader to \cite{MesSan} for further details on this model.
\subsection{Optimal transport}\label{OptT}
Here we collect some facts about optimal transport that will be needed in the sequel. Given two measurable spaces $X_1$ and $X_2$ and a measurable map $T$ between them, we say that the Borel measure $\mu_2$ is the {\it push forward} of the Borel measure $\mu_1$ through $T$, and we write $\mu_2 = T_{\sharp} \mu_1$, if $\mu_2(A) = \mu_1(T^{-1}(A))$ for every measurable set $A \subseteq X_2$.
Given two measures $\mu \in {\mathscr P}(X_1)$ and $\nu \in{\mathscr P}(X_2)$, we define $\Pi(\mu,\nu)$ as the set of $\gamma \in {\mathscr P}(X_1 \times X_2)$ such that $(\pi_1)_{\sharp} \gamma = \mu$ and $(\pi_2)_{\sharp} \gamma = \nu$, where $\pi_i$ is the projection onto the $i$-th coordinate; these measures are called {\it transport plans} between $\mu$ and $\nu$.
A particular class of transport plans is given by those induced by transport maps: whenever a measurable map $T: X_1 \to X_2$ satisfies $T_{\sharp} \mu = \nu$, the induced plan $\gamma_T=({\rm id},T)_{\sharp} \mu$ belongs to $\Pi(\mu, \nu)$.
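For instance, if $X_1=X_2={\mathbb R}^d$ and $T(x)=x+a$ is a translation, then $T_\sharp\mu$ is the translate of $\mu$ by the vector $a$, and the induced plan $\gamma_T$ is concentrated on the graph $\{(x,x+a)\,:\,x\in{\mathbb R}^d\}$.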
Let us summarize some well-known facts about optimal transport in the following theorem (see for instance \cite{villani} or \cite{OTAM}).
\begin{theorem}\label{OT}
Let $\Omega \subset {\mathbb R}^d$ be an open bounded set and let $\mu , \nu \in {\mathscr P}(\Omega)$. Let us consider the following quantities
\begin{equation}
A(\mu, \nu) = \inf \left\{ \int_{\Omega \times \Omega} |x - y|^2 \,{\rm d} \gamma \; : \; \gamma \in \Pi ( \mu, \nu ) \right \}
\tag{P}\label{P}
\end{equation}
\begin{equation}
B(\mu,\nu)=\sup \left\{ \int_{\Omega} \varphi (x) \,{\rm d} \mu + \int_{\Omega} \psi (x) \,{\rm d} \nu \; : \;\varphi,\psi\in C_b(\Omega),\ \varphi (x) + \psi (y) \leq \frac 12 | x-y|^2\; \; \forall \,x,y \in \Omega \right\},\tag{D}\label{D}
\end{equation}
where $C_b(\Omega)$ denotes the space of bounded continuous functions on $\Omega$.
We will call \eqref{P} the primal and \eqref{D} the dual problem.
\begin{itemize}
\item[(i)] There exists at least one minimizer for the primal problem (the set of minimizers is denoted by $\Pi_o(\mu,\nu)$), and there exists a maximizer $(\varphi,\psi)$ for the dual problem.
\item[(ii)] $A(\mu,\nu) = 2B(\mu, \nu)$ and we denote this common value by $W^2_2(\mu,\nu)$.
\item[(iii)] We can choose a maximizer $(\varphi,\psi)$ of \eqref{D} such that $\varphi$ and $\psi$ are Lipschitz in $\Omega$ and such that $\frac 12 |x|^2 -\varphi(x)$ is a convex function on the convex hull of $\Omega$. If $\mu\ll \mathscr{L}^d$, then the map $T(x):=x- \nabla \varphi(x)$ (the gradient of this convex function) satisfies $T_{\sharp} \mu = \nu$ and the associated plan is the unique optimal plan, that is $\Pi_o(\mu,\nu)=\{ \gamma_T \}$.
\item[(iv)] If $\Omega$ is convex, ${\mathscr P}(\Omega)$ endowed with the Wasserstein distance $W_2$ is a geodesic space and, if $\mu\ll \mathscr{L}^d$, the geodesic between $\mu$ and $\nu$ is unique and is given by $$[0,1]\ni t\mapsto\mu_t:= ({\rm id} + t (T- {\rm id}) ) _{\sharp} \mu.$$
\end{itemize}
\end{theorem}
The space of probability measures equipped with the {\it Wasserstein distance} $W_2$ is called the {\it Wasserstein space}; we denote it by ${\mathbb W}_2:=({\mathscr P}(\Omega),W_2)$.
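Let us also recall that in dimension one the Wasserstein distance has an explicit expression: for $\mu,\nu\in{\mathscr P}(\Omega)$ with $\Omega\subset{\mathbb R}$ one has
$$W_2^2(\mu,\nu)=\int_0^1\bigl|F_\mu^{-1}(s)-F_\nu^{-1}(s)\bigr|^2\,{\rm d} s,$$
where $F_\mu^{-1}$ denotes the generalized inverse of the cumulative distribution function of $\mu$ (see \cite{villani,OTAM}). We will use this one-dimensional picture in the informal numerical sketch at the end of Section \ref{ss:l1}.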
\vspace{1cm}
The following two sections contain our main results. Section \ref{two} is devoted to the uniqueness question for first order models, where the main tool is the theory of optimal transport. In Section \ref{three} we investigate the uniqueness issue for second order models, using PDE techniques. Finally, in Section \ref{ss:l1} we discuss an approach that could lead to an $L^1$-contraction result in the first order case as well.
\section{Monotone vector fields in the first order case}\label{two}
Let $\Omega\subset{\mathbb R}^d$ be a bounded convex domain. In this section we suppose that the desired velocity field ${\textbf{u}}:[0,T]\times\Omega\to\mathbb{R}^d$ of the crowd is a monotone vector field in $L^2([0,T]\times\Omega;{\mathbb R}^d)$, in the sense that the following assumption is fulfilled: there exists $\lambda\in{\mathbb R}$ such that for all $t\in[0,T]$ there exists a Borel measurable set $A_t\subseteq\Omega$ (possibly depending on $t$) of full measure, i.e. $\mathscr{L}^d(\Omega\setminus A_t)=0$, such that
\begin{equation}
\left({\textbf{u}}_t(x)-{\textbf{u}}_t(y)\right)\cdot(x-y)\le\lambda |x-y|^2,\;\;\forall\; x,y\in A_t.
\tag{H1}\label{H1}
\end{equation}
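Let us observe that \eqref{H1} is a rather weak one-sided condition: for instance any $L$-Lipschitz field satisfies it with $\lambda=L$, while ${\textbf{u}}_t=-\nabla D_t$ with $D_t$ convex satisfies it with $\lambda=0$; discontinuous fields such as ${\textbf{u}}_t(x)=-x/|x|$ are also allowed.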
The following contractivity results are not very surprising in the Wasserstein context. In fact we show (Lemma \ref{positivity}) that
$$ \{ - \nabla p \; : \; p \in H^1_{\rho}(\Omega) \} \subseteq \partial_{\mathbb{W}_2} \mathscr{I}_1 ( \rho ), $$
where $\mathscr{I}_1$ is the indicator function of ${\mathcal K}_1$ and $\partial_{\mathbb{W}_2}$ denotes the Wasserstein subdifferential (see \cite{AmbGigSav}); we then exploit the geodesic convexity of $\mathscr{I}_1$ in order to get the contraction properties. However, to keep the proofs readable we avoid the technical language of $\mathbb{W}_2$-subdifferentials. It would be interesting to adapt these tools to more general Hele-Shaw problems.
Although a first written version of these results is essentially contained in \cite{Mes} (Section 4.3.1), here we have simplified and clarified some of the proofs. A key observation is the following lemma (see also Lemma 4.3.13 in \cite{Mes}):
\begin{lemma}\label{positivity}
Let $\Omega$ be a convex bounded domain of $\mathbb{R}^d$ and let
$\rho_0,\rho_1\in{\mathscr P}(\Omega)$ be two absolutely
continuous measures such that $\rho_0\le1$ and $\rho_1\le1$ a.e.
Take a Kantorovich potential $\varphi$ from $\rho_0$ to $\rho_1$
and $p\in H^1_{\rho_0}(\Omega).$ Then
$$\int_{\Omega}\nabla\varphi\cdot\nabla p\,{\rm d} x=\int_{\Omega}\nabla\varphi\cdot\nabla p\,{\rm d}\rho_0\ge0.$$
\end{lemma}
To prove this result we will use the following auxiliary lemma:
\begin{lemma}\label{derivative}
Let $\Omega$ be a convex bounded domain of $\mathbb{R}^d$ and let
$\rho_0,\rho_1\in{\mathscr P}(\Omega)$ be two absolutely
continuous measures such that $\rho_0\le 1$ and $\rho_1\le 1$ a.e.
Take a Kantorovich potential $\varphi$ from $\rho_0$ to $\rho_1$
and $p\in H^1(\Omega)$. Let $[0,1]\ni t\mapsto\rho_t$ be the geodesic connecting $\rho_0$ to $\rho_1$ with respect to the $2$-Wasserstein distance $W_2$. Then we have
$$ \frac {\,{\rm d}}{\,{\rm d} t}\bigg|_{t=0} \int_{\Omega} p \,{\rm d} \rho_t = - \int_{\Omega}\nabla\varphi\cdot\nabla p \,{\rm d} \rho_0. $$
\end{lemma}
\begin{proof} We know (using the interpolation introduced by R. McCann, see \cite{mccann} or Theorem \ref{OT} (iv)) that $\rho_t = (x-t \nabla \varphi (x) ) _{\sharp} \rho_0$ for all $t\in[0,1]$, and so we have
\begin{equation*}
\begin{split}
\frac {\,{\rm d}} {\,{\rm d} t}\bigg|_{t=0} \int_{\Omega} p \,{\rm d} \rho_t & = \lim_{ t \to 0} \int_{\Omega} \frac{ p(x-t \nabla \varphi (x) ) - p(x) }{t} \,{\rm d} \rho_0(x) \\
& = - \lim_{t \to 0 } \int_{\Omega} \frac 1t \int_0^t \nabla p ( x- s \nabla \varphi (x) ) \cdot \nabla \varphi (x) \,{\rm d} s \,{\rm d} \rho_0(x) \\
& = - \lim_{t \to 0 } \int_{\Omega} A_t( \nabla p ) \cdot \nabla \varphi \,{\rm d} \rho_0(x),
\end{split}
\end{equation*}
where the second equality is easy to prove, for fixed $t$, by approximation with smooth functions and where, for $t\in[0,1]$, we denoted by $A_t : L^2(\Omega;{\mathbb R}^d) \to L^2_{\rho_0}(\Omega;{\mathbb R}^d)$ the linear operator
$$ A_t (h)(x) = \frac 1t \int_0^t h(x- s \nabla \varphi (x) ) \,{\rm d} s. $$
Now, as a general fact, we prove that $A_t(h) \to h $ strongly in $L^2_{\rho_0}(\Omega;{\mathbb R}^d)$ as $t\to 0$, for every $h \in L^2(\Omega;{\mathbb R}^d)$. First of all it is easy to see that $ \| A_t \| \leq 1$: indeed
\begin{equation*}
\begin{split}
\int_{\Omega} |A_t (h)|^2 \,{\rm d} \rho_0 & \leq \frac 1t \int_{\Omega} \int_0^t |h(x- s \nabla \varphi (x) )|^2 \,{\rm d} s \,{\rm d} \rho_0(x) \\
& = \frac 1t \int_0^t \int_{\Omega} |h|^2 \,{\rm d} \rho_s\,{\rm d} s \leq \int_{\Omega} |h|^2 \,{\rm d} x.
\end{split}
\end{equation*}
Here we used the fact that, since $\rho_0,\rho_1 \leq 1$ a.e., we also have $\rho_t \leq 1$ a.e. for all $t\in[0,1]$. Now it is sufficient to note that for every $\varepsilon >0 $ there exists a Lipschitz function $h_{\varepsilon}$ such that $\|h_{\varepsilon} - h \|_{L^2} \leq \varepsilon$, and so we have
$$ \| A_t (h) - h \|_{L^2_{\rho_0}} \leq \| A_t ( h- h_{\varepsilon}) \|_{L^2_{\rho_0}} + \| h-h_{\varepsilon}\|_{L^2_{\rho_0}} + \|A_t (h_{\varepsilon}) - h_{\varepsilon} \|_{L^2_{\rho_0}} \leq 2 \varepsilon + t L \| \nabla \varphi \|_{L^2_{\rho_0}}, $$
where $L$ is the Lipschitz constant of $h_{\varepsilon}$. Taking the limit as $t$ goes to $0$ we obtain $\limsup_{t \to 0} \| A_t( h) - h \|_{L^2_{\rho_0}} \leq 2\varepsilon$; by the arbitrariness of $\varepsilon>0$ we conclude.
Now it is easy to finish the proof, since $\nabla p \in L^2(\Omega;{\mathbb R}^d)$ and so
$$\frac {\,{\rm d}} {\,{\rm d} t}\bigg|_{t=0} \int_{\Omega} p \,{\rm d} \rho_t =-\lim_{t \to 0} \int_{\Omega} A_t(\nabla p) \cdot \nabla \varphi \,{\rm d} \rho_0=-\int_{\Omega} \nabla p \cdot \nabla \varphi \,{\rm d} \rho_0.$$
\end{proof}
\begin{proof}[Proof of Lemma \ref{positivity}] Let $[0,1]\ni t\mapsto\rho_t$ be the Wasserstein geodesic between $\rho_0$ and $\rho_1$. We know that $\rho_t \leq 1$ a.e. for all $t\in[0,1]$, and in particular
$$\int_{\Omega} p \,{\rm d} \rho_t \leq \int_{\Omega} p\,{\rm d} x = \int_{\Omega} p \,{\rm d} \rho_0,$$
which means that the function $\displaystyle [0,1]\ni t\mapsto\int_\Omega p \,{\rm d} \rho_t$ has a maximum at $t=0$, hence its derivative at $0$ is non-positive.
Given this, the claim is a consequence of Lemma \ref{derivative}.
\end{proof}
We will also need a regularity lemma on the continuity equation. By definition, a curve $\rho_t$ satisfies a continuity equation with velocity ${\textbf{v}}_t$ if for every $\phi \in C^1_0({\mathbb R}^d)$\footnote{we recall that for us $C_0^1({\mathbb R}^d)$ is the closure of $C_c^{\infty}({\mathbb R}^d)$ with respect to the norm $\| \phi \|_1 = \| \phi \|_{\infty} + \| \nabla \phi \|_{\infty}$ or, equivalently, the set of $\phi \in C^1({\mathbb R}^d)$ such that $| \phi (x) | + | \nabla \phi (x)| \to 0$ as $|x|\to\infty$.} the map $\displaystyle [0,T]\ni t \mapsto \int_{{\mathbb R}^d} \phi \,{\rm d} \rho_t$ is absolutely continuous and its derivative is $\displaystyle\int_{{\mathbb R}^d} {\textbf{v}}_t \cdot \nabla \phi \,{\rm d} \rho_t$ for almost every time $t$. We will prove that, under suitable integrability assumptions, there exists a universal full-measure set of times at which this differentiability holds, even for $\phi \in H^1({\mathbb R}^d)$.
\begin{lemma}\label{lem:diffB} Let $\rho_t$ be a weakly continuous curve of probability measures on ${\mathbb R}^d$, and suppose that the continuity equation $\partial_t \rho_t + \nabla \cdot ( \rho_t {\textbf{v}}_t)=0$ holds with a velocity field ${\textbf{v}}_t$ such that $\displaystyle\int_0^T\int_{{\mathbb R}^d} | {\textbf{v}}_t| \,{\rm d} \rho_t \,{\rm d} t < +\infty$. Then there exists a set $\mathbb{T} \subset [0,T]$ such that $ \mathscr{L}^1 ( [0,T] \setminus \mathbb{T} ) =0$ and
\begin{equation}\label{eq:deriv} \lim_{ h \to 0} \frac 1h \left( \int_{{\mathbb R}^d} \phi \,{\rm d} \rho_{t+h} - \int_{{\mathbb R}^d} \phi \,{\rm d} \rho_t \right) = \int_{{\mathbb R}^d} \nabla \phi \cdot {\textbf{v}}_t \,{\rm d} \rho_t \qquad \forall t \in \mathbb{T}
\end{equation}
for every $\phi \in C_0^1({\mathbb R}^d)$. Moreover, if in addition $\rho_t \leq 1$ a.e. and $\displaystyle\int_0^T\int_{{\mathbb R}^d} | {\textbf{v}}_t|^2 \,{\rm d} \rho_t \,{\rm d} t < +\infty$, then we can require that \eqref{eq:deriv} also holds for every $\phi \in H^1({\mathbb R}^d)$.
\end{lemma}
\begin{proof} Let us first prove the following general statement: for a separable Banach space $B$ and a curve $x^* \in L^1([0,T];B^*)$, there exists $\mathbb{T} \subset [0,T]$ with $\mathscr{L}^1 ( [0,T] \setminus \mathbb{T} )=0$ such that every $t\in\mathbb{T}$ is a Lebesgue point of the map $s \mapsto x^*_s(b)$ for every $b \in B$. To see this, choose a dense subset $(b_n) \subset B$, let $\mathbb{T}_n$ be the set of Lebesgue points of $t \mapsto x^*_t(b_n)$, let $\mathbb{T}_0$ be the set of Lebesgue points of $t \mapsto \| x^*_t \|$, and set $ \mathbb{T} = \bigcap_{ n \geq 0} \mathbb{T}_n$. For every $b \in B$ and $\varepsilon>0$ let us consider $i\in\mathbb{N}$ such that $\|b_i - b\| \leq \varepsilon$; then, for $t \in \mathbb{T}$, we have
$$ \frac 1{2\delta} \int_{t-\delta}^{t+\delta}| x^*_s(b) - x^*_t(b)| \,{\rm d} s \leq \varepsilon \Bigl( \|x^*_t\| + \frac 1{2\delta}\int_{t-\delta}^{t+\delta} \| x^*_s\| \,{\rm d} s \Bigr) + \frac 1{2\delta} \int_{t-\delta}^{t+\delta} |x^*_s(b_i) - x^*_t(b_i) | \,{\rm d} s. $$
Taking the limit as $\delta \to 0$ and using the properties of $\mathbb{T}$ we get
$$ \limsup_{\delta \to 0} \frac 1{2\delta} \int_{t-\delta}^{t+\delta} |x^*_s(b) - x^*_t(b)| \,{\rm d} s \leq 2 \varepsilon \| x^*_t\|,$$
and by the arbitrariness of $\varepsilon$ we conclude that $t$ is a Lebesgue point of $s\mapsto x^*_s(b)$. It is now easy to conclude, thanks to the identity
$$ \int_{{\mathbb R}^d} \phi \,{\rm d} \rho_{t+h} - \int_{{\mathbb R}^d} \phi \,{\rm d} \rho_t = \int_t^{t+h} \int_{{\mathbb R}^d} \nabla \phi \cdot {\textbf{v}}_s \,{\rm d} \rho_s\,{\rm d} s, $$
noticing that $\displaystyle x^*_s : \phi \mapsto \int_{{\mathbb R}^d} \nabla \phi \cdot {\textbf{v}}_s \,{\rm d} \rho_s$ satisfies the assumption $x^* \in L^1([0,T];B^*)$, where $B$ is the Banach space $C^1_0({\mathbb R}^d)$ in the first case and $H^1({\mathbb R}^d)$ in the second one. Notice that if we follow the construction of $\mathbb{T}$ for the case $B=C^1_0({\mathbb R}^d)$, this set of times also works for $H^1({\mathbb R}^d)$, since $C^1_0$ is dense in $H^1$.
\end{proof}
Now we are in a position to prove the main theorem of this section, namely:
\begin{theorem}\label{thm:unique_first}
Suppose $\Omega\subset\mathbb{R}^d$ is a bounded convex domain,
${\textbf{u}}$ is a vector field satisfying assumption \eqref{H1} and $\rho_0\in{\mathcal K}_1$ is an admissible
initial density. Let $(\rho^1,p^1)$ and $(\rho^2,p^2)$ be two solutions of the system
\begin{equation}\label{fp2}
\left\{\begin{array}{ll}
\partial_t\rho_t+\nabla\cdot(\rho_t({\textbf{u}}_t-\nabla p_t))=0 & {\rm{in\ }}(0,T)\times\Omega\\[7pt]
\rho_t \le 1,\; p_t\ge0,\; (1-\rho_t)p_t=0 &{\rm{a.e.\ in\ }}[0,T]\times\Omega, \\[7pt]
\rho|_{t=0}=\rho_0,
\end{array} \right.
\end{equation}
with $p^i\in L^2( [0,T]; H^1(\Omega))$ and $\rho^i_t \in {\mathscr P} (\Omega)$ for $i\in\{1,2\}$, where the first equation is understood in duality with $C^{\infty}_c({\mathbb R}^d)$ (see Definition \ref{def:11}) in order to take the boundary conditions into account. Then $\rho^1=\rho^2$ and $p^1=p^2$ a.e. In particular, under the same assumptions, there exists at most one pair $(\rho,{\textbf{v}})$ that solves \eqref{fp0}.
\end{theorem}
\begin{proof}
We associate to the two curves $\rho^1_t$ and $\rho^2_t$ the continuity equations (cf. \eqref{fp1}) with the corresponding vector fields ${\textbf{v}}^1_t={\textbf{u}}_t- \nabla p^1_t$ and ${\textbf{v}}^2_t={\textbf{u}}_t- \nabla p^2_t $. Let us compute and estimate
$\displaystyle\frac{\,{\rm d}}{\,{\rm d} t}\frac{1}{2}W_2^2(\rho^1_t,\rho^2_t)$; we refer to \cite[Theorem 8.4.7]{AmbGigSav} for a more general statement, but we prefer to include a simpler proof for our case. We know that $t \mapsto W_2^2(\rho^1_t, \rho^2_t)$ is absolutely continuous; let us consider a time $t$ at which its derivative exists and such that $t \in \mathbb{T}^1 \cap \mathbb{T}^2$, where $\mathbb{T}^i$ is the set of times for which \eqref{eq:deriv} holds for the continuity equation satisfied by $\rho^i$. Then we know that, for all $s \in [0,T]$,
\begin{equation}\label{eq:Kdua} \frac12 W_2^2 (\rho^1_s,\rho^2_s) \geq \int_{\Omega} \varphi_t \,{\rm d} \rho^1_s + \int_{\Omega} \psi_t \,{\rm d} \rho^2_s,
\end{equation}
where $(\varphi_t,\psi_t)$ is a pair of Kantorovich potentials for $\rho^1_t$ and $\rho^2_t$. In particular we have equality in \eqref{eq:Kdua} for $s=t$ and so, since both sides are differentiable at $s=t$ by our choice of $t$, their derivatives there coincide. Hence we get
$$\frac{\,{\rm d}}{\,{\rm d} t}\frac{1}{2}W_2^2(\rho^1_t,\rho^2_t) = \int_{\Omega}{\textbf{v}}^1_t \cdot \nabla \varphi_t \,{\rm d}\rho^1_t + \int_{\Omega}{\textbf{v}}^2_t \cdot \nabla \psi_t \,{\rm d}\rho^2_t,$$
where we used the fact that, since $\Omega$ is bounded, we can assume $\varphi_t$ and $\psi_t$ to be Lipschitz, in particular in $H^1$, which allows us to use Lemma \ref{lem:diffB}.
Thanks to Theorem \ref{OT} there is a pair of optimal
transport maps $T_t(x)=x-\nabla \varphi_t(x)$ and $S_t(y)=y - \nabla \psi_t(y)$ such that $(T_t)_\sharp\rho^1_t=\rho^2_t$ and $(S_t)_\sharp \rho^2_t = \rho^1_t$ for all $t\in[0,T]$; moreover $T_t$ is the inverse of $S_t$ (in the appropriate almost everywhere sense). Using this, we have $\nabla \psi_t(T_t(x)) = -\nabla \varphi_t(x)$, and in particular, by the change of variables $y=T_t(x)$,
$$ \int_{\Omega} \nabla \psi_t (y) \cdot {\textbf{u}}_t (y) \,{\rm d} \rho_t^2(y) = - \int_{\Omega} \nabla \varphi_t(x) \cdot {\textbf{u}}_t ( T_t(x)) \,{\rm d} \rho_t^1(x).$$
We use this to split the formula for the derivative of $W^2_2(\rho^1_t,\rho^2_t)/2$: we apply Lemma \ref{positivity} to the pressure terms and rewrite the term involving ${\textbf{u}}_t$ by means of the transport maps, obtaining
\begin{align*}
\frac{\,{\rm d}}{\,{\rm d} t}\frac{1}{2}W_2^2(\rho^1_t,\rho^2_t)&=\int_{\Omega}{\textbf{v}}^1_t \cdot \nabla \varphi_t \,{\rm d}\rho^1_t + \int_{\Omega}{\textbf{v}}^2_t \cdot \nabla \psi_t \,{\rm d}\rho^2_t \\
&= \int_{\Omega}{\textbf{u}}_t \cdot \nabla \varphi_t \,{\rm d}\rho^1_t + \int_{\Omega}{\textbf{u}}_t \cdot \nabla \psi_t \,{\rm d}\rho^2_t - \int_{\Omega}\nabla p^1_t \cdot \nabla \varphi_t \,{\rm d}\rho^1_t - \int_{\Omega}\nabla p^2_t \cdot \nabla \psi_t \,{\rm d}\rho^2_t \\
& \leq \int_{\Omega} \nabla \varphi_t(x) \cdot \bigl[{\textbf{u}}_t (x) - {\textbf{u}}_t(T_t(x))\bigr] \,{\rm d} \rho^1_t \\
& = \int_{\Omega} (x-T_t(x)) \cdot \bigl[{\textbf{u}}_t (x) - {\textbf{u}}_t(T_t(x))\bigr] \,{\rm d} \rho^1_t \\
& \leq \lambda \int_{\Omega} | x- T_t(x)|^2 \,{\rm d} \rho^1_t = \lambda W_2^2(\rho^1_t, \rho^2_t).
\end{align*}
Gr\"onwall's lemma implies that
$$W_2^2(\rho^1_t,\rho^2_t)\lambdae e^{2\lambdaambda t}W_2^2(\rho^1_0,\rho^2_0).$$
Since $\rho_0^1=\rho_0^2=\rho_0$ a.e., the above property implies that $\rho^1=\rho^2$ a.e. in $[0,T]\tauimes\Omega.$ From this fact we can easily deduce that $\Delta(p_t^1-p_t^2)=0,$ for a.e. $t\in[0,T]$ in the sense of distributions. In particular $p_t^1-p_t^2$ is analytic in the interior of $\Omega.$ Moreover, both $p^1_t$ and $p^2_t$ vanish a.e. in the set $\{\rho_t^1<1\}$ which has a positive Lebesgue measure greater than $|\Omega|-1>0.$ Thus, $p^1=p^2$ a.e. in $[0,T]\tauimes\Omega.$ The claim follows.
\varepsilonnd{proof}
\begin{remark}
Existence results for system \eqref{fp2} have been obtained in several settings in the literature. On the one hand, if ${\textbf{u}}=-\nabla D$ (for a reasonably regular potential $D$), the existence and uniqueness of a pair $(\rho,p)$ can be obtained by gradient flow techniques in ${\mathbb W}_2(\Omega)$ (see \cite{AmbGigSav, MauRouSan1, aude:phd}), under the monotonicity assumption on ${\textbf{u}}$, which translates into $\lambda$-convexity of $D$. On the other hand, if ${\textbf{u}}$ is a general field with $C^1$ regularity, the existence result is proven with the help of a well-chosen splitting algorithm (see \cite{MauRouSanVen, aude:phd}).
Nevertheless, combining the techniques developed in \cite{MesSan} on the one hand and the well-known DiPerna-Lions-Ambrosio theory on the other, we expect existence results for \eqref{fp2} to hold for more general vector fields with merely Sobolev regularity and suitable bounds on the divergence.
\end{remark}
\begin{remark}
The monotonicity assumption \eqref{H1} is not surprising in this setting. We remark that the same assumption was required in \cite{NatPelSav} to prove contraction properties, for a general class of transport costs, along the solutions of the Fokker-Planck equation in ${\mathbb R}^d$
$$\partial_t\rho-\Delta\rho+\nabla\cdot(B\rho)=0,\;\;\;\rho|_{t=0}=\rho_0,$$
where the velocity field $B:{\mathbb R}^d\to{\mathbb R}^d$ is assumed to satisfy the monotonicity property \eqref{H1}.
\end{remark}
We also note that we can allow for moving domains $\Omega_t$ (always with a no-flux boundary condition): indeed our proof never uses that the domain is fixed, only its convexity at each fixed time. This generalization has been used in \cite{DiMoSa} to prove uniqueness for an evolution equation with density constraint driven only by the boundary of the moving sets.
\section{Bounded vector field in the diffusive case}\label{three}
We use Hilbert space techniques (similar to those developed in \cite{Cro, Bou, PetQuiVaz}; see also Section 3.1 of \cite{Por}) to study the uniqueness of solutions of the diffusive crowd motion model with density constraints described in Subsection \ref{diffusive} (see also \cite{MesSan}). In the presence of a non-degenerate diffusion in the model we can moreover expect uniqueness to hold under more general assumptions on the velocity field.
Let ${\textbf{u}}:[0,T]\times\Omega\to{\mathbb R}^d$ be a given vector field, which represents again the desired velocity of the crowd, let $\Omega\subset{\mathbb R}^d$ be a bounded open set with $C^1$ boundary, let $\rho_0\in{\mathscr P}(\Omega)$ be the initial density of the population, with $0\le\rho_0\le 1$ a.e. in $\Omega$, and let us consider the following problem
\begin{equation}\label{2nd_1}
\left\{
\begin{array}{rl}
\partial_t\rho_t-\Delta\rho_t+\nabla\cdot(P_{{\rm adm}(\rho_t)}[{\textbf{u}}_t]\rho_t)=0, & {\rm{in}}\ (0,T)\times\Omega;\\[10pt]
\rho|_{t=0}=\rho_0,\;\;\; 0\le \rho_t\le 1, & {\rm{a.e.}}\ {\rm{in}}\ \Omega,
\end{array}
\right.
\end{equation}
equipped with the natural no-flux boundary condition.
Introducing the pressure variable, the above system can equivalently be written as
\begin{equation}\label{2nd_2}
\left\{
\begin{array}{rl}
\partial_t\rho_t-\Delta\rho_t-\Delta p_t+\nabla\cdot({\textbf{u}}_t\rho_t)=0, & {\rm{in}}\ (0,T)\times\Omega,\\[10pt]
\rho|_{t=0}=\rho_0, & {\rm{in}}\ \Omega,\\[10pt]
(\nabla\rho_t+\nabla p_t -{\textbf{u}}_t\rho_t)\cdot\nu = 0, & {\rm{on}}\ \partial\Omega,\ {\rm{for\ a.e.}}\ t\in[0,T],
\end{array}
\right.
\end{equation}
for a pressure field $p_t\in H^1_{\rho_t}(\Omega)$.
It has been shown in \cite{MesSan} that under the assumption
\begin{equation}\label{eq:H2}
{\textbf{u}}\in L^{\infty}([0,T]\times\Omega;{\mathbb R}^d)\tag{H2}
\end{equation}
the systems \eqref{2nd_1} and \eqref{2nd_2} have a solution. More precisely, there exist an absolutely continuous curve $[0,T]\ni t\mapsto\rho_t\in {\mathbb W}_2$ and pressures $p_t\in H^1_{\rho_t}(\Omega)$ for a.e. $t\in[0,T]$ (in particular $\rho\in L^\infty([0,T]\times\Omega)$ and $p\in L^2([0,T];H^1(\Omega))$) such that $(\rho,p)$ solves \eqref{2nd_2} in the weak sense \eqref{2nd_dual}.
Our aim in this section is to show the following theorem:
\begin{theorem}
Let ${\textbf{u}}$ satisfy \eqref{eq:H2}. Then there exists a unique pair $(\rho,p)\in L^\infty([0,T]\times\Omega)\times L^2([0,T]; H^1(\Omega))$ that solves \eqref{2nd_2} in the weak sense \eqref{2nd_dual}. Moreover, for any two solutions $(\rho^1,p^1)$ and $(\rho^2,p^2)$ of \eqref{2nd_2} we have the $L^1$-contraction property
$$ \int_{\Omega} | \rho^1(T,x) - \rho^2(T,x)| \,{\rm d} x \leq \int_{\Omega} |\rho^1(0,x) - \rho^2(0,x)| \,{\rm d} x.$$
\end{theorem}
\begin{proof}
The existence of a solution $(\rho,p)$ was obtained in \cite{MesSan}. Let us now show uniqueness via an $L^1$-contraction property; notice that this contraction is valid for a general open set $\Omega$ with $C^1$ boundary.
Let us write the weak formulation of \eqref{2nd_2}: for every smooth test function $\phi:[0,T]\times\Omega\to{\mathbb R}$ with $\nabla\phi\cdot\nu=0$ on $[0,T]\times\partial\Omega$ we have
\begin{equation}\label{2nd_dual}
\int_0^T\int_\Omega\left[\rho\partial_t\phi+(\rho+p)\Delta\phi+\rho{\textbf{u}}\cdot\nabla\phi\right]\,{\rm d} x\,{\rm d} t+\int_\Omega\rho_0(x)\phi(0,x)\,{\rm d} x=\int_\Omega\rho(T,x)\phi(T,x)\,{\rm d} x.
\end{equation}
By density arguments the above formulation holds for $\phi\in W^{1,1}([0,T];L^1(\Omega))\cap L^2([0,T];H^2(\Omega)).$
Now, let us consider two solutions $(\rho^1,p^1)$ and $(\rho^2,p^2)$ of problem \eqref{2nd_2}, with initial conditions $\rho^1_0$ and $\rho^2_0$ respectively.
Writing the weak formulation \eqref{2nd_dual} for both of them and taking the difference we obtain
\begin{equation}\label{difference}
\mathcal{I}(\phi,T) =\mathcal{I}(\phi,0) + \int_0^T\int_\Omega\left[(\rho^1-\rho^2)\partial_t\phi+(\rho^1-\rho^2+p^1-p^2)\Delta\phi+(\rho^1-\rho^2){\textbf{u}}\cdot\nabla\phi\right]\,{\rm d} x\,{\rm d} t,
\end{equation}
where $\displaystyle\mathcal{I}(\phi,t)= \int_{\Omega} \phi(t,x) [ \rho^1(t,x) - \rho^2(t,x)]\,{\rm d} x$. We introduce the quantities
$$
A:=\frac{\rho^1-\rho^2}{(\rho^1-\rho^2)+(p^1-p^2)}\;\;\;{\rm{and}}\;\;\; B:=\frac{p^1-p^2}{(\rho^1-\rho^2)+(p^1-p^2)}.
$$
Note that $\rho^1-\rho^2$ and $p^1-p^2$ have the same sign a.e. (if $p^1>p^2\ge0$ then $p^1>0$, hence $\rho^1=1\ge\rho^2$, and symmetrically), so that $0\le A\le 1$, $0\le B\le 1$ a.e. in $[0,T]\times\Omega$ and $A+B=1.$ To be consistent with these bounds, we set $A=0$ on the set where $\rho^1=\rho^2$ (even where also $p^1=p^2$) and $B=0$ on the set where $p^1=p^2$ (even where also $\rho^1=\rho^2$). With these notations, the weak formulation for the difference gives
\begin{equation}\label{uniq_cond}
\mathcal{I}(\phi,T) = \mathcal{I}(\phi,0) + \int_0^T\int_\Omega((\rho^1-\rho^2)+(p^1-p^2))\left[A\partial_t\phi+(A+B)\Delta\phi+A{\textbf{u}}\cdot\nabla\phi\right]\,{\rm d} x\,{\rm d} t.
\end{equation}
For a smooth function $\theta:\Omega\to{\mathbb R}$ such that $|\theta| \leq 1$, let us consider the dual problem
\begin{equation}\label{parabolic1}
\left\{
\begin{array}{rl}
A\partial_t\phi+(A+B)\Delta\phi+A{\textbf{u}}\cdot\nabla\phi=0, & {\rm{in}}\ [0,T)\times\Omega,\\[10pt]
\nabla\phi\cdot\nu=0\ {\rm{on}}\ [0,T]\times\partial\Omega, & \phi(T,\cdot)=\theta\ {\rm{a.e.}}\ {\rm{in}}\ \Omega.
\end{array}
\right.
\end{equation}
Let us remark that if we were able to find a (reasonably regular) solution $\phi$ of this problem for every smooth $\theta$, then, using the maximum principle and optimizing over $\theta$, we would obtain an $L^1$-contraction result for $\rho$; in particular we would get uniqueness for the initial value problem for $\rho$, and hence also for $p$ (arguing as at the end of the proof of Theorem \ref{thm:unique_first}). However, since the coefficients in \eqref{parabolic1} are not regular, we study a regularized problem. For $\varepsilon>0$ let us consider smooth approximations $A_\varepsilon$, $B_\varepsilon$, ${\textbf{u}}_{\varepsilon}$ of $A$, $B$ and ${\textbf{u}}$ such that
\begin{equation}\label{eq:approx}
\|A-A_\varepsilon\|_{L^r([0,T]\times\Omega)} + \|B-B_\varepsilon\|_{L^r([0,T]\times\Omega)} + \| {\textbf{u}} - {\textbf{u}}_{\varepsilon} \|_{L^r([0,T] \times \Omega )}<\varepsilon,\qquad \varepsilon<A_\varepsilon, B_\varepsilon\le 1,
\end{equation}
for some $1\le r<+\infty$ whose value will be chosen later.
The regularized problem reads as follows:
\begin{equation}\label{parabolic2}
\left\{
\begin{array}{rl}
\partial_t\phi_\varepsilon+(1+B_\varepsilon/A_\varepsilon)\Delta\phi_\varepsilon+{\textbf{u}}_{\varepsilon}\cdot\nabla\phi_\varepsilon=0, & {\rm{in}}\ (0,T)\times\Omega,\\[10pt]
\nabla\phi_\varepsilon\cdot\nu=0\ {\rm{a.e.}}\ {\rm{on}}\ [0,T]\times\partial\Omega, & \phi_\varepsilon(T,\cdot)=\theta\ \ {\rm{in}}\ \Omega.
\end{array}
\right.
\end{equation}
For all $\varepsilon>0$ the above problem is uniformly parabolic and $B_\varepsilon/A_\varepsilon$ is continuous and positive. Moreover $\theta$ is smooth, thus by classical results (see for instance \cite{lady, krylov}) the problem has a (unique) solution $\phi_\varepsilon\in C^1([0,T] \times \overline{ \Omega} )$. In particular $\phi_\varepsilon$ can be used as a test function in \eqref{2nd_dual}. In what follows we shall use some standard uniform estimates (in $\varepsilon$) on $\phi_\varepsilon$, collected in Lemma \ref{lem:estimates} below.
In particular, using $\phi_\varepsilon$ as a test function in \eqref{uniq_cond} one has
\begin{align*}
\mathcal{I}(\phi_{\varepsilon},T) - \mathcal{I}(\phi_{\varepsilon},0) & = \int_0^T\int_\Omega (\rho^1-\rho^2+p^1-p^2)\left[A\partial_t\phi_\varepsilon+(A+B)\Delta\phi_\varepsilon+A{\textbf{u}}\cdot\nabla\phi_\varepsilon\right]\,{\rm d} x\,{\rm d} t\\
&=\int_0^T\int_\Omega (\rho^1-\rho^2+p^1-p^2)\left[A\partial_t\phi_\varepsilon+(A+B)\Delta\phi_\varepsilon+A{\textbf{u}}\cdot\nabla\phi_\varepsilon\right]\,{\rm d} x\,{\rm d} t\\
&\quad-\int_0^T\int_\Omega (\rho^1-\rho^2+p^1-p^2)A\left[\partial_t\phi_\varepsilon+(1+B_\varepsilon/A_\varepsilon)\Delta\phi_\varepsilon+{\textbf{u}}_\varepsilon \cdot\nabla\phi_\varepsilon\right]\,{\rm d} x\,{\rm d} t\\
&=\int_0^T\int_\Omega (\rho^1-\rho^2+p^1-p^2)(B_\varepsilon/A_\varepsilon)(A_\varepsilon-A)\Delta\phi_\varepsilon\,{\rm d} x\,{\rm d} t\\
&\quad+ \int_0^T\int_\Omega (\rho^1-\rho^2+p^1-p^2)(B-B_\varepsilon)\Delta\phi_\varepsilon\,{\rm d} x\,{\rm d} t\\
&\quad+ \int_0^T\int_\Omega (\rho^1-\rho^2+p^1-p^2)A({\textbf{u}} - {\textbf{u}}_\varepsilon) \cdot \nabla \phi_\varepsilon\,{\rm d} x\,{\rm d} t\\
&=:I_\varepsilon^1+I_\varepsilon^2 + I_\varepsilon^3,
\end{align*}
where in the second equality we subtracted the term containing the equation \eqref{parabolic2} solved by $\phi_\varepsilon$, which vanishes.
Let us show that $|I_\varepsilon^1|\to 0$, $|I_\varepsilon^2|\to 0$ and $|I_{\varepsilon}^3| \to 0$ as $\varepsilon\to 0$. First let us recall that $0\le \rho^1,\rho^2\le 1$ a.e. in $[0,T]\times\Omega$, hence $\rho^1,\rho^2\in L^\infty([0,T]\times\Omega)$. On the other hand $p^1,p^2\in L^2([0,T]; H^1(\Omega))$ and by Corollary \ref{cor:proj} we have
$$\int_\Omega|\nabla p^i_t|^2\,{\rm d} x\le \int_\Omega|{\textbf{u}}_t|^2\,{\rm d} x$$
for almost every $t\in[0,T].$ Since ${\textbf{u}}$ is bounded, this implies that
$$\mathop{\rm ess\,sup}_{t\in[0,T]}\|\nabla p^i_t\|_{L^2(\Omega)}\le C.$$
In addition, the $p^i$ being pressures, one has $|\{p_t^i=0\}|\ge|\{\rho_t^i<1\}|\ge |\Omega|-1>0$ for a.e. $t\in[0,T]$, and so a suitable version of Poincar\'e's inequality (since $p_t^i$ vanishes on a set of positive Lebesgue measure) yields $p^i\in L^\infty([0,T]; H^1(\Omega)).$ By the Sobolev embedding this implies that $p^i\in L^\infty([0,T];L^q(\Omega))$, $i\in\{1,2\}$, for all $1\le q\le2^*$ if $d\ge3$ and for all $q<\infty$ if $d=2$. Let us fix $q\in(2,2^*),$ where $2^{*}=2d/(d-2)$ if $d\ge 3$ and $2^*=\infty$ if $d=2$.
This yields the following estimates:
\begin{align*}
|I_\varepsilon^1|&\le \|\rho^1-\rho^2\|_{L^{\infty}([0,T]\times\Omega)}\cdot\|(B_\varepsilon/A_\varepsilon)^{1/2}(A-A_\varepsilon)\|_{L^2([0,T]\times\Omega)}\cdot \|(B_\varepsilon/A_\varepsilon)^{1/2}\Delta\phi_\varepsilon\|_{L^2([0,T]\times\Omega)}\\
&\quad+\int_0^T\|p^1-p^2\|_{L^{q}(\Omega)}\cdot\|(B_\varepsilon/A_\varepsilon)^{1/2}(A-A_\varepsilon)\|_{L^r(\Omega)}\cdot \|(B_\varepsilon/A_\varepsilon)^{1/2}\Delta\phi_\varepsilon\|_{L^2(\Omega)}\,{\rm d} t\\
&\le C(1/\varepsilon)^{1/2}\varepsilon\\
&\quad+\|p^1-p^2\|_{L^\infty(L^{q})}\cdot\|(B_\varepsilon/A_\varepsilon)^{1/2}(A-A_\varepsilon)\|_{L^2(L^r)}\cdot \|(B_\varepsilon/A_\varepsilon)^{1/2}\Delta\phi_\varepsilon\|_{L^2(L^2)} \\
&\le C\varepsilon^{1/2}\to 0 \;\; {\rm{as}\ }\varepsilon\to 0,
\end{align*}
and similarly
\begin{align*}
|I_\varepsilon^2|&\le \|\rho^1-\rho^2\|_{L^{\infty}([0,T]\times\Omega)}\cdot\|(A_\varepsilon/B_\varepsilon)^{1/2}(B-B_\varepsilon)\|_{L^2([0,T]\times\Omega)}\cdot \|(B_\varepsilon/A_\varepsilon)^{1/2}\Delta\phi_\varepsilon\|_{L^2([0,T]\times\Omega)}\\
&\quad+\|p^1-p^2\|_{L^\infty(L^{q})}\cdot\|(A_\varepsilon/B_\varepsilon)^{1/2}(B-B_\varepsilon)\|_{L^2(L^r)}\cdot \|(B_\varepsilon/A_\varepsilon)^{1/2}\Delta\phi_\varepsilon\|_{L^2(L^2)}\\
&\le C(1/\varepsilon)^{1/2}\varepsilon= C\varepsilon^{1/2}\to 0 \;\; {\rm{as}\ }\varepsilon\to 0;
\end{align*}
finally we have
$$ |I_\varepsilon^3| \leq \| \rho^1 - \rho^2\|_{L^{\infty}(L^{q})} \cdot \| {\textbf{u}} - {\textbf{u}}_{\varepsilon} \|_{L^{1}(L^r)} \cdot \| \nabla \phi_\varepsilon \|_{L^{\infty}(L^2)} \leq C \varepsilon \to 0,$$
where $r>1$ is the exponent such that $\displaystyle\frac12+\frac1r+\frac{1}{q}=1,$ i.e. $\displaystyle r=\frac{2q}{q-2}$ (recall that $q\in(2,2^*)$), and we used the approximations \eqref{eq:approx} with this specific value of $r$. Hence we have obtained
$$\mathcal{I}(\phi_{\varepsilon},T ) = \mathcal{I}(\phi_\varepsilon , 0) + o(1).$$
Now we use the fact that the final condition $\theta$ is the same for all $\varepsilon$, and that $|\phi_{\varepsilon} | \leq 1$, which is implied by the maximum principle (see for example Theorem 2.9 in \cite{lieberman}); in particular we can write
$$ \int_{\Omega} \theta(x) \bigl( \rho^1 (T,x ) - \rho^2(T,x) \bigr) \,{\rm d} x = \int_{\Omega} \phi_{\varepsilon}(0,x) ( \rho_0^1 - \rho_0^2) \,{\rm d} x + o(1) \leq \int_{\Omega} | \rho^1_0 - \rho^2_0| \,{\rm d} x + o(1).$$
Letting $\varepsilon \to 0$ we find $\displaystyle\int_\Omega \theta ( \rho^1_T - \rho^2_T)\,{\rm d} x \leq \| \rho^1_0 - \rho^2_0 \|_{L^1}$ and, optimizing over $|\theta| \leq 1$, we obtain the $L^1$-contraction. In particular $\rho^1=\rho^2$ a.e. in $[0,T]\times\Omega$ whenever $\rho^1_0 = \rho^2_0$.
One then obtains also that $p^1=p^2$ a.e. in $[0,T]\times\Omega$, as at the end of the proof of Theorem \ref{thm:unique_first}. The result follows.
\end{proof}
\begin{lemma}\label{lem:estimates}
Let $\phi_\varepsilon$ be a solution of \eqref{parabolic2}. Then there exists a constant
$$C=C(T,\|{\textbf{u}}\|_{L^\infty},\|\nabla \theta\|_{L^2(\Omega)})>0$$
such that the following estimates hold, uniformly in $\varepsilon>0$:
\begin{itemize}
\item[(i)] $\displaystyle\sup_{t\in[0,T]}\|\nabla\phi_\varepsilon(t)\|_{L^2(\Omega)}\le C;$\\
\item[(ii)] $\|(B_\varepsilon/A_\varepsilon)^{\frac12}\Delta\phi_\varepsilon\|_{L^2([0,T]\times\Omega)}\le C;$\\
\item [(iii)] $\|\Delta\phi_\varepsilon\|_{L^2([0,T]\times\Omega)}\le C.$
\end{itemize}
\end{lemma}
\begin{proof}
Let us multiply the first equation in \eqref{parabolic2} by $\Delta\phi_\varepsilon$ and integrate over $[t,T]\times\Omega$ for $0\le t<T$. Integrating by parts in time (and using the boundary condition $\nabla\phi_\varepsilon\cdot\nu=0$), we obtain
\begin{align}\label{estim1}
\frac12\|\nabla\phi_\varepsilon(t)\|^2_{L^2(\Omega)}&+\int_t^T\int_\Omega(1+B_\varepsilon/A_\varepsilon)|\Delta\phi_\varepsilon|^2\,{\rm d} x\,{\rm d} s\nonumber\\
&=\frac 12 \int_\Omega| \nabla\theta|^2 \,{\rm d} x -\int_t^T\int_\Omega {\textbf{u}}_\varepsilon \cdot\nabla\phi_\varepsilon\,\Delta\phi_\varepsilon\,{\rm d} x\,{\rm d} s.
\end{align}
Hence by Young's inequality we have
\begin{align*}
\frac12\|\nabla\phi_\varepsilon(t)\|^2_{L^2(\Omega)}&\le \frac{\|{\textbf{u}}\|_{L^\infty}}{2\delta}\int_t^T\|\nabla \phi_\varepsilon(s)\|^2_{L^2(\Omega)}\,{\rm d} s+\frac12\|\nabla \theta\|^2_{L^2(\Omega)}\\
&\le C+\frac{C}{2}\int_t^T\|\nabla\phi_\varepsilon(s)\|_{L^2(\Omega)}^2\,{\rm d} s,
\end{align*}
where the term in $|\Delta \phi_\varepsilon|^2$ has been absorbed by the left hand side, $0<\delta\le2/\|{\textbf{u}}\|_{L^\infty}$ is a fixed constant and the constant $C>0$ depends only on $\|\nabla \theta\|_{L^2(\Omega)}$ and $\|{\textbf{u}}\|_{L^\infty}$. Hence by Gr\"onwall's inequality we obtain
$$\frac12\|\nabla\phi_\varepsilon(t)\|^2_{L^2(\Omega)}\le Ce^{C(T-t)},$$
which implies in particular that $\displaystyle\sup_{t\in[0,T]}\|\nabla\phi_\varepsilon(t)\|_{L^2(\Omega)}\le C.$ Thus ${\rm{(i)}}$ follows.
On the other hand, choosing $\delta:=2/\|{\textbf{u}}\|_{L^\infty}$ in the Young inequality used in \eqref{estim1} and using (i), we obtain
$$\int_t^T\int_\Omega(B_\varepsilon/A_\varepsilon)|\Delta\phi_\varepsilon|^2\,{\rm d} x\,{\rm d} s\le C,$$
hence $\|(B_\varepsilon/A_\varepsilon)^{\frac12}\Delta\phi_\varepsilon\|_{L^2([0,T]\times\Omega)}\le C,$ and ${\rm{(ii)}}$ follows.
Finally, \eqref{estim1}, ${\rm{(i)}}$ and ${\rm{(ii)}}$ easily imply ${\rm{(iii)}}.$
\end{proof}
\section{About the $L^1$-contraction in the first order case}\label{ss:l1}
In the previous section we proved uniqueness in the second order case using an $L^1$-contraction result. We expect this result to be true in the first order case as well; however, we expect the treatment of this $L^1$-contraction problem in the most general framework (say, ${\textbf{u}} \in W^{1,1}$) to be difficult, since the proof would have to contain the well-posedness theory of DiPerna-Lions as a special case. Indeed, when $\nabla \cdot {\textbf{u}}=0$ and one starts with $\rho_0 \leq 1$, this condition is preserved without adding any pressure term.\\
However, in the case treated in Section \ref{two} (i.e. with a monotone velocity field) we can sketch a proof of the $L^1$-contraction result using the uniqueness already proved. Let us approximate the solution by discretizing in time with the splitting scheme ``continuity equation + Wasserstein projection onto the set $\{ \rho \leq 1\}$'' (similar to the scheme introduced in \cite{MesSan}). Since the vector field is monotone there exists a unique flow, which also implies that there exists a unique solution to the initial value problem $\partial_t \rho_t + \nabla \cdot ( {\textbf{u}}_t \rho_t)=0$, $\rho_{t_0}=\rho$; in particular one can define a map $\Psi_{t_0}^{t_1} (\rho) := \rho_{t_1}$, which satisfies the semigroup rule $\Psi_t^{s'} \circ \Psi_s^t = \Psi_s^{s'}$. Let $\tau$ be a time step and define recursively $\rho_0^{\tau}= \rho_0$ and then
$$\rho^{\tau}_{n+1} =\begin{cases} \Psi_{n\tau}^{(n+1)\tau}( \rho^{\tau}_n) \qquad &\text{ if } n \text{ is even, } \\ \mathcal{P} ( \rho^{\tau}_{n} )& \text{ if }n\text{ is odd.}\end{cases}$$
The $L^1$ distance between two solutions is preserved by the continuity equation step, while it does not increase after the projection step thanks to Lemma \ref{lem:contraction} below. So the $L^1$-contraction holds for the discrete scheme; once we know that the scheme converges as $\tau \to 0$ to our equation, the uniqueness result guarantees that the limit is the solution and that this property is preserved in the limit. In order to guarantee convergence (see \cite{DiMoSa} for general conditions) the crucial quantity to estimate is $W_2(\Psi_t^{t+\tau} (\rho), \rho)$; using the Benamou-Brenier formula, two conditions that guarantee a good estimate are:
\begin{itemize}
\item ${\textbf{u}}\in L^2([0,T];L^{\infty}({\mathbb R}^d))$: in this case we would have $ W_2^2(\Psi_t^{t+\tau} (\rho), \rho) \leq \tau\int_t^{t+\tau} \| {\textbf{u}}_s \|^2_{L^\infty}\,{\rm d} s$;
\item $(\nabla \cdot {\textbf{u}})_{-} \in L^1([0,T] ; L^{\infty}({\mathbb R}^d))$: in this case we would have $\| \Psi_t^s (\rho) \|_{L^\infty} \leq C \| \rho \|_{L^\infty}$ for some universal $C$, and in particular $W_2^2(\Psi_t^{t+\tau} (\rho), \rho) \leq C \| \rho \|_{L^\infty}\, \tau\int_t^{t+\tau} \| {\textbf{u}}_s \|^2_{L^2}\,{\rm d} s$.
\end{itemize}
We believe that this general scheme (uniqueness in the Wasserstein framework and approximation with $L^1$-contractive time-discrete approximations) could be adapted to other convection terms $\Phi$ as well.
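To make the splitting scheme above concrete, we include a minimal numerical sketch (ours, purely illustrative and not taken from the cited works). We assume $d=1$ and ignore the boundary of $\Omega$, and we use the one-dimensional identification of $W_2$ with the $L^2$ distance between quantile functions recalled in Subsection \ref{OptT}: a density is $\leq 1$ exactly when its quantile function $Q$ is such that $Q(s)-s$ is nondecreasing, so the $W_2$-projection onto $\{\rho\leq 1\}$ reduces to an isotonic regression of $Q(s)-s$. All names below are ours.
\begin{verbatim}
import numpy as np

def pava(y):
    # Pool-adjacent-violators: L^2 projection of the sequence y
    # onto nondecreasing sequences (unit weights).
    means, sizes = [], []
    for v in y:
        m, s = float(v), 1.0
        while means and means[-1] > m:
            m = (means[-1] * sizes[-1] + m * s) / (sizes[-1] + s)
            s += sizes[-1]
            means.pop(); sizes.pop()
        means.append(m); sizes.append(s)
    out, i = np.empty(len(y)), 0
    for m, s in zip(means, sizes):
        out[i:i + int(s)] = m
        i += int(s)
    return out

def project(Q, s):
    # W_2-projection onto {rho <= 1} in 1-d: rho <= 1 iff Q - id is
    # nondecreasing, so project Q - s onto nondecreasing sequences.
    return s + pava(Q - s)

def step(Q, s, u, tau):
    # One splitting step: transport the quantiles along u (explicit
    # Euler for the flow), then re-impose the density constraint.
    X = np.sort(Q + tau * u(Q))  # sorting keeps X a quantile function
    return project(X, s)

# Example: rho_0 uniform on (0,1) and u(x) = -x (monotone), which
# would compress the crowd above density 1 without the projection.
n, tau = 1000, 0.01
s = (np.arange(n) + 0.5) / n     # grid on (0,1)
Q = s.copy()                     # quantile function of rho_0
for _ in range(100):
    Q = step(Q, s, lambda x: -x, tau)
# Each quantile bin carries mass 1/n, so the density is about
# (1/n) / np.diff(Q), which stays <= 1 up to discretization error.
\end{verbatim}
This mirrors the mechanism described above: the (exact) transport step preserves the $L^1$ distance between two evolutions, while the projection step does not increase it by Lemma \ref{lem:contraction}.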
\begin{lemma}\label{lem:contraction} Let us consider the projection operator $\mathcal{P}: {\mathscr P}({\mathbb R}^d ) \to {\mathscr P}(\Omega)$ defined by
\begin{equation}\label{def:proj} \mathcal{P}(\rho) = {\rm argmin} \{ W_2^2(\rho, \eta) \; : \; \eta \in {\mathscr P}(\Omega) , \; \eta \leq 1 \}.
\end{equation}
Then, whenever $\rho_1, \rho_2$ are probability densities, we have $\| \mathcal{P}(\rho_1) - \mathcal{P} ( \rho_2) \|_{L^1} \leq \|\rho_1 - \rho_2\|_{L^1}$.
\end{lemma}
\begin{proof}
First, let us observe that formula \eqref{def:proj} lets us extend $\mathcal{P}$ to measures in ${\mathscr M}_+({\mathbb R}^d)$ with mass less than $|\Omega|$. In this setting a monotonicity property holds (see for example Theorem 5.1 in \cite{AleKimYao}): $\mathcal{P}(\rho) \leq \mathcal{P}(\eta)$ almost everywhere whenever $\rho \leq \eta$. Now we can derive the $L^1$ contraction. Let us denote $\rho= \min \{ \rho_1 , \rho_2\}$. First of all we have
$$|\rho_1 - \rho_2| = \max\{\rho_1, \rho_2\} - \min \{ \rho_1 , \rho_2\} = ( \rho_1 + \rho_2 - \min\{\rho_1, \rho_2\} ) - \min\{\rho_1,\rho_2\};$$
in particular $\| \rho_1 - \rho_2\|_{L^1} = 2- 2 \int \rho\,{\rm d} x$. By monotonicity we have $\mathcal{P}(\rho_i) \geq \mathcal{P} (\rho)$, hence $\min\{ \mathcal{P}(\rho_1) , \mathcal{P}(\rho_2) \} \geq \mathcal{P}(\rho)$, and so
$$ \| \mathcal{P}(\rho_1) - \mathcal{P}(\rho_2)\|_{L^1} = 2- 2\int \min\{ \mathcal{P}(\rho_1) , \mathcal{P}(\rho_2) \}\,{\rm d} x \leq 2- 2\int \mathcal{P}(\rho)\,{\rm d} x = 2-2 \int \rho\,{\rm d} x = \| \rho_1 -\rho_2 \|_{L^1}, $$
where we also used that $\mathcal{P}$ preserves the total mass; this proves the claim.
\end{proof}
\vspace{1cm}
{\sc Acknowledgements.} We would like to thank Filippo Santambrogio for the fruitful discussions during this project and for the careful reading of various versions of the manuscript. The authors warmly acknowledge the support of the ANR project ISOTACE (ANR-12-MONU-0013). We also thank the referee for her/his very useful comments and remarks on the manuscript.
\begin{thebibliography}{99}
\bibitem{AleKimYao} \textsc{D. Alexander, I. Kim, Y. Yao}, Quasi-static evolution and congested crowd transport, {\it Nonlinearity}, 27 (2014), No. 4, 823-858.
\bibitem{AmbGigSav} \textsc{L. Ambrosio, N. Gigli, G. Savar\'e}, {\it Gradient flows in metric spaces and in the space of probability measures}, Lectures in Mathematics ETH Z\"urich, Birkh\"auser Verlag, Basel, (2008).
\bibitem{Bou} \textsc{J. E. Bouillet}, Nonuniqueness in $L^{\infty}$: an example, {\it Lecture notes in pure and applied mathematics}, 148 (1993), Differential equations in Banach spaces, 35-40.
\bibitem{Carrillo} \textsc{J. Carrillo}, Entropy Solutions for Nonlinear Degenerate Problems, {\it Arch. Rational Mech. Anal.}, 147 (1999), 269-361.
\bibitem{Cha1} \textsc{C. Chalons}, Numerical Approximation of a macroscopic model of pedestrian flows, {\it SIAM J. Sci. Comput.}, 29 (2007), Issue 2, 539-555.
\bibitem{Col} \textsc{R.M. Colombo, M.D. Rosini}, Pedestrian Flows and non-classical shocks, {\it Math. Mod. Meth. Appl. Sci.}, 28 (2005), 1553-1567.
\bibitem{Cos} \textsc{V. Coscia, C. Canavesio}, First-Order macroscopic modelling of human crowd dynamics, {\it Math. Mod. Meth. Appl. Sci.}, 18 (2008), 1217-1247.
\bibitem{CriPicTos} \textsc{E. Cristiani, B. Piccoli, A. Tosin}, {\it Multiscale Modeling of Pedestrian Dynamics}, Springer, (2014).
\bibitem{Cro} \textsc{A.B. Crowley}, On the weak solution of moving boundary problems, {\it J. Inst. Math. Applics}, 24 (1979), 43-57.
\bibitem{DiMoSa} \textsc{S. Di Marino, B. Maury, F. Santambrogio}, Measure sweeping processes, {\it Journal of Convex Analysis}, 23 (2016), No. 2.
\bibitem{Helb1} \textsc{D. Helbing}, A fluid dynamic model for the movement of pedestrians, {\it Complex Systems}, 6 (1992), 391-415.
\bibitem{Helb3} \textsc{D. Helbing, P. Moln\'ar}, Social force model for pedestrian dynamics, {\it Phys. Rev. E}, 51 (1995), 4282-4286.
\bibitem{Hend} \textsc{L.F. Henderson}, The statistics of crowd fluids, {\it Nature}, 229 (1971), 381-383.
\bibitem{Hug1} \textsc{R. L. Hughes}, A continuum theory for the flow of pedestrians, {\it Transportation Research Part B}, 36 (2002), 507-535.
\bibitem{Hug2} \textsc{R. L. Hughes}, The flow of human crowds, {\it Annual Review of Fluid Mechanics}, 35 (2003), Annual Reviews: Palo Alto, CA, 169-182.
\bibitem{Igb} \textsc{N. Igbida, J. M. Urbano}, Uniqueness for nonlinear degenerate problems, {\it NoDEA Nonlinear Differential Equations Appl.}, 10 (2003), No. 3, 287-307.
\bibitem{Igb2} \textsc{N. Igbida}, Hele-Shaw type problems with dynamical boundary conditions, {\it J. Math. Anal. Appl.}, 335 (2007), No. 2, 1061-1078.
\bibitem{IgbShiWit} \textsc{N. Igbida, K. Sbihi, P. Wittbold}, Renormalized solutions for Stefan type problems: existence and uniqueness, {\it NoDEA Nonlinear Differential Equations Appl.}, 17 (2010), No. 1, 69-93.
\bibitem{krylov} \textsc{N.V. Krylov}, {\it Lectures on elliptic and parabolic equations in Sobolev spaces}, Graduate Studies in Mathematics, 96, AMS, (2008).
\bibitem{lady} \textsc{O. A. Ladyzenskaja, V. A. Solonnikov, N.N. Uralceva}, {\it Linear and quasilinear equations of parabolic type}, translated from the Russian, Translations of Mathematical Monographs, Vol. 23, (1968).
\bibitem{lasry1} \textsc{J.-M. Lasry, P.-L. Lions}, Jeux \`a champ moyen I. Le cas stationnaire, {\it C. R. Math. Acad. Sci. Paris}, 343 (2006), No. 9, 619-625.
\bibitem{lasry2} \textsc{J.-M. Lasry, P.-L. Lions}, Jeux \`a champ moyen II. Horizon fini et contr\^ole optimal, {\it C. R. Math. Acad. Sci. Paris}, 343 (2006), No. 10, 679-684.
\bibitem{lasry3} \textsc{J.-M. Lasry, P.-L. Lions}, Mean field games, {\it Jpn. J. Math.}, 2 (2007), No. 1, 229-260.
\bibitem{lieberman} \textsc{G. M. Lieberman}, {\it Second order parabolic differential equations}, World Scientific Publishing, (1996).
\bibitem{MauRouSan1} \textsc{B. Maury, A. Roudneff-Chupin, F. Santambrogio}, A macroscopic crowd motion model of gradient flow type, {\it Math. Models and Meth. in Appl. Sci.}, 20 (2010), No. 10, 1787-1821.
\bibitem{MauRouSan2} \textsc{B. Maury, A. Roudneff-Chupin, F. Santambrogio}, Congestion-driven dendritic growth, {\it Discrete Contin. Dyn. Syst.}, 34 (2014), No. 4, 1575-1604.
\bibitem{MauRouSanVen} \textsc{B. Maury, A. Roudneff-Chupin, F. Santambrogio, J. Venel}, Handling congestion in crowd motion modeling, {\it Netw. Heterog. Media}, 6 (2011), No. 3, 485-519.
\bibitem{crowd1} \textsc{B. Maury, J. Venel}, Handling of contacts in crowd motion simulations, {\it Traffic and Granular Flow}, Springer, (2007).
\bibitem{mccann} \textsc{R.J. McCann}, A convexity principle for interacting gases, {\it Adv. Math.}, 128 (1997), No. 1, 153-179.
\bibitem{Mes} \textsc{A.R. M\'esz\'aros}, Mean Field Games with density constraints, MSc thesis, \'Ecole Polytechnique, Palaiseau, France, (2012), available at \url{http://www.math.ucla.edu/~alpar/theses/msc_thesis_polytechnique.pdf}.
\bibitem{MesSan} \textsc{A.R. M\'esz\'aros, F. Santambrogio}, Advection-diffusion equations with density constraints, {\it Analysis \& PDE}, to appear.
\bibitem{NatPelSav} \textsc{L. Natile, M. Peletier, G. Savar\'e}, Contraction of general transportation costs along solutions to Fokker-Planck equations with monotone drifts, {\it J. Math. Pures Appl.}, 95 (2011), 18-35.
\bibitem{Otto} \textsc{F. Otto}, $L^1$-contraction and uniqueness for quasilinear elliptic-parabolic equations, {\it J. of Diff. Eq.}, 131 (1996), 20-38.
\bibitem{PetQuiVaz} \textsc{B. Perthame, F. Quir\'os, J.L. V\'azquez}, The Hele-Shaw asymptotics for mechanical models of tumor growth, {\it Arch. Rational Mech. Anal.}, 212 (2014), 93-127.
\bibitem{Por} \textsc{A. Porretta}, Weak Solutions to Fokker-Planck Equations and Mean Field Games, {\it Arch. Rational Mech. Anal.}, 216 (2015), Issue 1, 1-62.
\bibitem{aude:phd} \textsc{A. Roudneff-Chupin}, Mod\'elisation macroscopique de mouvements de foule, PhD thesis, Universit\'e Paris-Sud, (2011), available at \url{http://www.math.u-psud.fr/~roudneff/Images/these_roudneff.pdf}.
\bibitem{OTAM} \textsc{F. Santambrogio}, {\it Optimal Transport for Applied Mathematicians}, Progress in Nonlinear Differential Equations and Their Applications 87, Birkh\"{a}user, Basel, (2015).
\bibitem{villani} \textsc{C. Villani}, {\it Topics in Optimal Transportation}, Graduate Studies in Mathematics, AMS, (2003).
\end{thebibliography}
\end{document}
|
\begin{document}
\begin{abstract}
We study Voiculescu's microstate free entropy for a single non--selfadjoint
random variable. The main result is that certain additional constraints on
eigenvalues of microstates do not change the free entropy. Our tool is
the method of random regularization of Brown measure which was studied recently
by Haagerup and the author. As a simple application we present an upper
bound for the free entropy of a single non--selfadjoint operator in terms of
its Brown measure and the second moment. We furthermore show that this
inequality becomes an equality for a class of $DT$--operators which was
introduced recently by Dykema and Haagerup.
\end{abstract}
\maketitle
\section{Introduction}
The microstate free entropy $\chi$ was introduced by Voiculescu
\cite{VoiculescuPart2} as a tool for the study of some non--commutative
systems. Roughly speaking, it answers the question how many finite matrices
have nearly the same moments as a given non--commutative random variable. It
has turned out to have very powerful applications (cf.\
\cite{Ge1997,VoiculescuPart3}), however it is not an easy object to deal with.
One of the reasons for these difficulties is that currently there are no
general methods for the computation of the free entropy in concrete cases.
Exact formulas were found for the free entropy of a single selfadjoint random
variable, for tuples of free random variables \cite{VoiculescuPart2} and for
$R$--diagonal elements \cite{NicaShlyakhtenkoSpeicher1999}.
In this article we present a method which hopefully will be
useful for calculating and estimating free entropy in many concrete cases.
The main idea is to change the definition of microstates $\Gamma$ which
approximate a single non--selfadjoint random variable $x$ in such a way that
it does not change the value of the free entropy $\chi(x)$.
The original sets $\Gamma$ consisted of all matrices which---informally
speaking---had almost the same moments as a given random variable $x$, while
our new sets $\tilde{\Gamma}$ will consist of those matrices in $\Gamma$ whose
eigenvalue distribution is additionally close to the Brown measure of $x$. In order to
show that $\Gamma$ and $\tilde{\Gamma}$ give rise to the same free entropy we
use the method of random regularization of Brown spectral measure which was
introduced by Haagerup \cite{Haagerup2001} and further developed by the author
\cite{Sniady2001}.
As an application we present a new upper bound for the free entropy of a single
random variable $x$ in terms of its Brown measure and second moment. We
show also that for a class of $DT$--operators which was introduced recently by
Dykema and Haagerup \cite{DykemaHaagerup2001} this inequality becomes equality.
\section{Preliminaries}
\label{sec:preliminaries}
\subsection{Non--commutative probability spaces}
A non--commutative probability space is a pair $({\mathcal{A}},\phi)$, where ${\mathcal{A}}$ is a
$\star$--algebra and $\phi$ is a normal, faithful, tracial state on ${\mathcal{A}}$.
Elements of ${\mathcal{A}}$ will be referred to as non--commutative random variables and
the state $\phi$ as the expectation value.
One of the simplest examples is the set ${\mathcal{M}}_N$ of all complex--valued $N\times
N$ matrices equipped with a normalized trace $\tr$ given by
$\tr m=\frac{1}{N} \Tr m$, where $m\in{\mathcal{M}}_N$ and $\Tr$ is the usual trace.
\subsection{Microstate free entropy}
The original definition of Voiculescu's free entropy $\chi^{\operatorname{sa}}(x_1,\dots,x_n)$
allows one to compute the free entropy of a tuple of non--commutative
self--adjoint random variables. The free entropy
$\chi(x)$ of a non--selfadjoint random variable considered in this article is connected with the original
definition by $$\chi(x)=\chi^{\operatorname{sa}}(\Re x,\Im x).$$
Let $x$ be a non--commutative random variable,
$\epsilon>0$, $R>0$ be real numbers and $k>0$ be an integer.
We define the sets \cite{VoiculescuPart2,NicaShlyakhtenkoSpeicher1999}
\begin{multline}
\Gamma_R(x;k,N,\epsilon)=\Big\{m\in {\mathcal{M}}_N: \|m\|\leq R \text{ and} \\
|\tr(m^{s_1}\cdots m^{s_p})-
\tau(x^{s_1} \cdots x^{s_p})|<\epsilon\\ \text{for all } p\leq k \text{ and
} s_1,s_2,\dots, s_p\in \{1,\star\} \Big\}. \label{eq:defgamma}
\end{multline}
Define next
\begin{equation}
\chi_R(x;k,\epsilon)=\limsup_{N\to\infty}
\left[\frac{1}{N^2}\log\vol \Gamma_R(x;k,N,\epsilon)
+ \log N\right],
\label{eq:defchir}
\end{equation}
where $\vol$ is the Lebesgue measure on ${\mathcal{M}}_N$ as described in
(\ref{eq:defvol}).
Lastly, the free entropy is defined by
\begin{equation}
\chi(x)=\sup_R \inf_{k,\epsilon} \chi_R(x;k,\epsilon).
\label{eq:defchi}
\end{equation}
Since $\chi_R(x;k,\epsilon)$ is a decreasing function of $k$ and an increasing
function of $\epsilon$, we have the following simple lemma.
\begin{lemma}
\label{lem:pierwszy}
Let a non--commutative random variable $x$ and a number $R>0$ be given. Then
there exists a sequence $(\epsilon_N)$ of non--negative numbers and a sequence
$(k_N)$ of natural numbers such that
$\lim_{N\rightarrow\infty} \epsilon_N =0$,
$\lim_{N\rightarrow\infty} k_N=\infty$ and
\begin{equation}
\chi_R(x)
\leq\limsup_{N\rightarrow\infty} \chi_R(x;k_N,N,\epsilon_N),
\label{eq:nawygnaniu}
\end{equation}
where $\chi_R(x;k,N,\epsilon)$ denotes the expression in square brackets in
\eqref{eq:defchir} and $\chi_R(x)=\inf_{k,\epsilon}\chi_R(x;k,\epsilon)$.
\end{lemma}
\subsection{Fuglede--Kadison determinant and Brown measure}
Let a non--commutative probability space $({\mathcal{A}},\phi)$ be given.
For $x\in{\mathcal{A}}$ we define its Fuglede--Kadison determinant $\Delta(x)$ by
\cite{FugledeKadison}
$$\Delta(x)=\exp\left[ \phi( \ln |x| )\right]$$
and its Brown measure \cite{Brown} to be the
Schwartz distribution on ${\mathbb{C}}$ given by
$$\mu_x= \frac{1}{2\pi} \left( \frac{\partial^2}{\partial (\Re\lambda)^2}
+\frac{\partial^2}{\partial (\Im\lambda)^2} \right) \ln \Delta (x-\lambda).$$
One can show that in fact $\mu_x$ is a positive probability measure on ${\mathbb{C}}$.
\begin{lemma}
The Brown measure of a matrix $m\in{\mathcal{M}}_N$ with respect to the state $\tr$
is a probability counting measure on the set of eigenvalues of $m$:
$$\mu_m=\frac{1}{N} \sum_{i=1}^N \delta_{\lambda_i}, $$
where $\lambda_1,\dots,\lambda_N$ are the eigenvalues of $m$ counted with
multiplicity.
\end{lemma}
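For the reader's convenience, let us sketch the simple verification of this
well--known fact. For $m\in{\mathcal{M}}_N$ with eigenvalues $\lambda_1,\dots,\lambda_N$ we have
$$\Delta(m-\lambda)=\exp\bigl[\tr \ln|m-\lambda|\bigr]=|\det(m-\lambda)|^{1/N}=
\prod_{i=1}^N |\lambda_i-\lambda|^{1/N},$$
hence $\ln\Delta(m-\lambda)=\frac{1}{N}\sum_{i=1}^N \ln|\lambda_i-\lambda|$; since
$\frac{1}{2\pi}\left(\frac{\partial^2}{\partial (\Re\lambda)^2}
+\frac{\partial^2}{\partial (\Im\lambda)^2}\right)\ln|\lambda-\lambda_i|=\delta_{\lambda_i}$
in the sense of distributions, the definition of the Brown measure gives the
claimed formula for $\mu_m$.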
In the following we will be interested in studying the random measure
$\omega\mapsto \mu_{A(\omega)}$ for a random matrix $A\in\M_N\big(\El^{\infty-}(\Omega) \big)$.
This random measure is called the empirical eigenvalues distribution.
\subsection{Convergence of $\star$--moments}
Let a sequence $(A_N)$ of random matrices (where $A_N\in\M_N\big(\El^{\infty-}(\Omega) \big)$), a
non--commutative probability space $({\mathcal{A}},\phi)$ and $x\in{\mathcal{A}}$ be given.
We say that the sequence $A_N$ converges to $x$ in $\star$--moments almost
surely if for every $n\in{\mathbb{N}}$ and $s_1,\dots, s_n\in\{1,\star\}$ we have that
$$\lim_{N\rightarrow\infty} \tr_N[ A_N^{s_1} \cdots
A_N^{s_n} ] = \phi( x^{s_1} \cdots x^{s_n} )$$
holds almost surely.
\subsection{Random regularization of Brown measure}
We say that a random matrix
$$G_N=(G_{N,ij})_{1\leq i,j\leq N}\in \M_N\big(\El^{\infty-}(\Omega) \big)$$
is a standard Gaussian random matrix if
$$\big(\Re G_{N,ij}\big)_{1\leq i,j\leq N},
\big(\Im G_{N,ij}\big)_{1\leq i,j \leq N}$$
are independent Gaussian variables with mean zero and variance $\frac{1}{2 N}$.
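As a quick consistency check of this normalization (not needed in the sequel), note that
$${\mathbb{E}}\,\tr (G_N G_N^{\star})=\frac{1}{N}\sum_{1\leq i,j\leq N}{\mathbb{E}}|G_{N,ij}|^2
=\frac{1}{N}\cdot N^2\cdot\frac{1}{N}=1,$$
so the second moment of $G_N$ is of order one, independently of $N$.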
\begin{theorem}
\label{theo:regularyzacja}
Let $(A_N)$ be a sequence of random matrices, $A_N\in\M_N\big(\El^{\infty-}(\Omega) \big)$, which
converges in $\star$--moments to $x$ almost surely.
Let furthermore $(G_N)$ be a
sequence of independent standard Gaussian matrices which is independent of
$(A_N)$.
There exists a sequence $(t_N)$ of real numbers such that
$\lim_{N\rightarrow\infty} t_N=0$ and such that the sequence of empirical
eigenvalues distributions $\mu_{A_N+t_N G_N}$ converges weakly to $\mu_x$
almost surely.
There also exists a sequence $(B_N)$ of non--random matrices $B_N\in{\mathcal{M}}_N$ such
that $\lim_{N\rightarrow\infty} \|B_N\|=0$ and such that the sequence of
empirical eigenvalues distributions $\mu_{A_N+B_N}$ converges weakly to $\mu_x$
almost surely.
\end{theorem}
\begin{proof}
The first part was proved in \cite{Sniady2001}.
For the second part of the theorem let us define $B_N=t_N G_N(\omega)$ for a fixed
$\omega$ belonging to a set of full probability. Since
$\limsup_{N\rightarrow\infty} \|G_N\|<\infty$ holds almost surely \cite{Geman},
the sequence $(B_N)$ defined in this way satisfies the assertions of the
theorem for almost every such choice of $\omega$. \end{proof}
\section{The main result: improved microstates $\tilde{\Gamma}$}
\label{sec:themain}
Let $x$ be a non--commutative random variable,
$\epsilon>0$, $\theta>0$, $R>0$ be real numbers and $k>0$, $l>0$ be integers.
In full analogy with (\ref{eq:defgamma})---(\ref{eq:defchi}) we define
improved microstates $\tilde{\Gamma}$ and improved free entropy~$\tilde{\chi}$:
\begin{multline}
\tilde{\Gamma}_R(x;k,N,\epsilon,l,\theta)=\bigg\{m\in
\Gamma_R(x;k,N,\epsilon):\\
\left| \int_{\mathbb{C}} z^i \bar{z}^j d\mu_m - \int_{\mathbb{C}} z^i \bar{z}^j \ {\mathrm{d}}\mu_x\right|
<\theta \quad\text{for } i,j\leq l
\bigg\}, \label{eq:defgammatilde}
\end{multline}
\begin{equation}
\tilde{\chi}_R(x;k,\epsilon,l,\theta)=\limsup_{N\to\infty}
\left[\frac{1}{N^2}\log\vol \tilde{\Gamma}_R(x;k,N,\epsilon,l,\theta)
+ \log N\right],
\label{eq:defchirtilde}
\end{equation}
\begin{equation}
\tilde{\chi}(x)=\sup_R \inf_{k,\epsilon,l,\theta}
\tilde{\chi}_R(x;k,\epsilon,l,\theta). \label{eq:defchitilde}
\end{equation}
\begin{theorem}
\label{theo:glowne}
For every non--commutative random variable $x$ we have
$$\chi(x)=\tilde{\chi}(x).$$
\end{theorem}
\begin{proof}
Since $\tilde{\Gamma}_R(x; k,N,\epsilon,l,\theta)\subseteq \Gamma_R(x;
k,N,\epsilon)$, the inequality $\tilde{\chi}(x)\leq \chi(x)$ follows easily.
Let $(\epsilon_N)$ and $(k_N)$ be the sequences given by Lemma
\ref{lem:pierwszy}. Let $(A_N)$ be a sequence of independent random matrices
such that the distribution of $A_N$ is the uniform distribution on the set
$\Gamma_R(x; k_N,N,\epsilon_N)$ and let $(B_N)$ be the sequence given by
Theorem \ref{theo:regularyzacja}.
Since $\|B_N\|$ converges to zero, there exist $R'>0$, a sequence of
positive numbers $(\epsilon'_N)$ which converges to zero and a sequence of
natural numbers $(k'_N)$ which diverges to infinity such that
$$\Gamma_R(x;k_N,N,\epsilon_N)+B_N\subseteq\Gamma_{R'}(x;k'_N,N,\epsilon_N')$$
holds for every $N\in{\mathbb{N}}$,
where $\Gamma_R(x;k_N,N,\epsilon_N)+B_N$ denotes a translation of the set
$\Gamma_R(x;k_N,N,\epsilon_N)$ by the vector $B_N$.
Since the random measures $\omega\mapsto\mu_{A_N(\omega)+B_N}$ converge weakly to
$\mu_x$ in probability, for any $\theta>0$ and integer $l>0$ we have that
$$\lim_{N\rightarrow\infty}
P\big(\omega:A_N(\omega)+B_N\not\in\tilde{\Gamma}_{R'}(x;
k'_N,N,\epsilon_N',l,\theta) \big) =0.$$
Since the Lebesgue measure is translation--invariant, for any $\theta>0$
and integer $l>0$ we have
$$\limsup_{N\rightarrow\infty} \frac{\vol \Gamma_R(x;
k_N,N,\epsilon_N)}{\vol \tilde{\Gamma}_{R'}(x; k'_N,N,\epsilon'_N,l,\theta)}
\leq 1, $$
or equivalently
\begin{equation} \label{eq:christmasa} \limsup_{N\rightarrow\infty} \big(
\chi_R(x; k_N,N,\epsilon_N) - \tilde{\chi}_{R'}(x; k'_N,N,\epsilon'_N,l,\theta)
\big)\leq 0.
\end{equation}
For any $\epsilon>0$ and integer $k>0$ there exists $N_0$ such that for any
$N>N_0$ we have $\epsilon'_N<\epsilon$ and $k'_N>k$, hence for $N>N_0$
\begin{equation}
\label{eq:christmasb}
\tilde{\chi}_{R'}(x; k'_N,N,\epsilon'_N,l,\theta) \leq
\tilde{\chi}_{R'}(x; k,N,\epsilon,l,\theta).
\end{equation}
Inequalities \eqref{eq:nawygnaniu}, \eqref{eq:christmasa} and
\eqref{eq:christmasb} combine to give
$$\chi(x)\leq \tilde{\chi}(x).$$
\end{proof}
\section{Application: upper bound for free entropy}
In this section we present a new inequality for the free entropy of a single
non--selfadjoint random variable. The main idea is to write matrices from
microstates $\tilde{\Gamma}$ in the upper triangular form and then to find
constraints on diagonal and offdiagonal entries.
\subsection{Pull--back of the Lebesgue measure on ${\mathcal{M}}_N$}
We denote by
${\mathcal{M}}_N^{\operatorname{d}}=\{m\in{\mathcal{M}}_N: m_{ij}=0 \mbox{ if } i\neq j\}$ the set of diagonal
matrices and by ${\mathcal{M}}_N^{\operatorname{sut}}=\{m\in{\mathcal{M}}_N: m_{ij}=0 \mbox{ if } i\geq j\}$ the set
of all strictly upper triangular matrices.
We can regard ${\mathcal{M}}_N$ and ${\mathcal{M}}_N^{\operatorname{sut}}$ as real Euclidean spaces with a scalar
product $\langle x,y\rangle=\Re \Tr x y^{\star}$ and thus equip them
with Lebesgue measures
\begin{equation}
\vol=\prod_{1\leq i,j\leq N} \ {\mathrm{d}} \Re m_{ij} \ {\mathrm{d}} \Im m_{ij}
\label{eq:defvol}
\end{equation}
and
$$\vol^{\operatorname{sut}}= \prod_{1\leq i<j\leq N} \ {\mathrm{d}} \Re m_{ij} \ {\mathrm{d}} \Im m_{ij}$$
respectively.
We have a natural identification ${\mathcal{M}}_N^{\operatorname{d}}\cong\{ (\lambda_1,\dots,\lambda_N)\in {\mathbb{C}}^N \}$
and we equip it with the measure
$$\vol^{\operatorname{d}}=\frac{ \pi^{\frac{N^2-N}{2}} }{\prod_{1\leq i\leq N} i!}
\prod_{1\leq i<j\leq N} |\lambda_i-\lambda_j|^2 \prod_{1\leq i\leq N} \ {\mathrm{d}}\Re
\lambda_i \ {\mathrm{d}}\Im \lambda_i. $$
We also denote by $U_N$ the set of unitary $N\times N$ matrices equipped with
the Haar measure $\vol^{\operatorname{U}}$ normalized in such a way that it is a probability
measure.
\begin{proposition}
\label{prop:pullback}
For every $N$ the measure $\vol^{\operatorname{d}}\times \vol^{\operatorname{sut}} \times \vol^{\operatorname{U}}$ is a
pull--back of the measure $\vol$ with respect to the map
$${\mathcal{M}}_N^{\operatorname{d}} \times {\mathcal{M}}_N^{\operatorname{sut}} \times U_N
\ni (d, m, u) \mapsto u(d+m)u^{-1} \in {\mathcal{M}}_N. $$
\end{proposition}
\begin{proof}
This result is due to Dyson and can be extracted from Appendix A.35 of
\cite{Mehta}. \end{proof}
\subsection{Diagonal entropy $\hat{\chi}^{\operatorname{d}}$}
Let $x$ be a non--commutative random variable. In the
following we define an auxiliary quantity $\hat{\chi}^{\operatorname{d}}(x)$ which
answers the question how many diagonal matrices (with respect to the measure
$\vol^{\operatorname{d}}$) have almost the same Brown measure as $x$.
\begin{multline*}
\hat{\Gamma}^{\operatorname{d}}_R (x;N,l,\theta)=
\bigg\{m \in {\mathcal{M}}_N^{\operatorname{d}}: \|m\|\leq R \text{ and} \\ \bigg| \int_{\mathbb{C}} z^i
\bar{z}^j\ {\mathrm{d}}\mu_m - \int_{\mathbb{C}} z^i \bar{z}^j\ {\mathrm{d}}\mu_x \bigg|< \theta \text{ for
all } i,j\leq l \bigg\}; \end{multline*}
$$\hat{\chi}_R^{\operatorname{d}}(x;l,\theta)=\lim_{N\to\infty}
\left[\frac{1}{N^2}\log \vol^{\operatorname{d}} \hat{\Gamma}_R^{\operatorname{d}}(x;N,l,\theta) +
\frac{\log N}{2} \right],$$
$$\hat{\chi}^{\operatorname{d}}(x)=\sup_R \inf_{l,\theta} \hat{\chi}_R^{\operatorname{d}}(x;
l,\theta).$$
\begin{theorem}
\label{theo:diagonalne}
For any non--commutative random variable $x$ we have
$$\hat{\chi}^{\operatorname{d}}(x)=\int_{{\mathbb{C}}} \int_{{\mathbb{C}}} \log |z_1-z_2| \ {\mathrm{d}}\mu_x(z_1)
\ {\mathrm{d}}\mu_x(z_2)+\frac{3}{4}+\frac{\ln \pi}{2}. $$ \end{theorem}
\begin{proof}
The proof follows exactly the proof of Proposition 4.5 of \cite{VoiculescuPart2},
but since we are dealing with measures on ${\mathbb{C}}$ and the original proof concerns
measures on ${\mathbb{R}}$, we have to replace Lemma 4.3 in \cite{VoiculescuPart2} by
Theorem 2.1 of \cite{Hadwin}.
\end{proof}
\subsection{Offdiagonality}
If $x$ is a non--commutative random variable we define its offdiagonality
$\od_x$ by $$\od_x=\tau(xx^{\star})-\int_{\mathbb{C}} |z|^2 d\mu_x(z).$$
This quantity can be regarded as a kind of non--commutative variance. Since
$\od_x=0$ if and only if $x$ is normal, the offdiagonality of $x$ can also be
regarded as a kind of distance from $x$ to the normal operators.
For an upper--triangular matrix $m\in{\mathcal{M}}_N({\mathbb{C}})$ ($m_{ij}=0$ if $i>j$) its
offdiagonality is equal to the (normalized) sum of squares of the offdiagonal
entries:
$$\od_m=\frac{1}{N} \sum_{1\leq i<j\leq N} |m_{ij}|^2.$$
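As a simple illustration of these formulas consider the nilpotent Jordan matrix
$$m=\left[\begin{array}{cc} 0 & 1 \\ 0 & 0 \end{array}\right]\in{\mathcal{M}}_2 .$$
Its Brown measure is $\mu_m=\delta_0$, so $\int_{\mathbb{C}} |z|^2\ {\mathrm{d}}\mu_m(z)=0$, while
$\tr(mm^{\star})=\frac{1}{2}$; hence $\od_m=\frac{1}{2}$, in agreement with
$\frac{1}{N}\sum_{1\leq i<j\leq N} |m_{ij}|^2=\frac{1}{2}$.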
\begin{proposition}
\label{prop:pozadiagonalne}
For any $o>0$ we have that
$$\lim_{N\rightarrow\infty} \left[ \frac{1}{N^2} \log \vol^{\operatorname{sut}}
\{m\in{\mathcal{M}}_N^{\operatorname{sut}}: \od_m\leq o\}+\frac{\log N}{2} \right]=\frac{1}{2}+\frac{\log
2\pi o}{2}. $$ \end{proposition}
\begin{proof}
It is enough to notice that
$\{m\in{\mathcal{M}}_N^{\operatorname{sut}}: \tr mm^{\star}\leq o \}$ is an
$N(N-1)$--dimensional ball with radius $\sqrt{oN}$, hence its volume is equal
to $$\pi^{\frac{N(N-1)}{2}} \left[ \Gamma\left( \frac{N (N-1)}{2}+1
\right)\right]^{-1} (oN)^{\frac{N (N-1)}{2}}.$$
\end{proof}
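For the reader's convenience we indicate the asymptotics behind the last formula:
writing $M=\frac{N(N-1)}{2}$ and using Stirling's formula
$\log\Gamma(M+1)=M\log M-M+O(\log M)$, we get
$$\frac{1}{N^2}\log\left[\pi^{M}\frac{(oN)^{M}}{\Gamma(M+1)}\right]
=\frac{M}{N^2}\bigl[\log(\pi o N)-\log M+1\bigr]+O\Bigl(\frac{\log N}{N^2}\Bigr)
=\frac{1}{2}+\frac{\log 2\pi o}{2}-\frac{\log N}{2}+o(1),$$
which gives the stated limit after adding $\frac{\log N}{2}$.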
\subsection{The main inequality}
\begin{theorem}
Let $x$ be a non--commutative random variable. Then
\begin{equation}
\label{eq:glowne}
\chi(x)\leq \int_{{\mathbb{C}}} \int_{{\mathbb{C}}} \log |z_1-z_2| \ {\mathrm{d}}\mu_x(z_1)
\ {\mathrm{d}}\mu_x(z_2)+\frac{5}{4}+\ln \bigl(\pi\sqrt{2 \od_x}\bigr).
\end{equation}
\end{theorem}
\begin{proof}
The proof is a direct consequence of Theorem \ref{theo:glowne}, Proposition
\ref{prop:pullback}, Theorem \ref{theo:diagonalne} and Proposition
\ref{prop:pozadiagonalne}.
\end{proof}
\subsection{Free entropy of $DT$--operators}
For any compactly supported probability measure $\nu$ on ${\mathbb{C}}$ and $o\geq 0$
Dykema and Haagerup consider an operator $x$ which is said to be $DT(\nu,o)$
\cite{DykemaHaagerup2001}. This operator is implicitly defined to be the
expected $\star$--moment limit of random matrices
\begin{equation}
A_N=D_N+\sqrt{o} T_N,
\label{eq:definicjaan}
\end{equation}
where $D_N$ is a diagonal random matrix with eigenvalues $\lambda_1,\dots,
\lambda_N$ which are i.i.d.\ random variables with distribution given
by $\nu$ and
\begin{equation}
T_N=\left[ \begin{array}{ccccc}
0 & g_{1,2} & \cdots & g_{1,N-1} & g_{1,N} \\
0 & 0 & \cdots & g_{2,N-1} & g_{2,N} \\
\vdots& & \ddots & \vdots & \vdots \\
 & & & 0 & g_{N-1,N} \\
0& & \cdots & 0 & 0
\end{array} \right],
\label{eq:utm}
\end{equation}
is an upper--triangular random matrix where $(\Re g_{i,j},\Im
g_{i,j})_{1\leq i<j \leq N}$ are i.i.d.\ $N\left(0,\frac{1}{N}\right)$ random
variables. We recall that the Brown measure and offdiagonality of $x$ are given
by $\mu_x=\nu$ and $\od_x=o$.
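This value of the offdiagonality is consistent with a direct second--moment
computation (a sketch, using only the normalization described above): the entries
$g_{i,j}$ have mean zero and ${\mathbb{E}}|g_{i,j}|^2=\frac{2}{N}$, so the mixed terms
vanish in expectation, ${\mathbb{E}}\,\tr(D_ND_N^{\star})=\int_{\mathbb{C}} |z|^2\ {\mathrm{d}}\nu(z)$, and
$${\mathbb{E}}\,\tr (T_N T_N^{\star})=\frac{1}{N}\sum_{1\leq i<j\leq N}{\mathbb{E}}|g_{i,j}|^2
=\frac{1}{N}\cdot\frac{N(N-1)}{2}\cdot\frac{2}{N}\longrightarrow 1,$$
hence $\tau(x x^{\star})=\int_{\mathbb{C}} |z|^2\ {\mathrm{d}}\nu(z)+o$ and, since $\mu_x=\nu$,
indeed $\od_x=o$.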
\begin{theorem}
For any compactly supported probability measure $\nu$ on ${\mathbb{C}}$ and any number
$o>0$, if $x$ is a $DT(\nu,o)$ operator then the inequality (\ref{eq:glowne}) becomes
an equality.
\end{theorem}
\begin{proof}
Let us fix $R>0$. As in Lemma \ref{lem:pierwszy}, let $(\theta_N)$ be
a sequence of non--negative numbers and $(l_N)$ be a sequence of natural
numbers such that $$\lim_{N\rightarrow\infty} \theta_N =0,
\qquad \lim_{N\rightarrow\infty} l_N=\infty,$$
$$\hat{\chi}^{\operatorname{d}}_R(x) \leq\limsup_{N\rightarrow\infty}
\hat{\chi}^{\operatorname{d}}_R(x;N,l_N,\theta_N).$$
In definition (\ref{eq:definicjaan}) of $A_N$ let us change $D_N$ to be
any (non--random) element of the set $\hat{\Gamma}^{\operatorname{d}}_R(x; N, l_N,\theta_N)$.
From results of Dykema and Haagerup \cite{DykemaHaagerup2000} it follows that
despite this change the sequence $A_N$ still converges in expected
$\star$--moments to $x$:
\begin{equation} \lim_{N\rightarrow\infty} {\mathbb{E}} \tr (A_N^{s_1} \cdots
A_N^{s_k})= \phi(x^{s_1} \cdots x^{s_k})
\label{eq:zbieznosca}
\end{equation}
for any $k\in{\mathbb{N}}$ and $s_1,\dots,s_k\in\{1,\star\}$
and by using similar combinatorial arguments as in \cite{Thorbjornsen2000} one
can show that
\begin{equation}
\lim_{N\rightarrow\infty} \Var \tr (A_N^{s_1} \cdots
A_N^{s_k})=0.
\label{eq:zbieznoscb}
\end{equation}
Since $\limsup_{N\rightarrow\infty} \|T_N\|<\infty$ almost
surely \cite{Geman}, there exists $R'>0$ such that for any integer
$k$ and $\epsilon>0$
\begin{equation}
\lim_{N\rightarrow\infty} P\big(\omega\in\Omega:
D_N+\sqrt{o} T_N(\omega)\in \Gamma_{R'}(x; k,N,\epsilon) \big) =1 .
\label{eq:prawieok}
\end{equation}
Furthermore, since the convergence in (\ref{eq:zbieznosca}) and
(\ref{eq:zbieznoscb}) is uniform with respect to the choice of the sequence
$(D_N)$, it is possible to find a universal $R'$ valid for all choices of
$(D_N)$.
By comparing the densities of two measures on ${\mathcal{M}}_N^{\operatorname{sut}}$, namely the Lebesgue measure
$\vol^{\operatorname{sut}}$ and the distribution of the Gaussian random matrix $\sqrt{o} T_N$,
we see that (\ref{eq:prawieok}) implies that for every $0<\delta<1$, every
integer $k$ and $\epsilon>0$ there exists $N_0$ such that for
$N>N_0$ the volume of the set $\{m\in{\mathcal{M}}_N^{\operatorname{sut}}: D_N+m \in
\Gamma_{R'}(x;k,N,\epsilon) \}$ is greater than or equal to the volume of an
$N(N-1)$--dimensional ball with radius $\sqrt{(1-\delta)oN}$:
\begin{multline} \vol^{\operatorname{sut}} \{m\in{\mathcal{M}}_N^{\operatorname{sut}}: D_N+m \in
\Gamma_{R'}(x;k,N,\epsilon) \}\geq \\
\pi^{\frac{N(N-1)}{2}} \left[ \Gamma\left(
\frac{N (N-1)}{2}+1 \right)\right]^{-1} [(1-\delta)oN]^{\frac{N (N-1)}{2}}.
\label{eq:idenaobiad}
\end{multline}
Since the sequence $(D_N)$ was chosen arbitrarily, it follows that for every
$0<\delta<1$, every integer $k$ and $\epsilon>0$ there exists $N_0$ such that
for any $N>N_0$ and any $D_N\in\hat{\Gamma}^{\operatorname{d}}_R(x; N, l_N,\theta_N)$
inequality (\ref{eq:idenaobiad}) holds.
Now it is enough to apply Proposition \ref{prop:pullback} to show that for
every $0<\delta<1$, every integer $k$ and $\epsilon>0$ we have
$$\chi_{R'}(x; k,\epsilon) \geq
\hat{\chi}^{\operatorname{d}}_{R}(x)+\frac{1}{2}+\frac{\log 2\pi (1-\delta) o}{2}$$
which finishes the proof: letting $\delta\to 0$, taking the infimum over $k$ and
$\epsilon$ and then the supremum over $R$, and invoking Theorem
\ref{theo:diagonalne}, we obtain the reverse of inequality \eqref{eq:glowne}.
\end{proof}
\subsection{Comparison of inequalities for free entropy}
The inequality (\ref{eq:glowne}) contains a double integral, often
called the logarithmic energy of a measure. A similar term appears in the formula
for the free entropy of a single selfadjoint operator, which is due to
Voiculescu \cite{VoiculescuPart2}:
\begin{equation}
\chi^{\operatorname{sa}}(x)=\int_{{\mathbb{R}}} \int_{{\mathbb{R}}} \log |z_1-z_2| \ {\mathrm{d}}\mu_x(z_1)
\ {\mathrm{d}}\mu_x(z_2)+\frac{3}{4}+\frac{\log 2\pi}{2}.
\label{eq:entropiasamosprzezonego}
\end{equation}
It should be stressed that---despite the formal resemblance---the free
entropies in (\ref{eq:glowne}) and (\ref{eq:entropiasamosprzezonego}) are
different objects. Namely, in (\ref{eq:glowne}) we consider a free entropy
$\chi(x)$ of a non--selfadjoint random variable while in
(\ref{eq:entropiasamosprzezonego}) we consider a free entropy $\chi^{\operatorname{sa}}(x)$ of
a selfadjoint random variable which is defined by hermitian matrix
approximations.
On the other hand, inequality (\ref{eq:glowne}) contains a term involving the
logarithm of the offdiagonality, which can be regarded as a non--commutative
variance. A similar expression appears in Voiculescu's inequality for the
free entropy of a non--selfadjoint variable \cite{VoiculescuPart2}:
\begin{equation}
\chi(x)\leq \log \left[ \pi e \big( \phi(|x|^2)-|\phi(x)|^2 \big) \right].
\end{equation}
\end{document}
|
\begin{document}
\title{Two-descent on some genus two curves}
\section{Introduction}
In this paper we study for an arbitrary prime number $p$ the curve $C_p$ of genus $2$ defined by the equation
\begin{equation}\label{eq:thecurve}
y^2=x(x^2-p^2)(x^2-4p^2).
\end{equation}
Specifically, we start by bounding the rank of its Jacobian $J_p$ over $\mathbb{Q}$ in terms of the $2$-Selmer group $S^2(J_p/\mathbb{Q})$. Next we show for three infinite
sets of prime numbers $p$ how to improve the upper bound on $\mathrm{rank}\,J_p(\mathbb{Q})$ by using a $2$-Selmer group computation over
$\mathbb{Q}(\sqrt{\pm p})$ of the Jacobian of the curve
$C=C_1$ defined by
$y^2=x(x^2-1)(x^2-4)$.
This computation applies the R\'edei symbols of \cite{stevenhagen2018redei}.
The improved
upper bound yields cases where the
Shafarevich-Tate group $\Sh(J_p/\mathbb{Q})$ is nontrivial.
As an example: for primes $p\equiv 23\bmod 48$ it turns out
that $J_p(\mathbb{Q})$ is finite and $\Sh(J_p/\mathbb{Q})[2]\cong(\mathbb{Z}/2\mathbb{Z})^2$.
We also discuss the $\mathbb{Q}$-rational points of the curve $C_p$. This is easy in
case the group $J_p(\mathbb{Q})$ is finite (as occurs,
for example, for all primes $p\equiv 7\bmod 24$). A less
obvious case we treat is $p=241$; the group
$J_{241}(\mathbb{Q})$ turns out
to have rank $2$. Using so-called `Two-Selmer sets', it is shown that
$C_{241}(\mathbb{Q})$ consists of only the
obvious Weierstrass points (the one at infinity and the ones with $y=0$).
Studying genus $1$ curves depending on a prime number $p$ is
a very classical subject; the survey paper \cite{Na29}
already lists
various examples; more recent ones are found, e.g.,
in \cite{BrCa1984}, \cite{Mo1992}, \cite{StrTo1994}.
The natural question of investigating analogous ideas
in the case of genus $2$
curves so far seems to have received less attention.
The 1998 master's thesis \cite{heiden} by one of us provides a first step
(not yet involving Shafarevich-Tate groups). As shown in
{\sl loc.\ cit.} Prop.~4.3.3 and Thm.~4.3.4, this already
suffices to conclude for the curves discussed in the
present paper that $C_p(\mathbb{Q})$ consists of the $6$
Weierstrass points only, whenever $p\equiv 7\bmod 24$.
The recent preprint \cite{hima2019} studies some similar
families of genus $2$ curves, but with only $2$ rational
Weierstrass points. Again, computing the $2$-Selmer group
over $\mathbb{Q}$ allows the authors to identify congruence conditions on
the prime $p$ such that the corresponding Mordell-Weil group
is finite. As a consequence, for those primes the only rational points on
the curve are the rational Weierstrass points.
Many results in the present paper originate from two
master's projects \cite{heiden}, \cite{evink} (1998
resp. 2019) by the second and the first author,
supervised by the third one.
\section{Notation and results}\label{NotandRes}
The first step in order to obtain information on the rank of the Jacobian $J_p$ of the hyperelliptic curve $C_p$ defined by the equation
\[
y^2=x(x^2-p^2)(x^2-4p^2)
\]
for a prime $p$, is the relatively basic computation of the $2$-Selmer group of $J_p/\mathbb{Q}$. It fits in the well-known short exact sequence
\begin{equation}\label{eq:2selmerexact}
0\to J_p(\mathbb{Q})/2J_p(\mathbb{Q})\to S^2(J_p/\mathbb{Q})\to\Sh(J_p/\mathbb{Q})[2]\to 0.
\end{equation}
This Selmer group was computed in \cite{heiden} (with
minor corrections in \cite[Appendix~B]{evink}).
The computation is based on the method described in
\cite{schaefer1995} and uses (see \cite[Section~7]{FPS1997})
\[
\#J_p(K_v)/2J_p(K_v)=|2|_v^{-2}\cdot \#J_p(K_v)[2]
=|2|_v^{-2}\cdot 16
\]
where $K_v\supset\mathbb{Q}_\ell$ is a
finite extension with
valuation ring $O_v$, and \[|2|_v=\text{vol}(2O_v)/\text{vol}(O_v)=\left\{
\begin{array}{ll}1 & \text{if}\;\ell\neq 2,\\
2^{-[K_v:\mathbb{Q}_2]} & \text{if}\;\ell=2\end{array}\right..
\]The result is as follows. (A calculation illustrating this type of result is the proof of
Lemma~\ref{lem5.1} below.)
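For example, the displayed formula gives $\#J_p(\mathbb{Q}_\ell)/2J_p(\mathbb{Q}_\ell)=16$ for
every odd prime $\ell$, while $\#J_p(\mathbb{Q}_2)/2J_p(\mathbb{Q}_2)=2^{2}\cdot 16=64$; so the
local contributions at odd primes and at $2$ are $\mathbb{F}_2$-vector spaces of dimension
$4$ and $6$, respectively.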
\begin{proposition}\label{prop2.1}
For a prime number $p>3$, the $\mathbb{F}_2$-vector space
$S^2(J_p/\mathbb{Q})$ of the Jacobian $J_p$ of the curve
defined by $y^2=x(x^2-p^2)(x^2-4p^2)$ has dimension as given
in the next table.
\[
\begin{array}{c|c}
p \bmod 24 & \dim_{\mathbb{F}_2}S^2(J_p/\mathbb{Q}) \\
\hline
1 & 8 \\
5,11,13,19 & 5 \\
7 & 4 \\
17,23 & 6
\end{array}
\]
\end{proposition}
Since all Weierstrass points on $C_p$ are
$\mathbb{Q}$-rational, one has
$J_p(\mathbb{Q})[2]\cong(\mathbb{Z}/2\mathbb{Z})^4$. Either
by observing that in the present situation $J_p(\mathbb{Q})[2]\hookrightarrow S^2(J_p/\mathbb{Q})$,
or using that the torsion subgroup $J_p(\mathbb{Q})_{\mbox{\scriptsize tor}}\subset J_p(\mathbb{Q})$ yields a $4$-dimensional subspace
$J_p(\mathbb{Q})_{\mbox{\scriptsize tor}}/2J_p(\mathbb{Q})_{\mbox{\scriptsize tor}}$
of $J_p(\mathbb{Q})/2J_p(\mathbb{Q})$,
the short exact sequence \eqref{eq:2selmerexact} implies
\begin{equation}\label{eq:rankshasel}
\mathrm{rank}\,J_p(\mathbb{Q})+\dim_{\mathbb{F}_2}\Sh(J_p/\mathbb{Q})[2]=\dim_{\mathbb{F}_2}S^2(J_p/\mathbb{Q})-4.
\end{equation}
We state an immediate consequence of this:
\begin{corollary}\label{cor2.2}
For any prime number $p\equiv 7\bmod 24$ one has
$\mathrm{rank}\,J_p(\mathbb{Q})=0$ and $C_p(\mathbb{Q})$ consists of
only the $6$ Weierstrass points of $C_p$.
\end{corollary}
\begin{proof}
The proof of the statement about the rank is indicated above.
Note that for $p\neq 5$ one has $\#J_p(\mathbb{F}_5)=16$ independent of $p$. Moreover $\#J_5(\mathbb{F}_7)=48$ and
$\#J_5(\mathbb{F}_{11})=128$. Since for primes $\ell\geq 3$ the reduction mod $\ell$
map is an injective group homomorphism on rational torsion points,
it follows that $J_p(\mathbb{Q})$ has torsion subgroup
$(\mathbb{Z}/2\mathbb{Z})^4$ for every prime $p$.
Embedding $C_p\subset J_p$ via $P\mapsto [P-\infty]$ with
$\infty\in C_p$ the Weierstrass point at infinity, one
concludes that $C_p\cap J_p(\mathbb{Q})$ consists of
the divisor classes $[W-\infty]$ for $W$ any Weierstrass point
on $C_p$, implying the result.
\end{proof}
For the primes $p\equiv 5, 11, 13, 19\bmod 24$
the structure of the group $J_p(\mathbb{Q})$
is in fact also predicted by Proposition~\ref{prop2.1}:
\begin{corollary}
For any prime $p>3$, assume that $\Sh(J_p/\mathbb{Q})$ is finite. Then
\[\text{rank}\,J_p(\mathbb{Q})\equiv
\left\{
\begin{array}{lcl}
1\bmod 2 &&\text{if}\;\;p\equiv 5, 11, 13, 19\bmod 24;\\
0\bmod 2 && \text{otherwise}.
\end{array}
\right.
\]
In particular, if for a prime $p\equiv 5, 11, 13, 19\bmod 24$
the group $\Sh(J_p/\mathbb{Q})$ is finite then for this prime
$J_p(\mathbb{Q})\cong \mathbb{Z}\times (\mathbb{Z}/2\mathbb{Z})^4$.
\end{corollary}
\begin{proof}
By a result of Poonen and Stoll \cite[\S6, \S8]{poonenstoll1999}
finiteness of $\Sh$ and the fact that $C_p$
contains a rational point imply that
$\dim_{\mathbb{F}_2}\Sh(J_p/\mathbb{Q})[2]$ is even.
Hence Equation \eqref{eq:rankshasel}
and Proposition~\ref{prop2.1} imply the first assertion
as well as $\mathrm{rank}\,J_p(\mathbb{Q})=1$
whenever $p\equiv 5, 11, 13, 19\bmod 24$.
The result follows since the proof of
Corollary~\ref{cor2.2} in particular determines the torsion
subgroup of $J_p(\mathbb{Q})$.
\end{proof}
The remainder of this paper deals with improvements of
Proposition~\ref{prop2.1} and variations on Corollary~\ref{cor2.2}. Specifically, this is possible in all remaining
congruence classes (so, $p\equiv 1, 17, 23\bmod 24$). We show the following.
\begin{theorem}\label{Thm23mod24}
Let $p\equiv 23\bmod 48$ be a prime number. The Jacobian
$J_p$ of the curve corresponding to $y^2=x(x^2-p^2)(x^2-4p^2)$
satisfies $J_p(\mathbb{Q})=J_p(\mathbb{Q})[2]\cong
(\mathbb{Z}/2\mathbb{Z})^4$ and $\Sh(J_p/\mathbb{Q})[2]\cong
(\mathbb{Z}/2\mathbb{Z})^2$.
\end{theorem}
\begin{theorem}\label{Thm17mod24}
Let $p\equiv 17\bmod 24$ be a prime number that does not
split completely in $\mathbb{Q}(\sqrt[4]{2})$. The Jacobian
$J_p$ of the curve corresponding to $y^2=x(x^2-p^2)(x^2-4p^2)$
satisfies $J_p(\mathbb{Q})=J_p(\mathbb{Q})[2]\cong
(\mathbb{Z}/2\mathbb{Z})^4$ and $\Sh(J_p/\mathbb{Q})[2]\cong
(\mathbb{Z}/2\mathbb{Z})^2$.
\end{theorem}
\begin{theorem}\label{Thm1mod24}
Let $p\equiv 1\bmod 24$ be a prime number satisfying one
of the conditions
\begin{itemize}
\item[$(a)$] $p$ splits completely in $\mathbb{Q}(\sqrt[4]{2})$
and not in $\mathbb{Q}(\sqrt{1+\sqrt{3}})$;
\item[$(b)$] $p\equiv 1\bmod 48$ and $p$ splits completely in
$\mathbb{Q}(\sqrt{1+\sqrt{3}})$ and not in
$\mathbb{Q}(\sqrt[4]{2})$;
\item[$(c)$] $p\equiv 25\bmod 48$ and $p$ does not split
completely in either of $\mathbb{Q}(\sqrt[4]{2})$
and $\mathbb{Q}(\sqrt{1+\sqrt{3}})$.
\end{itemize}
The Jacobian
$J_p$ of the curve corresponding to $y^2=x(x^2-p^2)(x^2-4p^2)$
satisfies $J_p(\mathbb{Q})=J_p(\mathbb{Q})[2]\cong
(\mathbb{Z}/2\mathbb{Z})^4$ and $\Sh(J_p/\mathbb{Q})[2]\cong
(\mathbb{Z}/2\mathbb{Z})^4$.
\end{theorem}
Using Chebotar\"{e}v's density theorem (see, e.g., \cite{LS}),
one observes that the set of prime numbers satisfying
the condition given in Theorem~\ref{Thm23mod24} has
a positive Dirichlet density. The same holds for the set of
primes satisfying the condition in Theorem~\ref{Thm17mod24}
and for each of the three sets corresponding to
Theorem~\ref{Thm1mod24}~(a), \ref{Thm1mod24}~(b),
and \ref{Thm1mod24}~(c).
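For instance, by Dirichlet's theorem the primes $p\equiv 23\bmod 48$ of
Theorem~\ref{Thm23mod24} form a set of Dirichlet density $1/\varphi(48)=1/16$; the sets
in Theorems~\ref{Thm17mod24} and~\ref{Thm1mod24} combine such a congruence condition
with splitting conditions in $\mathbb{Q}(\sqrt[4]{2})$ and $\mathbb{Q}(\sqrt{1+\sqrt{3}})$,
and their densities can be computed by applying Chebotar\"{e}v's theorem to the
compositum of the fields involved.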
\section{R\'edei Symbols}
In this section we recall the definition
and various properties of the R\'edei symbol. It is a tri-linear symbol taking values in $\mu_2$ and it satisfies a reciprocity law based on the product formula for quadratic Hilbert symbols. This reciprocity allows us to link the splitting behaviour of certain primes in dihedral extensions over $\mathbb{Q}$ of degree $8$ in a non-trivial way, which functions as a useful supplement to various $2$-Selmer group computations. The reciprocity of the R\'edei-symbol is a recent result due to P. Stevenhagen in \cite{stevenhagen2018redei}; his text is the basis for the exposition in this section.\\
Let $a,b$ be square-free integers representing non-trivial elements in $\mathbb{Q}^{*}/\mathbb{Q}^{*2}$,
and suppose their local quadratic Hilbert symbols are all trivial:
\begin{equation}\label{eq:quadhilbert}
(a,b)_p=1,\quad\text{for all primes}\; p.
\end{equation}
By the local-global principle of Hasse and Minkowski, condition \eqref{eq:quadhilbert} is equivalent to the existence of a non-zero rational solution $(x,y,z)$ to the equation
\begin{equation}\label{eq:hassmink}
x^2-ay^2-bz^2=0.
\end{equation}
Take such a solution and put
\begin{equation}
\alpha=2(x+z\sqrt{b}),\quad \beta=x+y\sqrt{a}.
\end{equation}
Then $F:=E(\sqrt{\alpha})=E(\sqrt{\beta})$ defines a
quadratic extension of $E=\mathbb{Q}(\sqrt{a},\sqrt{b})$ that is normal over $\mathbb{Q}$, cyclic of degree $4$ over $\mathbb{Q}(\sqrt{ab})$, and dihedral of degree $8$ over $\mathbb{Q}$ when $\mathbb{Q}(\sqrt{ab})\neq\mathbb{Q}$, see \cite[Lemma~5.1, Corollary~5.2]{stevenhagen2018redei}. The extension $F$ can be twisted to $F_t$ for $t\in\mathbb{Q}^{*}$ by scaling the solution $(x,y,z)$ to $(tx,ty,tz)$.
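To give a concrete instance of this construction: for $a=2$ and $b=7$ the triple
$(x,y,z)=(3,1,1)$ solves \eqref{eq:hassmink} (so condition \eqref{eq:quadhilbert}
holds), and the above recipe produces $\alpha=2(3+\sqrt{7})$, $\beta=3+\sqrt{2}$ and
$F=E(\sqrt{\alpha})$ with $E=\mathbb{Q}(\sqrt{2},\sqrt{7})$, a degree $8$ dihedral
extension of $\mathbb{Q}$ since $\mathbb{Q}(\sqrt{14})\neq\mathbb{Q}$.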
By \cite[Propositions~7.2,7.3]{stevenhagen2018redei}
choosing $t$ appropriately ensures that $F_t/E$ is unramified at all finite primes of odd residue
characteristic, but in some cases ramification over $2$ cannot be avoided. With $\Delta(d)=\Delta(\mathcal{O}_{\mathbb{Q}(\sqrt{d})})$ for $d\in\mathbb{Q}^{*}/\mathbb{Q}^{*2}$ denoting the discriminant, one makes the following definition.
\begin{definition}\label{minram}
Let $K=\mathbb{Q}(\sqrt{ab})$ for non-trivial $a,b\in\mathbb{Q}^{*}/\mathbb{Q}^{*2}$, and let $F$ be the quadratic extension of $E=\mathbb{Q}(\sqrt{a},\sqrt{b})$ corresponding with a solution of \eqref{eq:hassmink}. The extension $F/K$ is minimally ramified if the following conditions hold:
\begin{itemize}
\item[$(a)$] The extension $F/K$ is unramified over all odd primes $p\!\!\not|\,\mathrm{gcd}(\Delta(a),\Delta(b))$.
\item[$(b)$] The extension $F/K$ is unramified over $2$ if $\Delta(a)\Delta(b)$ is odd, or if one of $\Delta(a),\Delta(b)$ is $1\bmod 8$.
\item[$(c)$] If $\{\Delta(a),\Delta(b)\}\equiv \{4,5\}\bmod 8$, take $s\in\{a,b\}$ such that $\Delta(s)\equiv 4\bmod 8$. The local biquadratic extension $\mathbb{Q}_2(\sqrt{s})\subset F\otimes\mathbb{Q}_2$ must have conductor $2$.
\end{itemize}
\end{definition}
By \cite[Lemma~7.7]{stevenhagen2018redei} it is possible to twist a given $F$ to a suitable $F_t$ which is minimally ramified over $\mathbb{Q}(\sqrt{ab})$.
For convenience, a degree $8$ dihedral extension of $\mathbb{Q}$ is called minimally ramified if it is so over its subfield defined by the order $4$ cyclic subgroup of the Galois group. Observe that the definition imposes no restrictions over the prime $2$ in case $2$ is totally ramified in $\mathbb{Q}(\sqrt{a},\sqrt{b})$.
\begin{definition}
For non-trivial $a,b,c\in\mathbb{Q}^{*}/\mathbb{Q}^{*2}$ with local quadratic Hilbert symbols
\begin{equation}\label{eq:quadhilbs}
(a,b)_p=(a,c)_p=(b,c)_p=1
\end{equation}
for all primes $p$ and moreover
\begin{equation}\label{eq:coprimedisc}
\mathrm{gcd}(\Delta(a),\Delta(b),\Delta(c))=1,
\end{equation}
set $K=\mathbb{Q}(\sqrt{ab})$ and $E=\mathbb{Q}(\sqrt{a},\sqrt{b})$, and take a
corresponding $F/K$ which is minimally ramified.
Define $[a,b,c]\in\mathrm{Gal}(F/E)=\mu_2$ by
\[
[a,b,c]=\begin{cases}
\mathrm{Art}(\mathfrak{c},F/K) & \text{ if } c> 0 \\
\mathrm{Art}(\mathfrak{c}\infty,F/K) & \text{ if } c<0 \\
\end{cases}
\]
where $\mathrm{Art}(\cdot , \cdot)$ is the Artin symbol and $\mathfrak{c}\in\mathcal{I}(\mathcal{O}_K)$ has norm $|c_0|$ with $c_0$ the square-free integer representing
$c$, and $\infty$ denotes an infinite prime of $K$.\\
If at least one of $a,b$ and $c$ is trivial then one sets $[a,b,c]=1$.
\end{definition}
\begin{proposition}
For $a,b,c\in\mathbb{Q}^{*}/\mathbb{Q}^{*2}$ satisfying \eqref{eq:quadhilbs} and \eqref{eq:coprimedisc}, the Rédei symbol $[a,b,c]\in\mu_2$ is well-defined. Moreover, the symbol is tri-linear, and perfectly symmetrical in all three arguments.
\end{proposition}
\begin{proof} {\it (Sketch.)}
If $p|c$, then $(c,b)_p=(c,a)_p=1$ implies that $p$ is either split or ramified in both $\mathbb{Q}(\sqrt{a})$ and $\mathbb{Q}(\sqrt{b})$.
Condition \eqref{eq:coprimedisc} implies that $p$ cannot ramify in both, hence a prime $\mathfrak{p}_K|p$ in $K$ has norm $p$ and splits in $E$.
The prime $\mathfrak{p}_K$ is unramified in $F$ by the minimal ramification of $F$, where the parity of $p$ determines whether this is due to condition $(a)$ or $(b)$. It follows that indeed $\mathrm{Art}(\mathfrak{p}_K,F/K)\in\mathrm{Gal}(F/E)$, and as $\mathrm{Gal}(F/E)$ is in the center of $\mathrm{Gal}(F/\mathbb{Q})$ this Artin symbol is independent of $\mathfrak{p}_K$. When $c<0$ and $K$ is real, the Artin symbol in $F$ of any infinite prime of $K$ measures whether $F$ is real or complex and hence is independent of the choice of infinite prime of $K$. As $[a,b,c]$ is the product of such Artin symbols we see that $[a,b,c]$ does not depend on the choice of $\mathfrak{c}$ or $\infty$. For the independence of $F$ we refer to \cite[Corollary~8.2]{stevenhagen2018redei}.\\
The set of triples $(a,b,c)$ in $\mathbb{Q}^{*}/\mathbb{Q}^{*2}$ for which \eqref{eq:quadhilbs} and \eqref{eq:coprimedisc} hold is `tri-linearly closed', and the R\'edei symbol $[a,b,c]$ is clearly linear in $c$, hence tri-linearity follows from the symmetry. The symmetry in the first two arguments is immediate, while the identity
\[
[a,b,c]=[a,c,b]
\]
is a non-trivial reciprocity depending on the product formula for quadratic Hilbert symbols in $\mathbb{Q}(\sqrt{a})$. The proof of this reciprocity is the subject of \cite[Section~8]{stevenhagen2018redei}.
\end{proof}
\begin{example}\label{redeiexample1}
Consider the case when $a=b=2$.
Then the invariant fields $F$ and $F'$ of the subgroups generated by $-1\bmod 16$ and $7\bmod 16$ inside $\mathrm{Gal}(\mathbb{Q}(\zeta_{16})/\mathbb{Q})$, respectively,
are two minimally ramified extensions of $\mathbb{Q}$ which can be used to compute a R\'edei symbol of the form $[2,2,c]$ provided that the symbol is defined, i.e. when $\Delta(c)$ is odd and $(2,c)_2=1$, i.e. when $c\equiv 1\bmod 8$. Take for example $c=-p$ for a prime $p\equiv -1\bmod 8$; as $F$ is totally real and $p$ splits completely in $F$ precisely when $p\equiv \pm 1\bmod 16$, we obtain
\[
[2,2,-p]=\begin{cases}
1 & \text{ if } p\equiv -1\bmod 16 \\
-1 & \text{ if } p\equiv 7\bmod 16 \\
\end{cases}
\]
Note that we get the same conclusion when using the (complex!) field $F'$ as $p$ splits completely in $F'$ precisely when $p\equiv 1,7\bmod 16$.
\end{example}
\begin{nonexample}
Continuing the setup of Example \ref{redeiexample1}, we see that $`[2,2,-1]'$ is not defined as $\Delta(-1)$ is even (although $(2,-1)_2=1$). We can nonetheless consider an Artin symbol `that should define $[2,2,-1]$', but as $F$ is real and $F'$ complex, such a symbol is \textit{not} independent of the minimally ramified extension.
\end{nonexample}
\begin{example}\label{Ex3.6}
Let $p\equiv 1\bmod 8$ be a prime. Let $\pi\in\mathbb{Z}[\sqrt{2}]$ be an element of norm $p$ with conjugate $\pi'$. Then $[2,p,p]=1$ precisely when $\pi$ is a square mod $\pi'$. Since $[2,p,p]=[p,p,2]$, this is equivalent to $2$ being completely split in the quartic subfield $E$ of $\mathbb{Q}(\zeta_p)$. As $E$ corresponds with the subgroup of fourth powers in $\mathrm{Gal}(\mathbb{Q}(\zeta_p)/\mathbb{Q})=(\mathbb{Z}/p\mathbb{Z})^{*}$ and $2\bmod p=\mathrm{Frob}_p\in \mathrm{Gal}(\mathbb{Q}(\zeta_p)/\mathbb{Q})$, we see that $2$ splits completely in $E$ precisely when $2\bmod p$ is a fourth power, i.e. when $p$ splits completely in $\mathbb{Q}(\sqrt[4]{2})$, i.e. when $[2,-2,p]=1$. We thus have the identity
\[
[2,p,p]=[2,-2,p].
\]
\end{example}
With this we obtain a generalisation of \cite[Prop.~4.1]{StrTo1994}, where such a statement is used to prove that $ (\mathbb{Z}/2\mathbb{Z})^2\subset\Sh(E/\mathbb{Q})[2]$ for the elliptic curve $E$ defined by $y^2=(x+p)(x^2+p^2)$ for a prime $p\equiv 9\bmod 16$ such that $1+\sqrt{-1}\in \mathbb{F}_p$ is a square.
\begin{corollary}
Let $p\equiv 1\bmod 8$ be a prime, let $\pi\in\mathbb{Z}[\sqrt{2}]$ have norm $p$ with conjugate $\pi'$ and let $i\in\mathbb{F}_p$ be a primitive fourth root of unity. Consider the following statements.
\begin{enumerate}[(a)]
\item $\pi$ is a square mod $\pi'$.
\item $1+i$ is a square mod $p$.
\end{enumerate}
Then the statements are equivalent when $p\equiv 1\bmod 16$, while for $p\equiv 9\bmod 16$ exactly one of the statements holds.
\end{corollary}
\begin{proof}
Statement $(a)$ holds when $[2,p,p]=[2,-2,p]=1$, while statement $(b)$ holds when $[2,-1,p]=1$. The result follows because
\[
[2,-2,p]\cdot[2,-1,p]=[2,2,p]=
\begin{cases}
1 & \text{ if }p\equiv 1\bmod 16,\\
-1 & \text{ if }p\equiv 9\bmod 16.
\end{cases}\qedhere
\]
\end{proof}
\section{Computation of $2$-Selmer groups}
We start by recalling the explicit form of $2$-descent that will be used. Let $K$ be a number field and $C$ the hyperelliptic curve defined by $y^2=f(x)$, for $f\in K[x]$ square-free and of odd degree $2g+1$. We have the short exact sequence
\[
0\to J(K)/2J(K)\to S^2(J/K)\to \Sh(J/K)[2]\to 0,
\]
where $S^2(J/K)$ and $\Sh(J/K)$ are respectively the $2$-Selmer group and the Shafarevich-Tate group defined in terms of Galois cohomology by
\begin{align*}
S^2(J/K)&:=\ker\left(H^1(G_K,J(\overline{K})[2])\to \prod\nolimits_{\mathfrak{p}}H^1(G_{K_{\mathfrak{p}}},J(\overline{K_{\mathfrak{p}}}))\right),\\
\Sh(J/K)&:=\ker\left(H^1(G_K,J(\overline{K}))\to \prod\nolimits_{\mathfrak{p}}H^1(G_{K_{\mathfrak{p}}},J(\overline{K_{\mathfrak{p}}}))\right).
\end{align*}
By \cite[Theorems~2.1 \& 2.2]{schaefer1995} one has
$H^1(G_K,J(\overline{K})[2])\cong \ker(A^{*}/A^{*2}\xrightarrow{N} K^{*}/K^{*2})$,
where $A = K[x]/(f(x))$ and $N$ is induced by the norm map $A\to K$.
This identifies $S^2(J/K)$ with the elements in $ \ker(A^{*}/A^{*2}\xrightarrow{N} K^{*}/K^{*2})$ that are mapped, according to the commutative diagram
\[
\begin{tikzcd}
J(K)/2J(K) \arrow[d] \arrow[r, "\delta", hook] & A^{*}/A^{*2} \arrow[d] \\
J(K_{\mathfrak{p}})/2J(K_{\mathfrak{p}}) \arrow[r, "\delta_{\mathfrak{p}}", hook] & A_{\mathfrak{p}}^{*}/A_{\mathfrak{p}}^{*2},
\end{tikzcd}
\]
into $\mathrm{im}(\delta_{\mathfrak{p}})$ for all primes $\mathfrak{p}$ of $K$.
We consider the special case that $f\in\mathcal{O}_K[x]$ is monic and splits completely, so $f=\prod_{i=1}^{2g+1}(x-\alpha_i)$ for distinct $\alpha_i\in \mathcal{O}_K$. In this case there is an isomorphism $A\xrightarrow{\sim} \bigoplus_{i=1}^{2g+1}K$ determined by $x\mapsto (\alpha_1,\dotsc,\alpha_{2g+1})$, and
the norm map $A\to K$ corresponds to multiplication
$\oplus_{i=1}^{2g+1} K\to K$. Hence the kernel of the norm $\oplus_{i=1}^{2g+1}K^*/{K^*}^2\stackrel{N}{\longrightarrow} K^*/{K^*}^2$ consists of the `hyperplane' of those $(2g+1)$-tuples for which the product of all coordinates is trivial.
Let $S$ consist of the real primes of $K$ together with the finite primes dividing $2\Delta(f)$, and put $K(S):=\{x\in K^{*}/K^{*2}:\mathrm{ord}_{\mathfrak{p}}(x)\equiv 0\bmod 2\text{ for all finite }\mathfrak{p}\notin S\}$.
One has (compare \cite[pp.~226-227]{schaefer1995})
\begin{equation}\label{eq:finselmalg}
S^2(J/K)\subset\ker\bigg(\bigoplus_{i=1}^{2g+1}K(S)\to K(S)\bigg),
\end{equation}
and $S^2(J/K)$ consists of those elements in the kernel of \eqref{eq:finselmalg} that map into $\mathrm{im}(\delta_{\mathfrak{p}})$ for each $\mathfrak{p}\in S$ in the following diagram.
\[
\begin{tikzcd}
J(K)/2J(K)\ar{r}{\delta}\ar{d} & \displaystyle\bigoplus\limits_{i=1}^{2g+1}K^{*}/K^{*2}\ar{d}\\
J(K_{\mathfrak{p}})/2J(K_{\mathfrak{p}})\ar{r}{\delta_{\mathfrak{p}}} & \displaystyle\bigoplus_{i=1}^{2g+1}K_{\mathfrak{p}}^{*}/K_{\mathfrak{p}}^{*2}.
\end{tikzcd}
\]
Here the injective homomorphism $\delta$ and similarly $\delta_{\mathfrak{p}}$
is given by
\begin{equation}\label{eq:imagedivisors}
\sum_{i=1}^r[P_i]-r[\infty]\mapsto\prod_{i=1}^r(x(P_i)-\alpha_1,\dotsc, x(P_i)-\alpha_{2g+1}),
\end{equation}
for $P_1,\dotsc,P_r\in C(\overline{K})$ forming a $G_K$-orbit not containing a Weierstrass point. The $j$-th coordinate of the $\delta$-image of $[(\alpha_i,0)]-[\infty]$ for $i\neq j$ is $\alpha_i-\alpha_j$. The $i$-th coordinate is then determined by the hyperplane condition: it equals $\prod_{j\neq i}(\alpha_i-\alpha_j)$.
As already remarked in Section~\ref{NotandRes} the cardinality
of $J(K_{\mathfrak{p}})/2J(K_{\mathfrak{p}})$ and hence that of
$\mathrm{im}(\delta_{\mathfrak{p}})$ is known. In practise this makes it
fairly straightforward to describe explicit representants of the
elements in $\mathrm{im}(\delta_{\mathfrak{p}})$, for each $\mathfrak{p}\in S$.
The group $K(S)$ fits in the exact sequence
\[
\begin{tikzcd}
0 \arrow[r] & R_S^{*}/R_S^{*2} \arrow[r] & K(S) \arrow[r,"\beta"] & \mathrm{Cl}(R_S)[2] \arrow[r] & 0
\end{tikzcd}
\]
where $R_S=\{0\}\cup\{x\in K^{*}:\mathrm{ord}_{\mathfrak{p}}(x)\geq 0\text{ for all finite }\mathfrak{p}\notin S\}$ is the ring of $S$-integers in $K$.
Here $\beta$ sends $xK^{*2}$ to the class $[I R_S]$, where $x\mathcal{O}_K=\mathfrak{a}I^2$ with $\mathfrak{a}$ and $I$ co-prime fractional ideals such that $\mathfrak{a}$ is supported on prime ideals of $S$ and the support of $I$ does not contain any prime of $S$.
This is well-known; for completeness see \cite[Prop.~2.4.4]{evink}. The case of interest to us is when $K$ has odd class number.
\begin{proposition}\label{sunitbasis}
If $K$ has odd class number then the map $R_S^{*}/R_S^{*2}\to K(S)$ is an isomorphism. Moreover,
for each finite $\mathfrak{p}\in S$ writing $\mathfrak{p}^{k_{\mathfrak{p}}}=(x_{\mathfrak{p}})$ with $k_{\mathfrak{p}}$ the order of $\mathfrak{p}$ in the class group of $K$, the $x_{\mathfrak{p}}$ together with an $\mathbb{F}_2$-basis for $\mathcal{O}_K^{*}/\mathcal{O}_K^{*2}$ form an $\mathbb{F}_2$-basis for $K(S)$.
\end{proposition}
\begin{proof}
A detailed proof of this standard fact is provided in \cite[Cor.~2.4.7]{evink}.
\end{proof}
For an odd prime $p$ write $p^{*}=(-1)^{(p-1)/2}p$, so
$\mathbb{Q}(\sqrt{p^{*}})$ is the quadratic subfield of the cyclotomic field $\mathbb{Q}(\zeta_p)$.
In what follows we will compute $2$-Selmer groups over these
quadratic fields. One has
\begin{lemma}\label{oddclassnmr}
For any odd prime $p$ the field $K=\mathbb{Q}(\sqrt{p^{*}})$ has odd class number, and if $K$ is real (i.e., $p\equiv 1\bmod 4$) then a fundamental unit of $K$ has norm $-1$.
\end{lemma}
\begin{proof}
For a proof using genus theory, see for example \cite[Thm~2.1]{stevenhagen2018redei}. A slightly more direct argument
is given in \cite[Appendix~A.2]{evink}.
\end{proof}
\section{Proofs of the rank and Shafarevich-Tate group results}
Consider the genus two hyperelliptic curves
\[
C/\mathbb{Q}\colon y^2=f(x):=x(x^2-1)(x^2-4),
\]
and, for $p$ any prime number,
\[ C_p/\mathbb{Q}\colon y^2=x(x^2-p^2)(x^2-4p^2). \]
Then $C_p$ is a quadratic twist of $C$ over both $\mathbb{Q}(\sqrt{p})$ and $\mathbb{Q}(\sqrt{-p})$. Let $J$ and $J_p$ denote the Jacobians of
$C$ and $C_p$, respectively.
Observe that
\begin{equation}\label{eq:rankrelationtwists}
\mathrm{rank}\,J_p(\mathbb{Q})+\mathrm{rank}\,J(\mathbb{Q})=\mathrm{rank}\,J(\mathbb{Q}(\sqrt{\pm p})),
\end{equation}
for both possibilities of the sign $\pm$. A quick computation (Lemma~\ref{lem5.1}) yields $\mathrm{rank}\,J(\mathbb{Q})=0$. Since for the Jacobians at hand
the torsion subgroup yields a subgroup of
the $2$-Selmer group of dimension $4$,
it follows that
\[\mathrm{rank}\,J_p(\mathbb{Q})\leq
\dim_{\mathbb{F}_2}S^2(J/\mathbb{Q}(\sqrt{\pm p}))-4.\]
Using $\mathbb{Q}(\sqrt{p})$ in case $p\equiv 1,17\bmod 24$ and $\mathbb{Q}(\sqrt{-p})$ for $p\equiv 23\bmod 24$,
it will be shown that for certain subsets of these primes
the bound for $\mathrm{rank}\,J_p(\mathbb{Q})$ obtained in this way sharpens
the one which follows by directly applying
Proposition~\ref{prop2.1}. Specifically, this
results in proofs for Theorems \ref{Thm23mod24} - \ref{Thm1mod24}.
\label{NotationDxi}
Label the roots of $f$ as $(\alpha_1,\alpha_2,\alpha_3,\alpha_4,\alpha_5)=(-2,-1,0,1,2)$.
For a field $F\supset\mathbb{Q}$ and a point $(\xi,\eta)\in C(F)$ write $D_{\xi}\in J(F)$ for the point corresponding to the divisor $[(\xi,\eta)]-[\infty]$ on $C$. Note that
although $D_\xi$ depends on $\eta$, its image in
the $2$-Selmer group $S^2(J/F)$ does not.
The image of $J(\mathbb{Q})[2]$ under $\delta$ is spanned by
\[
\begin{array}{c|c c c c c}
& x+2 & x+1 & x & x-1 & x-2 \\
\hline
D_{-2} & 6 & -1 & -2 & -3 & -1 \\
D_{-1} & 1 & -6 & -1 & -2 & -3 \\
D_{0} & 2 & 1 & 1 & -1 & -2 \\
D_{1} & 3 & 2 & 1 & -6 & -1
\end{array}
\]
Here $x-\alpha_i$ denotes the map
$[P]-[\infty]\mapsto x(P)-\alpha_i$ as in \eqref{eq:imagedivisors},
compare \cite{schaefer1995}.\\
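As an illustration of how these rows are obtained, take $D_0=[(0,0)]-[\infty]$: its
coordinates at $x-\alpha_j$ for $j\neq 3$ are $0-(-2)=2$, $0-(-1)=1$, $0-1=-1$ and
$0-2=-2$, and the remaining coordinate (at $x$) is determined by the requirement that
the product of all five coordinates be a square: $2\cdot 1\cdot(-1)\cdot(-2)=4\equiv 1$.
This gives the row of $D_0$ in the table above.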
The local fields for which we need the images
$\text{im}\,\delta_p$ are $\mathbb{Q}_2$,
$\mathbb{Q}_3$, $\mathbb{Q}_3(i)$ and $\mathbb{R}$. Much of this was already done in
\cite[pp.~43-45]{heiden}. One has $\mathbb{Q}_2^{*}/\mathbb{Q}_2^{*2}=\langle -1,2,3\rangle$, $\mathbb{Q}_3^{*}/\mathbb{Q}_3^{*2}=\langle -1,3\rangle$, for $F=\mathbb{Q}_3(i)$ moreover $F^{*}/F^{*2}=\langle 3,r\rangle$, where $r=1+i$, and of course $\mathbb{R}^{*}/\mathbb{R}^{*2}=\langle -1\rangle$. The local images are then spanned as follows.
\begin{equation*}
\begin{array}{c|c c c c c }
\mathbb{Q}_2 & x+2 & x+1 & x & x-1 & x-2 \\
\hline
D_{-2} & 6 & -1 & -2 & -3 & -1 \\
D_{-1} & 1 & -6 & -1 & -2 & -3 \\
D_0 & 2 & 1 & 1 & -1 & -2 \\
D_1 & 3 & 2 & 1 & -6 & -1 \\
D_6 & 2 & -1 & 6 & -3 & 1 \\
D_7 & 1 & 2 & -1 & 6 & -3
\end{array}
\end{equation*}
\begin{equation*}
\begin{array}{c|c c c c c}
\mathbb{Q}_3 & x+2 & x+1 & x & x-1 & x-2 \\
\hline
D_{-2} & -3 & -1 & 1 & -3 & -1 \\
D_{-1} & 1 & 3 & -1 & 1 & -3 \\
D_0 & -1 & 1 & 1 & -1 & 1 \\
D_4 & 1 & -3 & -1 & 1 & 3 \\
\end{array}
\end{equation*}
\begin{equation*}
\begin{array}{c|c c c c c}
\mathbb{Q}_3(i) & x+2 & x+1 & x & x-1 & x-2 \\
\hline
D_{-2} & 3 & 1 & 1 & 3 & 1 \\
D_{-1} & 1 & 3 & 1 & 1 & 3 \\
D_{i} & r & r & 1 & r & r \\
D_{4+3i} & 3r & 1 & 1 & 3r & 1 \\
\end{array}
\end{equation*}
\begin{equation*}
\begin{array}{c|c c c c c}
\mathbb{R} & x+2 & x+1 & x & x-1 & x-2 \\
\hline
D_{-1} & 1 & -1 & -1 & -1 & -1 \\
D_{0} & 1 & 1 & 1 & -1 & -1 \\
\end{array}
\end{equation*}
\begin{lemma}\label{lem5.1}
We have $\mathrm{rank}\,J(\mathbb{Q})=0$.
\end{lemma}
\begin{proof}
It suffices to show $\dim_{\mathbb{F}_2}S^2(J/\mathbb{Q})=4$. Note $\Delta(f)=2^{10}\cdot 3^{4}$, so $S=\{2,3,\infty\}$ and $K(S)=\langle -1,2,3\rangle$. Then $S^2(J/\mathbb{Q})$ injects into the $2$-adic image, and
\[
S^2(J/\mathbb{Q})=A\oplus\delta(J(\mathbb{Q})[2])
\]
where $A$ consists of all $x\in S^2(J/\mathbb{Q})$ with
$2$-adic image in the span of
\[
\begin{array}{c c c c c c c}
( & 2, & -1, & 6, & -3, & 1 & ),\\
( & 1, & 2, & -1, & 6, & -3 & ).
\end{array}
\]
If $x=(e_1,\dotsc,e_5)\in A$, then the $3$-adic image forces $e_3\in\langle -1\rangle$, hence $x$ is in the span of $(1,2,-1,6,-3)$. Therefore $x$ is trivial because
$(1,2,-1,6,-3)\not\in\mathrm{im}(\delta_3)$. Thus $A=0$ and $S^2(J/\mathbb{Q})$ has $\mathbb{F}_2$-dimension $4$.
\end{proof}
We now compute $S^2(J/\mathbb{Q}(\sqrt{-p}))$ for $p\equiv 23\bmod 24$
and $S^2(J/\mathbb{Q}(\sqrt{p}))$ for $p\equiv 1,17\bmod 24$. The computation follows \cite[\S~3.4.2--4]{evink},
except that R\'edei symbols are used instead of
various reciprocity arguments in {\sl loc.\ cit.}\\
Consider a prime $p\equiv 23\bmod 24$ and let $K=\mathbb{Q}(\sqrt{-p})$. Then $K$ is imaginary quadratic and both $2$ and $3$ split in $K$, so as the set $S$ of
places of $K$ needed for embedding
$S^2(J/K)$ in $\oplus_{i=1}^5K(S)$ we take
the four primes dividing $6$. The completion of
$K$ at a prime in $S$ equals $\mathbb{Q}_2$ or $\mathbb{Q}_3$.\\
Write $\mathfrak{p}_3,\mathfrak{q}_3$ for the prime ideals in $\mathcal{O}_K$ dividing $3$ and let $k_3$ be the order of $[\mathfrak{p}_3]$ in $\mathrm{Cl}_K$.
Then $\mathfrak{p}_3^{k_3}=(x_3)$ for some $x_3\in \mathcal{O}_K$.
Since $\mathfrak{q}_3\nmid (x_3)$ and
$K_{\mathfrak{q}_3}=\mathbb{Q}_3$, this
$x_3$ maps to $\pm 1$ in $K_{\mathfrak{q}_3}^{*}/K_{\mathfrak{q}_3}^{*2}$. Multiplying $x_3$ by $-1$ if necessary, we may and will assume that $x_3$ is a square in
$K_{\mathfrak{q}_3}$.
The conjugate $y_3\in\mathcal{O}_K$ of $x_3$ satisfies $\mathfrak{q}_3^{k_3}=(y_3)$ and $x_3y_3=3^{k_3}$.\\
Let $\mathfrak{p},\mathfrak{q}$ be the prime
ideals in $\mathcal{O}_K$ over $2$. In the
$\mathfrak{p}$-adic completion, $x_3$ and $y_3$
yield elements of $\langle -1,3\rangle\subset\mathbb{Q}_2^{*}/\mathbb{Q}_2^{*2}$. By Lemma~\ref{oddclassnmr} the order $k_3$ of
$[\mathfrak{p}_3]\in\mathrm{Cl}_K$ is odd, so the product
$x_3y_3$ yields $3\in \mathbb{Q}_2^{*}/\mathbb{Q}_2^{*2}$. Hence exactly one of $x_3, y_3$ has $\mathfrak{p}$-adic image in
$\langle -3\rangle=\{1,-3\}\subset\mathbb{Q}_2^{*}/\mathbb{Q}_2^{*2}$. As $\mathrm{im}_{\mathfrak{p}}(y_3)=\mathrm{im}_{\mathfrak{q}}(x_3)$,
this implies that $x_3$ maps into $\langle -3\rangle \subset\mathbb{Q}_2^{*}/\mathbb{Q}_2^{*2}$ for precisely one of $\mathfrak{p},\mathfrak{q}$.
Denote this ideal by $\mathfrak{p}_2$,
then $\mathfrak{p}_2$ is unramified in $K(\sqrt{x_3})$.\\
Let $x_2\in\mathfrak{p}_2$ be a generator for $\mathfrak{p}_2^{k_2}$, with $k_2$ the order of $[\mathfrak{p}_2]$. As above, multiplying $x_2$ by
$-1$ if necessary we may and will assume that $x_2$ maps $\mathfrak{q}_2$-adically into $\langle -3\rangle\subset\mathbb{Q}_2^{*}/\mathbb{Q}_2^{*2}$, where $\mathfrak{q}_2$ is the conjugate of $\mathfrak{p}_2$. Let $y_2$ be the conjugate of $x_2$, so $\mathfrak{q}_2^{k_2}=(y_2)$ and $x_2y_2=2^{k_2}$.\\
Proposition~\ref{sunitbasis} implies $K(S)=\langle -1,x_2,y_2,x_3,y_3\rangle$. We collect the local images in $K_{\mathfrak{p}}^{*}/K_{\mathfrak{p}}^{*2}$ of these generators, for $\mathfrak{p}\in S=\{\mathfrak{p}_2,\mathfrak{q}_2,\mathfrak{p}_3,\mathfrak{q}_3\}$, as follows.
\begin{equation}\label{redeitable23}
\begin{array}{c|c c c c}
& \mathfrak{p}_2 & \mathfrak{q}_2 & \mathfrak{p}_3 & \mathfrak{q}_3 \\
\hline
-1 & -1 & -1 & -1 & -1 \\
x_2 & \cellcolor[gray]{0.65} & \cellcolor[gray]{0.65} & \cellcolor[gray]{0.8} & \cellcolor[gray]{0.8} \\
y_2 & \cellcolor[gray]{0.65} & \cellcolor[gray]{0.65} & \cellcolor[gray]{0.8} & \cellcolor[gray]{0.8} \\
x_3 & \cellcolor[gray]{0.8} & \cellcolor[gray]{0.8} & 3 & 1 \\
y_3 & \cellcolor[gray]{0.8} & \cellcolor[gray]{0.8} & 1 & 3
\end{array}
\end{equation}
For $l\in\{2,3\}$ recall $\mathrm{im}_{\mathfrak{p}}(x_l)=\mathrm{im}_{\mathfrak{q}}(y_l)$ for conjugate $\mathfrak{p}$ and $\mathfrak{q}$ in $S$ and $x_ly_l=l^{k_l}$ with $k_l$ odd. Hence the $2\times2$-block in the table corresponding to $x_l,y_l$ and conjugate $\mathfrak{p}, \mathfrak{q}$ is determined by any one entry in the block.\\
The normal closure of $K(\sqrt{x_2})/\mathbb{Q}$ yields a minimally ramified extension over $\mathbb{Q}(\sqrt{-2p})$, as $\mathfrak{p}_2$ is unramified in $K(\sqrt{y_2})$.
Hence $y_2$ is $\mathfrak{p}_2$-adically a square if and only if $[-p,2,2]=1$.
Thus the top left block in (\ref{redeitable23}) is determined by the R\'edei symbol $[2,2,-p]$.
As suggested by the coloring, the two blocks away from the diagonal are both determined by the same R\'edei symbol. To see this, note that the normal closure of $K(\sqrt{x_3})/\mathbb{Q}$ yields a minimally ramified extension of $\mathbb{Q}(\sqrt{-3p})$.
This extension has trivial inertia degree over $3$, hence $\mathrm{im}_{\mathfrak{p}_2}(x_3)=1$ if and only if $[-p,3,6]=1$. Similarly, the normal closure of
$K(\sqrt{x_2y_3})/\mathbb{Q}$ yields a minimally ramified extension of
$\mathbb{Q}(\sqrt{-6p})$.
Since $\mathrm{im}_{\mathfrak{p}_3}(y_3)=1$,
this implies $\mathrm{im}_{\mathfrak{p}_3}(x_2)=1$ if and only if $[-p,6,3]=1$.
Hence Table~(\ref{redeitable23}) is determined by the values of the two R\'edei symbols $[2,2,-p]$ and $[3,6,-p]$.
Below, the four possibilities for this pair
of symbols will be considered.
\begin{remark}
Since $(3,2)_3=-1$, the similar statement `$[-p,2,3]=[-p,3,2]$' cannot be used to show that the two lighter gray blocks in
Table~(\ref{redeitable23}) are determined by the same R\'edei symbol. This is the reason for the workaround with the bottom right block. However, the proof of
$[a,b,c]=[a,c,b]$ relies on the product formula for quadratic Hilbert symbols in $\mathbb{Q}(\sqrt{a})$; nothing
prevents us from using this product formula in $\mathbb{Q}(\sqrt{-p})$.
Here the identity $\prod_{\mathfrak{p}}(x_2,x_3)_{\mathfrak{p}}=1$ leads to
$\mathrm{im}_{\mathfrak{p}_2}(x_3)=1 \Leftrightarrow\mathrm{im}_{\mathfrak{p}_3}(x_2)=1$, but one still needs the symbol $[3,6,-p]$ to link the two blocks to splitting behaviour of primes in a \textit{fixed} (i.e., not depending on $p$) number field.
\end{remark}
For the Selmer group computations, observe that $S^2(J/K)=A\oplus \mathrm{im}(J(K)[2])$ for
\[
A=\{(e_1,\dotsc,e_5)\in S^2(J/K):e_3\xmapsto{\mathfrak{p}_3} 1\text{ and }e_4\xmapsto{\mathfrak{p}_2} 1\}.
\]
First consider the case $[2,2,-p]=[3,6,-p]=1$,
which means the table is as follows.
\begin{equation*}
\begin{array}{c|c c c c}
& \mathfrak{p}_2 & \mathfrak{q}_2 & \mathfrak{p}_3 & \mathfrak{q}_3 \\
\hline
-1 & -1 & -1 & -1 & -1 \\
x_2 & \cellcolor[gray]{0.65}2 & \cellcolor[gray]{0.65}1 & \cellcolor[gray]{0.8}1 & \cellcolor[gray]{0.8}-1 \\
y_2 & \cellcolor[gray]{0.65}1 & \cellcolor[gray]{0.65}2 & \cellcolor[gray]{0.8}-1 & \cellcolor[gray]{0.8}1 \\
x_3 & \cellcolor[gray]{0.8}1 & \cellcolor[gray]{0.8}3 & 3 & 1 \\
y_3 & \cellcolor[gray]{0.8}3 & \cellcolor[gray]{0.8}1 & 1 & 3
\end{array}
\end{equation*}
Let $x=(e_1,\dotsc,e_5)\in A$. The
$\mathfrak{p}_3$-adic and $\mathfrak{q}_3$-adic image implies
$e_3\in\langle x_2,-y_2\rangle$,
and therefore $\mathrm{im}_{\mathfrak{p}_2}(e_3)\subset\langle -1,2\rangle$ and $\mathrm{im}_{\mathfrak{q}_2}(e_3)\subset\langle -2\rangle$.
This removes the fifth row of the $\mathbb{Q}_2$-table from consideration. As $\mathrm{im}_{\mathfrak{p}_2}(e_4)=1$, one concludes $\mathrm{im}_{\mathfrak{p}_2}(x)$ is in the span of
\begin{equation*}
\begin{array}{r c c c c c l}
(&6, & 1, & -1, & 1, & -6 &), \\
(&6, & 3, & -2, & 1, & -1 &).
\end{array}
\end{equation*}
Together with $\mathrm{im}_{\mathfrak{p}_3}(e_2)\subset\langle -1\rangle$ this gives $e_2\in\langle y_2,y_3\rangle$.
Next, $\mathrm{im}_{\mathfrak{q}_2}(e_2)\subset\langle 2\rangle$ and $\mathrm{im}_{\mathfrak{q}_2}(e_3)\subset\langle -2\rangle$ implies $\mathrm{im}_{\mathfrak{q}_2}(x)$ is in the span of
\begin{equation*}
\begin{array}{r c c c c c l}
(&2, & 1, & 1, & -1, & -2 &), \\
(&3, & 2, & 1, & -6, & -1 &),
\end{array}
\end{equation*}
so $e_3\in\langle x_2\rangle$. Since $n=(1,y_3,x_2,1,x_2y_3)\in A$,
a complement inside $A$ of $\langle n\rangle$ is obtained by setting $e_3=1$. For $x$
in this complement $\mathrm{im}_{\mathfrak{p}_2}(x)$ is trivial, hence $e_i\in\langle y_2,x_3\rangle$ for all $i$, implying $\mathrm{im}_{\mathfrak{q}_2}(x)$ is in the span of $(6,2,1,6,2)$.
Then $e_1,e_4\in\langle y_2x_3\rangle$ and $e_2,e_5\in\langle y_2\rangle$. A nontrivial $\mathrm{im}_{\mathfrak{q}_2}(x)$ can only occur
for $e_1,e_2,e_4,e_5$ all $\neq 1$, so this complement is at most one dimensional. Since $(y_2x_3,y_2,1,y_2x_3,y_2)\in A$
one concludes that $A$ is two-dimensional, and $\dim_{\mathbb{F}_2}S^2(J/K)=6$.\\
In the remaining three cases (i.e., $[2,2,-p]$ and $[3,6,-p]$ not both $1$)
the computation is analogous; for details see \cite[\S~3.4.2-4]{evink}. The results are as follows.
\begin{equation*}
\begin{array}{c|c|c|c}
\hspace{-4pt}{ }[2,2,-p]\hspace{-4pt}{ } &
{ }\hspace{-4pt}[3,6,-p]\hspace{-4pt}{ } &
{ }\hspace{-4pt}
\dim_{\mathbb{F}_2}S^2(J/K)\hspace{-2pt}{ } & \text{additional generators} \\
\hline
1 & 1 & 6 & (1,y_3,x_2,1,x_2y_3),(y_2x_3,y_2,1,y_2x_3,y_2) \\
1 & -1 & 6 & (-y_2y_3,y_2,1,-y_2y_3,y_2),(1,-y_3,y_2,1,-y_2y_3) \\
-1 & 1 & 4 & \text{ none }\\
-1 & -1 & 4 & \text{ none } \\
\end{array}
\end{equation*}
With this one proves
Theorem~\ref{Thm23mod24}:
\begin{proof}[Proof of Theorem~\ref{Thm23mod24}.]
Let $p\equiv 23\bmod 48$ be prime. Then $p\equiv 7\bmod 16$ so Example~\ref{redeiexample1}
shows $[2,2,-p]=-1$. The table above implies $\dim_{\mathbb{F}_2}S^2(J/\mathbb{Q}(\sqrt{-p}))=4$
and as a consequence $\mathrm{rank}\,J(\mathbb{Q}(\sqrt{-p}))=0$. Hence $\mathrm{rank}\,J_p(\mathbb{Q})=0$ by
equation~\eqref{eq:rankrelationtwists}. Since
$p\equiv 23\bmod 24$, Proposition~\ref{prop2.1}
yields $\dim_{\mathbb{F}_2}S^2(J_p/\mathbb{Q})=6$, hence
the exact sequence \eqref{eq:rankshasel} shows $\Sh(J_p/\mathbb{Q})[2]\cong(\mathbb{Z}/2\mathbb{Z})^2$.
\end{proof}
\begin{remark}
Part of what is proven above is that $\dim_{\mathbb{F}_2}S^2(J/\mathbb{Q}(\sqrt{-p}))$ for primes $p\equiv 23\bmod 24$ depends only on the values of $[2,2,-p]$ and $[3,6,-p]$. Hence instead of the
provided calculations for an undetermined $p\equiv 23\bmod 24$
one may take a fixed prime for each of the four possibilities for
the pair of R\'{e}dei symbols, and use e.g. Magma \cite{magma} to compute the Selmer group for this prime. The smallest primes covering all cases are given in the table below.
\[
\begin{array}{c|c|c}
p & [2,2,-p] & [3,6,-p] \\
\hline
191 & 1 & 1 \\
47 & 1 & -1 \\
167 & -1 & 1 \\
23 & -1 & -1
\end{array}
\]
We use Magma in this way to obtain proofs of Theorems~\ref{Thm17mod24} and \ref{Thm1mod24}.
\end{remark}
\begin{proposition}\label{prop5.4}
For $K=\mathbb{Q}(\sqrt{p})$ with $p\equiv 17\bmod 24$ prime, $\dim_{\mathbb{F}_2}S^2(J/K)$ is completely determined by the R\'edei symbols $[2,2,p]$ and $[2,-1,p]$.
\end{proposition}
\begin{proof}
Let $\sigma_1,\sigma_2:K\hookrightarrow\mathbb{R}$ be the two real embeddings of $K$. Take a fundamental unit $\varepsilon\in\mathcal{O}_K^*$ with $\sigma_1(\varepsilon)>0$.
Lemma~\ref{oddclassnmr} implies $\varepsilon\overline{\varepsilon}=-1$, hence there is a unique prime ideal $\mathfrak{p}_2\subset \mathcal{O}_K$ over $2$ that is unramified in $K(\sqrt{\varepsilon})$. Let $\mathfrak{q}_2$ be the conjugate
of $\mathfrak{p}_2$ and write $\mathfrak{p}_2^k=(x_2)$ where $k$ is the order of $[\mathfrak{p}_2]$ in $\mathrm{Cl}_K$. Multiplying $x_2$ by $\pm\varepsilon$ if necessary we can
and will assume that $x_2$ has positive norm and moreover $\mathfrak{q}_2$ is unramified in $K(\sqrt{x_2})$. Let $y_2$ be the conjugate of $x_2$, so $x_2y_2=2^k$.
Put $S=\{\mathfrak{p}_2,\mathfrak{q}_2,(3),\sigma_1,\sigma_2\}$, then $K(S)=\langle -1,\varepsilon,x_2,y_2,3\rangle$.
The table of images in $K_v^*/{K_v^*}^2$ of the generators of $K(S)$ is as follows
(as before, $r^2=2i\in\mathbb{Q}_3(i)$).
\begin{equation*}
\begin{array}{c|c c c c c}
& \mathfrak{p}_2 & \mathfrak{q}_2 & (3) & \sigma_1 & \sigma_2 \\
\hline
-1 & -1 & -1 & 1 & -1 & -1 \\
\varepsilon & \cellcolor[gray]{0.65} & \cellcolor[gray]{0.65} & r & 1 & -1 \\
x_2 & \cellcolor[gray]{0.8} & \cellcolor[gray]{0.8} & r & \cellcolor[gray]{0.65} & \cellcolor[gray]{0.65} \\
y_2 & \cellcolor[gray]{0.8} & \cellcolor[gray]{0.8} & r & \cellcolor[gray]{0.65} & \cellcolor[gray]{0.65} \\
3 & 3 & 3 & 3 & 1 & 1 \\
\end{array}
\end{equation*}
The $3$-adic images of $\varepsilon,x_2,y_2$
follow by observing that the inertia degree
of $3\mathbb{Z}$ in the normal closures of $K(\sqrt{x_2})$ and $K(\sqrt{\varepsilon})$ over $\mathbb{Q}$ equals $4$.
As $\mathfrak{p}_2$ is unramified in $K(\sqrt{\varepsilon})$ and in $K(\sqrt{y_2})$, the normal closures over $\mathbb{Q}$ yield minimally ramified extensions. Hence $\mathrm{im}_{\mathfrak{p}_2}(\varepsilon)=1
\Leftrightarrow [p,-1,2]=1$, $\mathrm{im}_{\mathfrak{p}_2}(y_2)=1
\Leftrightarrow [p,2,2]=1$, and $\mathrm{im}_{\sigma_1}(x_2)=1\Leftrightarrow [p,2,-1]=1$.
R\'edei reciprocity completes the proof.
\end{proof}
Aided by Magma for the rightmost column,
one computes the following table.
\[
\begin{array}{c|c|c|c}
p & [2,2,p] & [2,-1,p] & \dim_{\mathbb{F}_2}S^2(J/\mathbb{Q}(\sqrt{p})) \\
\hline
113 & 1 & 1 & 6\\
17 & 1 & -1 & 4\\
41 & -1 & 1 & 4 \\
89 & -1 & -1 & 6
\end{array}
\]
From the above, Theorem~\ref{Thm17mod24}
readily follows:
\begin{proof}[Proof of Theorem~\ref{Thm17mod24}]
Take $p\equiv 17\bmod 24$ prime and put
$K=\mathbb{Q}(\sqrt{p})$.
Proposition~\ref{prop5.4} and the table
above show
$\dim_{\mathbb{F}_2}S^2(J/K)=4
\Leftrightarrow [2,2,p][2,-1,p]=-1$.
Tri-linearity of the R\'edei symbol implies that the latter condition is equivalent to $[2,-2,p]=-1$, which by Example~\ref{Ex3.6}
means $p$ is not completely split in $\mathbb{Q}(\sqrt[4]{2})$.
As remarked earlier,
$\dim_{\mathbb{F}_2}S^2(J/K)=4
\Rightarrow \mathrm{rank}\, J(K)=0
\Leftrightarrow \mathrm{rank}\,J_p(\mathbb{Q})=0$.
Proposition~\ref{prop2.1}
and the exact sequence \eqref{eq:rankshasel}
now finish the proof.
\end{proof}
Lastly we cover the case $p\equiv 1\bmod 24$.
\begin{proposition}\label{prop5.5}
For $K=\mathbb{Q}(\sqrt{p})$ with $p\equiv 1\bmod 24$ prime, $\dim_{\mathbb{F}_2}S^2(J/K)$ is completely determined by the R\'edei symbols $[2,2,p],\;[2,-1,p],\;[3,-2,p]\text{ and }[3,6,p]$.
\end{proposition}
\begin{proof}
Let $p\equiv 1\bmod 24$ be prime and put
$K=\mathbb{Q}(\sqrt{p})$. As in the proof of
Proposition~\ref{prop5.4} let $\sigma_1,\sigma_2:K\hookrightarrow\mathbb{R}$ be the real embeddings, take a fundamental unit
$\varepsilon\in\mathcal{O}_K$ with $\sigma_1(\varepsilon)>0$,
let $\mathfrak{p}_2$ be the prime over $2$ that is unramified in $K(\sqrt{\varepsilon})$,
and denote the conjugate of
$\mathfrak{p}_2$ by $\mathfrak{q}_2$. Then $\mathfrak{p}_2^{k_2}=(x_2)$ with $k_2=\mathrm{ord}([\mathfrak{p}_2])$, where one chooses
$x_2\in\mathcal{O}_K$ of positive norm and such that $\mathfrak{q}_2$ is unramified in $K(\sqrt{x_2})$.\\
Let $\mathfrak{p}_3$ be the prime over $3$ that splits in $K(\sqrt{x_2})$, and let $\mathfrak{q}_3$ be its conjugate.
With $k_3=\mathrm{ord}([\mathfrak{p}_3])$, write $\mathfrak{p}_3^{k_3}=(x_3)$ with $x_3\in\mathcal{O}_K$ of positive norm, chosen so that $\mathfrak{p}_2$ is unramified in $K(\sqrt{x_3})$. For $i\in\{2,3\}$ let $y_i$ be the conjugate of $x_i$, so $x_iy_i=i^{k_i}$.
Put $S=\{\mathfrak{p}_2,\mathfrak{q}_2,\mathfrak{p}_3,\mathfrak{q}_3,\sigma_1,\sigma_2\}$, then $K(S)=\langle -1,\varepsilon,x_2,y_2,x_3,y_3\rangle\subset K^*/{K^*}^2$.
Information on local images of $K(S)$ is presented in the following table.
\begin{equation*}
\begin{array}{c|c c c c c c}
& \mathfrak{p}_2 & \mathfrak{q}_2 & \mathfrak{p}_3 & \mathfrak{q}_3 & \sigma_1 & \sigma_2 \\
\hline
-1 & -1 & -1 & -1 & -1 & -1 & -1 \\
\varepsilon & \cellcolor[gray]{0.4} & \cellcolor[gray]{0.4} & \cellcolor[gray]{0.55} & \cellcolor[gray]{0.55} & 1 & -1 \\
x_2 & \cellcolor[gray]{0.7} & \cellcolor[gray]{0.7} & 1 & -1 & \cellcolor[gray]{0.4} & \cellcolor[gray]{0.4} \\
y_2 & \cellcolor[gray]{0.7} & \cellcolor[gray]{0.7} & -1 & 1 & \cellcolor[gray]{0.4} & \cellcolor[gray]{0.4} \\
x_3 & 1 & 3 & \cellcolor[gray]{0.85} & \cellcolor[gray]{0.85} & \cellcolor[gray]{0.55} & \cellcolor[gray]{0.55} \\
y_3 & 3 & 1 & \cellcolor[gray]{0.85} & \cellcolor[gray]{0.85} & \cellcolor[gray]{0.55} & \cellcolor[gray]{0.55}
\end{array}
\end{equation*}
To see this, first consider the bottom middle $2\times 2$ block. Note that $[p,6,3]=1$ if and only if $\mathrm{im}_{\mathfrak{p}_3}(y_3)=\mathrm{im}_{\mathfrak{p}_3}(x_2y_3)=1$,
and similarly $[p,3,6]=1$ precisely when the equivalence $\mathrm{im}_{\mathfrak{q}_2}(y_3)=1$ $\Leftrightarrow$ $\mathrm{im}_{\mathfrak{p}_3}(y_3)=1$ holds. Since $[p,6,3]=[p,3,6]$, this implies $\mathrm{im}_{\mathfrak{q}_2}(y_3)=1$ and moreover $\mathrm{im}_{\mathfrak{p}_3}(y_3)=1$ if and only if $[p,6,3]=1$.
The choice of $x_3$ and the equality $x_3y_3=3^{k_3}$ for $k_3=\mathrm{ord}([\mathfrak{p}_3])$ odd, implies the bottom left block.
The remaining assertions about the table (in particular:
the regions colored in the same shade of grey are determined
by any one entry in that region) are straightforward and/or
analogous to what we did in other
$\bmod~24$ cases.\\
As in the $17\bmod 24$ case, $\mathrm{im}_{\mathfrak{p}_2}(\varepsilon)=1$ $\Leftrightarrow$ $[p,-1,2]=1$, and $\mathrm{im}_{\sigma_1}(x_2)=1$ $\Leftrightarrow$ $[p,2,-1]=1$, and $\mathrm{im}_{\mathfrak{p}_2}(y_2)=1$ $\Leftrightarrow$ $[p,2,2]=1$.\\
Finally, $\mathrm{im}_{\mathfrak{p}_3}(\varepsilon)=\mathrm{im}_{\mathfrak{p}_3}(\varepsilon x_2)=1$ precisely when $[p,-2,3]=1$.
Since $\mathrm{im}_{\mathfrak{p}_2}(x_3)=1$, one has $\mathrm{im}_{\sigma_1}(x_3)=1\Leftrightarrow [p,3,-2]=1$.
R\'edei reciprocity finishes the proof.
\end{proof}
Using Magma for the rightmost column results in the following table (in fact implying a
stronger version of Proposition~\ref{prop5.5}:
$\dim_{\mathbb{F}_2}S^2(J/\mathbb{Q}(\sqrt{p}))$ for the primes
$p\equiv 1\bmod 24$ only depends on the R\'edei symbols
$[2,2,p],\,[2,-1,p]$, and $[3,-2,p]$).
\[
\begin{array}{c|c|c|c|c|c}
p & [2,2,p] & [2,-1,p] & [3,-2,p] & [3,6,p] & \dim_{\mathbb{F}_2}S^2(J/\mathbb{Q}(\sqrt{p})) \\
\hline
2593 & 1 & 1 & 1 & 1 & 8\\
1153 & 1 & 1 & 1 & -1 & 8\\
337 & 1 & 1 & -1 & 1 & 4\\
577 & 1 & 1 & -1 & -1 & 4\\
433 & 1 & -1 & 1 & 1 & 4\\
97 & 1 & -1 & 1 & -1 & 4\\
241 & 1 & -1 & -1 & 1 & 6\\
193 & 1 & -1 & -1 & -1 & 6\\
1321 & -1 & 1 & 1 & 1 & 6\\
409 & -1 & 1 & 1 & -1 & 6\\
1129 & -1 & 1 & -1 & 1 & 4\\
313 & -1 & 1 & -1 & -1 & 4\\
937 & -1 & -1 & 1 & 1 & 6\\
1033 & -1 & -1 & 1 & -1 & 6\\
73 & -1 & -1 & -1 & 1 & 4\\
601 & -1 & -1 & -1 & -1 & 4\\
\end{array}
\]
\begin{proof}[Proof of Theorem~\ref{Thm1mod24}]
Let $p\equiv 1\bmod 24$ be prime and put
$K=\mathbb{Q}(\sqrt{p})$. Proposition~\ref{prop2.1} implies $\dim_{\mathbb{F}_2}S^2(J_p/\mathbb{Q})=8$,
hence as in the proofs of
Theorems~\ref{Thm23mod24} and \ref{Thm17mod24}
it suffices to show that $\dim_{\mathbb{F}_2}S^2(J/K)=4$
if $p$ satisfies one of the conditions (a), (b), or (c)
mentioned in the statement of Theorem~\ref{Thm1mod24}.
\\
Note: $p$ splits completely in $\mathbb{Q}(\sqrt[4]{2}) \Leftrightarrow [2,2,p][2,-1,p]=[2,-2,p]=1$.
Also, $p$ splits completely in $\mathbb{Q}(\sqrt{1+\sqrt{3}})\Leftrightarrow [3,-2,p]=1$,
and $[2,2,p]=1\Leftrightarrow p\equiv 1\bmod 16$. Hence condition (a) corresponds to the cases
$p\in\{73, 337, 577, 601\}$ in the table above.
Condition (b) corresponds to $p\in\{97, 433\}$ in the table,
and condition~(c) to $p\in\{313, 1129\}$.
In all these cases the table shows $\dim_{\mathbb{F}_2}S^2(J/K)=4$, hence the result follows
by using Proposition~\ref{prop5.5}.
\end{proof}
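The splitting conditions appearing in the proof above are easy to test numerically for any given prime $p\equiv 1\bmod 24$: complete splitting in $\mathbb{Q}(\sqrt[4]{2})$, respectively in $\mathbb{Q}(\sqrt{1+\sqrt{3}})$, amounts to the defining quartic $x^4-2$, respectively $x^4-2x^2-2$, having four roots modulo $p$ (for such $p$ the discriminants of both quartics, which involve only $2$ and $3$, are prime to $p$). A small sketch (plain Python, offered only as a sanity check; it does not compute the R\'edei symbols themselves):
\begin{verbatim}
# Plain-Python sanity check (illustration only) of the splitting criteria
# used above, for primes p = 1 mod 24.
def roots_mod(coeffs, p):
    """Number of roots mod p of sum(coeffs[i] * x^i)."""
    return sum(1 for x in range(p)
               if sum(c * pow(x, i, p) for i, c in enumerate(coeffs)) % p == 0)

def splits_in_fourth_root_of_2(p):        # Q(2^(1/4)),        x^4 - 2
    return roots_mod([-2, 0, 0, 0, 1], p) == 4

def splits_in_sqrt_1_plus_sqrt3(p):       # Q(sqrt(1+sqrt(3))), x^4 - 2x^2 - 2
    return roots_mod([-2, 0, -2, 0, 1], p) == 4

for p in [73, 97, 241, 577]:
    print(p, p % 16 == 1,
          splits_in_fourth_root_of_2(p),
          splits_in_sqrt_1_plus_sqrt3(p))
\end{verbatim}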
We finish this section by presenting an analogous result for elliptic curves; we restrict to $p\equiv 1\bmod 24$ but
in the same spirit one obtains similar statements for the other congruence classes
$p\bmod 24$.
\begin{proposition}\label{ellRedeiexample}
Let $E/\mathbb{Q}$ be an elliptic curve with good reduction
away from $2, 3$ and with $E(\mathbb{Q})[2]=E(\overline{\mathbb{Q}})[2]$. For a prime $p\equiv 1\bmod 24$, the size of the $2$-Selmer group $S^2(E/\mathbb{Q}(\sqrt{p}))$ is determined by $E/\mathbb{Q}$ together with the Rédei symbols
\[
[2,2,p],\;[2,-1,p],\;[3,-2,p],\;[3,6,p].
\]
\end{proposition}
\begin{proof}
We use the notation introduced in the proof of
Proposition~\ref{prop5.5}.
Descent yields an embedding
\[
\delta\colon E(K)/2E(K)\hookrightarrow{}\ker\left(\bigoplus_{i=1}^3K(S)\to K(S)\right)
\]
and $S^2(E/K)$ consists of the elements in
$\bigoplus_{i=1}^3K(S)$ that locally are in the image of the
corresponding maps $\delta_v$, for all
$v\in S=\{\sigma_1,\sigma_2,\mathfrak{p}_2,\mathfrak{q}_2,
\mathfrak{p}_3,\mathfrak{q}_3\}$.
For these $v$, the image in $K_v^*/{K_v^*}^2$ of a basis
for $K(S)$ is described in the table presented in the proof of
Proposition~\ref{prop5.5}. As this table is determined
by the four given
R\'edei symbols and $S^2(E/K)$ consists of
triples of elements in $K(S)$ that
for $v\in S$ locally are in $\delta_v(E(K_v))$, the result follows.
\end{proof}
\begin{remark}
The finite list of elliptic curves satisfying the conditions from
Proposition~\ref{ellRedeiexample} was already presented in the
PhD thesis of F.B.~Coghlan \cite{Coghlan1966}. In fact he listed
{\em all} elliptic curves over $\mathbb{Q}$ having good reduction away
from $2$ and $3$. Precisely $28$ of these have full rational
$2$-torsion. The LMFDB tables \cite{lmfdb} contain them under
the conductors
$
\left\{ 24,\; 32,\; 48,\; 64,\; 72,\; 96,\; 144,\; 192,\; 288,\; 576 \right\}
$.
\end{remark}
\section{The $\mathbb{Q}$-rational points}\label{Sect6}
In this section the $\mathbb{Q}$-rational points of the curves
$C_p$ are briefly discussed. The proof of Corollary~\ref{cor2.2}
shows that for primes $p$ such that
$\mathrm{rank}\,J_p(\mathbb{Q})=0$, the set $C_p(\mathbb{Q})$
consists of the Weierstrass points only. Below a less
immediate case is discussed, namely a situation
with $\mathrm{rank}\,J_p(\mathbb{Q})=2$. We remark that in this case $\mathrm{rank}\,J_p(\mathbb{Q})$ is not strictly smaller than the genus of $C_p$ so the standard Chabauty method does not apply.
Take the prime $p=241$.
Using $\mathrm{rank}\,J_p(\mathbb{Q})\leq
\dim_{\mathbb{F}_2}S^2(J/\mathbb{Q}(\sqrt{p}))-4$,
the row $p=241$ in the table preceding the proof of Theorem~\ref{Thm1mod24} yields $\mathrm{rank}\,J_{241}(\mathbb{Q})\leq 2$.
The Mumford representations
\begin{align*}
P=&\left(x^2 - \tfrac{868230159329}{1782528400}x + \tfrac{8609056225}{4456321},
\tfrac{83127269153329233}{75258349048000}x
- \tfrac{8905877454269565}{37629174524}\right),\\
Q=&\left(x^2 - \tfrac{692452}{3721}x + \tfrac{73966756}{3721},
\tfrac{6990522627}{2269810}x + \tfrac{1284886465269}{1134905}\right)
\end{align*}
turn out to
define points in $J_{241}(\mathbb{Q})$. The homomorphism
$\delta\colon J_p(\mathbb{Q})\to S^2(J_p/\mathbb{Q})$ yields
$\delta(P)=(2,p,1,p,2)$ and $\delta(Q)=(1,p,p,p,p)$. These images
are independent modulo $\delta(J_p(\mathbb{Q})_{\mathrm{tor}})$,
which is generated by $(6,-p,-2p,-3p,-p)$, $(p,-6,-p,-2p,-3p)$, $(2p,p,1,-p,-2p)$, and $(3p,2p,p,-6,-p)$.
Hence $\mathrm{rank}\,J_{241}(\mathbb{Q})=2$. Moreover by
Proposition~\ref{prop2.1} and the exact sequence \eqref{eq:rankshasel}
one concludes $\Sh(J_{241}/\mathbb{Q})[2]\cong(\mathbb{Z}/2\mathbb{Z})^2$.
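A convenient way to double-check that a displayed pair $(a(x),b(x))$ is a valid Mumford representation of a point on $J_{241}$ is to verify the defining congruence $b(x)^2\equiv x(x^2-p^2)(x^2-4p^2)\pmod{a(x)}$. A small sympy sketch of this check (an illustration only; it merely tests the congruence for the fractions as printed above):
\begin{verbatim}
# sympy sketch (illustration only): test the Mumford condition b^2 = f (mod a)
# for the two pairs displayed above, with p = 241.
from sympy import symbols, Poly, Rational as Q

x = symbols('x')
p = 241
f = Poly(x*(x**2 - p**2)*(x**2 - 4*p**2), x)

def is_mumford_pair(a, b):
    return Poly(b**2 - f.as_expr(), x).rem(Poly(a, x)).is_zero

a_P = x**2 - Q(868230159329, 1782528400)*x + Q(8609056225, 4456321)
b_P = Q(83127269153329233, 75258349048000)*x - Q(8905877454269565, 37629174524)
a_Q = x**2 - Q(692452, 3721)*x + Q(73966756, 3721)
b_Q = Q(6990522627, 2269810)*x + Q(1284886465269, 1134905)

print(is_mumford_pair(a_P, b_P), is_mumford_pair(a_Q, b_Q))
\end{verbatim}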
To determine $C_{241}(\mathbb{Q})$ the methods developed in
\cite{BrSt} will now be used. Although this works in much greater
generality, here it is only briefly recalled in the special case
of the curves $C_p$. Consider the composition
\[
C_p(\mathbb{Q})\longrightarrow J_p(\mathbb{Q})
\stackrel{\delta}{\longrightarrow} S^2(J_p/\mathbb{Q})
\]
mapping $(a,b)\in C_p(\mathbb{Q})$ with $b\neq 0$ to
$(a+2p,a+p,a,a-p,a-2p)\in S^2(J_p/\mathbb{Q})$.
If $s=(e_1,\ldots,e_5)\in S^2(J_p/\mathbb{Q})$ is in the
image of $C_p(\mathbb{Q})$, then one has a rational point
on the smooth, complete curve $X_s/\mathbb{Q}$ corresponding to the
affine equations
\[
x+2p=e_1y_1^2,\;x+p=e_2y_2^2,\;
x=e_3y_3^2,\;x-p=e_4y_4^2,\;x-2p=e_5y_5^2.
\]
Here by abuse of notation $e_j$ represents the class $e_j\in\mathbb{Q}^*/{\mathbb{Q}^*}^2$; the result is independent of this representing element. The curve $X_s$ is what in \cite{BrSt} is called a two-cover of $C_p$ over $\mathbb{Q}$.
The ``Two-Selmer set'' of $C_p/\mathbb{Q}$ is
\[
\left\{ s\in S^2(J_p/\mathbb{Q})\;:\;
X_s\;\text{has rational points everywhere locally}
\right\}.
\]
As an example, for $p=241$ let $s:=\delta(P)=(2,p,1,p,2)$. Among the equations
for $X_s$ one has $x+2p=2y_1^2$ and $x-p=py_4^2$, defining the conic
$Q\colon 2y_1^2-py_4^2=3p$. One obtains a finite
morphism $X_s\to Q$ defined over $\mathbb{Q}$.
Since $Q(\mathbb{Q}_2)$ (as well as $Q(\mathbb{Q}_3)$) is empty, this shows $\delta(P)$
is not in the Two-Selmer set of $C_p/\mathbb{Q}$.
In other words: although $\delta(P)$ is everywhere
locally (even globally!) in $\delta_v(J_p(\mathbb{Q}_v))$, it is not in the
image of $C_p(\mathbb{Q}_2)\subset J_p(\mathbb{Q}_2)$.
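The claim $Q(\mathbb{Q}_2)=\emptyset$ can itself be reduced to a finite computation: a $\mathbb{Q}_2$-point on the conic would give a primitive integral solution of the homogeneous equation $2Y^2-241Z^2-723W^2=0$ over $\mathbb{Z}_2$, hence a primitive solution modulo $16$, and an exhaustive search finds none (and similarly modulo $9$ for $\mathbb{Q}_3$). A small sketch of this standard argument (plain Python, an illustration only):
\begin{verbatim}
# Plain-Python sketch (illustration only): the homogenised conic
#   2*Y^2 - 241*Z^2 - 723*W^2 = 0
# has no primitive solution mod 16 (resp. mod 9), hence no Q_2- (resp. Q_3-) point.
def primitive_solutions_mod(m, l):
    """Solutions mod m with Y, Z, W not all divisible by l."""
    sols = []
    for Y in range(m):
        for Z in range(m):
            for W in range(m):
                if Y % l == 0 and Z % l == 0 and W % l == 0:
                    continue                     # not primitive
                if (2*Y*Y - 241*Z*Z - 723*W*W) % m == 0:
                    sols.append((Y, Z, W))
    return sols

print(primitive_solutions_mod(16, 2), primitive_solutions_mod(9, 3))   # [] []
\end{verbatim}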
The Magma command {\tt TwoCoverDescent();} computes
the curves $X_s$ corresponding to the Two-Selmer set. In our case it turns out that of the $2^8$
elements in $S^2(J_{241}/\mathbb{Q})$, only the
six $\delta([W]-[\infty])$ for $W\in C_p(\mathbb{Q})$ a Weierstrass point, are in the
Two-Selmer set. We now show that for each of these
six elements $s$ the set
$\left\{R\in C_p(\mathbb{Q})\;:\;\delta([R]-[\infty])
=s\right\}$ consists of a single Weierstrass point.
As a consequence, $C_{241}(\mathbb{Q})=\{\infty,\,
(0,0),\,(\pm 241,0), (\pm 482,0)\}$. We use the notation
$D_\xi$ (here for certain elements in $J_p$) as introduced
on page~\pageref{NotationDxi}.\\
\begin{itemize}
\item $s:=\delta(0)=(1,1,1,1,1)$.
If a point $(a,b)\in C_{241}(\mathbb{Q})$ with $b\neq 0$
had $\delta$-image $s$,
then in particular the elliptic curve
$E_1\colon y^2=x(x+p)(x+2p)$ would admit a point
in $E_1(\mathbb{Q})$ with $x=a$ and $y\neq 0$.
Since $E_1(\mathbb{Q})\cong \mathbb{Z}/2\mathbb{Z}\times \mathbb{Z}/2\mathbb{Z}$, no such point exists.
\item $s:=\delta(D_{-2p})=(6,-p,-2p,-3p,-p)$.
In this case, considering the 1st, 3rd, and 4th
entry results in the elliptic curve
$E_2\colon y^2=x(x+2p)(x-p)$ satisfying
$E_2(\mathbb{Q})\cong \mathbb{Z}/2\mathbb{Z}\times \mathbb{Z}/2\mathbb{Z}$. Hence only
the Weierstrass point $(-482,0)\in C_p(\mathbb{Q})$ yields $\delta$-image $s$.
\item $s:=\delta(D_{-p})=(p,-6,-p,-2p,-3p)$.
Here the 2nd, 4th, and 5th entry results in
the elliptic curve $E_3\colon -y^2=(x+p)(x-p)(x-2p)$ whose only rational points
are the points of order at most $2$. Reasoning
as before, this implies that only
the Weierstrass point $(-241,0)\in C_p(\mathbb{Q})$ yields $\delta$-image $s$.
\item $s:=\delta(D_0)=(2p,p,1,-p,-2p)$.
Using entries 1, 2, and 3 results in the
elliptic curve $E_4\colon 2y^2=x(x+p)(x+2p)$,
whose only rational points
are the points of order at most $2$.
As above, this implies that only
the Weierstrass point $(0,0)\in C_p(\mathbb{Q})$ yields $\delta$-image $s$.
\item $s:=\delta(D_{p})=(3p,2p,p,-6,-p)$.
Here we use entries 1, 2, and 4, leading
to $E_5\colon -y^2=(x+2p)(x+p)(x-p)$.
Also here the only rational points are the points of order dividing $2$. So
only
the Weierstrass point $(241,0)\in C_p(\mathbb{Q})$ yields $\delta$-image $s$.
\item $s:=\delta(D_{2p})=(p,3p,2p,p,6)$.
Using entries 1, 2, and 5 one obtains
$E_6\colon 2y^2=(x+2p)(x+p)(x-2p)$. Here as well, the only rational points
are the points of order dividing $2$. So
$(482,0)\in C_p(\mathbb{Q})$ is the
only rational point with $\delta$-image $s$.
\end{itemize}
This completes the determination of the rational
points on $C_{241}$.\\
Note that for $p=5$ there are two additional points: $\#C_5(\mathbb{Q})=8$, the two non-Weierstrass points being $(20,\pm 1500)$. Applying Chabauty's method shows that there are no further points.\\
It may be possible to extend the method described here and in this
way answer the question whether a prime $p>5$ exists
such that $\#C_p(\mathbb{Q})>6$.
As a final remark, recall that the two-cover $X:=X_{(1,1,1,1,1)}$ of $C_p/\mathbb{Q}$
corresponds to the affine model
\[
x+2p=y_1^2,\;x+p=y_2^2,\;
x=y_3^2,\;x-p=y_4^2,\;x-2p=y_5^2.
\]
The maps $y_j\mapsto -y_j$ define a group
$(\mathbb{Z}/2\mathbb{Z})^5$ in
$\text{Aut}_{\mathbb{Q}}(X)$. Using
appropriate subgroups one obtains up to
isogeny the decomposition of $\text{Jac}(X)$ over $\mathbb{Q}$ given as follows.
Let $E_{24}\colon y^2=(x-1)(x^2-4)$ and $E_{32}\colon y^2=x^3-x$ and finally
$E_{96a}\colon y^2=x(x+1)(x-2)$ be elliptic
curves over $\mathbb{Q}$. For any such
$E/\mathbb{Q}$ and any $d\in\mathbb{Q}^*/{\mathbb{Q}^*}^2$
we write $E^{(d)}$ for the quadratic twist
of $E$ defined by $d$.
Then $\text{Jac}(X)$ is isogenous over
$\mathbb{Q}$ to the product
\[
J_p \times \left(E_{24}\right)^2 \times
E_{24}^{(-1)} \times E_{24}^{(p)} \times
E_{24}^{(-p)} \times
\left(E_{32}^{(p)}\right)^3 \times E_{32}^{(2p)}
\times
\left(E_{96a}^{(-2)}\right)^2 \times
\left(E_{96a}^{(p)}\right)^2 \times
\left(E_{96a}^{(-p)}\right)^2.
\]
In particular the rank of $\text{Jac}(X)$ is determined
by that of $J_p$ and of the given twists of the
three elliptic curves
$E_{24}$, $E_{32}$, and $E_{96a}$.
Using analogs of Proposition~\ref{ellRedeiexample}
for various classes of primes $p$ provides
a natural approach towards bounding
$\mathrm{rank}\,\text{Jac}(X)(\mathbb{Q})$.
\end{document}
\begin{document}
\begin{abstract}
\emph{Jack characters} are a one-parameter deformation of the characters of the symmetric groups;
a deformation given by the coefficients in the expansion of Jack symmetric functions
in the basis of power-sum symmetric functions.
We study Jack characters from the viewpoint of the asymptotic representation theory.
In particular, we give explicit formulas for their asymptotically top-degree part,
in terms of bicolored oriented maps with an arbitrary face structure.
We also study their multiplicative structure and their structure constants
and we prove that they fulfill approximate factorization property,
a convenient tool for proving Gaussianity of fluctuations of random Young diagrams.
\end{abstract}
\subjclass[2010]{
Primary 05E05;
Secondary
20C30,
05C10,
05E10,
05E15.
}
\keywords{Jack polynomials, Jack characters, oriented maps, free cumulants,
Kerov polynomials, Kerov--Lassalle polynomials, structure coefficients, approximate factorization of characters}
\maketitle
For a given partition $\lambda\vdash n$
we consider the expansion of the corresponding Schur function in the basis of the power-sum symmetric functions:
\[
s_\lambda=\sum_{\pi\vdash n} \theta_{\pi}(\lambda)\ p_{\pi}.
\]
The normalized coefficient
\[ \chi_{\lambda}(\pi):= \frac{z_\pi}{n!} \theta_{\pi}(\lambda) = \tr \rho_\lambda(\pi) \]
turns out to be equal to the irreducible character of the symmetric group,
taken with respect to the \emph{normalized trace}
\[ \tr A := \frac{\Tr A}{\Tr 1}.\]
Above,
\[ z_\pi=\prod_i i^{m_i(\pi)}\ m_i(\pi)! \]
is the standard numerical factor, where
$m_i(\pi)$ is the number of parts of $\pi$ which are equal to $i$.
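For instance, for $\pi=(2,1,1)$ one has $m_1(\pi)=2$ and $m_2(\pi)=1$, so that $z_\pi=1^2\cdot 2!\cdot 2^1\cdot 1!=4$.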
This observation is the starting point and the initial motivation
for the following \emph{deformation
of the characters of the symmetric groups}.
Following the ideas of Lassalle \cite{Lassalle2008a},
for a given $\alpha>0$ and a partition $\lambda\vdash n$
we replace the Schur symmetric function by
the \emph{Jack polynomial} $J^{(\alpha)}_\lambda$ and consider the analogous expansion
in the basis of power-sum symmetric functions:
\begin{equation}
\label{eq:definition-theta-A}
J^{(\alpha)}_\lambda = \sum_{\pi\vdash n} \theta^{(\alpha)}_\pi(\lambda)\ p_\pi.
\end{equation}
For partitions $\pi,\lambda\vdash n$
we define the \emph{irreducible Jack character $\chi^{(\alpha)}_\lambda$} as
\begin{equation}
\label{eq:character-Jack-unnormalized-zmiana}
\chi^{(\alpha)}_\lambda(\pi) := \alpha^{-\frac{|\pi|-\ell(\pi)}{2}}\ \frac{z_\pi}{n!}\ \theta^{(\alpha)}_\pi(\lambda),
\end{equation}
where $\ell(\pi)$ denotes the number of parts of the partition $\pi$.
Those who like the analogy between the Jack characters and the characters of the symmetric groups
may heuristically think that the Young diagram $\lambda$ determines
some non-existent, mythical \emph{`Jack representation'}
and the partition $\pi$ determines a conjugacy class in the symmetric group $\Sym{n}$.
\emph{A growing collection of partial results, unproved conjectures and computer exploration indicates
that such irreducible Jack characters have a rich combinatorial and algebraic structure
which still remains elusive and resembles the one of the irreducible characters of the symmetric groups.}
In the current paper we regard Jack characters from the viewpoint
of the \emph{asymptotic representation theory} which, roughly speaking,
corresponds to the scaling in which the Young diagram $\lambda$ tends
in some sense to infinity and the conjugacy class $\pi$ remains fixed.
With this perspective in mind our results in this paper are twofold:
firstly, we will find the first-order asymptotics of
Jack characters on a fixed conjugacy class (see \cref{sec:intro1}); secondly, we
will investigate the asymptotics of the multiplicative structure of Jack characters
and their structure constants (see \cref{sec:intro2}).
\section{Introduction part 1: asymptotics of a single Jack character}
\label{sec:intro1}
\subsection{Jack polynomials}
\label{sec:jack-polynomials-motivations}
\emph{Jack polynomials} $\big( J^{(\alpha)}_\pi\big)$ \cite{Jack1970/1971} are
a family (indexed by an integer partition $\pi$) of symmetric functions
which depend on an additional parameter $\alpha$.
During the last forty years,
many connections of Jack polynomials with various fields of mathematics and physics were established:
it turned out that the combinatorial structure of Jack polynomials plays a crucial role in
understanding Ewens random permutations model \cite{DiaconisHanlon1992},
generalized $\beta$-ensembles and some statistical mechanics models
\cite{OkounkovOlshanski1997},
Selberg-type integrals \cite{Kaneko1993},
certain random partition models
\cite{Kerov2000,BorodinOlshanski2005,Matsumoto2008,DoleegaFeray2014},
and some problems of the algebraic geometry \cite{Nakajima1996},
among many others.
\subsection{Asymptotic representation theory viewpoint on Jack characters}
The usual way of viewing the characters of the symmetric groups is to fix the representation $\lambda$
and to consider the character as a function of the conjugacy class $\pi$.
However, there is also another very successful viewpoint due to Kerov and Olshanski \cite{KerovOlshanski1994},
called \emph{dual approach}, which suggests to
do roughly the opposite.
We will mention only one of its success stories, namely Kerov's
Central Limit Theorem and its generalizations \cite{Kerov1993gaussian,IvanovOlshanski2002,Sniady2006c}.
Lassalle \cite{Lassalle2008a,Lassalle2009} adapted this dual approach to the framework of Jack characters.
In order for the dual approach to be successful
one has to choose the most convenient normalization constants.
We will use the normalization introduced by Dołęga and F\'eray
\cite{DoleegaFeray2014}
which offers some advantages over the original normalization of Lassalle.
Thus, with the right choice of the multiplicative constant, the irreducible Jack character
$\chi_{\lambda}^{(\alpha)}(\pi)$ becomes the \emph{normalized Jack character
$\mathbb{C}h_\pi(\lambda)$}, defined as follows.
\begin{definition}
\label{def:jack-character-classical}
Let $\alpha>0$ be given and let $\pi\vdash n$ be a fixed partition.
For a partition $\lambda\vdash N$
we define the value of the corresponding \emph{normalized Jack character}
by
\begin{equation}
\label{eq:definition-Jack}
\mathbb{C}h_{\pi}(\lambda) :=
\begin{cases}
\underbrace{N (N-1) \cdots (N-n+1)}_{\text{$n$ factors}} \
\chi^{(\alpha)}_\lambda (\pi,1^{N-n})
&\text{if } N \ge n ,\\
0 & \text{if }N < n.
\end{cases}
\end{equation}
Each Jack character depends on the deformation parameter $\alpha$;
in order to keep the notation light we make this dependence implicit.
\end{definition}
In the above definition, the irreducible Jack character $\chi^{(\alpha)}_\lambda$
is evaluated on $(\pi,1^{N-n})$
which is simply the partition $\pi$ augmented by the necessary
number of parts, all equal to $1$.
This operation becomes very natural
if we look on the corresponding conjugacy classes
in the symmetric groups $\Sym{N}\supseteq \Sym{n}$:
this augmentation corresponds to adding the necessary number of fixpoints (=cycles of length $1$)
to a permutation from $\Sym{n}$ so that it becomes a permutation in $\Sym{N}$.
Thus, indeed, investigation of the Jack character $\mathbb{C}h_\pi$ as a function on the set
$\mathbb{Y}$ of Young diagrams (without any restrictions on the number of boxes) corresponds to
the scaling in which the Young diagram $\lambda$ tends to infinity while the
\emph{`conjugacy class'} $\pi$ is fixed.
\subsection{Preliminaries: the filtered algebra $\mathscr{P}$, the embeddings}
\subsubsection{The deformation parameters. Laurent polynomials}
In order to avoid dealing with the square root of the variable $\alpha$,
we introduce an indeterminate $A$ such that
\[ A^2 = \alpha.\]
Several quantities in this paper will be viewed as elements of $\mathbb{Q}\left[A,A^{-1}\right]$,
i.e., as Laurent polynomials in the variable $A$.
\subsubsection{$\alpha$-content}
The set of Young diagrams will be denoted by $\mathbb{Y}$.
For drawing Young diagrams we use the French convention and the usual Cartesian coordinate system;
in particular,
the box $(x,y)\in\mathbb{N}^2$ is the one in the intersection of
the column with the index $x$ and
the row with the index $y$.
We index the rows and the columns by the elements of the set
\[\mathbb{N}=\{1,2,\dots\}\]
of positive integers.
\begin{definition}
For a box $\Box=(x,y)$ of a Young diagram we define its \emph{$\alpha$-content} by
\begin{equation}
\label{eq:alpha-content}
\text{$\alpha$-$\content$}(\Box)=\text{$\alpha$-$\content$}(x,y):= A x - \frac{1}{A} y\in \mathbb{Q}\left[A,A^{-1}\right].
\end{equation}
\end{definition}
\subsubsection{The algebra $\mathscr{P}$ of $\alpha$-polynomial functions on the set of Young diagrams}
\label{sec:polynomial-functions}
For an integer $n\geq 2$ we consider a function $\mathcal{T}_n\colon\mathbb{Y}\to\mathbb{Q}\left[A,A^{-1}\right]$ given by
\[ \mathcal{T}_n (\lambda):= (n-1) \sum_{\Box\in\lambda} \big( \text{$\alpha$-$\content$}(\Box) \big)^{n-2}.\]
We denote by $\mathscr{P}$ the filtered unital algebra (over the field $\mathbb{Q}$
of rational numbers) which is generated by $\gamma, \mathcal{T}_2, \mathcal{T}_3, \dots$.
Above we view $\gamma$ as a constant function on $\mathbb{Y}$ given by
\begin{equation}
\label{eq:gamma}
\gamma := -A+\frac{1}{A}\in \mathbb{Q}\left[A,A^{-1}\right].
\end{equation}
The unit of this algebra is $1$ (=the function constantly equal to $1$).
The filtration on $\mathscr{P}$ is specified on the generators by
\begin{equation}
\label{eq:filtration}
\left\{
\begin{aligned}
\degg \gamma &= 1, \\
\degg \mathcal{T}_n &= n \qquad \text{for $n\geq 2$};
\end{aligned}
\right.
\end{equation}
in other words the set of elements of degree at most $d$ is spanned by
\[\left\{ \gamma^{d_1} \mathcal{T}_2^{d_2} \mathcal{T}_3^{d_3} \cdots \quad :
d_1,d_2,\ldots\geq 0, \sum_i i d_i \leq d \right\}. \]
The elements of this algebra $\mathscr{P}$ will be called
\emph{$\alpha$-polynomial functions on the set of Young diagrams}.
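For concreteness, the generators of $\mathscr{P}$ are easy to evaluate on a concrete Young diagram; a minimal sympy sketch (an illustration only, with the Young diagram given by its row lengths):
\begin{verbatim}
# sympy sketch (illustration only): evaluate gamma and T_n on a Young diagram
# given by its row lengths, as Laurent polynomials in A.
from sympy import symbols, expand

A = symbols('A')
gamma = -A + 1/A

def boxes(rows):
    return [(x, y) for y, r in enumerate(rows, start=1) for x in range(1, r + 1)]

def alpha_content(box):
    x, y = box
    return A*x - y/A

def T(n, rows):
    return expand((n - 1) * sum(alpha_content(b)**(n - 2) for b in boxes(rows)))

lam = [3, 1]                # the Young diagram (3,1)
print(T(2, lam))            # equals the number of boxes, here 4
print(T(3, lam))            # twice the sum of alpha-contents of the boxes
\end{verbatim}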
\subsubsection{Number of embeddings}
\label{sec:number-of-embeddings}
Let $G$ be a \emph{bicolored graph}, i.e., a bipartite graph together
with the choice of the coloring of the vertices.
We denote the set of its white (respectively, black) vertices
by $\mathcal{V}_{\circ}$ (respectively, $\mathcal{V}_{\bullet}$).
We will always assume that $G$ has no isolated vertices.
Furthermore, let $\lambda$ be a Young diagram.
\begin{definition}[\cite{FeraySniady2011a,DolegaFeraySniady2008}]
\label{def:embeddings}
We say that $f=(f_1,f_2)$ is an \emph{embedding} of $G$ into $\lambda$ if the functions
\[ f_1\colon \mathcal{V}_{\circ}\to\mathbb{N}, \qquad f_2\colon \mathcal{V}_{\bullet}\to\mathbb{N}\]
are such that the condition
\begin{equation}
\label{embedding:young}
\text{$\big( f_1(w), f_2(b) \big)$ is one of the boxes of $\lambda$}
\end{equation}
holds true for each pair of vertices $w\in \mathcal{V}_{\circ}$, $b\in \mathcal{V}_{\bullet}$ connected by an edge.
We denote by $N_G(\lambda)$ the number of embeddings of $G$ into $\lambda$.
\end{definition}
\begin{definition}
We define the \emph{normalized number of embeddings} as
\begin{equation}
\label{eq:normalized-embedding}
\mathfrak{N}_G (\lambda):=
{A}^{|\mathcal{V}_\circ(G)|} \left(- A^{-1}\right)^{|\mathcal{V}_\bullet(G)|}
\ N_{G}(\lambda) \in\mathbb{Q}\left[A,A^{-1}\right].
\end{equation}
\end{definition}
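For small graphs the number of embeddings $N_G(\lambda)$ can be computed by brute force; in the following sketch (plain Python, an illustration only) a bicolored graph is given by its list of edges $(w,b)$ and the Young diagram by its row lengths. Note that any embedding necessarily takes values in the first $\lambda_1$ columns and the first $\ell(\lambda)$ rows, so the search space is finite.
\begin{verbatim}
# Plain-Python sketch (illustration only): the number of embeddings N_G(lambda).
from itertools import product

def N_embeddings(edges, rows):
    whites = sorted({w for w, b in edges})
    blacks = sorted({b for w, b in edges})
    n_rows, n_cols = len(rows), max(rows)
    count = 0
    for f1 in product(range(1, n_cols + 1), repeat=len(whites)):
        col = dict(zip(whites, f1))
        for f2 in product(range(1, n_rows + 1), repeat=len(blacks)):
            row = dict(zip(blacks, f2))
            if all(col[w] <= rows[row[b] - 1] for w, b in edges):
                count += 1
    return count

# a single edge: N_G(lambda) is just the number of boxes of lambda
print(N_embeddings([("w", "b")], [3, 1]))    # 4
\end{verbatim}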
\begin{definition}
\label{def:bicolored-graph-to-permutations}
To a pair $(\sigma_1,\sigma_2)\in\Sym{n}\times \Sym{n}$ of permutations one can associate a natural bicolored graph
$G(\sigma_1,\sigma_2)$
with the white vertices $\mathcal{V}_{\circ}:=C(\sigma_1)$ corresponding to the cycles of $\sigma_1$ and
the black vertices $\mathcal{V}_{\bullet}:=C(\sigma_2)$ corresponding to the cycles of $\sigma_2$.
A pair of vertices $w\in C(\sigma_1)$, $b\in C(\sigma_2)$ is connected by an edge
if the corresponding cycles are not disjoint.
We will write
\begin{align*}
N_{\sigma_1,\sigma_2}(\lambda) & :=N_{G(\sigma_1,\sigma_2)}(\lambda), \\
\mathfrak{N}_{\sigma_1,\sigma_2}(\lambda) & :=\mathfrak{N}_{G(\sigma_1,\sigma_2)}(\lambda).
\end{align*}
\end{definition}
\subsection{The first main result}
\subsubsection{Top-degree asymptotics of Jack characters}
We say that \emph{$\langle \sigma_1,\sigma_2 \rangle$ is transitive}
if the group generated by the permutations $\sigma_1,\sigma_2\in \Sym{n}$
acts transitively on the underlying set $[n]=\{1,\dots,n\}$.
We define a function $\mathbb{C}htt_n\colon\mathbb{Y}\to\mathbb{Q}\left[A,A^{-1}\right]$ given by
\begin{equation}
\label{eq:top-top-top}
\mathbb{C}htt_n :=
\frac{-1}{(n-1)!} \sum_{\substack{\sigma_1,\sigma_2\in \Sym{n} \\
\langle \sigma_1,\sigma_2 \rangle \text{ is transitive}}}
\gamma^{n+1-|C(\sigma_1)|-|C(\sigma_2)|}
\ \mathfrak{N}_{\sigma_1,\sigma_2},
\end{equation}
where $C(\pi)$ denotes the set of cycles of a permutation $\pi$.
Note that the transitivity implies that the exponent
\[ n+1-|C(\sigma_1)|-|C(\sigma_2)|\]
is always non-negative.
We will show later (in \cref{thm:degree-of-Jack-character})
that the Jack character $\mathbb{C}h_n\in\mathscr{P}$
is of degree at most $n+1$.
The following result identifies $\mathbb{C}htt_n$ defined by \eqref{eq:top-top-top}
as the top-degree part of $\mathbb{C}h_n\in\mathscr{P}$.
\begin{theorem}[The first main result]
\label{theo:second-main-bis}
For each $n\geq 1$ the function
\begin{equation}
\label{eq:top-degree-of-character}
\mathbb{C}h_n - \mathbb{C}htt_n
\end{equation}
is an element of $\mathscr{P}$
of degree at most $n-1$.
\end{theorem}
We can write this result as the following approximate equality in $\mathscr{P}$
which gives the dominant contribution for Jack characters with respect to the filtration which we consider:
\[ \mathbb{C}h_n \approx \mathbb{C}htt_n.\]
The proof is postponed to \cref{sec:proof}.
\subsubsection{Top-degree of Jack characters in terms of labeled maps}
\label{sec:labeled-maps}
Recall that a \emph{map} \cite{LandoZvonkin2004}
is a graph $G$ (possibly, with multiple edges) drawn on a surface $\Sigma$.
We denote the vertex set by $\mathcal{V}$ and the edge set by $\mathcal{E}$.
As usual, we assume that $\Sigma\setminus \mathcal{E}$ is homeomorphic to a collection of open discs.
The sum in \eqref{eq:top-top-top} is taken over the set
\begin{equation}
\label{eq:magicset}
\mathcal{X}_n:= \big\{ (\sigma_1,\sigma_2) \in \Sym{n} \times \Sym{n}
: \langle \sigma_1,\sigma_2 \rangle \text{ is transitive} \big\}.
\end{equation}
To any pair $(\sigma_1,\sigma_2)\in\mathcal{X}_n$ in this set we can canonically associate
a map $M$ which is:
\begin{itemize}
\item \emph{labeled, with $n$ edges}, i.e., each edge carries some label from the set
$[n]$ and each label is used exactly once;
\item \emph{bicolored}, i.e., the set of vertices $\mathcal{V}=\mathcal{V}(M)$ is decomposed $\mathcal{V}=\mathcal{V}_{\circ}\sqcup \mathcal{V}_{\bullet}$
into the set $\mathcal{V}_{\circ}=\mathcal{V}_{\circ}(M)$ of white vertices and the set $\mathcal{V}_{\bullet}=\mathcal{V}_{\bullet}(M)$
of black vertices;
each edge connects two vertices with the opposite colors;
\item \emph{connected}, i.e., the graph $G$ is connected;
\item \emph{oriented}, i.e., the surface $\Sigma$ is orientable and has some fixed orientation.
\end{itemize}
This correspondence follows from the observation that
the structure of such a map is uniquely determined by the counterclockwise cyclic order of the edges
around the white vertices
(which we declare to be encoded by the disjoint cycle decomposition of the permutation $\sigma_1$)
and by the counterclockwise cyclic order of the edges around the black vertices
(which we declare to be encoded by the disjoint cycle decomposition of the permutation $\sigma_2$).
\begin{example}
\label{example:map-on-torus}
The map shown in \cref{fig:torus} corresponds to the pair
\[ \sigma_1=(1,4,9,5,7)(2,6)(3,8), \qquad \sigma_2=(1,9)(2,3,5)(4,7)(6,8).\]
\end{example}
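For this pair one can verify directly that $\langle \sigma_1,\sigma_2\rangle$ acts transitively on $[9]$ and that the exponent of $\gamma$ in \eqref{eq:top-top-top} equals $9+1-3-4=3$. A short sketch (plain Python, an illustration only):
\begin{verbatim}
# Plain-Python sketch (illustration only) for the pair from the example.
def from_cycles(n, cycs):
    perm = {i: i for i in range(1, n + 1)}
    for c in cycs:
        for a, b in zip(c, c[1:] + c[:1]):
            perm[a] = b
    return perm

def num_cycles(perm):
    seen, cnt = set(), 0
    for s in perm:
        if s not in seen:
            cnt += 1
            while s not in seen:
                seen.add(s); s = perm[s]
    return cnt

def is_transitive(n, perms):
    reached, frontier = {1}, [1]
    while frontier:
        i = frontier.pop()
        for p in perms:
            if p[i] not in reached:
                reached.add(p[i]); frontier.append(p[i])
    return len(reached) == n

n = 9
s1 = from_cycles(n, [(1, 4, 9, 5, 7), (2, 6), (3, 8)])
s2 = from_cycles(n, [(1, 9), (2, 3, 5), (4, 7), (6, 8)])
print(is_transitive(n, [s1, s2]))                     # True
print(n + 1 - num_cycles(s1) - num_cycles(s2))        # 3
\end{verbatim}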
Due to this correspondence the sum in \eqref{eq:top-top-top} can be viewed
as a summation over \emph{labeled, oriented, connected maps}.
\begin{figure}
\caption{A bicolored, oriented map with $n=9$ labeled edges drawn on the torus, corresponding to the pair of permutations from \cref{example:map-on-torus} (left), and the corresponding rooted map with unlabeled edges (right). Drawing omitted.}
\label{fig:torus}
\label{fig:torus-rooted}
\end{figure}
\subsubsection{Top-degree of Jack characters in terms of unlabeled maps}
Informally speaking, an \emph{unlabeled, rooted, oriented map with $n$ edges} is a labeled, oriented map,
from which all labels have been removed,
except for a single edge. This special edge is called the \emph{root edge}.
For an example, see \cref{fig:torus-rooted}.
This concept can be formalized as follows: on the set of labeled, oriented maps with $n$ edges
we consider the action of the symmetric group
\begin{equation}
\label{eq:symmetric-group-sn-1}
\Sym{n-1}:=\{\pi\in\Sym{n}: \pi(n)=n \}
\end{equation}
by the permutation of the labels of the edges.
An \emph{unlabeled map} is defined as an orbit of this action.
The \emph{root edge} is defined as the edge with the label $n$, which is invariant under the action of
$\Sym{n-1}$.
Such unlabeled maps are in a bijective correspondence with the equivalence classes in $\mathcal{X}_n/\sim$
with respect to the following equivalence relation:
\[ (\sigma_1,\sigma_2) \sim (\sigma_1',\sigma_2') \iff
\bigexists_{\substack{\pi\in\Sym{n},\\ \pi(n)=n }} \sigma'_i=\pi \sigma_i \pi^{-1} \text{ for each } i\in\{1,2\}.
\]
The equivalence classes are nothing else but the orbits of the obvious action of the group $\Sym{n-1}$
on $\mathcal{X}_n$ by coordinate-wise conjugation.
Let $\pi\in\Stab(\sigma_1,\sigma_2)\subseteq\Sym{n-1}$
belong to the stabilizer of some $(\sigma_1,\sigma_2)\in\mathcal{X}_n$
with respect to the above action of $\Sym{n-1}$;
in other words
\begin{equation}
\label{eq:stabilizer}
\sigma_i =\pi \sigma_i \pi^{-1} \text{ for each } i\in\{1,2\}.
\end{equation}
The set of fixpoints of $\pi$ is non-empty (it contains, for example, $n$).
Furthermore, if $x\in [n]$ is a fixpoint of $\pi$,
then \eqref{eq:stabilizer} implies that $\sigma_i(x)$ is also a fixpoint.
As $\langle \sigma_1,\sigma_2 \rangle$ is transitive, it follows
that all elements of $[n]$ are fixpoints, thus $\pi=\id$. In this way we proved that
$\Stab(\sigma_1,\sigma_2)=\{\id\}$, thus each equivalence class consists of exactly
$\frac{\left| \Sym{n-1} \right|}{\left| \Stab(\sigma_1,\sigma_2) \right|}=(n-1)!$ elements.
Since the number of embeddings $\mathfrak{N}_{\sigma_1,\sigma_2}$ is constant on each
equivalence class, we have proved the following result.
\begin{corollary}
\label{coro:nonoriented-maps}
The top-degree of Jack character \eqref{eq:top-top-top} can be written as
a sum over
\emph{rooted, oriented, bicolored, connected maps $M$ with $n$ unlabeled edges}:
\begin{equation}
\label{eq:top-top-top2}
\mathbb{C}htt_n =
(-1) \sum_{M}
\gamma^{n+1-|\mathcal{V}(M)|}
\ \mathfrak{N}_{M}.
\end{equation}
\end{corollary}
\subsubsection{Application: Kerov--Lassalle polynomials}
In the context of the asymptotic representation theory
a convenient way of parametrizing the shape of a Young diagram
is provided by \emph{free cumulants} \cite{Biane1998}.
For an integer $k\geq 2$ the corresponding free cumulant
$\mathcal{R}_k\colon\mathbb{Y}\to\mathbb{Q}\left[A,A^{-1}\right]$ is a function on the set of Young diagrams
defined as
\begin{equation}
\label{eq:definition-free-cumulant}
\mathcal{R}_k(\lambda) := (-1) \sum_{\sigma_1,\sigma_2} \mathfrak{N}_{\sigma_1,\sigma_2}(\lambda),
\end{equation}
where sum in \eqref{eq:definition-free-cumulant} runs over
pairs of permutations $\sigma_1,\sigma_2\in\Sym{k-1}$ with the property that:
\begin{enumerate}[label=(\alph*)]
\item \label{item:free-cumulant-A} their product $\sigma_1 \sigma_2=(1,2,\dots,k-1)$ is the full cycle, and
\item \label{item:free-cumulant-B} their total number of cycles fulfills $|C(\sigma_1)|+|C(\sigma_2)|=k$.
\end{enumerate}
Such pairs $(\sigma_1,\sigma_2)$ can be identified with plane rooted trees.
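Since plane rooted trees with $m$ edges are counted by the Catalan number $\operatorname{Cat}_m$, the number of such pairs equals $\operatorname{Cat}_{k-1}$; for small $k$ this is easy to confirm by brute force. A short sketch (plain Python, an illustration only):
\begin{verbatim}
# Plain-Python sketch (illustration only): for small k, count the pairs
# (sigma_1, sigma_2) in Sym(k-1) x Sym(k-1) with sigma_1*sigma_2 = (1,...,k-1)
# and |C(sigma_1)| + |C(sigma_2)| = k, and compare with the Catalan numbers.
from itertools import permutations
from math import comb

def num_cycles(p):                       # p is a tuple, p[i-1] = image of i
    seen, cnt = set(), 0
    for s in range(1, len(p) + 1):
        if s not in seen:
            cnt += 1
            while s not in seen:
                seen.add(s); s = p[s - 1]
    return cnt

def count_pairs(k):
    m = k - 1
    cyc = tuple(list(range(2, m + 1)) + [1])            # the cycle (1,2,...,m)
    total = 0
    for s1 in permutations(range(1, m + 1)):
        inv = tuple(sorted(range(1, m + 1), key=lambda i: s1[i - 1]))
        s2 = tuple(inv[cyc[i] - 1] for i in range(m))    # forced by condition (a)
        if num_cycles(s1) + num_cycles(s2) == k:         # condition (b)
            total += 1
    return total

for k in range(2, 7):
    print(k, count_pairs(k), comb(2*k - 2, k - 1) // k)  # Catalan(k-1)
\end{verbatim}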
It turns out (cf.~\cref{prop:generate-the-same=free})
that the filtered unital algebra $\mathscr{P}$ can be alternatively viewed as generated by
$\gamma,\mathcal{R}_2,\mathcal{R}_3,\dots$ with the degrees of the generators
\begin{equation}
\label{eq:filtration-5}
\left\{
\begin{aligned}
\degg \gamma &= 1, \\
\degg \mathcal{R}_n &= n \qquad \text{for $n\geq 2$}.
\end{aligned}
\right.
\end{equation}
Each Jack character can be expressed in terms of these generators
by \emph{Kerov--Lassalle polynomials};
for example
\begin{equation}
\label{eq:Kerov-example}
\mathbb{C}h_4 =\underbrace{\mathcal{R}_{5} + 6 \mathcal{R}_{4} \gamma + \mathcal{R}_{2}^{2} \gamma + 11 \mathcal{R}_{3} \gamma^{2} + 6
\mathcal{R}_{2} \gamma^{3}}_{\mathbb{C}htt_4} + 5 \mathcal{R}_{3} + 7 \mathcal{R}_{2} \gamma.
\end{equation}
Some partial theoretical results \cite{Lassalle2009} as well as computer exploration support the conjecture
that all coefficients of such a polynomial for $\mathbb{C}h_n$ are non-negative integers.
There are some fairly standard techniques \cite{DolegaFeraySniady2008} which can be used to find an explicit form
of Kerov--Lassalle polynomial for $\mathbb{C}htt_n$ given by \cref{coro:nonoriented-maps}.
In particular, it follows that such a Kerov--Lassalle polynomial is homogeneous of degree $n+1$
and is a sum of monomials with non-negative integer coefficients.
\cref{theo:second-main-bis} implies therefore that \emph{the homogeneous part of degree $n+1$
of Kerov--Lassalle polynomial for $\mathbb{C}h_n$ is equal to
the analogous Kerov--Lassalle polynomial for $\mathbb{C}htt_n$}.
In the example \eqref{eq:Kerov-example} this homogeneous part was indicated by the curly braces.
\begin{corollary}
\label{coro:Kerov-Lassalle}
For each integer $n\geq 1$, the homogeneous part of degree $n+1$
of Kerov--Lassalle polynomial for $\mathbb{C}h_n$
is a sum of monomials in the generators \eqref{eq:filtration-5}
with \emph{non-negative integer coefficients}.
\end{corollary}
The details of the proof are postponed to \cref{sec:proof-theo-kerov-lassalle}.
\subsubsection{Top-degree of Jack characters in terms of weighted unicellular maps}
We refer to \cite{Czyzewska-Jankowska2017} for an equivalent formula for
$\mathbb{C}htt_n$ which is expressed in terms of \emph{unicellular non-oriented maps}, weighted
according to some specific measure of non-orientability.
It is also
worth mentioning the long-standing open problem formulated by
Goulden and Jackson \cite{Goulden1996}, known as the
$b$-conjecture. Goulden and Jackson defined certain
rational functions related to Jack polynomials and conjectured
that these are, in fact, polynomials
with non-negative integer coefficients. Dołęga \cite{Dolega2017a} found
the top-degree part of these expressions, which shows striking similarities to our formulas. Although we cannot
translate one of these results into the other, we cannot resist stating
that there has to be a strong connection between the two problems.
\section{Introduction part 2: the multiplicative structure of Jack characters}
\label{sec:intro2}
\subsection{Conditional cumulants}
\label{sec:conditional-cumulants}
Let $\mathcal{A}$ and $\mathcal{B}$ be commutative unital algebras
and let $\mathbb{E}\colon\mathcal{A}\to\mathcal{B}$ be a unital linear map.
We will refer to $\mathbb{E}$ as a \emph{conditional expectation}.
For any tuple $x_1,\dots,x_n\in\mathcal{A}$ we define their \emph{conditional cumulant} as
\begin{multline}
\label{eq:what-is-cumulant}
\condKumu{\mathcal{A}}{\mathcal{B}}(x_1,\dots,x_n) =
[t_1 \cdots t_n] \log \mathbb{E}\, e^{t_1 x_1+\dots+t_n x_n} = \\
\left. \frac{\partial^n}{\partial t_1 \cdots \partial t_n}
\log \mathbb{E}\, e^{t_1 x_1+\dots+t_n x_n} \right|_{t_1=\cdots=t_n=0} \in\mathcal{B}
\end{multline}
where the operations on the right-hand side should be understood
in the sense of formal power series in the variables
$t_1,\dots,t_n$.
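For instance, extracting the coefficients of $t_1$ and of $t_1t_2$ in \eqref{eq:what-is-cumulant} gives the familiar low-order expressions
\[ \condKumu{\mathcal{A}}{\mathcal{B}}(x_1)=\mathbb{E} x_1,
\qquad
\condKumu{\mathcal{A}}{\mathcal{B}}(x_1,x_2)=\mathbb{E}(x_1 x_2)-(\mathbb{E} x_1)(\mathbb{E} x_2), \]
so that the conditional cumulants measure the failure of $\mathbb{E}$ to be multiplicative.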
\subsection{Approximate factorization property}
\begin{definition}
Let $\mathcal{A}$ and $\mathcal{B}$ be filtered unital algebras and let $\mathbb{E}\colon\mathcal{A}\rightarrow\mathcal{B}$ be a
unital linear map.
We say that \emph{$\mathbb{E}$ has the approximate factorization property}
\cite{Sniady2006c}
if for all $l\geq 1$ and all choices of $x_1,\dots,x_l\in\mathcal{A}$ we have that
\[ \degg_{\mathcal{B}} k_{\mathcal{A}}^{\mathcal{B}}(x_1,\dots,x_l) \leq
\left(\degg_{\mathcal{A}} x_1\right) + \cdots + \left(\degg_{\mathcal{A}} x_l \right)-2(l-1).\]
\end{definition}
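For instance, for $l=1$ the condition states that $\mathbb{E}$ does not increase degrees, while for $l=2$ it requires
\[ \degg_{\mathcal{B}}\big(\mathbb{E}(x_1x_2)-(\mathbb{E} x_1)(\mathbb{E} x_2)\big)\leq
\degg_{\mathcal{A}} x_1+\degg_{\mathcal{A}} x_2-2, \]
i.e., the second cumulant has degree smaller by two than the trivial bound for $\mathbb{E}(x_1x_2)$.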
\subsection{Disjoint product}
We introduce a parameter
\[ \delta := - \gamma, \]
cf.~\eqref{eq:gamma}, on which Jack characters depend implicitly.
We will show later in \cref{prop:filtration-by-characters}
that if we regard $\mathscr{P}$ as a $\mathbb{Q}[\delta]$-module, it is a free module with the basis $(\mathbb{C}h_\pi)$
where $\pi$ runs over the set of partitions.
We will also show that
with respect to this basis the filtration on $\mathscr{P}$ from \eqref{eq:filtration}
can be equivalently defined by
\begin{equation}
\label{eq:filtration-2}
\left\{
\begin{aligned}
\degg \delta &= 1, \\
\degg \mathbb{C}h_{\pi} &= |\pi|+\ell(\pi) \qquad \text{for any partition $\pi$.}
\end{aligned}
\right.
\end{equation}
This allows us to define a new multiplication on $\mathscr{P}$ (which we call \emph{disjoint product})
by setting on the generators
\[ \mathbb{C}h_{\pi} \bullet \mathbb{C}h_{\sigma} := \mathbb{C}h_{\pi\sigma}, \]
where $\pi \sigma$ denotes the \emph{concatenation} of the partitions $\pi$ and $\sigma$.
For example,
\[ \mathbb{C}h_{4,3,1} \bullet \mathbb{C}h_{5,4,2} = \mathbb{C}h_{5,4,4,3,2,1}. \]
It is easy to check that this product is commutative and associative;
the linear space of $\alpha$-polynomial functions equipped with this multiplication becomes
an algebra which will be denoted by $\mathscr{P}_\bullet$.
Thanks to \eqref{eq:filtration-2} it is easy to check that the usual filtration on $\mathscr{P}$
is compatible also with this product; in this way $\mathscr{P}_\bullet$ becomes a filtered algebra.
\subsection{Cumulants $\kappaDisjointPoint$}
We consider the filtered unital algebras $\mathscr{P}_\bullet$ and
$\mathscr{P}$, and as a conditional expectation between them we take the identity map
\begin{equation}
\label{eq:commutative-diagram-A}
\begin{tikzpicture}[node distance=2cm, auto, baseline=-0.8ex]
\node (A) {$\mathscr{P}_\bullet$};
\node (B) [right of=A] {$\mathscr{P}.$};
\draw[->] (A) to node {$\id$} (B);
\end{tikzpicture}
\end{equation}
The corresponding cumulants will be denoted by $\kappaDisjointPoint$.
Computer exploration suggests that the expansions of the cumulants
$\kappaDisjointPoint$ in the module basis \eqref{eq:filtration-2}
take a form which is interesting from the viewpoint of algebraic combinatorics
and encourages stating the following conjecture.
\begin{conjecture}
\label{conj:connection-coefficients-steroids}
For partitions $\pi_1,\dots,\pi_\ell$ we consider the expansion
\begin{equation}
\label{eq:steroids}
\kappaDisjointPoint(\mathbb{C}h_{\pi_1},\dots,\mathbb{C}h_{\pi_\ell}) = \sum_{\sigma} d^{\sigma}_{\pi_1,\dots,\pi_\ell}(\delta)
\ \mathbb{C}h_\sigma.
\end{equation}
Then $(-1)^{\ell-1} d^{\sigma}_{\pi_1,\dots,\pi_\ell} \in \mathbb{Q}[\delta]$
is a polynomial with non-negative integer coefficients.
\end{conjecture}
\subsection{The second main result:
approximate factorization property}
\label{sec:main-cumulants}
\cref{conj:connection-coefficients-steroids} is beyond our reach.
Nevertheless we will prove a partial result about the form of the left-hand side of \eqref{eq:steroids},
namely that $\kappaDisjointPoint(\mathbb{C}h_{\pi_1},\dots,\mathbb{C}h_{\pi_\ell})\in\mathscr{P}$ is of degree at most
\[ \sum_{i} \big( |\pi_i| + \ell(\pi_i) \big) - 2(\ell-1). \]
This concrete claim can be reformulated in an abstract language as follows.
\begin{theorem}[The second main result]
\label{theo:factorization-of-characters}
The identity map
\begin{equation*}
\begin{tikzpicture}[node distance=2cm, auto, baseline=-0.8ex]
\node (A) {$\mathscr{P}_\bullet$};
\node (B) [right of=A] {$\mathscr{P}.$};
\draw[->] (A) to node {$\id$} (B);
\end{tikzpicture}
\end{equation*}
has the approximate factorization property.
\end{theorem}
In the special case of the characters of the symmetric groups (i.e.~$A=1$, $\gamma=0$) this result was proved in \cite{Sniady2006c}.
The proof will be presented in \cref{sec:key-tool-proof}.
In a forthcoming joint paper with Dołęga
\cite{DolegaSniady2014} we will present applications of this result to
investigations of some natural models of random Young diagrams related to Jack polynomials.
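In the simplest non-trivial case $l=2$, \cref{theo:factorization-of-characters} states (via the explicit form of the cumulant for $l=2$, cf.~the example following the definition of the approximate factorization property) that for arbitrary partitions $\pi$ and $\sigma$ the difference
\[ \mathbb{C}h_{\pi\sigma} - \mathbb{C}h_\pi\, \mathbb{C}h_\sigma \in \mathscr{P} \]
between the disjoint product and the usual pointwise product is of degree at most
$\big(|\pi|+\ell(\pi)\big)+\big(|\sigma|+\ell(\sigma)\big)-2$;
in other words, the two products coincide up to terms of lower degree.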
\section{Heuristics: Towards the proof}
\subsection{The key tool}
\label{sec:keykeykeytool}
The main difficulty in both main results of this paper
(\cref{theo:second-main-bis} and
\cref{theo:factorization-of-characters})
is to show that a given $\alpha$-polynomial function $F\in\mathscr{P}$ is of smaller degree than
one would expect from some trivial bounds.
Our key tool will be \cref{lem:keylemma}
which provides three conditions:
\ref{zero:topdegree},
\ref{zero:vanishing}, and
\ref{zero:laurent}
which together guarantee that $F\in\mathscr{P}$ is of smaller degree than
initially suspected. The only really troublesome one among them is condition \ref{zero:vanishing},
which we shall discuss in the following.
\subsection{Finite difference operators}
\label{sec:why-finite-difference-nice}
Roughly speaking, the latter condition \ref{zero:vanishing}
is formulated in terms of the finite difference operators $\Delta_{\lambda_1},\Delta_{\lambda_2},\dots$
adapted to the context of functions $F(\lambda_1,\lambda_2,\dots)$ on the set $\mathbb{Y}$ of Young diagrams.
This approach is hardly surprising as the finite difference operators have a long record of being useful
in combinatorics. In our context, when $F\in\mathscr{P}$ is an $\alpha$-polynomial function
and hence
$(\lambda_1,\dots,\lambda_k)\mapsto F(\lambda_1,\dots,\lambda_k)$ is a multivariate polynomial
in the lengths of the rows of a Young diagram, it is convenient that the application of
each finite difference operator decreases the degree of this multivariate polynomial by one.
\subsection{The difficulty}
The subtle issue is that
for a function $F\colon \mathbb{Y}\to\mathbb{Q}\left[A,A^{-1}\right]$ on the set of Young diagrams,
the evaluation on a Young diagram $(\lambda_1,\dots,\lambda_k)$
\begin{multline}
\label{eq:finite-difference-explanation}
\left( \Delta_{\lambda_1} \cdots \Delta_{\lambda_k} F\right)(\lambda_1,\dots,\lambda_k)= \\
\sum_{\epsilon_1,\dots,\epsilon_k\in\{0,1\} } (-1)^{k-(\epsilon_1+\cdots+\epsilon_k)}
F(\lambda_1+\epsilon_1,\dots,\lambda_k+\epsilon_k)
\end{multline}
is a linear combination (with integer coefficients) of the values of $F$ on
vectors $(\lambda_1+\epsilon_1,\dots,\lambda_k+\epsilon_k)$ which might \emph{not} be Young diagrams;
therefore the function $F$ might not be well-defined there.
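For a concrete instance of this phenomenon, take $k=2$ and the Young diagram $\lambda=(3,3)$: the choice $(\epsilon_1,\epsilon_2)=(0,1)$ in \eqref{eq:finite-difference-explanation} requires the value $F(3,4)$, even though $(3,4)$ is not a Young diagram.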
The way to overcome this difficulty is to extend in some convenient way
the domain of the multivariate function
\[ \mathbb{Y}\ni (\lambda_1,\dots,\lambda_k) \mapsto F(\lambda_1,\dots,\lambda_k); \]
for the extension
\begin{equation}
\label{eq:extension-o-co-chodzi}
\mathbb{N}_0^k \ni (\lambda_1,\dots,\lambda_k) \mapsto F^{\sym}(\lambda_1,\dots,\lambda_k)
\end{equation}
the corresponding analogue of \eqref{eq:finite-difference-explanation} is well-defined.
Regretfully, this extension \eqref{eq:extension-o-co-chodzi} is no longer given by
a multivariate polynomial. For this reason it is not clear if the virtues
of the finite difference operators which we discussed in \cref{sec:why-finite-difference-nice}
are still applicable. Since the objects which we work with are no longer polynomials,
we cannot say that the finite difference operator decreases their degree.
\subsection{Solution: row functions}
A solution which we present in the current paper
is to replace the notion of multivariate polynomials
by a larger algebra of functions
(\emph{the algebra of row functions $\mathscr{R}$}) which would be more compatible with the
aforementioned procedure of extension of the domain.
The difficulty is to define the filtration on this algebra in such a way that
the application of the finite difference operator would still
decrease the degree.
\subsection{Content of the paper}
The only information about the Jack characters that
is necessary for our purposes is contained in the work of
Dołęga and F\'eray \cite{DoleegaFeray2014}.
We review their findings in \cref{sec:vk-scaling}.
\cref{sec:RST} provides technical tools
which are necessary to translate the results of Dołęga and F\'eray
to our notations.
In \cref{sec:row-functions} we introduce the aforementioned algebra $\mathscr{R}$
of row functions.
In \cref{sec:how-to-prove-small} we state the key tool
(which was discussed in \cref{sec:keykeykeytool}) for proving the degree bounds for
$\alpha$-polynomial functions.
\cref{sec:afp-vertical,sec:cumulants-long-diagonal,sec:key-tool-proof}
are devoted specifically to the proof of the second main result,
\cref{theo:factorization-of-characters}.
\cref{sec:degree-n+1,sec:proof} are devoted specifically to the proof of the first main result,
\cref{theo:second-main-bis}.
\section{Preliminaries: Various functionals of Young diagrams}
\label{sec:RST}
\subsection{Smooth functionals of shape}
For an integer $n\geq 2$ we define the \emph{(anisotropic) functional of shape}
\begin{equation}
\label{eq:smooth-functional}
\mathcal{S}_n (\lambda):=
(n-1) \iint_{(x,y)\in\lambda} \big( \text{$\alpha$-$\content$}(x,y) \big)^{n-2}\, \mathrm{d}x\, \mathrm{d}y \in\mathbb{Q}\left[A,A^{-1}\right],
\end{equation}
where the integral is taken over the Young diagram $\lambda$ viewed as a subset of $\mathbb{R}^2$;
in other words it is an integration over $x$ and $y$ such that
\[ y> 0 \qquad \text{and} \qquad 0< x \leq \lambda_{\lceil y \rceil}.\]
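For instance, in the simplest case $n=2$ the integrand in \eqref{eq:smooth-functional} is constant equal to $1$, so $\mathcal{S}_2(\lambda)=|\lambda|$ is the number of boxes of $\lambda$, independently of the value of $A$.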
\subsection{Anisotropic Stanley polynomials}
\subsubsection{Multirectangular coordinates}
\label{sec:multirectangular-coordinates}
\begin{figure}
\caption{Multirectangular Young diagram $P'\times Q'$.}
\label{fig:multirectangular}
\end{figure}
We start with \emph{anisotropic multirectangular coordinates} $P=(p_1,\dots,p_\ell)$ and $Q=(q_1,\dots,q_\ell)$.
They give rise to \emph{isotropic multirectangular coordinates} given by
\begin{align*}
P'=(p'_1,\dots,p'_\ell):&= \left( A p_1, \dots, A p_\ell \right),\\
Q'=(q'_1,\dots,q'_\ell):&= \left( \frac{1}{A} q_1, \dots, \frac{1}{A} q_\ell \right).
\end{align*}
Suppose that $A\in\mathbb{R}\setminus\{0\}$ and $P,Q$ are such that
$P'=(p'_1,\dots,p'_\ell)$ and $Q'=(q'_1,\dots,q'_\ell)$ are sequences of non-negative integers
such that $q'_1\geq \cdots \geq q'_\ell$; we consider the \emph{multirectangular} Young diagram
\[P' \times Q' = (\underbrace{q'_1,\dots,q'_1}_{\text{$p'_1$ times}},\dots,
\underbrace{q'_\ell,\dots,q'_\ell}_{\text{$p'_\ell$ times}}).\]
This concept is illustrated in \cref{fig:multirectangular}.
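For instance, for $\ell=2$, $P'=(2,1)$ and $Q'=(3,1)$ the resulting multirectangular Young diagram is $P'\times Q'=(3,3,1)$, i.e., two rows of length $3$ followed by one row of length $1$.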
\subsubsection{Anisotropic Stanley polynomials}
\label{subsec:stanley-polynomials}
Let $\St=(\St_1,\St_2,\dots)$ be a sequence of polynomials such that for each $\ell\geq 1$
\[\St_\ell=\St_\ell(\gamma;p_1,\dots,p_\ell;q_1,\dots,q_\ell)=\St_\ell(\gamma; P; Q) \]
is a polynomial in $2\ell+1$ variables and
\[ \St_{\ell+1}(\gamma;p_1,\dots,p_\ell,0;q_1,\dots,q_\ell,0)=\St_\ell(\gamma;p_1,\dots,p_\ell;q_1,\dots,q_\ell). \]
We assume furthermore that the degrees of the polynomials $\St_1,\St_2,\dots$ are uniformly bounded by some integer $d$;
we say then that the degree of $\St$ is at most $d$.
\begin{definition}
\label{def:stanley-polynomial}
Let $F\colon \mathbb{Y}\to \left\langleurent$ be a function on the set $\mathbb{Y}$ of Young diagrams.
Suppose that the equality
\[ F(P'\times Q')=\St_\ell(\gamma;P ; Q)\]
--- with the usual substitution \eqref{eq:gamma} for the variable $\gamma$ ---
holds true for all choices of $\ell\geq 1$, $P$, $Q$ and $A\neq 0$ for which the multirectangular diagram $P'\times Q'$ is well-defined.
Then we say that $\St$ is the \emph{anisotropic Stanley polynomial} for $F$.
For a given function $F$, the corresponding Stanley polynomial, if it exists, is unique
(in order to show this, one can adapt the corresponding part of the proof of \cite[Lemma 2.4]{DolegaFeraySniady2013}).
\end{definition}
\subsection{Isotropic Stanley polynomials}
\label{subsec:stanley-polynomials-isotropic}
We consider now the specialization of the concept of anisotropic Stanley polynomials
to the special choice of $A=1$, $\gamma=0$. The resulting objects will be called
\emph{isotropic Stanley polynomials}.
More specifically,
let $\St'=(\St'_1,\St'_2,\dots)$ be a sequence of polynomials such that for each $\ell\geq 1$
\[\St'_\ell=\St'_\ell(p_1,\dots,p_\ell;q_1,\dots,q_\ell)=\St'_\ell(P; Q) \]
is a polynomial in $2\ell$ variables and
\[ \St'_{\ell+1}(p_1,\dots,p_\ell,0;q_1,\dots,q_\ell,0)=\St'_\ell(p_1,\dots,p_\ell;q_1,\dots,q_\ell). \]
We assume furthermore that the degrees of the polynomials $\St'_1,\St'_2,\dots$ are uniformly bounded by some integer $d$;
we say then that the degree of $\St'$ is at most $d$.
\begin{definition}
\label{def:stanley-polynomial-iso}
Let $F\colon \mathbb{Y}\to \mathbb{Q}$ be a function on the set $\mathbb{Y}$ of Young diagrams.
Suppose that the equality
\[ F(P \times Q)=\St'_\ell(P ; Q)\]
holds true for all choices of $\ell\geq 1$, $P$, $Q$ for which the multirectangular diagram $P\times Q$ is well-defined.
Then we say that $\St'$ is the \emph{isotropic Stanley polynomial} for $F$.
\end{definition}
\subsection{The isotropic case $\alpha=1$}
For $k\geq 2$ we denote by $T_k$, $S_k$ and $R_k$
the versions of the functionals $\mathcal{T}_k$, $\mathcal{S}_k$ and $\mathcal{R}_k$
which were specialized to the case $A=1$:
\begin{align}
\nonumber
T_k (\lambda) &:= (k-1) \sum_{\Box=(x,y)\in\lambda} \big( x-y \big)^{k-2} \in \mathbb{Q}, \\
\label{eq:what-is-isotropic-s}
S_k (\lambda) &:=
(k-1) \iint_{(x,y)\in\lambda} (x-y)^{k-2}\, \mathrm{d}x\, \mathrm{d}y \in \mathbb{Q}, \\
\label{eq:what-is-isotropic-r}
R_k(\lambda) &:= (-1) \sum_{\sigma_1,\sigma_2} (-1)^{|C(\sigma_2)|} N_{\sigma_1,\sigma_2}(\lambda)\in \mathbb{Q},
\end{align}
for any Young diagram $\lambda$,
where the sum in \eqref{eq:what-is-isotropic-r} runs over the same set as in \eqref{eq:definition-free-cumulant}.
\subsection{Anisotropic vs isotropic}
\label{sec:anisotropic-isotropic}
\begin{lemma}
\label{lem:stanley-polunomial-isotropic-and-anisotropic-the-same}
For each bicolored graph $G$
the anisotropic Stanley polynomial for the function
\begin{equation}
\label{eq:re-signed-embed}
\lambda\mapsto(-1)^{|\mathcal{V}_\bullet(G)|}\ \mathfrak{N}_G(\lambda)
\end{equation}
exists and coincides with
the isotropic Stanley polynomial for the function $\lambda\mapsto N_G(\lambda)$.
This polynomial is homogeneous of degree $|\mathcal{V}(G)|$.
When viewed as a polynomial in the variables $q_1,q_2,\dots$ with coefficients in
$\mathbb{Q}[p_1,p_2,\dots]$, this polynomial is homogeneous of degree $|\mathcal{V}_{\circ}(G)|$.
\end{lemma}
\begin{proof}
As pointed out in the proof of \cite[Lemma 2.4]{DolegaFeraySniady2013},
a slight variation of \cite[Lemma 3.9]{FeraySniady2011} shows that if $G$ is an arbitrary bicolored graph,
then for the function \eqref{eq:re-signed-embed}
the corresponding anisotropic Stanley polynomial exists and
does not involve the variable $\gamma$.
In particular, it
coincides with
the isotropic Stanley polynomial for the function $N_G$ from \cref{def:embeddings}.
\end{proof}
\begin{lemma}
\label{eq:S-is-for-Stanley}
For each integer $n\geq 2$,
the anisotropic Stanley polynomial for
$\lambda\mapsto \mathcal{S}_n(\lambda)$ exists and
coincides with the isotropic Stanley polynomial for the isotropic
functional $\lambda\mapsto S_n(\lambda)$.
These polynomials are homogeneous of degree $n$.
\end{lemma}
\begin{proof}
We use the notations from \cref{sec:multirectangular-coordinates}.
An elementary integration shows that
whenever $P'\times Q'\in\mathbb{Y}$, then
\begin{multline}
\label{eq:sfunct-in-stanley-coordinates}
\mathcal{S}_n(P'\times Q')=\\
\frac{-1}{n} \sum_{i\geq 1}
\Big[
\big(-(p_1+\dots+p_{i-1})\big)^{n}
-\big(-(p_1+\dots+p_{i})\big)^{n}-\\
-\big(q_i-(p_1+\dots+p_{i-1})\big)^n
+\big(q_i-(p_1+\dots+p_{i})\big)^n
\Big].
\end{multline}
It is worth pointing out that
the right-hand side is a polynomial which does not involve the variable $\gamma$.
An analogous calculation performed for $S_n(P\times Q)$ gives the same polynomial.
\end{proof}
\begin{lemma}
\label{eq:R-is-for-Ranley}
For each integer $n\geq 2$,
the anisotropic Stanley polynomial for
$\lambda\mapsto \mathcal{R}_n(\lambda)$ exists and
coincides with the isotropic Stanley polynomial for the isotropic
functional $\lambda\mapsto R_n(\lambda)$.
These polynomials are homogeneous of degree $n$.
\end{lemma}
\begin{proof}
It is a direct consequence of \cref{lem:stanley-polunomial-isotropic-and-anisotropic-the-same}
and the definitions \eqref{eq:definition-free-cumulant}, \eqref{eq:what-is-isotropic-r}.
\end{proof}
\subsection{Discrete vs smooth}
\begin{proposition}
\label{prop:generate-the-same}
The filtered unital algebra $\mathscr{P}$ of $\alpha$-polynomial functions
(cf.~\cref{sec:polynomial-functions})
can be alternatively viewed as generated by the elements $\gamma,\mathcal{S}_2,\mathcal{S}_3,\dots$
with the degrees of the generators given by
\begin{equation}
\label{eq:filtration2}
\left\{
\begin{aligned}
\degg \gamma &= 1, \\
\degg \mathcal{S}_n &= n \qquad \text{for $n\geq 2$}.
\end{aligned}
\right.
\end{equation}
\end{proposition}
The above result follows from the following lemma which shows that the passage from the generating set
$\mathcal{S}_2,\mathcal{S}_3,\dots$ to the generating set $\mathcal{T}_2,\mathcal{T}_3,\dots$ (as well as the
passage in the opposite direction)
is given by \emph{linear} equations (with the coefficients in $\mathbb{Q}[\gamma]$)
with appropriate degree bounds.
\begin{lemma}
\label{eq:same-filtered-vector-space}
The following two $\mathbb{Q}[\gamma]$-modules are equal as filtered vector spaces over $\mathbb{Q}$:
\begin{itemize}
\item the module spanned by $\mathcal{T}_2,\mathcal{T}_3,\dots$ with the filtration defined on the generators by
\eqref{eq:filtration};
\item the module spanned by $\mathcal{S}_2,\mathcal{S}_3,\dots$ with the filtration defined on the generators by
\eqref{eq:filtration2}.
\end{itemize}
\end{lemma}
In other words, the lemma states that for each integer $d$ the following two linear spaces are equal:
\begin{multline}
\label{eq:filtrations-equal}
\operatorname{span}\left\{ \gamma^k \mathcal{T}_n : k\geq 0, \; n\geq 2, \; k+n \leq d \right\} = \\
\operatorname{span}\left\{ \gamma^k \mathcal{S}_n : k\geq 0, \; n\geq 2, \; k+n \leq d \right\}.
\end{multline}
\begin{proof}
We start by expressing $\mathcal{S}_n$ in terms of the generators \eqref{eq:filtration}.
A single box $(x_0,y_0)\in\mathbb{N}^2$ of a Young diagram, when viewed as a subset of the plane, becomes
the square
\[\{(x,y): x_0< x \leq x_0+1, \quad y_0<y \leq y_0+1\} \subset \mathbb{R}^2.\]
The integral on the right-hand side of \eqref{eq:smooth-functional}
restricted to this box is given by:
\begin{multline}
\label{eq:integration-single-box}
(n-1) \int_{x_0}^{x_0+1} \left[ \int_{y_0}^{y_0+1} (A x - A^{-1} y)^{n-2} \, \mathrm{d}y \right] \mathrm{d}x = \\
\frac{-1}{n} \left[
\left(c+A-A^{-1}\right)^{n}
- \left(c+A\right)^{n}
- \left(c-A^{-1}\right)^{n}
+c^n
\right] ,
\end{multline}
where on the right-hand side
\[ c:= \text{$\alpha$-$\content$}(x_0,y_0)= Ax_0-A^{-1} y_0.\]
We shall view the right-hand side of \eqref{eq:integration-single-box} as a polynomial in the variable $c$
of the following form:
\[ \sum_{2\leq k\leq n+2} d_k\ (k-1)\ c^{k-2}\]
with the coefficients $d_2,\dots,d_{n+2}\in\mathbb{Q}\left[A,A^{-1}\right]$ given by
\[
d_k=
\frac{\binom{n}{k-2}}{n (k-1)}
\left[
- \left(A-A^{-1}\right)^{n+2-k}
+ A^{n+2-k}
+ \left(-A^{-1}\right)^{n+2-k}
- 0^{n+2-k}
\right].\]
Each coefficient $d_k$ is a Laurent polynomial which is invariant under
the automorphism
\begin{equation}
\label{eq:automorphism}
A\leftrightarrow -\frac{1}{A};
\end{equation}
an automorphism which is given explicitly as
\[ \mathbb{Q}\left[A,A^{-1}\right]\ni \sum_{k\in\mathbb{Z}} f_k A^k \mapsto \sum_{k\in\mathbb{Z}} f_k \left( - \frac{1}{A} \right)^k \in \mathbb{Q}\left[A,A^{-1}\right].
\]
An elementary calculation based on the binomial formula shows that --- due to cancellations ---
$d_k$ is a Laurent polynomial of degree at most $n-k$ for each $2\leq k\leq n$.
Furthermore, $d_{n+1}=d_{n+2}=0$ and $d_n=1$.
By comparing the dimensions it follows that the space of the Laurent polynomials
of degree at most $n-k$ which are invariant under the automorphism \eqref{eq:automorphism}
is spanned by $1,\gamma,\dots,\gamma^{n-k}$.
In this way we proved existence of a polynomial
$P_k\in\mathbb{Q}[\gamma]$ of degree at most $n-k$ with the property that
\[ d_k = P_k(\gamma),\]
where on the right-hand side the usual substitution \eqref{eq:gamma} is applied.
As the integral over a Young diagram $\lambda\subset\mathbb{R}^2$ can be written as a sum of the integrals
over the individual boxes, it follows immediately that the following equality of functions on $\mathbb{Y}$
holds true
\begin{equation}
\label{eq:s-versus-t}
\mathcal{S}_n= \sum_{2\leq k\leq n} P_k(\gamma)\ \mathcal{T}_k
\end{equation}
with the polynomials $P_2,\dots,P_n$ given by the above construction.
\emph{This shows that the right-hand side of \eqref{eq:filtrations-equal} is a subset of its
left-hand side, as required.}
We will show that for each $n\geq 2$
there exist polynomials $Q_2,\dots,Q_{n}\in\mathbb{Q}[\gamma]$ with the property that
\begin{equation}
\label{eq:t-versus-s}
\mathcal{T}_n= \sum_{2\leq k\leq n} Q_k(\gamma)\ \mathcal{S}_k
\end{equation}
and, furthermore, the degree of $Q_k$ is bounded from above by $n-k$.
Our proof will use induction with respect to the variable $n$.
Since $P_n=d_n=1$, equation \eqref{eq:s-versus-t} can be written in the form
\[ \mathcal{T}_n = \mathcal{S}_n - \sum_{2 \leq k \leq n-1} P_k(\gamma)\ \mathcal{T}_k.
\]
The inductive hypothesis can be applied to each of the expressions $\mathcal{T}_2,\dots,\mathcal{T}_{n-1}$ on
the right-hand side. It follows that $\mathcal{T}_n$ can be written, as required, in the form \eqref{eq:t-versus-s}
with the proper bounds on the degrees of the polynomials $Q_2,\dots,Q_n$.
This concludes the proof of the inductive step.
Equation \eqref{eq:t-versus-s} and the degree bounds on the polynomials $Q_2,\dots,Q_n$ imply
that
\emph{the left-hand side of \eqref{eq:filtrations-equal} is a subset of its
right-hand side, as required.}
\end{proof}
\subsection{Yet another basis of $\mathscr{P}$: free cumulants}
\begin{proposition}
\label{prop:generate-the-same=free}
The filtered unital algebra $\mathscr{P}$ of $\alpha$-polynomial functions
can be alternatively viewed as generated by the elements $\gamma,\mathcal{R}_2,\mathcal{R}_3,\dots$
with the degrees of the generators given by
\begin{equation}
\label{eq:filtration3}
\left\{
\begin{aligned}
\degg \gamma &= 1, \\
\degg \mathcal{R}_n &= n \qquad \text{for $n\geq 2$}.
\end{aligned}
\right.
\end{equation}
\end{proposition}
\begin{proof}
In the light of \cref{prop:generate-the-same} it is enough to show that
$\mathcal{S}_2,\mathcal{S}_3,\dots$ and $\mathcal{R}_2,\mathcal{R}_3,\dots$ generate the same
filtered algebra with the usual choice of the degrees of the generators
\eqref{eq:filtration2} and \eqref{eq:filtration3}. More specifically, we will show that
for each $n\geq 2$:
\begin{itemize}
\item $\mathcal{R}_n$ can be expressed as a polynomial
$F(\mathcal{S}_2,\dots,\mathcal{S}_n)$
for some multivariate polynomial $F(x_2,\dots,x_n)$,
\item $\mathcal{S}_n$ can be expressed as a polynomial
$G(\mathcal{R}_2,\dots,\mathcal{R}_n)$
for some multivariate polynomial $G(x_2,\dots,x_n)$,
\end{itemize}
and that $F$ and $G$ are homogeneous polynomials of degree $n$,
where the degrees of the variables are specified by $\degg x_i=i$.
In order to do this we shall study the relationship between the anisotropic
Stanley polynomials (\cref{subsec:stanley-polynomials})
and their isotropic counterparts (\cref{sec:anisotropic-isotropic}).
In the following we concentrate on the problem of finding the polynomial $F$.
Each anisotropic Stanley polynomial determines uniquely the corresponding function on the set $\mathbb{Y}$
of Young diagrams.
Therefore our problem can be reformulated as expressing
\emph{the anisotropic Stanley polynomial for $\mathcal{R}_n$} as a polynomial (which is equal to
our wanted polynomial $F$)
in terms of
\emph{the anisotropic Stanley polynomial for $\mathcal{S}_2$},
\emph{the anisotropic Stanley polynomial for $\mathcal{S}_3$},\dots.
\cref{eq:R-is-for-Ranley} (respectively, \cref{eq:S-is-for-Stanley})
shows equality
between the anisotropic Stanley polynomial for $\mathcal{R}_n$ (respectively, $\mathcal{S}_n$)
and the isotropic Stanley polynomial for $R_n$
(respectively, for $S_n$).
Since the Stanley polynomial for a given function on $\mathbb{Y}$ (if it exists) is unique,
our goal is equivalent to proving the existence of a multivariate polynomial $F$
with the property that
\[ R_n(\lambda)= F\big(S_2(\lambda),S_3(\lambda),\dots\big)\]
holds for every $\lambda\in\mathbb{Y}$.
Such a polynomial is known to exist and its exact form is given in \cite[Eq.~(15)]{DolegaFeraySniady2008};
this formula also implies the required degree bound.
The problem of finding the polynomial $G$
is analogous with the roles of the quantities $\mathcal{S}_n$ and $\mathcal{R}_n$ interchanged.
In the last step of the proof one should use \cite[Eq.~(14)]{DolegaFeraySniady2008} instead.
\end{proof}
\section{Degree bounds of Dołęga and F\'eray}
\label{sec:vk-scaling}
\subsection{Kerov--Lassalle polynomial}
\label{sec:KVpolynomial}
If $\mu=(\mu_1,\mu_2,\dots)$ is a partition which does not contain any parts equal to $1$, we define
the function $\mathcal{R}_\mu$ by a multiplicative extension of free cumulants:
\[
\mathcal{R}_\mu := \prod_{k} \mathcal{R}_{\mu_k}.
\]
Dołęga and F\'eray \cite{DoleegaFeray2014} studied the \emph{Kerov--Lassalle polynomial},
i.e., the expansion of the Jack character $\mathbb{C}h_\pi$ in
the linear basis $(\mathcal{R}_\mu)$ with the coefficients which a priori belong to
the field $\mathbb{Q}(A)$ of rational functions in the variable $A$.
They proved \cite[Corollary 3.5]{DoleegaFeray2014} that each such coefficient
$\left[ \mathcal{R}_\mu\right] \mathbb{C}h_\pi$ is, in fact, a polynomial in the variable $\gamma$.
\subsection{Degree bounds of Dołęga and F\'eray}
In the following we will consider partitions $\mu$ and $\pi$ for which the corresponding coefficient
\begin{equation}
\label{eq:KerovLassalle}
\left[ \mathcal{R}_\mu\right] \mathbb{C}h_\pi\in \mathbb{Q}[\gamma]
\end{equation}
of the Kerov--Lassalle polynomial is non-zero and we denote by
\[ \ddeg:=\ddeg \big( [\mathcal{R}_\mu] \mathbb{C}h_\pi\big) \]
the degree of this polynomial.
Dołęga and F\'eray also proved \cite[Proposition 3.7, Proposition 3.10]{DoleegaFeray2014}
that
\begin{align}
\label{eq:restriction-linear-combination-first}
|\mu| + \ddeg &\leq |\pi|+\ell(\pi), \\
\label{eq:restriction-linear-combination}
|\mu|-2\ell(\mu) +\ddeg &\leq |\pi|-\ell(\pi).\\
\intertext{
By taking the mean of the above inequalities we obtain}
\label{eq:restriction-linear-combination-conclusion}
|\mu|-\ell(\mu)+\ddeg &\leq |\pi|.
\end{align}
\subsection{Degree of the Jack characters}
\begin{theorem}
\label{thm:degree-of-Jack-character}
Let $\pi$ be an arbitrary partition.
Then $\mathbb{C}h_\pi\in\mathscr{P}$ is an $\alpha$-polynomial function of degree at most $|\pi|+\ell(\pi)$.
\end{theorem}
\begin{proof}
This is a direct consequence of \eqref{eq:restriction-linear-combination-first}
combined with \cref{prop:generate-the-same=free}.
\end{proof}
\begin{proposition}
\label{prop:filtration-by-characters}
The family
\begin{equation}
\label{eq:all-your-base-are-belong-to-us}
\big\{ \gamma^d \mathbb{C}h_\pi : d\geq 0, \text{$\pi$ is a partition} \big\}
\end{equation}
is a linear basis of $\mathscr{P}$.
The usual filtration on $\mathscr{P}$ can be equivalently defined by
setting the degrees of elements of this linear basis as
\[ \degg \gamma^d \mathbb{C}h_\pi = d + |\pi|+\ell(\pi).\]
\end{proposition}
\begin{proof}
The linear independence of \eqref{eq:all-your-base-are-belong-to-us}
was proved (in a wider generality) by Dołęga and F\'eray \cite[Proposition 2.9]{DoleegaFeray2014}.
Our goal is to show the equality between vector spaces:
\begin{multline*} \operatorname{span}\big\{ \gamma^d \mathbb{C}h_\pi : d\geq 0, \ d+ |\pi|+\ell(\pi) \leq n \big\} = \\
\operatorname{span}\big\{ \gamma^d \mathcal{R}_\mu : d\geq 0,\ d+ |\mu| \leq n \big\},
\end{multline*}
where on the left-hand side the span runs over partitions $\pi$,
while on the right-hand side the span runs over partitions $\mu$ which do not contain any part equal to $1$.
The inclusion $\subseteq$ is a direct consequence of \eqref{eq:restriction-linear-combination-first}.
On the other hand, the cardinality of the basis of the
left-hand side is equal to the cardinality of the generating set of the right-hand side
(adding $1$ to each part of $\pi$ gives a bijection between the corresponding index sets);
thus these finite-dimensional linear spaces are indeed equal.
\end{proof}
\subsection{Vershik--Kerov scaling}
\label{sec:vk-concrete}
Vershik and Kerov \cite{VershikKerov1981a}
proved a special case of the following result
(namely in the case $A=1$ which
corresponds to the usual characters $\mathbb{C}h_\pi^{A=1}$ of the symmetric groups).
In the setup of Jack characters it was proved (in a slightly different formulation) by Lassalle
\cite[Proposition~2]{Lassalle2008a}.
Yet another proof, based on the results of Dołęga and F\'eray, can be found in an early
version of the current work \cite[Proposition 3.4]{Sniady2016a}.
\begin{proposition}
\label{prop:vershik-kerov-jack-character}
Let $\pi$ be a partition and $m\geq 1$ be an integer.
Then
\begin{equation}
\label{eq:vershik-kerov-polynomial-jack-character}
\mathbb{Y} \ni(\lambda_1,\dots,\lambda_m) \mapsto \mathbb{C}h_\pi(\lambda_1,\dots,\lambda_m)\in\mathbb{Q}\left[A,A^{-1}\right]
\end{equation}
is a polynomial of degree $|\pi|$; its homogeneous top-degree part is equal to
\[ A^{|\pi|-\ell(\pi)}\ p_{\pi}(\lambda_1,\dots,\lambda_m), \]
where $p_\pi$ is the power-sum symmetric polynomial.
\end{proposition}
\subsection{Degrees of Laurent polynomials}
\begin{definition}
\label{def:degree-Laurent}
For an integer $d$ we will say that a \emph{Laurent polynomial
\[ f=\sum_{k\in\mathbb{Z}} f_k A^k\in \mathbb{Q}\left[A,A^{-1}\right] \] is of degree at most $d$} if
$f_k=0$ holds for each integer $k>d$.
\end{definition}
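For example, the Laurent polynomial $f=A^{-3}+2A$ is of degree at most $1$ (and also of degree at most $5$); note that this notion of degree puts no restriction on the negative powers of $A$.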
\begin{proposition}
\label{prop:degree-laurent}
For any partition $\pi$ and any Young diagram $\lambda$ the evaluation
$\mathbb{C}h_\pi(\lambda)\in\mathbb{Q}\left[A,A^{-1}\right]$ is a Laurent polynomial of degree at most $|\pi|-\ell(\pi)$.
\end{proposition}
\begin{proof}
From the very definition of free cumulants \eqref{eq:definition-free-cumulant} it follows
that for any $n\geq 2$ and any Young diagram $\lambda$, the evaluation
$\mathcal{R}_n(\lambda)\in\mathbb{Q}\left[A,A^{-1}\right]$ is a Laurent polynomial of degree at most $n-2$.
By multiplicativity it follows that
$\mathcal{R}_\mu(\lambda)\in\mathbb{Q}\left[A,A^{-1}\right]$ is a Laurent polynomial of degree at most $|\mu|-2\ell(\mu)$.
On the other hand, $\gamma\in\mathbb{Q}\left[A,A^{-1}\right]$ is a Laurent polynomial of degree $1$.
The bound \eqref{eq:restriction-linear-combination} concludes the proof.
\end{proof}
\section{The algebras $\mathscr{R}$ and $\mathscr{R}_\otimes$ of row functions}
\label{sec:row-functions}
\subsection{Row functions}
\begin{definition}
\label{def:row-functions}
Let a sequence (indexed by $r\geq 0$) of symmetric functions
$f_{r}:\mathbb{N}_0^r \rightarrow \mathbb{Q}\left[A,A^{-1}\right]$
be given, where
\[\mathbb{N}_0=\{0,1,2,\dots\}.\]
We assume that:
\begin{itemize}
\item if\/ $0\in \{x_1,\dots,x_r\}$ then $f_{r}(x_1,\dots,x_r)=0$,
\item $f_{r}=0$ except for finitely many values of $r$,
\item there exists some integer $d\geq 0$ with the property that for all $r\geq 0$ and all $x_1,\dots,x_r\in\mathbb{N}_0$,
the evaluation $f_r(x_1,\dots,x_r)\in\mathbb{Q}\left[A,A^{-1}\right]$ is a Laurent polynomial of degree at most $d-2r$.
\end{itemize}
We define a function $F\colon \mathbb{Y}\rightarrow\mathbb{Q}\left[A,A^{-1}\right]$ given by
\begin{equation}
\label{eq:row-functions}
F(\lambda) :=
\sum_{r\geq 0} \sum_{i_1 < \dots < i_r} f_{r}(\lambda_{i_1},\dots,\lambda_{i_r}).
\end{equation}
We will say that $F$ is a \emph{row function of degree at most $d$}
and that $\left(f_r\right)$ is the \emph{kernel} of $F$.
The set of such row functions will be denoted by $\mathscr{R}$.
\end{definition}
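For a simple example, consider the function which counts the non-empty rows of a Young diagram,
\[ F(\lambda):=\#\{i : \lambda_i\geq 1\}. \]
It is a row function with the kernel given by $f_1(x)=1$ for $x\geq 1$, $f_1(0)=0$, and $f_r=0$ for $r\neq 1$; since each value of $f_1$ is a Laurent polynomial of degree at most $0=d-2\cdot 1$ for $d=2$, this row function is of degree at most $2$.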
\subsection{Filtration on $\mathscr{R}$}
\label{sec:filtration-on-functions}
\begin{proposition}
\label{lem:filtered-algebra}
The set $\mathscr{R}$ of row functions equipped with the pointwise product and pointwise addition forms a
filtered algebra.
\end{proposition}
\begin{proof}
A product of two terms appearing on the right-hand side of \eqref{eq:row-functions}
is again of the same form:
\begin{multline}
\label{eq:product-convolution-kernels}
\sum_{i_1 < \dots < i_r} f_{r}(\lambda_{i_1},\dots,\lambda_{i_r}) \cdot
\sum_{j_1 < \dots < j_s} g_{s}(\lambda_{j_1},\dots,\lambda_{j_s}) \\
\shoveleft{=
\sum_{0\leq t\leq r+s} \;
\sum_{k_1 < \dots < k_t}} \\
\underbrace{\sum_{\substack{ i_1 < \dots < i_r \\ j_1 < \dots < j_s \\
\{k_1,\dots,k_t\}=\{i_1,\dots,i_r\} \cup \{j_1,\dots,j_s\} }}
f_{r}(\lambda_{i_1},\dots,\lambda_{i_r}) \
g_{s}(\lambda_{j_1},\dots,\lambda_{j_s})}_{h_{t}(\lambda_{k_1},\dots,\lambda_{k_t}):=}= \\
\sum_{0\leq t\leq r+s} \;
\sum_{k_1 < \dots < k_t}
h_t(\lambda_{k_1},\dots,\lambda_{k_t})
\end{multline}
which shows that $\mathscr{R}$ forms an algebra.
Assume that the two factors on the left-hand side of \eqref{eq:product-convolution-kernels}
are row functions of degree at most, respectively, $d$ and $e$.
It follows that each value of $f_r$ is a Laurent polynomial of degree at most $d-2r$
and each value of $g_s$ is a Laurent polynomial of degree at most $e-2s$.
Thus each value of $h_t$, the kernel defined by the curly bracket
on the right-hand side of \eqref{eq:product-convolution-kernels},
is a Laurent polynomial of degree at most $d+e-2(r+s)\leq d+e-2t$ which shows that
the product is a row function of degree at most $d+e$, which concludes the proof that
$\mathscr{R}$ is a filtered algebra.
\end{proof}
\begin{proposition}
\label{lem:polynimial-are-row}
Let $F\in\mathscr{P}$ be an $\alpha$-polynomial function of degree $d$.
Then $F\in\mathscr{R}$ is also a row function of degree at most $d$.
\end{proposition}
\begin{proof}
By \cref{lem:filtered-algebra} it is enough to
prove the claim for the generators \eqref{eq:filtration},
i.e., to show for each $n\geq 2$ that $\mathcal{T}_n$ is a row function of degree at most $n$
and that $\gamma$ is a row function of degree $1$.
We will do it in the following.
For a Young diagram $\lambda$ we denote by $\lambda^T=(\lambda^T_1,\lambda^T_2,\dots)\in\mathbb{Y}$ the transposed diagram.
The binomial formula implies that
\begin{multline}
\label{eq:tfunct-as-a-row-function}
\mathcal{T}_n(\lambda) =
(n-1)
\sum_{x\geq 1} \sum_{1\leq y\leq \lambda^T_x } \left( Ax - A^{-1} y\right)^{n-2} =\\
(n-1) \sum_{q\geq 0} A^{n-2-2q} \binom{n-2}{q} (-1)^q
\sum_{x\geq 1} x^{n-2-q} \sum_{1\leq y\leq \lambda^T_x} y^q= \\
(n-1) \sum_{u\geq 1} A^{n-2u} \binom{n-2}{u-1} (-1)^{u-1}
\sum_{x\geq 1} x^{n-1-u} \underbrace{\sum_{1\leq y\leq \lambda^T_x} y^{u-1}}_{(\spadesuit)},
\end{multline}
where the last equality follows from the change of variables $u:=q+1$.
The expression $(\spadesuit)$ marked above by the curly bracket, namely
\[ \mathbb{N}_0\ni s \mapsto \sum_{1\leq y\leq s} y^{u-1}, \]
is a polynomial function of degree $u$, thus it can be written as a
linear combination (with rational coefficients) of the family of polynomials
$\mathbb{N}_0\ni s\mapsto \binom{s}{r}$ indexed by $r\in\{0,1,\dots,u\}$.
Notice that $\lambda^T_x$ is the number of rows of $\lambda$ which are greater than or equal to $x$,
thus $(\spadesuit)$ is a linear combination (with rational coefficients) of
\begin{equation}
\label{eq:binomials-form-a-basis}
\binom{\lambda^T_x}{r} = \sum_{i_1 < \dots < i_r} [\lambda_{i_1} \geq x] \cdots [\lambda_{i_r} \geq x]
\end{equation}
over $r\in\{0,1,\dots,u\}$.
This shows that $\mathcal{T}_n$ is a row function.
Let $f_0,f_1,\dots$ be the corresponding kernel.
Equation \eqref{eq:tfunct-as-a-row-function} shows that each value
$f_r(x_1,\dots,x_r)$ is a linear combination (with rational coefficients) of the expressions
\[ A^{n-2u}\ [x_1 \geq x] \cdots [x_r \geq x] \in\mathbb{Q}\left[A,A^{-1}\right]\]
over $u\geq 1$ and $x\geq 1$ such that $r\leq u$. Each such Laurent polynomial is of degree at most
$n-2u\leq n-2r$, which shows that $\mathcal{T}_n$ is a row function of degree at most $n$, as required.
It remains to consider the generator $\gamma$. We define
\[ f_r = \begin{cases}
\gamma & \text{if $r=0$}, \\
0 & \text{if $r\geq 1$}.
\end{cases}
\]
Clearly, the corresponding row function $F$ fulfills $F(\lambda)=\gamma$ for any $\lambda\in\mathbb{Y}$.
This shows that $\gamma$ is a row function of degree at most $1$, as required.
\end{proof}
\subsection{Separate product of row functions}
The set of row functions can be equipped with another product, which we will call
\emph{the separate product}.
It is defined on the linear basis by declaring
\begin{multline*}
\sum_{i_1 < \dots < i_r} f(\lambda_{i_1},\dots,\lambda_{i_r}) \otimes
\sum_{j_1 < \dots < j_s} g(\lambda_{j_1},\dots,\lambda_{j_s}):= \\
{
\sum_{k_1 < \dots < k_{r+s}}}
\underbrace{\sum_{\substack{ i_1 < \dots < i_r \\ j_1 < \dots < j_s \\
\{k_1,\dots,k_{r+s}\}=\{i_1,\dots,i_r\} \sqcup \{j_1,\dots,j_s\} }}
f(\lambda_{i_1},\dots,\lambda_{i_r})
g(\lambda_{j_1},\dots,\lambda_{j_s})}_{h_{r+s}(\lambda_{k_1},\dots,\lambda_{k_{r+s}})};
\end{multline*}
we extend this definition by bilinearity to general row functions.
This corresponds to selecting in \eqref{eq:product-convolution-kernels} only
the summand for which $t=r+s$.
This product is well-defined since the kernel is
uniquely determined by the row function.
We define $\mathscr{R}_\otimes$ as the linear space of row functions equipped with
\emph{the separate product} $\otimes$.
It is a very simple exercise to check that the algebra $\mathscr{R}_\otimes$
equipped with the notion of degree from \cref{def:row-functions} becomes a
\emph{filtered algebra}.
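For instance, if $F\in\mathscr{R}$ denotes the row function from the example above which counts the non-empty rows of a Young diagram, then $(F\otimes F)(\lambda)=F(\lambda)\big(F(\lambda)-1\big)$, whereas the pointwise square equals $F(\lambda)^2$; the separate product omits exactly the ``diagonal'' summands in which the two kernels are evaluated on the same row.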
\subsection{Summary: four filtered algebras. Cumulants}
So far we have introduced four filtered algebras of functions on $\mathbb{Y}$.
They can be summarized by the following commutative diagram.
\begin{equation}
\label{eq:commutative-diagram-B2}
\begin{tikzpicture}[node distance=3cm,auto, baseline=-14ex]
\node (A) {$\mathscr{P}_\bullet$};
\node (B) [right of=A] {$\mathscr{P}$};
\node (B1) [right of=B,node distance=1.2cm] {};
\node (B2) [below of=B1,node distance=1.2cm] {$\mathscr{R}$};
\node (C) [below of=B2]{$\mathscr{R}_\otimes$};
\draw[->] (A) to node {$\id$} (B);
\draw[->] (B) to node {$\id$} (B2);
\draw[->] (B2) to node {$\id$} (C);
\draw[->] (A) to node [swap]{$\id$} (C);
\end{tikzpicture}
\end{equation}
Each of the arrows is a unital linear map given by the identity (inclusion).
Each of the arrows is compatible with the corresponding filtrations in the sense that
the degree of the image of any element $x$ is bounded from above by the degree of $x$ itself
(for the horizontal and the vertical arrow this follows from the fact
that the corresponding pairs of algebras are isomorphic as \emph{filtered vector spaces};
for both diagonal arrows this corresponds to \cref{lem:polynimial-are-row}).
The short diagonal arrow is an inclusion of algebras.
For some arrows in this diagram we will investigate the corresponding cumulants.
We recall that for
the horizontal arrow $\mathbb{E}=\id \colon \mathscr{P}_\bullet \to \mathscr{P}$ the corresponding cumulant
is denoted by $\kappaDisjointPoint$.
For the long diagonal arrow $\mathbb{E}=\id \colon \mathscr{P}_\bullet \to \mathscr{R}_\otimes$
the corresponding cumulant
will be denoted by $\kappaDisjointRow$.
For the vertical arrow $\mathbb{E}=\id \colon \mathscr{R} \to \mathscr{R}_\otimes$
the corresponding cumulant will be denoted by $\kappaPointRow$.
\section{How to show that an $\alpha$-polynomial function is of small degree?}
\label{sec:how-to-prove-small}
\subsection{The difference operator}
\begin{definition}
\label{def:difference}
If $F=F(\lambda_1,\dots,\lambda_\ell)$ is a function of $\ell$ arguments and $1\leq j\leq \ell$,
we define a new function $\Delta_{\lambda_j} F$ by
\begin{multline*}
\left( \Delta_{\lambda_j} F \right) (\lambda_1,\dots,\lambda_\ell):= \\
F(\lambda_1,\dots,\lambda_{j-1},\lambda_j+1,\lambda_{j+1},\dots,\lambda_\ell)-
F(\lambda_1,\dots,\lambda_\ell).
\end{multline*}
We call $\Delta_{\lambda_j}$ a \emph{difference operator}.
\end{definition}
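For example, for $F(\lambda_1)=\lambda_1^2$ we have $\left( \Delta_{\lambda_1} F\right)(\lambda_1)=(\lambda_1+1)^2-\lambda_1^2=2\lambda_1+1$; as this simple example illustrates, an application of a difference operator to a polynomial decreases its degree by one.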
\subsection{Extension of the domain of functions on $\mathbb{Y}$}
\label{sec:extension}
Let $F$ be a function on the set of Young diagrams.
Such a function can be viewed as a function $F(\lambda_1,\dots,\lambda_\ell)$ defined
for all non-negative integers $\lambda_1\geq \dots\geq \lambda_\ell$.
We will extend its domain, as follows.
\begin{definition}
\label{def:extension}
If $(\xi_1,\dots,\xi_\ell)$ is an arbitrary sequence of non-negative integers, we denote
\[ F^{\sym}(\xi_1,\dots,\xi_\ell):= F(\lambda_1,\dots,\lambda_\ell), \]
where $(\lambda_1,\dots,\lambda_\ell)\in \mathbb{Y}$ is the sequence $(\xi_1,\dots,\xi_\ell)$
sorted in the reverse order $\lambda_1\geq \dots \geq \lambda_\ell$.
In this way $F^{\sym}(\xi_1,\dots,\xi_\ell)$ is a symmetric function of its arguments.
\end{definition}
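For instance, $F^{\sym}(2,0,3)=F(3,2,0)$; in particular, on weakly decreasing sequences the extension $F^{\sym}$ coincides with the original function $F$.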
Note that the definition \eqref{eq:row-functions} of a row function
does not require any modifications in order to give rise to such an extension.
For this reason, if $F$ is a row function, we will identify it with its extension
$F^{\sym}$.
\subsection{The difference operator vanishes on elements of small degree}
\begin{lemma}
\label{coro:small-degree-killed}
Let $d\geq 1$ be an integer and assume that $F\in\mathscr{R}$ is of degree at most $d-1$.
Then for each integer $k\geq 0$ and each Young diagram $\lambda=(\lambda_1,\lambda_2,\dots)$
\[ [A^{d-2k}] \Delta_{\lambda_1} \cdots \Delta_{\lambda_k} F^{\sym}(\lambda_1,\lambda_2,\dots) =0.\]
\end{lemma}
\begin{proof}
We know that
$F$ is a sum of the functions of the form
\[
\sum_{i_1 < \dots < i_l} f_{l}(\lambda_{i_1},\dots,\lambda_{i_l}),
\]
over $l\geq 0$ and each value of $f_l$ is a Laurent polynomial of degree at most $d-1-2l$.
Clearly,
\[ \Delta_{\lambda_1} \dots \Delta_{\lambda_k} f_{l}(\lambda_{i_1},\dots,\lambda_{i_l}) = 0
\qquad \text{if } \{1,\dots,k\}\not\subseteq \{i_1,\dots,i_l\}
\]
thus
\begin{equation}
\label{eq:tralala}
[A^{d-2k}] \Delta_{\lambda_1} \dots \Delta_{\lambda_k}
\sum_{i_1 < \dots < i_l} f_{l}(\lambda_{i_1},\dots,\lambda_{i_l})
\end{equation}
vanishes if $l<k$.
On the other hand, for $l\geq k$, each evaluation of $f_l$
is a Laurent polynomial of degree at most $d-1-2l\leq d-1-2k<d-2k$,
thus \eqref{eq:tralala} vanishes as well.
\end{proof}
\subsection{What happens to an $\alpha$-polynomial function if we view it as a row function?}
\label{sec:top-degree-row-function}
\begin{definition}
Let $d\geq 0$ be an integer and
let $F$ given by \eqref{eq:row-functions} be a row function of degree at most $d$.
We define its \emph{top-degree part} as:
\begin{equation}
\label{eq:top-degree-row-function}
F^{\ttop}(\lambda) :=
\sum_{r \geq 0}
A^{d-2r}
\sum_{i_1 < \dots < i_r}
\left[A^{d-2r} \right] f_{r}(\lambda_{i_1},\dots,\lambda_{i_r}).
\end{equation}
We will also say that the summand corresponding to a specified value of $r$
(i.e., the $r$-fold sum over the rows) \emph{has rank $r$}.
\end{definition}
\begin{lemma}
\label{lem:minimal-rank}
Let $r\geq 0$ and $d\geq 2r$ be integers and
let $p(c_1,\dots,c_r)$ be a symmetric polynomial in its $r$ arguments with the coefficients
in the polynomial ring $\mathbb{Q}[\gamma]$. We assume that $p$, viewed as a polynomial in $\gamma,c_1,\dots,c_r$,
is a homogeneous polynomial of degree $d-2r$.
We consider the row function of degree at most $d$ given by
\[
F(\lambda)= \sum_{\Box_1,\dots,\Box_r\in \lambda}
p(c_1,\dots,c_r),
\]
where
\[c_1:=\text{$\alpha$-$\content$}(\Box_1),\quad \dots, \quad c_r:=\text{$\alpha$-$\content$}(\Box_r).
\]
Then each non-zero summand in \eqref{eq:top-degree-row-function}
for the top-degree part of $F$ has rank at least $r$;
the summand with the rank equal to $r$ is given by
\begin{equation}
\label{eq:minimal-rank}
\lambda\mapsto A^{d-2r} r! \sum_{i_1<\cdots<i_r}
\sum_{1\leq x_1 \leq \lambda_{i_1}} \cdots \sum_{1\leq x_r \leq \lambda_{i_r}}
p(x_1,\dots,x_r) \bigg|_{\gamma=-1},
\end{equation}
where on the right-hand side we consider the evaluation of the polynomial $p$ for $\gamma=-1$.
\end{lemma}
\begin{proof}
We start with a general investigation of the top-degree part of various row functions.
The proof of \cref{lem:polynimial-are-row} shows that
\[ \gamma^{\ttop}=-A \]
consists of a single summand of rank $0$.
The extraction of the top-degree part of the row function \eqref{eq:tfunct-as-a-row-function}
corresponds to the restriction to the summand $r=u\geq 1$
(with the notations of \eqref{eq:binomials-form-a-basis}).
For this reason $\mathcal{T}_n^{\ttop}(\lambda)$ involves only the summands with the rank at least $1$.
Furthermore, the term of rank $1$ is given explicitly in the following expansion:
\[\mathcal{T}_n^{\ttop}(\lambda) = A^{n-2} \sum_i \sum_{1\leq x\leq \lambda_i} (n-1) x^{n-2} +
(\text{summands of rank at least $2$}).
\]
We shall revisit \eqref{eq:product-convolution-kernels} in order to investigate the top-degree part
of a product of two row functions.
The summands on the right-hand side do not contribute to the top-degree part unless $t=r+s$ and
\[ \{k_1,\dots,k_t\}=\{i_1,\dots,i_r\} \sqcup \{j_1,\dots,j_s\} \]
is a decomposition into disjoint sets.
This shows that the top-degree part of a product involves only the summands with the rank at least
the sum of the ranks of the original factors.
Furthermore, the top-degree summand of this minimal rank is given very explicitly.
We come back to the proof of the Lemma.
Assume for simplicity that the polynomial $p$ is a monomial.
In this case $F$ is --- up to simple numerical factors --- a product of some power of
$\gamma$ and of exactly $r$ factors of the form $\mathcal{T}_n$ over $n\geq 2$.
The above discussion shows that $F^{\ttop}$ involves only the summands of rank
at least $r$, and gives a concrete formula for the summand of rank $r$.
It is easy to check that it is the formula \eqref{eq:minimal-rank}
in which the monomial $p$ has been replaced by its symmetrization.
By linearity, this result remains true for a general polynomial $p$.
In particular, if $p$ is already symmetric, formula \eqref{eq:minimal-rank} holds true
without modifications.
\end{proof}
\subsection{Multivariate polynomials having lots of zeros}
\label{sec:preparation-proof-key-lemma-start}
The final ingredient in the proof of \cref{lem:keylemma}
is the following result which shows that
if a multivariate polynomial has a specific set of zeros, it must be identically equal to zero.
\begin{lemma}
\label{lem:lots-of-zeros}
Let $k\geq 0$ and $d\geq 0$ be integers.
\begin{itemize}
\item
Let $p(x_1,\dots,x_k)\in\mathbb{Q}[x_1,\dots,x_k]$ be a polynomial of degree at most~$d$.
Assume that
\begin{equation}
\label{eq:let-be-zero}
p(x_1,\dots,x_k)=0
\end{equation}
holds true for all integers $x_1,\dots,x_k\geq 1$ such that
\begin{equation}
\label{eq:sum-of-xes}
x_1+\dots+x_k\leq d+k.
\end{equation}
Then $p=0$.
\item
Let $p(x_1,\dots,x_k)\in\mathbb{Q}[x_1,\dots,x_k]$ be a \emph{symmetric} polynomial of degree at most $d$.
Assume that \eqref{eq:let-be-zero} holds true
for all integers $x_1\geq \dots \geq x_k\geq 1$ such that \eqref{eq:sum-of-xes} holds true.
Then $p=0$.
\end{itemize}
\end{lemma}
\begin{proof}
We will show the first part of the claim by induction over $k$.
\emph{The case $k=0$.} In this extreme case the empty sequence fulfills the assumption
\eqref{eq:sum-of-xes}; the resulting \eqref{eq:let-be-zero} gives the desired claim.
\emph{The case $k=1$.} It follows that $p(x_1)\in\mathbb{Q}[x_1]$ is a polynomial of degree at most $d$
which has at least $d+1$ zeros; it follows that $p=0$.
\emph{We consider the case $k\geq 2$ and we assume that the first part of the lemma is true
for $k':=k-1$.}
The polynomial $p$ can be written in the form
\[
p = \sum_{0\leq r\leq d}
p_{r}(x_1,\dots,x_{k-1})\ \underbrace{(x_k-1) (x_k-2) \dots (x_k-r)}_{\text{$r$ factors}},
\]
where $p_{r}\in\mathbb{Q}[x_1,\dots,x_{k-1}]$ is a polynomial of degree at most $d-r$.
We will show by a nested induction over the variable $r$ that $p_r=0$ for each $0\leq r \leq d$.
Assume that $p_l=0$ for each $l<r$. It follows that
\[ p(x_1,\dots,x_{k-1},r+1)=r!\ p_r(x_1,\dots,x_{k-1}),\]
thus
\[ p_r(x_1,\dots,x_{k-1})=0 \]
holds true for all integers $x_1,\dots,x_{k-1}\geq 1$ such that
\[x_1+\dots+x_{k-1}\leq (d-r)+(k-1).\]
It follows that $p_r$ fulfills the condition \eqref{eq:let-be-zero} for $d':=d-r$ and $k':=k-1$ thus
the inductive hypothesis (with respect to the variable $k$) can be applied.
It follows that $p_r=0$.
This concludes the proof of the inductive step over the variable $r$.
The second part of the lemma is a direct consequence of the first part.
\end{proof}
\subsection{The key tool}
The assumptions of the following theorem have been modeled after the properties of the Jack characters;
in particular $F:=\mathbb{C}h_\pi$ fulfills the assumptions
--- except for the assumption \ref{zero:topdegree} ---
for $n:=|\pi|$ and $r:=\ell(\pi)$.
\begin{theorem}[The key tool]
\label{lem:keylemma}
Let integers $n\geq 1$ and $r\geq 1$ be given.
Assume that:
\begin{enumerate}[label=(Z\arabic*)]
\item
\label{zero:initial-bound}
$F\in\mathscr{P}$ is of degree at most $n+r$;
\item \label{zero:topdegree}
we assume that
for each $m\geq 1$ the polynomial in $m$ variables
\[ \mathbb{Y} \ni(\lambda_1,\dots,\lambda_m) \mapsto F(\lambda_1,\dots,\lambda_m) \]
is of degree at most $n-1$;
\item
\label{zero:vanishing}
the equality
\begin{equation}
\label{eq:top-key-equation}
[A^{n+r-2k}] \Delta_{\lambda_1} \cdots \Delta_{\lambda_k} F^{\sym}(\lambda_1,\dots,\lambda_k) = 0
\end{equation}
holds true for the following values of\/ $k$ and $\lambda$:
\begin{itemize}
\item $k=r$ and $\lambda=(\lambda_1,\dots,\lambda_r)\in\mathbb{Y}$ with at most $r$ rows is such that
$|\lambda|\leq n+r-2k-1$;
\item $k>r$ and $\lambda=(\lambda_1,\dots,\lambda_k)\in\mathbb{Y}$ with at most $k$ rows is such that
$|\lambda|\leq n+r-2k$;
\end{itemize}
\item
\label{zero:laurent}
for each $\lambda\in\mathbb{Y}$, the Laurent polynomial $F(\lambda)\in\mathbb{Q}\left[A,A^{-1}\right]$
is of degree at most $n-r+1$.
\end{enumerate}
Then $F\in\mathscr{P}$ is of degree at most $n+r-1$.
\emph{Alternative version:} the result remains valid
for all integers $n\geq 0$ and $r\geq 1$
if the assumption \ref{zero:topdegree} is removed and
the condition \ref{zero:vanishing} is replaced by the following one:
\begin{enumerate}[label=(Z3a)]
\item
\label{zero:vanishing-B}
the equality \eqref{eq:top-key-equation}
holds true for all $k\geq r$ and $\lambda=(\lambda_1,\dots,\lambda_k)\in\mathbb{Y}$ with at most $k$ rows
such that $|\lambda|\leq n+r-2k$.
\end{enumerate}
\end{theorem}
\begin{proof}
The function $F\in\mathscr{P}$ can be written in the form
\begin{equation}
\label{eq:polynomial}
F(\lambda)= \sum_{k\geq 0}
\underbrace{\sum_{\Box_1,\dots,\Box_k\in
\lambda}
p_k(c_1,\dots,c_k)}_{F_k(\lambda)},
\end{equation}
where
$c_i:=\text{$\alpha$-$\content$}(\Box_i)$
and where $p_k$ is a symmetric polynomial in its $k$ arguments with the coefficients
in the polynomial ring $\mathbb{Q}[\gamma]$.
Furthermore, the degree bound \ref{zero:initial-bound} implies that
$p_k$ --- this time viewed as a polynomial in $k+1$ variables: $\gamma, c_1,\dots,c_k$ ---
is a polynomial of degree at most $n+r-2k$.
Let $p_k^{\ttop}$ denote its homogeneous part of degree $n+r-2k$.
\emph{The statement of the theorem would follow if we can show that $p_k$ is, in fact, of degree at most
$n+r-2k-1$ or, equivalently, $p_k^{\ttop}=0$.
We will show this claim by induction over $k\geq 0$;
assume that $p_{m}$ is of degree at most
$n+r-2m-1$ for each $m<k$.}
The curly bracket in \eqref{eq:polynomial} serves as the definition of the functions
$F_0,F_1,\dots$ on the set $\mathbb{Y}$ of Young diagrams. In the following we will investigate the quantity
--- which is analogous to \eqref{eq:top-key-equation} --- given by
\begin{equation}
\label{eq:mysterious-laurent}
[A^{n+r-2k}] \Delta_{\lambda_1} \cdots \Delta_{\lambda_k} F_m^{\sym}(\lambda_1,\dots,\lambda_k)
\end{equation}
for $(\lambda_1,\dots,\lambda_k)\in\mathbb{Y}$ and various choices of the variable $m\geq 0$.
\begin{itemize}[itemsep=2ex]
\item \emph{The case $m>k$.}
Firstly, observe that $\gamma$ as well as $\text{$\alpha$-$\content$}(\Box)$ (for any box $\Box\in\mathbb{N}^2$),
viewed as Laurent polynomials in the variable $A$, are of degree at most $1$, thus
$F_m(\lambda)$ is a Laurent polynomial of degree
bounded from above by the degree of the polynomial $p_m$ which is
at most $n+r-2m<n+r-2k$.
It follows that for $m>k$ we have that
\[ [A^{n+r-2k}] F_m^{\sym}(\lambda_1,\dots,\lambda_k) = 0 \]
hence, a fortiori,
the quantity
\eqref{eq:mysterious-laurent} vanishes as well.
\item \emph{The case $m<k$.}
From the inductive hypothesis, $F_m\in\mathscr{P}$ is an $\alpha$-polynomial function of degree at most $n+r-1$.
We apply \cref{coro:small-degree-killed} for $d:=n+r$.
In this way we proved that for $m<k$ the quantity \eqref{eq:mysterious-laurent} vanishes.
\item
\emph{The case $m=k$.}
We will revisit the case $m<k$ considered above and discuss the changes in the reasoning.
We study now the function $F_k$.
As the induction hypothesis cannot be applied,
the assumption of \cref{coro:small-degree-killed} is not satisfied for
$F:=F_k$ and $d:=n+r$
and we have to revisit its proof. We shall do it in the following.
The upper bound on the degree of the Laurent polynomial $f_l(\lambda)$ is weaker, given by
$n+r-2l$.
One can easily see that the only case in which \eqref{eq:tralala} could possibly be non-zero is for $l=k=m$,
thus
\begin{equation}
\label{eq:po-bezsennej-nocy-1}
\eqref{eq:mysterious-laurent} =
\Delta_{\lambda_1} \cdots \Delta_{\lambda_k}
\underbrace{[A^{n+r-2k}]
\sum_{i_1 < \dots < i_k} f_{k}(\lambda_{i_1},\dots,\lambda_{i_k})}_{(\diamondsuit)}.
\end{equation}
Clearly, the expression $(\diamondsuit)$ is directly related to the top-degree part of $F_k\in\mathscr{R}$ of rank $k$.
The latter can be computed explicitly by \cref{lem:minimal-rank} (applied for $r:=k$).
Thus
\begin{multline*}
\eqref{eq:po-bezsennej-nocy-1}= \\
\Delta_{\lambda_1} \cdots \Delta_{\lambda_k}
k!
\sum_{i_1 < \dots < i_k}
\sum_{1\leq x_1\leq \lambda_{i_1}} \cdots \sum_{1\leq x_k\leq \lambda_{i_k}}
\left. p^{\ttop}_{k}(x_{1},\dots,x_{k}) \right|_{\gamma:=-1} =
\\
k!\; \left. p_k^{\ttop}(\lambda_1+1,\dots,\lambda_k+1) \right|_{\gamma:=-1}.
\end{multline*}
\end{itemize}
\emph{This finishes our discussion of the quantity \eqref{eq:mysterious-laurent} for various choices of the variable $m$.
The conclusion is that
\begin{multline}
\label{eq:difference-gives-something}
[A^{n+r-2k}] \Delta_{\lambda_1} \cdots \Delta_{\lambda_k} F^{\sym}(\lambda_1,\dots,\lambda_k) = \\
k!\; \left. p_k^{\ttop}(\lambda_1+1,\dots,\lambda_k+1) \right|_{\gamma:=-1}
\end{multline}
holds true for an arbitrary Young diagram $(\lambda_1,\dots,\lambda_k)\in\mathbb{Y}$ with at most $k$ rows.
We will use this equality to finish the proof of the inductive step over the variable $k$.}
Notice that the polynomial
\begin{equation}
\label{eq:top-polynomial-top-model}
\left. p_k^{\ttop}(c_1,\dots,c_k) \right|_{\gamma:=-1}
\end{equation}
in which we used the substitution $\gamma:=-1$ is an
(inhomogeneous) symmetric polynomial in the indeterminates $c_1,\dots,c_k$ of degree at most $n+r-2k$.
\emph{In order to achieve our ultimate goal and show that
the homogeneous polynomial $p_k^{\ttop}$ is equal to zero
it is enough to show that the inhomogeneous
polynomial \eqref{eq:top-polynomial-top-model} is equal to zero.
We shall do it in the following.}
\begin{itemize}[itemsep=2ex]
\item
\emph{Firstly, consider the case $k<r$.} Assumption \ref{zero:laurent}
on the degrees of Laurent polynomials implies
that
\[ [A^{n+r-2k}] F^{\sym}(\lambda_1,\dots,\lambda_k)=0 \]
holds true for any Young diagram with at most $k$ rows. Thus
the left-hand side of \eqref{eq:difference-gives-something} is constantly equal to zero.
\cref{lem:lots-of-zeros} can be applied to the polynomial \eqref{eq:top-polynomial-top-model};
it follows that \eqref{eq:top-polynomial-top-model}
is the zero polynomial as required.
\item
\emph{Secondly, consider the case $k>r$.} Assumption \ref{zero:vanishing} implies that the left-hand side of
\eqref{eq:difference-gives-something} is equal to zero for $|\lambda|\leq n+r-2 k$,
therefore
$\left. p_k^{\ttop}(c_1,\dots,c_k) \right|_{\gamma:=-1}=0$
for all integers $c_1,\dots,c_k$ such that $c_1\geq \cdots \geq c_k \geq 1$ and $c_1+\dots+c_k\leq n+r-k$.
Thus \cref{lem:lots-of-zeros} implies that \eqref{eq:top-polynomial-top-model}
is the zero polynomial, as required.
The same proof works for the alternative assumption \ref{zero:vanishing-B} in the case $k\geq r$.
\item
\emph{Finally, consider the case $k=r$.}
Note that this case for the alternative assumption \ref{zero:vanishing-B} was already considered above and
the following discussion is not applicable.
We consider the set of Young diagrams $\lambda=(\lambda_1,\dots,\lambda_k)$ with the property that
$\lambda_1>\cdots>\lambda_k$.
For any Young diagram in this set
\begin{equation}
\label{eq:difference-hard-and-soft}
\Delta_{\lambda_1} \cdots \Delta_{\lambda_k} F^{\sym}(\lambda_1,\dots,\lambda_k) =
\Delta_{\lambda_1} \cdots \Delta_{\lambda_k} F(\lambda_1,\dots,\lambda_k)
\end{equation}
and the extension of the domain of $F$ by symmetrization is not necessary.
We can view $F$ as a polynomial
in the indeterminates $\lambda_1,\dots,\lambda_k$.
One can easily show that if two polynomials in the variables $\lambda_1,\dots,\lambda_k$
coincide on the above set of Young diagrams
then they must be equal; \eqref{eq:difference-gives-something} and \eqref{eq:difference-hard-and-soft}
imply therefore the following equality between
polynomials:
\begin{multline*} [A^{n+r-2k}] \Delta_{\lambda_1} \cdots \Delta_{\lambda_k} F(\lambda_1,\dots,\lambda_k) = \\
k!\; \left. p_k^{\ttop}(\lambda_1+1,\dots,\lambda_k+1) \right|_{\gamma:=-1}.\end{multline*}
Each application of a difference operator decreases the degree of a polynomial by one.
Together with assumption \ref{zero:topdegree} this implies that the left-hand side,
viewed as a polynomial in $\lambda_1,\dots,\lambda_k$, is
of degree at most $n-r-1$, so $p_k^{\ttop}\big|_{\gamma:=-1}$ must also be of degree at most $n-r-1$.
Assumption \ref{zero:vanishing} implies that the left-hand side of
\eqref{eq:difference-gives-something} is equal to zero for $|\lambda|\leq n-r-1$;
thus \cref{lem:lots-of-zeros} can be applied again to show that \eqref{eq:top-polynomial-top-model}
is the zero polynomial, as required.
\end{itemize}
\emph{This concludes the proof of the inductive step over the variable $k$.}
\end{proof}
\section{Approximate factorization property for the vertical arrow}
\label{sec:afp-vertical}
We recall that the conditional cumulants which correspond to the vertical arrow
in \eqref{eq:commutative-diagram-B2} between $\mathscr{R}$ and $\mathscr{R}_\otimes$
are denoted by $\kappaPointRow$.
\subsection{Closed formula for the cumulants $\kappaPointRow$}
Our goal in this section will be to find a closed formula
for the cumulant $\kappaPointRow(x_1,\dots,x_n)$
for $x_1,\dots,x_n\in\mathscr{R}$.
By linearity of cumulants we may
assume that for each value of the index $i$, the function $x_i\in \mathscr{R}$ has the form
\begin{equation}
\label{eq:what-is-x}
x_i(\lambda)= \sum_{j^{(i)}_1 < \dots < j^{(i)}_{m(i)}}
g_i\left(\lambda_{j^{(i)}_1},\dots,\lambda_{j^{(i)}_{m(i)}}\right).
\end{equation}
It follows that the pointwise product of functions is given by
\begin{multline}
\label{eq:monster-product}
( x_1 \cdots x_n) (\lambda) = \\
\sum_{j^{(1)}_1 < \dots < j^{(1)}_{m(1)}} \cdots \sum_{j^{(n)}_1 < \dots < j^{(n)}_{m(n)}}
\prod_{1\leq i\leq n} g_i\left(\lambda_{j^{(i)}_1},\dots,\lambda_{j^{(i)}_{m(i)}}\right).
\end{multline}
Let us fix some summand on the right-hand side.
We denote
\begin{equation}
\label{eq:sets-J}
J^{(i)}:=\{ j^{(i)}_1, \dots ,j^{(i)}_{m(i)}\}
\end{equation}
and consider the graph $\mathcal{G}$
with the vertex set $[n]=\{1,2,\dots,n\}$, whose elements correspond to the factors;
we draw an edge between the vertices $a$ and $b$ if the sets
$J^{(a)}$ and $J^{(b)}$
are not disjoint. The connected components of the graph $\mathcal{G}$
define a certain partition of the set $[n]$.
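For instance, for $n=3$ with $J^{(1)}=\{1,2\}$, $J^{(2)}=\{2,3\}$ and $J^{(3)}=\{5\}$,
the graph $\mathcal{G}$ has a single edge, namely the one connecting the vertices $1$ and $2$,
and its connected components yield the partition $\big\{\{1,2\},\{3\}\big\}$ of the set $[3]$.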
It follows that the right-hand side of \eqref{eq:monster-product}
can be written in the form
\begin{equation}
\label{eq:moment-cumulant-connected}
x_1 \cdots x_n = \sum_\nu \prod_{b\in\nu} \widetilde{\kappaPointRow}(x_i : i\in b),
\end{equation}
where the sum runs over all set-partitions $\nu$ of the set $[n]$ and
the product runs over the blocks of $\nu$.
In the above formula $\widetilde{\kappaPointRow}$ denotes the contribution of a prescribed
connected component of the graph $\mathcal{G}$, i.e.
\begin{multline}
\label{eq:cumulants-point-row-explicit}
\left( \widetilde{\kappaPointRow}(x_{i_1}, \dots, x_{i_l})\right)
(\lambda_1,\lambda_2,\dots) := \\
\sum_{j^{(i_1)}_1 < \dots < j^{(i_1)}_{m(i_1)}} \cdots \sum_{j^{(i_l)}_1 < \dots < j^{(i_l)}_{m(i_l)}}
\prod_{1\leq k\leq l}
g_{i_k}\left(\lambda_{j^{(i_k)}_1},\dots,\lambda_{j^{(i_k)}_{m(i_k)}}\right)
\end{multline}
is defined as the sum over such choices of the indices that
the restriction of the above graph $\mathcal{G}$ to the vertex set
$\{i_1,\dots,i_l\}$ is a connected graph.
It is a simple classical result (\emph{`the moment-cumulant formula'})
that the relation \eqref{eq:what-is-cumulant} between the cumulants
and moments can be inverted;
in our current setup this yields
\begin{equation}
\label{eq:moment-cumulant-connected-2}
x_1 \cdots x_n = \sum_\nu \prod_{b\in\nu} {\kappaPointRow}(x_i : i\in b),
\end{equation}
where the sum runs over set-partitions of $[n]$.
Comparison of \eqref{eq:moment-cumulant-connected} with \eqref{eq:moment-cumulant-connected-2}
shows that the
quantities $\widetilde{\kappaPointRow}$ fulfill the same recurrence relations
as the cumulants $\kappaPointRow$.
Since the system of equations \eqref{eq:moment-cumulant-connected-2} has a unique solution,
it follows that
\[ {\kappaPointRow}=\widetilde{\kappaPointRow} \]
and thus \eqref{eq:cumulants-point-row-explicit} gives an explicit formula for the cumulants $\kappaPointRow$.
In this way we have proved the following result.
\begin{lemma}
\label{lem:cumulants-concretely}
If $x_i\in\mathscr{R}$ are given by \eqref{eq:what-is-x}
then the corresponding cumulant
$\kappaPointRow(x_{i_1}, \dots, x_{i_r})$ is given by
the right-hand side of
\eqref{eq:cumulants-point-row-explicit}.
\end{lemma}
In the following lemma we shall use the notations from the above proof.
Also, for a graph $\mathcal{G}$ we denote by $c(\mathcal{G})$ the number of its connected components.
\begin{lemma}
\label{lem:connected-components}
Let a family $J^{(1)},\dots,J^{(n)}$ of sets \eqref{eq:sets-J} be given.
\begin{enumerate}
\item
Assume that $\mathcal{G}'$ is a subgraph of $\mathcal{G}$ with the same vertex set $[n]$.
Then
\[ \sum_C \left| \bigcup_{a\in C} J^{(a)}\right| \leq m(1)+\cdots+m(n)+ c(\mathcal{G}') -n, \]
where the sum on the left-hand side runs over the connected components of $\mathcal{G}'$.
\item
\[
\left| \bigcup_{1\leq a\leq n} J^{(a)} \right|
\leq m(1)+\cdots+m(n)+ c(\mathcal{G})-n\]
\end{enumerate}
\end{lemma}
\begin{proof}
The proof of the first part of the lemma is a simple induction with respect to
the number of the edges of the graph $\mathcal{G}'$ based on the inclusion-exclusion principle
$|A\cup B|=|A|+|B|-|A\cap B|$.
The second part follows from the first part by setting $\mathcal{G}':=\mathcal{G}$.
\end{proof}
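To illustrate the second part of the lemma: for $n=2$, $J^{(1)}=\{1,2\}$ and $J^{(2)}=\{2,3\}$
(so that $m(1)=m(2)=2$) the graph $\mathcal{G}$ consists of a single edge, hence $c(\mathcal{G})=1$,
and indeed $\big|J^{(1)}\cup J^{(2)}\big|=3=m(1)+m(2)+c(\mathcal{G})-n$, so the bound is attained.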
\subsection{The vertical arrow has approximate factorization property}
\begin{proposition}
\label{lem:vertical-arrow-R}
The vertical arrow from \eqref{eq:commutative-diagram-B2} has approximate factorization property.
\end{proposition}
\begin{proof}
Let $x_1,\dots,x_n\in\mathscr{R}$ be of the form \eqref{eq:what-is-x}.
\cref{lem:cumulants-concretely}
gives explicitly the kernel $(f_r)$ for the cumulant
\[ \kappaPointRow(x_{1}, \dots, x_{n})
=
\sum_{r\geq 0} \sum_{i_1 < \dots < i_r} f_{r}(\lambda_{i_1},\dots,\lambda_{i_r}).
\]
More specifically, the summand on the right-hand side
for some specified value of $r$ corresponds to the summands on the right-hand side of
\eqref{eq:cumulants-point-row-explicit} for which
\[ | J^{(1)} \cup \cdots \cup J^{(n)} | = r.\]
We keep notations from \eqref{eq:what-is-x}.
Assume that $x_i\in\mathscr{R}$ is of degree at most $d_i$;
in other words we assume that the corresponding kernel
$g_i$ takes only values in Laurent polynomials of degree at most
$d_i - 2 m(i)$. It follows that the function $f_r$ takes values in Laurent
polynomials of degree at most
\[ \sum_{1\leq i\leq n} \left( d_i - 2 m(i) \right).\]
On the other hand, since only the summands for which the graph $\mathcal{G}$ is connected
(that is, $c(\mathcal{G})=1$) contribute to the cumulant,
the second part of \cref{lem:connected-components} shows that a non-zero
contribution can be obtained only for the values of $r$ which fulfill the bound
\[r= | J^{(1)} \cup \cdots \cup J^{(n)} | \leq m(1)+\cdots+m(n)+ 1-n.\]
It follows that $\kappaPointRow(x_{1}, \dots, x_{n})$ is a row-function
of degree at most
\[ \sum_{1\leq i\leq n} \left( d_i - 2 m(i)\right) + 2 r \leq
\left( \sum_{1\leq i\leq n} d_i \right) - 2 (n-1), \]
which concludes the proof.
\end{proof}
\section{Cumulants for the long diagonal arrow}
\label{sec:cumulants-long-diagonal}
Recall that we denote by $\kappaDisjointRow$ the cumulants which correspond to the long diagonal arrow
in \eqref{eq:commutative-diagram-B2} between $\mathscr{P}_\bullet$ and $\mathscr{R}_\otimes$.
\subsection{Vanishing on small Young diagrams}
\begin{lemma}
\label{property:cool-vanishing}
Assume that $a,b\geq 0$ are integers and
$F,G\in\mathscr{R}$ are row functions such that
\begin{align*}
F(\lambda)&=0 \qquad \text{holds for each $\lambda\in\mathbb{Y}$ such that } |\lambda|<a, \\
\nonumber
G(\lambda)&=0 \qquad \text{holds for each $\lambda\in\mathbb{Y}$ such that } |\lambda|<b.
\intertext{\indent Then}
\nonumber
(F\otimes G)(\lambda)&=0 \qquad \text{holds for each $\lambda\in\mathbb{Y}$ such that } |\lambda|<a+b.
\end{align*}
\end{lemma}
\begin{proof}
Let $(f_r)$ be the kernel of $F$, see \eqref{eq:row-functions}.
The right-hand side of \eqref{eq:row-functions}
involves only the values of $f_{r}$ for $r\leq \ell(\lambda)$;
thus the collection of equalities \eqref{eq:row-functions}
can be viewed as an upper-triangular system of linear equations.
It follows immediately that
\[ f_r(x_1,\dots,x_r) =0 \]
holds true for all $r\geq 0$ and all non-negative integers $x_1,\dots,x_r$
such that
\[ x_1+\cdots+x_r<a.\]
An analogous property is fulfilled by the kernel of the row function
$G$ (with the variable $a$ replaced by $b$).
From the very definition of the disjoint product it follows that the kernel
of $F\otimes G$ also fulfills this property (with the variable $a$ replaced by $a+b$).
\end{proof}
\subsection{M\"obius invertion}
It is easy to show from the very definition \eqref{eq:what-is-cumulant} that
a cumulant
\[ \kappa(X_1,\dots,X_n) \]
is a linear combination (with rational coefficients)
of the expressions of the form
\begin{equation}
\label{eq:sample-product}
\prod_{b\in \nu} \mathbb{E}\left( \prod_{i\in b} X_i \right)
\end{equation}
over set-partitions of the set $[n]$.
We will show now that if $n\geq 2$ then the sum of these coefficients is equal to zero.
Indeed, if we set $X_1=\cdots=X_n=1$ to be the unit of the algebra
then from the very definition \eqref{eq:what-is-cumulant}
of the cumulants it follows that the corresponding cumulant
$\kappa(1,\dots,1)=0$ vanishes while each product \eqref{eq:sample-product} is equal to $1$.
By specifying the conditional expectation $\mathbb{E}=\id\colon\mathscr{P}_\bullet\to\mathscr{R}$
to be the long diagonal arrow in \eqref{eq:commutative-diagram-B2},
we have proved the following result.
\begin{lemma}[M\"obius invertion]
\label{lem:cumulants-expression}
For any partitions $\pi_1,\dots,\pi_l$ the function
\begin{equation}
\label{eq:cond-kumu}
\kappaDisjointRow(\mathbb{C}h_{\pi_1},\dots,\mathbb{C}h_{\pi_l})
\end{equation}
is a linear combination (with rational coefficients) of expressions of the form
\begin{equation*}
\bigotimes_{b\in \nu} \mathbb{C}h_{\prod_{i\in b} \pi_i}
\end{equation*}
over set-partitions $\nu$ of the set $[l]$. For example, in the case $l=3$
the function \eqref{eq:cond-kumu} is a linear combination of the following
five expressions:
\begin{multline*}
\mathbb{C}h_{\pi_1}\otimes \mathbb{C}h_{\pi_2} \otimes \mathbb{C}h_{\pi_3}, \quad
\mathbb{C}h_{\pi_1 \pi_2} \otimes \mathbb{C}h_{\pi_3}, \quad
\mathbb{C}h_{\pi_1 \pi_3} \otimes \mathbb{C}h_{\pi_2}, \\
\mathbb{C}h_{\pi_2 \pi_3} \otimes \mathbb{C}h_{\pi_1}, \quad
\mathbb{C}h_{\pi_1 \pi_2 \pi_3}.
\end{multline*}
Furthermore, if $l\geq 2$ then the sum of the coefficients in this linear combination is equal to $0$.
The above results hold true also for the cumulants $\kappaDisjointPoint$; in the latter case
the product $\otimes$ should be replaced by the usual pointwise multiplication of functions
on the set $\mathbb{Y}$ of Young diagrams.
\end{lemma}
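To make the last statement concrete: with the standard moment--cumulant relations encoded in \eqref{eq:what-is-cumulant},
in the case $l=3$ the coefficients of the five expressions listed above are (in the same order)
$2$, $-1$, $-1$, $-1$ and $1$, and they indeed sum up to zero.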
\subsection{Conditional cumulants for the diagonal arrow}
\begin{proposition}
\label{lem:disjoint-row-vanish-nice}
For any partitions $\pi_1,\dots,\pi_n$
\[ \kappaDisjointRow(\mathbb{C}h_{\pi_1},\dots,\mathbb{C}h_{\pi_n})(\lambda)=0 \]
holds true for any Young diagram $\lambda$ such that
$|\lambda|< |\pi_1|+\dots+|\pi_n|$.
\end{proposition}
\begin{proof}
This result is a straightforward consequence of
\cref{lem:cumulants-expression} and \cref{property:cool-vanishing}.
\end{proof}
\section{Proof of the second main result: approximate factorization property}
\label{sec:key-tool-proof}
\subsection{Conditional cumulants for a commutative diagram}
\begin{lemma}[\cite{Brillinger}]
\label{lem:iterated-cumulants}
Assume that $\mathcal{A}$, $\mathcal{B}$ and $\mathcal{C}$ are commutative unital algebras
and let $\condExp{\mathcal{A}}{\mathcal{B}}$, $\condExp{\mathcal{B}}{\mathcal{C}}$ and $\condExp{\mathcal{A}}{\mathcal{C}}$
be unital maps between them such that the following diagram commutes:
\[ \begin{tikzpicture}[node distance=2cm, auto, baseline=-5ex]
\node (A) {$\mathcal{A}$};
\node (B) [right of=A] {$\mathcal{B}$};
\node (C) [below of=B]{$\mathcal{C}$};
\draw[->] (A) to node {$\condExp{\mathcal{A}}{\mathcal{B}}$} (B);
\draw[->] (B) to node {$\condExp{\mathcal{B}}{\mathcal{C}}$} (C);
\draw[->] (A) to node [swap]{$\condExp{\mathcal{A}}{\mathcal{C}}$} (C);
\end{tikzpicture}
\]
Then
\[ \condKumu{\mathcal{A}}{\mathcal{C}}(x_1,\dots,x_n) = \sum_{\nu\in\partitions(n)}
\condKumu{\mathcal{B}}{\mathcal{C}} \Big( \condKumu{\mathcal{A}}{\mathcal{B}}(x_i : i\in b) : b \in \nu \Big).
\]
\end{lemma}
\begin{example}
\begin{align*}
\condKumu{\mathcal{A}}{\mathcal{C}}(x_1) &= \condKumu{\mathcal{B}}{\mathcal{C}} \Big( \condKumu{\mathcal{A}}{\mathcal{B}}(x_1) \Big), \\
\condKumu{\mathcal{A}}{\mathcal{C}}(x_1,x_2) &= \condKumu{\mathcal{B}}{\mathcal{C}} \Big( \condKumu{\mathcal{A}}{\mathcal{B}}(x_1,x_2) \Big)+
\condKumu{\mathcal{B}}{\mathcal{C}} \Big( \condKumu{\mathcal{A}}{\mathcal{B}}(x_1), \condKumu{\mathcal{A}}{\mathcal{B}}(x_2) \Big).
\end{align*}
\end{example}
\subsection{Proof of the second main result}
We are now ready to show the proof of \cref{theo:factorization-of-characters}.
For the reader's convenience we will restate this theorem in the following form.
\begin{theorem}[Reformulation of \cref{theo:factorization-of-characters}]
For any partitions $\pi_1,\dots,\pi_l$ the conditional cumulant
\[ F:=\kappaDisjointPoint(\mathbb{C}h_{\pi_1},\ldots,\mathbb{C}h_{\pi_l})\in\mathscr{P} \]
is of degree at most
\[ |\pi_1|+\cdots+|\pi_l|+\ell(\pi_1)+\cdots+\ell(\pi_l)-2(l-1).\]
\end{theorem}
\begin{proof}
We use induction over $l$. For the induction base $l=1$
\[ F=\kappaDisjointPoint(\mathbb{C}h_{\pi_1})=\mathbb{C}h_{\pi_1} \]
and there is nothing to prove.
In the following we shall
consider the case $l\geq 2$; we assume that the statement of the theorem holds true for all $l'<l$.
\newcommand{j}{j}
\newcommand{J}{J}
We start with an observation that if for some value of the index $i$ we have
$\pi_i=\emptyset$ then $\mathbb{C}h_{\pi_i}=1$ is the unit in $\mathscr{P}$ thus
(for $l\geq 2$) the corresponding cumulant vanishes
by the very definition \eqref{eq:what-is-cumulant}:
\[ F=\kappaDisjointPoint(\mathbb{C}h_{\pi_1},\dots,1,\dots,\mathbb{C}h_{\pi_l})=0 \]
and the claim holds true trivially. From now on we shall assume that
the partitions $\pi_1,\dots,\pi_l$ are all non-empty.
We denote
\[ d:= |\pi_1|+\cdots+|\pi_l|+\ell(\pi_1)+\cdots+\ell(\pi_l).\]
We will use a nested induction over $J\in\{0,\dots,2l-2\}$ and show that
$F$ is of degree at most $d-J$.
This result (for the special choice $J=2l-2$)
would finish the proof of the inductive step with respect
to the variable $l$ and thus would conclude the proof.
We start by noticing that
M\"obius invertion (\cref{lem:cumulants-expression} in the alternative formulation,
for the cumulants $\kappaDisjointPoint$)
implies that $F\in\mathscr{P}$ is of degree at most
$d$ and thus the induction base $J=0$ holds trivially true.
\emph{The inductive step over $J$.}
The inductive hypothesis with respect to the variable $J$
states that $F$ is of degree (at most) $d-J$
for some choice of $J\in\{0,\dots,2l-3\}$.
We define $j\in\{0,\dots,l-2\}$ by setting
\begin{align*}
J &= \begin{cases}
2j &\text{if $J$ is even},\\
2j+1 &\text{if $J$ is odd}
\end{cases}
\intertext{and set}
n & := |\pi_1|+\cdots+|\pi_l|-j \geq 2,\\
r &:= \ell(\pi_1)+\cdots+\ell(\pi_l)+j-J \geq 1.
\intertext{In this way}
n+r &= d- J, \\
n-r &\geq |\pi_1|+\cdots+|\pi_l| - \ell(\pi_1)- \cdots- \ell(\pi_l).
\end{align*}
Our strategy is to apply \cref{lem:keylemma}
either:
\begin{itemize}
\item in the original formulation (in the case $j=0$), or,
\item in the alternative formulation (in the case $j\geq 1$)
\end{itemize}
for the above choice of $n$ and $r$.
We first check that its assumptions are fulfilled.
\emph{Assumption \ref{zero:initial-bound}.}
This assumption is just the inductive hypothesis.
\emph{Assumption \ref{zero:topdegree}.}
We have to verify this assumption only in the case $j=0$.
By M\"obius invertion (\cref{lem:cumulants-expression}) and
\cref{prop:vershik-kerov-jack-character}
it follows that
\begin{equation}
\label{eq:VK-applied}
\mathbb{Y} \ni(\lambda_1,\dots,\lambda_m) \mapsto F(\lambda_1,\dots,\lambda_m)\in\mathbb{Q}\left[A,A^{-1}\right]
\end{equation}
is a priori a polynomial of degree $|\pi_1|+\cdots+|\pi_l|$ and its homogeneous top-degree part is equal to
some multiple of
\[ A^{|\pi_1|+\cdots+|\pi_l|-\ell(\pi_1)-\cdots-\ell(\pi_l)}\
p_{\pi_1\cdots\pi_l}(\lambda_1,\dots,\lambda_m). \]
However, since $l\geq 2$, the second part of \cref{lem:cumulants-expression}
implies that this multiple is actually equal to zero.
In other words, \eqref{eq:VK-applied} is a polynomial of degree at most
$|\pi_1|+\cdots+|\pi_l|-1=n-1$, as required.
\emph{Assumption \ref{zero:laurent}.}
M\"obius invertion (\cref{lem:cumulants-expression}) and
\cref{prop:degree-laurent}
imply that for any Young diagram $\lambda$
the evaluation $F(\lambda)$ is a Laurent polynomial of degree at most
\[|\pi_1|+\cdots+|\pi_l|-\ell(\pi_1)-\cdots-\ell(\pi_l) < n-r+1\]
as required.
\emph{Assumptions \ref{zero:vanishing} and \ref{zero:vanishing-B}.}
Our strategy is to consider the following simplified version of the
commutative diagram \eqref{eq:commutative-diagram-B2}.
\begin{equation}
\label{eq:commutative-diagram-B2-simple}
\begin{tikzpicture}[node distance=2cm, auto, baseline=-5ex]
\node (A) {$\mathscr{P}_\bullet$};
\node (B) [right of=A] {$\mathscr{P}$};
\node (C) [below of=B]{$\mathscr{R}_\otimes$};
\draw[->] (A) to node {$\id$} (B);
\draw[->] (B) to node {$\id$} (C);
\draw[->] (A) to node [swap]{$\id$} (C);
\end{tikzpicture}
\end{equation}
Recall that the conditional cumulants which correspond to
the horizontal arrow are denoted by $\kappaDisjointPoint$;
the ones which correspond to the vertical arrow are denoted by $\kappaPointRow$;
and the ones which correspond to the diagonal arrow are denoted by $\kappaDisjointRow$.
Since $\kappaPointRow(x)=x$ it follows that
\begin{multline}
\label{eq:diagonal-vertical-horizontal}
F=\kappaDisjointPoint(\mathbb{C}h_{\pi_1},\dots,\mathbb{C}h_{\pi_l})=
\kappaPointRow \big( \kappaDisjointPoint(\mathbb{C}h_{\pi_1},\dots,\mathbb{C}h_{\pi_l}) \big) = \\
\kappaDisjointRow(\mathbb{C}h_{\pi_1},\dots,\mathbb{C}h_{\pi_l}) -
\sum_{
\nu\neq \mathbf{1}}
\kappaPointRow \Big( \kappaDisjointPoint(\mathbb{C}h_{\pi_i} : i\in b) : b \in \nu \Big),
\end{multline}
where the last equality follows from Brillinger's formula (\cref{lem:iterated-cumulants});
the sum on the right-hand side runs over set-partitions of $[l]$
which are different from the maximal partition $\mathbf{1}=\big\{ \{1,\dots,l\} \big\}$.
We will substitute the summands which contribute to the right-hand side into \eqref{eq:top-key-equation}
and we will investigate the resulting expressions.
Firstly, \cref{lem:disjoint-row-vanish-nice} shows that
$\big( \kappaDisjointRow(\mathbb{C}h_{\pi_1},\dots,\mathbb{C}h_{\pi_l})\big)(\lambda)=0$
for all $\lambda\in\mathbb{Y}$ such that $|\lambda|\leq |\pi_1|+\dots+|\pi_l|-1$ thus
for any integer $k\geq r$
\begin{equation}
\label{eq:zerozero}
\Delta_{\lambda_1} \cdots \Delta_{\lambda_k}
\big( \kappaDisjointRow(\mathbb{C}h_{\pi_1},\dots,\mathbb{C}h_{\pi_l})\big)^{\sym} (\lambda) =0
\end{equation}
for all $\lambda\in\mathbb{Y}$ such that
\[|\lambda| \leq |\pi_1|+\dots+|\pi_l|-1-k = (n+r-2k) + (j-1)+\underbrace{( k-r)}_{\geq 0}.\]
Secondly, let us fix the value of the set-partition $\nu\neq \mathbf{1}$ and
let us investigate the corresponding summand on the right-hand side of \eqref{eq:diagonal-vertical-horizontal}.
From the inductive hypothesis over the variable $l$ it follows for each block $b\in\nu$ that
the cumulant $\kappaDisjointPoint(\mathbb{C}h_{\pi_i} : i\in b)\in\mathscr{P}$
is of degree at most
\[ \left(\sum_{i\in b} |\pi_i|+\ell(\pi_i) \right)-2( |b|-1).\]
Thus the approximate factorization property for the vertical arrow
(\cref{lem:vertical-arrow-R}) implies that
\begin{equation*}
G:= \kappaPointRow \Big( \kappaDisjointPoint(\mathbb{C}h_{\pi_i} : i\in b) : b \in \nu \Big)
\in\mathscr{R}
\end{equation*}
is a row function of degree (at most)
\begin{multline*}
\sum_{b\in\nu } \left[
\left(\sum_{i\in b} |\pi_i|+\ell(\pi_i) \right)-2( |b|-1)
\right] +2 -2 |\nu| =\\
d-2(l-1) \leq (d-J)-1 =(n+r)-1.
\end{multline*}
The latter bound on the degree of $G$ and \cref{coro:small-degree-killed} imply that
\begin{equation}
\label{eq:zerozero2}
[A^{n+r-2k}] \Delta_{\lambda_1} \dots \Delta_{\lambda_k}
G(\lambda_1,\dots,\lambda_k)=0
\end{equation}
for an arbitrary choice of the integers $\lambda_1,\dots,\lambda_k\geq 0$.
From \eqref{eq:zerozero} and \eqref{eq:zerozero2} it follows that:
\begin{itemize}
\item if $j=0$ then condition \ref{zero:vanishing} holds true;
\item if $j\geq 1$ then the stronger condition \ref{zero:vanishing-B} holds true.
\end{itemize}
\emph{Conclusion.}
In this way we verified that the assumptions of \cref{lem:keylemma} are
indeed fulfilled. It follows therefore that
$F$ is of degree at most $d-J-1$.
This concludes the proof of the induction step over the variable $J$.
\end{proof}
\section{$\mathbb{C}htt_n\in\mathscr{P}$ is of degree $n+1$}
\label{sec:degree-n+1}
The current section is devoted to the proof of the following
result which will be essential for the proof of \cref{theo:second-main-bis}.
\begin{proposition}
\label{prop:candidate-is-polynomial}
For each integer $n\geq 1$ the function
$\mathbb{C}htt_n\in\mathscr{P}$ (cf.~\eqref{eq:top-top-top})
is an $\alpha$-polynomial function of degree at most $n+1$.
\end{proposition}
The proof is split into two parts:
first in \cref{sec:is-a-polynomial} we will show that $\mathbb{C}htt_n\in\mathscr{P}$
and then in \cref{sec:degree-bound-chttn} we will show the degree bound.
\subsection[]{The first part of the proof of \cref{prop:candidate-is-polynomial}: $\mathbb{C}htt_n\in\mathscr{P}$}
\label{sec:is-a-polynomial}
\newcommand{\cumulantB}[2]{\mathcal{K}_{#1,#2}}
\newcommand{\cumulant}[1]{\mathcal{K}_{#1}}
\newcommand{\moment}[1]{\mathcal{M}_{#1}}
Our proof of the claim that $\mathbb{C}htt_n\in\mathscr{P}$
will be based on the following result
(the proof of which is postponed to \cref{sec:missing-proof-of-polynomial}).
\begin{proposition}
\label{prop:something-simpler-is-polynomial}
For any integer $n\geq 1$ and permutation $\pi\in\Sym{n}$
the function
on $\mathbb{Y}$ given by
\begin{equation}
\label{eq:cumulant-definition}
\cumulant{\pi} :=
(-1)^{|C(\pi)|}
\sum_{\substack{
\sigma_1,\sigma_2\in \Sym{n}, \\ \sigma_1 \sigma_2= \pi, \\
\langle \sigma_1,\sigma_2 \rangle \text{ is transitive}
}}
\mathfrak{N}_{\sigma_1,\sigma_2}
\end{equation}
is an $\alpha$-polynomial function.
\end{proposition}
For an integer $l$ we consider
the homogeneous part of degree $l$ of the
anisotropic Stanley polynomial for $\cumulant{\pi}$.
This new anisotropic Stanley polynomial defines a function on $\mathbb{Y}$
which is explicitly given by
\begin{equation}
\label{eq:polypolypoly}
\cumulant{\pi}^l :=
(-1)^{|C(\pi)|}
\sum_{\substack{
\sigma_1,\sigma_2\in \Sym{n}, \\ \sigma_1 \sigma_2= \pi, \\
|C(\sigma_1)|+|C(\sigma_2)|=l,\\
\langle \sigma_1,\sigma_2 \rangle \text{ is transitive}
}}
\mathfrak{N}_{\sigma_1,\sigma_2}.
\end{equation}
On the other hand, by \cref{prop:generate-the-same} the function $\cumulant{\pi}\in\mathscr{P}$ can be expressed as a
polynomial in the indeterminates $\gamma,\mathcal{S}_2,\mathcal{S}_3,\dots$.
To these indeterminates we associate the degrees as in \eqref{eq:filtration2};
the corresponding
homogeneous part of degree $l$ of this polynomial
is clearly an element of $\mathscr{P}$.
By \cref{eq:S-is-for-Stanley} this polynomial is equal to $\cumulant{\pi}^l$.
In this way we proved that $\cumulant{\pi}^l\in\mathscr{P}$.
Since $\mathbb{C}htt_n$ is a linear combination of such functions,
this completes the proof of the claim that $\mathbb{C}htt_n\in\mathscr{P}$.
\subsection{Proof of \cref{prop:something-simpler-is-polynomial}}
\label{sec:missing-proof-of-polynomial}
\begin{proof}[Proof of \cref{prop:something-simpler-is-polynomial}]
If $X$ is an arbitrary set, we denote by $\partitions(X)$ the set of all set partitions of $X$.
Let $\pi\in\Sym{n}$ be a fixed permutation.
We denote
\[ \partitions_\pi:=\{ P \in \partitions([n]) : P \geq C(\pi)\}\]
which is the set of the partitions $P$ of the underlying set $[n]$ which have the property
that each cycle $c\in C(\pi)$
is contained in one of the blocks of the partition $P$.
We define
\begin{equation}
\label{eq:moment-cumulant}
\moment{\pi}(\lambda) := \sum_{P\in\partitions_\pi} \prod_{B\in P} \cumulant{\pi|_{B}}(\lambda),
\end{equation}
where
the product runs over the blocks of the partition $P$ and
$\pi|_{B}: B \to B$ denotes the restriction of the permutation $\pi$ to the set $B\subseteq [n]$.
It is not hard to see that
\begin{equation}
\label{eq:moment-cumulant-nice}
\moment{\pi}(\lambda) =
(-1)^{|C(\pi)|}
\sum_{\substack{\sigma_1,\sigma_2\in \Sym{n}, \\ \sigma_1 \sigma_2= \pi}}
\mathfrak{N}_{\sigma_1,\sigma_2}(\lambda)
\end{equation}
(the difference between the right-hand side of \eqref{eq:moment-cumulant-nice} and \eqref{eq:cumulant-definition}
lies in the requirement on transitivity);
indeed, the summands on the right-hand side of \eqref{eq:moment-cumulant-nice} can be pigeonholed according
to the set of orbits of the group $\langle \sigma_1,\sigma_2\rangle$ and each such class of summands corresponds to
an appropriate summand on the right-hand side of \eqref{eq:moment-cumulant}.
The function $\cumulant{\pi}$ (respectively, the function $\moment{\pi}$)
depends only on the conjugacy class of the permutation $\pi$.
Since such conjugacy classes are in a bijective correspondence with the integer partitions,
we may index the family $(\cumulant{\pi})$ (respectively, the family $(\moment{\pi})$)
by $\pi$ being an integer partition.
With this perspective, \eqref{eq:moment-cumulant} can be viewed as the following system of
equalities:
\begin{equation}
\label{eq:moment-cumulant-2}
\left\{
\begin{aligned}
\moment{i} &= \cumulant{i} & \text{for all $i\geq 1$}, \\
\moment{i,j} &= \cumulant{i,j}+ \cumulant{i} \cumulant{j}
& \text{for all $i\geq j\geq 1$}, \\
\moment{i,j,k} &= \cumulant{i,j,k}+ \cumulant{i} \cumulant{j,k}
+ \cumulant{j} \cumulant{i,k}
+ \cumulant{k} \cumulant{i,j} + \hspace{-7ex}
\\ &
+ \cumulant{i} \cumulant{j} \cumulant{k}
& \text{for all $i\geq j\geq k \geq 1$},\\
& \vdots
\end{aligned}
\right.
\end{equation}
If we view $(\cumulant{\pi})$ as variables, \eqref{eq:moment-cumulant-2}
becomes an upper-triangular system of algebraic equations which can be solved.
This shows that each $\cumulant{\sigma}$ can be expressed as a polynomial in the variables
$(\moment{\pi})$.
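For instance, solving the first equations of \eqref{eq:moment-cumulant-2} explicitly gives
\begin{align*}
\cumulant{i} &= \moment{i}, \\
\cumulant{i,j} &= \moment{i,j}-\moment{i}\,\moment{j}, \\
\cumulant{i,j,k} &= \moment{i,j,k}-\moment{i}\,\moment{j,k}-\moment{j}\,\moment{i,k}-\moment{k}\,\moment{i,j}
+2\,\moment{i}\,\moment{j}\,\moment{k}.
\end{align*}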
Thus for our purposes it is enough to show that
$\moment{\pi}$ is an $\alpha$-polynomial function.
This is exactly the content of \cref{lem:non-transitive-polynomial} which we prove below.
\end{proof}
\begin{lemma}
\label{lem:non-transitive-polynomial}
For each integer $n\geq 1$ and permutation $\pi\in\Sym{n}$
the function
\[
\lambda\mapsto \sum_{\substack{
\sigma_1,\sigma_2\in \Sym{n} \\
\sigma_1 \sigma_2=\pi \\
}}
\ \mathfrak{N}_{\sigma_1,\sigma_2}(\lambda)
\]
is an $\alpha$-polynomial function.
\end{lemma}
\begin{proof}
The specialization $A=1$ of the Jack characters coincides with the usual characters
of the symmetric groups
$\lambda\mapsto \mathbb{C}h^{\alpha=1}_\pi(\lambda)$
for which several convenient results are known.
Firstly, the following closed formula for the characters of the symmetric groups is known,
cf.~\cite[Theorem 2]{FeraySniady2011a}:
\begin{multline*}
\mathbb{C}h^{\alpha=1}_\pi =
\sum_{\substack{\sigma_1,\sigma_2\in \Sym{n}, \\ \sigma_1 \sigma_2= \pi}}
(-1)^{\sigma_1} N_{\sigma_1,\sigma_2} = \\
(-1)^{|C(\pi)|} \sum_{\substack{\sigma_1,\sigma_2\in \Sym{n}, \\ \sigma_1 \sigma_2= \pi}}
(-1)^{|C(\sigma_2)|} N_{\sigma_1,\sigma_2}
\end{multline*}
where both sides of the above equality should be viewed as functions on $\mathbb{Y}$.
Secondly, there exists a multivariate polynomial $H\in\mathbb{Q}[s_2,\dots,s_{2n}]$ with the property that
\[ \mathbb{C}h^{\alpha=1}_\pi = H(S_2,S_3,\dots,S_{2n}), \]
where $S_2,\dots,S_{2n}$ are the isotropic functionals of shape \eqref{eq:what-is-isotropic-s};
the existence and the explicit form of this polynomial $H$ were discussed in
\cite{DolegaFeraySniady2008}.
By combining the above two results it follows that
\begin{align}
\label{eq:alpha-1-magic-sum}
\sum_{\substack{\sigma_1,\sigma_2\in \Sym{n}, \\ \sigma_1 \sigma_2= \pi}}
(-1)^{|C(\sigma_2)|} N_{\sigma_1,\sigma_2}
&= (-1)^{|C(\pi)|} H(S_2,S_3,\dots,S_{2n}),
\intertext{where both sides should be viewed as functions on $\mathbb{Y}$.
We claim that}
\label{eq:flaming-lips}
\sum_{\substack{\sigma_1,\sigma_2\in \Sym{n}, \\ \sigma_1 \sigma_2= \pi
} }
\mathfrak{N}_{\sigma_1,\sigma_2}
&= (-1)^{|C(\pi)|} H(\mathcal{S}_2,\mathcal{S}_3,\dots,\mathcal{S}_{2n});
\end{align}
indeed \cref{lem:stanley-polunomial-isotropic-and-anisotropic-the-same} and \cref{eq:S-is-for-Stanley}
imply that isotropic Stanley polynomials for both sides of
\eqref{eq:alpha-1-magic-sum} are equal to the corresponding
anisotropic Stanley polynomials of both sides of \eqref{eq:flaming-lips}.
An application of \cref{prop:generate-the-same} implies that the right-hand side of \eqref{eq:flaming-lips},
and hence also its left-hand side, is an $\alpha$-polynomial function, as required.
\end{proof}
\subsection{The second part of the proof of \cref{prop:candidate-is-polynomial}: degree bound on $\mathbb{C}htt_n\in\mathscr{P}$}
\label{sec:degree-bound-chttn}
We are ready to prove the remaining part of \cref{prop:candidate-is-polynomial}, namely the degree bound.
We already proved in \cref{sec:is-a-polynomial} that $\mathbb{C}htt_n\in\mathscr{P}$;
it follows therefore by \cref{prop:generate-the-same} that there exists a
multivariate polynomial $H(\gamma,s_2,s_3,\dots)$ with the property that
\begin{equation}
\label{eq:ch-in-s}
\mathbb{C}htt_n = H(\gamma,\mathcal{S}_2,\mathcal{S}_3,\dots),
\end{equation}
where we use the usual substitution \eqref{eq:gamma} for the variable $\gamma$.
We associate to the indeterminates $\gamma,s_2,s_3,\dots$ the degrees by setting
\begin{equation}
\label{eq:gradacja-ZZtop}
\left\{ \begin{aligned} \degg \gamma &= 1, \\
\degg s_n &= n, \qquad \text{for }n\geq 2.
\end{aligned} \right.
\end{equation}
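For instance, with this convention the monomial $\gamma^{2}s_{3}s_{2}$ is of degree $2+3+2=7$.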
\emph{We will prove below that with respect to this choice of degrees, $H$
is a homogeneous polynomial of degree $n+1$.}
Indeed, let $H_d(\gamma,s_2,s_3,\dots)$ denote the homogeneous part of
the polynomial $H$ of degree $d$.
It follows by \cref{eq:S-is-for-Stanley} that the anisotropic Stanley polynomial
which corresponds to the specialization
\begin{equation}
\label{eq:yes-you-are-special}
H_d(\gamma,\mathcal{S}_2,\mathcal{S}_3,\dots)
\end{equation}
is homogeneous of degree $d$;
thus the anisotropic Stanley polynomial for \eqref{eq:yes-you-are-special}
is the homogeneous part of degree $d$
of the anisotropic Stanley polynomial for \eqref{eq:ch-in-s}.
On the other hand,
from the way $\mathbb{C}htt_n$ was defined in \eqref{eq:top-top-top}
and by \cref{lem:stanley-polunomial-isotropic-and-anisotropic-the-same}
it follows that the anisotropic Stanley polynomial for $\mathbb{C}htt_n$ is homogeneous
of degree $n+1$.
By combining the above two observations it follows that if $d\neq n+1$
then the anisotropic Stanley polynomial for \eqref{eq:yes-you-are-special} must be the zero polynomial.
Finally, it follows that $H_d=0$ for $d\neq n+1$.
This completes the proof that $H$ is homogeneous of degree $n+1$.
The degree bound on the polynomial $H$ implies, by \cref{prop:generate-the-same}, the desired
bound on the degree of $\mathbb{C}htt_n$, regarded as an element of the filtered algebra $\mathscr{P}$.
This concludes the proof of \cref{prop:candidate-is-polynomial}.
\section{Proof of the first main result: top-degree of Jack characters}
\label{sec:proof}
\begin{proof}[Proof of \cref{theo:second-main-bis}]
We define functions $F$, $F_1$, and $F_2$ by
\[ F:= \underbrace{\mathbb{C}h_n}_{F_1:=} - \underbrace{\mathbb{C}htt_n}_{F_2:=}; \]
we will show that $F$ fulfills the assumptions of \cref{lem:keylemma} for $r=1$.
\emph{Condition \ref{zero:initial-bound}.}
An analogue of condition \ref{zero:initial-bound} is fulfilled both for $F_1$
(cf.~\cref{thm:degree-of-Jack-character})
as well as for $F_2$
(cf.~\cref{prop:candidate-is-polynomial}).
It follows that \ref{zero:initial-bound} is satisfied for $F=F_1-F_2$, as required.
\emph{Condition \ref{zero:topdegree}.}
The polynomial
\begin{equation}
\label{eq:VK-chtt}
\mathbb{Y}\ni(\lambda_1,\dots,\lambda_m) \mapsto \mathbb{C}htt_n (\lambda_1,\dots,\lambda_m)\in\mathbb{Q}\left[A,A^{-1}\right]
\end{equation}
is equal to the anisotropic Stanley polynomial for $\mathbb{C}htt_n$ evaluated for the anisotropic coordinates
\[ P=\left( A^{-1}, \dots,A^{-1} \right), \qquad
Q=\left( A \lambda_1,\dots, A\lambda_m\right).
\]
Thus, by \cref{lem:stanley-polunomial-isotropic-and-anisotropic-the-same},
the top-degree part of the polynomial \eqref{eq:VK-chtt} corresponds to the summands
$(\sigma_1,\sigma_2)$ from the definition \eqref{eq:top-top-top} of $\mathbb{C}htt_n$
for which the number of cycles of $\sigma_1$ takes the maximal possible value;
in other words $\sigma_1=\id$ must be the identity permutation.
The transitivity requirement implies that in this case $\sigma_2$
must have exactly one cycle; there are $(n-1)!$ such permutations.
It follows that the polynomial \eqref{eq:VK-chtt} is of degree $n$ and its homogeneous
top-degree part is equal to
\[A^{n-1}\ p_n(\lambda_1,\dots,\lambda_m).\]
On the other hand, \cref{prop:vershik-kerov-jack-character} gives the same polynomial
as the leading term for the map
\[ \mathbb{Y}\ni(\lambda_1,\dots,\lambda_m) \mapsto \mathbb{C}h_n (\lambda_1,\dots,\lambda_m)\in\mathbb{Q}\left[A,A^{-1}\right].\]
Due to the cancellation, it follows that
\[ \mathbb{Y}\ni(\lambda_1,\dots,\lambda_m) \mapsto F(\lambda_1,\dots,\lambda_m) \]
is a polynomial of degree (at most) $n-1$.
This completes the proof of condition \ref{zero:topdegree}.
\newcommand{\FunctionG}{H}
\emph{Condition \ref{zero:vanishing}.}
Since
\begin{equation}
\label{eq:small-boxes-vanishes}
F_1(\lambda)=\mathbb{C}h_n(\lambda)=0 \qquad \text{if $|\lambda|<n$},
\end{equation}
it follows that
\begin{equation}
\label{eq:small-boxes-vanishes-B}
\Delta_{\lambda_1} \cdots \Delta_{\lambda_k} F_1^{\sym}(\lambda_1,\lambda_2,\dots) = 0 \qquad
\text{if $|\lambda|+k< n$};
\end{equation}
it follows that an analogue of condition \ref{zero:vanishing} is indeed fulfilled for $F_1$.
In the remaining part of the proof we concentrate on $F_2$.
The definition of an embedding, as well as the definitions of $N_{G}(\lambda)$ and $\mathfrak{N}_{G}(\lambda)$
from \cref{sec:number-of-embeddings}
can be naturally extended to
an arbitrary tuple $\lambda=(\lambda_1,\dots,\lambda_k)$ of non-negative integers
(which does not necessarily form a Young diagram);
condition \eqref{embedding:young} should be simply replaced by
\[
1\leq f_1(w) \leq \lambda_{f_2(b)}
\]
for each pair of vertices $w\in V_{\circ}$, $b\in V_{\bullet}$ connected by an edge.
It is easy to check that $N_G(\lambda_1,\dots,\lambda_k)$ defined in this way is a symmetric function of its $k$
arguments.
Thus $\mathbb{C}htt_n(\lambda_1,\dots,\lambda_k)$ given by \eqref{eq:top-top-top} is a symmetric
function of its $k$ arguments; in other words $F_2^{\sym}=F_2=\mathbb{C}htt_n$
and no additional symmetrization is necessary.
For $\sigma_1,\sigma_2\in\Sym{n}$, any embedding $(f_1,f_2)$ of the bicolored graph $G_{\sigma_1,\sigma_2}$
into $\lambda=(\lambda_1,\dots,\lambda_k)$
can be alternatively viewed as a pair of functions
\[ f_1:[n]\rightarrow\mathbb{N}, \qquad f_2:[n]\rightarrow [k] \]
with the property that $f_s$ is constant on each cycle of $\sigma_s$ for $s\in\{1,2\}$ and such that
\begin{equation}
\label{eq:embedding-is-nice}
1\leq f_1(m) \leq \lambda_{f_2(m)} \qquad \text{holds true for any $m\in[n]$}.
\end{equation}
It follows that the sum on the right-hand side of
\eqref{eq:top-top-top} can be alternatively written as
\begin{multline}
\label{eq:curly-bracket}
\mathbb{C}htt_n(\lambda_1,\dots,\lambda_k)=\\
\ \hspace{-30ex}
{\sum_{\substack{f_1\colon[n]\rightarrow\mathbb{N}, \\ f_2\colon[n]\rightarrow[k], \\
\text{condition \eqref{eq:embedding-is-nice} holds true}}}
\sum_{\substack{ \sigma_2\in\Sym{n}, \\ \text{$f_2$ is constant on each cycle of $\sigma_2$}}}
}
\\
\underbrace{ \Bigg\{ \sum_{\substack{ \sigma_1\in\Sym{n}, \\ \text{$f_1$ is constant on each cycle of $\sigma_1$,} \\
\text{$\langle \sigma_1,\sigma_2\rangle$ is transitive}}}\!\!\!\!\!\!\!\!\!\!
\gamma^{n+1-|C(\sigma_1)|-|C(\sigma_2)|}\;
A^{|C(\sigma_1)|} \left(\frac{-1}{A}\right)^{|C(\sigma_2)|} \Bigg\}
}_{\FunctionG(\lambda_1,\dots,\lambda_k):=}.
\end{multline}
Let us fix the values of $f_1$, $f_2$ and $\sigma_2$; we denote by $\FunctionG(\lambda_1,\dots,\lambda_k)$
the value of the curly bracket in the above expression \eqref{eq:curly-bracket}.
We will investigate in the following
the contribution of $\FunctionG$ to
\begin{equation}
\label{eq:here-contribution}
[A^{n+r-2k}] \Delta_{\lambda_1} \cdots \Delta_{\lambda_k} \mathbb{C}htt_n(\lambda_1,\dots,\lambda_k),
\end{equation}
cf.~the left-hand side of \eqref{eq:top-key-equation}.
Firstly, notice that if $i\in[k]$ is such that $i\notin \Image f_2$ then
$\FunctionG(\lambda_1,\dots,\lambda_k)$ does not depend on the variable
$\lambda_i$, thus $\Delta_{\lambda_i} \FunctionG(\lambda_1,\dots,\lambda_k)=0$
and the contribution of $\FunctionG$ to \eqref{eq:here-contribution} vanishes.
Thus it is enough to consider only surjective functions
$f_2:[n]\rightarrow[k]$.
Secondly, $\FunctionG(\lambda_1,\dots,\lambda_k)$ is a Laurent polynomial of degree at most
$n+1-2|C(\sigma_2)|$, thus in order for the coefficient of $A^{n+1-2k}$ to be non-zero, we must have
$|C(\sigma_2)|\leq k$.
The above two observations imply that in order to have a nontrivial contribution we must have $|C(\sigma_2)|=k$
and $f_2:C(\sigma_2)\rightarrow[k]$ must be a bijection; we will assume this in the following.
Assume that $f:[n]\rightarrow \mathbb{N}^2$ given by $f(i)=\big( f_1(i), f_2(i) \big)$ is not injective.
It follows that there exist $i\neq j$ with $i,j\in[n]$ such that $f(i)= f(j)$.
For a given $\sigma_1\in\Sym{n}$ we denote $\sigma'_1:=(i,j)\; \sigma_1$, where $(i,j)\in\Sym{n}$ denotes the transposition
interchanging $i$ and $j$.
Note that $f=f\circ (i,j)$ thus
\begin{multline*}
\big( \text{$f_1$ is constant on each cycle of $\sigma_1$}\big) \iff \\
f_1=f_1 \circ \sigma_1 \iff f_1=f_1 \circ (i,j)\; \sigma_1 \iff \\
\big( \text{$f_1$ is constant on each cycle of $\sigma_1'$}\big).
\end{multline*}
We will show now that
\begin{equation}
\label{eq:equivalence-transitive}
\text{$\langle \sigma_1,\sigma_2\rangle$ is transitive} \iff
\text{$\langle \sigma'_1,\sigma_2\rangle$ is transitive}.
\end{equation}
We will show only that the left-hand side implies the right-hand side; the opposite implication will follow
by interchanging the values of $\sigma_1$ and $\sigma_1'$.
Consider the case when $i$ and $j$ belong to different cycles of $\sigma_1$.
Then
$C(\sigma_1')=C(\sigma_1) \vee \big\{ \{i,j\} \big\}$ is the set-partition obtained from the set-partition $C(\sigma_1)$
by merging the two blocks containing $i$ and $j$.
The left-hand side of \eqref{eq:equivalence-transitive}
implies that $C(\sigma_1)\vee C(\sigma_2)=1_n$ is the maximal partition, thus
$C(\sigma_1')\vee C(\sigma_2)= C(\sigma_1)\vee C(\sigma_2) \vee \big\{ \{i,j\} \big\}=1_n$ as well.
This implies the right-hand side of \eqref{eq:equivalence-transitive}.
Consider the case when $i$ and $j$ belong to the same cycle of $\sigma_1$. Then
$C(\sigma_1)=C(\sigma'_1) \vee \big\{ \{i,j\} \big\}$.
Since $f_2:C(\sigma_2)\rightarrow[k]$ is a bijection, the equality $f_2(i)=f_2(j)$ implies that
$i$ and $j$ belong to the same cycle of $\sigma_2$. It follows that
\begin{multline*}
1_n= C(\sigma_1) \vee C(\sigma_2) = \big( C(\sigma_1') \vee \{\{i,j\}\} \big) \vee C(\sigma_2) = \\
C(\sigma_1') \vee \big( \{\{i,j\}\} \vee C(\sigma_2) \big) =
C(\sigma_1') \vee C(\sigma_2).
\end{multline*}
The latter is equivalent to the right-hand side of \eqref{eq:equivalence-transitive}.
The equivalence \eqref{eq:equivalence-transitive} implies that
$\sigma_1$ contributes to the sum within $\FunctionG$ in \eqref{eq:curly-bracket} if and only if
$\sigma_1'$ contributes to this sum.
The map $\sigma_1\mapsto\sigma_1'$ is an involution without fixed points.
It is easy to check that the contributions of $\sigma_1$ and $\sigma_1'$ to $[A^{n+1-2k}]\FunctionG$ cancel
each other.
In this way we proved that the contribution of a summand in \eqref{eq:curly-bracket}
to \eqref{eq:here-contribution} vanishes unless $f$ is injective.
In other words, if we introduce a new function $\widetilde{\mathbb{C}htt_n}$
given by \eqref{eq:curly-bracket} in which we \emph{additionally restrict the summation}
only to functions $f$ which are injective, this would not change the value of
\eqref{eq:here-contribution} and
\begin{multline}
\label{eq:almost-home}
[A^{n+r-2k}] \Delta_{\lambda_1} \cdots \Delta_{\lambda_k} F_2^{\sym}(\lambda_1,\dots,\lambda_k) =\\
[A^{n+r-2k}] \Delta_{\lambda_1} \cdots \Delta_{\lambda_k} \mathbb{C}htt_n(\lambda_1,\dots,\lambda_k) =\\
[A^{n+r-2k}] \Delta_{\lambda_1} \cdots \Delta_{\lambda_k} \widetilde{\mathbb{C}htt_n}(\lambda_1,\dots,\lambda_k).
\end{multline}
The injectivity requirement on $f$ implies that
the function $\widetilde{\mathbb{C}htt_n}$ fulfills an analogue of \eqref{eq:small-boxes-vanishes}
and hence an analogue of \eqref{eq:small-boxes-vanishes-B} as well.
It follows that the right-hand side of \eqref{eq:almost-home} vanishes if $|\lambda|+k<n$.
The latter observation together with \eqref{eq:small-boxes-vanishes-B} implies that
$F=F_1-F_2$ indeed fulfills condition \ref{zero:vanishing}.
\emph{Condition \ref{zero:laurent}.}
\cref{prop:degree-laurent}
implies that $\mathbb{C}h_n(\lambda)\in\mathbb{Q}\left[A,A^{-1}\right]$ is a Laurent polynomial of degree at most
$n-1$.
By \eqref{eq:normalized-embedding} it follows that the summand
\[ \gamma^{n+1-|C(\sigma_1)|-|C(\sigma_2)|}
\ \mathfrak{N}_{\sigma_1,\sigma_2} \in\mathbb{Q}\left[A,A^{-1}\right]\]
in \eqref{eq:top-top-top} is a Laurent polynomial of degree
\[ \big( n+1-|C(\sigma_1)|-|C(\sigma_2)| \big) + \big( |C(\sigma_1)| - |C(\sigma_2)| \big) \leq n-1 \]
and, hence, $\mathbb{C}htt_n(\lambda)\in\mathbb{Q}\left[A,A^{-1}\right]$ is a Laurent polynomial of degree at most $n-1$.
By combining the above two observations it follows that $F$ indeed fulfills condition \ref{zero:laurent}.
\emph{Conclusion.}
We verified that $F$ fulfills the assumptions of \cref{lem:keylemma} for $r=1$;
it follows that $F= \mathbb{C}h_n - \mathbb{C}htt_n\in\mathscr{P}$ is of degree at most $n$.
\emph{The homogeneous part of degree $n$ is zero.}
We revisit \cref{sec:degree-bound-chttn};
there exist polynomials $H$ and $H'$ such that
\begin{align*}
\mathbb{C}htt_n &= H(\gamma,\mathcal{S}_2,\mathcal{S}_3,\dots), \\
\mathbb{C}h_n &= H'(\gamma,\mathcal{S}_2,\mathcal{S}_3,\dots).
\end{align*}
The already proved bound on the degree of $F$ implies that --- with respect to the degrees given by \eqref{eq:gradacja-ZZtop} --- the difference
$H-H'$ is a polynomial of degree at most $n$.
The results from \cref{sec:degree-bound-chttn} imply that
--- with respect to the degrees given by \eqref{eq:gradacja-ZZtop} --- the homogeneous part of $H$ of degree $n$ is equal to zero.
The homogeneous part of $H'$ of degree $n$ is also
equal to zero by a parity argument (even vs.~odd)
based on a result of Dołęga and F\'eray
\cite[Proposition 3.7]{DoleegaFeray2014}.
In this way we have proved that $H-H'$ is a polynomial of degree at most $n-1$, which completes the proof.
\end{proof}
\appendix
\section{Proof of \cref{coro:Kerov-Lassalle}}
\label{sec:proof-theo-kerov-lassalle}
\subsection{Expanders}
\label{sec:expander}
\begin{definition}
\label{def:weighted-expander}
We say that $(G,q)$ is an \emph{expander} if the following conditions are fulfilled:
\begin{enumerate}[label=(\alph*)]
\item $G$ is a bicolored graph with the set of black vertices $\mathcal{V}_\bullet$ and the set of white vertices
$\mathcal{V}_\circ$;
\item $q\colon \mathcal{V}_\bullet \to\{2,3,\dots\}$ is a function on the set of the black vertices;
\item $|\mathcal{V}_\circ|= \sum_{v \in \mathcal{V}_\bullet} \big( q(v)-1 \big)$,
\item \label{enum:marriage} for every set $A\subset \mathcal{V}_\bullet$
such that $A\neq\emptyset$ and $A\neq \mathcal{V}_\bullet$ we require that
\begin{multline*} \#\big\{ v\in \mathcal{V}_\circ: \text{$v$ is connected to at least one vertex in $A$}\big\} > \\
\sum_{i\in A} \big( q(i)-1 \big).
\end{multline*}
\end{enumerate}
\end{definition}
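For small bicolored graphs the conditions of \cref{def:weighted-expander} can be verified by brute force.
The following minimal sketch in Python (assuming, purely for illustration, that the graph is encoded by its sets of white and black vertices together with the collection of white--black edges) tests conditions (b), (c) and \ref{enum:marriage}; the structural condition (a) is implicit in this encoding.
\begin{verbatim}
from itertools import combinations

def is_expander(white, black, edges, q):
    # white, black: collections of white/black vertex labels
    # edges: collection of pairs (w, b) with w white and b black
    # q: dictionary assigning to each black vertex an integer weight
    white, black = set(white), set(black)
    # condition (b): every weight is at least 2
    if any(q[b] < 2 for b in black):
        return False
    # condition (c): the number of white vertices equals the sum of (q(v) - 1)
    if len(white) != sum(q[b] - 1 for b in black):
        return False
    # condition (d): for every nonempty proper subset A of black vertices,
    # the white neighbourhood of A has strictly more than
    # sum of (q(v) - 1) over v in A elements
    black_list = sorted(black)
    for size in range(1, len(black_list)):
        for subset in combinations(black_list, size):
            A = set(subset)
            neighbours = {w for (w, b) in edges if b in A}
            if len(neighbours) <= sum(q[b] - 1 for b in A):
                return False
    return True

# toy example: one black vertex of weight 3 joined to two white vertices
print(is_expander(["w1", "w2"], ["b"],
                  [("w1", "b"), ("w2", "b")], {"b": 3}))   # prints True
\end{verbatim}
The subset condition is checked directly over all nonempty proper subsets of black vertices, which is adequate only for small graphs.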
\subsection{Kerov--Lassalle polynomials and expanders}
\newcommand{\mathcal{G}}{\mathcal{G}}
\begin{proposition}
\label{lem:extract-kerov}
Let $F\in\mathscr{P}$, let $\mathcal{G}$ be a finite collection of connected bicolored graphs and
let $\mathcal{G}\ni G\mapsto {m}_G\in \mathbb{Q}[\gamma]$
be a function on it. Assume that for each $\lambda\in\mathbb{Y}$
$$ F(\lambda) = \sum_{G\in\mathcal{G}}
{m}_G\ \mathfrak{N}_G(\lambda).
$$
Then the Kerov--Lassalle polynomial for $F$ is explicitly given by
\[ F = \sum_{G \in \mathcal{G}} \sum_{q} (-{m}_G) \prod_{v\in \mathcal{V}_\bullet(G)} \mathcal{R}_{q(v)} ,\]
where the sums run over $G$ and $q$ for which $(G,q)$ is an expander.
\end{proposition}
\begin{proof}
This kind of result was proved in the special case $A=1$, $\gamma=0$ in
our joint work with Dołęga and F\'eray \cite{DolegaFeraySniady2008}.
In the following we will explain how to extend that result to our more general setup.
Our goal is to find a multivariate polynomial $K$ (with coefficients in $\mathbb{Q}[\gamma]$)
with the property that
\[ F= K( \mathcal{R}_2,\mathcal{R}_3,\dots ).\]
We shall reuse the ideas presented in the proof of
\cref{prop:generate-the-same=free}.
Our current goal can be reformulated as expressing
\emph{the anisotropic Stanley polynomial for $F$} as the polynomial $K$ in terms of
\emph{the anisotropic Stanley polynomial for $\mathcal{R}_2$},
\emph{the anisotropic Stanley polynomial for $\mathcal{R}_3$},\dots
with the coefficients in $\mathbb{Q}[\gamma]$.
\cref{lem:stanley-polunomial-isotropic-and-anisotropic-the-same} shows
equalities between the Stanley polynomials in the isotropic setup
and in its anisotropic counterpart, thus the original problem
is equivalent to the following one: we define
\begin{equation}
\label{eq:isotropic-Stanley-formula}
\bar{F}(\lambda) := \sum_{G\in\mathcal{G}} (-1)^{|\mathcal{V}_\bullet(G)|}\ {m}_G\ N_G(\lambda)
\end{equation}
and we ask \emph{how to express the function $\bar{F}$ in terms of the isotropic free cumulants:}
\[ \bar{F}= K( R_2,R_3,\dots )?\]
This problem has been explicitly solved in \cite{DolegaFeraySniady2008}
for the special case when
$\bar{F}=\mathbb{C}h_n^{A=1}$ is the character of the symmetric groups and \eqref{eq:isotropic-Stanley-formula}
takes the specific form of Stanley's character formula.
However, as we explained in a joint work with F\'eray \cite[Lemma 4.2]{FeraySniady2011},
the argument holds for any polynomial function $\bar{F}$
(note that the sign in \cite[Lemma 4.2]{FeraySniady2011} is incorrect).
\end{proof}
\subsection{Proof of \cref{coro:Kerov-Lassalle}}
\label{sec:proof-of-theo:kerov-lassalle}
\cref{coro:Kerov-Lassalle} is a consequence of the following more precise result.
\begin{restatable}
{theorem}{kerovlassallefortop}
\label{theo:kerov-lassalle}
For each $n\geq 1$
the homogeneous part of degree $n+1$
of Kerov--Lassalle polynomial for $\mathbb{C}h_n$ is given by
\begin{equation}
\label{eq:kerov-lassalle-exact}
\mathbb{C}htt_n =
\sum_{M}
\gamma^{n+1-|\mathcal{V}(M)|}
\sum_{\substack{q\colon \mathcal{V}_\bullet(M) \to\{2,3,\dots\} \\ \text{$(M,q)$ is an expander}}}
\prod_{v\in \mathcal{V}_\bullet(M)} \mathcal{R}_{q(v)}
,
\end{equation}
where the sum runs over \emph{rooted, oriented, bicolored, connected maps $M$ with $n$ unlabeled edges}.
\end{restatable}
\begin{proof}
It is a direct consequence of \cref{coro:nonoriented-maps} and \cref{lem:extract-kerov}.
\end{proof}
\section*{Acknowledgments}
I thank Maciej Dołęga and Valentin F\'eray for several years of collaboration on topics related to the current paper.
A part of \cref{sec:jack-polynomials-motivations} was written by Maciej Dołęga.
Research supported by \emph{Narodowe Centrum Nauki}, grant number \linebreak 2014/15/B/ST1/00064.
\end{document}
\begin{document}
\title{REAL AND COMPLEX INTEGRAL CLOSURE, LIPSCHITZ EQUISINGULARITY AND APPLICATIONS ON SQUARE MATRICES}
\author{Thiago F. da Silva}
\author{Nivaldo G. Grulha Jr.}
\author{Miriam S. Pereira}
\maketitle
\begin{center}
{ {\small \textit{Dedicated to Terence Gaffney and Maria Ruas, on the occasion of their 70th birthday, and to Marcelo Saia, on the occasion of his 60th birthday.}}}
\end{center}
\begin{abstract}
{\small Recently the authors investigated the Lipschitz triviality of simple germs of matrices. In this work, we improve some previous results and we present an extension of an integral closure result for the real setting. These tools are applied to investigate classes of square matrices singularities classified by Bruce and Tari.}
\end{abstract}
\let\thefootnote\relax\footnote{2010 \textit{Mathematics Subject Classification.} 32S15, 14J17, 32S60.
\textit{Key words and phrases.} Bi-Lipschitz equisingularity, real and complex integral closure, the double structure, finite determinacy, canonical vector fields.}
\thispagestyle{empty}
\section*{Introduction}
The study of Lipschitz equisingularity arose from the works of Zariski \cite{Za}, Pham \cite{Pham} and Teissier \cite{PT}, and was further developed by Parusi\'nski \cite{PA1,PA2}, Gaffney \cite{SG,G1,G2}, Fernandes and Ruas \cite{FR2}, and others.
In \cite{M1} Mostowski introduced a new technique for the study of this subject, based on the existence of Lipschitz vector fields. In general, such a vector field is not canonically associated with the variety. Nevertheless, Gaffney \cite{G1} presented conditions which guarantee that the canonical vector field of a family of irreducible curves is Lipschitz, using the \textit{double} structure, defined for ideals in \cite{G2} and generalized to modules in \cite{SG}.
Families of square matrices were first studied by Arnold in \cite{Arnold}, where the parametrised invertible matrices act by conjugation. Recently, many authors presented a series of interesting results about determinacy and classification using parametrised families or smooth changes of coordinates in the source of the germ (\cite{Bruce}, \cite{BruceTari}, \cite{FK}, \cite{FN} and \cite{Miriam}).
More recently, Gaffney's result was extended in \cite{SGP}, where the authors presented conditions which ensure the canonical vector field is Lipschitz in the context of $1$-unfoldings of singularities of matrices, following the approach of Pereira and Ruas \cite{RP}.
In this work we prove a real version of the result proved in \cite{SGP} in order to investigate the Lipschitz triviality in the real case. Finally, we study some deformations of simple singularities classified by Bruce and Tari \cite{Bruce, BruceTari} in real and complex cases, using a similar approach as in \cite{SGP}.
\section{Notation and Background}
We start with some notation. Let $\mathbb{K}$ be a field which is $\mathbb{R}$ or $\mathbb{C}$ and let $\mathcal{R}$ be the group of diffeomorphisms $\mathbb{K}^r,0\to\mathbb{K}^r,0$. Let $\mathcal{H}$ denote the set of germs of smooth mappings $\mathbb{K}^r,0\to GL_n(V)\times GL_p(W)$, and $M$
the set of germs $F:\mathbb{K}^r,0\to \mbox{Hom}(V;W)$. The set $\mathcal{H}$ can be endowed with a group structure inherited from the product group in the target.
We define a notion of bi-Lipschitz equivalence between two matrices as in \cite{MP}.
\begin{definition}\label{action}
Let ${\mathcal{G}}={\mathcal{R}}\ltimes {\mathcal{H}}$ be the semi-direct product of ${\mathcal{R}}$ and ${\mathcal{H}}$. We say that two germs $F_1,
\,\ F_2:\mathbb{K}^r,0\to \mbox{Hom}(V;W)$ are $\mathcal{G}$-Lipschitz equivalent if there exist a germ $\phi:(\mathbb{K}^r,0)\rightarrow (\mathbb{K}^r,0)$ of a bi-Lipschitz homeomorphism and germs of continuous mappings \linebreak $X:(\mathbb{K}^r,0)\rightarrow \mbox{GL}_n(V)$, $Y:(\mathbb{K}^r,0)\rightarrow \mbox{GL}_p(W)$ such that $$F_1=X^{-1}(F_2\circ \phi^{-1})Y.$$
\end{definition}
An element of $M$ can also be considered as a map $\mathbb{K}^r,0\to\mathbb{K}^N$, where we identify $\mbox{Hom}(V;W)$ with the $n\times p$ matrices, and $N= np$.
It is not difficult to see that $\mathcal{G}$ is one of Damon's
geometric subgroups of $\mathcal{K}$. As a consequence of Damon's result
we can use the techniques of singularity theory. For instance, those
concerning finite determinacy (see \cite{Damon}, \cite{Miriam} and \cite{BruceTari}).
It is possible to determine the tangent space to the orbit for the action of the group $\mathcal{G}$ on $M$. Given a matrix $F$, we write $F_{x(i)}$ for the matrix $\dfrac{\partial F}{\partial x_i}$ and we denote by ${\mathcal E}_r$ the ring of smooth functions $\mathbb{K}^r, 0\to\mathbb{K}$. So the tangent space can be viewed as an ${\mathcal E}_r$-submodule of ${\mathcal E}_N$ spanned by the set of matrices $R_{il}$ (respectively $C_{jm}$) whose $l^{\mbox{\tiny{th}}}$ row (respectively $m^{\mbox{\tiny{th}}}$ column) is the $i^{\mbox{\tiny{th}}}$ row (respectively $j^{\mbox{\tiny{th}}}$ column) of $F$ and which have zeros elsewhere, for $1\leq i,\,l\leq n$ and $1\leq j,\, m\leq p$ (see \cite{Damon}, \cite{Miriam} and \cite{BruceTari}).
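For instance, for $n=p=2$ and $F=\left( \begin{matrix} F_{11} & F_{12} \\ F_{21} & F_{22} \end{matrix}\right)$ one has $R_{12}=\left( \begin{matrix} 0 & 0 \\ F_{11} & F_{12} \end{matrix}\right)$ and $C_{21}=\left( \begin{matrix} F_{12} & 0 \\ F_{22} & 0 \end{matrix}\right)$.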
\section{Real integral closure and Lipschitz Equisingularity}
For the complex case, in \cite{SGP} the authors obtained conditions under which the canonical vector field defined by a family of simple germs of matrices is Lipschitz, depending on a specific inclusion of ideals, involving the integral closure and the double of an ideal.
An appropriate notion of integral closure in the real case plays a key role in the proof of Theorem \ref{T2.4}. Let us recall this notion.
Let $({\mathcal A}_n,m_n)$ be the local ring of real analytic functions germs at the origin in ${\mathbb{R}}^n$, and let ${\mathcal A}_n^p$ be the ${\mathcal A}_n$-free module of rank $p$. For a germ of a real analytic set $(X,x)$, denote by ${\mathcal A}_{X,x}$ the local ring of real analytic function germs at $(X,x)$.
\begin{definition}Let $I$ be an ideal of ${\mathcal A}_{X,x}$. An element $h\in{\mathcal A}_{X,x}$ is in the \textbf{real integral closure} of $I$, denoted $\overline{I}$, if $h\circ\phi\in \phi^*(I){\mathcal A}_1$ for every real analytic path $\phi:({\mathbb{R}},0)\rightarrow (X,x)$.
\end{definition}
For an algebraic definition of the real integral closure of an ideal one can see \cite{B}.
The key step to obtain the main results of \cite{SGP} for the real case is the fact that the definition of the real integral closure of an ideal is equivalent to the following formulation using analytic inequalities.
\begin{theorem}[\cite{G3}]\label{GR}
Let $I$ be an ideal of ${\mathcal A}_{X,x}$ and $h\in {\mathcal A}_{X,x}$. Then: $h\in\overline{I}$ if and only if for each choice of generators $\{f_i\}$ there exist a positive constant $C$ and a neighborhood $U$ of $x$ such that
\begin{center}
$\parallel h(z)\parallel\leq C \underset{i}{\max}\parallel f_i(z) \parallel $
\end{center}
\noindent for all $z\in U$.
\end{theorem}
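For instance, in ${\mathcal A}_2$ one has $xy\in\overline{(x^2,y^2)}$, since $\vert xy\vert\leq \max\{x^2,y^2\}$ for all $(x,y)\in{\mathbb{R}}^2$.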
Let us recall some definitions and fix some notations.
Here we work with one parameter deformations and unfoldings. The parameter space is denoted by $Y={\mathbb{R}}\equiv {\mathbb{R}}\times 0$.
\begin{definition}
Let $h\in{\mathcal A}_N$. The {\bf double of }$h$ is the element denoted by $h_D\in{\mathcal A}_{2N}$ defined by the equation $$h_D(z,z'):=h(z)-h(z').$$
If $h=(h_1,...,h_r)$ is a map, with $h_i\in{\mathcal A}_N$, $\forall i$, then we define $I_D(h)$ as the ideal of ${\mathcal A}_{2N}$ generated by $\{(h_1)_D,...,(h_r)_D\}$.
\end{definition}
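For instance, for $h(x)=x^2\in{\mathcal A}_1$ one has $h_D(x,x')=x^2-(x')^2=(x-x')(x+x')$, which lies in the ideal of ${\mathcal A}_2$ generated by the double $(x)_D=x-x'$ of the identity function.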
We obtain a relation between the real integral closure of the double and the Lipschitz condition for the canonical vector field induced by a one-parameter unfolding.
Let $\tilde{F}: {\mathbb{R}}\times{\mathbb{R}}^q\longrightarrow {\mathbb{R}}\times{\mathbb{R}}^n$ be an analytic map, which is a homeomorphism onto its image, and such that we can write $\tilde{F}(y,x)=(y,\tilde{f}(y,x))$, with $\tilde{f}(y,x)=(\tilde{f}_1(y,x),...,\tilde{f}_n(y,x))$. Let us denote by
$$\frac{\partial}{\partial y} + \sum\limits_{j=1}^{n}\frac{\partial \widetilde{f_j}}{\partial y}\cdot\frac{\partial}{\partial z_j}$$
\noindent the vector field $v: \tilde{F}({\mathbb{R}}\times{\mathbb{R}}^q)\longrightarrow {\mathbb{R}}\times{\mathbb{R}}^n$ given by $$v(y,z)=(1,\frac{\partial \tilde{f}_1}{\partial y}(\tilde{F}^{-1}(y,z)),...,\frac{\partial \tilde{f}_n}{\partial y}(\tilde{F}^{-1}(y,z))).$$
\begin{theorem}\label{T2.4}
The vector field $\frac{\partial}{\partial y} + \sum\limits_{j=1}^{n}\frac{\partial \widetilde{f}_j}{\partial y}\cdot\frac{\partial}{\partial z_j}$ is Lipschitz if and only if $$I_D(\frac{\partial \tilde{F}}{\partial y})\subseteq\overline{I_D(\tilde{F})}.$$
\end{theorem}
\begin{proof}
Since we are working in a finite-dimensional ${\mathbb{R}}$-vector space, all norms are equivalent. To simplify the argument, we use the notation $\Vert \cdot \Vert$ for the \textit{maximum norm} on ${\mathbb{R}}\times{\mathbb{R}}^q$ and ${\mathbb{R}}\times{\mathbb{R}}^n$, i.e., $\Vert (x_1,...,x_{n+1})\Vert = \max_{i=1}^{n+1}\{\vert x_i\vert\}$.
Suppose the canonical vector field is Lipschitz. By hypothesis there exists a constant $c>0$ such that $$\parallel v(y,z)-v(y',z') \parallel \leq c\parallel (y,z)-(y',z') \parallel$$ $\forall (y,z),(y',z')\in U$, where $U$ is an open subset of $\tilde{F}({\mathbb{R}}\times{\mathbb{R}}^q)$.
Thus, given $(y,x),(y',x')\in \tilde{F}^{-1}(U)$, and applying the above inequality on these points, we get $$\parallel (\frac{\partial \tilde{f}_j}{\partial y})_D(y,x,y',x') \parallel \leq c\parallel \tilde{F}(y,x)-\tilde{F}(y',x') \parallel $$ for all $j=1,...,n$. By the previous theorem, each generator of $I_D(\frac{\partial \tilde{F}}{\partial y})$ belongs to $\overline{I_D(\tilde{F})}$.
Now suppose that $I_D(\frac{\partial \tilde{F}}{\partial y})\subset\overline{I_D(\tilde{F})}$. Using the hypothesis and Theorem \ref{GR}, for each $j\in\{1,...,n\}$ there exists a constant $c_j>0$ and an open subset $U_j\subset{\mathbb{R}}\times{\mathbb{R}}^q$ such that $$\parallel (\frac{\partial \tilde{f}_j}{\partial y})_D(y,x,y',x') \parallel \leq c_j\parallel \tilde{F}(y,x)-\tilde{F}(y',x') \parallel $$
$\forall (y,x),(y',x')\in U_j$. Take $U:=\bigcap\limits_{j=1}^{n}U_j$, $c:=\max\{c_j\}_{j=1}^{n}$ and $V:=\tilde{F}(U)$, which is an open subset of $\tilde{F}({\mathbb{R}}\times{\mathbb{R}}^q)$, since $\tilde{F}$ is a homeomorphism onto its image. Hence, $$\parallel v(y,z)-v(y',z') \parallel \leq c\parallel (y,z)-(y',z') \parallel$$ $\forall (y,z),(y',z')\in V$.
Therefore, the vector field $\frac{\partial}{\partial y} + \sum\limits_{j=1}^{n}\frac{\partial \tilde{f}_j}{\partial y}\cdot\frac{\partial}{\partial z_j}$ is Lipschitz.
\end{proof}
\begin{corollary}\label{2.2}
Suppose that $\tilde{F}:{\mathbb{R}}\times{\mathbb{R}}^q\longrightarrow {\mathbb{R}}\times \mbox{Hom}({\mathbb{R}}^m,{\mathbb{R}}^n)$ is an analytic map and a homeomorphism onto its image, and suppose we can write $$\tilde{F}(y,x)=(y,F(x)+y\theta(x)).$$
\begin{enumerate}
\item [a)] The vector field $\frac{\partial}{\partial y} + \sum\limits_{j=1}^{n}\frac{\partial \widetilde{f}_j}{\partial y}\cdot\frac{\partial}{\partial z_j}$ is Lipschitz if, and only if, $$I_D(\theta)\subseteq\overline{I_D(\tilde{F})}.$$
\item [b)] If $\theta$ is constant then the vector field $\frac{\partial}{\partial y} + \sum\limits_{j=1}^{n}\frac{\partial \widetilde{f}_j}{\partial y}\cdot\frac{\partial}{\partial z_j}$ is Lipschitz.
\end{enumerate}
\end{corollary}
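As a minimal illustration of item b) (the unfolding below is chosen only for this purpose), take $q=m=n=1$, $F(x)=x^{3}$ and $\theta\equiv 1$, so that $\tilde{F}(y,x)=(y,x^{3}+y)$ is an analytic homeomorphism onto its image. Here $\theta_D=0$, hence $I_D(\theta)=(0)\subseteq\overline{I_D(\tilde{F})}$, and indeed the canonical vector field is the constant field $v(y,z)=(1,1)$, which is clearly Lipschitz.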
\section{Applications in some classes of square matrices}
In this section we study whether the Lipschitz condition is satisfied by the canonical vector field naturally associated to the $1$-unfolding of the $\mathcal{G}$-simple square matrix singularities classified in \cite{Bruce, BruceTari}. Our goal is to obtain a better understanding of its behaviour. In \cite{SGP} we considered versal deformations of determinantal singularities of codimension $2$ and showed that this behaviour depends on the type of the normal form.
The next result presents a part of the classification of $\mathcal{G}$-simple symmetric matrices obtained by Bruce in Theorem 1.1 of \cite{Bruce}.
\begin{proposition}
The $\mathcal{G}$-simple germs $F: \mathbb{C}^{2} \to Sym_{2}$ of rank $0$ at the origin are given in the following table.
\end{proposition}
\begin{center}
\begin{tabular}{lccc}
\hline
& Normal Form & & Discriminant \\
\hline
1. & $\left( \begin{matrix}
y^k & x \\
x & y^{\ell}
\end{matrix}\right)$ & $k\geq1,\ell\geq 2$ & ${\mathcal A}_{k+\ell-1}$\\
2. & $\left( \begin{matrix}
x & 0 \\
0 & y^2+x^k
\end{matrix}\right)$ & $k\geq2$ & $D_{k+2}$\\
3. & $\left( \begin{matrix}
x & 0 \\
0 & xy+y^k
\end{matrix}\right)$ & $k\geq2$ & $D_{2k}$\\
4. & $\left( \begin{matrix}
x & y^k \\
y^k & xy
\end{matrix}\right)$ & $k\geq2$ & $D_{2k+1}$\\
5. & $\left( \begin{matrix}
x & y^2 \\
y^2 & x^2
\end{matrix}\right)$ & & $E_6$\\
6. & $\left( \begin{matrix}
x & 0 \\
0 & x^2+y^3
\end{matrix}\right)$ & & $E_7$\\
\end{tabular}
\end{center}
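The discriminants listed above can be read off from the determinants of the normal forms. For instance, for the first normal form $$\det\left( \begin{matrix} y^k & x \\ x & y^{\ell} \end{matrix}\right)=y^{k+\ell}-x^{2},$$ which defines a plane curve singularity of type ${\mathcal A}_{k+\ell-1}$.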
In the following result we establish conditions for the Lipschitz triviality of the canonical vector field associated to the normal forms introduced in the above proposition. Unlike the cases exhibited in \cite{SGP}, the examples here are of a different nature: taking the versal deformation of a normal form, we can find directions that produce Lipschitz trivial deformations, deformations that are Lipschitz only off the origin, or non-Lipschitz deformations.
\begin{proposition}
Following the table of normal forms of $\mathcal{G}$-simple germs $F: \mathbb{C}^{2} \to Sym_{2}$ of rank $0$ at the origin, the canonical vector field associated to the $1$-parameter deformation $\tilde{F}$ induced by $\theta\in\frac{Sym_2}{T{\mathcal G}_{e}F}$ is Lipschitz under the following conditions:
\begin{enumerate}
\item[1.]{For the normal form 1 of the table, if the canonical vector field associated to $\tilde{F}$ is Lipschitz then $\theta$ can be written in the form $$\theta=\left( \begin{matrix}
a_0+\sum\limits_{i=r}^{k-1}a_iy^i & 0 \\
0 & b_0+ \sum\limits_{j=r}^{\ell-2}b_jy^j \\
\end{matrix}\right)$$\noindent with $a_i,b_j\in{\mathbb{C}}$ and $r=\min\{k,\ell\}$.}
\item[2.]{For the normal form 2 of the table, the canonical vector field associated to $\tilde{F}$ is Lipschitz if and only if $\theta$ can be written in the form $\theta=\left( \begin{matrix}
a & b \\
b & \sum\limits_{i=0}^{k-2}d_ix^i \\
\end{matrix}\right)$, with $a,b,d_i\in{\mathbb{C}}$.}
\item[3.]{For the normal form 3 of the table, the canonical vector field associated to $\tilde{F}$ is Lipschitz if and only if $\theta$ is constant.}
\item[4.]{For the normal form 4 of the table, the canonical vector field associated to $\tilde{F}$ is Lipschitz if and only if $\frac{\partial\tilde{F}}{\partial y}=\frac{\partial F}{\partial y}$, i.e., $\theta$ can be written in the form $\theta=\left( \begin{matrix}
a & b \\
b & \sum\limits_{j=0}^{k-1}b_jx^j \\
\end{matrix}\right)$, with $a,b,b_j\in{\mathbb{C}}$.}
\item[5.]{For the normal form 5 of the table, the canonical vector field associated to the $1$-parameter deformation $\tilde{F}$ induced by $\theta\in\frac{Sym_2}{T{\mathcal G}_{e}F}$ is Lipschitz if and only if the $1$-jet type of $\tilde{F}$ and $F$ agree.}
\item[6.]{For the normal form 6 of the table, the canonical vector field associated to $\tilde{F}$ is Lipschitz if and only if $\theta$ is constant.}
\end{enumerate}
\end{proposition}
The proof follows from the following lemmas.
\begin{lemma}
Let $F: (\mathbb{C}^2,0)\to Sym_2$ be a ${\mathcal G}$-simple germ of rank $0$ at the origin whose discriminant is of type ${\mathcal A}_{k+\ell-1}$. Let $\tilde{F}$ be a deformation induced by $\theta\in\frac{Sym_2}{T{\mathcal G}_{e} F}$. If the canonical vector field associated to $\tilde{F}$ is Lipschitz then $\theta$ can be written in the form \\$\theta=\left( \begin{matrix}
a_0+\sum\limits_{i=r}^{k-1}a_iy^i & 0 \\
0 & b_0+ \sum\limits_{j=r}^{\ell-2}b_jy^j \\
\end{matrix}\right)$, with $a_i,b_j\in{\mathbb{C}}$ and $r=\min\{k,\ell\}$.
In particular, in the case $\ell=k$, the canonical vector field associated to $\tilde{F}$ is Lipschitz if and only if $\theta$ is constant.
\end{lemma}
\begin{proof}
The normal form of $F$ is $\left(
\begin{matrix}
y^k & x \\
x & y^{\ell} \\
\end{matrix}\right)$.
Then, the normal space $\frac{Sym_2}{T{\mathcal G}_{e}F}$ is generated by
$$\left\{\left( \begin{matrix}
1 & 0 \\
0 & 0 \\
\end{matrix}\right),\left( \begin{matrix}
0 & 0 \\
0 & 1 \\
\end{matrix}\right),\left( \begin{matrix}
y & 0 \\
0 & 0 \\
\end{matrix}\right),...,\left( \begin{matrix}
y^{k-1} & 0 \\
0 & 0 \\
\end{matrix}\right),\left( \begin{matrix}
0 & 0 \\
0 & y \\
\end{matrix}\right),...,\left( \begin{matrix}
0 & 0 \\
0 & y^{\ell-2} \\
\end{matrix}\right)\right\}.$$
If $\theta \in\frac{Sym_2}{T{\mathcal G}_{e} F}$ then $\theta$ is a ${\mathbb{C}}$-linear combination of the above elements, i.e., there exist $a_i,b_j\in{\mathbb{C}}$ such that $$\theta=\left( \begin{matrix}
\sum\limits_{i=0}^{k-1}a_iy^i & 0 \\
0 & \sum\limits_{j=0}^{\ell-2}b_jy^j \\
\end{matrix}\right).$$
Thus, $\tilde{F}=\left( \begin{matrix}
y^k+t\sum\limits_{i=0}^{k-1}a_iy^i & x \\
x & y^{\ell}+t\sum\limits_{j=0}^{\ell-2}b_jy^j \\
\end{matrix}\right)$.
Notice that $I_D(\tilde{F})$ is generated by $$\{x-x', y^k-y'^k+t\sum\limits_{i=1}^{k-1}a_i(y^i-y'^i),y^{\ell}-y'^{\ell}+t\sum\limits_{j=1}^{\ell-2}b_j(y^j-y'^j)\}$$ and $I_D(\theta)$ is generated by $\left\{\sum\limits_{i=1}^{k-1}a_i(y^i-y'^i),\sum\limits_{j=1}^{\ell-2}b_j(y^j-y'^j)\right\}$.
Consider the curve $\phi(s)=(s^{k+\ell},2s^{k+\ell},2s,s^{k+\ell},s^{k+\ell},s)$. Thus, $\phi^*(I_D(\tilde{F}))=\langle s^{k+\ell}, (2^k-1)s^k+s^{k+\ell}\sum\limits_{i=1}^{k-1}a_i(2^i-1)s^i, (2^{\ell}-1)s^{\ell}+s^{k+\ell}\sum\limits_{j=1}^{\ell-2}b_j(2^j-1)s^j \rangle$ which is contained in $\langle s^r \rangle$. Since $I_D(\theta)\subseteq\overline{I_D(\tilde{F})}$ then $$\langle \sum\limits_{i=1}^{k-1}a_i(2^i-1)s^i, \sum\limits_{j=1}^{\ell-2}b_j(2^j-1)s^j \rangle\subseteq\langle s^r\rangle$$ which finishes the proof.
\end{proof}
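For instance, for $k=2$ and $\ell=4$ one has $r=2$ and the lemma forces $a_1=b_1=0$, so the only directions not excluded by this necessary condition are those of the form $\theta=\left( \begin{matrix} a_0 & 0 \\ 0 & b_0+b_2y^2 \end{matrix}\right)$; for $k=\ell$, as observed above, only constant directions survive.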
\begin{lemma}
Let $F: (\mathbb{C}^2,0)\to Sym_2$ be a ${\mathcal G}$-simple germ of rank $0$ at the origin whose discriminant is of type ${\mathcal D}_{k+2}$, $k\geq 2$. Let $\tilde{F}$ be a deformation induced by $\theta\in\frac{Sym_2}{T{\mathcal G}_{e} F}$. Then the canonical vector field associated to $\tilde{F}$ is Lipschitz if and only if $\theta$ can be written in the form $\theta=\left( \begin{matrix}
a & b \\
b & \sum\limits_{i=0}^{k-2}d_ix^i \\
\end{matrix}\right)$, with $a,b,d_i\in{\mathbb{C}}$.
\end{lemma}
\begin{proof}
The normal form of $F$ is $\left(
\begin{matrix}
x & 0 \\
0 & y^2+x^k \\
\end{matrix}\right)$.
Then, the normal space $\frac{Sym_2}{T{\mathcal G}_{e}F}$ is generated by
$$\left\{\left( \begin{matrix}
1 & 0 \\
0 & 0 \\
\end{matrix}\right),\left( \begin{matrix}
0 & 1 \\
1 & 0 \\
\end{matrix}\right),\left( \begin{matrix}
0 & y \\
y & 0 \\
\end{matrix}\right),\left( \begin{matrix}
0 & 0 \\
0 & 1 \\
\end{matrix}\right),\left( \begin{matrix}
0 & 0 \\
0 & x \\
\end{matrix}\right),...,\left( \begin{matrix}
0 & 0 \\
0 & x^{k-2} \\
\end{matrix}\right)\right\}.$$
Thus, we can write $\theta=\left( \begin{matrix}
a & b+cy \\
b+cy & \sum\limits_{i=0}^{k-2}d_ix^i \\
\end{matrix}\right)$, with $a,b,c,d_i\in{\mathbb{C}}$,
$I_D(\theta)=\langle c(y-y'),\sum\limits_{i=1}^{k-2}d_i(x^i-x'^i) \rangle$ and \\$I_D(\tilde{F})=\langle x-x', tc(y-y'),y^2-y'^2+x^k-x'^k+t\sum\limits_{i=1}^{k-2}d_i(x^i-x'^i) \rangle$.
Consider the curve $\phi(s)=(s,2s^2,2s,s,s^2,s)$. Notice that $\phi^*(I_D(\tilde{F}))\\=\langle s^2,cs^2,3s^2+(2^k-1)s^{2k}+s\sum\limits_{i=1}^{k-2}d_i(2^i-1)s^{2i} \rangle\subseteq \langle s^2 \rangle $.
Suppose the canonical vector field is Lipschitz, i.e., $I_D(\theta)\subseteq\overline{I_D(\tilde{F})}$. Then, $cs=\phi^*(c(y-y'))\in\langle s^2\rangle$ and so $c=0$.
Conversely, if $c=0$ then $I_D(\theta)=\langle \sum\limits_{i=1}^{k-2}d_i(x^i-x'^i) \rangle\subseteq \langle x-x'\rangle\subseteq I_D(\tilde{F})$.
\end{proof}
\begin{lemma}
Let $F: (\mathbb{C}^2,0)\to Sym_2$ be a ${\mathcal G}$-simple germ of rank $0$ at the origin whose discriminant is of type ${\mathcal D}_{2k}$, $k\geq 2$. Let $\tilde{F}$ be a deformation induced by $\theta\in\frac{Sym_2}{T{\mathcal G}_{e} F}$. Then the canonical vector field associated to $\tilde{F}$ is Lipschitz if and only if $\theta$ is constant.
\end{lemma}
\begin{proof}
The normal form of $F$ is $\left(
\begin{matrix}
x & 0 \\
0 & xy+y^k \\
\end{matrix}\right)$.
Then, the normal space $\frac{Sym_2}{T{\mathcal G}_{e}F}$ is generated by
$$\left\{\left( \begin{matrix}
1 & 0 \\
0 & 0 \\
\end{matrix}\right),\left( \begin{matrix}
0 & 0 \\
0 & 1 \\
\end{matrix}\right),\left( \begin{matrix}
0 & 1 \\
1 & 0 \\
\end{matrix}\right),\left( \begin{matrix}
y & 0 \\
0 & 0 \\
\end{matrix}\right),...,\left( \begin{matrix}
y^{k-2} & 0 \\
0 & 0 \\
\end{matrix}\right),\left( \begin{matrix}
0 & 0 \\
0 & y \\
\end{matrix}\right),...,\left( \begin{matrix}
0 & 0 \\
0 & y^{k-1} \\
\end{matrix}\right)\right\}.$$
So we can write $\theta=\left( \begin{matrix}
\sum\limits_{i=0}^{k-2}a_iy^i & a \\
a & \sum\limits_{j=0}^{k-1}b_jy^j \\
\end{matrix}\right)$, for some $a,a_i,b_j\in{\mathbb{C}}$, $I_D(\theta)=\langle \sum\limits_{i=1}^{k-2}a_i(y^i-y'^i),\sum\limits_{j=1}^{k-1}b_j(y^j-y'^j) \rangle$ and \\$I_D(\tilde{F})=\langle x-x'+t\sum\limits_{i=1}^{k-2}a_i(y^i-y'^i) ,xy-x'y'+y^k-y'^k+t\sum\limits_{j=1}^{k-1}b_j(y^j-y'^j) \rangle$.
Consider the curve $\phi(s)=(s^k,2s^k,2s,s^k,s^k,s)$. Then $\phi^*(I_D(\tilde{F}))\\=\langle s^k+s^k\sum\limits_{i=1}^{k-2}a_i(2^i-1)s^i,3s^{k+1}+(2^k-1)s^k+s^k\sum\limits_{j=1}^{k-1}b_j(2^j-1)s^j \rangle\subseteq\langle s^k\rangle$.
If the canonical vector field is Lipschitz then $\sum\limits_{i=1}^{k-2}a_i(2^i-1)s^i$ and $\sum\limits_{j=1}^{k-1}b_j(2^j-1)s^j$ belong to $\langle s^k\rangle$. Hence, $a_i=0$ and $b_j=0$ for all $i$ and $j$. Therefore, $\theta$ is constant.
\end{proof}
\begin{lemma}
Let $F: (\mathbb{C}^2,0)\to Sym_2$ be a ${\mathcal G}$-simple germ of rank $0$ at the origin whose discriminant is of type ${\mathcal D}_{2k+1}$, $k\geq 2$. Let $\tilde{F}$ be a deformation induced by $\theta\in\frac{Sym_2}{T{\mathcal G}_{e} F}$. Then the canonical vector field associated to $\tilde{F}$ is Lipschitz if and only if $\frac{\partial\tilde{F}}{\partial y}=\frac{\partial F}{\partial y}$, i.e., $\theta$ can be written in the form $\theta=\left( \begin{matrix}
a & b \\
b & \sum\limits_{j=0}^{k-1}b_jx^j \\
\end{matrix}\right)$, with $a,b,b_j\in{\mathbb{C}}$.
\end{lemma}
\begin{proof}
The normal form of $F$ is $\left(
\begin{matrix}
x & y^k \\
y^k & xy \\
\end{matrix}\right)$.
Then, the normal space $\frac{Sym_2}{T{\mathcal G}_{e}F}$ is generated by
$$\left\{\left( \begin{matrix}
1 & 0 \\
0 & 0 \\
\end{matrix}\right),\left( \begin{matrix}
0 & 1 \\
1 & 0 \\
\end{matrix}\right),\left( \begin{matrix}
0 & 0 \\
0 & 1 \\
\end{matrix}\right),\left( \begin{matrix}
y & 0 \\
0 & 0 \\
\end{matrix}\right),...,\left( \begin{matrix}
y^{k-1} & 0 \\
0 & 0 \\
\end{matrix}\right),\left( \begin{matrix}
0 & 0 \\
0 & x \\
\end{matrix}\right),...,\left( \begin{matrix}
0 & 0 \\
0 & x^{k-1} \\
\end{matrix}\right)\right\}.$$
Thus, we can write $\theta=\left( \begin{matrix}
a+\sum\limits_{i=1}^{k-1}a_iy^i & b \\
b & \sum\limits_{j=0}^{k-1}b_jx^j \\
\end{matrix}\right)$, \\with $a,a_i,b,b_j\in{\mathbb{C}}$,
$I_D(\theta)=\langle \sum\limits_{i=1}^{k-1}a_i(y^i-y'^i),\sum\limits_{j=1}^{k-1}b_j(x^j-x'^j) \rangle$ and \\$I_D(\tilde{F})=\langle x-x'+t\sum\limits_{i=1}^{k-1}a_i(y^i-y'^i), y^k-y'^k,xy-x'y'+t\sum\limits_{j=1}^{k-1}b_j(x^j-x'^j) \rangle$.
Consider the curve $\phi(s)=(s^k,2s^k,2s,s^k,s^k,s)$. Then $\phi^*(I_D(\tilde{F}))=\langle s^k+s^k\sum\limits_{i=1}^{k-1}a_i(2^i-1)s^i, (2^k-1)s^k,3s^{k+1}+s^k\sum\limits_{j=1}^{k-1}b_j(2^j-1)s^{kj} \rangle\subseteq\langle s^k\rangle$.
If $I_D(\theta)\subseteq\overline{I_D(\tilde{F})}$ then $\sum\limits_{i=1}^{k-1}a_i(2^i-1)s^i\in\langle s^k\rangle$, hence $a_i=0,\forall i\in\{1,...,k-1\}$. Conversely, if $a_i=0,\forall i\in\{1,...,k-1\}$ then $I_D(\theta)=\langle \sum\limits_{j=1}^{k-1}b_j(x^j-x'^j) \rangle\subseteq\langle x-x'\rangle\subseteq I_D(\tilde{F})$.
\end{proof}
\begin{lemma}
Let $F: (\mathbb{C}^2,0)\to Sym_2$ be a ${\mathcal G}$-simple germ of rank $0$ at the origin with discriminant of type $E_6$. Then the canonical vector field associated to the $1$-parameter deformation $\tilde{F}$ induced by $\theta\in\frac{Sym_2}{T{\mathcal G}_{e}F}$ is Lipschitz if and only if the $1$-jet type of $\tilde{F}$ and $F$ agree.
\end{lemma}
\begin{proof}
The normal form of $F$ is $\left( \begin{matrix}
x & y^2 \\
y^2 & x^2 \\
\end{matrix}\right)$.
Then, the normal space $\frac{Sym_2}{T{\mathcal G}_{e}F}$ is generated by
$$\left\{\left( \begin{matrix}
1 & 0 \\
0 & 0 \\
\end{matrix}\right),\left( \begin{matrix}
0 & 0 \\
0 & 1 \\
\end{matrix}\right),\left( \begin{matrix}
0 & 1 \\
1 & 0 \\
\end{matrix}\right),\left( \begin{matrix}
y & 0 \\
0 & 0 \\
\end{matrix}\right),\left( \begin{matrix}
0 & 0 \\
0 & y \\
\end{matrix}\right),\left( \begin{matrix}
0 & 0 \\
0 & y^2 \\
\end{matrix}\right)\right\}.$$
If $\theta\in\frac{Sym_2}{T{\mathcal G}_{e}F}$ induces a non-trivial deformation $\tilde{F}$ then we can write $$\theta(x,y)=\left( \begin{matrix}
a_1+a_3y+a_4y^2 & 0 \\
0 & a_2+a_5y+a_6y^2 \\
\end{matrix}\right).$$
Thus $\tilde{F}=\left( \begin{matrix}
x+t(a_1+a_3y+a_4y^2) & y^2 \\
y^2 & x^2+t(a_2+a_5y+a_6y^2) \\
\end{matrix}\right)$.
Notice that
$I_D(\theta)=\langle a_3(y-y')+a_4(y^2-y'^2),a_5(y-y')+a_6(y^2-y'^2) \rangle$.
Suppose the $1$-jet type of $\tilde{F}$ and $F$ agree. Then $a_3=a_5=0$ and in this case $I_D(\theta)=\langle a_4(y^2-y'^2),a_6(y^2-y'^2) \rangle$. Since $y^2-y'^2\in I_D(\tilde{F})$ then $I_D(\theta)\subseteq I_D(\tilde{F})$ and the canonical vector field is Lipschitz.
Conversely, if the canonical vector field is Lipschitz then $a_3=a_5=0$. In fact, we are assuming that $I_D(\theta)\subseteq \overline{I_D(\tilde{F})}$.
The ideal $I_D(\tilde{F})$ is generated by $$\{ y^2-y'^2, x-x'+t(a_3(y-y')+a_4(y^2-y'^2)), x^2-x'^2+t(a_5(y-y')+a_6(y^2-y'^2)) \}.$$
Consider the curve $\phi(s)=(s,2s^3,2s^2,s,s^3,s^2)$. Then we have that $\phi^*(I_D(\tilde{F}))=\langle 3s^4,s^3+s(a_3s^2+3a_4s^4),3s^6+s(a_5s^2+3a_6s^4) \rangle\subseteq \langle s^3 \rangle$. Since $\phi^*(I_D(\theta))\subseteq\phi^*(I_D(\tilde{F}))\subseteq\langle s^3\rangle$ then $\phi^*(a_3(y-y')+a_4(y^2-y'^2))\\=a_3s^2+3a_4s^4\in\langle s^3\rangle$ which implies that $a_3s^2\in\langle s^3\rangle$, hence $a_3=0$. Analogously, using the same curve, we prove that $a_5=0$.
\end{proof}
\begin{lemma}
Let $F: (\mathbb{C}^2,0)\to Sym_2$ be a ${\mathcal G}$-simple germ of rank $0$ at the origin whose discriminant is of type $E_7$. Let $\tilde{F}$ be a deformation induced by $\theta\in\frac{Sym_2}{T{\mathcal G}_{e} F}$. Then the canonical vector field associated to $\tilde{F}$ is Lipschitz if and only if $\theta$ is constant.
\end{lemma}
\begin{proof}
The normal form of $F$ is $\left(
\begin{matrix}
x & 0 \\
0 & x^2+y^3 \\
\end{matrix}\right)$.
Then, the normal space $\frac{Sym_2}{T{\mathcal G}_{e}F}$ is generated by
$$\left\{\left( \begin{matrix}
1 & 0 \\
0 & 0 \\
\end{matrix}\right),\left( \begin{matrix}
0 & 0 \\
0 & 1 \\
\end{matrix}\right),\left( \begin{matrix}
0 & 1 \\
1 & 0 \\
\end{matrix}\right),\left( \begin{matrix}
0 & 0 \\
0 & y \\
\end{matrix}\right),\left( \begin{matrix}
y & 0 \\
0 & 0 \\
\end{matrix}\right),\left( \begin{matrix}
0 & y \\
y & 0 \\
\end{matrix}\right),\left( \begin{matrix}
0 & y^2 \\
y^2 & 0 \\
\end{matrix}\right)\right\}.$$
So we can write $\theta=\left( \begin{matrix}
a_1+a_5y & a_3+a_6y+a_7y^2 \\
a_3+a_6y+a_7y^2 & a_2+a_4y \\
\end{matrix}\right)$, for some $a_i\in{\mathbb{C}}$, $I_D(\theta)=\langle a_5(y-y'),a_4(y-y'),a_6(y-y')+a_7(y^2-y'^2) \rangle$ and \\$I_D(\tilde{F})=\langle x-x'+ta_5(y-y') ,t(a_6(y-y')+a_7(y^2-y'^2)),x^2-x'^2+y^3-y'^3+ta_4(y-y') \rangle$. Consider the curve $\phi(s)=(s^2,2s^3,2s,s^2,s^3,s)$. Thus, $\phi^*(I_D(\tilde{F}))=\langle s^3+a_5s^3,a_6s^3+3a_7s^4,3s^6+7s^3+a_4s^3\rangle\subseteq\langle s^3 \rangle$.
If the canonical vector field is Lipschitz then $a_5s,a_4s,a_6s+3a_7s^2\in\langle s^3\rangle$ which implies that $a_4=a_5=a_6=a_7=0$. Therefore, $\theta$ is constant.
\end{proof}
As in \cite{SGP}, the canonical vector field associated to the $1$-parameter deformation $\tilde{F}$ of the normal forms presented in \cite{Bruce} induced by $\theta\in\frac{Sym_3}{T{\mathcal G}_{e}F}$ is Lipschitz if and only if the $1$-jet type of $\tilde{F}$ and $F$ agree. The proof of the next result is analogous to the proof of the main result of \cite{SGP}.
\begin{proposition}
For all $\mathcal{G}$-simple germs $F: \mathbb{C}^{r} \to Sym_{3}$ of rank $0$ at the origin we have that the canonical vector field associated to the $1$-parameter deformation $\tilde{F}$ induced by $\theta\in\frac{Sym_3}{T{\mathcal G}_{e}F}$ is Lipschitz.
\end{proposition}
\begin{proof}
Suppose that $F$ has $1$-jet type of the form given in the tables in items (5) and (6) of Theorem 1.1 of \cite{B}. Since $\theta\in\frac{Mat_{(3)}({\mathcal O}_r)}{T\mathcal{G}F}$, the $r$ order-one entries of the matrix $F$ remain unperturbed; thus the differences of the monomial generators of the maximal ideal are in $I_D(\tilde{F})$. In particular, the ideal $I_\Delta$ of the diagonal satisfies the inclusion $I_\Delta\subseteq I_D(\tilde{F})$. Let $\theta_i$, $i\in\{1,\ldots,6\}$, be the components of $\theta$. Notice that every $(\theta_i)_D$ vanishes on the diagonal $\Delta$, which implies that all the generators of $I_D(\theta)$ belong to $I_\Delta$. Therefore, $I_D(\theta)\subseteq I_\Delta\subseteq I_D(\tilde{F})$, and Proposition 3.4 of \cite{SGP} ensures that the canonical vector field is Lipschitz.
\end{proof}
\begin{remark}[\cite{Bruce}, Remark 1.2.] In the cases when $r = 2$ and $n = 2, 3$ the $\mathcal{G}$-codimension of the germs and the Milnor number of the discriminant coincide.
\end{remark}
The next result is an application of the results of the previous section to the real case. The proof follows the same steps as the proof of Theorem 2.8 of \cite{SGP}.
\begin{theorem}
Consider the $\mathcal{G}$-simple germs $F: \mathbb{R}^{r} \to \textrm{Hom}(\mathbb{R}^{n},\mathbb{R}^{n})$ of rank $0$ at the origin, classified in Theorem 1.1 of \cite{BruceTari}, and consider the one-parameter unfolding $\tilde{F}:{\mathbb{R}}\times{\mathbb{R}}^{r}\rightarrow {\mathbb{R}}\times \mbox{Hom}({\mathbb{R}}^n,{\mathbb{R}}^n)$ induced by $\theta\in\frac{Mat_{n}(\mathcal{A}_{r})}{T\mathcal{G}_{e}F}$.
If the ideal of $1$-minors of $F$ defines a reduced point then the canonical vector field is Lipschitz.
\end{theorem}
\begin{proof}
Since the ideal of $1$-minors of $F$ defines a reduced point and $\theta\in\frac{Mat_{n}(\mathcal{A}_{r})}{T\mathcal{G}_{e}F}$, the $r$ order-one entries of $F$ remain unperturbed; thus the differences of the monomial generators of the maximal ideal are in $I_D(\tilde{F})$. Consequently, $I_{\Delta}\subseteq I_D(\tilde{F})$. Let $\theta_{ij}$ be the components of $\theta$, $i,j\in\{1,\hdots,n\}$. Clearly all $(\theta_{ij})_D$ vanish on $\Delta$. Hence, $I_D(\theta)\subseteq I_{\Delta}$, and the proof is complete by Corollary \ref{2.2}.
\end{proof}
\begin{remark}
In \cite{MP} the author obtains sufficient conditions for the topological triviality of $1$-parameter deformations of a weighted homogeneous matrix $M$ (see Propositions 6.1 and 6.2). Considering the action defined in Definition \ref{action}, the triviality condition is related to the tangent space to the $\mathcal{G}$-orbit of $M$. These conditions ensure that the canonical vector field is integrable.
At this point, one way to continue our study is to show that the homeomorphism obtained by integration of the canonical Lipschitz vector fields gives the bi-Lipschitz equivalence of the members of the respective family of square matrix map-germs according to Definition \ref{action}.
\end{remark}
\begin{small}
{\sc Thiago Filipe da Silva\\
Departamento de Matem\'atica, Universidade Federal do Esp\'irito Santo \\
Av. Fernando Ferrari, 514 - Goiabeiras, 29075-910 - Vit\'oria - ES, Brazil, [email protected]}
{\sc Nivaldo de G\'oes Grulha J\'unior\\
Instituto de Ci\^encias Matem\'aticas e de Computa\'c\~ao - USP\\
Av. Trabalhador S\~ao Carlense, 400 - Centro, 13566-590 - S\~ao Carlos - SP, Brazil, [email protected]}
{\sc Miriam da Silva Pereira\\
Departamento de Matem\'atica, Universidade Federal da Para\'iba, 58.051-900 Jo\~ao Pessoa, Brazil, [email protected]}
\end{small}
\end{document}
\begin{document}
\title[Quenched Large Deviations for RWRE]{Quenched Large Deviations for \\Random Walk in a Random Environment}
\author{Atilla Yilmaz}
\address
{Department of Mathematics\newline
\indent Weizmann Institute of Science\newline
\indent Rehovot 76100\newline
\indent ISRAEL}
\email{[email protected]}
\urladdr{http://www.wisdom.weizmann.ac.il/$\sim$yilmaz/}
\date{March 27, 2008. Revised on December 15, 2008.}
\subjclass[2000]{60K37, 60F10, 82C44.}
\keywords{Disordered media, rare events, point of view of the particle, Doob $h$-transform, invariant measure.}
\thanks{This research was supported partially by a grant from the National Science Foundation: DMS-06-04380.}
\maketitle
\begin{abstract}
We take the point of view of a particle performing random walk with bounded jumps on $\mathbb{Z}^d$ in a stationary and ergodic random environment. We prove the quenched large deviation principle (LDP) for the pair empirical measure of the environment Markov chain. By an appropriate contraction, we deduce the quenched LDP for the mean velocity of the particle and obtain a variational formula for the corresponding rate function. We propose an Ansatz for the minimizer of this formula. When $d=1$, we verify this Ansatz and generalize the nearest-neighbor result of Comets, Gantert and Zeitouni to walks with bounded jumps.
\end{abstract}
\section{Introduction}
\subsection{The model}
The random motion of a particle on $\mathbb{Z}^d$ can be modelled by a discrete time Markov chain. Write $\pi(x,x+z)$ for the transition probability from $x$ to $x+z$ for each $x,z\in\mathbb{Z}^d$ and refer to $\omega_x:=(\pi(x,x+z))_{z\in\mathbb{Z}^d}$ as the environment at $x$. If the environment $\omega:=(\omega_x)_{x\in\mathbb{Z}^d}$ is sampled from a probability space $(\Omega,\mathcal{B},\mathbb{P})$, then the particle is said to perform random walk in a random environment (RWRE). Here, $\mathcal{B}$ is the Borel $\sigma$-algebra.
For each $z\in\mathbb{Z}^d$, define the shift $T_z$ on $\Omega$ by $\left(T_z\omega\right)_x=\omega_{x+z}$ and assume that $\mathbb{P}$ is stationary and ergodic under $\left(T_z\right)_{z\in\mathbb{Z}^d}$. Plus, assume that the jumps are bounded by a constant $B$, i.e., for any $z=(z_1,\ldots,z_d)\in\mathbb{Z}^d$, $\pi(0,z)=0$ $\mathbb{P}$-a.s.\ unless $0<|z_1|+\cdots+|z_d|\leq B$. Denote the set of allowed jumps of the walk by \[\mathcal{R} := \{(z_1,\ldots,z_d)\in\mathbb{Z}^d:\;0<|z_1|+\cdots+|z_d|\leq B\}.\] When $B=1$, the walk is said to be nearest-neighbor and the set of allowed jumps is \[U:=\{(z_1,\ldots,z_d)\in\mathbb{Z}^d:\;|z_1|+\cdots+|z_d|=1\}.\]
For any $x\in\mathbb{Z}^d$ and $\omega\in\Omega$, the Markov chain with transition probabilities given by $\omega$ induces what is called the ``quenched" probability measure $P_x^\omega$ on the space of paths starting at $x$. The semi-direct product $P_x:=\mathbb{P}\times P_x^\omega$ is referred to as the ``averaged" measure. Expectations under $\mathbb{P}$, $P_x^\omega$ and $P_x$ are denoted by $\mathbb{E}$, $E_x^\omega$ and $E_x$, respectively.
Because of the extra layer of randomness in the model, the standard questions of recurrence vs.\ transience, the law of large numbers (LLN), the central limit theorem (CLT) and the large deviation principle (LDP) --- which have well known answers for classical random walk --- become hard. However, it is possible by taking the ``point of view of the particle" to treat the two layers of randomness as one: If we denote the random path of the particle by $X:=(X_n)_{n\geq0}$, then $(T_{X_n}\omega)_{n\geq0}$ is a Markov chain (referred to as the ``environment Markov chain") on $\Omega$ with transition kernel $\overline{\pi}$ given by \[\overline{\pi}(\omega,\omega'):=\sum_{z:T_z\omega=\omega'}\pi(0,z).\] This is a standard approach in the study of random media. (See, for example, \cite{DeMasi}, \cite{KV}, \cite{Kozlov}, \cite{Olla} or \cite{PV}.)
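For instance, for a nearest-neighbor walk on $\mathbb{Z}$ (recorded here only as an illustration), $\mathcal{R}=U=\{\pm1\}$ and the environment Markov chain jumps from $\omega$ to $T_{1}\omega$ with probability $\pi(0,1)$ and to $T_{-1}\omega$ with probability $\pi(0,-1)$: after each step, the walker sees the environment re-centered at its current position.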
Instead of viewing the environment Markov chain as an auxiliary construction, one can introduce it first and then deduce the particle dynamics from it.
\begin{definition}\label{ortamkeli}
A function $\hat{\pi}:\Omega\times\mathcal{R}\to\mathbb{R}^+$ is said to be an ``environment kernel" if\\(i) $\hat{\pi}(\cdot,z)$ is $\mathcal{B}$-measurable for each $z\in\mathcal{R}$, and (ii) $\sum_{z\in\mathcal{R}}\hat{\pi}(\cdot,z)=1,\ \mathbb{P}$-a.s.\\
It can be viewed as a transition kernel on $\Omega$ via the following identification: \[\overline{\pi}(\omega,\omega'):=\sum_{z:T_z\omega=\omega'}\hat{\pi}(\omega,z).\]
Given $x\in\mathbb{Z}^d$, $\omega\in\Omega$ and any environment kernel $\hat{\pi}$, the ``quenched" probability measure $P_x^{\hat{\pi},\omega}$ on the space of particle paths $(X_n)_{n\geq0}$ starting at $x$ in environment $\omega$ is defined by setting $P_x^{\hat{\pi},\omega}\left(X_0=x\right)=1$ and \[P_x^{\hat{\pi},\omega}\left(X_{n+1}=y+z\left|X_n=y\right.\right)=\hat{\pi}(T_y\omega,z)\] for all $n\geq0$, $y\in\mathbb{Z}^d$ and $z\in\mathcal{R}$. The semi-direct product $P_x^{\hat{\pi}}:=\mathbb{P}\times P_x^{\hat{\pi},\omega}$ is referred to as the ``averaged" measure, and expectations under $P_x^{\hat{\pi},\omega}$ and $P_x^{\hat{\pi}}$ are denoted by $E_x^{\hat{\pi},\omega}$ and $E_x^{\hat{\pi}}$, respectively.
\end{definition}
See \cite{Sznitman} or \cite{rwrereview} for a more detailed description of RWRE, examples and a survey of the literature. In this work, we focus on the quenched large deviation properties of this model.
\subsection{Previous results}\label{egridogru}
Greven and den Hollander \cite{GdH} prove the quenched LDP for the mean velocity of a particle performing nearest-neighbor random walk on $\mathbb{Z}$ in a product environment (i.e., when $\mathbb{P}$ is a product measure) and show that the rate function is convex but typically has parts consisting of line segments. Their proof makes use of an auxiliary branching process formed by the excursions of the walk. Using a completely different technique, Comets, Gantert and Zeitouni \cite{CGZ} extend the results of \cite{GdH} to stationary and ergodic environments. Their argument involves first proving a quenched LDP for the passage times of the walk by an application of the G\"artner--Ellis theorem and then inverting this to get the desired LDP for the mean velocity.
For $d\geq2$, the first result on quenched large deviations is given by Zerner \cite{Zerner}. He uses a subadditivity argument again for certain passage times to get the quenched LDP in the case of product environments. He assumes that the environment is ``nestling", i.e., the convex hull of the support of the law of $\sum_{z\in\mathcal{R}}\pi(0,z)z$ contains the origin. By a more direct use of the subadditive ergodic theorem, Varadhan \cite{Raghu} drops the nestling assumption and generalizes Zerner's result to stationary and ergodic environments.
The drawback of using subadditivity arguments is that one does not obtain a formula for the rate function. Rosenbluth \cite{jeffrey} takes the point of view of the particle and gives an alternative proof of the quenched LDP for the mean velocity in the case of stationary and ergodic environments. He provides a variational formula for the rate function. His approach is parallel to the work of Kosygina, Rezakhanlou and Varadhan \cite{KRV} on diffusions in random environments.
\subsection{Our results}\label{results}
For any measurable space $(Y,\mathcal{F})$, write $M_1(Y,\mathcal{F})$ (or simply $M_1(Y)$ whenever no confusion occurs) for the space of probability measures on $(Y,\mathcal{F})$. Consider random walk $X=(X_n)_{n\geq0}$ on $\mathbb{Z}^d$ in a stationary and ergodic random environment, and focus on \[\nu_{n,X} := \frac{1}{n}\sum_{k=0}^{n-1}{{\rm 1\mkern-1.5mu}\!{\rm I}}_{T_{X_k}\omega,X_{k+1}-X_k}\] which is a random element of $M_1(\Omega\times\mathcal{R})$. The map $(\omega,z)\mapsto(\omega,T_z\omega)$ imbeds $M_1(\Omega\times\mathcal{R})$ into $M_1(\Omega\times\Omega)$, and we therefore refer to $\nu_{n,X}$ as the pair empirical measure of the environment Markov chain. For any $\mu\in M_1(\Omega\times\mathcal{R})$, define the probability measures $(\mu)^1$ and $(\mu)^2$ on $\Omega$ by \[\mathrm{d}(\mu)^1(\omega):=\sum_{z\in\mathcal{R}}\mathrm{d}\mu(\omega,z)\ \ \mbox{and}\ \ \mathrm{d}(\mu)^2(\omega):=\sum_{z\in\mathcal{R}}\mathrm{d}\mu(T_{-z}\omega,z)\] which are the marginals of $\mu$ when $\mu$ is seen as an element of $M_1(\Omega\times\Omega)$. With this notation, set
\begin{equation*}
M_{1,s}^{\ll}(\Omega\times\mathcal{R}):= \left\{\mu\in M_1(\Omega\times\mathcal{R}): (\mu)^1=(\mu)^2\ll\mathbb{P},\ \frac{\mathrm{d}\mu(\omega,z)}{\mathrm{d}(\mu)^1(\omega)}>0\ \mathbb{P}\mbox{-a.s.\ for each }z\in U\right\}.
\end{equation*}
Our first result is the following theorem whose proof constitutes Section \ref{pairLDPsection}.
\begin{theorem}\label{level2LDP} If there exists an $\alpha>0$ such that
\begin{equation}
\int|\log \pi(0,z)|^{d+\alpha}\,\mathrm{d}\mathbb{P}<\infty\label{kimimvarki}
\end{equation} for each $z\in\mathcal{R}$, then
$\mathbb{P}$-a.s.\ $(P_o^\omega(\nu_{n,X}\in\cdot))_{n\geq1}$ satisfy the LDP with the good rate function $\mathfrak{I}^{**}$, the double Fenchel--Legendre transform of $\mathfrak{I}:M_1(\Omega\times\mathcal{R})\to\mathbb{R}^+$ given by
\begin{equation}
\mathfrak{I}(\mu)=\left\{
\begin{array}{ll}
\int_{\Omega}\sum_{z\in\mathcal{R}} \mathrm{d}\mu(\omega,z)\log\frac{\mathrm{d}\mu(\omega,z)}{\mathrm{d}(\mu)^1(\omega)\pi(0,z)}& \mbox{if }\mu\in M_{1,s}^{\ll}(\Omega\times\mathcal{R}),\\ \infty & \mbox{otherwise.}
\end{array}\right.\label{level2ratetilde}
\end{equation}
\end{theorem}
\begin{remark}
$\mathfrak{I}$ is convex, but it may not be lower semicontinuous. Therefore, $\mathfrak{I}^{**}$ is not a priori equal to $\mathfrak{I}$. See Appendix A for a detailed explanation.
\end{remark}
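\begin{remark}
On $M_{1,s}^{\ll}(\Omega\times\mathcal{R})$, the functional $\mathfrak{I}$ is an averaged relative entropy: writing $\mu(z|\omega):=\frac{\mathrm{d}\mu(\omega,z)}{\mathrm{d}(\mu)^1(\omega)}$ for the conditional weights of $\mu$, one has \[\mathfrak{I}(\mu)=\int_{\Omega}\left(\sum_{z\in\mathcal{R}}\mu(z|\omega)\log\frac{\mu(z|\omega)}{\pi(0,z)}\right)\mathrm{d}(\mu)^1(\omega),\] i.e.\ the relative entropy of the kernel $\mu(\cdot|\omega)$ with respect to $\pi(0,\cdot)$, integrated against the marginal $(\mu)^1$.
\end{remark}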
We start Section \ref{birboyutadonus} by deducing the quenched LDP for the mean velocity of the particle by an application of the contraction principle. For any $\mu\in M_1(\Omega\times\mathcal{R})$, set
\begin{equation}
\xi_{\mu}:=\int\sum_{z\in\mathcal{R}}\mathrm{d}\mu(\omega,z)z.\label{ximu}
\end{equation} For any $\xi\in\mathbb{R}^d$, define
\begin{equation}
A_\xi:=\{\mu\in M_1(\Omega\times\mathcal{R}):\xi_{\mu}=\xi\}.\label{Axi}
\end{equation} The corollary below follows immediately from Theorem \ref{level2LDP} and reproduces the central result of \cite{jeffrey}.
\begin{corollary}\label{level1LDP}
If there exists $\alpha>0$ such that (\ref{kimimvarki}) holds for each $z\in\mathcal{R}$, then $(P_o^\omega(\frac{X_n}{n}\in\cdot))_{n\geq1}$ satisfy the LDP for $\mathbb{P}$-a.e.\ $\omega$. The good rate function $I$ is given by
\begin{eqnarray}
I(\xi)&=&\inf_{\mu\in A_\xi} \mathfrak{I}^{**}(\mu)\label{level1rate}\\
&=&\inf_{\mu\in A_\xi} \mathfrak{I}(\mu)\label{level1ratetilde}
\end{eqnarray} where $\mathfrak{I}$ and $A_\xi$ are defined in (\ref{level2ratetilde}) and (\ref{Axi}), respectively. $I$ is convex.
\end{corollary}
One would like to get a more explicit expression for the rate function $I$. This is not an easy task in general. $M_1(\Omega\times\mathcal{R})$ is compact (when equipped with the weak topology), $A_\xi$ is closed, and $\mathfrak{I}^{**}$ is lower semicontinuous. Therefore, the infimum in (\ref{level1rate}) is attained. However, due to the possible lack of lower semicontinuity of $\mathfrak{I}$, the infimum in (\ref{level1ratetilde}) may not be attained.
\begin{definition} \label{K}
A measurable function $F:\Omega\times\mathcal{R}\rightarrow\mathbb{R}$ is said to be in class $\mathcal{K}$ if it satisfies the following conditions:
\begin{description}
\item[Moment] For each $z\in\mathcal{R}$, $F(\cdot,z)\in\bigcup_{\alpha>0}L^{d+\alpha}(\mathbb{P})$.
\item[Mean zero] For each $z\in\mathcal{R}$, $\mathbb{E}\left[F(\cdot,z)\right]=0$.
\item[Closed loop] For $\mathbb{P}$-a.e.\ $\omega$ and any finite sequence $(x_{k})_{k=0}^n$ in $\mathbb{Z}^d$ such that $x_{k+1}-x_k\in\mathcal{R}$ and $x_0=x_n$, \[\sum_{k=0}^{n-1}F(T_{x_k}\omega,x_{k+1}-x_k)=0.\]
\end{description}
\end{definition}
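\noindent A simple family of examples (recorded here only to illustrate the definition) is given by gradients: for a bounded measurable $g:\Omega\to\mathbb{R}$, set $F(\omega,z):=g(T_z\omega)-g(\omega)$. The moment condition is immediate, the mean zero condition follows from the stationarity of $\mathbb{P}$ under $\left(T_z\right)_{z\in\mathbb{Z}^d}$, and the closed loop condition holds because the sum telescopes: \[\sum_{k=0}^{n-1}F(T_{x_k}\omega,x_{k+1}-x_k)=\sum_{k=0}^{n-1}\bigl(g(T_{x_{k+1}}\omega)-g(T_{x_k}\omega)\bigr)=g(T_{x_n}\omega)-g(T_{x_0}\omega)=0.\]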
\noindent The following lemma provides an Ansatz and states that whenever an element of $A_\xi$ fits this Ansatz, it is the unique minimizer of (\ref{level1ratetilde}). Its proof concludes Section \ref{birboyutadonus}.
\begin{lemma}\label{lagrange}
For any $\xi\in\mathbb{R}^d$, if there exists a $\mu_\xi\in A_\xi\cap M_{1,s}^{\ll}(\Omega\times\mathcal{R})$ such that \[\mathrm{d}\mu_\xi(\omega,z)=\mathrm{d}(\mu_\xi)^1(\omega) \pi(0,z)\mathrm{e}^{\langle\theta,z\rangle+F(\omega,z)+ r}\] for some $\theta\in\mathbb{R}^d$, $F\in\mathcal{K}$ and $r\in\mathbb{R}$, then $\mu_\xi$ is the unique minimizer of (\ref{level1ratetilde}).
\end{lemma}
In Sections \ref{vandiseksin} and \ref{dolapdere}, we show that the recipe given in Lemma \ref{lagrange} works when $d=1$. We make the following assumptions:
\begin{enumerate}
\item [(A1)] There exists an $\alpha>0$ such that $\int|\log\pi(0,z)|^{1+\alpha}\mathrm{d}\mathbb{P}<\infty$ for each $z\in\mathcal{R}$.
\item [(A2)] There exists a $\delta>0$ such that $\mathbb{P}(\pi(0,\pm 1)\geq\delta)=1$. This is called ``uniform ellipticity".
\end{enumerate}
For every $y\in\mathbb{Z}$,
\begin{equation}
\tau_y:=\inf\{k\geq0:X_k\geq y\}\quad\mbox{and}\quad\bar{\tau}_y:=\inf\{k\geq0:X_k\leq y\}\label{nihatgomermis}
\end{equation} denote the right and left passage times of the walk. The following lemma is central to our argument.
\begin{lemma}\label{lifeisrandom}
Suppose $d=1$. Under the assumptions (A1) and (A2), the limits
$$\lambda(r):=\lim_{n\to\infty}\frac{1}{n}\log E_o^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right]\quad\mbox{and}\quad\bar{\lambda}(r):=\lim_{n\to\infty}\frac{1}{n}\log E_o^\omega\left[\mathrm{e}^{r\bar{\tau}_{-n}},\bar{\tau}_{-n}<\infty\right]$$ exist for $\mathbb{P}$-a.e. $\omega$. The functions $r\mapsto\lambda(r)$ and $r\mapsto\bar{\lambda}(r)$ are
\begin{enumerate}
\item [(i)] deterministic,
\item [(ii)] finite precisely on $(-\infty,r_c]$ for some $r_c\in[0,\infty)$, and
\item [(iii)] strictly convex and differentiable on $(-\infty,r_c)$.
\end{enumerate}
The constants $$\xi_c:=\left(\lambda'(r_c-)\right)^{-1}\quad\mbox{and}\quad\bar{\xi}_c:=-\left(\bar{\lambda}'(r_c-)\right)^{-1}$$
satisfy $-B<\bar{\xi}_c\leq0\leq\xi_c<B$.
\end{lemma}
For every $\xi\in(-B,\bar{\xi}_c)\cup(\xi_c,B)$, we construct a $\mu_\xi$ that fits the Ansatz given in Lemma \ref{lagrange}. Substituting it in (\ref{level2ratetilde}), we get an explicit expression for (\ref{level1ratetilde}).
\begin{theorem}\label{explicitformulah}
Suppose $d=1$. Under the assumptions (A1) and (A2),
\begin{equation}\label{nnayni}
I(\xi)=\left\{
\begin{array}{ll}
\sup_{r\in\mathbb{R}}\{r-\xi\lambda(r)\}&\mbox{if }\xi>0,\\
\sup_{r\in\mathbb{R}}\{r+\xi\bar{\lambda}(r)\}&\mbox{if }\xi<0,\\
r_c&\mbox{if }\xi=0
\end{array}\right.
\end{equation}
with $r_c$ given in Lemma \ref{lifeisrandom}. The function $\xi\mapsto I(\xi)$ is
\begin{enumerate}
\item [(i)] affine linear on $[\bar{\xi}_c,0]$ and $[0,\xi_c]$,
\item [(ii)] strictly convex on $(-B,\bar{\xi}_c)$ and $(\xi_c,B)$, and
\item [(iii)] differentiable on $(-B,0)$ and $(0,B)$.
\end{enumerate}
\end{theorem}
Section \ref{vandiseksin} focuses on nearest-neighbor walks under assumption (A1). In that case, the proof of Lemma \ref{lifeisrandom} is straightforward since
$$\lambda(r)=\mathbb{E}\left(\log E_o^\omega\left[\mathrm{e}^{r\tau_1},\tau_1<\infty\right]\right)\quad\mbox{and}\quad\bar{\lambda}(r)=\mathbb{E}\left(\log E_o^\omega\left[\mathrm{e}^{r\bar{\tau}_{-1}},\bar{\tau}_{-1}<\infty\right]\right).$$ Naturally, Theorem \ref{explicitformulah} is identical to the quenched LDP result of \cite{CGZ}.
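For instance, in the degenerate case of a constant (non-random) environment with $\pi(0,1)=p$ and $\pi(0,-1)=q=1-p$, $0<p<1$ (recorded here only as an illustration), the classical generating function of the first passage time of a simple random walk gives \[E_o\left[\mathrm{e}^{r\tau_1},\tau_1<\infty\right]=\frac{1-\sqrt{1-4pq\mathrm{e}^{2r}}}{2q\mathrm{e}^{r}},\] so that $\lambda(r)$ is finite precisely when $4pq\mathrm{e}^{2r}\leq1$, i.e.\ for $r\leq r_c=\frac{1}{2}\log\frac{1}{4pq}$, in agreement with Lemma \ref{lifeisrandom}.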
The general case of walks with bounded jumps is studied in Section \ref{dolapdere} where the proofs are more technical. Theorem \ref{explicitformulah} generalizes the quenched LDP result of \cite{CGZ}, but there is a qualitative difference:
\begin{proposition}\label{Wronskian}
Suppose $d=1$. For nearest-neighbor walks, $I(\xi)=I(-\xi)+\xi\cdot(\bar{\lambda}(0)-\lambda(0))$ if $\xi\in[-1,0)$.
Such a symmetry is generally absent for walks with bounded jumps.
\end{proposition}
\section{Large deviation principle for the pair empirical measure}\label{pairLDPsection}
As mentioned in Subsection \ref{egridogru}, Rosenbluth \cite{jeffrey} takes the point of view of a particle performing RWRE and proves the quenched LDP for the mean velocity. In this section, we generalize his argument and prove Theorem \ref{level2LDP}. The strategy is to show the existence of the logarithmic moment generating function $\Lambda: C_b(\Omega\times\mathcal{R})\rightarrow\mathbb{R}$ given by
\begin{eqnarray*}
\Lambda(f)&=&\lim_{n\rightarrow\infty}\frac{1}{n}\log E_o^{\omega}\left[\mathrm{e}^{n\langle f,\nu_{n,X}\rangle}\right]\\
&=&\lim_{n\rightarrow\infty}\frac{1}{n}\log E_o^{\omega}\left[\exp\left(\sum_{k=0}^{n-1} f(T_{X_k}\omega,X_{k+1}-X_k)\right)\right]
\end{eqnarray*} where $C_b$ denotes the space of bounded continuous functions.
\begin{theorem}\label{LMGF}
Suppose there exists an $\alpha>0$ such that (\ref{kimimvarki}) holds for each $z\in\mathcal{R}$. Then, the following are true:
\begin{description}
\item[Lower bound] For $\mathbb{P}$-a.e.\ $\omega$,
\begin{align*}
&\liminf_{n\rightarrow\infty}\frac{1}{n}\log E_o^{\omega}\left[\exp\left(\sum_{k=0}^{n-1} f(T_{X_k}\omega,X_{k+1}-X_k)\right)\right]\\
&\geq\sup_{\mu\in M_{1,s}^{\ll}(\Omega\times\mathcal{R})}\int\sum_{z\in\mathcal{R}}\mathrm{d}\mu(\omega,z)\left(f(\omega,z)-\log \frac{\mathrm{d}\mu(\omega,z)}{\mathrm{d}(\mu)^1(\omega)\pi(0,z)}\right)=:\Gamma(f).
\end{align*}
\item[Upper bound] For $\mathbb{P}$-a.e.\ $\omega$,
\begin{align*}
&\limsup_{n\rightarrow\infty}\frac{1}{n}\log E_o^{\omega}\left[\exp\left(\sum_{k=0}^{n-1} f(T_{X_k}\omega,X_{k+1}-X_k)\right)\right]\\
&\leq\inf_{F\in\mathcal{K}}\mathrm{ess}\sup_{\mathbb{P}}\log\sum_{z\in\mathcal{R}}\pi(0,z)\mathrm{e}^{f(\omega,z)+F(\omega,z)}=:\Lambda(f).
\end{align*}
\item[Equivalence of the bounds] For every $\epsilon>0$, there exists an $F_\epsilon\in\mathcal{K}$ such that
\[\mathrm{ess}\sup_{\mathbb{P}}\log\sum_{z\in\mathcal{R}}\pi(0,z)\mathrm{e}^{f(\omega,z)+F_\epsilon(\omega,z)}\leq\Gamma(f)+\epsilon.\]
\end{description}Thus, $\Lambda(f)\leq\Gamma(f)$. This clearly implies the existence of the logarithmic moment generating function.
\end{theorem}
Subsection \ref{LMGFsubsection} is devoted to the proof of Theorem \ref{LMGF}. After that, proving Theorem \ref{level2LDP} is easy: the LDP lower bound is obtained by a change of measure argument, and the LDP upper bound is a standard result since $M_1(\Omega\times\mathcal{R})$ is compact. Details are given in Subsection \ref{LDPproof}.
In our proofs, we will make frequent use of
\begin{lemma}[Kozlov \cite{Kozlov}]\label{Kozlov}
If an environment kernel $\hat{\pi}$ satisfies $\hat{\pi}(\cdot,z)>0$ $\mathbb{P}$-a.s.\ for each $z\in U$, and if there exists a $\hat{\pi}$-invariant probability measure $\mathbb{Q}\ll\mathbb{P}$, then the following hold:
\begin{itemize}
\item[(a)] The measures $\mathbb{P}$ and $\mathbb{Q}$ are in fact mutually absolutely continuous.
\item[(b)] The environment Markov chain with transition kernel $\hat{\pi}$ and initial distribution $\mathbb{Q}$ is stationary and ergodic.
\item[(c)] $\mathbb{Q}$ is the unique $\hat{\pi}$-invariant probability measure on $\Omega$ that is absolutely continuous relative to $\mathbb{P}$.
\item[(d)] The following LLN is satisfied: \[P_o^{\hat{\pi}}\left(\lim_{n\rightarrow\infty}\frac{X_n}{n}=\int\sum_{z\in\mathcal{R}}\hat{\pi}(\omega,z)z\;\mathrm{d}\mathbb{Q}\right) = 1.\]
\end{itemize}
\end{lemma}
\subsection{Logarithmic moment generating function}\label{LMGFsubsection}
\subsubsection{Lower bound}\label{pourum}
This is a standard change of measure argument. For any environment kernel $\hat{\pi}$ as in Definition \ref{ortamkeli},
\begin{align*}
&E_o^{\omega}\left[\exp\left(\sum_{k=0}^{n-1} f(T_{X_k}\omega,X_{k+1}-X_k)\right)\right]\\
&=E_o^{\hat{\pi},\omega}\left[\exp\left(\sum_{k=0}^{n-1} f(T_{X_k}\omega,X_{k+1}-X_k)\right)\,\frac{\mathrm{d}P_o^\omega}{\mathrm{d}P_o^{\hat{\pi},\omega}}\right]\\
&=E_o^{\hat{\pi},\omega}\left[\exp\left(\sum_{k=0}^{n-1}f(T_{X_k}\omega,X_{k+1}-X_k)-\log\frac{\hat{\pi}(T_{X_k}\omega,X_{k+1}-X_k)}{\pi(X_k,X_{k+1})}\right)\right].
\end{align*} If $\hat{\pi}(\cdot,z)>0$ $\mathbb{P}$-a.s.\ for each $z\in U$, and if there exists a $\phi\in L^1(\mathbb{P})$ such that $\phi\,\mathrm{d}\mathbb{P}$ is an invariant probability measure for the kernel $\hat{\pi}$, i.e., if $$\phi(\omega)=\sum_{z\in\mathcal{R}}\phi(T_{-z}\omega)\hat{\pi}(T_{-z}\omega,z)$$ for $\mathbb{P}$-a.e.\ $\omega$, then it follows from Lemma \ref{Kozlov} that $\phi\,\mathrm{d}\mathbb{P}$ is in fact an ergodic invariant measure for $\hat{\pi}$. By Jensen's inequality,
\begin{align}
&\liminf_{n\rightarrow\infty}\frac{1}{n}\log E_o^{\omega}\left[\exp\left(\sum_{k=0}^{n-1} f(T_{X_k}\omega,X_{k+1}-X_k)\right)\right]\nonumber\\
&\geq\liminf_{n\rightarrow\infty}E_o^{\hat{\pi},\omega}\left[\frac{1}{n}\sum_{k=0}^{n-1}f(T_{X_k}\omega,X_{k+1}-X_k)-\log\frac{\hat{\pi}(T_{X_k}\omega,X_{k+1}-X_k)}{\pi(X_k,X_{k+1})}\right]\nonumber\\
&=\int\sum_{z\in\mathcal{R}}\hat{\pi}(\omega,z)\left(f(\omega,z)-\log \frac{\hat{\pi}(\omega,z)}{\pi(0,z)}\right)\phi(\omega)\mathrm{d}\mathbb{P}=:H_f(\hat{\pi},\phi).\label{hakkariye}
\end{align} Therefore,
\begin{align}
&\liminf_{n\rightarrow\infty}\frac{1}{n}\log E_o^{\omega}\left[\exp\left(\sum_{k=0}^{n-1} f(T_{X_k}\omega,X_{k+1}-X_k)\right)\right]\nonumber\\
&\geq\sup_{(\hat{\pi},\phi)}\int\sum_{z\in\mathcal{R}}\hat{\pi}(\omega,z)\left(f(\omega,z)-\log \frac{\hat{\pi}(\omega,z)}{\pi(0,z)}\right)\phi(\omega)\mathrm{d}\mathbb{P}\label{stef}
\end{align} where the supremum is taken over the set of all $(\hat{\pi},\phi)$ pairs where $\hat{\pi}(\cdot,z)>0$ $\mathbb{P}$-a.s.\ for each $z\in U$ and $\phi\,\mathrm{d}\mathbb{P}$ is a $\hat{\pi}$-invariant probability measure. Note that there is a one-to-one correspondence between this set and $M_{1,s}^{\ll}(\Omega\times\mathcal{R})$. Hence, (\ref{stef}) is the desired lower bound.
Before proceeding with the upper bound, let us put (\ref{stef}) in a form that will turn out to be more convenient for showing the equivalence of the bounds. We start by giving a lemma.
\begin{lemma}\label{camilla}
For every $f\in C_b(\Omega\times\mathcal{R})$, $H_f$ (defined in (\ref{hakkariye})) has the following concavity property: For each $t\in(0,1)$ and any two pairs $(\hat{\pi}_1,\phi_1)$ and $(\hat{\pi}_2,\phi_2)$ where $\phi_i\,\mathrm{d}\mathbb{P}$ is $\hat{\pi}_i$-invariant (for $i=1,2$), define \[\gamma=\frac{t\phi_1}{t\phi_1+(1-t)\phi_2},\ \ \phi_3=t\phi_1+(1-t)\phi_2\ \ \mbox{and}\ \ \hat{\pi}_3=\gamma\hat{\pi}_1+(1-\gamma)\hat{\pi}_2.\] Then, $\phi_3\,\mathrm{d}\mathbb{P}$ is $\hat{\pi}_3$-invariant and
\begin{equation}
H_f(\hat{\pi}_3,\phi_3)\geq tH_f(\hat{\pi}_1,\phi_1)+(1-t)H_f(\hat{\pi}_2,\phi_2).\label{yozgat}
\end{equation}
\end{lemma}
\begin{proof}
For any $t\in(0,1)$, it follows from the definitions and the assumptions in the statement of the lemma that $\mathbb{P}$-a.s.
\begin{align*}
\sum_{z\in\mathcal{R}}\phi_3(T_{-z}\omega)\hat{\pi}_3(T_{-z}\omega,z)&=\sum_{z\in\mathcal{R}}\phi_3(T_{-z}\omega)\gamma(T_{-z}\omega)\hat{\pi}_1(T_{-z}\omega,z)+\sum_{z\in\mathcal{R}}\phi_3(T_{-z}\omega)(1-\gamma(T_{-z}\omega))\hat{\pi}_2(T_{-z}\omega,z)\\
&=\ t\sum_{z\in\mathcal{R}}\phi_1(T_{-z}\omega)\hat{\pi}_1(T_{-z}\omega,z)+(1-t)\sum_{z\in\mathcal{R}}\phi_2(T_{-z}\omega)\hat{\pi}_2(T_{-z}\omega,z)\\
&=\ t\phi_1(\omega)+(1-t)\phi_2(\omega)=\phi_3(\omega).
\end{align*} In words, $\phi_3\,\mathrm{d}\mathbb{P}$ is $\hat{\pi}_3$-invariant. Finally,
\begin{align*}
H_f(\hat{\pi}_3,\phi_3)&=\int\sum_{z\in\mathcal{R}}\hat{\pi}_3(\omega,z)\left(f(\omega,z)-\log \frac{\hat{\pi}_3(\omega,z)}{\pi(0,z)}\right)\phi_3(\omega)\mathrm{d}\mathbb{P}\\&\geq\int\gamma(\omega)\sum_{z\in\mathcal{R}}\hat{\pi}_1(\omega,z)\left(f(\omega,z)-\log \frac{\hat{\pi}_1(\omega,z)}{\pi(0,z)}\right)\phi_3(\omega)\mathrm{d}\mathbb{P}\\&\ \ \ +\int(1-\gamma(\omega))\sum_{z\in\mathcal{R}}\hat{\pi}_2(\omega,z)\left(f(\omega,z)-\log \frac{\hat{\pi}_2(\omega,z)}{\pi(0,z)}\right)\phi_3(\omega)\mathrm{d}\mathbb{P}\\&=\ t\int\sum_{z\in\mathcal{R}}\hat{\pi}_1(\omega,z)\left(f(\omega,z)-\log \frac{\hat{\pi}_1(\omega,z)}{\pi(0,z)}\right)\phi_1(\omega)\mathrm{d}\mathbb{P}\\&\ \ \ +(1-t)\int\sum_{z\in\mathcal{R}}\hat{\pi}_2(\omega,z)\left(f(\omega,z)-\log \frac{\hat{\pi}_2(\omega,z)}{\pi(0,z)}\right)\phi_2(\omega)\mathrm{d}\mathbb{P}\\&=\ tH_f(\hat{\pi}_1,\phi_1)+(1-t)H_f(\hat{\pi}_2,\phi_2)
\end{align*} where the second line is obtained by applying Jensen's inequality to the integrand.
\end{proof}
Going back to the argument, let $\hat{\pi}_1(\omega,z):={1}/{(2d)}$ for each $z\in U$ and $\phi_1(\omega):=1$ for $\mathbb{P}$-a.e.\ $\omega$. An easy computation gives $H_f(\hat{\pi}_1,\phi_1)>-\infty$. Take any pair $(\hat{\pi}_2,\phi_2)$ such that $\phi_2\,\mathrm{d}\mathbb{P}$ is $\hat{\pi}_2$-invariant and $H_f(\hat{\pi}_2,\phi_2)>-\infty$. For any $t\in(0,1)$, define $(\hat{\pi}_3,\phi_3)$ as in Lemma \ref{camilla} and see that $\hat{\pi}_3(\omega,z)>0$ $\mathbb{P}$-a.s.\ for each $z\in U$. Recalling (\ref{yozgat}), note that $H_f(\hat{\pi}_3,\phi_3)\geq (1-t)H_f(\hat{\pi}_2,\phi_2) + O(t)$. Since one can take $t$ arbitrarily small, the value of (\ref{stef}) does not change if the supremum there is taken over the set of all $(\hat{\pi},\phi)$ pairs where $\phi\,\mathrm{d}\mathbb{P}$ is a $\hat{\pi}$-invariant probability measure, dropping the positivity condition on $\hat{\pi}$. Finally, decouple $\hat{\pi}$ and $\phi$, and express the lower bound $\Gamma(f)$ as
\begin{equation}
\sup_{\phi}\sup_{\hat{\pi}}\inf_{h}\int\sum_{z\in\mathcal{R}}\hat{\pi}(\omega,z)\left(f(\omega,z)-\log \frac{\hat{\pi}(\omega,z)}{\pi(0,z)}+h(\omega)-h(T_z\omega)\right)\phi\,\mathrm{d}\mathbb{P}\label{putinh}
\end{equation} where the suprema are over all probability densities and all environment kernels, and the infimum is over all bounded measurable functions. This is due to the observation that if $\phi\,\mathrm{d}\mathbb{P}$ is not $\hat{\pi}$-invariant, then there exists a bounded measurable function $h:\Omega\to\mathbb{R}$ that satisfies \[\int\sum_{z\in\mathcal{R}}\hat{\pi}(\omega,z)\left(h(\omega)-h(T_z\omega)\right)\phi(\omega)\mathrm{d}\mathbb{P}\neq0,\] and taking scalar multiples of $h$ shows that the infimum in (\ref{putinh}) is $-\infty$.
\subsubsection{Upper bound}
Fix $f\in C_b(\Omega\times\mathcal{R})$. For any $F\in\mathcal{K}$, define \[K(F):=\mathrm{ess}\sup_{\mathbb{P}}\log\sum_{z\in\mathcal{R}}\pi(0,z)\mathrm{e}^{f(\omega,z)+F(\omega,z)}.\] Then, $\mathbb{P}$-a.s.
\begin{align}
&E_o^{\omega}\left[\left.\mathrm{e}^{f(T_{X_{n-1}}\omega, X_n-X_{n-1})+F(T_{X_{n-1}}\omega, X_n-X_{n-1})}\right|X_{n-1}\right]\label{itelebabam}\\
&=\sum_{z\in\mathcal{R}}\pi(X_{n-1},X_{n-1}+z)\mathrm{e}^{f(T_{X_{n-1}}\omega,z)+F(T_{X_{n-1}}\omega,z)}\nonumber\\
&\leq\mathrm{e}^{K(F)}.\nonumber
\end{align}
Taking conditional expectations and iterating (\ref{itelebabam}), one sees that $\mathbb{P}$-a.s. \[E_o^{\omega}\left[\exp\left(\sum_{k=0}^{n-1}f(T_{X_k}\omega,X_{k+1}-X_k)+F(T_{X_k}\omega,X_{k+1}-X_k)\right)\right]\leq\mathrm{e}^{nK(F)}.\] At this point, for any $\epsilon>0$, use Lemma \ref{GRR} (stated below) to write \[E_o^{\omega}\left[\exp\left(-c_\epsilon-n\epsilon+\sum_{k=0}^{n-1}f(T_{X_k}\omega,X_{k+1}-X_k)\right)\right]\leq\mathrm{e}^{nK(F)}\] where $c_\epsilon = c_\epsilon(\omega)$ is some constant. Arrange the terms to obtain \[\frac{1}{n}\log E_o^{\omega}\left[\exp\left(\sum_{k=0}^{n-1} f(T_{X_k}\omega,X_{k+1}-X_k)\right)\right]\leq K(F)+\epsilon+\frac{c_\epsilon}{n}.\] Let $n\to\infty,\ \epsilon\to 0$, and take infimum over $F\in\mathcal{K}$. This is the desired upper bound.
\begin{lemma}\label{GRR}
For every $F\in\mathcal{K}$, $\epsilon>0$ and $\mathbb{P}$-a.e.\ $\omega$, there exists $c_\epsilon= c_\epsilon(\omega)\geq 0$ such that for any sequence $(x_{k})_{k=0}^n$ with $x_o=0$ and $x_{k+1}-x_k\in\mathcal{R}$, \[\left|\sum_{k=0}^{n-1}F(T_{x_k}\omega,x_{k+1}-x_k)\right| \leq c_\epsilon+n\epsilon\] for all $n\geq1$.
\end{lemma}
\begin{remark}\label{nilosmu}
Lemma \ref{GRR} is proved in Chapter 2 of \cite{jeffrey}. See Appendix B for a sketch of the proof. In his definition of class $\mathcal{K}$, Rosenbluth takes $F: \Omega\times U \to\mathbb{R}$. However, such functions extend uniquely to $\Omega\times\mathcal{R}$ by the closed loop condition in Definition \ref{K}, and Lemma \ref{GRR} remains valid without any extra work.
\end{remark}
\subsubsection{Equivalence of the bounds}
Consider a sequence $\left(\mathcal{E}_k\right)_{k\geq1}$ of finite $\sigma$-algebras such that
$\mathcal{B}=\sigma\left(\bigcup_k \mathcal{E}_k\right)$ and $\mathcal{E}_{k}\subset T_z\mathcal{E}_{k+1}$ for all $z\in\mathcal{R}$. Then, recall (\ref{putinh}) and see that $\Gamma(f)$ can be bounded below by
\begin{align}
&\sup_{\phi}\sup_{\hat{\pi}}\inf_{h}\int\sum_{z\in\mathcal{R}}\hat{\pi}(\omega,z)\left(f(\omega,z)-\log \frac{\hat{\pi}(\omega,z)}{\pi(0,z)}+h(\omega)-h(T_z\omega)\right)\phi\,\mathrm{d}\mathbb{P}\label{thingone}\\=&\sup_{\phi}\inf_{h}\sup_{\hat{\pi}}\int\sum_{z\in\mathcal{R}}\hat{\pi}(\omega,z)\left(f(\omega,z)-\log \frac{\hat{\pi}(\omega,z)}{\pi(0,z)}+h(\omega)-h(T_z\omega)\right)\phi\,\mathrm{d}\mathbb{P}\label{thingtwo}\\=&\sup_{\phi}\inf_{h}\sup_{\hat{\pi}}\int\sum_{z\in\mathcal{R}}\left[v(\omega,z)-\log\hat{\pi}(\omega,z)\right]\hat{\pi}(\omega,z)\phi\,\mathrm{d}\mathbb{P}\label{thingthree}\\=&\sup_{\phi}\inf_{h}\int\sup_{\hat{\pi}(\omega,\cdot)}\left(\sum_{z\in\mathcal{R}}[v(\omega,z)-\log\hat{\pi}(\omega,z)]\hat{\pi}(\omega,z)\right)\phi\,\mathrm{d}\mathbb{P}\label{thingfour}\\=&\sup_{\phi}\inf_{h}\int\left(\log\sum_{z\in\mathcal{R}}\mathrm{e}^{v(\omega,z)}\right)\phi\,\mathrm{d}\mathbb{P}\label{thingfive}\\=&\inf_{h}\sup_{\phi}\int\left(\log\sum_{z\in\mathcal{R}}\mathrm{e}^{v(\omega,z)}\right)\phi\,\mathrm{d}\mathbb{P}\label{thingsix}\\=&\inf_{h}\mathrm{ess}\sup_{\mathbb{P}}\log\sum_{z\in\mathcal{R}}\mathrm{e}^{v(\omega,z)}.\label{thingseven}
\end{align}
Explanation: In (\ref{thingone}), the first supremum is taken over $\mathcal{E}_k$-measurable probability densities, the second supremum is over $\mathcal{E}_k$-measurable environment kernels and the infimum is over bounded $\mathcal{B}$-measurable functions. For each $\phi$, the second supremum in (\ref{thingone}) is over a compact set, the integral is concave and continuous in $\hat{\pi}$ and affine (hence convex) in $h$. Thus, one can apply the minimax theorem of Ky Fan \cite{KyFan} and obtain (\ref{thingtwo}). The integral in (\ref{thingtwo}) can be evaluated in two steps by first taking a conditional expectation with respect to $\mathcal{E}_k$. This gives (\ref{thingthree}) where \[v(\omega,z):=\mathbb{E}\left[\log \pi(0,z) + f(\omega,z) + h(\omega) - h(T_z\omega)\left|\mathcal{E}_k\right.\right].\] The integrand in (\ref{thingthree}) is a local function of $\hat{\pi}(\omega,\cdot)$, therefore the supremum can be taken inside the integral to obtain (\ref{thingfour}). Apply the method of Lagrange multipliers and see that the supremum in (\ref{thingfour}) is attained at \[\hat{\pi}(\omega,z)=\frac{\mathrm{e}^{v(\omega,z)}}{\sum_{z'\in\mathcal{R}}\mathrm{e}^{v(\omega,z')}}.\] Plugging this back in (\ref{thingfour}) gives (\ref{thingfive}). The integral in (\ref{thingfive}) is convex in $h$, and affine (hence concave) and continuous in $\phi$. Plus, the supremum is taken over a compact set. Thus, one can again apply the minimax theorem of Ky Fan \cite{KyFan} and arrive at (\ref{thingsix}) which is clearly equal to (\ref{thingseven}).
Let us proceed with the proof. (\ref{thingseven}) implies that $\forall\epsilon >0$ and $k\geq1$, there exists an $h_{k,\epsilon}$ that satisfies
\begin{equation}
\log\sum_{z\in\mathcal{R}}\exp \mathbb{E}\left[\log \pi(0,z) + f(\omega,z) + h_{k,\epsilon}(\omega) - h_{k,\epsilon}(T_z\omega)\left|\mathcal{E}_k\right.\right]\leq\Gamma(f) + \epsilon\label{coklugot}
\end{equation} for $\mathbb{P}$-a.e.\ $\omega$. Therefore,
\begin{equation}
\mathbb{E}\left[h_{k,\epsilon}(\omega) - h_{k,\epsilon}(T_z\omega)\left|\mathcal{E}_k\right.\right]\leq\mathbb{E}\left[-\log \pi(0,z)\left|\mathcal{E}_k\right.\right]+\|f\|_{\infty}+\Gamma(f)+\epsilon\label{yarinbitersekral}
\end{equation} for each $z\in\mathcal{R}$. Define $F_{k,\epsilon}:\Omega\times\mathcal{R}\to\mathbb{R}$ by $F_{k,\epsilon}(\omega,z):=\mathbb{E}\left[h_{k,\epsilon}(\omega) - h_{k,\epsilon}(T_z\omega)\left|\mathcal{E}_{k-1}\right.\right]$. Then,
\begin{equation}
F_{k,\epsilon}(\omega,z)\leq\mathbb{E}\left[-\log \pi(0,z)\left|\mathcal{E}_{k-1}\right.\right]+\|f\|_{\infty}+\Gamma(f)+\epsilon\label{kirmizigul}
\end{equation} holds $\mathbb{P}$-a.s.\ for each $z\in\mathcal{R}$. Also, note that
\begin{align*}
-\mathbb{E}\left[h_{k,\epsilon}(\omega) - h_{k,\epsilon}(T_z\omega)\left|T_{-z}\mathcal{E}_k\right.\right]&=-\mathbb{E}\left[h_{k,\epsilon}(T_{-z}\omega) - h_{k,\epsilon}(\omega)\left|\mathcal{E}_k\right.\right](T_z\cdot)\\
&=\mathbb{E}\left[h_{k,\epsilon}(\omega) - h_{k,\epsilon}(T_{-z}\omega)\left|\mathcal{E}_k\right.\right](T_z\cdot)\\
&\leq\mathbb{E}\left[-\log \pi(0,-z)\left|\mathcal{E}_k\right.\right](T_z\cdot)+\|f\|_{\infty}+\Gamma(f)+\epsilon\\
&=\mathbb{E}\left[-\log \pi(z,0)\left|T_{-z}\mathcal{E}_k\right.\right]+\|f\|_{\infty}+\Gamma(f)+\epsilon
\end{align*} where the inequality follows from (\ref{yarinbitersekral}). Since $\mathcal{E}_{k-1}\subset T_{-z}\mathcal{E}_k$, taking conditional expectation with respect to $\mathcal{E}_{k-1}$ gives \[-F_{k,\epsilon}(\omega,z)\leq\mathbb{E}\left[-\log \pi(z,0)\left|\mathcal{E}_{k-1}\right.\right]+\|f\|_{\infty}+\Gamma(f)+\epsilon.\] Recall (\ref{kirmizigul}) and deduce that \[\left|F_{k,\epsilon}(\omega,z)\right|\leq\mathbb{E}\left[-\log \pi(0,z)\left|\mathcal{E}_{k-1}\right.\right]+\mathbb{E}\left[-\log \pi(z,0)\left|\mathcal{E}_{k-1}\right.\right]+\|f\|_{\infty}+\Gamma(f)+\epsilon.\] This implies by (\ref{kimimvarki}) that $\left(F_{k,\epsilon}(\cdot,z)\right)_{k\geq1}$ is uniformly bounded in $L^{d+\alpha}(\mathbb{P})$ for each $z\in\mathcal{R}$. Passing to a subsequence if necessary, $F_{k,\epsilon}(\cdot,z)$ converges weakly to a limit $F_{\epsilon}(\cdot,z)\in L^{d+\alpha}(\mathbb{P})$.
For $j\geq1$ and any sequence $(x_{i})_{i=0}^n$ in $\mathbb{Z}^d$ such that $x_{i+1}-x_i\in\mathcal{R}$ and $x_0=x_n$,
\begin{align}
&\mathbb{E}\left(\left.\sum_{i=0}^{n-1}F_{\epsilon}(T_{x_i}\omega,x_{i+1}-x_i)\right|\mathcal{E}_j\right)\nonumber\\
&=\sum_{i=0}^{n-1}\mathbb{E}\left(\left.\lim_{k\to\infty}F_{k,\epsilon}(T_{x_i}\omega,x_{i+1}-x_i)\right|\mathcal{E}_j\right)\nonumber\\
&=\sum_{i=0}^{n-1}\lim_{k\to\infty}\mathbb{E}\left(\left.F_{k,\epsilon}(T_{x_i}\omega,x_{i+1}-x_i)\right|\mathcal{E}_j\right)\nonumber\\
&=\sum_{i=0}^{n-1}\lim_{k\to\infty}\mathbb{E}\left(\left.\mathbb{E}\left[h_{k,\epsilon}(\omega) - h_{k,\epsilon}(T_{x_{i+1}-x_i}\omega)\left|\mathcal{E}_{k-1}\right.\right](T_{x_i}\omega)\right|\mathcal{E}_j\right)\nonumber\\
&=\sum_{i=0}^{n-1}\lim_{k\to\infty}\mathbb{E}\left(\left.\mathbb{E}\left[h_{k,\epsilon}(T_{x_i}\omega) - h_{k,\epsilon}(T_{x_{i+1}}\omega)\left|T_{-x_i}\mathcal{E}_{k-1}\right.\right]\right|\mathcal{E}_j\right)\nonumber\\
&=\sum_{i=0}^{n-1}\lim_{k\to\infty}\mathbb{E}\left(\left.h_{k,\epsilon}(T_{x_i}\omega) - h_{k,\epsilon}(T_{x_{i+1}}\omega)\right|\mathcal{E}_j\right)\label{dursunmus}\\
&=\lim_{k\to\infty}\mathbb{E}\left(\left.\sum_{i=0}^{n-1}\left(h_{k,\epsilon}(T_{x_i}\omega) - h_{k,\epsilon}(T_{x_{i+1}}\omega)\right)\right|\mathcal{E}_j\right)=0\nonumber
\end{align} holds $\mathbb{P}$-a.s., where (\ref{dursunmus}) follows from the fact that $\mathcal{E}_j\subset T_{-x_i}\mathcal{E}_{k-1}$ whenever $k$ is large enough. Therefore, $\sum_{i=0}^{n-1}F_{\epsilon}(T_{x_i}\omega,x_{i+1}-x_i)=0$ for $\mathbb{P}$-a.e.\ $\omega$, and $F_{\epsilon}:\Omega\times\mathcal{R}\to\mathbb{R}$ satisfies the closed loop condition given in Definition \ref{K}. We already know that it satisfies the moment condition, and it is also clearly mean zero. Hence, $F_{\epsilon}\in\mathcal{K}$.
Since $\mathbb{E}\left[\log \pi(0,z) + f(\omega,z)\left|\mathcal{E}_{k-1}\right.\right]$ is an $L^{d+\alpha}(\mathbb{P})$-bounded martingale, it converges to $\log \pi(0,z) + f(\cdot,z)$ in $L^{d+\alpha}(\mathbb{P})$. Therefore, \[\mathcal{L}_{k,\epsilon}(\cdot,z):=\mathbb{E}\left[\log \pi(0,z) + f(\omega,z)\left|\mathcal{E}_{k-1}\right.\right]+F_{k,\epsilon}(\cdot,z)\] converges weakly in $L^{d+\alpha}(\mathbb{P})$ to $\log \pi(0,z) + f(\cdot,z)+F_{\epsilon}(\cdot,z)$. By Mazur's theorem (see \cite{Rudin}), there exist $\mathcal{L}_{k,\epsilon}':\Omega\times\mathcal{R}\to\mathbb{R}$ for $k\geq1$ such that $\mathcal{L}_{k,\epsilon}'(\cdot,z)$ converges strongly in $L^{d+\alpha}(\mathbb{P})$ to $\log \pi(0,z) + f(\cdot,z)+F_{\epsilon}(\cdot,z)$ for each $z\in\mathcal{R}$ and $\mathcal{L}_{k,\epsilon}'$ is a convex combination of $\{\mathcal{L}_{1,\epsilon},\mathcal{L}_{2,\epsilon},\ldots,\mathcal{L}_{k,\epsilon}\}$. Passing to a further subsequence, $\mathcal{L}_{k,\epsilon}'(\cdot,z)$ converges $\mathbb{P}$-a.s.\ to $\log \pi(0,z) + f(\cdot,z)+F_{\epsilon}(\cdot,z)$. Take conditional expectation of both sides of (\ref{coklugot}) with respect to $\mathcal{E}_{k-1}$ and use Jensen's inequality to write \[\log\sum_{z\in\mathcal{R}}\exp\left(\mathbb{E}\left[\log \pi(0,z) + f(\omega,z)\left|\mathcal{E}_{k-1}\right.\right]+F_{k,\epsilon}(\cdot,z)\right)\leq\Gamma(f) + \epsilon.\] Again by Jensen's inequality, $\log\sum_{z\in\mathcal{R}}\exp\left(\mathcal{L}_{k,\epsilon}'(\cdot,z)\right)\leq\Gamma(f) + \epsilon$. Taking $k\to\infty$ gives \[\log\sum_{z\in\mathcal{R}}\pi(0,z)\mathrm{e}^{f(\omega,z)+F_{\epsilon}(\omega,z)}\leq\Gamma(f) + \epsilon\] for $\mathbb{P}$-a.e.\ $\omega$. Theorem \ref{LMGF} is proved.
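\begin{remark}
The second application of Jensen's inequality in the proof above amounts to the convexity of the log-sum-exp function: writing $\mathcal{L}_{k,\epsilon}'=\sum_{j=1}^k a_j\mathcal{L}_{j,\epsilon}$ with $a_1,\ldots,a_k\geq0$ and $\sum_{j=1}^k a_j=1$,
\[\log\sum_{z\in\mathcal{R}}\exp\left(\mathcal{L}_{k,\epsilon}'(\cdot,z)\right)\leq\sum_{j=1}^k a_j\log\sum_{z\in\mathcal{R}}\exp\left(\mathcal{L}_{j,\epsilon}(\cdot,z)\right)\leq\Gamma(f)+\epsilon.\]
\end{remark}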
\subsection{Large deviation principle}\label{LDPproof}
Putting together (\ref{level2ratetilde}) and Theorem \ref{LMGF}, one sees that
\begin{align*}
\Lambda(f)&=\sup_{\mu\in M_{1,s}^{\ll}(\Omega\times\mathcal{R})}\int\sum_{z\in\mathcal{R}}\mathrm{d}\mu(\omega,z)\left(f(\omega,z)-\log \frac{\mathrm{d}\mu(\omega,z)}{\mathrm{d}(\mu)^1(\omega)\pi(0,z)}\right)\\
&=\sup_{\mu\in M_{1,s}^{\ll}(\Omega\times\mathcal{R})}\left\{\left\langle f,\mu\right\rangle - \mathfrak{I}(\mu)\right\}\\
&=\sup_{\mu\in M_1(\Omega\times\mathcal{R})}\left\{\left\langle f,\mu\right\rangle - \mathfrak{I}(\mu)\right\}\\
&=\mathfrak{I}^*(f),
\end{align*} the Fenchel--Legendre transform of $\mathfrak{I}$ evaluated at $f$. Therefore, $\mathfrak{I}^{**}=\Lambda^*$.
Since $M_1(\Omega\times\mathcal{R})$ is compact,
\begin{displaymath}
\limsup_{n\rightarrow\infty}\frac{1}{n}\log P_o^{\omega}(\nu_{n,X}\in C)\leq-\inf_{\mu\in C}\Lambda^*(\mu)=-\inf_{\mu\in C}\mathfrak{I}^{**}(\mu)
\end{displaymath}
for $\mathbb{P}$-a.e.\ $\omega$ and any closed subset $C$ of $M_1(\Omega\times\mathcal{R})$. (See Theorem 4.5.3 of \cite{DemboZeitouni}.)
To conclude the proof of Theorem \ref{level2LDP}, one needs to obtain the LDP lower bound. Note that, for any open subset $G$ of $M_1(\Omega\times\mathcal{R})$, $\inf_{\nu\in G}\mathfrak{I}^{**}(\nu)=\inf_{\nu\in G}\mathfrak{I}(\nu)$. (See \cite{Rockafellar}, page 104.) Therefore, it suffices to show that, for any $\mu\in M_{1,s}^{\ll}(\Omega\times\mathcal{R})$, any open set $O$ containing $\mu$ and $\mathbb{P}$-a.e.\ $\omega$,
\begin{equation}
\liminf_{n\rightarrow\infty}\frac{1}{n}\log P_o^{\omega}(\nu_{n,X}\in O)\geq-\mathfrak{I}(\mu).\label{LB}
\end{equation}
Take the pair \[(\hat{\pi},\phi):=\left(\frac{\mathrm{d}\mu}{\mathrm{d}(\mu)^1},\frac{\mathrm{d}(\mu)^1}{\mathrm{d}\mathbb{P}}\right)\] corresponding to a given $\mu\in M_{1,s}^{\ll}(\Omega\times\mathcal{R})$. Then, $\phi\in L^1(\mathbb{P})$, $\phi\,\mathrm{d}\mathbb{P}$ is a $\hat{\pi}$-invariant probability measure, and $\hat{\pi}(\cdot,z)>0$ $\mathbb{P}$-a.s.\ for each $z\in U$. With this notation, (\ref{LB}) becomes \[\liminf_{n\rightarrow\infty}\frac{1}{n}\log P_o^{\omega}(\nu_{n,X}\in O)\geq-\int_{\Omega}\sum_{z\in\mathcal{R}}\hat{\pi}(\omega,z)\log\frac{\hat{\pi}(\omega,z)}{\pi(0,z)}\phi(\omega)\mathrm{d}\mathbb{P}.\] Recall Definition \ref{ortamkeli} and, for each $n\geq1$, introduce a new measure $R_o^{\hat{\pi},\omega}$ (whose dependence on $n$ is suppressed from the notation) by setting \[\mathrm{d}R_o^{\hat{\pi},\omega}:=\frac{{{\rm 1\mkern-1.5mu}\!{\rm I}}_{\nu_{n,X}\in O}}{P_o^{\hat{\pi},\omega}(\nu_{n,X}\in O)}\,\mathrm{d}P_o^{\hat{\pi},\omega}.\] Then,
\begin{align*}
\liminf_{n\rightarrow\infty}\frac{1}{n}\log P_o^\omega(\nu_{n,X}\in O)=&\liminf_{n\rightarrow\infty}\frac{1}{n}\log E_o^{\hat{\pi},\omega}\left[{{\rm 1\mkern-1.5mu}\!{\rm I}}_{\nu_{n,X}\in O}\,\frac{\mathrm{d}P_o^\omega}{\mathrm{d}P_o^{\hat{\pi},\omega}}\right]\\
=&\liminf_{n\rightarrow\infty}\frac{1}{n}\left(\log P_o^{\hat{\pi},\omega}(\nu_{n,X}\in O)+\log \int\frac{\mathrm{d}P_o^\omega}{\mathrm{d}P_o^{\hat{\pi},\omega}}\mathrm{d}R_o^{\hat{\pi},\omega}\right)\\
\geq&\liminf_{n\rightarrow\infty}\frac{1}{n}\left(\log P_o^{\hat{\pi},\omega}(\nu_{n,X}\in O)- \int\log\frac{\mathrm{d}P_o^{\hat{\pi},\omega}}{\mathrm{d}P_o^\omega}\mathrm{d}R_o^{\hat{\pi},\omega}\right)\\
=&\liminf_{n\rightarrow\infty}\frac{1}{n}\left(\log P_o^{\hat{\pi},\omega}(\nu_{n,X}\in O)-\frac{1}{P_o^{\hat{\pi},\omega}(\nu_{n,X}\in O)} E_o^{\hat{\pi},\omega}\left[{{\rm 1\mkern-1.5mu}\!{\rm I}}_{\nu_{n,X}\in O}\,\log\frac{\mathrm{d}P_o^{\hat{\pi},\omega}}{\mathrm{d}P_o^\omega}\right]\right)
\end{align*} where the third line uses Jensen's inequality. It follows from Lemma \ref{Kozlov} that $\lim_{n\rightarrow\infty}P_o^{\hat{\pi},\omega}(\nu_{n,X}\in O)=1$. Therefore,
\begin{align*}
\liminf_{n\rightarrow\infty}\frac{1}{n}\log P_o^\omega(\nu_{n,X}\in O)&\geq-\limsup_{n\rightarrow\infty}\frac{1}{n}E_o^{\hat{\pi},\omega}\left[{{\rm 1\mkern-1.5mu}\!{\rm I}}_{\nu_{n,X}\in O}\,\log\frac{\mathrm{d}P_o^{\hat{\pi},\omega}}{\mathrm{d}P_o^\omega}\right]\\
&=-\int_{\Omega}\sum_{z\in\mathcal{R}}\hat{\pi}(\omega,z)\log\frac{\hat{\pi}(\omega,z)}{\pi(0,z)}\phi(\omega)\mathrm{d}\mathbb{P}
\end{align*} again by Lemma \ref{Kozlov} and the $L^1$-ergodic theorem. Theorem \ref{level2LDP} is proved. Finally, note that the convexity of $\mathfrak{I}$ follows from an argument similar to the proof of Lemma \ref{camilla}.
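\begin{remark}
Behind the last equality is the explicit form of the likelihood ratio: restricted to $\sigma(X_1,\ldots,X_n)$,
\[\log\frac{\mathrm{d}P_o^{\hat{\pi},\omega}}{\mathrm{d}P_o^{\omega}}=\sum_{k=0}^{n-1}\log\frac{\hat{\pi}(T_{X_k}\omega,X_{k+1}-X_k)}{\pi(X_k,X_{k+1})},\]
and, by Lemma \ref{Kozlov} and the $L^1$-ergodic theorem, $\frac{1}{n}$ times the $P_o^{\hat{\pi},\omega}$-expectation of the sum on the right-hand side converges to $\int_{\Omega}\sum_{z\in\mathcal{R}}\hat{\pi}(\omega,z)\log\frac{\hat{\pi}(\omega,z)}{\pi(0,z)}\,\phi(\omega)\mathrm{d}\mathbb{P}$.
\end{remark}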
\begin{remark}
$\mathfrak{I}^{**}$ is a good rate function since $M_1(\Omega\times\mathcal{R})$ is compact.
\end{remark}
\section{Contraction principle and the Ansatz for the minimizer}\label{birboyutadonus}
\begin{proof}[Proof of Corollary \ref{level1LDP}]
Recall (\ref{ximu}) and observe that \[\xi_{\nu_{n,X}}=\int\sum_{z\in\mathcal{R}}\mathrm{d}\nu_{n,X}(\omega,z)z=\frac{1}{n}\sum_{k=0}^{n-1}\left(X_{k+1}-X_k\right)=\frac{X_n-X_o}{n}.\] Therefore, as noted in Subsection \ref{results}, Corollary \ref{level1LDP} follows from Theorem \ref{level2LDP} by the contraction principle (see \cite{DemboZeitouni}), and the rate function is given by (\ref{level1rate}).
In order to justify (\ref{level1ratetilde}), let us define $J:\mathbb{R}^d\rightarrow\mathbb{R}^+$ by $J(\xi)=\inf_{\mu\in A_\xi}\mathfrak{I}(\mu)$. We would like to show that $J\equiv I$. Since $\mathfrak{I}$ and $\mathfrak{I}^{**}$ are convex, $I$ and $J$ are convex functions on $\mathbb{R}^d$. Therefore, it suffices to show that $J^*\equiv I^*$. For any $\eta\in\mathbb{R}^d$, define $f_{\eta}\in C_b(\Omega\times\mathcal{R})$ by $f_{\eta}(\omega,z):=\langle z,\eta\rangle$. Recalling (\ref{ximu}),
\begin{eqnarray*}
I^*(\eta)&=&\sup_{\xi}\{\langle\eta,\xi\rangle - \inf_{\mu\in A_\xi}\mathfrak{I}^{**}(\mu)\}\\
&=&\sup_{\xi}\sup_{\mu\in A_\xi}\{\langle\eta,\xi_{\mu}\rangle - \mathfrak{I}^{**}(\mu)\}\\
&=&\sup_{\mu\in M_1(\Omega\times\mathcal{R})}\{\langle f_{\eta},\mu\rangle - \mathfrak{I}^{**}(\mu)\}\\
&=&\mathfrak{I}^{***}(f_{\eta})=\Lambda(f_{\eta}).
\end{eqnarray*}
Similarly, $J^*(\eta)=\mathfrak{I}^*(f_{\eta})=\Lambda(f_{\eta})$. We are done.
\end{proof}
\begin{proof}[Proof of Lemma \ref{lagrange}]
The rate function given by formula (\ref{level1ratetilde}) is
\begin{equation}
I(\xi)=\inf_{\mu\in A_\xi\cap M_{1,s}^{\ll}(\Omega\times\mathcal{R})}\int_{\Omega}\sum_{z\in\mathcal{R}} \mathrm{d}\mu(\omega,z)\log\frac{\mathrm{d}\mu(\omega,z)}{\mathrm{d}(\mu)^1(\omega)\pi(0,z)}.\label{budur}
\end{equation} Fix a $\xi=(\xi_1,\ldots,\xi_d)\in\mathbb{R}^d$ with $|\xi_1|+\cdots+|\xi_d|\leq B$. (Otherwise, the set $A_\xi$ is empty.) If there exists a $\mu_\xi\in A_\xi\cap M_{1,s}^{\ll}(\Omega\times\mathcal{R})$ such that \[\mathrm{d}\mu_\xi(\omega,z)=\mathrm{d}(\mu_\xi)^1(\omega) \pi(0,z)\mathrm{e}^{\langle\theta,z\rangle+F(\omega,z)+ r}\] for some $\theta\in\mathbb{R}^d$, $F\in\mathcal{K}$ and $r\in\mathbb{R}$, then for any $\nu\in A_\xi\cap M_{1,s}^{\ll}(\Omega\times\mathcal{R})$,
\begin{align*}
\mathfrak{I}(\nu)&=\int_{\Omega}\sum_{z\in\mathcal{R}} \mathrm{d}\nu(\omega,z)\log\frac{\mathrm{d}\nu(\omega,z)}{\mathrm{d}(\nu)^1(\omega)\pi(0,z)}\\
&=\int_{\Omega}\sum_{z\in\mathcal{R}} \mathrm{d}\nu(\omega,z)\log\frac{\mathrm{d}\nu(\omega,z)\mathrm{e}^{\langle\theta,z\rangle+F(\omega,z)+r}}{\mathrm{d}(\nu)^1(\omega)\pi(0,z)\mathrm{e}^{\langle\theta,z\rangle+F(\omega,z)+r}}\\
&=\int_{\Omega}\sum_{z\in\mathcal{R}} \mathrm{d}\nu(\omega,z)\left(\langle\theta,z\rangle+F(\omega,z)+r+\log\frac{\mathrm{d}\nu(\omega,z)\;\mathrm{d}(\mu_\xi)^1(\omega)}{\mathrm{d}(\nu)^1(\omega)\;\mathrm{d}\mu_\xi(\omega,z)}\right)\\ &=\langle\theta,\xi\rangle+r+\int_{\Omega}\sum_{z\in\mathcal{R}} \mathrm{d}\nu(\omega,z)F(\omega,z)+\int_{\Omega}\sum_{z\in\mathcal{R}} \mathrm{d}\nu(\omega,z)\log\frac{\mathrm{d}\nu(\omega,z)\;\mathrm{d}(\mu_\xi)^1(\omega)}{\mathrm{d}(\nu)^1(\omega)\;\mathrm{d}\mu_\xi(\omega,z)}.
\end{align*}
Under the Markov kernel $\frac{\mathrm{d}\nu}{\mathrm{d}(\nu)^1}$ with invariant measure $(\nu)^1$, $\mathbb{P}$-a.s.\[\lim_{n\rightarrow\infty}\frac{1}{n}\sum_{k=0}^{n-1}F(T_{X_k}\omega,X_{k+1}-X_k)=\int_{\Omega}\sum_{z\in\mathcal{R}} \mathrm{d}\nu(\omega,z)F(\omega,z)\] by Lemma \ref{Kozlov} and the ergodic theorem. But, the same limit is $0$ by Lemma \ref{GRR}. Therefore,
\begin{equation}
\mathfrak{I}(\nu)=\langle\theta,\xi\rangle+r+\int_{\Omega}\sum_{z\in\mathcal{R}} \mathrm{d}\nu(\omega,z)\log\frac{\mathrm{d}\nu(\omega,z)\;\mathrm{d}(\mu_\xi)^1(\omega)}{\mathrm{d}(\nu)^1(\omega)\;\mathrm{d}\mu_\xi(\omega,z)}.
\label{sifirladik}
\end{equation}
By an application of Jensen's inequality, it is easy to see that the integral on the RHS of (\ref{sifirladik}) is nonnegative. Moreover, this integral is zero if and only if $\frac{\mathrm{d}\nu}{\mathrm{d}(\nu)^1}=\frac{\mathrm{d}\mu_\xi}{\mathrm{d}(\mu_\xi)^1}$ holds $(\nu)^1$-a.s.\ and hence $\mathbb{P}$-a.s.\ by Lemma \ref{Kozlov}. Since $(\mu_\xi)^1$ is the unique invariant measure of $\frac{\mathrm{d}\mu_\xi}{\mathrm{d}(\mu_\xi)^1}$ that is absolutely continuous relative to $\mathbb{P}$ (again by Lemma \ref{Kozlov}), $\mu_\xi$ is the unique minimizer of (\ref{budur}).
\end{proof}
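\begin{remark}
To see the Ansatz at work in the simplest situation, suppose that the environment is deterministic, say $\pi(0,z)\equiv p(z)$ for a fixed probability vector $(p(z))_{z\in\mathcal{R}}$, so that the walk is a classical random walk with step distribution $p$. Take $F\equiv0\in\mathcal{K}$, $(\mu_\xi)^1=\mathbb{P}$ (which is invariant for the corresponding environment kernel since the latter does not depend on $\omega$) and
\[\mathrm{d}\mu_\xi(\omega,z)=p(z)\mathrm{e}^{\langle\theta,z\rangle+r}\,\mathrm{d}\mathbb{P}(\omega)\quad\mbox{with}\quad r=-\log\sum_{z'\in\mathcal{R}}p(z')\mathrm{e}^{\langle\theta,z'\rangle},\]
where $\theta$ is chosen (whenever possible) so that the tilted step distribution has mean $\xi$. This is the classical exponential tilting, and Lemma \ref{lagrange} together with (\ref{sifirladik}) gives
\[I(\xi)=\langle\theta,\xi\rangle-\log\sum_{z\in\mathcal{R}}p(z)\mathrm{e}^{\langle\theta,z\rangle},\]
i.e., Cram\'er's rate function of the classical random walk with step distribution $p$.
\end{remark}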
\section{Nearest-neighbor walks on $\mathbb{Z}$}\label{vandiseksin}
In this section, we carry out the recipe given in Lemma \ref{lagrange} and prove Theorem \ref{explicitformulah} in the case of nearest-neighbor random walk on $\mathbb{Z}$ in a stationary and ergodic environment. As mentioned in Subsection \ref{results}, we assume that the following holds:
\begin{enumerate}
\item [(A1)] There exists an $\alpha>0$ such that $\int|\log\pi(0,\pm1)|^{1+\alpha}\mathrm{d}\mathbb{P}<\infty$.
\end{enumerate}
\begin{proof}[Proof of Lemma \ref{lifeisrandom} for nearest-neighbor walks on $\mathbb{Z}$]
Define $\zeta(r,\omega):=E_o^\omega\left[\mathrm{e}^{r\tau_1},\tau_1<\infty\right]$ for any $r\in\mathbb{R}$. Then,
\begin{align*}
\lambda(r)&=\lim_{n\to\infty}\frac{1}{n}\log E_o^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right]=\lim_{n\to\infty}\frac{1}{n}\log\left(\prod_{k=0}^{n-1}E_k^\omega\left[\mathrm{e}^{r\tau_{k+1}},\tau_{k+1}<\infty\right]\right)\\&=\lim_{n\to\infty}\frac{1}{n}\sum_{k=0}^{n-1}\log\zeta(r,T_{k}\omega)=\mathbb{E}\left[\log\zeta(r,\cdot)\right]
\end{align*} by the ergodic theorem, where the limits hold for $\mathbb{P}$-a.e.\ $\omega$. If $\zeta(r,\omega)$ is finite, then
\begin{align}
\zeta(r,\omega)&=\pi(0,1)\mathrm{e}^r+\pi(0,-1)\mathrm{e}^r\zeta(r,T_{-1}\omega)\zeta(r,\omega),\nonumber\\
1&=\pi(0,1)\mathrm{e}^r\zeta(r,\omega)^{-1}+\pi(0,-1)\mathrm{e}^r\zeta(r,T_{-1}\omega).\label{masterof}
\end{align} Since $\pi(0,-1)>0$ holds $\mathbb{P}$-a.s., the set $\{\omega:\zeta(r,\omega)<\infty\}$ is $T$-invariant, and its $\mathbb{P}$-probability is $0$ or $1$. The function $r\mapsto\zeta(r,\omega)$ is strictly increasing. There exists an $r_c\geq0$ such that $\mathbb{P}\left(\omega: \zeta(r,\omega)<\infty\right)=1$ if $r<r_c$ and $\mathbb{P}\left(\omega: \zeta(r,\omega)=\infty\right)=1$ if $r>r_c$. When $r<r_c$,
\begin{align*}
E_o^\omega\left[\mathrm{e}^{r\tau_1},\tau_1<\infty\right]&\geq E_o^\omega\left[\mathrm{e}^{r\tau_1}, X_k=-k, X_{2k}=0, \tau_1<\infty\right]\\&=\mathrm{e}^{2rk}\left(\prod_{i=0}^{-k+1}\pi(i,i-1)\prod_{j=-k}^{-1}\pi(j,j+1)\right)E_o^\omega\left[\mathrm{e}^{r\tau_1},\tau_1<\infty\right]
\end{align*} for any $k\geq1$. Cancelling the $E_o^\omega\left[\mathrm{e}^{r\tau_1},\tau_1<\infty\right]$ term on both sides and taking logarithms give $$2rk + \sum_{i=0}^{-k+1}\log\pi(i,i-1) + \sum_{j=-k}^{-1}\log\pi(j,j+1) \leq 0.$$ Divide both sides by $k$, let $k\to\infty$, and see that $2r\leq-\mathbb{E}[\log\pi(0,-1)] - \mathbb{E}[\log\pi(0,1)]$ by the ergodic theorem. This, in combination with (A1), implies that $r_c<\infty.$
By (\ref{masterof}), $1\geq\pi(0,-1)\mathrm{e}^r\zeta(r,T_{-1}\omega)$ and $\log\zeta(r,T_{-1}\omega)\leq-\log\pi(0,-1)-r$. Thus, \begin{equation}\lambda(r)=\mathbb{E}[\log\zeta(r,\cdot)]\leq\int|\log\pi(0,-1)|\mathrm{d}\mathbb{P}-r<\infty\label{turran}\end{equation} for $r<r_c$, and also for $r=r_c$ by the monotone convergence theorem.
It is easy to see that $r\mapsto\lambda(r)=\mathbb{E}[\log\zeta(r,\cdot)]$ is analytic on $(-\infty,r_c)$. Assumption (A1) ensures that the walk under $P_o^\omega$ is not deterministic; therefore,
$$\lambda''(r)=\mathbb{E}\left[\frac{E_o^\omega\left[\tau_1^2\mathrm{e}^{r\tau_1},\tau_1<\infty\right]}{E_o^\omega\left[\mathrm{e}^{r\tau_1},\tau_1<\infty\right]} - \left(\frac{E_o^\omega\left[\tau_1\mathrm{e}^{r\tau_1},\tau_1<\infty\right]}{E_o^\omega\left[\mathrm{e}^{r\tau_1},\tau_1<\infty\right]}\right)^2\right]$$ is strictly positive by Jensen's inequality. Hence, $r\mapsto\lambda(r)$ is strictly convex on $(-\infty,r_c)$.
Recall that $\xi_c:=\lambda'(r_c-)^{-1}$. Use again the fact that the walk under $P_o^\omega$ is not deterministic, and write \[\xi_c^{-1}=\lambda'(r_c-)\geq\lambda'(0-)=\mathbb{E}\left(E_o^\omega[\left.\tau_1\right|\tau_1<\infty]\right)>1.\] We have proved half of Lemma \ref{lifeisrandom}, namely the statements involving $r\mapsto\lambda(r)$. Simply replace $\tau_n$ by $\bar{\tau}_{-n}$ to prove the other half of the lemma.
What remains to be shown is that the same $r_c$ works for $\lambda(\cdot)$ and $\bar{\lambda}(\cdot)$. This is proved in Appendix C.
\end{proof}
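In the special case of a deterministic environment with $\pi(0,1)\equiv p$ and $\pi(0,-1)\equiv q:=1-p$ for some fixed $p\in(0,1)$, the quantities above can be computed explicitly: $\zeta(r,\omega)=:\zeta(r)$ does not depend on $\omega$, (\ref{masterof}) becomes the quadratic equation $q\mathrm{e}^r\zeta(r)^2-\zeta(r)+p\mathrm{e}^r=0$, and the relevant root (the one that vanishes as $r\to-\infty$) is
\[\zeta(r)=\frac{1-\sqrt{1-4pq\mathrm{e}^{2r}}}{2q\mathrm{e}^r}.\]
Consequently, $r_c=-\frac{1}{2}\log(4pq)$ and $\lambda(r)=\log\zeta(r)$ for $r\leq r_c$, which is the classical computation for the homogeneous nearest-neighbor walk.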
Let us start the construction. Note that $$\lim_{r\to-\infty}\lambda'(r)=\lim_{r\to-\infty}\mathbb{E}\left(\frac{E_o^\omega\left[\tau_1\mathrm{e}^{r\tau_1},\tau_1<\infty\right]}{E_o^\omega\left[\mathrm{e}^{r\tau_1},\tau_1<\infty\right]}\right)=1.$$ The map $r\mapsto\lambda'(r)$ is strictly increasing and, therefore, it is a bijection from $(-\infty,r_c)$ to $(1,\xi_c^{-1})$. In other words, for any $\xi\in(\xi_c,1)$, there is a unique $r=r(\xi)<r_c$ such that $\xi^{-1}=\lambda'(r)$.
Taking $r=r(\xi)$, recall (\ref{masterof}) and define an environment kernel $\hat{\pi}_r$ (in the sense of Definition \ref{ortamkeli}) by
\begin{equation}
\hat{\pi}_r(\omega,1):=\pi(0,1)\mathrm{e}^r\zeta(r,\omega)^{-1}\quad\mbox{and}\quad\hat{\pi}_r(\omega,-1):=\pi(0,-1)\mathrm{e}^r\zeta(r,T_{-1}\omega).\label{yyedin}
\end{equation}
For every $x\in\mathbb{Z}$, in order to simplify the notation, $P_x^{\hat{\pi}_r,\omega}, E_x^{\hat{\pi}_r,\omega}, P_x^{\hat{\pi}_r}$ and $E_x^{\hat{\pi}_r}$ are denoted by $P_x^{r,\omega}, E_x^{r,\omega}, P_x^r$ and $E_x^r$, respectively.
For $\mathbb{P}$-a.e.\ $\omega$ and any finite sequence $(x_{k})_{k=0}^n$ in $\mathbb{Z}$ such that $x_{k+1}-x_k\in U$, $x_0=0$ and $x_n=1$, it is easy to see that
\begin{equation}\label{kfer}
P_o^{r,\omega}(X_1=x_1, \ldots, X_n = x_n) = \mathrm{e}^{rn}\zeta(r,\omega)^{-1}P_o^\omega(X_1=x_1, \ldots, X_n = x_n).
\end{equation}
\begin{lemma}
$P_o^{r}(\tau_1<\infty)=1$.
\end{lemma}
\begin{proof}
For $\mathbb{P}$-a.e.\ $\omega$, $$P_o^{r,\omega}(\tau_1<\infty)=E_o^\omega[\mathrm{e}^{r\tau_1}\zeta(r,\omega)^{-1},\tau_1<\infty]=\zeta(r,\omega)^{-1}E_o^\omega[\mathrm{e}^{r\tau_1},\tau_1<\infty]=1$$ where the first equality follows from (\ref{kfer}).
\end{proof}
\begin{lemma}\label{shmeryahu}
$E_o^{r}[\tau_1]=\xi^{-1}<\infty$.
\end{lemma}
\begin{proof}
For any $s\in\mathbb{R}$ and $\mathbb{P}$-a.e.\ $\omega$, recall (\ref{kfer}) and observe that
\begin{align*}E_o^{r,\omega}[\mathrm{e}^{s\tau_1}]=E_o^{r,\omega}[\mathrm{e}^{s\tau_1},\tau_1<\infty]&=E_o^\omega[\mathrm{e}^{(r+s)\tau_1}\zeta(r,\omega)^{-1},\tau_1<\infty]\\&=\zeta(r+s,\omega)\zeta(r,\omega)^{-1}.
\end{align*}
Therefore, $\mathbb{E}\left(\log E_o^{r,\omega}[\mathrm{e}^{s\tau_1}]\right)=\lambda(r+s)-\lambda(r)<\infty$ by (\ref{turran}) whenever $r+s<r_c$, and \[E_o^{r}[\tau_1]=\left.\frac{\mathrm{d}}{\mathrm{d}s}\right|_{s=0}\!\!\!\!\!\mathbb{E}\left(\log E_o^{r,\omega}[\mathrm{e}^{s\tau_1}]\right)=\lambda'(r)=\xi^{-1}.\qedhere\]
\end{proof}
Since $\hat{\pi}_r(\cdot,\pm1)>0$ holds $\mathbb{P}$-a.s., there exists a $\phi_r\in L^1(\mathbb{P})$ such that $\phi_r\,\mathrm{d}\mathbb{P}$ is a $\hat{\pi}_r$-invariant probability measure. (See, for example, \cite{alili}.) The pair $(\hat{\pi}_r,\phi_r)$ corresponds to a $\mu_\xi\in M_{1,s}^{\ll}(\Omega\times U)$ with $\mathrm{d}\mu_\xi(\omega,\pm1)=\hat{\pi}_r(\omega,\pm1)\phi_r(\omega)\mathrm{d}\mathbb{P}(\omega)$. By Lemma \ref{Kozlov}, the LLN for the mean velocity of the particle holds under $P_o^{r}$. The limiting velocity is \[\int\sum_{z\in U}\hat{\pi}_r(\omega,z)z\,\phi_r(\omega)\mathrm{d}\mathbb{P}=\xi_{\mu_\xi}\] with the notation in (\ref{ximu}). Therefore, $\xi_{\mu_\xi}^{-1}=E_o^{r}[\tau_1]=\xi^{-1}$ by Lemma \ref{shmeryahu}. In other words, $\mu_\xi\in A_\xi$.
Define $F_r:\Omega\times\{-1,1\}\to\mathbb{R}$ by setting \[F_r(\omega,-1):=\log\zeta(r,T_{-1}\omega)-\lambda(r)\quad\mbox{and}\quad F_r(\omega,1):=-\log\zeta(r,\omega)+\lambda(r).\] Then, recall (\ref{yyedin}) and see that
\begin{equation}
\mathrm{d}\mu_\xi(\omega,z)=\hat{\pi}_r(\omega,z)\phi_r(\omega)\mathrm{d}\mathbb{P}(\omega)=\mathrm{d}(\mu_\xi)^1(\omega)\pi(0,z)\mathrm{e}^{-z\lambda(r)+F_r(\omega,z)+r}\label{veriguut}
\end{equation} for $z\in\{-1,1\}$. In order to conclude that $\mu_\xi$ fits the Ansatz given in Lemma \ref{lagrange}, it remains to show that $F_r\in\mathcal{K}$. Clearly, $F_r$ satisfies the mean zero and the closed loop conditions in Definition \ref{K}. For $z\in\{-1,1\}$,
\[\pi(0,z)\mathrm{e}^{-z\lambda(r)+F_r(\omega,z)+r}=\hat{\pi}_r(\omega,z)\leq1\] gives $F_r(\omega,z)\leq|\log\pi(0,z)| +z\lambda(r)-r$. Use the fact that $-F_r(\omega,z)=F_r(T_z\omega,-z)$ to write $$|F_r(\omega,z)|\leq|\log\pi(0,1)|+|\log\pi(1,0)|+|\lambda(r)|-r.$$ The moment condition on $F_r(\cdot,z)$ follows from (A1).
So far, we have obtained a $\mu_\xi$ that fits the Ansatz given in Lemma \ref{lagrange} when $\xi\in(\xi_c,1)$. An analogous construction works for $\xi\in(-1,\bar{\xi}_c)$.
\begin{proof}[Proof of Theorem \ref{explicitformulah} for nearest-neighbor walks on $\mathbb{Z}$]
For any $\xi\in(\xi_c,1)$, the measure $\mu_\xi$ given in (\ref{veriguut}) is the unique minimizer of (\ref{level1ratetilde}) by Lemma \ref{lagrange}. Therefore, $I(\xi) = \mathfrak{I}(\mu_\xi)=r(\xi)-\xi\lambda(r(\xi))$ by (\ref{sifirladik}). Since $\lambda'(r(\xi))=\xi^{-1}$, it is clear that $$I(\xi)=\sup_{r\in\mathbb{R}}\left\{r-\xi\lambda(r)\right\}=\xi\sup_{r\in\mathbb{R}}\left\{r\xi^{-1}-\lambda(r)\right\}=\xi\lambda^{*}(\xi^{-1}).$$
In the proof of Lemma \ref{lifeisrandom} for nearest-neighbor walks on $\mathbb{Z}$, we saw that $r\mapsto\lambda(r)$ is strictly convex and analytic on $(-\infty,r_c)$. By convex duality, $\xi\mapsto I(\xi)$ is strictly convex and analytic on $(\xi_c,1)$.
If $\xi_c=0$, then we have identified $I(\cdot)$ on $(0,1)$. Let us now suppose $\xi_c>0$. Note that $$I'(\xi)=\frac{\mathrm{d}}{\mathrm{d}\xi}[r(\xi)-\xi\lambda(r(\xi))]=r'(\xi)-\lambda(r(\xi))-\xi\lambda'(r(\xi))r'(\xi)=-\lambda(r(\xi)).$$ Therefore, $I(\xi_c)-\xi_cI'(\xi_c+)=r_c$. This implies by convexity that $I(0)\geq r_c$. On the other hand, $$E_o^\omega[\mathrm{e}^{r\tau_1},\tau_1<\infty]=\sum_{k=1}^{\infty}\mathrm{e}^{rk}P_o^\omega(\tau_1=k)\leq\sum_{k=1}^{\infty}\mathrm{e}^{rk}P_o^\omega(X_k=1)\leq\sum_{k=1}^{\infty}\mathrm{e}^{(r-I(0))k + o(k)}<\infty$$ for any $r<I(0)$. Hence, $r_c = I(0)$. The equality $I(\xi_c)-\xi_cI'(\xi_c+)=I(0)$ forces $I(\cdot)$ to be affine linear on $[0,\xi_c]$ with a slope of $I'(\xi_c+)$. In particular, $\xi\mapsto I(\xi)$ is differentiable on $(0,1)$.
Still supposing $\xi_c>0$, fix $\xi\in(0,\xi_c]$. Then, $\frac{\mathrm{d}}{\mathrm{d}r}\left(r-\xi\lambda(r)\right)>0$ for every $r<r_c$. Therefore, $$\sup_{r\in\mathbb{R}}\left\{r-\xi\lambda(r)\right\}=r_c-\xi\lambda(r_c)=I(0)+\xi I'(\xi_c+)=I(\xi).$$ In short, $I(\xi)=\sup_{r\in\mathbb{R}}\left\{r-\xi\lambda(r)\right\}$ for every $\xi\in(0,1)$.
Let us no longer suppose $\xi_c>0$. At $\xi=1$,
\begin{align*}
\sup_{r\in\mathbb{R}}\left\{r-\lambda(r)\right\}&=\sup_{r\in\mathbb{R}}\mathbb{E}\left[r-\log E_o^\omega\left[\mathrm{e}^{r\tau_1},\tau_1<\infty\right]\right]=\lim_{r\to-\infty}\mathbb{E}\left[-\log E_o^\omega\left[\mathrm{e}^{r(\tau_1-1)},\tau_1<\infty\right]\right]\\
&=\mathbb{E}\left[-\log P_o^\omega\left(\tau_1=1\right)\right]=\mathbb{E}\left[-\log\pi(0,1)\right]=-\lim_{n\to\infty}\frac{1}{n}\log P_o^\omega\left(X_n=n\right)=I(1).
\end{align*}
It is easy to check that $I(\xi)=\sup_{r\in\mathbb{R}}\left\{r-\xi\lambda(r)\right\}=\infty$ when $\xi>1$. This concludes the proof of Theorem \ref{explicitformulah} for $\xi\geq0$. The arguments regarding $\xi<0$ are similar.
\end{proof}
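Continuing the homogeneous example with $\pi(0,1)\equiv p>\frac{1}{2}$ and $\pi(0,-1)\equiv q=1-p$: there, $\lambda(0)=\log P_o^\omega(\tau_1<\infty)=0$ and $\lambda'(0)=\mathbb{E}\left(E_o^\omega\left[\tau_1\right]\right)=(p-q)^{-1}$, so $I(\xi)=\sup_{r\in\mathbb{R}}\left\{r-\xi\lambda(r)\right\}$ vanishes at the law of large numbers velocity $\xi=p-q$, as it must.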
\section{Walks with bounded jumps on $\mathbb{Z}$}\label{dolapdere}
Recall the statement of Lemma \ref{lifeisrandom}. We start this section by constructing a new (tilted) environment kernel $\hat{\pi}_r$ for every $r<r_c$. We then prove that $r\mapsto\lambda(r)$ exists and that it is differentiable on $(-\infty,r_c)$. At that point, we note that if there exists a $\phi_r\in L^1(\mathbb{P})$ such that $\phi_r\,\mathrm{d}\mathbb{P}$ is a $\hat{\pi}_r$-invariant probability measure, then $\mu_\xi\in M_1(\Omega\times\mathcal{R})$ with $\mathrm{d}\mu_\xi(\omega,z)=\hat{\pi}_r(\omega,z)\phi_r(\omega)\mathrm{d}\mathbb{P}(\omega)$ fits the Ansatz given in Lemma \ref{lagrange} for $\xi=(\lambda'(r))^{-1}$. We proceed by constructing such a $\phi_r$. Finally, we prove Lemma \ref{lifeisrandom}, Theorem \ref{explicitformulah} and Proposition \ref{Wronskian}.
\begin{remark}
Some of the notation (e.g., $\zeta(r,\omega), r_c, \hat{\pi}_r, \phi_r$ and $F_r$) introduced in Section \ref{vandiseksin} is reintroduced in Section \ref{dolapdere} in a slightly different way. This is done in order to emphasize the fact that the arguments in these two sections are parallel. Note that this practice does not cause any confusion since Sections \ref{vandiseksin} and \ref{dolapdere} can be read independently of each other.
\end{remark}
Many of the arguments in this section use the following lemma.
\begin{lemma}\label{muhacir}
Given $m\in\mathbb{Z}$ and $\epsilon>0$, suppose there exist two functions $L:\left((-\infty,m)\cap\mathbb{Z}\right)\times\mathcal{R}\to[0,1]$ and $v:\mathbb{Z}\to\mathbb{R}$ such that $L(y,\pm1)\geq\epsilon$, $\sum_{z\in\mathcal{R}}L(y,z)=1$ and $v(y)=\sum_{z\in\mathcal{R}}L(y,z)v(y+z)$ for any $y<m$. The function $L$ defines a Markov chain and, for any $x<m$, induces a probability measure $Q_x$ on paths starting at $x$. $E_x^Q$ denotes expectation under $Q_x$.
If $Q_x(\tau_m<\infty)=1$ and $x'<x$, then
\begin{equation}\label{faikinbaci}
|v(x)-v(x')|\leq \left(1-\epsilon^B\right)^{\frac{m-x}{B}}\sup_{0\leq z<B \atop 0\leq z'<B}\left[v(m+z)-v(m+z')\right].
\end{equation}
\end{lemma}
\begin{proof}
Fix $x'<x<m$. For any $k\geq0$ with $x+(k+1)B\leq m$, $$v(x')=E_{x'}^Q\left[v\left(X_{\tau_{x+kB}}\right)\right]=\sum_{z=0}^{B-1}Q_{x'}\left(X_{\tau_{x+kB}}=x+kB+z\right)v(x+kB+z).$$ Hence, there exists an $x_k\in\mathbb{Z}$ such that $x+kB\leq x_k<x+(k+1)B$ and $v(x_k)\leq v(x')$. The collection of $x_k$'s constitutes a set $S:=\left\{x_k:0\leq k\leq\frac{m-x}{B}-1\right\}$. Let $\tau_S:=\inf\left\{k\geq0:X_k\in S\right\}$. Observe that
\begin{align*}
v(x)=E_x^Q\left[v\left(X_{\tau_S\wedge\tau_m}\right)\right]&=E_x^Q\left[v\left(X_{\tau_S}\right),\tau_S<\infty\right]+E_x^Q\left[v\left(X_{\tau_m}\right),\tau_S=\infty\right]\\
&\leq Q_x(\tau_S<\infty)v(x')\quad\ \ \,+Q_x(\tau_S=\infty)\sup_{0\leq z<B}v(m+z)\\
&=v(x')+Q_x(\tau_S=\infty)\left(\sup_{0\leq z<B}v(m+z)-v(x')\right).
\end{align*}
On the other hand, $v(x')=E_{x'}^Q\left[v\left(X_{\tau_m}\right)\right]\geq\inf_{0\leq z'<B}v(m+z')$. Therefore, $$v(x)-v(x')\leq Q_x(\tau_S=\infty)\sup_{0\leq z<B \atop 0\leq z'<B}\left[v(m+z)-v(m+z')\right].$$
It is easy to see that $Q_x(\tau_S=\infty)\leq\left(1-\epsilon^B\right)^{\frac{m-x}{B}}$. This proves half of (\ref{faikinbaci}). The other half is proved similarly.
\end{proof}
\subsection{Construction of a new environment kernel $\hat{\pi}_r$}\label{haifaguzel}
Recall our assumptions:
\begin{enumerate}
\item [(A1)] There exists an $\alpha>0$ such that $\int|\log\pi(0,z)|^{1+\alpha}\mathrm{d}\mathbb{P}<\infty$ for each $z\in\mathcal{R}$.
\item [(A2)] There exists a $\delta>0$ such that $\mathbb{P}(\pi(0,\pm 1)\geq\delta)=1$.
\end{enumerate}
Let $\zeta(r,\omega):=E_o^\omega\left[\mathrm{e}^{r\tau_1},\tau_1<\infty\right]$ for any $r\in\mathbb{R}$. If $\zeta(r,\omega)<\infty$, then
\begin{align*}
\zeta(r,\omega)&=\sum_{z\in\mathcal{R}}\pi(0,z)\mathrm{e}^rE_z^\omega\left[\mathrm{e}^{r\tau_1},\tau_1<\infty\right]\geq\pi(0,-1)\mathrm{e}^rE_{-1}^\omega\left[\mathrm{e}^{r\tau_1},\tau_1<\infty\right]\\
&\geq\delta\mathrm{e}^r\left(E_{-1}^\omega\left[\mathrm{e}^{r\tau_1},\tau_1<\infty, X_{\tau_o}\geq1\right] + E_{-1}^\omega\left[\mathrm{e}^{r\tau_1},\tau_1<\infty, X_{\tau_o}=0\right]\right)\\
&=\delta\mathrm{e}^r\left(E_{-1}^\omega\left[\mathrm{e}^{r\tau_o},\tau_o<\infty, X_{\tau_o}\geq1\right] + E_{-1}^\omega\left[\mathrm{e}^{r\tau_o},\tau_o<\infty, X_{\tau_o}=0\right]E_o^\omega\left[\mathrm{e}^{r\tau_1},\tau_1<\infty\right]\right)\\
&\geq\min(1,\zeta(r,\omega))\delta\mathrm{e}^r\zeta(r,T_{-1}\omega).
\end{align*}
Therefore, $\{\omega:\zeta(r,\omega)<\infty\}$ is $T$-invariant, and its $\mathbb{P}$-probability is $0$ or $1$. The function $r\mapsto\zeta(r,\omega)$ is strictly increasing. There exists an $r_c\geq0$ such that $\mathbb{P}\left(\omega: \zeta(r,\omega)<\infty\right)=1$ if $r<r_c$ and $\mathbb{P}\left(\omega: \zeta(r,\omega)=\infty\right)=1$ if $r>r_c$. When $r<r_c$, $$E_o^\omega\left[\mathrm{e}^{r\tau_1},\tau_1<\infty\right]\geq E_o^\omega\left[\mathrm{e}^{r\tau_1}, X_1=-1, X_2=0, \tau_1<\infty\right]\geq\left(\delta\mathrm{e}^r\right)^2E_o^\omega\left[\mathrm{e}^{r\tau_1},\tau_1<\infty\right].$$ This shows that $\delta\mathrm{e}^r\leq1$ and $r_c\leq-\log\delta<\infty$. For $r<r_c$ and $n\geq2$,
\begin{align*}
E_o^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right]&=\sum_{z=1}^{B}E_o^\omega\left[\mathrm{e}^{r\tau_1},\tau_1<\infty,X_{\tau_1}=z\right]E_z^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right]\\
&=\sum_{z=1}^{B}E_o^\omega\left[\mathrm{e}^{r\tau_1},\tau_1<\infty,X_{\tau_1}=z\right]E_o^{T_z\omega}\left[\mathrm{e}^{r\tau_{n-z}},\tau_{n-z}<\infty\right].
\end{align*} By induction, $\mathbb{P}\left(\omega: E_o^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right]<\infty\right)=1$.
From now on, consider $r<r_c$. For $x<n$, note that
\begin{align}
u_{r,n}(\omega,x):\!&=\frac{E_x^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right]}{E_o^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right]}=\sum_{z\in\mathcal{R}}\pi(x,x+z)\mathrm{e}^r\frac{E_{x+z}^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right]}{E_o^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right]}\nonumber\\
&=\sum_{z\in\mathcal{R}}\pi(x,x+z)\mathrm{e}^ru_{r,n}(\omega,x+z).\nonumber\\
1&=\sum_{z\in\mathcal{R}}\pi(x,x+z)\mathrm{e}^r\frac{u_{r,n}(\omega,x+z)}{u_{r,n}(\omega,x)}=:\sum_{z\in\mathcal{R}}\hat{\pi}_{r,n}(x,x+z)\label{huseyindayi}
\end{align} defines a new (random) transition kernel $\hat{\pi}_{r,n}(x,x+z)$ for $x<n$. It is clear that the jumps under $\hat{\pi}_{r,n}$ are bounded by $B$. If $x<y<n$, then
$$E_{y}^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right]\geq E_{y}^\omega\left[\mathrm{e}^{r\tau_n},X_1=y-1,\ldots,X_{y-x}=x,\tau_n<\infty\right]\geq(\delta\mathrm{e}^r)^{y-x}E_{x}^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right].$$ Similarly, $E_{x}^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right]\geq(\delta\mathrm{e}^r)^{y-x}E_{y}^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right]$. Therefore,
\begin{equation}
(\delta\mathrm{e}^r)^{|y-x|}\leq\frac{u_{r,n}(\omega,y)}{u_{r,n}(\omega,x)}\leq(\delta\mathrm{e}^r)^{-|y-x|}.\label{isilam}
\end{equation}
Putting (\ref{huseyindayi}) and (\ref{isilam}) together, we obtain the following ellipticity bound:
\begin{equation}
\mathbb{P}\left(\hat{\pi}_{r,n}(x,x\pm1)\geq(\delta\mathrm{e}^r)^2\right)=1\mbox{ for every }x<n-1.\label{reginbogin}
\end{equation}\pagebreak
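Indeed, for every $x<n-1$, (\ref{huseyindayi}), (\ref{isilam}) and (A2) give
\[\hat{\pi}_{r,n}(x,x\pm1)=\pi(x,x\pm1)\,\mathrm{e}^r\,\frac{u_{r,n}(\omega,x\pm1)}{u_{r,n}(\omega,x)}\geq\delta\,\mathrm{e}^r\left(\delta\mathrm{e}^r\right)=\left(\delta\mathrm{e}^r\right)^2\]
for $\mathbb{P}$-a.e.\ $\omega$.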
\begin{lemma}\label{Qgot}
If $0<x<n+B$, then $(\delta\mathrm{e}^r)^{4(B-1)}\leq u_{r,n}(\omega,x)E_o^\omega\left[\mathrm{e}^{r\tau_x},\tau_x<\infty\right]\leq(\delta\mathrm{e}^r)^{-4(B-1)}$ for $\mathbb{P}$-a.e.\ $\omega$.
\end{lemma}
\begin{proof}
Suppose $0<x\leq n-B$. Observe that
\begin{align}
u_{r,n}(\omega,x)^{-1}&=\frac{E_o^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right]}{E_x^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right]}=\frac{1}{E_x^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right]}\sum_{z=0}^{B-1}E_o^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty,X_{\tau_x}=x+z\right]\nonumber\\
&=\sum_{z=0}^{B-1}E_o^\omega\left[\mathrm{e}^{r\tau_x},\tau_x<\infty,X_{\tau_x}=x+z\right]\frac{E_{x+z}^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right]}{E_x^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right]}\nonumber\\
&=\sum_{z=0}^{B-1}E_o^\omega\left[\mathrm{e}^{r\tau_x},\tau_x<\infty,X_{\tau_x}=x+z\right]\frac{u_{r,n}(\omega,x+z)}{u_{r,n}(\omega,x)}.\label{ucuzkurtuldum}
\end{align}
It follows immediately from (\ref{isilam}) that
\begin{equation}\label{deyyus}
(\delta\mathrm{e}^r)^{(B-1)}\leq u_{r,n}(\omega,x)E_o^\omega\left[\mathrm{e}^{r\tau_x},\tau_x<\infty\right]\leq(\delta\mathrm{e}^r)^{-(B-1)}.
\end{equation}
Next, suppose $n-B<x<n$. Note that (\ref{ucuzkurtuldum}) still holds. If $x+z<n$, then
\begin{equation}\label{hizlanmakiyi}
(\delta\mathrm{e}^r)^{(B-1)}\leq\frac{u_{r,n}(\omega,x+z)}{u_{r,n}(\omega,x)}\leq(\delta\mathrm{e}^r)^{-(B-1)}
\end{equation} again by (\ref{isilam}). On the other hand, if $x+z\geq n$, then
\begin{equation}\label{havvaanan}
\frac{u_{r,n}(\omega,x+z)}{u_{r,n}(\omega,x)}=\frac{E_{x+z}^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right]}{E_x^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right]}=\frac{1}{E_x^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right]}.
\end{equation} However, for any $m\geq n+B$,
\begin{equation}\label{ruyalarda}
(\delta\mathrm{e}^r)^{2(B-1)}\leq(\delta\mathrm{e}^r)^{(B-1)}\frac{u_{r,m}(\omega,n)}{u_{r,m}(\omega,x)}\leq\frac{1}{E_x^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right]}\leq(\delta\mathrm{e}^r)^{-(B-1)}\frac{u_{r,m}(\omega,n)}{u_{r,m}(\omega,x)}\leq(\delta\mathrm{e}^r)^{-2(B-1)}.
\end{equation} In (\ref{ruyalarda}), the inner inequalities follow from (\ref{deyyus}) after an appropriate change of variables, and the outer inequalities hold by (\ref{isilam}). Use (\ref{ucuzkurtuldum}) in combination with (\ref{hizlanmakiyi}), (\ref{havvaanan}) and (\ref{ruyalarda}) to deduce that
\begin{equation}\label{deyyusiki}
(\delta\mathrm{e}^r)^{2(B-1)}\leq u_{r,n}(\omega,x)E_o^\omega\left[\mathrm{e}^{r\tau_x},\tau_x<\infty\right]\leq(\delta\mathrm{e}^r)^{-2(B-1)}.
\end{equation}
If $x=n$, there is nothing to prove. Finally, suppose $n<x<n+B$. It is easy to see that
\begin{equation}\label{goncayla}
u_{r,n}(\omega,x)E_o^\omega\left[\mathrm{e}^{r\tau_x},\tau_x<\infty\right]=\frac{E_o^\omega\left[\mathrm{e}^{r\tau_x},\tau_x<\infty\right]}{E_o^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right]}=\frac{1}{u_{r,x}(\omega,n)E_o^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right]}\cdot E_n^\omega\left[\mathrm{e}^{r\tau_x},\tau_x<\infty\right].
\end{equation} Reversing the roles of $x$ and $n$ in both (\ref{ruyalarda}) and (\ref{deyyusiki}) gives upper and lower bounds for the terms on the RHS of (\ref{goncayla}). This implies the desired result.
\end{proof}
In order to indicate the $\omega$-dependence of $\hat{\pi}_{r,n}$, the probability measure it induces on paths starting at any $x<n$ is denoted by $Q_x^{n,\omega}$.
\begin{lemma}\label{ewayak}
For every $x<n$ and $\mathbb{P}$-a.e.\ $\omega$, $Q_x^{n,\omega}(\tau_n<\infty)=1$.
\end{lemma}
\begin{proof}
For any path $(x_j)_{0\leq j\leq k}$ with $x_o=x$, $x_j<n$ and $x_{j+1}-x_j\in\mathcal{R}$, it follows from (\ref{huseyindayi}) that $$Q_x^{n,\omega}(X_1 = x_1,\ldots, X_k = x_k)=P_x^\omega(X_1 = x_1,\ldots, X_k = x_k)\mathrm{e}^{rk}\frac{u_{r,n}(\omega,x_k)}{u_{r,n}(\omega,x)}.$$
Also, note that $P_x^\omega\left(\left.X_{\tau_n}\geq n\,\right|\tau_n<\infty\right)=1$. Therefore,
$$Q_x^{n,\omega}(\tau_n<\infty)=E_x^\omega[\mathrm{e}^{r\tau_n}\frac{u_{r,n}(\omega,X_{\tau_n})}{u_{r,n}(\omega,x)},\tau_n<\infty]=E_x^\omega[\mathrm{e}^{r\tau_n}\frac{E_{X_{\tau_n}}^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right]}{E_x^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right]},\tau_n<\infty] = 1.\qedhere$$
\end{proof}
\begin{lemma}
For every $x,z\in\mathbb{Z}$ and $\mathbb{P}$-a.e.\ $\omega$, $u_r(\omega,x):=\lim_{n\to\infty}u_{r,n}(\omega,x)$ exists and
\begin{equation}\label{cangorecek}
(\delta\mathrm{e}^r)^{|z|}\leq u_r(T_x\omega,z)=\frac{u_r(\omega,x+z)}{u_r(\omega,x)}\leq(\delta\mathrm{e}^r)^{-|z|}.
\end{equation}
\end{lemma}
\begin{proof}
Given $x\in\mathbb{Z}$, take any $n_1, n_2\in\mathbb{N}$ such that $x<n_1<n_2$. For every $y\in\mathbb{Z}$ with $n_1\leq y<n_1+B$, Lemma \ref{Qgot} implies that $$(\delta\mathrm{e}^r)^{8(B-1)}\leq\frac{u_{r,n_2}(\omega,y)}{u_{r,n_1}(\omega,y)}\leq(\delta\mathrm{e}^r)^{-8(B-1)}.$$
Since $\hat{\pi}_{r,n_1}$ is defined in (\ref{huseyindayi}) via a Doob $h$-transform, it is not surprising that
\begin{align*}
\sum_{z\in\mathcal{R}}\hat{\pi}_{r,n_1}(x,x+z)\frac{u_{r,n_2}(\omega,x+z)}{u_{r,n_1}(\omega,x+z)}&=\sum_{z\in\mathcal{R}}\pi(x,x+z)\mathrm{e}^r\frac{u_{r,n_1}(\omega,x+z)}{u_{r,n_1}(\omega,x)}\frac{u_{r,n_2}(\omega,x+z)}{u_{r,n_1}(\omega,x+z)}\\&=\sum_{z\in\mathcal{R}}\pi(x,x+z)\mathrm{e}^r\frac{u_{r,n_2}(\omega,x+z)}{u_{r,n_1}(\omega,x)}\\&=\frac{u_{r,n_2}(\omega,x)}{u_{r,n_1}(\omega,x)}.
\end{align*}
Therefore, Lemma \ref{muhacir} implies that
$$\left|\frac{u_{r,n_2}(\omega,x)}{u_{r,n_1}(\omega,x)}-1\right| = \left|\frac{u_{r,n_2}(\omega,x)}{u_{r,n_1}(\omega,x)}-\frac{u_{r,n_2}(\omega,0)}{u_{r,n_1}(\omega,0)}\right|\leq c(r)^{n_1-|x|}\left(\delta\mathrm{e}^r\right)^{-8(B-1)}$$
where $c(r) := \left(1-\left(\delta\mathrm{e}^r\right)^{2B}\right)^{1/B}<1$. Substitute $y=0$ in (\ref{isilam}) and conclude that
$$\left|u_{r,n_2}(\omega,x)-u_{r,n_1}(\omega,x)\right|=u_{r,n_1}(\omega,x)\left|\frac{u_{r,n_2}(\omega,x)}{u_{r,n_1}(\omega,x)}-1\right|\leq c(r)^{n_1-|x|}\left(\delta\mathrm{e}^r\right)^{-8(B-1)-|x|}.$$ In particular, $\left(u_{r,n}(\omega,x)\right)_{n>x}$ is a Cauchy sequence. Therefore, $u_r(\omega,x):=\lim_{n\to\infty}u_{r,n}(\omega,x)$ exists.
For every $x,z\in\mathbb{Z}$ and $\mathbb{P}$-a.e.\ $\omega$,
\begin{align}
\frac{u_r(\omega,x+z)}{u_r(\omega,x)}&=\lim_{n\to\infty}\!\!\frac{u_{r,n}(\omega,x+z)}{u_{r,n}(\omega,x)}=\lim_{n\to\infty}\!\!\frac{E_{x+z}^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right]}{E_x^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right]}=\lim_{n\to\infty}\!\!\frac{E_z^{T_x\omega}\left[\mathrm{e}^{r\tau_{n-x}},\tau_{n-x}<\infty\right]}{E_o^{T_x\omega}\left[\mathrm{e}^{r\tau_{n-x}},\tau_{n-x}<\infty\right]}\label{feyziogluyla}\\&=u_r(T_x\omega,z).\nonumber
\end{align}
Finally, note that the inequalities in (\ref{cangorecek}) follow from (\ref{isilam}).
\end{proof}
\begin{definition}
For every $z\in\mathcal{R}$ and $\mathbb{P}$-a.e.\ $\omega$, let
\begin{equation}\label{birincicinko}
\hat{\pi}_r(\omega,z):=\pi(0,z)\mathrm{e}^r u_r(\omega,z).
\end{equation}
\end{definition}
It follows immediately from (\ref{huseyindayi}) that $\hat{\pi}_r:\Omega\times\mathcal{R}\to[0,1]$ is an environment kernel in the sense of Definition \ref{ortamkeli}. For every $x\in\mathbb{Z}$, in order to simplify the notation, $P_x^{\hat{\pi}_r,\omega}, E_x^{\hat{\pi}_r,\omega}, P_x^{\hat{\pi}_r}$ and $E_x^{\hat{\pi}_r}$ are denoted by $P_x^{r,\omega}, E_x^{r,\omega}, P_x^r$ and $E_x^r$, respectively.
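In the nearest-neighbor case $B=1$, the function $u_r$ can be identified explicitly: since the walk cannot jump over a site, the strong Markov property gives
\[u_{r,n}(\omega,1)=\frac{E_1^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right]}{E_o^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right]}=\frac{1}{E_o^\omega\left[\mathrm{e}^{r\tau_1},\tau_1<\infty\right]}=\zeta(r,\omega)^{-1}\quad\mbox{and}\quad u_{r,n}(\omega,-1)=\zeta(r,T_{-1}\omega)\]
for every $n\geq2$. Therefore, $u_r(\omega,\pm1)$ is given by the same expressions and (\ref{birincicinko}) reduces to the kernel (\ref{yyedin}) of Section \ref{vandiseksin}.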
\begin{lemma} For every $z\in\mathcal{R}$ and $\mathbb{P}$-a.e.\ $\omega$,
\begin{equation}\label{ikincicinko}
\hat{\pi}_r(\omega,z)\geq\left(\delta\mathrm{e}^r\right)^{|z|}\mathrm{e}^r\pi(0,z).
\end{equation} In particular, $\hat{\pi}_r$ satisfies the following ellipticity condition:
\begin{equation}\label{tombala}
\mathbb{P}\left(\omega: \hat{\pi}_r(\omega,\pm1)\geq\left(\delta\mathrm{e}^r\right)^2\right)=1.
\end{equation}
\end{lemma}
\begin{proof}
Together, (\ref{cangorecek}) and (\ref{birincicinko}) imply (\ref{ikincicinko}), which in turn gives (\ref{tombala}) since (A2) holds.
\end{proof}
\begin{lemma}\label{kayahan}
For every $n\geq1$, $P_o^r(\tau_n<\infty)=1$.
\end{lemma}
\begin{proof}
Recall the proof of Lemma \ref{ewayak}. For every $n\geq1$ and $\mathbb{P}$-a.e.\ $\omega$,
\begin{align*}
P_o^{r,\omega}\left(\tau_n<\infty\right)&=E_o^\omega\left[\mathrm{e}^{r\tau_n}u_r(\omega,X_{\tau_n}), \tau_n<\infty\right]\\
&=\sum_{z=0}^{B-1}E_o^\omega\left[\mathrm{e}^{r\tau_n}, X_{\tau_n}=n+z, \tau_n<\infty\right]u_r(\omega,n+z)\\
&=\lim_{m\to\infty}\sum_{z=0}^{B-1}E_o^\omega\left[\mathrm{e}^{r\tau_n}, X_{\tau_n}=n+z, \tau_n<\infty\right]u_{r,m}(\omega,n+z)\\
&=\lim_{m\to\infty}E_o^\omega\left[\mathrm{e}^{r\tau_n}u_{r,m}(\omega,X_{\tau_n}), \tau_n<\infty\right]\\
&=\lim_{m\to\infty}\frac{E_o^\omega\left[\mathrm{e}^{r\tau_n}E_{X_{\tau_n}}^\omega\left[\mathrm{e}^{r\tau_m}, \tau_m<\infty\right], \tau_n<\infty\right]}{E_o^\omega\left[\mathrm{e}^{r\tau_m}, \tau_m<\infty\right]}\\
&=\lim_{m\to\infty}\frac{E_o^\omega\left[\mathrm{e}^{r\tau_m}, \tau_m<\infty\right]}{E_o^\omega\left[\mathrm{e}^{r\tau_m}, \tau_m<\infty\right]}=1.\qedhere
\end{align*}
\end{proof}
\begin{lemma}\label{mavisap}
For every $m\geq1$ and $\mathbb{P}$-a.e.\ $\omega$,
\begin{equation}\label{boundoyleolmazboyleolur}
E_o^{r,\omega}\left[\tau_1^m\right]\leq\frac{m!}{(r_c-r)^m}(\delta\mathrm{e}^r)^{-2B}=:H_m(r).
\end{equation}
\end{lemma}
\begin{proof}
It follows from (\ref{isilam}) and (\ref{deyyus}) that $E_o^\omega\left[\mathrm{e}^{r\tau_1},\tau_1<\infty\right]\leq(\delta\mathrm{e}^r)^{-B}$ for $\mathbb{P}$-a.e.\ $\omega$. By the monotone convergence theorem, this bound holds for $r=r_c$ as well. Note that
\begin{align*}
E_o^{r,\omega}\left[\mathrm{e}^{(r_c-r)\tau_1}\right]&=E_o^{r,\omega}\left[\mathrm{e}^{(r_c-r)\tau_1},\tau_1<\infty\right]=E_o^\omega\left[\mathrm{e}^{r_c\tau_1}u_r(\omega,X_{\tau_1}),\tau_1<\infty\right]\\
&\leq E_o^\omega\left[\mathrm{e}^{r_c\tau_1}(\delta\mathrm{e}^r)^{-B},\tau_1<\infty\right]\leq (\delta\mathrm{e}^r)^{-B}(\delta\mathrm{e}^{r_c})^{-B}\leq(\delta\mathrm{e}^r)^{-2B}.
\end{align*} Here, Lemma \ref{kayahan} and (\ref{cangorecek}) imply the first equality and the first inequality, respectively. For every $m\geq1$ and $a\in\mathbb{R}^+$, $\mathrm{e}^{a}=\sum_{n=0}^\infty\frac{a^n}{n!}\geq\frac{a^m}{m!}$. Therefore,
$$\frac{(r_c-r)^m}{m!}E_o^{r,\omega}\left[\tau_1^m\right]\leq E_o^{r,\omega}\left[\mathrm{e}^{(r_c-r)\tau_1}\right]\leq(\delta\mathrm{e}^r)^{-2B}.\qedhere$$
\end{proof}
\begin{lemma}\label{bugeceler}
For $\mathbb{P}$-a.e.\ $\omega$,
$$\lim_{n\to\infty}\frac{1}{n}E_o^{r,\omega}\left[\tau_n\right]=\mathbb{E}\left[\lim_{x\to-\infty}P_x^{r,\omega}\left(X_{\tau_o}=0\right)E_o^{r,\omega}\left[\tau_1\right]\right]=:g(r).$$
\end{lemma}
\begin{proof}
Let $c(r) := \left(1-\left(\delta\mathrm{e}^r\right)^{2B}\right)^{1/B}<1$. For every $n\geq1$ and $\mathbb{P}$-a.e.\ $\omega$,
\begin{align}
E_o^{r,\omega}\left[\tau_n\right] &= \sum_{i=1}^nE_o^{r,\omega}\left[\tau_i - \tau_{i-1}\right] = \sum_{i=1}^nE_o^{r,\omega}\left[\tau_i - \tau_{i-1}, X_{\tau_{i-1}}=i-1\right]\nonumber\\
& = \sum_{i=1}^nP_o^{r,\omega}\left(X_{\tau_{i-1}}=i-1\right)E_{i-1}^{r,\omega}\left[\tau_i\right]\nonumber\\
& \leq \sum_{i=1}^n\left(\lim_{x\to-\infty}P_x^{r,\omega}\left(X_{\tau_{i-1}}=i-1\right) + c(r)^{i-1}\right)E_{i-1}^{r,\omega}\left[\tau_i\right]\label{bengurion}\\
& = \sum_{i=1}^n\left(\lim_{x\to-\infty}P_x^{r,T_{i-1}\omega}\left(X_{\tau_o}=0\right) + c(r)^{i-1}\right)E_o^{r,T_{i-1}\omega}\left[\tau_1\right]\nonumber\\
& \leq \frac{H_1(r)}{1-c(r)} + \sum_{i=1}^n\lim_{x\to-\infty}P_x^{r,T_{i-1}\omega}\left(X_{\tau_o}=0\right)E_o^{r,T_{i-1}\omega}\left[\tau_1\right]\nonumber
\end{align} where (\ref{bengurion}) and the existence of $\lim_{x\to-\infty}P_x^{r,\omega}\left(X_{\tau_{i-1}}=i-1\right)$ follow from Lemma \ref{muhacir}. Therefore,
$$\limsup_{n\to\infty}\frac{1}{n}E_o^{r,\omega}\left[\tau_n\right]\leq\mathbb{E}\left[\lim_{x\to-\infty}P_x^{r,\omega}\left(X_{\tau_o}=0\right)E_o^{r,\omega}\left[\tau_1\right]\right]$$ by the ergodic theorem. The proof of the other direction is similar.
\end{proof}
\subsection{Differentiability of $r\mapsto\lambda(r)$}\label{otayak}
For every $r<r_c$, $n\geq1$ and $\mathbb{P}$-a.e.\ $\omega$, let $$\lambda_n(r,\omega):=\log E_o^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right].$$
\begin{lemma}\label{ongbak}
For every $r<r_c$ and $\mathbb{P}$-a.e.\ $\omega$, $$\lambda(r):=\lim_{n\to\infty}\frac{1}{n}\lambda_n(r,\omega)=-\mathbb{E}\left[\log u_r(\cdot,1)\right].$$
\end{lemma}
\begin{proof}
It follows from Lemma \ref{Qgot} that $(\delta\mathrm{e}^r)^{4(B-1)}\leq u_r(\omega,n)E_o^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right]\leq(\delta\mathrm{e}^r)^{-4(B-1)}$ for every $n\geq1$. Therefore, $$\lim_{n\to\infty}\left(\frac{1}{n}\log u_r(\omega,n) + \frac{1}{n}\log E_o^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right]\right) = 0.$$ However, by (\ref{feyziogluyla}), $$u_r(\omega,n)=\prod_{i=0}^{n-1}\frac{u_r(\omega,i+1)}{u_r(\omega,i)} = \prod_{i=0}^{n-1}u_r(T_i\omega,1).$$ Hence, it follows from the ergodic theorem that $$\lim_{n\to\infty}\frac{1}{n}\log E_o^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right] = -\lim_{n\to\infty}\frac{1}{n}\log u_r(\omega,n) = -\lim_{n\to\infty}\frac{1}{n}\sum_{i=0}^{n-1}\log u_r(T_i\omega,1) = -\mathbb{E}\left[\log u_r(\cdot,1)\right].\qedhere$$
\end{proof}
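In particular, in the nearest-neighbor case $B=1$, where $u_r(\omega,1)=\zeta(r,\omega)^{-1}$, Lemma \ref{ongbak} reads $\lambda(r)=\mathbb{E}\left[\log\zeta(r,\cdot)\right]$, in agreement with the formula obtained in Section \ref{vandiseksin}.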
In this subsection, we prove that $r\mapsto\lambda(r)$ is differentiable on $(-\infty,r_c)$. For that purpose, we first obtain certain bounds on $\lambda_n'(r,\omega)$ and $\lambda_n''(r,\omega)$. These bounds are given in the next two lemmas which involve the function $$G(\omega):=\inf_{1\leq z'\leq B}\pi\left(-1-z',-1\right)\inf_{0\leq z<B}\pi(-1,z).$$
For every $n\geq1$, $0\leq z<B$ and $\mathbb{P}$-a.e.\ $\omega$, note that
\begin{align}
P_o^{r,\omega}\left(X_{\tau_n}=n+z\right)&\geq P_o^{r,\omega}\left(X_{\tau_{n-1}}=n-1, X_{\tau_n}=n+z\right)\nonumber\\
&\geq\inf_{1\leq z'\leq B}\hat{\pi}_r\left(T_{n-1-z'}\omega,z'\right)\hat{\pi}_r(T_{n-1}\omega,z+1)\nonumber\\
&\geq(\delta\mathrm{e}^r)^{2B}\mathrm{e}^{2r}\inf_{1\leq z'\leq B}\pi\left(n-1-z',n-1\right)\pi(n-1,n+z)\label{madakdamak}\\
&\geq(\delta\mathrm{e}^r)^{2B}\mathrm{e}^{2r}G(T_n\omega)\nonumber
\end{align} where (\ref{madakdamak}) follows from (\ref{ikincicinko}).
\begin{lemma}\label{tornacihuso}
For every $r<r_c$, $n\geq1$ and $\mathbb{P}$-a.e.\ $\omega$,
\begin{equation}\label{cirkinsonja}
\left|\lambda_n'(r,\omega)-E_o^{r,\omega}\left[\tau_n\right]\right|\leq\frac{W_1(r)}{G(T_n\omega)}
\end{equation} is satisfied with $$W_1(r):=\frac{(\delta\mathrm{e}^r)^{-2B}\mathrm{e}^{-2r}H_1(r)c(r)^{1-B}}{1-c(r)},\quad H_1(r)\mbox{ as in (\ref{boundoyleolmazboyleolur}), and}\quad c(r) := \left(1-\left(\delta\mathrm{e}^r\right)^{2B}\right)^{1/B}<1.$$
\end{lemma}
\begin{proof}
For every $r<r_c$, $n\geq1$ and $\mathbb{P}$-a.e.\ $\omega$,
\begin{align*}
\lambda_n'(r,\omega)&=\frac{E_o^\omega\left[\tau_n\mathrm{e}^{r\tau_n},\tau_n<\infty\right]}{E_o^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right]}=\frac{E_o^{r,\omega}\left[\tau_nu_r\left(\omega,X_{\tau_n}\right)^{-1}\right]}{E_o^{r,\omega}\left[u_r\left(\omega,X_{\tau_n}\right)^{-1}\right]}=\frac{\sum_{z=0}^{B-1}E_o^{r,\omega}\left[\tau_n, X_{\tau_n}=n+z\right]u_r\left(\omega,n+z\right)^{-1}}{\sum_{z=0}^{B-1}P_o^{r,\omega}\left(X_{\tau_n}=n+z\right)u_r\left(\omega,n+z\right)^{-1}}.
\end{align*} Therefore,
\begin{equation}\label{turgayevlendi}
\inf_{0\leq z<B}E_o^{r,\omega}\left[\left.\tau_n\,\right| X_{\tau_n}=n+z\right]\leq\lambda_n'(r,\omega)\leq\sup_{0\leq z<B}E_o^{r,\omega}\left[\left.\tau_n\,\right| X_{\tau_n}=n+z\right].
\end{equation}
If $1\leq i\leq n$ and $0\leq z<B$, then
\begin{align}
E_o^{r,\omega}\left[\tau_i - \tau_{i-1}, X_{\tau_n}=n+z\right]&=\sum_{z'=0}^{B-1}E_o^{r,\omega}\left[\tau_i - \tau_{i-1}, X_{\tau_i}=i+z', X_{\tau_n}=n+z\right]\nonumber\\
&=\sum_{z'=0}^{B-1}E_o^{r,\omega}\left[\tau_i - \tau_{i-1}, X_{\tau_i}=i+z'\right]P_{i+z'}^{r,\omega}\left(X_{\tau_n}=n+z\right)\nonumber\\
&\leq\sum_{z'=0}^{B-1}E_o^{r,\omega}\left[\tau_i - \tau_{i-1}, X_{\tau_i}=i+z'\right]\left(P_o^{r,\omega}\left(X_{\tau_n}=n+z\right)+c(r)^{n-(i+z')}\right)\label{yigitozludon}\\
&\leq E_o^{r,\omega}\left[\tau_i - \tau_{i-1}\right]\left(P_o^{r,\omega}\left(X_{\tau_n}=n+z\right)+c(r)^{n-(i+(B-1))}\right)\label{wissensie}
\end{align} where (\ref{yigitozludon}) follows from Lemma \ref{muhacir}. Recall Lemma \ref{mavisap} and see that
\begin{align*}
E_o^{r,\omega}\left[\left.\tau_n\,\right| X_{\tau_n}=n+z\right]&=\sum_{i=1}^nE_o^{r,\omega}\left[\left.\tau_i - \tau_{i-1}\,\right| X_{\tau_n}=n+z\right]\\
&\leq\sum_{i=1}^nE_o^{r,\omega}\left[\tau_i - \tau_{i-1}\right]\left(\frac{P_o^{r,\omega}\left(X_{\tau_n}=n+z\right)+c(r)^{n-(i+(B-1))}}{P_o^{r,\omega}\left(X_{\tau_n}=n+z\right)}\right)\\
&=E_o^{r,\omega}\left[\tau_n\right] + \frac{1}{P_o^{r,\omega}\left(X_{\tau_n}=n+z\right)}\sum_{i=1}^nc(r)^{n-(i+(B-1))}E_o^{r,\omega}\left[\tau_i - \tau_{i-1}\right]\\
&\leq E_o^{r,\omega}\left[\tau_n\right] + \frac{(\delta\mathrm{e}^r)^{-2B}\mathrm{e}^{-2r}}{G(T_n\omega)}\sum_{j=1}^n c(r)^{j-B}E_o^{r,T_{n-j}\omega}\left[\tau_1\right]\\
&\leq E_o^{r,\omega}\left[\tau_n\right] + \frac{(\delta\mathrm{e}^r)^{-2B}\mathrm{e}^{-2r}H_1(r)}{G(T_n\omega)}\sum_{j=1}^n c(r)^{j-B}\\
&\leq E_o^{r,\omega}\left[\tau_n\right] + \frac{(\delta\mathrm{e}^r)^{-2B}\mathrm{e}^{-2r}H_1(r)c(r)^{1-B}}{(1-c(r))G(T_n\omega)}\\
&= E_o^{r,\omega}\left[\tau_n\right] + \frac{W_1(r)}{G(T_n\omega)}.
\end{align*} This bound, in combination with (\ref{turgayevlendi}), implies that
$$\lambda_n'(r,\omega)-E_o^{r,\omega}\left[\tau_n\right]\leq\sup_{0\leq z<B}E_o^{r,\omega}\left[\left.\tau_n\,\right| X_{\tau_n}=n+z\right]-E_o^{r,\omega}\left[\tau_n\right]\leq\frac{W_1(r)}{G(T_n\omega)}.$$
The proof of the other direction is similar.
\end{proof}
\begin{lemma}
For every $r<r_c$, $n\geq1$ and $\mathbb{P}$-a.e.\ $\omega$,
\begin{equation}\label{cirkinmarina}
\lambda_n''(r,\omega)\leq\left(\frac{W_1(r)}{G(T_n\omega)}\right)^2 + n\left(\frac{W_2(r)+2H_1(r)W_1(r)}{G(T_n\omega)}\right)
\end{equation} is satisfied with $$W_2(r):=(\delta\mathrm{e}^r)^{-2B}\mathrm{e}^{-2r}\left(H_2(r)+\frac{6\left(H_1(r)\right)^2c(r)^{-2(B-1)}}{1-c(r)}\right),$$ $H_1(r)$ and $H_2(r)$ as in (\ref{boundoyleolmazboyleolur}), and $W_1(r)$ as in Lemma \ref{tornacihuso}.
\end{lemma}
\begin{proof}
For every $r<r_c$, $n\geq1$ and $\mathbb{P}$-a.e.\ $\omega$,
\begin{align}
\lambda_n''(r,\omega)&=\frac{E_o^\omega\left[\tau_n^2\mathrm{e}^{r\tau_n},\tau_n<\infty\right]}{E_o^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right]}-\left(\lambda_n'(r,\omega)\right)^2=\frac{E_o^{r,\omega}\left[\tau_n^2u_r\left(\omega,X_{\tau_n}\right)^{-1}\right]}{E_o^{r,\omega}\left[u_r\left(\omega,X_{\tau_n}\right)^{-1}\right]}-\left(\lambda_n'(r,\omega)\right)^2\nonumber\\
&=\frac{\sum_{z=0}^{B-1}E_o^{r,\omega}\left[\tau_n^2, X_{\tau_n}=n+z\right]u_r\left(\omega,n+z\right)^{-1}}{\sum_{z=0}^{B-1}P_o^{r,\omega}\left(X_{\tau_n}=n+z\right)u_r\left(\omega,n+z\right)^{-1}}-\left(\lambda_n'(r,\omega)\right)^2\nonumber\\
&\leq\sup_{0\leq z<B}E_o^{r,\omega}\left[\left.\tau_n^2\,\right| X_{\tau_n}=n+z\right] - \left(\lambda_n'(r,\omega)\right)^2.\label{lafalasol}
\end{align}
If $1\leq i<j\leq n$, then
\begin{align}
&E_o^{r,\omega}\left[(\tau_i-\tau_{i-1})(\tau_j-\tau_{j-1})\right]\nonumber\\
&\quad= E_o^{r,\omega}\left[(\tau_i-\tau_{i-1})(\tau_j-\tau_{j-1}), X_{\tau_{j-1}}=j-1\right]\nonumber\\
&\quad=E_o^{r,\omega}\left[\tau_i-\tau_{i-1}, X_{\tau_{j-1}}=j-1\right]E_{j-1}^{r,\omega}\left[\tau_j\right]\nonumber\\
&\quad\leq E_o^{r,\omega}\left[\tau_i-\tau_{i-1}\right]\left(P_o^{r,\omega}(X_{\tau_{j-1}}=j-1) + c(r)^{(j-1)-(i+(B-1))}\right)E_{j-1}^{r,\omega}\left[\tau_j\right]\label{wurden}\\
&\quad\leq E_o^{r,\omega}\left[\tau_i-\tau_{i-1}\right]E_o^{r,\omega}\left[\tau_j-\tau_{j-1}\right] +\left(H_1(r)\right)^2c(r)^{(j-1)-(i+(B-1))}\nonumber
\end{align} where (\ref{wurden}) follows from (\ref{wissensie}).
\noindent If $0\leq z<B$, then
\begin{align*}
&E_o^{r,\omega}\left[(\tau_i-\tau_{i-1})(\tau_j-\tau_{j-1}), X_{\tau_n}=n+z\right]\\
&\quad\leq E_o^{r,\omega}\left[(\tau_i-\tau_{i-1})(\tau_j-\tau_{j-1})\right]\left(P_o^{r,\omega}\left(X_{\tau_n}=n+z\right)+c(r)^{n-(j+(B-1))}\right)\\
&\quad\leq \left(E_o^{r,\omega}\left[\tau_i-\tau_{i-1}\right]E_o^{r,\omega}\left[\tau_j-\tau_{j-1}\right] +\left(H_1(r)\right)^2c(r)^{(j-1)-(i+(B-1))}\right)P_o^{r,\omega}\left(X_{\tau_n}=n+z\right)\\
&\quad\quad+\left(H_1(r)\right)^2\left(1+c(r)^{(j-1)-(i+(B-1))}\right)c(r)^{n-(j+(B-1))}.
\end{align*}
Therefore,
\begin{align*}
&E_o^{r,\omega}\left[\left.\tau_n^2\,\right| X_{\tau_n}=n+z\right]\\
&\quad=\sum_{i=1}^nE_o^{r,\omega}\left[\left.\left(\tau_i-\tau_{i-1}\right)^2\,\right| X_{\tau_n}=n+z\right]+2\sum_{j=1}^n\sum_{i=1}^{j-1}E_o^{r,\omega}\left[\left.\left(\tau_i-\tau_{i-1}\right)\left(\tau_j-\tau_{j-1}\right)\,\right| X_{\tau_n}=n+z\right]\\
&\quad\leq\frac{nH_2(r)}{P_o^{r,\omega}\left(X_{\tau_n}=n+z\right)}+2\sum_{j=1}^n\sum_{i=1}^{j-1}E_o^{r,\omega}\left[\tau_i-\tau_{i-1}\right]E_o^{r,\omega}\left[\tau_j-\tau_{j-1}\right]\\
&\quad\quad+2\left(H_1(r)\right)^2\sum_{j=1}^n\sum_{i=1}^{j-1}\left[c(r)^{(j-1)-(i+(B-1))}+\frac{c(r)^{n-(j+(B-1))}}{P_o^{r,\omega}\left(X_{\tau_n}=n+z\right)}\left(1+c(r)^{(j-1)-(i+(B-1))}\right)\right]\\
&\quad\leq\frac{nH_2(r)}{P_o^{r,\omega}\left(X_{\tau_n}=n+z\right)}+\left(E_o^{r,\omega}\left[\tau_n\right]\right)^2\\
&\quad\quad+\frac{2\left(H_1(r)\right)^2}{P_o^{r,\omega}\left(X_{\tau_n}=n+z\right)}\left[\frac{n\cdot c(r)^{-(B-1)}}{1-c(r)}+\frac{n\cdot c(r)^{-(B-1)}}{1-c(r)}+\frac{n\cdot c(r)^{-2(B-1)}}{1-c(r)}\right]\\
&\quad=\left(E_o^{r,\omega}\left[\tau_n\right]\right)^2+\frac{n}{P_o^{r,\omega}\left(X_{\tau_n}=n+z\right)}\left(H_2(r)+\frac{6\left(H_1(r)\right)^2c(r)^{-2(B-1)}}{1-c(r)}\right)\\
&\quad\leq\left(E_o^{r,\omega}\left[\tau_n\right]\right)^2+\frac{n\cdot(\delta\mathrm{e}^r)^{-2B}\mathrm{e}^{-2r}}{G(T_n\omega)}\left(H_2(r)+\frac{6\left(H_1(r)\right)^2c(r)^{-2(B-1)}}{1-c(r)}\right)\\
&\quad=\left(E_o^{r,\omega}\left[\tau_n\right]\right)^2+n\left(\frac{W_2(r)}{G(T_n\omega)}\right).
\end{align*}
Recall the bounds in (\ref{cirkinsonja}) and (\ref{lafalasol}), and conclude that
\begin{align*}
\lambda_n''(r,\omega)&\leq\sup_{0\leq z<B}E_o^{r,\omega}\left[\left.\tau_n^2\,\right| X_{\tau_n}=n+z\right] - \left(\lambda_n'(r,\omega)\right)^2\\
&=\sup_{0\leq z<B}E_o^{r,\omega}\left[\left.\tau_n^2\,\right| X_{\tau_n}=n+z\right] - \left(E_o^{r,\omega}\left[\tau_n\right]\right)^2 + \left(\left(E_o^{r,\omega}\left[\tau_n\right]\right)^2 - \left(\lambda_n'(r,\omega)\right)^2\right)\\
&\leq n\left(\frac{W_2(r)}{G(T_n\omega)}\right)+ \left[E_o^{r,\omega}\left[\tau_n\right] + \lambda_n'(r,\omega)\right]\left[E_o^{r,\omega}\left[\tau_n\right] - \lambda_n'(r,\omega)\right]\\
&\leq n\left(\frac{W_2(r)}{G(T_n\omega)}\right)+ \left(2nH_1(r)+\frac{W_1(r)}{G(T_n\omega)}\right)\frac{W_1(r)}{G(T_n\omega)}\\
&= \left(\frac{W_1(r)}{G(T_n\omega)}\right)^2 + n\left(\frac{W_2(r)+2H_1(r)W_1(r)}{G(T_n\omega)}\right).\qedhere
\end{align*}
\end{proof}
\begin{lemma}\label{sakinnazik}
$r\mapsto\lambda(r)$ is differentiable on $(-\infty,r_c)$ with $$\lambda'(r)=g(r)=\mathbb{E}\left[\lim_{x\to-\infty}P_x^{r,\omega}\left(X_{\tau_o}=0\right)E_o^{r,\omega}\left[\tau_1\right]\right].$$
\end{lemma}
\begin{proof}
Assumption (A1) implies that $\mathbb{P}\left(\omega: G(\omega)>0\right)=1$. Therefore, $\mathbb{P}\left(\omega: G(\omega)\geq\epsilon\right)\geq\frac{1}{2}$ for some $\epsilon>0$. For $\mathbb{P}$-a.e.\ $\omega$, there exists a sequence $(n_k)_{k\geq1} = (n_k(\omega))_{k\geq1}$ of positive integers, increasing to infinity, such that $G(T_{n_k}\omega)\geq\epsilon$ for every $k\geq1$. (This follows from the ergodic theorem.) For every $r<r_c$, $k\geq1$ and $\mathbb{P}$-a.e.\ $\omega$,
$$\left|\lambda_{n_k}'(r,\omega)-E_o^{r,\omega}\left[\tau_{n_k}\right]\right|\leq\frac{W_1(r)}{G(T_{n_k}\omega)}\leq\epsilon^{-1}W_1(r)$$ by (\ref{cirkinsonja}). Thus,
$$\lim_{k\to\infty}\frac{1}{n_k}\lambda_{n_k}'(r,\omega)=\lim_{k\to\infty}\frac{1}{n_k}E_o^{r,\omega}\left[\tau_{n_k}\right]=g(r)=\mathbb{E}\left[\lim_{x\to-\infty}P_x^{r,\omega}\left(X_{\tau_o}=0\right)E_o^{r,\omega}\left[\tau_1\right]\right]$$ where the last two equalities follow from Lemma \ref{bugeceler}.
Given any $r<r_c$, pick $r_1, r_2\in\mathbb{R}$ such that $r_1<r<r_2<r_c$. For $\mathbb{P}$-a.e.\ $\omega$, note that
$$\sup_{r_1\leq s\leq r_2 \atop k\geq1}\frac{1}{n_k}\lambda_{n_k}'(s,\omega)\leq\sup_{r_1\leq s\leq r_2 \atop k\geq1}\left(H_1(s) + \frac{1}{n_k}\epsilon^{-1}W_1(s)\right)<\infty.$$ Therefore, the bounded convergence theorem implies that $$\lambda(r)-\lambda(r_1)=\lim_{k\to\infty}\frac{1}{n_k}\left(\lambda_{n_k}(r,\omega)-\lambda_{n_k}(r_1,\omega)\right)=\lim_{k\to\infty}\int_{r_1}^r\frac{1}{n_k}\lambda_{n_k}'(s,\omega)\mathrm{d}s=\int_{r_1}^rg(s)\mathrm{d}s.$$ It is easy to see that $g(\cdot)$ is Lipschitz continuous at $r$ since $$\sup_{r_1\leq s\leq r_2 \atop k\geq1}\frac{1}{n_k}\lambda_{n_k}''(s,\omega)<\infty$$ by (\ref{cirkinmarina}). The desired result follows from the fundamental theorem of calculus.
\end{proof}
\subsection{Verification of the Ansatz}\label{fluxdensity}
\begin{lemma}\label{cakabeycan}
For every $r<r_c$, $$P_o^r\left(\lim_{n\to\infty}\frac{\tau_n}{n}=\lambda'(r)\right)=1.$$
\end{lemma}
\begin{proof}
For every $r<r_c$, $s<r_c-r$ and $\mathbb{P}$-a.e.\ $\omega$,
\begin{align*}
\lim_{n\to\infty}\frac{1}{n}\log E_o^{r,\omega}\left[\mathrm{e}^{s\tau_n}\right]&=\lim_{n\to\infty}\frac{1}{n}\log E_o^\omega\left[\mathrm{e}^{(r+s)\tau_n}u_r\left(\omega,X_{\tau_n}\right),\tau_n<\infty\right]\\
&=\lim_{n\to\infty}\frac{1}{n}\log E_o^\omega\left[\mathrm{e}^{(r+s)\tau_n},\tau_n<\infty\right] + \lim_{n\to\infty}\frac{1}{n}\log u_r\left(\omega,n\right)\\
&=\lambda(r+s)-\lambda(r)
\end{align*} where the last equality follows from Lemma \ref{ongbak}.
For every $\epsilon>0$, a standard application of Chebyshev's inequality shows that
\begin{align*}
\limsup_{n\to\infty}\frac{1}{n}\log P_o^{r,\omega}\left(\frac{\tau_n}{n}-\lambda'(r)>\epsilon\right)&\leq\limsup_{n\to\infty}\frac{1}{n}\log E_o^{r,\omega}\left[\mathrm{e}^{s\tau_n}\right] - s\left(\lambda'(r)+\epsilon\right)\\
&=\lambda(r+s)-\lambda(r)-s\left(\lambda'(r)+\epsilon\right)<0
\end{align*} when $s>0$ is small enough. Similarly, $$\limsup_{n\to\infty}\frac{1}{n}\log P_o^{r,\omega}\left(\frac{\tau_n}{n}-\lambda'(r)<-\epsilon\right)<0\quad\mbox{and}\quad\limsup_{n\to\infty}\frac{1}{n}\log P_o^{r,\omega}\left(\left|\frac{\tau_n}{n}-\lambda'(r)\right|>\epsilon\right)<0.$$ Since $\epsilon>0$ is arbitrary, the Borel--Cantelli lemma implies the desired result.
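To see that the bound in the last display is indeed negative for all sufficiently small $s\in(0,r_c-r)$, note that, by the differentiability of $\lambda(\cdot)$ at $r$ (Lemma \ref{sakinnazik}),
$$\lambda(r+s)-\lambda(r)-s\left(\lambda'(r)+\epsilon\right)=s\left(\frac{\lambda(r+s)-\lambda(r)}{s}-\lambda'(r)-\epsilon\right),$$
and the quantity in parentheses converges to $-\epsilon<0$ as $s\downarrow0$.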
\end{proof}
\begin{lemma}
$F_r:\Omega\times\mathcal{R}\to\mathbb{R}$, defined by $F_r(\omega,z):=\log u_r(\omega,z) + z\lambda(r)$ for each $z\in\mathcal{R}$, is in class $\mathcal{K}$.
\end{lemma}
\begin{proof}
For each $z\in\mathcal{R}$ and $\mathbb{P}$-a.e.\ $\omega$, $$\left|F_r(\omega,z)\right|\leq\left|\log u_r(\omega,z)\right| + \left|z\lambda(r)\right|\leq B\left(-\log\left(\delta\mathrm{e}^r\right)+\left|\lambda(r)\right|\right)<\infty.$$ Therefore, $F_r$ satisfies the moment condition of Definition \ref{K}. The closed loop condition follows immediately from (\ref{cangorecek}). Finally, if $1\leq z\leq B$, then
$$\mathbb{E}\left[\log u_r(\omega,z)\right]=\mathbb{E}\left[\log\left(\prod_{i=0}^{z-1}u_r(T_i\omega,1)\right)\right]= \sum_{i=0}^{z-1}\mathbb{E}\left[\log u_r(T_i\omega,1)\right]=-z\lambda(r).$$
(The case $-B\leq z\leq -1$ is similar.) Hence, $F_r$ satisfies the mean zero condition as well.
\end{proof}
It follows easily from Lemma \ref{cakabeycan} that the LLN for the mean velocity of the particle holds with limiting velocity $(\lambda'(r))^{-1}$.
If there exists a $\phi_r\in L^1(\mathbb{P})$ such that $\phi_r\,\mathrm{d}\mathbb{P}$ is a $\hat{\pi}_r$-invariant probability measure, then $\mu_\xi\in M_1(\Omega\times\mathcal{R})$ with
\begin{equation}\label{selimgelirmi}
\mathrm{d}\mu_\xi(\omega,z):=\hat{\pi}_r(\omega,z)\phi_r(\omega)\mathrm{d}\mathbb{P}(\omega)=\pi(0,z)\mathrm{e}^{-z\lambda(r)+F_r(\omega,z)+r}\phi_r(\omega)\mathrm{d}\mathbb{P}(\omega)
\end{equation} fits the Ansatz given in Lemma \ref{lagrange} for $\xi=(\lambda'(r))^{-1}$. The existence of such a $\phi_r$ is a corollary of the following general result which completes our construction.
\begin{theorem}\label{density}
Suppose $d=1$. If an environment kernel $\hat{\pi}:\Omega\times\mathcal{R}\to\mathbb{R}^+$ satisfies $\mathbb{P}\left(\omega: \hat{\pi}(\omega,\pm1)\geq\epsilon\right)=1$ for some $\epsilon>0$, and if $E_o^{\hat{\pi}}[\tau_1]<\infty$, then the following hold:
\begin{itemize}
\item[(a)] $\phi(\omega):=\lim_{x\rightarrow-\infty} E_x^{\hat{\pi},\omega}\left[\sum_{k=0}^\infty{{\rm 1\mkern-1.5mu}\!{\rm I}}_{X_k=0}\right]\geq\epsilon^B$ exists for $\mathbb{P}$-a.e.\ $\omega$.
\item[(b)] $\phi\in L^1(\mathbb{P})$.
\item[(c)] $\mathbb{Q}\in M_1(\Omega)$, defined by $\mathrm{d}\mathbb{Q}(\omega):=\left({1}/{\left\|\phi\right\|_{L^1(\mathbb{P})}}\right)\phi(\omega)\mathrm{d}\mathbb{P}(\omega)$, is $\hat{\pi}$-invariant.
\end{itemize}
\end{theorem}
\begin{remark}
Br\'emont \cite{Bremont} also shows the existence of a $\hat{\pi}$-invariant probability measure $\mathbb{Q}\ll\mathbb{P}$ in the case of ballistic random walk with bounded jumps on $\mathbb{Z}$. However, his argument is not elementary, assumes a stronger ellipticity condition, and does not provide a formula for the density. Rassoul-Agha \cite{Firas} takes an approach similar to ours, but resorts to Ces\`aro means and weak limits instead of showing the almost sure convergence in part (a) of Theorem \ref{density}, and assumes that the so-called Kalikow condition holds. For the related model of ``random walk on a strip'', Roitershtein \cite{roiter} shows the existence of the ergodic invariant measure. It is easy to see that the natural analog of our formula works in that setting.
\end{remark}
\begin{proof}[Proof of Theorem \ref{density}]
Consider the hitting time $V_o:=\inf\{k\geq0:\,X_k=0\}$. For every $x\in\mathbb{Z}$ and $\omega\in\Omega$, let $\psi(\omega,x):=P_x^{\hat{\pi},\omega}(V_o<\infty)$. If $x\neq0$, then \[\psi(\omega,x)=\sum_{z\in\mathcal{R}}\hat{\pi}(T_x\omega,z)\psi(\omega,x+z).\] The function $\phi(\omega,x):=E_x^{\hat{\pi},\omega}\left[\sum_{k=0}^\infty{{\rm 1\mkern-1.5mu}\!{\rm I}}_{X_k=0}\right]$ clearly satisfies $\phi(\omega,x)=\phi(\omega,0)\psi(\omega,x)$. Hence, \[\phi(\omega):=\lim_{x\rightarrow-\infty}\phi(\omega,x)=\phi(\omega,0)\lim_{x\rightarrow-\infty}\psi(\omega,x)\] exists for $\mathbb{P}$-a.e.\ $\omega$ by Lemma \ref{muhacir}. The ellipticity condition implies that $\mathbb{P}\left(\omega: \phi(\omega)\geq\epsilon^B\right)=1$. This proves part (a) of the theorem.
Let us now show that $\phi\in L^1(\mathbb{P})$. For every $n\geq1$ and $\mathbb{P}$-a.e.\ $\omega$,
\begin{align}
\sum_{i=0}^{n-1}\phi(T_i\omega)&=\sum_{i=0}^{n-1}\lim_{x\rightarrow-\infty}E_x^{\hat{\pi},T_i\omega}\left[\sum_{k=0}^\infty{{\rm 1\mkern-1.5mu}\!{\rm I}}_{X_k=0}\right]=\sum_{i=0}^{n-1}\lim_{x\rightarrow-\infty}E_x^{\hat{\pi},\omega}\left[\sum_{k=0}^\infty{{\rm 1\mkern-1.5mu}\!{\rm I}}_{X_k=i}\right]\nonumber\\
&=\lim_{x\rightarrow-\infty}E_x^{\hat{\pi},\omega}\left[\#\{k\geq0:\,0\leq X_k\leq n-1\}\right]\nonumber\\
&\leq\lim_{x\rightarrow-\infty}E_x^{\hat{\pi},\omega}\left[\tau_n-\tau_o\right]+\lim_{x\rightarrow-\infty}E_x^{\hat{\pi},\omega}\left[\#\{k\geq \tau_n:\,X_k\leq n-1\}\right]\nonumber\\
&=\lim_{x\rightarrow-\infty}E_x^{\hat{\pi},\omega}\left[\tau_n-\tau_o\right]+\lim_{x\rightarrow-\infty}E_x^{\hat{\pi},T_n\omega}\left[\#\{k\geq \tau_o:\,X_k\leq -1\}\right]\nonumber\\
&\leq\lim_{x\rightarrow-\infty}E_x^{\hat{\pi},\omega}\left[\tau_n-\tau_o\right]+\sup_{0\leq z<B}E_z^{\hat{\pi},T_n\omega}\left[\#\{k\geq0:\,X_k\leq -1\}\right].\label{asil}
\end{align} Here, $\#$ denotes the number of elements of a set. If $0\leq z<B$, then
\begin{align*}
&E_z^{\hat{\pi},\omega}\left[\#\{k\geq0:\,X_k\leq-1\}\right]=E_z^{\hat{\pi},\omega}\left[\#\{k\geq0:\,X_k\leq-1\},\bar{\tau}_{-1}<\infty\right]\\
&\quad\quad=\sum_{z'=-B}^{-1}P_z^{\hat{\pi},\omega}\left(\bar{\tau}_{-1}<\infty,X_{\bar{\tau}_{-1}}=z'\right)E_{z'}^{\hat{\pi},\omega}\left[\#\{k\geq0:\,X_k\leq-1\}\right]\\
&\quad\quad=\sum_{z'=-B}^{-1}P_z^{\hat{\pi},\omega}\left(\bar{\tau}_{-1}<\infty,X_{\bar{\tau}_{-1}}=z'\right)\left(E_{z'}^{\hat{\pi},\omega}\left[\tau_o\right]+E_{z'}^{\hat{\pi},\omega}\left[\#\{k\geq\tau_o:\,X_k\leq-1\}\right]\right)\\
&\quad\quad\leq\sum_{z'=-B}^{-1}P_z^{\hat{\pi},\omega}\left(\bar{\tau}_{-1}<\infty,X_{\bar{\tau}_{-1}}=z'\right)\left(E_{z'}^{\hat{\pi},\omega}\left[\tau_o\right]+\sup_{0\leq z^{''}<B}E_{z^{''}}^{\hat{\pi},\omega}\left[\#\{k\geq0:\,X_k\leq-1\}\right]\right)\\
&\quad\quad\leq P_z^{\hat{\pi},\omega}\left(\bar{\tau}_{-1}<\infty\right)\left(\sup_{-B\leq z'\leq-1}E_{z'}^{\hat{\pi},\omega}\left[\tau_o\right]+\sup_{0\leq z^{''}<B}E_{z^{''}}^{\hat{\pi},\omega}\left[\#\{k\geq0:\,X_k\leq-1\}\right]\right).
\end{align*}
Therefore, $$\sup_{0\leq z<B}E_z^{\hat{\pi},\omega}\left[\#\{k\geq0:\,X_k\leq -1\}\right]\leq\left(\frac{\sup_{0\leq z< B}P_z^{\hat{\pi},\omega}(\bar{\tau}_{-1}<\infty)}{1-\sup_{0\leq z< B}P_z^{\hat{\pi},\omega}(\bar{\tau}_{-1}<\infty)}\right)\sup_{-B\leq z'\leq-1}E_{z'}^{\hat{\pi},\omega}\left[\tau_o\right]=:D(\omega).$$
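To spell out the rearrangement behind this bound: writing $S(\omega)$ for the left-hand side and $p(\omega):=\sup_{0\leq z< B}P_z^{\hat{\pi},\omega}(\bar{\tau}_{-1}<\infty)$, the preceding display, after taking the supremum over $0\leq z<B$, gives
$$S(\omega)\leq p(\omega)\left(\sup_{-B\leq z'\leq-1}E_{z'}^{\hat{\pi},\omega}\left[\tau_o\right]+S(\omega)\right),$$
which, provided $S(\omega)<\infty$ and $p(\omega)<1$, rearranges to $\left(1-p(\omega)\right)S(\omega)\leq p(\omega)\sup_{-B\leq z'\leq-1}E_{z'}^{\hat{\pi},\omega}\left[\tau_o\right]$, that is, to $S(\omega)\leq D(\omega)$.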
Since $\mathbb{P}\left(\omega: D(\omega)<\infty\right)=1$, there exists a $C<\infty$ such that $\mathbb{P}\left(\omega: D(\omega)\leq C\right)\geq\frac{1}{2}$. For $\mathbb{P}$-a.e.\ $\omega$, there exists a strictly increasing sequence $(n_j)_{j\geq1}=(n_j(\omega))_{j\geq1}$ of positive integers such that $D(T_{n_j}\omega)\leq C$ for every $j\geq1$. (This follows from the ergodic theorem.) By (\ref{asil}) and the ergodic theorem,
\begin{align*}
\left\|\phi\right\|_{L^1(\mathbb{P})}&=\lim_{j\rightarrow\infty}\frac{1}{n_j}\sum_{i=0}^{n_j-1}\phi(T_i\omega)\leq\lim_{j\rightarrow\infty}\frac{1}{n_j}\lim_{x\rightarrow-\infty}E_x^{\hat{\pi},\omega}\left[\tau_{n_j}-\tau_o\right]\\
&=\lim_{j\rightarrow\infty}\frac{1}{n_j}\lim_{x\rightarrow-\infty}\sum_{i=0}^{n_j-1}E_x^{\hat{\pi},\omega}\left[\tau_{i+1}-\tau_i\right]\leq\lim_{j\rightarrow\infty}\frac{1}{n_j}\sum_{i=0}^{n_j-1}E_o^{\hat{\pi},T_i\omega}\left[\tau_1\right]=E_o^{\hat{\pi}}\left[\tau_1\right]<\infty.
\end{align*} This proves part (b) of the theorem.
For every $x\neq0$ and $\mathbb{P}$-a.e.\ $\omega$, note that
\begin{align*}
&\sum_{z\in\mathcal{R}}E_{x+z}^{\hat{\pi},T_{-z}\omega}\left[\sum_{k=0}^\infty{{\rm 1\mkern-1.5mu}\!{\rm I}}_{X_k=0}\right]\hat{\pi}(T_{-z}\omega,z)=\sum_{z\in\mathcal{R}}E_x^{\hat{\pi},\omega}\left[\sum_{k=0}^\infty{{\rm 1\mkern-1.5mu}\!{\rm I}}_{X_k=-z}\right]\hat{\pi}(T_{-z}\omega,z)\\&=E_x^{\hat{\pi},\omega}\left[\sum_{k=0}^\infty{{\rm 1\mkern-1.5mu}\!{\rm I}}_{X_{k+1}=0}\right]=E_x^{\hat{\pi},\omega}\left[\sum_{k=0}^\infty{{\rm 1\mkern-1.5mu}\!{\rm I}}_{X_{k}=0}\right].
\end{align*}
Let $x\to-\infty$ and conclude that $$\sum_{z\in\mathcal{R}}\phi(T_{-z}\omega)\hat{\pi}(T_{-z}\omega,z)=\phi(\omega).$$ This proves part (c) of the theorem.
\end{proof}
\subsection{Explicit formula for the rate function}\label{formulabir}
\begin{proof}[Proof of Lemma \ref{lifeisrandom}]
For every $r<r_c$, $n\geq B+1$ and $\mathbb{P}$-a.e.\ $\omega$, $$\left(\delta\mathrm{e}^r\right)E_o^\omega\left[\mathrm{e}^{r\tau_1},\tau_1<\infty\right]\leq u_{r,n}(\omega,1)E_o^\omega\left[\mathrm{e}^{r\tau_1},\tau_1<\infty\right]\leq\left(\delta\mathrm{e}^r\right)^{-(B-1)}$$ where the first and the second inequalities follow from (\ref{isilam}) and (\ref{deyyus}), respectively. Thus, $$\mathbb{P}\left(\omega: E_o^\omega\left[\mathrm{e}^{r\tau_1},\tau_1<\infty\right]\leq\left(\delta\mathrm{e}^r\right)^{-B}\right)=1$$ for $r<r_c$, and also for $r=r_c$ by the monotone convergence theorem. Lemma \ref{ongbak} and (\ref{cangorecek}) are clearly valid for $r\leq r_c$, and $$\lambda(r):=\lim_{n\to\infty}\frac{1}{n}\log E_o^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right]=-\mathbb{E}\left[\log u_r(\cdot,1)\right]\leq-\log\left(\delta\mathrm{e}^r\right)<\infty.$$
Suppose $r>r_c$. Then, $E_o^\omega\left[\mathrm{e}^{r\tau_1},\tau_1<\infty\right]=\infty$ for $\mathbb{P}$-a.e.\ $\omega$. For every $n\geq B$,
\begin{align*}
E_o^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right]&=\sum_{z=1}^BE_o^\omega\left[\mathrm{e}^{r\tau_1},\tau_1<\infty,X_{\tau_1}=z\right]E_z^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right]\\
&\geq\sum_{z=1}^BE_o^\omega\left[\mathrm{e}^{r\tau_1},\tau_1<\infty,X_{\tau_1}=z\right]\left(\delta\mathrm{e}^r\right)^{n-z}\\
&\geq E_o^\omega\left[\mathrm{e}^{r\tau_1},\tau_1<\infty\right]\left(\delta\mathrm{e}^r\right)^{n-1}=\infty.
\end{align*}
Therefore, $\lambda(r):=\lim_{n\to\infty}\frac{1}{n}\log E_o^\omega\left[\mathrm{e}^{r\tau_n},\tau_n<\infty\right]=\infty$. This proves that $r\mapsto\lambda(r)$ is (i) deterministic, and (ii) finite precisely on $(-\infty,r_c]$. Note that $0\leq r_c\leq-\log\delta<\infty$.
The function $r\mapsto\lambda(r)$ is differentiable on $(-\infty,r_c)$ by Lemma \ref{sakinnazik}. Suppose there exist $r_1<r_c$ and $r_2<r_c$ such that $\lambda'(r_1)=\lambda'(r_2)$. Then, for $r=r_1$, the measure $\mu_\xi$ (defined in (\ref{selimgelirmi})) fits the Ansatz given in Lemma \ref{lagrange} for $\xi=(\lambda'(r_1))^{-1}$. The same is true for $r=r_2$. However, such a $\mu_\xi$ is unique by Lemma \ref{lagrange}. Therefore, $\mathbb{P}\left(\omega:u_{r_1}(\omega,1)=u_{r_2}(\omega,1)\right)=1$, $\lambda(r_1)=\lambda(r_2)$ and $r_1=r_2$. This proves that $r\mapsto\lambda(r)$ is strictly convex on $(-\infty,r_c)$.
For any $r<r_c$, Lemma \ref{cakabeycan} says that $P_o^r\left(\lim_{n\to\infty}\frac{\tau_n}{n}=\lambda'(r)\right)=1$. The function $r\mapsto\lambda'(r)$ is strictly increasing and the jumps of the walk under $P_o^r$ are bounded by $B$. Therefore, $\xi_c^{-1}=\lambda'(r_c-)>\lambda'(r)\geq B^{-1}$.
We have proved half of Lemma \ref{lifeisrandom}, namely the statements involving $r\mapsto\lambda(r)$. As usual, we leave the proof of the other half to the reader.
What remains to be shown is that the same $r_c$ works for $\lambda(\cdot)$ and $\bar{\lambda}(\cdot)$. This is proved in Appendix C.
\end{proof}
\begin{proof}[Proof of Theorem \ref{explicitformulah}]
For every $r<r_c$,
\begin{align}
\lambda(r)&=\lim_{n\to\infty}\frac{1}{nB}\log E_o^\omega\left[\mathrm{e}^{r\tau_{nB}},\tau_{nB}<\infty\right]\geq\lim_{n\to\infty}\frac{1}{nB}\log E_o^\omega\left[\mathrm{e}^{r\tau_{nB}},X_{n+1}=nB\right]\label{kafatek}\\
&\geq\lim_{n\to\infty}\frac{1}{nB}\log\left(\mathrm{e}^{rn-|r|}P_o^\omega\left(X_{n+1}=nB\right)\right)=B^{-1}\left(r-I(B)\right).\label{kafacift}
\end{align}
In (\ref{kafatek}), $X_{n+1}$ is used instead of $X_n$ in order to avoid problems when $\mathbb{P}$ is not ergodic under $T_B$ (e.g.\ when the environment is $B$-periodic).
The function $r\mapsto\lambda(r)$ is strictly convex and differentiable on $(-\infty,r_c)$ by Lemma \ref{lifeisrandom}.
Since $\lambda'(r)\geq B^{-1}$, (\ref{kafacift}) implies that $\lim_{r\to-\infty}\lambda'(r)=B^{-1}$.
For every $\xi\in(\xi_c,B)$, there exists a unique $r=r(\xi)\in(-\infty,r_c)$ such that $\xi^{-1}=\lambda'(r)$. Lemma \ref{lagrange} implies that the measure $\mu_\xi$ (given in (\ref{selimgelirmi})) is the unique minimizer of (\ref{level1ratetilde}). Therefore, $$I(\xi) = \mathfrak{I}(\mu_\xi)=r(\xi)-\xi\lambda(r(\xi))$$ by (\ref{sifirladik}). Since $\lambda'(r(\xi))=\xi^{-1}$, it is clear that $$I(\xi)=\sup_{r\in\mathbb{R}}\left\{r-\xi\lambda(r)\right\}=\xi\sup_{r\in\mathbb{R}}\left\{r\xi^{-1}-\lambda(r)\right\}=\xi\lambda^{*}(\xi^{-1}).$$
By convex duality, $\xi\mapsto I(\xi)$ is strictly convex and differentiable on $(\xi_c,B)$.
If $\xi_c=0$, then we have identified $I(\cdot)$ on $(0,B)$. Let us now suppose $\xi_c>0$. Note that $$I'(\xi)=\frac{\mathrm{d}}{\mathrm{d}\xi}[r(\xi)-\xi\lambda(r(\xi))]=r'(\xi)-\lambda(r(\xi))-\xi\lambda'(r(\xi))r'(\xi)=-\lambda(r(\xi)).$$ Therefore, $I(\xi_c)-\xi_cI'(\xi_c+)=r_c$. This implies by convexity that $I(0)\geq r_c$. On the other hand, $$E_o^\omega[\mathrm{e}^{r\tau_1},\tau_1<\infty]=\sum_{k=1}^{\infty}\mathrm{e}^{rk}P_o^\omega(\tau_1=k)\leq\sum_{k=1}^{\infty}\mathrm{e}^{rk}P_o^\omega(1\leq X_k\leq B)\leq\sum_{k=1}^{\infty}\mathrm{e}^{(r-I(0))k + o(k)}<\infty$$ for any $r<I(0)$. Hence, $r_c = I(0)$. The equality $I(\xi_c)-\xi_cI'(\xi_c+)=I(0)$ forces $I(\cdot)$ to be affine linear on $[0,\xi_c]$ with a slope of $I'(\xi_c+)$. In particular, $\xi\mapsto I(\xi)$ is differentiable on $(0,B)$.
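For the identity $I(\xi_c)-\xi_cI'(\xi_c+)=r_c$ used above, note that on $(\xi_c,B)$ we have
$$I(\xi)-\xi I'(\xi)=\left(r(\xi)-\xi\lambda(r(\xi))\right)+\xi\lambda(r(\xi))=r(\xi),$$
and $r(\xi)\uparrow r_c$ as $\xi\downarrow\xi_c$, because $\lambda'(r(\xi))=\xi^{-1}\uparrow\xi_c^{-1}=\lambda'(r_c-)$ and $\lambda'(\cdot)$ is strictly increasing.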
Still supposing $\xi_c>0$, fix $\xi\in(0,\xi_c]$. Then, $\frac{\mathrm{d}}{\mathrm{d}r}\left(r-\xi\lambda(r)\right)>0$ for every $r<r_c$. Therefore, $$\sup_{r\in\mathbb{R}}\left\{r-\xi\lambda(r)\right\}=r_c-\xi\lambda(r_c)=I(0)+\xi I'(\xi_c+)=I(\xi).$$ In short, $I(\xi)=\sup_{r\in\mathbb{R}}\left\{r-\xi\lambda(r)\right\}$ for every $\xi\in(0,B)$.
Let us no longer suppose $\xi_c>0$. At $\xi=B$,
\begin{align*}
I(B)&=\lim_{\xi\to B-}I(\xi)=\lim_{\xi\to B-}\left[r(\xi)-\xi\lambda(r(\xi))\right]=\lim_{r\to-\infty}\left[r-\frac{\lambda(r)}{\lambda'(r)}\right]\\
&\leq\lim_{r\to-\infty}\left[r-B\lambda(r)\right]\leq\sup_{r\in\mathbb{R}}\left\{r-B\lambda(r)\right\}\leq I(B).
\end{align*}
Here, the last inequality follows from (\ref{kafacift}). It is easy to check that $I(\xi)=\sup_{r\in\mathbb{R}}\left\{r-\xi\lambda(r)\right\}=\infty$ when $\xi>B$. This concludes the proof of Theorem \ref{explicitformulah} for $\xi\geq0$. The arguments regarding $\xi<0$ are similar.
\end{proof}
\begin{proof}[Proof of Proposition \ref{Wronskian}]
Suppose that the walk is nearest-neighbor. For every $\omega\in\Omega$, let $\rho(\omega):=\frac{\pi(0,-1)}{\pi(0,1)}$. For every $r<r_c$, recall that the function $u_r:\Omega\times\mathbb{Z}\to\mathbb{R}^+$ satisfies
\begin{equation}\label{saykool}
u_r(\omega,x)=\pi(x,x-1)\mathrm{e}^ru_r(\omega,x-1)+\pi(x,x+1)\mathrm{e}^ru_r(\omega,x+1)
\end{equation} and that $\lambda(r)=-\mathbb{E}\left[\log u_r(\cdot,1)\right]$. Replacing $(\tau_n)_{n\geq1}$ by $(\bar{\tau}_{-n})_{n\geq1}$ in the whole construction, one can similarly obtain a function $\bar{u}_r:\Omega\times\mathbb{Z}\to\mathbb{R}^+$ such that
\begin{equation}\label{saykoolmak}
\bar{u}_r(\omega,x)=\pi(x,x-1)\mathrm{e}^r\bar{u}_r(\omega,x-1)+\pi(x,x+1)\mathrm{e}^r\bar{u}_r(\omega,x+1)
\end{equation}
and $\bar{\lambda}(r)=\mathbb{E}\left[\log \bar{u}_r(\cdot,1)\right]$. Introduce
$$U_r(\omega,x):=\left(
\begin{array}{ll}
u_r(\omega,x+1)&\bar{u}_r(\omega,x+1)\\
u_r(\omega,x)&\bar{u}_r(\omega,x)
\end{array}
\right)$$ and the Wronskian $W_r(\omega,x) := \mathrm{det}\left(U_r(\omega,x)\right)$.
By (\ref{saykool}) and (\ref{saykoolmak}), solving each equation for the value at $x+1$ and using $\pi(x,x-1)+\pi(x,x+1)=1$,
$$U_r(\omega,x)=\left(
\begin{array}{ll}
\mathrm{e}^{-r}(1+\rho(T_x\omega))&-\rho(T_x\omega)\\
1&0
\end{array}
\right)U_r(\omega,x-1)$$ and $W_r(\omega,x)=\rho(T_x\omega)W_r(\omega,x-1)$. However, it follows from (\ref{cangorecek}) that
$$W_r(\omega,x)=u_r(\omega,x+1)\bar{u}_r(\omega,x)-u_r(\omega,x)\bar{u}_r(\omega,x+1)=u_r(\omega,x)\bar{u}_r(\omega,x)W_r(T_x\omega,0).$$
Therefore, at $x=1$,
\begin{align*}
\rho(T_1\omega)W_r(\omega,0)&=W_r(\omega,1)=u_r(\omega,1)\bar{u}_r(\omega,1)W_r(T_1\omega,0),\\
\log\rho(T_1\omega)+\log W_r(\omega,0)&=\log u_r(\omega,1)+\log\bar{u}_r(\omega,1)+\log W_r(T_1\omega,0).
\end{align*} Take $\mathbb{E}$-expectation to deduce that $\mathbb{E}\left[\log\rho(\cdot)\right]=\bar{\lambda}(r)-\lambda(r)$. In particular, $\bar{\lambda}(r)-\lambda(r)=\bar{\lambda}(0)-\lambda(0)$.
Hence, for every $\xi\in[-1,0)$,
$$I(\xi)=\sup_{r\in\mathbb{R}}\left\{r+\xi\bar{\lambda}(r)\right\}=\sup_{r\in\mathbb{R}}\left\{r-(-\xi)\lambda(r)\right\}+\xi\cdot\mathbb{E}\left[\log\rho(\cdot)\right]=I(-\xi)+\xi\cdot\mathbb{E}\left[\log\rho(\cdot)\right].$$
In order to prove that such a symmetry is generally absent for walks with bounded jumps, let us provide a counterexample. Consider classical random walk on $\mathbb{Z}$. Let $p(z):=P_o(X_1=z)$ for every $z\in\mathbb{Z}$. Suppose $p(-2)=1/7$, $p(-1)=3/7$, $p(1)=1/7$ and $p(2)=2/7$. For $r<0$, it is easy to see that $\mathrm{e}^{-\lambda(r)}$ and $\mathrm{e}^{\bar{\lambda}(r)}$ are the two positive roots $x_r$ and $\bar{x}_r$ of the polynomial $2x^4 + x^3 -7\mathrm{e}^{-r}x^2 + 3x + 1$. By plugging in various values for $r$, one can check that $\bar{\lambda}(r)-\lambda(r)=\log(x_r\bar{x}_r)$ is not independent of $r$.
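For instance, the following short numerical sketch (in Python with NumPy; the function name is ours and it merely automates the check just described) evaluates $\log(x_r\bar{x}_r)$ for a few values of $r<0$ and shows that it varies with $r$:
\begin{verbatim}
import numpy as np

def log_product_of_positive_roots(r):
    # roots of 2x^4 + x^3 - 7exp(-r)x^2 + 3x + 1 (the polynomial above)
    roots = np.roots([2.0, 1.0, -7.0 * np.exp(-r), 3.0, 1.0])
    # for r < 0 this polynomial has exactly two positive roots, x_r and x_bar_r
    pos = [x.real for x in roots if abs(x.imag) < 1e-9 and x.real > 0]
    return np.log(pos[0] * pos[1])  # = lambda_bar(r) - lambda(r)

for r in (-0.5, -1.0, -2.0):
    print(r, log_product_of_positive_roots(r))
# the printed values are pairwise different,
# so lambda_bar(r) - lambda(r) is not constant in r
\end{verbatim}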
\end{proof}
\section*{Appendix A}
\begin{proposition}
For nearest-neighbor random walk on $\mathbb{Z}$ in a uniformly elliptic product environment, the function $\mathfrak{I}:M_1(\Omega\times\mathcal{R})\to\mathbb{R}^+$, given by (\ref{level2ratetilde}), is not lower semicontinuous. Hence, $\mathfrak{I}\neq\mathfrak{I}^{**}$.
\end{proposition}
\begin{proof}
Define $a_{\infty}:=\mathbb{E}[\rho]^{-1/2}$ where $\rho(\omega) := {\pi(0,-1)}/{\pi(0,1)}$ for every $\omega\in\Omega$. Given any sequence $(a_n)_{n\geq1}$ that is strictly increasing to $a_{\infty}$, introduce a sequence $(\hat{\pi}_n)_{n\geq1}$ of environment kernels by setting $$\hat{\pi}_n(\omega,-1) := \frac{a_n\pi(0,-1)}{a_n\pi(0,-1) + a_n^{-1}\pi(0,1)}\qquad\mbox{and}\qquad\hat{\pi}_n(\omega,1) := \frac{a_n^{-1}\pi(0,1)}{a_n\pi(0,-1) + a_n^{-1}\pi(0,1)}$$ for $1\leq n\leq\infty$.
When $1\leq n<\infty$, $\rho_n(\omega) := {\hat{\pi}_n(\omega,-1)}/{\hat{\pi}_n(\omega,1)}$ satisfies $\mathbb{E}[\rho_n]=a_n^2\mathbb{E}[\rho]={a_n^2}/{a_{\infty}^2}<1$. It follows from \cite{Solomon} that the LLN holds under the environment kernel $\hat{\pi}_n$, and the limiting velocity is positive. By \cite{alili}, there exists a $\hat{\pi}_n$-invariant probability measure $\mathbb{Q}_n\ll\mathbb{P}$. Let us define $\mu_n\in M_1(\Omega\times U)$ by $\mathrm{d}\mu_n(\omega,z):=\mathrm{d}\mathbb{Q}_n(\omega)\hat{\pi}_n(\omega,z)$. Then, $\mu_n\in M_{1,s}^{\ll}(\Omega\times U)$.
The case $n=\infty$ is different since $\rho_\infty(\omega) := {\hat{\pi}_\infty(\omega,-1)}/{\hat{\pi}_\infty(\omega,1)}$ satisfies $\mathbb{E}[\rho_\infty]=1$. By Jensen's inequality, $\mathbb{E}[\log\rho_\infty]<\log\mathbb{E}[\rho_\infty]=0$. Therefore, the walk under the environment kernel $\hat{\pi}_\infty$ is transient to the right, but the limiting velocity is zero. (See \cite{Solomon}.)
$M_1(\Omega)$ is weakly compact. There exists a subsequence $(\mathbb{Q}_{n_k})_{k\geq1}$ of $(\mathbb{Q}_n)_{n\geq1}$ that converges to some $\mathbb{Q}_\infty\in M_1(\Omega)$. Define $\mu_\infty\in M_1(\Omega\times U)$ by $\mathrm{d}\mu_\infty(\omega,z):=\mathrm{d}\mathbb{Q}_\infty(\omega)\hat{\pi}_\infty(\omega,z)$. Clearly, $\mu_{n_k}$ converges weakly to $\mu_\infty$. Also, $(\mu_\infty)^1=(\mu_\infty)^2=\mathbb{Q}_\infty$, i.e., $\mathbb{Q}_\infty$ is $\hat{\pi}_\infty$-invariant. However, since the walk under the environment kernel $\hat{\pi}_\infty$ is transient but not ballistic, $\mathbb{Q}_\infty$ is not absolutely continuous relative to $\mathbb{P}$. (See \cite{Bremont}.) Therefore, $\mu_\infty\not\in M_{1,s}^{\ll}(\Omega\times U)$. By (\ref{level2ratetilde}), $\mathfrak{I}(\mu_\infty)=\infty$. On the other hand, it is easy to see that $$\lim_{k\to\infty}\mathfrak{I}(\mu_{n_k})=\int\sum_{z\in U}\hat{\pi}_\infty(\omega,z)\log\frac{\hat{\pi}_\infty(\omega,z)}{\pi(0,z)}\mathrm{d}\mathbb{Q}_\infty(\omega)$$ which is finite by the uniform ellipticity assumption. This proves that $\mathfrak{I}$ is not lower semicontinuous.
\end{proof}
\begin{remark}
In the case of random walk on $\mathbb{Z}^d$ in an elliptic periodic environment, $\Omega$ has finitely many elements. Therefore, $M_1(\Omega\times\mathcal{R})$ is finite-dimensional. Ellipticity ensures that $\mathfrak{I}$ is finite on $M_1(\Omega\times\mathcal{R})$. Note that a convex function on a finite-dimensional space is continuous whenever it is finite. Hence, $\mathfrak{I}$ is continuous on $M_1(\Omega\times\mathcal{R})$.
\end{remark}
\section*{Appendix B}
\begin{proof} [Sketch of the proof of Lemma \ref{GRR}]
For every $F\in\mathcal{K}$, $y\in\mathbb{Z}^d$ and $\omega\in\Omega$, let $f(\omega,y):=\sum_{i=0}^{j-1}F(T_{y_i}\omega,y_{i+1}-y_i)$ where $(y_{i})_{i=0}^j$ is any sequence in $\mathbb{Z}^d$ with $y_o=0$, $y_j=y$ and $y_{i+1}-y_i\in\mathcal{R}$. The closed loop condition (given in Definition \ref{K}) ensures that $f:\Omega\times\mathbb{Z}^d\to\mathbb{R}$ is well defined. Extend $f$ to $\Omega\times\mathbb{R}^d$ via interpolation. For every $n\geq1$, define $g_n:\mathbb{R}^d\to\mathbb{R}$ by $g_n(t):=f(\omega,nt)/n$.
The crucial step is to show that $(g_n)_{n\geq1}$ is equicontinuous and hence compact. This is accomplished by estimating the modulus of continuity of $g_n$ from the moment condition in Definition \ref{K} via a theorem of Garsia, Rodemich and Rumsey (given in \cite{SV79}.) Once equicontinuity is established, the mean zero condition and the ergodic theorem are used to prove that $(g_n)_{n\geq1}$ converges uniformly to zero on bounded sets. This immediately implies the desired result. See Chapter 2 of \cite{jeffrey} for the complete proof.
\end{proof}
\section*{Appendix C}
\begin{proposition}
Suppose that $d=1$. For every $r\in\mathbb{R}$, $\mathbb{P}\left(\omega: E_o^\omega[\mathrm{e}^{r\tau_1}, \tau_1<\infty]<\infty\right)=1$ if and only if $\mathbb{P}\left(\omega: E_o^\omega[\mathrm{e}^{r\bar{\tau}_{-1}}, \bar{\tau}_{-1}<\infty]<\infty\right)=1$.
\end{proposition}
\begin{proof}
If $\mathbb{P}\left(\omega: E_o^\omega[\mathrm{e}^{r\tau_1}, \tau_1<\infty]<\infty\right)=1$, then there exists a $u_r:\Omega\times\mathbb{Z}\to\mathbb{R}^+$ that satisfies $u_r(\omega,0)=1$ and $$u_r(\omega,x)=\sum_{z\in\mathcal{R}}\pi(x,x+z)\mathrm{e}^ru_r(\omega,x+z)$$ for every $x\in\mathbb{Z}$ and $\mathbb{P}$-a.e.\ $\omega$. Therefore, $u_r(\omega,X_n)\mathrm{e}^{rn}$ is a martingale under $P_o^\omega$. By the optional stopping theorem,
\begin{align*}
1=u_r(\omega,0)&=E_o^\omega\left[u_r\left(\omega,X_{\bar{\tau}_{-1}\wedge\tau_x}\right)\mathrm{e}^{r(\bar{\tau}_{-1}\wedge\tau_x)}\right]\\
&\geq E_o^\omega\left[u_r\left(\omega,X_{\bar{\tau}_{-1}}\right)\mathrm{e}^{r\bar{\tau}_{-1}},\bar{\tau}_{-1}<\tau_x\right]\\
&\geq\inf_{-B\leq z<0}u_r(\omega,z)E_o^\omega\left[\mathrm{e}^{r\bar{\tau}_{-1}},\bar{\tau}_{-1}<\tau_x\right]
\end{align*} for every $x\geq1$. Since $\tau_x\to\infty$ as $x\to\infty$, the monotone convergence theorem gives $E_o^\omega[\mathrm{e}^{r\bar{\tau}_{-1}}, \bar{\tau}_{-1}<\infty]\leq\left(\inf_{-B\leq z<0}u_r(\omega,z)\right)^{-1}<\infty$. The proof of the other direction is similar.
\end{proof}
\end{document}
\begin{document}
\title[moduli spaces of semistable sheaves on plane quartics]
{on the geometry of the
moduli spaces of semistable sheaves supported on plane quartics}
\author{Jean-Marc Dr\'ezet \and Mario Maican}
\address{
Jean-Marc Dr\'ezet \\
Institut de Math\'ematiques \\
173 Rue du Chevaleret \\
F-75013 Paris, France \\
E-mail: [email protected]}
\address{
Mario Maican \\
1 University Circle \\
Western Illinois University \\
Macomb, IL 61455, USA \\
E-mail: [email protected]}
\begin{abstract}
We decompose each moduli space of semistable sheaves on the complex projective
plane with support of dimension one and degree four into locally closed
subvarieties, each subvariety being the good or geometric quotient of a set of
morphisms of locally free sheaves modulo a reductive or a nonreductive group.
We find locally free resolutions of length one of all these sheaves and
describe them.
\end{abstract}
\maketitle
\tableofcontents
\section{Introduction}
Let $V$ be a three dimensional vector space over $\mathbb C$, and $\mathbb{P}^2=\mathbb{P}(V)$ the
projective plane of lines in $V$. Let $\text{M}_{\mathbb{P}^2}(r,\chi)$ denote the moduli space of
semistable sheaves $\mathcal F$ on $\mathbb{P}^2$ with Hilbert polynomial $P_{\mathcal F}(t)=rt+\chi$.
The positive integer $r$ is the multiplicity of $\mathcal F$ while $\chi$ is its Euler
characteristic. The generic stable sheaves in this moduli space are the line
bundles of Euler characteristic $\chi$ on smooth plane curves of degree $r$. The
map sending $\mathcal F$ to the twisted sheaf $\mathcal F(1)$ gives an isomorphism between
$\text{M}_{\mathbb{P}^2}(r,\chi)$ and $\text{M}_{\mathbb{P}^2}(r,r+\chi)$, so we can restrict our attention to the case $0
< \chi \le r$. It is known from \cite{lepotier} that the spaces $\text{M}_{\mathbb{P}^2}(r,\chi)$ are
projective, irreducible, locally factorial, of dimension $r^2 +1$, and smooth at
all points given by stable sheaves.
It is easy to see that $\text{M}_{\mathbb{P}^2}(2,1)$ is isomorphic to the space of conics in $\mathbb{P}^2$
while $\text{M}_{\mathbb{P}^2}(2,2)$ is the good quotient modulo the action by conjugation
of the group $\mathcal GL(2,\mathbb C) \times \mathcal GL(2,\mathbb C)$ on the space of $2 \times
2$-matrices with entries in $V^*$ and nonzero determinant.
The case of multiplicity three is also well-understood. J.~Le Potier showed in
\cite{lepotier} that $\text{M}_{\mathbb{P}^2}(3,2)$ and $\text{M}_{\mathbb{P}^2}(3,1)$ are both isomorphic to the
universal cubic in $\mathbb{P}(V) \times \mathbb{P}(S^3V^*)$.
It was first noticed in \cite{maican-diplom} that $\text{M}_{\mathbb{P}^2}(3,2)$
is the geometric quotient of the set of injective morphisms \ \mM{\f:
\mathcal O(-2) \oplus \mathcal O(-1)\to 2\mathcal O} \ for which $\f_{12}$ and $\f_{22}$ are linearly
independent regarded as elements of $V^*$, modulo the action by conjugation of
the nonreductive algebraic group \ $\mathcal Aut(\mathcal O(-2) \oplus \mathcal O(-1)) \times
\mathcal Aut(2\mathcal O)$. The corresponding result for $\text{M}_{\mathbb{P}^2}(3,1)$ was established in
\cite{freiermuth-diplom} and \cite{freiermuth-trautmann}.
According to \cite{lepotier}
$\text{M}_{\mathbb{P}^2}(3,3)$ contains an open dense subset which is
a good quotient of the space of injective morphisms
$3 \mathcal O(-1) \to 3\mathcal O$ modulo the action by conjugation of
$\mathcal GL(3,\mathbb C)\times \mathcal GL(3,\mathbb C)$. The complement of this set is
isomorphic to the space of cubics $\mathbb{P}(S^3 V^*)$.
We summarize in the following table the facts about $\text{M}_{\mathbb{P}^2}(r,\chi)$, $r=1,2,3$,
that are known:
\begin{center}
\noindent \\
\begin{tabular}{|l|p{8cm}|}
\hline
\multicolumn{2}{|l|}{$\text{M}_{\mathbb{P}^2}(1,1)$} \\
\hline
\begin{tabular}{l}
$h^0(\mathcal F(-1))=0$ \\
$h^1(\mathcal F)=0$\\
$h^0(\mathcal F \otimes \mathcal Om^1(1))=1$
\end{tabular}
&
$0 \longrightarrow \mathcal O(-1) \longrightarrow \mathcal O \longrightarrow \mathcal F \longrightarrow 0$ \\
\hline
\end{tabular}
\end{center}
\begin{center}
\begin{tabular}{|l|p{8cm}|}
\hline
\multicolumn{2}{|l|}{$\text{M}_{\mathbb{P}^2}(2,1)$} \\
\hline
\begin{tabular}{l}
$h^0(\mathcal F(-1))=0$ \\
$h^1(\mathcal F)=0$\\
$h^0(\mathcal F \otimes \mathcal Om^1(1))=0$
\end{tabular}
&
$ 0 \longrightarrow \mathcal O(-2) \longrightarrow \mathcal O \longrightarrow \mathcal F \longrightarrow 0$ \\
\hline
\end{tabular}
\end{center}
\begin{center}
\begin{tabular}{|l|p{8cm}|}
\hline
\multicolumn{2}{|l|}{$\text{M}_{\mathbb{P}^2}(2,2)$} \\
\hline
\begin{tabular}{l}
$h^0(\mathcal F(-1))=0$ \\
$h^1(\mathcal F)=0$\\
$h^0(\mathcal F \otimes \mathcal Om^1(1))=2$
\end{tabular}
&
$0 \longrightarrow 2\mathcal O(-1) \longrightarrow 2\mathcal O \longrightarrow \mathcal F \longrightarrow 0$ \\
\hline
\end{tabular}
\end{center}
\begin{center}
\begin{tabular}{|l|p{8cm}|}
\hline
\multicolumn{2}{|l|}{$\text{M}_{\mathbb{P}^2}(3,1)$} \\
\hline
\begin{tabular}{l}
$h^0(\mathcal F(-1))=0$ \\
$h^1(\mathcal F)=0$\\
$h^0(\mathcal F \otimes \mathcal Om^1(1))=0$
\end{tabular}
&
$0 \longrightarrow 2\mathcal O(-2) \longrightarrow \mathcal O(-1) \oplus \mathcal O \longrightarrow \mathcal F \longrightarrow 0$ \\
\hline
\end{tabular}
\end{center}
\begin{center}
\begin{tabular}{|l|p{8cm}|}
\hline
\multicolumn{2}{|l|}{$\text{M}_{\mathbb{P}^2}(3,2)$} \\
\hline
\begin{tabular}{l}
$h^0(\mathcal F(-1))=0$ \\
$h^1(\mathcal F)=0$ \\
$h^0(\mathcal F \otimes \mathcal Om^1(1))=1$
\end{tabular}
&
$0 \longrightarrow \mathcal O(-2) \oplus \mathcal O(-1) \longrightarrow 2\mathcal O \longrightarrow \mathcal F \longrightarrow 0$ \\
\hline
\end{tabular}
\end{center}
\begin{center}
\begin{tabular}{|l|p{8cm}|}
\hline
\multicolumn{2}{|l|}{$\text{M}_{\mathbb{P}^2}(3,3)$} \\
\hline
\begin{tabular}{l}
$h^0(\mathcal F(-1))=0$ \\
$h^1(\mathcal F)=0$ \\
$h^0(\mathcal F \otimes \mathcal Om^1(1))=3$
\end{tabular}
&
$0 \longrightarrow 3\mathcal O(-1) \longrightarrow 3\mathcal O \longrightarrow \mathcal F \longrightarrow 0$ \\
\hline
\begin{tabular}{l}
$h^0(\mathcal F(-1))=1$ \\
$h^1(\mathcal F)=0$ \\
$h^0(\mathcal F \otimes \mathcal Om^1(1))=3$
\end{tabular}
&
$0 \longrightarrow \mathcal O(-2) \longrightarrow \mathcal O(1) \longrightarrow \mathcal F \longrightarrow 0$ \\
\hline
\end{tabular}
\end{center}
For each moduli space (except $\text{M}_{\mathbb{P}^2}(3,3)$) the left column indicates the
cohomological conditions verified by the corresponding sheaves. These sheaves
are isomorphic to cokernels of morphisms of locally free sheaves described in
the right column. The moduli space is isomorphic to the good quotient, modulo
the action of the appropriate group, of a certain open subset of the set of
these morphisms. The moduli space $\text{M}_{\mathbb{P}^2}(3,3)$ is the disjoint union of a dense
open subset described in the first line, and of a locally closed subset
described in the second line.
In this paper we will study the spaces $\text{M}_{\mathbb{P}^2}(4,\chi)$ for $1\leq\chi\leq 4$.
We will decompose each moduli space into locally closed subvarieties (which we
call {\em strata}) given by cohomological conditions and we will describe these
subvarieties as good or geometric quotients of spaces of morphisms. The
work of finding resolutions for sheaves $\mathcal F$ in $\text{M}_{\mathbb{P}^2}(4,\chi)$, apart from the
case $\chi=4$, $h^0(\mathcal F(-1))=1$, has already been carried out in \cite{maican}
and is summarized in the next table. Each stratum in $\text{M}_{\mathbb{P}^2}(4,\chi)$
described by the cohomological conditions from the left column of the table
below is isomorphic to the good quotient, modulo the action of the appropriate
group, of a certain open subset of the set of morphisms of locally free sheaves
from the middle column. The right column gives the codimension of the stratum.
\begin{center}
\noindent \\
\begin{tabular}{|l|p{9cm}|c|}
\hline
\multicolumn{3}{|l|}{$\text{M}_{\mathbb{P}^2}(4,1)$} \\
\hline
\begin{tabular}{l}
$h^0(\mathcal F(-1))=0$ \\
$h^1(\mathcal F)=0$\\
$h^0(\mathcal F \otimes \mathcal Om^1(1))=0$
\end{tabular}
&
$0 \to 3\mathcal O(-2) \to 2\mathcal O(-1) \oplus \mathcal O \to \mathcal F \to 0$ & 0\\
\hline
\end{tabular}
\end{center}
\begin{center}
\begin{tabular}{|l|p{9cm}|c|}
\hline
\begin{tabular}{l}
$h^0(\mathcal F(-1))=0$ \\
$h^1(\mathcal F)=1$\\
$h^0(\mathcal F \otimes \mathcal Om^1(1))=1$
\end{tabular}
&
$0 \to \mathcal O(-3) \oplus \mathcal O(-1) \to 2\mathcal O \to \mathcal F \to 0$ & 2\\
\hline
\end{tabular}
\end{center}
\begin{center}
\begin{tabular}{|l|p{9cm}|c|}
\hline
\multicolumn{3}{|l|}{$\text{M}_{\mathbb{P}^2}(4,2)$} \\
\hline
\begin{tabular}{l}
$h^0(\mathcal F(-1))=0$ \\
$h^1(\mathcal F)=0$\\
$h^0(\mathcal F \otimes \mathcal Om^1(1))=0$
\end{tabular}
&
$0 \to 2\mathcal O(-2) \to 2\mathcal O \to \mathcal F \to 0$ & 0\\
\hline
\begin{tabular}{l}
$h^0(\mathcal F(-1))=0$ \\
$h^1(\mathcal F)=0$\\
$h^0(\mathcal F \otimes \mathcal Om^1(1))=1$
\end{tabular}
&
$0 \to 2\mathcal O(-2) \oplus \mathcal O(-1) \to \mathcal O(-1) \oplus 2\mathcal O \to \mathcal F \to 0$ & 1\\
\hline
\begin{tabular}{l}
$h^0(\mathcal F(-1))=1$ \\
$h^1(\mathcal F)=1$\\
$h^0(\mathcal F \otimes \mathcal Om^1(1))=3$
\end{tabular}
&
$0 \to \mathcal O(-3) \to \mathcal O(1) \to \mathcal F \to 0$ & 3\\
\hline
\end{tabular}
\end{center}
\begin{center}
\begin{tabular}{|l|p{9cm}|c|}
\hline
\multicolumn{3}{|l|}{$\text{M}_{\mathbb{P}^2}(4,3)$} \\
\hline
\begin{tabular}{l}
$h^0(\mathcal F(-1))=0$ \\
$h^1(\mathcal F)=0$\\
$h^0(\mathcal F \otimes \mathcal Om^1(1))=2$
\end{tabular}
&
$0 \to \mathcal O(-2) \oplus 2\mathcal O(-1) \to 3\mathcal O \to \mathcal F \to 0$ & 0\\
\hline
\begin{tabular}{l}
$h^0(\mathcal F(-1))=1$ \\
$h^1(\mathcal F)=0$\\
$h^0(\mathcal F \otimes \mathcal Om^1(1))=3$
\end{tabular}
&
$0 \to 2\mathcal O(-2) \to \mathcal O(-1) \oplus \mathcal O(1) \to \mathcal F \to 0$ & 2\\
\hline
\end{tabular}
\end{center}
\begin{center}
\begin{tabular}{|l|p{9cm}|c|}
\hline
\multicolumn{3}{|l|}{$\text{M}_{\mathbb{P}^2}(4,4)$} \\
\hline
\begin{tabular}{l}
$h^0(\mathcal F(-1))=0$ \\
$h^1(\mathcal F)=0$\\
$h^0(\mathcal F \otimes \mathcal Om^1(1))=4$
\end{tabular}
&
$0 \to 4\mathcal O(-1) \to 4\mathcal O \to \mathcal F \to 0$ & 0\\
\hline
\begin{tabular}{l}
$h^0(\mathcal F(-1))=1$ \\
$h^1(\mathcal F)=0$\\
$h^0(\mathcal F \otimes \mathcal Om^1(1))=4$
\end{tabular}
&
$0 \to \mathcal O(-2) \oplus \mathcal O(-1) \to \mathcal O \oplus \mathcal O(1) \to \mathcal F \to 0$ & 1\\
\hline
\end{tabular}
\end{center}
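As a consistency check, the Hilbert polynomial $P_{\mathcal F}(t)=4t+\chi$ can be recovered from each of the resolutions above. For instance, for the generic stratum of $\text{M}_{\mathbb{P}^2}(4,4)$,
\begin{displaymath}
P_{\mathcal F}(t)=4\chi(\mathcal O(t))-4\chi(\mathcal O(t-1))=4\cdot\frac{(t+1)(t+2)}{2}-4\cdot\frac{t(t+1)}{2}=4t+4,
\end{displaymath}
and, similarly, $2\cdot\frac{t(t+1)}{2}+\frac{(t+1)(t+2)}{2}-3\cdot\frac{t(t-1)}{2}=4t+1$ for the generic stratum of $\text{M}_{\mathbb{P}^2}(4,1)$.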
One of our difficulties will be to show that the natural maps from the spaces of
morphisms of sheaves to the subsets in the moduli spaces are good or geometric
quotient maps. At \ref{4.E} and \ref{4.E3} the difficulty is compounded by the
fact that the group is not reductive and we do not know a priori the existence
of a good quotient. A similar situation was considered by the first author in
\cite{drezet-1991}, cf. the proof of theorem D. Our method, exhibited in the
proof of \ref{3.B}, is reminiscent of \cite{drezet-1991} in that we use an
already established quotient modulo a reductive group, but differs in that we
do not have a diagram as at 5.4 in \cite{drezet-1991}, only local morphisms.
These local morphisms are constructed using the relative Beilinson spectral
sequence.
The second chapter of this paper contains some tools that are used in the next
chapters to study the moduli spaces. Chapters 3, 4 and 5 are devoted to
$\text{M}_{\mathbb{P}^2}(4,1)$ and $\text{M}_{\mathbb{P}^2}(4,3)$, to $\text{M}_{\mathbb{P}^2}(4,2)$, and to $\text{M}_{\mathbb{P}^2}(4,4)$, respectively.
Now we summarize the descriptions and properties of the moduli spaces
$\text{M}_{\mathbb{P}^2}(4,\chi)$ that are contained in this paper:
\vskip 1cm
\mathcal Ssect{The moduli spaces $\text{M}_{\mathbb{P}^2}(4,1)$ and $\text{M}_{\mathbb{P}^2}(4,3)$}{m4143}
These moduli spaces are isomorphic: $\text{M}_{\mathbb{P}^2}(4,1)\simeq\text{M}_{\mathbb{P}^2}(4,-1)$ by duality (cf.
\ref{dual}) and $\text{M}_{\mathbb{P}^2}(4,-1)\simeq\text{M}_{\mathbb{P}^2}(4,3)$ (the isomorphism sending the point
corresponding to a sheaf $\mathcal F$ to the point corresponding to $\mathcal F(1)$). So we will
treat the moduli spaces $\text{M}_{\mathbb{P}^2}(4,1)$ and $\text{M}_{\mathbb{P}^2}(4,3)$ together in chapter \ref{M4143}.
These varieties are smooth because in this case semi-stability is equivalent to
stability. We show that, as in the preceeding table, $\text{M}_{\mathbb{P}^2}(4,3)$ is the disjoint
union of two strata : $\text{M}_{\mathbb{P}^2}(4,3)=X_0(4,3)\amalg X_1(4,3)$, where $X_0(4,3)$ is an
open subset and $X_1(4,3)$ a closed smooth subvariety of codimension 2. These
strata correspond to sheaves having the cohomological conditions given in the
first column of the preceeding table. For example the open stratum $X_0(4,3)$
contains the points representing the sheaves $\mathcal F$ such that \
$h^0(\mathcal F(-1))=h^1(\mathcal F)=0$, $h^0(\mathcal F\otimesimes\mathcal Omega^1(1))=2$.
The sheaves in the open stratum $X_0(4,3)$ are isomorphic to cokernels of
injective morphisms
$$\mathcal O(-2)\oplus 2\mathcal O(-1)\longrightarrow 3\mathcal O \ \ .$$
Let $W=\text{Hom}(\mathcal O(-2)\oplus 2\mathcal O(-1),3\mathcal O)$, on which the nonreductive
algebraic group
$$G=\big(\mathcal Aut(\mathcal O(-2)\oplus 2\mathcal O(-1))\times\mathcal Aut(3\mathcal O)\big)/\mathbb C^*$$
acts in the obvious way. Following \cite{drezet-trautmann} we describe in \ref{MSM}
and \ref{open1} an open $G$--invariant subset $\mathbf{W}$ of $W$ such that
there exists a geometric quotient $\mathbf{W}/G$ which is a smooth projective
variety. Then $X_0(4,3)$ is canonically isomorphic to the open subset of
$\mathbf{W}/G$ corresponding to injective morphisms.
The sheaves in the closed stratum $X_1(4,3)$ are isomorphic to cokernels of
injective morphisms
$$2\mathcal O(-2)\longrightarrow\mathcal O(-1)\oplus\mathcal O(1) \ ,$$
and $X_1(4,3)$ is isomorphic to a geometric quotient of a suitable open subset
of \hfil\break$\text{Hom}(2\mathcal O(-2), \mathcal O(-1)\oplus\mathcal O(1))$ by the nonreductive group
$$\big(\mathcal Aut(2\mathcal O(-2))\times\mathcal Aut(\mathcal O(-1)\oplus\mathcal O(1))\big)/\mathbb C^* \ .$$
Similarly $\text{M}_{\mathbb{P}^2}(4,1)$ is the disjoint union of the open subset $X_0(4,1)$ and the
closed smooth subvariety $X_1(4,1)$. Of course the canonical isomorphism
$\text{M}_{\mathbb{P}^2}(4,1)\simeq \text{M}_{\mathbb{P}^2}(4,3)$ induces isomorphisms of the strata.
We can give a precise description of the sheaves that appear in the strata. We
define a closed subvariety $\widetilde{Y}$ of $X_0(4,3)$ corresponding to
sheaves $\mathcal F$ such that there is a non trivial extension
\[0\longrightarrow\mathcal O_\ell(-1)\longrightarrow\mathcal F\longrightarrow\mathcal O_X(1)\longrightarrow 0\]
where $\ell$ is a line and $X$ a cubic. The open subset
$X_0(4,3)\setminus\widetilde{Y}$ consists of kernels of surjective morphisms
$\mathcal O_C(2)\to\mathcal O_Z$, where $C$ is a quartic and $Z$ a finite subscheme of $\mathbb{P}^2$
of length 3 not contained in any line. The sheaves in $X_1(4,1)$ are the kernels of
the surjective morphisms $\mathcal O_C(1)\to\mathcal O_P$, where $C$ is a quartic and $P$ a
closed point of $C$. The sheaves in the other strata can be described similarly
using the isomorphism $\text{M}_{\mathbb{P}^2}(4,1)\simeq \text{M}_{\mathbb{P}^2}(4,3)$.
\end{sub}
\vskip 1cm
\mathcal Ssect{The moduli space $\text{M}_{\mathbb{P}^2}(4,2)$}{m42}
It is treated in chapter 4.
Here we have 3 strata: an open subset $X_0$, a locally closed smooth
subvariety $X_1$ of codimension 1, and a closed smooth subvariety
$X_2$ of codimension 3 which is contained in the closure of $X_1$. These strata are
defined by cohomological conditions on the corresponding sheaves, as indicated
in the preceding table.
The smallest stratum $X_2$ is the set of sheaves of the form $\mathcal O_C(1)$, where
$C$ is a quartic. Hence $X_2$ is isomorphic to $\mathbb{P}(S^4V^*)$.
The stratum $X_0$ contains the cokernels of the injective semi-stable morphisms
$$2\mathcal O(-2)\longrightarrow 2\mathcal O \ .$$
Let $N(6,2,2)$ denote the moduli space of semi-stable morphisms $2\mathcal O(-2)\to 2\mathcal O$
(cf. \ref{KM}). The closed subset corresponding to non-injective morphisms is
naturally identified with $\mathbb{P}^2\times\mathbb{P}^2$, hence $X_0$ is isomorphic to
$N(6,2,2)\setminus(\mathbb{P}^2\times\mathbb{P}^2)$.
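For orientation, a dimension count: the space of morphisms $2\mathcal O(-2)\to2\mathcal O$ is the space of $2\times2$-matrices of quadratic forms, of dimension $6\cdot2\cdot2=24$, and the group $\big(\mathcal GL(2,\mathbb C)\times\mathcal GL(2,\mathbb C)\big)/\mathbb C^*$ acting on it has dimension $7$, so one expects $\dim N(6,2,2)=24-7=17$ (the stabilizers of stable points being trivial), in agreement with $\dim\text{M}_{\mathbb{P}^2}(4,2)=4^2+1=17$ and with the fact that $X_0$ is open in both spaces.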
The sheaves of the second stratum $X_1$ are the cokernels of injective morphisms
$$2\mathcal O(-2)\oplus\mathcal O(-1)\longrightarrow\mathcal O(-1)\oplus 2\mathcal O$$
corresponding to matrices
$\f= \begin{pmatrix}
X_1 & X_2 & 0 \\
\star & \star & Y_1 \\
\star & \star & Y_2
\end{pmatrix}$
where $X_1,X_2 \in V^*$ are linearly independent one-forms, and likewise
$Y_1,Y_2 \in V^*$ are linearly independent.
Let
$$G=\mathcal Aut(2\mathcal O(-2) \oplus \mathcal O(-1)) \times \mathcal Aut(\mathcal O(-1) \oplus 2\mathcal O) \ .$$
This nonreductive group acts naturally on the variety $W_1$ of the preceding
matrices and we prove that there is a geometric quotient $W_1/G$ which is
isomorphic to $X_1$. Using this description we conclude that the generic sheaf
in $X_1$ is of the form $\mathcal O_C(1)(P-Q)$, where $C$ is a smooth quartic and $P$,
$Q$ are distinct points of $C$.
Let $X=X_0\cup X_1$. We prove that the sheaves in $X$ are precisely the
cokernels of the injective morphisms
$$2\mathcal O(-2)\oplus\mathcal O(-1)\longrightarrow\mathcal O(-1)\oplus 2\mathcal O$$
which are $G$--semi-stable with respect to the
polarization $(\frac{1-\mu}{2},\mu,\mu,\frac{1-\mu}{2})$, where $\mu$ is a
rational number such that $\frac{1}{3}<\mu<\frac{1}{2}$ (cf.
\cite{drezet-trautmann}, \ref{MSM}). Let $W$ be the set of such injective
$G$--semi-stable morphisms. We prove that there is a good quotient $W//G$ which
is isomorphic to $X$. The existence of this quotient cannot be obtained from
the results of \cite{drezet-trautmann} and \cite{dr3b}.
The inclusion $X_0\subset N(6,2,2)$ can be extended to a morphism \
$\delta:X\to N(6,2,2)$. Let $\widetilde{\mathbf{N}}$ denote the blowing-up of
$N(6,2,2)$ along $\mathbb{P}^2\times\mathbb{P}^2$. We prove that $X$ is naturally isomorphic to
an open subset of $\widetilde{\mathbf{N}}$ and that $\delta$ can be identified with
the restriction to $X$ of the natural projection $\widetilde{\mathbf{N}}\to
N(6,2,2)$. The complement of $X$ in $\widetilde{\mathbf{N}}$ is contained in
the inverse image of the diagonal of $\mathbb{P}^2\times\mathbb{P}^2\subset N(6,2,2)$, and for
every $P\in\mathbb{P}^2$ the inverse image of $(P,P)$ contains exactly one point of
$\widetilde{\mathbf{N}}\setminus X$. Hence $\widetilde{\mathbf{N}}\setminus X$
is isomorphic to $\mathbb{P}^2$, whereas $\text{M}_{\mathbb{P}^2}(4,2)\setminus X$ is isomorphic to
$\mathbb{P}(S^4V^*)$.
\end{sub}
\vskip 1cm
\mathcal Ssect{The moduli space $\text{M}_{\mathbb{P}^2}(4,4)$}{m44}
It has been completely described by J. Le Potier in \cite{lepotier}.
Let $N(3,4,4)$ be the moduli space of semi-stable morphisms $4\mathcal O(-1)\to 4\mathcal O$
(cf. \ref{KM}). Then $\text{M}_{\mathbb{P}^2}(4,4)$ is isomorphic to the blowing-up of $N(3,4,4)$
along a subvariety isomorphic to $\mathbb{P}(V^*)$.
The open stratum $X_0$ of $\text{M}_{\mathbb{P}^2}(4,4)$ contains the sheaves which are cokernels of
injective morphisms $4\mathcal O(-1)\to 4\mathcal O$ (which are then semi-stable). Our
contribution to the study of $\text{M}_{\mathbb{P}^2}(4,4)$ is a description of the complement \
$X_1=\text{M}_{\mathbb{P}^2}(4,4)\setminus X_0$, i.e.\ of the exceptional divisor in $\text{M}_{\mathbb{P}^2}(4,4)$. We
show that the sheaves in $X_1$ are precisely the cokernels of the injective
morphisms
$$\mathcal O(-2)\oplus\mathcal O(-1)\longrightarrow\mathcal O\oplus\mathcal O(1)$$
such that $\phi_{12}\not=0$. Let \ $W=\text{Hom}(\mathcal O(-2)\oplus\mathcal O(-1),\mathcal O\oplus\mathcal O(1))$,
on which the nonreductive group
$$G=\big(\mathcal Aut(\mathcal O(-2)\oplus\mathcal O(-1))\times\mathcal Aut(\mathcal O\oplus\mathcal O(1))\big)$$
acts. Let $W_1$ be the set of $G$-semi-stable points of $W$ with respect to a
polarization $(\lambda_1,\lambda_2,\mu_1,\mu_2)$ such that
$\lambda_1=\mu_2<\frac{1}{4}$ (cf. \ref{MSM}). According to
\cite{drezet-trautmann} there exists a good quotient $W_1//G$ which is a
projective variety. We prove that $X_1$ and $W_1//G$ are canonically isomorphic.
\end{sub}
\vskip 1cm
\mathcal Ssect{Clifford's theorem}{clif}
To show that there are no sheaves in $\text{M}_{\mathbb{P}^2}(4,\chi)$ other than those from the
table we prove at \ref{3.E}, \ref{4.B} and \ref{5.F} the following cohomology
estimates:
\noindent \\
\emph{For any sheaf $\mathcal F$ in $\text{M}_{\mathbb{P}^2}(4,0)$ or in $\text{M}_{\mathbb{P}^2}(4,1)$ we have $h^1(\mathcal F) \le
1$.}
\noindent \\
\emph{For any sheaf $\mathcal F$ in $\text{M}_{\mathbb{P}^2}(4,2)$ we have $h^1(\mathcal F)=0$, unless $\mathcal F \simeq
\mathcal O_C(1)$ for a quartic $C \subset \mathbb{P}^2$.}
\noindent \\
For generic sheaves in $\text{M}_{\mathbb{P}^2}(4,\chi)$ the above estimates already follow from
Clifford's theorem. Indeed, a generic sheaf in $\text{M}_{\mathbb{P}^2}(r,\chi)$ is a line bundle
supported on a smooth curve of degree $r$. Clifford's theorem, cf.
\cite{griffiths-harris} p. 251, states that, if $\mathcal L$ is a line bundle on a
compact Riemann surface $S$ corresponding to an effective divisor and such that
$h^1(\mathcal L)>0$, then we have the inequality
\begin{displaymath}
h^0(\mathcal L) \le 1 + \frac{\text{deg}(\mathcal L)}{2},
\end{displaymath}
with equality only if $\mathcal L = \mathcal O_S$ or $\mathcal L = \omega_S$ or $S$ is hyperelliptic.
\noindent \\
If $\mathcal L$ has Hilbert polynomial $P(t)=rt+\chi$, the Riemann-Roch theorem and the
genus formula give
\begin{displaymath}
\text{deg}(\mathcal L)= g(S)-1+\chi = \frac{(r-1)(r-2)}{2}-1+\chi = \frac{r(r-3)}{2} +
\chi
\end{displaymath}
and the above inequality takes the form
\begin{displaymath}
h^0(\mathcal L) \le 1 + \frac{\chi}{2} + \frac{r(r-3)}{4}.
\end{displaymath}
Taking $r=4$ and $0 \le \chi < 4$, and noting that equality in Clifford's
theorem can occur only for non-generic sheaves, we conclude that for a generic
sheaf $\mathcal F$ in $\text{M}_{\mathbb{P}^2}(4,\chi)$ we have the relation
\begin{displaymath}
h^0(\mathcal F) < 2 + \frac{\chi}{2}.
\end{displaymath}
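Since $\mathcal F$ has support of dimension one, $h^0(\mathcal F)-h^1(\mathcal F)=\chi$, so the last relation can be rewritten as
\begin{displaymath}
h^1(\mathcal F) < 2 - \frac{\chi}{2},
\end{displaymath}
which gives $h^1(\mathcal F)\leq1$ for $\chi=0,1$ and $h^1(\mathcal F)=0$ for $\chi=2,3$.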
This yields \ref{3.E}, \ref{4.B} and \ref{5.F} for generic sheaves. What we
prove in this paper is that, in fact, Clifford's theorem is true for all sheaves
in $\text{M}_{\mathbb{P}^2}(4,\chi)$, $0\leq\chi<4$. By inspecting the first table from above we see
that Clifford's theorem is also true for all sheaves in $\text{M}_{\mathbb{P}^2}(r,\chi)$, $r=1,2,3$,
$0\leq \chi<r$. There is thus enough evidence to suggest that the following
``generalized Clifford's theorem'' holds:
\noindent \\
{\bf Conjecture:} {\em Let $\mathcal F$ be a semistable sheaf on $\mathbb{P}^2$ with Hilbert
polynomial $P(t)=rt+\chi$, where $r \ge 1$ and $0\leq\chi<r$ are integers.
If $h^1(\mathcal F)>0$, then we have the inequality
\begin{displaymath}
h^0(\mathcal F) \ \leq \ 1 + \frac{\chi}{2} + \frac{r(r-3)}{4}
\end{displaymath}
with equality only in the following two cases :
(i) $r=3$, $\chi=0$ and $\mathcal F=\mathcal O_C$ for some cubic $C$.
(ii) $r=4$, $\chi=2$ and $\mathcal F=\mathcal O_C(1)$ for some quartic $C$.}
\end{sub}
\vskip 1.4cm
\section{Preliminaries}
\mathcal Ssect{Duality}{dual}
For a sheaf $\mathcal F$ on $\mathbb{P}^2$ with support of dimension one we will consider the
dual sheaf \hfil\break$\mathcal F^D={\mathcal Ext}^1(\mathcal F,\omega_{\mathbb{P}^2})$. According to
\cite{maican-duality} we have the following duality result:
\mathcal SEP
\begin{subsub}\label{2.A}{\bf Theorem : }
The map $\xymatrix{\mathcal F \ar@{|-{>}}[r]& \mathcal F^D}$ gives an isomorphism \hfil\break
$\text{M}_{\mathbb{P}^2}(r,\chi) \stackrel{\simeq}{\longrightarrow}\text{M}_{\mathbb{P}^2}(r,-\chi)$.
\end{subsub}
\mathcal SEP
For a semistable sheaf $\mathcal F$ on $\mathbb{P}^2$ with support of dimension one Serre
duality gives the relations
\mathcal SEP
\begin{subsub}\label{2.B}{\bf Proposition : }
We have \ $h^i(\mathcal F \otimes \mathcal Om^j(j))=h^{1-i}(\mathcal F^D \otimes
\mathcal Om^{2-j}(3-j))$, in particular \ $h^i(\mathcal F)=h^{1-i}(\mathcal F^D)$.
\end{subsub}
\mathcal SEP
\begin{subsub}\label{2.C}{\bf Proposition : }
Let $\mathcal F$ be a semistable sheaf on $\mathbb{P}^2$ with Hilbert polynomial\hfil\break
$P_\mathcal F(t)=rt+\chi$. Then we have \ $h^0(\mathcal F(i))=0$ \ for \ ${\displaystyle i <
\frac{3-r}{2}- \frac{\chi}{r}}$ , and \ $h^1(\mathcal F(i))=0$ \ for \ ${\displaystyle i
> \frac{r-3}{2}-\frac{\chi}{r}}$.
\end{subsub}
\mathbb{P}roof
Let $D$ be the schematic support of $\mathcal F$ (defined by the associated Fitting
ideal). It is a curve of degree $r$ and $\mathcal F$ can be viewed as a sheaf on $D$.
Assume that $h^0(\mathcal F(i))>0$. Then there is a nonzero morphism $\mathcal O\to\mathcal F(i)$,
inducing a nonzero morphism $\sigma:\mathcal O_D\to\mathcal F(i)$. Let $\mathcal I=\ker(\sigma)$. We
claim that $\sigma$ factors through an injective morphism $\mathcal O_C \to \mathcal F(i)$,
for a curve $C\subset\mathbb{P}^2$ contained in $D$. Let $f\in H^0(\mathcal O(r))$ be an
equation of $D$. According to 6.7 in \cite{maican}, there is a polynomial
$g$ dividing $f$ such that $\mathcal I$ is contained in the ideal sheaf $\mathcal G$ in $\mathcal O_D$
defined by $g$ and that $\mathcal G/\mathcal I$ is supported on finitely many points. Since
$\mathcal F(i)$ has no zero dimensional torsion, $\mathcal G/\mathcal I$ is mapped to zero in $\mathcal F(i)$,
hence $\mathcal G=\mathcal I$ and we may take for $C$ the curve defined by $g$.
Let $d=\deg(C)\leq r$. From the semistability of $\mathcal F(i)$ we get
\begin{displaymath}
\frac{3-d}{2} = p(\mathcal O_C) \ \le \ p(\mathcal F(i)) = \frac{\chi}{r} +i, \quad \text{hence}
\quad
\frac{3-r}{2} - \frac{\chi}{r} \le i.
\end{displaymath}
This proves the first part of proposition \ref{2.C}. The second part follows
from the first and \ref{2.B}, \ref{2.A}.
$\Box$
\end{sub}
\vskip 1cm
\mathcal Ssect{Beilinson spectral sequence}{beil}
For every coherent sheaf $\mathcal F$ on $\mathbb{P}^2$ there is a free monad, called the
{\em Beilinson free monad}, with middle cohomology $\mathcal F$:
\begin{displaymath}
0 \longrightarrow \mathbb CC^{-2} \longrightarrow \mathbb CC^{-1} \longrightarrow \mathbb CC^0 \longrightarrow \mathbb CC^1 \longrightarrow \mathbb CC^2 \longrightarrow 0,
\end{displaymath}
\begin{displaymath}
\mathbb CC^i = \bigoplus_{0 \le j \le 2} \text{H}^{i+j}(\mathcal F \otimes \mathcal Om^j(j)) \otimes \mathcal O(-j).
\end{displaymath}
All maps \ $\text{H}^{i+j}(\mathcal F \otimes \mathcal Om^j(j)) \otimes \mathcal O(-j) \longrightarrow \text{H}^{i+j+1}(\mathcal F
\otimes \mathcal Om^j(j)) \otimes \mathcal O(-j)$ \ in the monad are zero. For sheaves with
support of dimension one the Beilinson free monad takes the form
\setcounter{subsub}{1}
\begin{equation}\label{2.E}
0 \longrightarrow \mathbb CC^{-2} \longrightarrow \mathbb CC^{-1} \longrightarrow \mathbb CC^0 \longrightarrow \mathbb CC^1 \longrightarrow 0,
\end{equation}
\begin{align*}
\mathbb CC^{-2} & = \text{H}^0(\mathcal F(-1)) \otimes \mathcal O(-2), \\
\mathbb CC^{-1} & = \big(\text{H}^0(\mathcal F \otimes \mathcal Om^1(1)) \otimes \mathcal O(-1) \big) \oplus \big(
\text{H}^1(\mathcal F(-1))\otimes \mathcal O(-2)\big) , \\
\mathbb CC^0 & = \big(\text{H}^0(\mathcal F) \otimes \mathcal O \big) \oplus\big(\text{H}^1(\mathcal F \otimes \mathcal Om^1(1))
\otimes\mathcal O(-1)\big) , \\
\mathbb CC^1 & = \text{H}^1(\mathcal F) \otimes \mathcal O.
\end{align*}
Dualizing {(\ref{2.E})} we get a free monad for $\mathcal F^D$:
\setcounter{subsub}{2}
\begin{equation}\label{2.F}
0 \longrightarrow \mathbb CC^{-2}_D \longrightarrow \mathbb CC^{-1}_D \longrightarrow \mathbb CC^0_D \longrightarrow \mathbb CC^1_D \longrightarrow 0,
\end{equation}
\begin{displaymath}
\mathbb CC^i_D = {\mathcal Hom}(\mathbb CC^{-1-i},\omega_{\mathbb{P}^2}).
\end{displaymath}
For every coherent sheaf $\mathcal F$ on $\mathbb{P}^2$ there is a spectral sequence of sheaves,
called the {\em Beilinson spectral sequence}, which converges to $\mathcal F$ in degree
zero and to 0 in degree nonzero. Its first term, $\mathcal EE^1(\mathcal F)$, is given by
\begin{displaymath}
\mathcal EE^1_{ij} = \text{H}^j (\mathcal F \otimes \mathcal Om^{-i}(-i)) \otimes \mathcal O(i).
\end{displaymath}
If $\mathcal F$ is supported on a curve, the relevant part of $\mathcal EE^1(\mathcal F)$ is exhibited
in the following tableau:
\setcounter{subsub}{3}
\begin{equation}\label{2.G}
\end{equation}
$$\xymatrix@R=12pt{
\text{H}^1(\mathcal F(-1))\otimes\mathcal O(-2)\ar@{=}[d] & \text{H}^1(\mathcal F\otimes\mathcal Om^1(1))\otimes\mathcal O(-1)\ar@{=}[d]
& \text{H}^1(\mathcal F)\otimes\mathcal O\ar@{=}[d]\\
\mathcal EE_{-2,1}^1\ar[r]^-{\f_1} & \mathcal EE_{-1,1}^1\ar[r]^-{\f_2} & \mathcal EE_{01}^1
}$$
$$\xymatrix@R=12pt{
\mathcal EE_{-2,0}^1\ar[r]^-{\f_3}\ar@{=}[d] & \mathcal EE_{-1,0}^1\ar[r]^-{\f_4}\ar@{=}[d] &
\mathcal EE_{00}^1\ar@{=}[d]\\
\text{H}^0(\mathcal F(-1))\otimes\mathcal O(-2) & \text{H}^0(\mathcal F\otimes\mathcal Om^1(1))\otimes\mathcal O(-1)
& \text{H}^0(\mathcal F)\otimes\mathcal O
}
$$
\mathcal SEP
All the other $\mathcal EE^1_{ij}$ are zero. The relevant part of $\mathcal EE^2$
is
\begin{displaymath}
\xymatrix
{
\mathcal EE^2_{-2,1} = \mathcal Ker(\f_1) \ar[rrd]^{\f_5} & \mathcal EE^2_{-1,1} = \mathcal Ker(\f_2)/\mathcal Im(\f_1)
& \mathcal EE^2_{01} =\mathbb Coker(\f_2) \\
\mathcal EE^2_{-2,0} = \mathcal Ker(\f_3) & \mathcal EE^2_{-1,0} = \mathcal Ker(\f_4)/\mathcal Im(\f_3) & \mathcal EE^2_{00} =
\mathbb Coker(\f_4)
}.
\end{displaymath}
All the other $\mathcal EE^2_{ij}$ are zero. The relevant part of $\mathcal EE^3$ is
\begin{displaymath}
\xymatrix
{
\mathcal EE^3_{-2,1} = \mathcal Ker(\f_5) & \mathcal EE^3_{-1,1} = \mathcal Ker(\f_2)/\mathcal Im(\f_1) & \mathcal EE^3_{01}
=\mathbb Coker(\f_2) \\
\mathcal EE^3_{-2,0} = \mathcal Ker(\f_3) & \mathcal EE^3_{-1,0} = \mathcal Ker(\f_4)/\mathcal Im(\f_3) & \mathcal EE^3_{00} =
\mathbb Coker(\f_5)
}.
\end{displaymath}
All the maps in $\mathcal EE^3(\mathcal F)$ are zero. This shows that $\mathcal EE^3=\mathcal EE^{\infty}$,
hence all the terms in $\mathcal EE^3$, except, possibly, $\mathcal EE^3_{00}$ and
$\mathcal EE^3_{-1,1}$, are zero. Moreover, there is an exact sequence
\begin{displaymath}
0 \longrightarrow \mathbb Coker(\f_5) \longrightarrow \mathcal F \longrightarrow \mathcal Ker(\f_2)/\mathcal Im(\f_1) \longrightarrow 0.
\end{displaymath}
We conclude that $\f_2$ is surjective and that there are exact sequences
\setcounter{subsub}{4}
\begin{equation}\label{2.H}
0 \longrightarrow \text{H}^0(\mathcal F(-1)) \otimes \mathcal O(-2) \stackrel{\f_3}{\longrightarrow} \text{H}^0 (\mathcal F \otimes
\mathcal Om^1(1)) \otimes \mathcal O(-1) \stackrel{\f_4}{\longrightarrow} \text{H}^0(\mathcal F) \otimes \mathcal O
\end{equation}
$$\longrightarrow \mathbb Coker(\f_4) \longrightarrow 0,$$
\setcounter{subsub}{5}
\begin{equation}\label{2.I}
0 \longrightarrow \mathcal Ker(\f_1) \stackrel{\f_5}{\longrightarrow} \mathbb Coker(\f_4) \longrightarrow \mathcal F \longrightarrow
\mathcal Ker(\f_2)/\mathcal Im(\f_1) \longrightarrow 0.
\end{equation}
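For instance, if $\mathcal F$ gives a point in the open stratum of $\text{M}_{\mathbb{P}^2}(4,3)$, so that $h^0(\mathcal F(-1))=0$, $h^1(\mathcal F)=0$ and $h^0(\mathcal F\otimes\mathcal Om^1(1))=2$, then $h^1(\mathcal F(-1))=1$ (because $\chi(\mathcal F(-1))=P_{\mathcal F}(-1)=-1$) and $h^1(\mathcal F\otimes\mathcal Om^1(1))=0$ (because a Riemann--Roch computation gives $\chi(\mathcal F\otimes\mathcal Om^1(1))=2$). In this case (\ref{2.H}) and (\ref{2.I}) become
\begin{displaymath}
0 \longrightarrow 2\mathcal O(-1) \stackrel{\f_4}{\longrightarrow} 3\mathcal O \longrightarrow \mathbb Coker(\f_4) \longrightarrow 0, \qquad
0 \longrightarrow \mathcal O(-2) \stackrel{\f_5}{\longrightarrow} \mathbb Coker(\f_4) \longrightarrow \mathcal F \longrightarrow 0,
\end{displaymath}
and, since $\f_5$ lifts to a morphism $\mathcal O(-2)\to3\mathcal O$ (as $\text{Ext}^1(\mathcal O(-2),\mathcal O(-1))=0$), one recovers a resolution $0 \to \mathcal O(-2)\oplus2\mathcal O(-1) \to 3\mathcal O \to \mathcal F \to 0$ as in the table from the introduction.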
\vskip 0.6cm
Let $S$ be a scheme over $\mathbb C$. For every coherent sheaf $\mathcal F$ on $\mathbb{P}^2 \times S$
there is a spectral sequence of sheaves on $\mathbb{P}^2 \times S$, called the relative
Beilinson spectral sequence, which converges to $\mathcal F$ in degree zero and to 0 in
degree nonzero. Its $\mathcal EE^1$-term is given by
\begin{displaymath}
\mathcal EE^1_{ij} = \text{R}^j_{p_*}(\mathcal F \otimes \mathcal Om^{-i}(-i)) \boxtimes \mathcal O_{\mathbb{P}^2}(i).
\end{displaymath}
Here \ $p: \mathbb{P}^2 \times S \to S$ \ is the projection onto the second component.
\mathcal SEP
\begin{subsub}\label{2.J}{\bf Proposition : }
Let $s$ be a closed point of $S$. If all the base change homomorphisms
\begin{displaymath}
\text{R}^j_{p_*}(\mathcal F \otimes \mathcal Om^{-i}(-i))_s \longrightarrow \text{H}^j (\mathcal F_s \otimes \mathcal Om^{-i}(-i))
\end{displaymath}
are isomorphisms, then the restriction of the $\text{\emph{E}}^1$-term of
the relative Beilinson spectral sequence for $\mathcal F$ to \ $\mathbb{P}^2 \times \{ s\}$ \
is the $\text{\emph{E}}^1$-term for the Beilinson spectral sequence for the
restriction $\mathcal F_s$ of $\mathcal F$ to \ $\mathbb{P}^2 \times \{ s \}$.
\end{subsub}
\mathbb{P}roof Let $p_1, p_2: \mathbb{P}^2 \times \mathbb{P}^2 \to \mathbb{P}^2$ be the projections
onto the first and second component. We consider the resolution of the diagonal
$\Delta
\subset \mathbb{P}^2 \times \mathbb{P}^2$ given on p. 242 in \cite{oss}:
\begin{displaymath}
0 \longrightarrow \mathcal O_{\mathbb{P}^2}(-2) \boxtimes \mathcal Om_{\mathbb{P}^2}^2 (2) \longrightarrow \mathcal O_{\mathbb{P}^2}(-1) \boxtimes
\mathcal Om_{\mathbb{P}^2}^1(1) \longrightarrow \mathcal O_{\mathbb{P}^2 \times \mathbb{P}^2} \longrightarrow \mathcal O_{\Delta} \longrightarrow 0.
\end{displaymath}
The maps in $\mathcal EE^1(\mathcal F)$ are the induced maps
\begin{align*}
\mathcal EE^1_{ij}(\mathcal F) =
& \text{R}^j_{(p_1 \times 1_S)_*}\big(\mathcal O_{\mathbb{P}^2}(i) \boxtimes (p_2 \times 1_S)^*(\mathcal F
\otimes \mathcal Om_{\mathbb{P}^2}^{-i}(-i))\big) \longrightarrow \\
& \text{R}^j_{(p_1 \times 1_S)_*}\big(\mathcal O_{\mathbb{P}^2}(i+1) \boxtimes (p_2 \times 1_S)^*(\mathcal F
\otimes \mathcal Om_{\mathbb{P}^2}^{-i-1}(-i-1))\big) = \mathcal EE^1_{i+1,j}(\mathcal F).
\end{align*}
Restricting to $\mathbb{P}^2 \times \{ s \}$ we get the map
\begin{displaymath}
\mathcal O_{\mathbb{P}^2}(i) \otimes \text{R}^j_{p_*}(\mathcal F \otimes \mathcal Om_{\mathbb{P}^2}^{-i}(-i))_s \longrightarrow
\mathcal O_{\mathbb{P}^2}(i+1) \otimes\text{R}^j_{p_*}(\mathcal F \otimes \mathcal Om_{\mathbb{P}^2}^{-i-1}(-i-1))_s.
\end{displaymath}
From the naturality of the base-change homomorphism we see that the above is the
induced map
\begin{displaymath}
\mathcal O_{\mathbb{P}^2}(i) \otimes \text{H}^j (\mathcal F_s \otimes \mathcal Om_{\mathbb{P}^2}^{-i}(-i)) \longrightarrow \mathcal O_{\mathbb{P}^2}(i+1)
\otimes\text{H}^j (\mathcal F_s \otimes \mathcal Om_{\mathbb{P}^2}^{-i-1}(-i-1)).
\end{displaymath}
But this is the map from $\mathcal EE^1(\mathcal F_s)$, which finishes the proof of the claim.
$\Box$
\mathcal SEP
\begin{subsub}\label{2.K}{\bf Proposition : }
Let $S$ be a noetherian integral scheme over $\mathbb C$ and let
$\mathcal F$ be a coherent sheaf on $\mathbb{P}^2 \times S$ which is $S$-flat. For a closed
point $s$ in $S$ we denote by $\mathcal F_s$ the restriction of $\mathcal F$ to $\mathbb{P}^2 \times \{
s \}$. Assume that for all $i$ and $j$, $h^j(\mathcal F_s \otimes \mathcal Om^{-i}(-i))$ is
independent of $s$. Then, for all closed points $s$ in $S$, the restriction of
$\text{\emph{E}}^1(\mathcal F)$ to $\mathbb{P}^2 \times \{ s\}$ is
$\text{\emph{E}}^1(\mathcal F_s)$.
\end{subsub}
\mathbb{P}roof According to III 12.9 from \cite{hartshorne}, all base change
homomorphisms from \ref{2.J} are isomorphisms, so \ref{2.K} is a corollary of
\ref{2.J}.
$\Box$
\end{sub}
\vskip 1cm
\mathcal Ssect{Quotients by reductive groups and moduli spaces of sheaves}{mod}
We first recall the definition of good and geometric quotients (cf.
\cite{mumf}, \cite{newstead}) :
\mathcal SEP
\begin{subsub}\label{2.L0}{\bf Definition : }
Let an algebraic group $G$ act on an algebraic variety $X$. Then a pair
$(\varphi, Y)$, consisting of a variety $Y$ and a morphism $X\xrightarrow{\varphi} Y$, is called
a good quotient if
\begin{enumerate}
\item [(i)] $\varphi$ is $G$--invariant (for the trivial action of $G$ on
$Y$),
\item [(ii)] $\varphi$ is affine and surjective,
\item [(iii)] If $U$ is an open subset of $Y$ then $\varphi^\ast$ induces an
isomorphism $\mathcal O_Y(U)\simeq\mathcal O_X(\varphi^{-1}U)^G$, where the latter
denotes the ring of $G$--invariant functions,
\item [(iv)] If $F_1, F_2$ are disjoint closed and $G$--invariant subvarieties
of $X$ then $\varphi(F_1)$, $\varphi(F_2)$ are closed and disjoint.
\end{enumerate}
If in addition the fibres of $\varphi$ are the orbits of the action, then the quotient
$(\varphi, Y)$ is called a geometric quotient.
\end{subsub}
\mathcal SEP
C.~Simpson's construction of the moduli spaces $\text{M}_{\mathbb{P}^2}(r,\chi)$ (cf.
\cite{lepotier}, \cite{si}) is based on the following facts: there are a smooth
variety $R$ and a reductive group $G$ (to be precise, $G$ is a special linear
group) acting algebraically on $R$, such that $\text{M}_{\mathbb{P}^2}(r,\chi)$ is a good quotient of
$R$ by $G$. Moreover, the open subset $\text{M}_{\mathbb{P}^2}^{\text{s}}(r,\chi)$ of isomorphism
classes of stable sheaves is the geometric quotient of an open subset $R_0
\subset R$ modulo $G$. There is a coherent $R$-flat sheaf $\widetilde{\mathcal F}$ on $\mathbb{P}^2
\times R$ whose restriction $\widetilde{\mathcal F}_s$ to every closed fiber $\mathbb{P}^2 \times \{
s\}$ is a semistable sheaf with Hilbert polynomial $P(t)=rt+\chi$. The quotient
morphism
$$\mathfrak{p}i : R \longrightarrow \text{M}_{\mathbb{P}^2}(r,\chi)$$
maps $s$ to the stable-equivalence class of $\widetilde{\mathcal F}_s$, denoted
$[\widetilde{\mathcal F}_s]$. The quotient morphism \mM{R_0 \to \text{M}_{\mathbb{P}^2}^{\text{s}}(r,\chi)} sends
$s$ to the isomorphism class of $\widetilde{\mathcal F}_s$.
\mathcal SEP
\begin{subsub}\label{2.L}{\bf Proposition : }
Let $X$ be an irreducible locally closed subvariety
of $\text{M}_{\mathbb{P}^2}(r,\chi)$ and $S' \subset R$ the preimage of $X$ equipped with the
canonical reduced induced structure. Then
\begin{enumerate}
\item [(i)] The restriction $\mathfrak{p}i:S'\to X$ is a good quotient of $S'$ by $G$.
\item [(ii)] There exists an irreducible component $S$ of $S'$ such that $\mathfrak{p}i(S)=X$.
\item [(iii)] The restriction $\mathfrak{p}i:S\to X$ is a good quotient of $S$ by $G$.
\end{enumerate}
\end{subsub}
\mathbb{P}roof
It follows from \cite{sw}, 3.2 (i), that $\mathfrak{p}i^{-1}(X)\to X$ is a good quotient
by $G$. We have \mM{S'=\mathfrak{p}i^{-1}(X)_{red}}. Since $\mathfrak{p}i$ is affine, (i) is
reduced to the following~: let $Z={spec}(A)$ be an affine scheme and suppose
given an algebraic action of $G$ on $Z$. Suppose that $Z//G={spec}(A^G)$ is
reduced. Then the canonical morphism \hbox{$\mathfrak{p}hi:A^G\to(A/\text{rad}(A))^G$} is
an
isomorphism. We have $\mathcal Ker(\mathfrak{p}hi)=A^G\cap\text{rad}(A)$, and since $A^G$ is
reduced, $\mathfrak{p}hi$ is injective. We have a commutative square
\[
\xymatrix{A\ar@{->>}[d]\ar@{->>}[r]^R & A^G\ar[d]^\mathfrak{p}hi \\
A/\text{rad}(A)\ar@{->>}[r]^R & (A/\text{rad}(A))^G}\]
\noindent
$R$ being the Reynolds operators (cf. \cite{mumf}), which shows that $\mathfrak{p}hi$ is
surjective. This proves (i).
Let $S''$ be an irreducible component of $S'$. We have \ $S''\subset G.S''$,
and $G.S''$ is irreducible (because $G$ and $S''$ are). It follows
that $G.S''=S''$ and that $\mathfrak{p}i(S'')$ is closed in $X$. Since $X$ is irreducible
and the union of the closed images of the irreducible components of $S'$, there
exists an irreducible component $S$ of $S'$ such that $\mathfrak{p}i(S)=X$. This proves
(ii).
Again in the case of (iii) the problem is local, so we can suppose that
$S'={spec}(A)$ and $X={spec}(A^G)$. Let $I\subset A$ be the ideal of $S$ (it is $G$-invariant).
Then we have to prove that the canonical morphism $\mathfrak{p}hi:A^G\to(A/I)^G$ is an
isomorphism. The surjectivity of $\mathfrak{p}i:S\to X$ implies that the composition
$$A^G \stackrel{\mathfrak{p}hi}{\longrightarrow} (A/I)^G\subset A/I$$
is injective, so $\mathfrak{p}hi$ is injective. The surjectivity of $\mathfrak{p}hi$ can be seen
using the Reynolds operators as before. This proves (iii).
$\Box$
\mathcal SEP
\begin{subsub}\label{2.M}{\bf Proposition : }
With the above notations, let $\widetilde{\mathcal F}_{S}$ be the
restriction
of $\widetilde{\mathcal F}$ from $\mathbb{P}^2 \times R$ to $\mathbb{P}^2 \times S$. Assume that for all $i$
and $j$, $h^j(\widetilde{\mathcal F}_s \otimes \mathcal Om^{-i}(-i))$ is independent of the closed
point $s$ in $S$.
Then, for all closed points $s$ in $S$, the restriction of
$\text{\emph{E}}^1(\widetilde{\mathcal F})$ to $\mathbb{P}^2 \times \{ s\}$ is
$\text{\emph{E}}^1(\widetilde{\mathcal F}_s)$.
\end{subsub}
\mathbb{P}roof The hypotheses of \ref{2.K} are satisfied because flatness is
preserved under pulling back. $\Box$
\end{sub}
\vskip 1cm
\mathcal Ssect{Kronecker modules}{KM}
Let $L$ be a finite dimensional nonzero vector space over $\mathbb C$, and $m$, $n$
positive integers. Let $W=L(\mathbb C^m\otimes L,\mathbb C^n)\setminus\lbrace 0\rbrace$. We
have an algebraic action of
$$\mathcal Gamma=\big(\text{GL}(m)\times\text{GL}(n)\big)/\mathbb C^*$$
on $W$ given by
$$\xymatrix@R=4pt{\mathcal Gamma\times W\ar[r] & W\\
((g_1,g_2),f)\ar@{|-{>}}[r] & g_2\circ f\circ(g_1\otimesimes I_L)^{-1}
}$$
Let $\mathbb{P}=\mathbb{P}(L(\mathbb C^m\otimes L,\mathbb C^n))$. The preceding action induces an action of
the reductive group \ $G=\text{SL}(m)\times\text{SL}(n)$ \ on $\mathbb{P}$ with an
obvious linearization. According to K.~Hulek \cite{hu}, the linear maps
$\mathbb C^m\otimesimes L\to\mathbb C^n$ are called {\em $L$-Kronecker modules}. A Kronecker
module will be called {\em semi-stable} (resp. {\em stable}) if it is nonzero
and if the corresponding point in $\mathbb{P}$ is semi-stable (resp. stable) for the
above action. We have
\mathcal SEP
\begin{subsub}\label{2.N}{\bf Proposition : }
An $L$-Kronecker module \ $\tau:\mathbb C^m\otimes L\to\mathbb C^n$ is semi-stable (resp.
stable) if and only if for all linear subspaces $H\subset\mathbb C^m$, $K\subset\mathbb C^n$,
with $H\not=\lbrace 0\rbrace$, such that \ $\tau(H\otimes L)\subset K$ \ we have
\[\frac{\dim(K)}{\dim(H)} \ \geq \ \frac{n}{m} \mathfrak{q}uad\mathfrak{q}uad
\text{(resp.} \mathfrak{q}uad > \text{)} .\]
\end{subsub}
(cf. \cite{dr2}, prop. 15, \cite{ki}).
\mathcal SEP
Let $\mathbb{P}^{ss}$ (resp. $\mathbb{P}^{s}$) denote the $G$-invariant open subset of
semi-stable (resp. stable) points of $\mathbb{P}$. Let
\[N(L,m,n)=\mathbb{P}^{ss}//G , \mathfrak{q}uad N_s(L,m,n)=\mathbb{P}^{s}/G .\]
Of course these varieties depend only on $m$, $n$ and $\dim(L)$.
If $\dim(L)=q$ we will also use the notations $N(q,m,n)$ and $N_s(q,m,n)$.
The variety $N(q,m,n)$ is projective, irreducible and locally factorial, and
$N_s(q,m,n)$ is a smooth open subset of $N(q,m,n)$.
Let $x_q$ be the smallest solution of the equation \ $X^2-qX+1=0$ . Then we
have \hfil\break $\dim(N(q,m,n))>0$ \ if and only if \
$x_q<\frac{m}{n}<\frac{1}{x_q}$ .
In this case $N_s(q,m,n)$ is not empty and we have \
$\dim(N(q,m,n))=qmn-m^2-n^2+1$ .
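For instance, for the two spaces which occur below one finds
\[\dim(N(3,2,3))=3\cdot 2\cdot 3-2^2-3^2+1=6 , \qquad
\dim(N(3,1,2))=3\cdot 1\cdot 2-1^2-2^2+1=2 ,\]
and in both cases \ $x_3=\frac{3-\sqrt{5}}{2}<\frac{m}{n}<\frac{1}{x_3}$ , so these dimensions are indeed positive.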
If $m$ and $n$ are relatively prime then \ $N(q,m,n)=N_s(q,m,n)$ \ hence
$N(q,m,n)$ is a projective smooth variety. In this case there exists a {\em
universal morphism} on $N(q,m,n)$: there are algebraic vector bundles $E$,
$F$ on $N(q,m,n)$ of ranks $m$, $n$ respectively, and a morphism \ $\tau:E\otimes
L\to F$ \ such that for every closed point $x$ of $N(q,m,n)$ and any isomorphisms
$E_x\simeq\mathbb C^m$, $F_x\simeq\mathbb C^n$, the linear map \ $\tau_x:\mathbb C^m\otimes
L\to\mathbb C^n$ \ belongs to the $G$-orbit represented by $x$.
The moduli spaces of Kronecker modules appear in the following context :
suppose given two vector bundles $U$, $V$ on $\mathbb{P}^2$. Then a morphism
$U\otimesimes\mathbb C^m\to V\otimesimes\mathbb C^n$ is equivalent to a $\text{Hom}(U,V)^*$-Kronecker
module $\text{Hom}(U,V)^*\otimesimes\mathbb C^m\to \mathbb C^n$.
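For example, taking the two bundles to be $\mathcal O(-1)$ and $\mathcal O$ on $\mathbb{P}^2$, this identifies the
morphisms $2\mathcal O(-1)\to 3\mathcal O$ with the Kronecker modules
\[\text{Hom}(\mathcal O(-1),\mathcal O)^*\otimes\mathbb C^2\longrightarrow\mathbb C^3 ,\]
which is the identification used in \ref{3.A0} below, where such morphisms are viewed as
$V$-Kronecker modules.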
\end{sub}
\vskip 1cm
\mathcal Ssect{Moduli spaces of morphisms}{MSM}
Let $X$ be a projective algebraic variety, and $r$, $s$ positive integers. For
\mM{1\leq i\leq r} (resp. \mM{1\leq j\leq s}) let $m_i$ (resp. $n_j$) be a
positive integer and $\mathcal E_i$ (resp. $\mathcal F_j$) a coherent sheaf on $X$. Let
\[
\mathcal E=\underset{1\leq i\leq r}{\bigoplus}\mathcal E_i\otimesimes\mathbb C^{m_i}\mathfrak{q}uad\text{
and }\mathfrak{q}uad\mathcal F = \underset{1\leq j\leq s}{\bigoplus} \mathcal F_j\otimesimes\mathbb C^{n_j} .
\]
We suppose that the sheaves $\mathcal E_i$, $\mathcal F_j$ are simple and that
\[\text{H}om(\mathcal E_i,\mathcal E_k)=\lbrace 0\rbrace \ \text{ if } \ i>k , \qquad
\text{H}om(\mathcal F_j,\mathcal F_l)=\lbrace 0\rbrace \ \text{ if } \ j>l .\]
Let $\mathbb{W}=\text{H}om(\mathcal E,\mathcal F)$. The algebraic group \ $G=\mathcal Aut(\mathcal E)\times\mathcal Aut(\mathcal F)$ \ acts on
$\mathbb{W}$ in an obvious way. We can see the elements of $\mathcal Aut(\mathcal E)$ as matrices
\[
\left (
\begin{array}{ccccc}
g_1 & 0 & \cdots & 0\\
u_{21} & g_2 & & \vdots\\
\vdots & \ddots & \ddots & 0\\
u_{r1} & \cdots & u_{r, r-1} & g_r
\end{array}
\right)
\]
where $g_i\in \mathcal GL(m_i)$ and \ $u_{ki} \in L(\mathbb C^{m_i},\mathbb C^{m_k})\otimes \text{H}om(\mathcal E_i,
\mathcal E_k)$ \ (and similarly for $\mathcal Aut(\mathcal F)$).
Let \ $G_{red}=\prod_i\mathcal GL(m_i)\times\prod_j\mathcal GL(n_j)$ , which is a reductive
subgroup of $G$, and let $H$ be the maximal normal unipotent subgroup of $G$,
consisting of pairs of matrices with identities as diagonal terms.
The action of $G_{red}$ is well known (cf. \cite{ki}). Let
\ $\sigma=(\lambda_1,\ldots,\lambda_r,\mu_1,\ldots,\mu_s)$ \ be a
sequence of positive rational numbers such that
\[\mathop{\hbox{$\displaystyle\sum$}}\limits_{1\leq i\leq r}\lambda_i m_i \ = \ \mathop{\hbox{$\displaystyle\sum$}}\limits_{1\leq j\leq s}\mu_j n_j
\ = \ 1 \]
(such a sequence is called a {\em polarization}).
An element $f\in\mathbb{W}$ is called {\em $G_{red}$--semi-stable} (resp. {\em
$G_{red}$--stable}) with respect to $\sigma$ if for any choice of subspaces
$M_i\subset\mathbb C^{m_i}$, $N_j\subset\mathbb C^{n_j}$ such that $N_j\not=\mathbb C^{n_j}$ for at
least one $j$, and such that $f$ maps $\oplus(\mathcal E_i\otimes M_i)$ into
$\oplus(\mathcal F_j\otimesimes N_j)$, we have
\[\mathop{\hbox{$\displaystyle\sum$}}\limits_{1\leq i\leq r}\lambda_i\dim(M_i)\leq\mathop{\hbox{$\displaystyle\sum$}}\limits_{1\leq j\leq s}\mu_j\dim(N_j)
\mathfrak{q}uad \text{(resp. }<\text{)} . \]
There exists a good quotient of the open subset of $G_{red}$--semi-stable points
of $\mathbb{W}$ with respect to $\sigma$.
We now consider the action of the whole group $G$, which is not reductive
in general. An element $f\in\mathbb{W}$ is called {\em $G$--semi-stable} (resp. {\em
$G$--stable}) with respect to $\sigma$ if all the elements of $H.f$ are
$G_{red}$--semi-stable (resp. $G_{red}$--stable) with respect to $\sigma$. Let
$\mathbb{W}^{ss}(\sigma)$ (resp. $\mathbb{W}^s(\sigma)$) be the open $G$-invariant subset of
$G$--semi-stable (resp. $G$--stable) points of $\mathbb{W}$ with respect to $\sigma$.
If suitable numerical conditions are satisfied by $\sigma$ then
$\mathbb{W}^{ss}(\sigma)$ admits a good and projective quotient and $\mathbb{W}^s(\sigma)$
admits a geometric quotient, which is smooth (cf. \cite{drezet-trautmann}).
\end{sub}
\vskip 1cm
\vskip 1.4cm
\section{Euler Characteristics One and Three}\label{M4143}
\mathcal Ssect{The open strata}{open1}
According to 4.2 in \cite{maican}, the sheaves $\mathcal G$ giving a point in $\text{M}_{\mathbb{P}^2}(4,3)$
and satisfying \hfil\break\mM{h^0(\mathcal G(-1))=0} are precisely the sheaves that have
a resolution of the form
\setcounter{subsub}{1}
\begin{equation}\label{3.A1}
0 \longrightarrow \mathcal O(-2) \oplus 2\mathcal O(-1) \stackrel{\f}{\longrightarrow} 3\mathcal O \longrightarrow \mathcal G \longrightarrow 0
\end{equation}
with $\f_{12}$ having all maximal minors nonzero.
\mathcal SEP
\begin{subsub}\label{3.A0} {\bf Moduli spaces of morphisms -} \rm
It is easy to see, using the stability conditions of \ref{2.N}, that a morphism
$f:2\mathcal O(-1)\to 3\mathcal O$ is stable (as a $V$-Kronecker module) if and only if all its
maximal minors are nonzero. Moreover a stable morphism is injective (as a
morphism of sheaves).
We consider now morphisms as in (3.1.1)
\[\f : \mathcal O(-2)\oplus 2\mathcal O(-1)\longrightarrow 3\mathcal O .\]
Let $W=\text{Hom}(\mathcal O(-2)\oplus 2\mathcal O(-1),3\mathcal O)$. Then the linear algebraic group
$$G=\big(\mathcal Aut(\mathcal O(-2)\oplus 2\mathcal O(-1))\times\mathcal Aut(3\mathcal O)\big)/\mathbb C^*$$
acts on $W$ in an obvious way. Good quotients of some $G$-invariant open
subsets of $W$ are given in \cite{drezet-trautmann}, 9.3 (cf. \ref{MSM}). The
quotient related to $\text{M}_{\mathbb{P}^2}(4,3)$ is the obvious one, and we will describe it.
We begin with the following remark : let \ $f:2\mathcal O(-1)\to 3\mathcal O$ \ be an injective
morphism, and $\sigma\in \text{H}^0(\mathbb Coker(f)(2))$. Then $\sigma$ can be lifted to a
morphism $\mathcal O(-2)\to 3\mathcal O$ and thus, together with $f$, defines a morphism as in
(3.1.1). All the morphisms constructed in this way lie in the same $G$-orbit,
and every morphism $\f:\mathcal O(-2)\oplus 2\mathcal O(-1)\to 3\mathcal O$ \ such that
$\f_{12}=f$ comes from a section of $\mathbb Coker(f)(2)$.
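This can be checked directly: applying $\text{Hom}(\mathcal O(-2),-)$ to the exact sequence
$0\to 2\mathcal O(-1)\to 3\mathcal O\to\mathbb Coker(f)\to 0$ and using $\text{H}^1(\mathcal O(1))=0$, we get the exact sequence
\[\text{H}^0(3\mathcal O(2))\longrightarrow\text{H}^0(\mathbb Coker(f)(2))\longrightarrow\text{H}^1(2\mathcal O(1))=0 ,\]
so every section lifts, and two lifts of $\sigma$ differ by a morphism factoring through
$2\mathcal O(-1)$, i.e. by a column operation.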
Let $\tau:E\otimes V\to
F$ be a universal morphism on $N(3,2,3)$ (cf. \ref{KM}). Let $p_1$, $p_2$ be the
projections \ $N(3,2,3)\times\mathbb{P}^2\to N(3,2,3)$, $N(3,2,3)\times\mathbb{P}^2\to\mathbb{P}^2$
respectively. From $\tau$ we get a morphism of sheaves on \ $N(3,2,3)\times\mathbb{P}^2$
$$\theta : p_1^*(E)\otimes p_2^*(\mathcal O(-1))\longrightarrow p_1^*(F)
\ ,$$
where we use the canonical inclusion \ $\mathcal O(-1)\subset\mathcal O\otimes V$ \ on $\mathbb{P}^2$.
For every $x\in N(3,2,3)$, this morphism is injective on the fiber \ $\lbrace
x\rbrace\times\mathbb{P}^2$. Hence $\mathbb Coker(\theta)$ is flat on $N(3,2,3)$, and for
every $x\in N(3,2,3)$ we have $\mathbb Coker(\theta)_x=\mathbb Coker(\theta_x)$.
Let
$$U \ = \ p_{1*}(\mathbb Coker(\theta)\otimes p_2^*(\mathcal O(2))) .$$
It is a rank 12 vector bundle on $N(3,2,3)$, and for every $x\in N(3,2,3)$ we
have \hfil\break $U_x=\text{H}^0(\mathbb Coker(\theta_x)(2))$.
Let $\mathbb{W}=\mathbb{P}(U)$. Let $\boldsymbol W$ be the open $G$-invariant subset of
$W$ consisting of morphisms $\f$ such that $\f_{12}$ is stable, and such that
the section of $\mathbb Coker(\f_{12})(2)$ defined by $\f_{11}$ is nonzero. We have
an obvious morphism $\boldsymbol W\to \mathbb{W}$ which is a geometric
quotient, i.e. $\mathbb{W}=\boldsymbol W/G$. Hence $\boldsymbol W/G$ is a
smooth projective variety.
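Note that the dimensions are consistent with this description: using the standard fact that
$\dim\text{M}_{\mathbb{P}^2}(r,\chi)=r^2+1$, we have
\[\dim\mathbb{W} \ = \ \dim N(3,2,3)+\text{rk}(U)-1 \ = \ 6+12-1 \ = \ 17 \ = \ \dim\text{M}_{\mathbb{P}^2}(4,3) .\]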
\end{subsub}
\mathcal SEP
\begin{subsub}\label{3.A2} {\bf The open stratum of $\text{M}_{\mathbb{P}^2}(4,3)$ -} \rm Let
$X_0(4,3)$ be the open subset of $\text{M}_{\mathbb{P}^2}(4,3)$ corresponding to sheaves $\mathcal G$ such
that $h^0(\mathcal G(-1))=0$. Let $W_0(4,3)\subset\boldsymbol W$ be the $G$-invariant
open subset consisting of injective morphisms. We will see later that the
morphism $\rho_{4,3}:W_0(4,3)\to X_0(4,3)$ sending a morphism to its cokernel is
a geometric quotient by $G$, and hence $X_0(4,3)$ is isomorphic to the open
subset of $\mathbb{W}$ corresponding to injective morphisms.
\end{subsub}
\mathcal SEP
Now we will describe the open stratum of $\text{M}_{\mathbb{P}^2}(4,1)$. As at 2.2.2, dualizing the
exact sequence (3.1.1) we get a resolution for the sheaf $\mathcal F=\mathcal G^D(1)$. According
to \ref{2.A}, the latter is in $\text{M}_{\mathbb{P}^2}(4,1)$. Hence we obtain
\mathcal SEP
\begin{subsub}\label{3.A}{\bf Proposition : }
The sheaves $\mathcal F$ in $\text{M}_{\mathbb{P}^2}(4,1)$ satisfying $h^1(\mathcal F)=0$
are precisely the sheaves with resolution of the form
\begin{displaymath}
0 \longrightarrow 3\mathcal O(-2) \stackrel{\f}{\longrightarrow} 2\mathcal O(-1) \oplus \mathcal O \longrightarrow \mathcal F \longrightarrow 0,
\end{displaymath}
where all maximal minors of $\f_{11}$ are nonzero.
\end{subsub}
\mathcal SEP
Let $W_0=W_0(4,1)$ denote the set of injective morphisms
\begin{displaymath}
\f : 3\mathcal O(-2) \longrightarrow 2\mathcal O(-1) \oplus \mathcal O,
\end{displaymath}
such that $\f_{11}$ is stable. The linear algebraic group
$$\big(\mathcal Aut(3\mathcal O(-2)) \times \mathcal Aut(2\mathcal O(-1)\oplus\mathcal O)\big)/\mathbb C^*$$
acts on $\text{Hom}(3\mathcal O(-2),2\mathcal O(-1)\oplus\mathcal O)$ by conjugation. Of course this
group is canonically isomorphic to $G$, and the isomorphism
\[\xymatrix@R=3pt{\text{Hom}(\mathcal O(-2)\oplus 2\mathcal O(-1),3\mathcal O)\ar[r]^\mathcal Lambda &
\text{Hom}(3\mathcal O(-2),2\mathcal O(-1)\oplus\mathcal O)\\
\f\ar@{|-{>}}[r] & {}^t\f\otimesimes I_{\mathcal O(-2)}}\]
is $G$-invariant and sends $W_0(4,3)$ to $W_0(4,1)$.
\mathcal SEP
\begin{subsub}\label{3.A3}{\bf The open stratum of $\text{M}_{\mathbb{P}^2}(4,1)$ -} \rm
Let $X_0=X_0(4,1)$ be the open subset of $\text{M}_{\mathbb{P}^2}(4,1)$ of sheaves $\mathcal F$ such that
$h^1(\mathcal F)=0$. Let
\[\xymatrix@R=3pt{\lambda : \text{M}_{\mathbb{P}^2}(4,3)\ar[r]^\simeq & \text{M}_{\mathbb{P}^2}(4,1)\\
\mathcal G\ar@{|-{>}}[r] & \mathcal G^D(1)}\]
be the isomorphism of \ref{2.A}.
\end{subsub}
The map \ $\rho : W_0 \to X_0$ \ (resp. \ $\rho_{4,3}:W_0(4,3)\to
X_0(4,3)$) which sends $\f$ to the isomorphism class
of $\mathbb Coker(\f)$ is a surjective morphism whose fibers are $G$-orbits. We have a
commutative diagram
\[\xymatrix{W_0(4,3)\ar[r]^\mathcal Lambda\ar[d]^{\rho_{4,3}} & W_0(4,1)\ar[d]^\rho\\
X_0(4,3)\ar[r]^\lambda & X_0}\]
We claim that $\rho$ (and hence also $\rho_{4,3}$) is a geometric quotient map:
\mathcal SEP
\begin{subsub}\label{3.B}{\bf Theorem : }
The geometric quotient $W_0/G$ is isomorphic to $X_0$.
\end{subsub}
\mathbb{P}roof We will show that $\rho$ is a categorical quotient map and the
isomorphism \hfil\break$W_0/G \simeq X_0$ will follow from the uniqueness of the
categorical quotient. Given a $G$-invariant morphism of varieties $f: W_0 \to
Y$, there is a unique map $g: X_0 \to Y$ such that $g \circ \rho = f$. We need
to show that $g$ is a morphism of varieties. To see this we consider the good
quotient $\mathfrak{p}i: S \to X_0$ of \ref{2.L}. We will show that $S$ can
be covered with open sets $U$ for which there are morphisms $\s_U : U \to W_0$
making the diagram commute:
\begin{displaymath}
\xymatrix
{
U \ar[rr]^{\s_U} \ar[rd]_{\mathfrak{p}i} & & W_0 \ar[ld]^{\rho} \\
& X_0
}.
\end{displaymath}
Now we note that \ $g \circ \mathfrak{p}i : S \to Y$ is a morphism because its restriction
to each open set $U$ is $f \circ \s_U$. Thus $g$ is the unique morphism
associated to $g \circ \mathfrak{p}i$ by the categorical quotient property of $\mathfrak{p}i$.
It remains to construct the morphisms $\s_U$. This can be achieved as follows:
we notice that $\widetilde{\mathcal F}_S$ satisfies the hypotheses of \ref{2.M}.
This is so because for every sheaf $\mathcal F$ giving a point in $X_0$ we have
\begin{align*}
& h^1(\mathcal F \otimes \mathcal Om^2(2))=3, \mathfrak{q}quad \mathfrak{q}quad && h^1(\mathcal F \otimes \mathcal Om^1(1))=2,
\mathfrak{q}quad \mathfrak{q}quad &&& h^1(\mathcal F)=0, \\
& h^0(\mathcal F \otimes \mathcal Om^2(2))=0, \mathfrak{q}quad \mathfrak{q}quad && h^0(\mathcal F \otimes \mathcal Om^1(1))=0,
\mathfrak{q}quad \mathfrak{q}quad &&& h^0(\mathcal F)=1.
\end{align*}
Thus the higher direct images $\text{R}^j_{p_*}(\widetilde{\mathcal F}_S \otimes \mathcal Om^{-i}(-i))$
are locally free sheaves on $S$; we cover $S$ with open subsets $U$ on which
they are trivial and we fix such trivializations. For an arbitrary closed point
$s$ in $U$ we restrict $\mathcal EE^1(\widetilde{\mathcal F}_S)$ to $\mathbb{P}^2 \times \{ s \}$.
In this manner we construct a morphism $\z_U$ from $U$ to an open subset
$E$ of the space of spectral sequences with $\mathcal EE^1$-term
\begin{displaymath}
\xymatrix
{
\mathcal EE^1_{-2,1}= 3\mathcal O(-2) \ar[r]^{\f_1} & \mathcal EE^1_{-1,1}= 2\mathcal O(-1) \ar[r]^{\f_2} &
\mathcal EE^1_{01} =0 \\ \mathcal EE^1_{-2,0}=0 & \mathcal EE^1_{-1,0}=0 & \mathcal EE^1_{00}=\mathcal O
}.
\end{displaymath}
All the other $\mathcal EE^1_{ij}$ are zero. By \ref{2.M} $\z_U (s)$ is isomorphic to
$\mathcal EE^1(\widetilde{\mathcal F}_s)$. It remains to construct a morphism $\xi : E \to W_0$
which maps $\z_U(s)$ to a point $\f \in W_0$ satisfying $\mathbb Coker(\f) \simeq
\widetilde{\mathcal F}_s$; then we can put $\s_U = \xi \circ \z_U$. In other words, we need
to obtain a resolution of the form \ref{3.A} for $\widetilde{\mathcal F}_s$ starting
with
$\mathcal EE^1(\widetilde{\mathcal F}_s)$ and performing algebraic operations.
For the problem at hand it is easier to construct $\xi$ for the dual sheaf
$\mathcal G=\widetilde{\mathcal F}_s^D(1)$. In view of \ref{2.A} and (2.2.2) we can dualize the
problem: given $\mathcal G$ in $\text{M}_{\mathbb{P}^2}(4,3)$ with $h^0(\mathcal G(-1))=0$, we would like to
construct the dual to resolution \ref{3.A} starting from $\mathcal EE^1(\mathcal G)$.
Diagram (2.2.3) is
\begin{displaymath}
\xymatrix
{
\mathcal O(-2) & 0 & 0 \\
0 & 2\mathcal O(-1) \ar[r]^{\f_4} & 3\mathcal O
}
\end{displaymath}
and the exact sequence (2.2.5) takes the form
\begin{displaymath}
0 \longrightarrow \mathcal O(-2) \stackrel{\f_5}{\longrightarrow} \mathbb Coker(\f_4) \longrightarrow \mathcal G \longrightarrow 0.
\end{displaymath}
It is now clear that $\mathcal G$ is isomorphic to the cokernel of the morphism
\begin{displaymath}
\f = (\f_5,\f_4) : \mathcal O(-2) \oplus 2\mathcal O(-1) \longrightarrow 3\mathcal O.
\end{displaymath}
This finishes the proof of \ref{3.B}. $\Box$
\end{sub}
\vskip 1cm
\mathcal Ssect{The closed strata}{clos1}
We will treat only the closed stratum in $\text{M}_{\mathbb{P}^2}(4,1)$, the case of $\text{M}_{\mathbb{P}^2}(4,3)$ can
be deduced by duality as in \ref{open1}.
According to \ref{2.C}, we have $h^0(\mathcal F(-1))=0$ for all $\mathcal F$ in $\text{M}_{\mathbb{P}^2}(4,1)$.
From this and 5.3 in \cite{maican} we obtain:
\mathcal SEP
\begin{subsub}\label{3.C}{\bf Theorem : }
The sheaves $\mathcal F$ in $\text{M}_{\mathbb{P}^2}(4,1)$ satisfying $h^1(\mathcal F)=1$
are precisely the sheaves with resolution of the form
\begin{displaymath}
0 \longrightarrow \mathcal O(-3) \oplus \mathcal O(-1) \stackrel{\f}{\longrightarrow} 2\mathcal O \longrightarrow \mathcal F \longrightarrow 0,
\end{displaymath}
where $\f_{12}$ and $\f_{22}$ are linearly independent vectors in $V^*$.
\end{subsub}
\mathcal SEP
The description of the closed strata is very similar to that of
the open ones. Let \hfil\break\mM{W_1=W_1(4,1)} denote the set of morphisms from
\ref{3.C} that is, the set of injective morphisms
\begin{displaymath}
\f : \mathcal O(-3) \oplus \mathcal O(-1) \longrightarrow 2\mathcal O,
\end{displaymath}
such that $\f_{12}$ and $\f_{22}$ are linearly independent.
The linear algebraic group
$$G=\big(\mathcal Aut(\mathcal O(-3) \oplus \mathcal O(-1)) \times \mathcal Aut(2\mathcal O)\big)/\mathbb C^*$$
acts on $W_1$ by conjugation. Note that a morphism $f:\mathcal O(-1)\to 2\mathcal O$ is stable
(as a $V$-Kronecker module) if and only if $f_{12}$ and $f_{22}$ are linearly
independent, and a stable morphism is injective. The variety $N(3,1,2)$ is
canonically isomorphic to $\mathbb{P}^2$: to every point $P$ of $\mathbb{P}^2$, represented by
a line $D\subset V$, the corresponding stable morphism is the canonical one
$\mathcal O(-1)\to\mathcal O\otimesimes (V/D)$. The cokernel of this morphism is
isomorphic to $\mathcal I_P(1)$ ($\mathcal I_P$ being the ideal sheaf of $P$).
As for the open strata, given a stable $f:\mathcal O(-1)\to 2\mathcal O$ with cokernel
$\mathcal I_P(1)$, the morphisms $\mathcal O(-3)\oplus\mathcal O(-1)\to 2\mathcal O$ whose restriction to
$\mathcal O(-1)$ is $f$ correspond to sections of $\mathcal I_P(4)$. The morphisms
corresponding to nonzero sections are injective. Let $Q_4$ denote the cokernel
of the canonical morphism $\mathcal O(-4)\to\mathcal O\otimesimes S^4V$ (cf. \cite{dr2})
which is a rank 14 vector bundle on $N(3,1,2)=\mathbb{P}^2$. We have a canonical
isomorphism $Q_{4P}^*\simeq\text{H}^0(\mathcal I_P(4))$, for every point $P$ of $\mathbb{P}^2$.
Let $\mathbb{W}'=\mathbb{P}(Q_4^*)$. Then it is easy to see that we obtain a geometric
quotient \
$W_1\to\mathbb{W}'$.
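Here too the dimensions agree: $\mathbb{W}'$ is a $\mathbb{P}^{13}$-bundle over $N(3,1,2)=\mathbb{P}^2$, so that,
using again $\dim\text{M}_{\mathbb{P}^2}(4,1)=4^2+1=17$,
\[\dim\mathbb{W}' \ = \ 2+13 \ = \ 15 \ = \ \dim\text{M}_{\mathbb{P}^2}(4,1)-2 ,\]
in agreement with the codimension $2$ statement of \ref{3.D} below.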
\mathcal SEP
Let $X_1=X_1(4,1)$ be the locally closed subset of $\text{M}_{\mathbb{P}^2}(4,1)$ given by the
condition \hfil\break$h^1(\mathcal F)=1$. We equip $X_1$ with the canonical induced
reduced structure.
\mathcal SEP
\begin{subsub}\label{3.D}{\bf Theorem : }
The geometric quotient $\mathbb{W}'$ is isomorphic to $X_1$.
In particular, $X_1$ is a smooth closed subvariety of codimension 2.
\end{subsub}
\mathbb{P}roof As explained in the proof of \ref{3.B}, we have to construct the
resolution of \ref{3.C} starting from $\mathcal EE^1(\mathcal F)$. Diagram (2.2.3) is
\begin{displaymath}
\xymatrix
{
3\mathcal O(-2) \ar[r]^{\f_1} & 3\mathcal O(-1) \ar[r]^{\f_2} & \mathcal O \\
0 & \mathcal O(-1) \ar[r]^{\f_4} & 2\mathcal O
}.
\end{displaymath}
As $\f_2$ is surjective, we may assume that it is given by the matrix
${\displaystyle
\begin{pmatrix}
X & Y & Z
\end{pmatrix}
}$.
Thus $\f_1$ has columns that are linear combinations of the columns of the
matrix
\begin{displaymath}
\begin{pmatrix}
-Y & -Z & 0 \ \\
X & 0 \ & -Z \\
0 \ & X & Y
\end{pmatrix}
.
\end{displaymath}
As $\mathcal F$ surjects onto $\mathcal Ker(\f_2)/\mathcal Im(\f_1)$, the latter has rank zero, hence
$\mathcal Im(\f_1)$ has rank two, hence the columns of $\f_1$ span a two or three
dimensional vector space. In the former case we have the isomorphism
$\mathcal Ker(\f_2)/\mathcal Im(\f_1) \simeq\mathcal Om^1/2\mathcal O(-2)$. This sheaf has Hilbert polynomial
$P(t)=t-2$, which contradicts the semistability of $\mathcal F$. We conclude that $\f_1$
has three linearly independent columns, hence $\mathcal Ker(\f_2) = \mathcal Im(\f_1)$,
$\mathcal Ker(\f_1) \simeq \mathcal O(-3)$ and (2.2.5) takes the form
\begin{displaymath}
0 \longrightarrow \mathcal O(-3) \stackrel{\f_5}{\longrightarrow} \mathbb Coker(\f_4) \longrightarrow \mathcal F \longrightarrow 0.
\end{displaymath}
It follows that $\mathcal F$ is isomorphic to the cokernel of the morphism
\begin{displaymath}
\f=(\f_5,\f_4): \mathcal O(-3) \oplus \mathcal O(-1) \longrightarrow 2\mathcal O.
\end{displaymath}
This finishes the proof of \ref{3.D}. $\Box$
\mathcal SEP
\begin{subsub}\label{3.E}{\bf Theorem : }
There are no sheaves $\mathcal F$ in $\text{M}_{\mathbb{P}^2}(4,1)$ satisfying the relation
\hfil\break$h^1(\mathcal F)\ge 2$.
\end{subsub}
\mathbb{P}roof Let $\mathcal F$ be an arbitrary sheaf in $\text{M}_{\mathbb{P}^2}(4,1)$. The Beilinson monad (2.2.1)
for $\mathcal F(-1)$ has the form
\begin{displaymath}
0 \longrightarrow 7\mathcal O(-2) \longrightarrow 10\mathcal O(-1) \longrightarrow 3\mathcal O \longrightarrow 0.
\end{displaymath}
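These ranks can be recovered by an Euler characteristic computation: $P_{\mathcal F}(t)=4t+1$, and
$h^0(\mathcal F(-1))=0$ by \ref{2.C} (hence also $h^0(\mathcal F(-2))=0$ and $h^0(\mathcal F(-1)\otimes\mathcal Om^1(1))=0$,
the latter because $\mathcal Om^1(1)$ is a subsheaf of $3\mathcal O$), so that
\[h^1(\mathcal F(-2))=-\chi(\mathcal F(-2))=7 , \qquad h^1(\mathcal F(-1)\otimes\mathcal Om^1(1))=10 , \qquad
h^1(\mathcal F(-1))=-\chi(\mathcal F(-1))=3 ,\]
the middle value coming from \ $\chi(\mathcal F(-1)\otimes\mathcal Om^1(1))=3\chi(\mathcal F(-1))-\chi(\mathcal F)=-10$.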
Thus $\text{M}_{\mathbb{P}^2}(4,1)$ is parametrized by an open subset $M$ inside the space of monads
of the form
\begin{displaymath}
0 \longrightarrow 7\mathcal O(-1) \stackrel{A}{\longrightarrow} 10\mathcal O \stackrel{B}{\longrightarrow} 3\mathcal O(1) \longrightarrow 0.
\end{displaymath}
Let \ $\eta: M \to \text{M}_{\mathbb{P}^2}(4,1)$ \ be the surjective morphism which sends a monad to
the isomorphism class of its cohomology. The tangent space of $M$ at an
arbitrary point $(A,B)$ is
\begin{displaymath}
{\mathbb{T}}_{(A,B)}M = \{ (\a,\b), \mathfrak{q}uad \b \circ A +B \circ \a = 0 \}.
\end{displaymath}
Let \ $\mathbb{P}hi: M \to \text{H}om(10\mathcal O,3\mathcal O(1))$ \ be the projection onto the second
component. We claim that $\mathbb{P}hi$ has surjective differential at every point.
Indeed, $\text{d}\mathbb{P}hi_{(A,B)}(\a,\b)=\b$, so we need to show that, given $\b$,
there is $\a$ such that $\b \circ A + B \circ \a = 0$. This follows from the
surjectivity of the map
\begin{displaymath}
\text{H}om(7\mathcal O(-1),10\mathcal O) \longrightarrow \text{H}om(7\mathcal O(-1),3\mathcal O(1)), \mathfrak{q}quad \a \longrightarrow B \circ \a.
\end{displaymath}
To see this we apply the long $\mathcal Ext(7\mathcal O(-1),\_\_)$ sequence to the exact
sequence
\begin{displaymath}
0 \longrightarrow \mathcal Ker(B) \longrightarrow 10\mathcal O \stackrel{B}{\longrightarrow} 3\mathcal O(1) \longrightarrow 0
\end{displaymath}
and we use the vanishing $\mathcal Ext^1(7\mathcal O(-1),\mathcal Ker(B))=0$.
The latter follows from the exact sequence
\begin{displaymath}
0 \longrightarrow 7\mathcal O(-1) \longrightarrow \mathcal Ker(B) \longrightarrow \mathcal F \longrightarrow 0
\end{displaymath}
and the vanishing $\text{H}^1(\mathcal F(1))=0$, which follows from \ref{2.C}.
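Explicitly, applying $\text{Hom}(7\mathcal O(-1),-)$ to this exact sequence gives
\[\mathcal Ext^1(7\mathcal O(-1),7\mathcal O(-1))\longrightarrow\mathcal Ext^1(7\mathcal O(-1),\mathcal Ker(B))\longrightarrow\mathcal Ext^1(7\mathcal O(-1),\mathcal F) ,\]
where the first term is $\text{H}^1(49\,\mathcal O)=0$ and the last term is $\text{H}^1(7\,\mathcal F(1))=0$.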
\noindent \\
Note that $h^0(\mathcal F)=10-\text{rank}(B)$. The subset $N \subset M$ of monads with
cohomology sheaf $\mathcal F$ satisfying $h^1(\mathcal F) \ge 2$ is the preimage under $\mathbb{P}hi$
of the set of morphisms of rank at most 7. Since any matrix of rank 7 is the
limit of a sequence of matrices of rank 8, and since the derivative of $\mathbb{P}hi$ is
surjective at every point, we deduce that $N$ is included in
$\overline{\eta^{-1}(X_1)} \setminus \eta^{-1}(X_1)$. But, according to
\ref{3.D}, $X_1$ is closed. We conclude that $N$ is empty, which proves
\ref{3.E}. $\Box$
\end{sub}
\vskip 1cm
\mathcal Ssect{Description of the strata}{desc1}
We will describe the open stratum $X_0(4,3)$ and the closed stratum $X_1(4,1)$.
One can obtain a description of the two other strata by duality.
We consider first the open stratum $X_0(4,3)$.
Let $Y$ denote the closed subvariety of $N(3,2,3)$ corresponding to morphisms
\hfil\break$f:2\mathcal O(-1)\to 3\mathcal O$ such that $\mathbb Coker(f)$ is not torsion free, and
$\widetilde{Y}$ the closed subvariety of $X_0(4,3)$ of points over $Y$.
From \cite{dr3} prop. 4.5, we have
\mathcal SEP
\begin{subsub}\label{3.F}{\bf Proposition : }Let $f:2\mathcal O(-1)\to 3\mathcal O$ be a stable
morphism. Then
$\mathbb Coker(f)$ is torsion free if and only if it is isomorphic to $\mathcal I_Z(2)$, $Z$
beeing a finite subscheme of length 3 of $\mathbb{P}^2$ not contained in any line, and
$\mathcal I_Z$ its ideal sheaf.
\end{subsub}
\mathcal SEP
Let $f:2\mathcal O(-1)\to 3\mathcal O$ be a morphism as in \ref{3.F}, i.e. such that $\mathbb Coker(f)$
is torsion free. Let \ $\f:\mathcal O(-2)\oplus
2\mathcal O(-1)\to 3\mathcal O$ \ such that $\f_{12}=f$, and $\mathcal F=\mathbb Coker(\f)$. Then $\f$ is
injective and we have an exact sequence
\[0\longrightarrow\mathcal O(-2)\longrightarrow\mathcal I_Z(2)\longrightarrow\mathcal F\longrightarrow 0 .\]
Let $C$ be the quartic defined by the composition
$\mathcal O(-2)\to\mathcal I_Z(2)\subset\mathcal O(2)$; this quartic contains $Z$. From the
preceding exact sequence we then get the following one:
\[0\longrightarrow\mathcal F\longrightarrow\mathcal O_C(2)\longrightarrow\mathcal O_Z\longrightarrow 0 .\]
It follows easily that
\mathcal SEP
\begin{subsub}{\bf Proposition : } The open subset
$X_0(4,3)\begin{array}ckslash\widetilde{Y}$ consists of the
kernels of the surjective morphisms \mM{\mathcal O_C(2)\to\mathcal O_Z}, where $C$ is a quartic
and $Z$ a length 3 finite subscheme of $\mathbb{P}^2$ not contained in any line.
The generic sheaf in $X_0(4,3)$ is of the form $\mathcal O_C(2)(-P-Q-R)$, where $C$ is
a smooth quartic and $P$, $Q$, $R$ are points of $C$ not contained in the same
line.
\end{subsub}
\mathcal SEP
\begin{subsub} {\bf Non torsion free cokernels -} \rm Let $f:2\mathcal O(-1)\to 3\mathcal O$ be
a stable morphism such that $\mathbb Coker(f)$ is not torsion free. This is the case if
and only if the maximal minors of $f$ have a common divisor, which is a linear
form. It follows easily that there exists a basis $(z_0,z_1,z_2)$ of $V^*$ such
that $f$ is given (up to the action of $\text{GL}(2)\times\text{GL}(3)$) by the
matrix $\begin{pmatrix}z_1 & -z_0\\ z_2 & 0\\ 0 & z_2\end{pmatrix}$. Here all
the maximal minors are multiples of $z_2$. Let $D\subset V$ be the plane defined
by $z_2$ and $\ell$ the corresponding line of $\mathbb{P}^2$. Then $f$ is equivalent to
the canonical morphism \ $\mathcal O(-1)\otimesimes D\to\mathcal O\otimesimes\mathcal Lambda^2V$. Hence
$\mathbb Coker(f)$ depends only on $\ell$, and we can denote $E_\ell=\mathbb Coker(f)$. We
have proved that $Y$ is canonically isomorphic to $\mathbb{P}(V^*)$. Using the canonical
complex
\[0\longrightarrow\mathcal O(-2)\longrightarrow\mathcal O(-1)\otimesimes V\longrightarrow\mathcal O\otimesimes\mathcal Lambda^2V\longrightarrow\mathcal O(1)\otimesimes
\mathcal Lambda^3V\longrightarrow 0\]
it is easy to see that we have an exact sequence
\[0\longrightarrow\mathcal O_\ell(-1)\longrightarrow E_\ell\longrightarrow\mathcal O(1)\longrightarrow 0 .\]
We have $\text{Ext}^1(\mathcal O(1),\mathcal O_\ell(-1))\simeq\mathbb C$ and the preceding extension
is not trivial, because $E_\ell$ is simple.
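The computation of this extension space is immediate:
\[\text{Ext}^1(\mathcal O(1),\mathcal O_\ell(-1)) \ \simeq \ \text{H}^1(\mathbb{P}^2,\mathcal O_\ell(-2)) \ \simeq \
\text{H}^1(\ell,\mathcal O_{\mathbb{P}^1}(-2)) \ \simeq \ \mathbb C .\]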
A morphism $\mathcal O(-2)\to E_\ell$ is non injective if and only if its image is
contained in $\mathcal O_\ell(-1)$. It follows that the fiber over $\ell$ of the
projection $\widetilde{Y}\to Y$ is precisely \ $\mathbb{P}\bigl(\text{H}^0(E_\ell(2))\setminus
\text{H}^0(\mathcal O_\ell(1))\bigr)$, and that \ $(\boldsymbol W/G)\setminus X_0(4,3)$ is
canonically isomorphic to the projective bundle $S^2(T_{\mathbb{P}(V^*)}(1))$ over
$\mathbb{P}(V^*)$.
The closed subvariety $\widetilde{Y}$ can also be described precisely. It
consists of the non trivial extensions
\[0\longrightarrow\mathcal O_\ell(-1)\longrightarrow\mathcal F\longrightarrow\mathcal O_X(1)\longrightarrow 0\]
where $\ell$ is a line and $X$ a cubic. We have
$\dim(\text{Ext}^1(\mathcal O_X(1),\mathcal O_\ell(-1)))=3$, and $\widetilde{Y}$ is a projective
bundle over \ $\mathbb{P}(V^*)\times\mathbb{P}(S^3V^*)$ . The generic sheaves in
$\widetilde{Y}$ are obtained as follows: take $\ell$ and $X$ transverse; the
sheaves $\mathcal F$ are then obtained by gluing $\mathcal O_\ell(2)$ and $\mathcal O_X(1)$ at the
intersection points of $\ell$ and $X$.
\end{subsub}
\mathcal SEP
We will now describe the closed stratum $X_1(4,1)$. Using the description of
$\mathbb{W}'$ in \ref{clos1} we easily obtain
\mathcal SEP
\begin{subsub}\label{3.A4}{\bf Proposition : }The sheaves of $X_1(4,1)$ are the
kernels of the
surjective morphisms $\mathcal O_C(1)\to\mathcal O_P$, $C$ being a quartic curve
in $\mathbb{P}^2$ and $P$ a closed point of $C$.
\end{subsub}
\end{sub}
\vskip 1.4cm
\section{Euler Characteristic Two}
\mathcal Ssect{Preliminaries}{prel4}
We quote 4.5 from \cite{maican}:
\mathcal SEP
\begin{subsub}\label{4.A}{\bf Theorem : }
Let $\mathcal F$ be a sheaf in $\text{M}_{\mathbb{P}^2}(4,2)$ satisfying $h^0(\mathcal F(-1))=0$ and $h^1(\mathcal F)=0$.
Then $h^0(\mathcal F\otimes \mathcal Om^1(1))$ is zero or one. The sheaves of the first kind are
precisely the sheaves with resolution of the form
\begin{displaymath}
\tag{i}
0 \longrightarrow 2\mathcal O(-2) \longrightarrow 2\mathcal O \longrightarrow \mathcal F \longrightarrow 0.
\end{displaymath}
The sheaves of the second kind are precisely the sheaves with resolution
\begin{displaymath}
\tag{ii}
0 \longrightarrow 2\mathcal O(-2) \oplus \mathcal O(-1) \stackrel{\f}{\longrightarrow} \mathcal O(-1) \oplus 2\mathcal O \longrightarrow \mathcal F \longrightarrow
0,
\end{displaymath}
\begin{displaymath}
\f= \begin{pmatrix}
X_1 & X_2 & 0 \\
\star & \star & Y_1 \\
\star & \star & Y_2
\end{pmatrix}
.
\end{displaymath}
Here $X_1,X_2 \in V^*$ are linearly independent one-forms and the same for
$Y_1,Y_2 \in V^*$.
\end{subsub}
\mathcal SEP
\begin{subsub}\label{4.A1}{\bf Remark : } \rm Let $\mathcal E$ be a coherent sheaf on
$\mathbb{P}^2$ with Hilbert polynomial $4n+2$. Then $\mathcal E$ is isomorphic to the
cokernel of an injective morphism
\[f:2\mathcal O(-2)\longrightarrow 2\mathcal O\]
if and only if \ $h^0(\mathcal E\otimes \mathcal Om^1(1))=0$. In that case we also have $h^0(\mathcal E(-1))
=h^1(\mathcal E)=0$. This can be seen with a {\em generalized Beilinson spectral
sequence} using exceptional bundles on $\mathbb{P}^2$ (cf. \cite{dr1},
\cite{go_ru},\cite{dr3} 5-). Here we use the triad $(\mathcal O(-2),\mathcal O,\mathcal Omega^1(2))$
(cf. \cite{dr1}) instead of $(\mathcal O(-2),\mathcal O(-1),\mathcal O)$. Then $f$ is equivalent to the
canonical morphism
\[\mathcal O(-2)\otimesimes\text{H}^1(\mathcal E(-1))\longrightarrow\mathcal O\otimesimes\text{H}^1(\mathcal E\otimesimes Q_2(-1))\]
(where $Q_2$ is the exceptional bundle cokernel of the canonical morphism \
\hfil\break$\mathcal O(-2)\to S^2V\otimesimes\mathcal O$).
\end{subsub}
\mathcal SEP
\begin{subsub}\label{4.B}{\bf Theorem : }
There are no sheaves $\mathcal F$ in $\text{M}_{\mathbb{P}^2}(4,2)$ satisfying the relations
\hfil\break$h^0(\mathcal F(-1))=0$ and $h^1(\mathcal F)>0$.
\end{subsub}
\mathbb{P}roof Let $\mathcal F$ be a semi-stable sheaf with Hilbert polynomial $4m+2$.
Let \hfil\break\mM{p=h^0(\mathcal F\otimes\mathcal Omega^1(1))} and suppose that \ $h^1(\mathcal F)>0$ \ and
\ $h^0(\mathcal F(-1))=0$. Then we can write \ $h^1(\mathcal F)=q+1$ , with $q\geq 0$. The
Beilinson diagram (2.2.3) of $\mathcal F$ is
\[\xymatrix{\mathcal O(-2)\otimes\mathbb C^2 & & \mathcal O(-1)\otimes\mathbb C^p & & \mathcal O\otimes\mathbb C^{q+1} \\
0 & & \mathcal O(-1)\otimes\mathbb C^p & & \mathcal O\otimes\mathbb C^{q+3}
}\]
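Note that the same integer $p$ appears in both rows of the middle column: by the Euler sequence,
\[\chi(\mathcal F\otimes\mathcal Omega^1(1)) \ = \ 3\chi(\mathcal F)-\chi(\mathcal F(1)) \ = \ 3\cdot 2-6 \ = \ 0 ,\]
so $h^0(\mathcal F\otimes\mathcal Omega^1(1))=h^1(\mathcal F\otimes\mathcal Omega^1(1))=p$.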
The morphism \ $\varphi_2:\mathcal O(-1)\otimes\mathbb C^p\to\mathcal O\otimes\mathbb C^{q+1}$ \ is surjective,
hence we must have $p\geq q+3$.
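One way to see this inequality: a surjection forces $p\geq q+1$ for rank reasons, its kernel $K$
is locally free of rank $p-q-1$, and dualizing the sequence
$0\to K\to\mathcal O(-1)\otimes\mathbb C^p\to\mathcal O\otimes\mathbb C^{q+1}\to 0$ gives $c(K^*)=(1+h)^p$. If $p-q\leq 2$,
then
\[c_{p-q}(K^*) \ = \ \binom{p}{p-q}\,h^{p-q} \ \neq \ 0\]
would be a nonzero Chern class in degree larger than the rank of $K^*$, which is impossible;
hence $p\geq q+3$.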
The morphism \ $\varphi_3:\mathcal O(-1)\otimes\mathbb C^p\to\mathcal O\otimes\mathbb C^{q+3}$ \ is injective,
hence we have \ $p\leq q+3$. Finally we get $p=q+3$.
The Beilinson complex of $\mathcal F$ is then
\[0\longrightarrow
2\mathcal O(-2)\oplus(q+3)\mathcal O(-1)\longrightarrow(q+3)\mathcal O(-1)\oplus(q+3)\mathcal O\longrightarrow(q+1)\mathcal O\longrightarrow 0 .\]
Let $\mathcal E=\mathbb Coker(\varphi_3)$ and $E=\ker(\varphi_2)$ (it is a rank 2 vector
bundle). We have a commutative diagram with exact rows
\[\xymatrix{0\ar[r] & (q+3)\mathcal O(-1)\ar[r]^-{\varphi_3}\ar@{^{(}->}[d] &
(q+3)\mathcal O\ar[r]\ar@{^{(}->}[d] & \mathcal E\ar[r]\ar[d]^\alpha & 0\\
0\ar[r] & 2\mathcal O(-2)\oplus(q+3)\mathcal O(-1)\ar[r] & E\oplus(q+3)\mathcal O\ar[r] & \mathcal F\ar[r] &
0 }\]
It follows that $\ker(\alpha)\subset 2\mathcal O(-2)$, so $\ker(\alpha)$ is torsion-free; since it is
also a subsheaf of the torsion sheaf $\mathcal E$, it is zero, hence $\alpha$ is injective.
On the other hand, the Hilbert polynomial of $\mathcal E$
is $(q+3)(m+1)$, so $\alpha(\mathcal E)\subset\mathcal F$ has reduced Hilbert polynomial $m+1$. This contradicts the semi-stability of $\mathcal F$.
$\Box$
\end{sub}
\vskip 1cm
\mathcal Ssect{The two sub-strata of the open stratum}{open2}
Let $X$ be the open subset of $\text{M}_{\mathbb{P}^2}(4,2)$ corresponding to sheaves $\mathcal F$ satisfying
the conditions
\begin{displaymath}
h^0(\mathcal F(-1))=0, \mathfrak{q}quad h^1(\mathcal F)=0, \mathfrak{q}quad h^0(\mathcal F \otimes\mathcal Om^1(1))\leq 1 .
\end{displaymath}
It is the disjoint union of the open subset $X_0$ and the locally closed subset
$X_1$, where for $i=0,1$, $X_i$ corresponds to sheaves $\mathcal F$ such that \
$ h^0(\mathcal F \otimes\mathcal Om^1(1))=i$ .
We will first describe the subsets $X_0$, $X_1$.
\mathcal SEP
\begin{subsub}\label{4.D0} {\bf The open subset $X_0$ -} \rm Let
$W=\text{H}om(2\mathcal O(-2),2\mathcal O)$, on which the reductive group
$G_0=\text{GL}(2)\times\text{GL}(2)$ acts. Let $W_0\subset W$ be the set of
morphisms from \ref{4.A}(i), that is, the set of injective morphisms
\begin{displaymath}
\f : 2\mathcal O(-2) \longrightarrow 2\mathcal O.
\end{displaymath}
The corresponding $S^2V$-Kronecker modules \ $S^2V\otimesimes\mathbb C^2\to\mathbb C^2$ \ are
then semistable (cf. \ref{KM}). Hence $W_0$ is an open $G_0$-invariant subset
inside the set $W^{ss}$ of semistable points and contains the set
$W^{ss}\setminus W^s$ of properly semistable points. $W^{ss}$ is the set of $2
\times 2$-matrices with entries in $S^2V^*$ having linearly independent rows and
columns. $W^{ss} \setminus W^s$ is the subset of matrices equivalent, modulo row
and column operations, to a matrix having a zero entry. Incidentally, note that
$W_0$ is a proper subset of $W^{ss}$ (cf. \ref{desc2}). Thus $W_0/\!\!/G_0$ is a
proper open subset of the projective variety $N(6,2,2)$.
The morphism \ $\rho_0: W_0 \to X_0$ \ given by \ $\xymatrix{\f \ar@{|-{>}}[r] &
[\mathbb Coker(\f)]}$ \ is $G_0$-invariant. We claim that $\rho_0(\f_1)=\rho_0(\f_2)$
if and only if \ $\overline{G_0\f_1} \cap \overline{G_0\f_2}\neq \emptyset$.
This is clear if $\mathcal F=\mathbb Coker(\f_1)$ is stable; in fact, in this case we have
$G_0\f_1 = G_0\f_2$. If $\mathcal F$ is properly semistable, then there is an extension
\begin{displaymath}
0 \longrightarrow \mathcal O_{C_1} \longrightarrow \mathcal F \longrightarrow \mathcal O_{C_2} \longrightarrow 0
\end{displaymath}
for some conics $C_1= \{ f_1=0 \}$ and $C_2= \{ f_2 = 0 \}$.
From the horseshoe lemma we get a resolution
\begin{displaymath}
0 \longrightarrow 2\mathcal O(-2) \stackrel{\mathfrak{p}si}{\longrightarrow} 2\mathcal O \longrightarrow \mathcal F \longrightarrow 0, \mathfrak{q}quad \mathfrak{p}si =
\begin{pmatrix}
f_2 & 0 \\
\star & f_1
\end{pmatrix}
.
\end{displaymath}
Thus $f_1 \oplus f_2$ is in the closure of the orbit $G_0\mathfrak{p}si = G_0\f_1$.
Analogously, $f_1 \oplus f_2$ is in the closure of $G_0\f_2$.
\end{subsub}
\mathcal SEP
\begin{subsub}\label{4.D}{\bf Theorem : }
The good quotient $W_0/\!\!/G_0$ is isomorphic to $X_0$.
The subvariety \mM{(W^{ss}\setminus W^s)/\!\!/G_0} \ of
$W_0/\!\!/G_0$ given by properly semistable points is isomorphic to the
subvariety of $\text{M}_{\mathbb{P}^2}(4,2)$ given by properly semistable sheaves and is isomorphic
to the symmetric space \ $\big(\mathbb{P}(S^2V^*) \times \mathbb{P}(S^2
V^*)\big)/\text{\emph{S}}_2$.
\end{subsub}
\mathbb{P}roof We will show that $\rho_0$ is a categorical quotient map and the
isomorphism $W_0/\!\!/G_0\simeq X_0$ will follow from the uniqueness of the
categorical quotient. Let \hbox{$f: W_0 \to Y$} \ be a $G_0$-invariant morphism
of varieties. The map $f$ is constant on the closure of each $G_0$-orbit; hence, by the
above remark, $f$ is constant on the fibers of $\rho_0$. Thus $f$ factors through
a map \ $g:X_0 \to Y$.
We continue the proof as at \ref{3.B}. We need to construct resolution
\ref{4.A}(i)
starting from the Beilinson spectral sequence for $\mathcal F$. Diagram (2.2.3) takes
the form
\begin{displaymath}
\xymatrix
{
2\mathcal O(-2) & 0 & 0 \\
0 & 0 & 2\mathcal O
}
\end{displaymath}
and (2.2.5) yields the resolution
\begin{displaymath}
0 \longrightarrow 2\mathcal O(-2) \stackrel{\f_5}{\longrightarrow} 2\mathcal O \longrightarrow \mathcal F \longrightarrow 0.
\end{displaymath}
This allows us to construct the morphism \ $\xi : E \to W_0$ as at \ref{3.B} \
and proves the isomorphism $W_0/\!\!/G_0\simeq X_0$.
The statement about properly semistable sheaves follows from the easily
verifiable fact that $\rho_0(\f)$ is properly semistable if and only if $\f$ is
properly semistable. $\Box$
\mathcal SEP
\begin{subsub}\label{4.E0} {\bf The locally closed subset $X_1$ -} \rm
Let $W_1$ be the set of morphisms from \ref{4.A}(ii), that is, the set of
injective morphisms
\begin{displaymath}
\f : 2\mathcal O(-2) \oplus \mathcal O(-1) \longrightarrow \mathcal O(-1) \oplus 2\mathcal O
\end{displaymath}
satisfying $\f_{12}=0$ and such that $\mathbb Coker(\f)$ is semistable.
The linear algebraic group
$$G=\mathcal Aut(2\mathcal O(-2) \oplus \mathcal O(-1)) \times \mathcal Aut(\mathcal O(-1) \oplus 2\mathcal O)$$
acts on $W_1$ in an obvious way. Let $X_1$ be the locally closed subset of
$\text{M}_{\mathbb{P}^2}(4,2)$ defined by the conditions
\begin{displaymath}
h^0(\mathcal F(-1))=0, \mathfrak{q}quad h^1(\mathcal F)=0, \mathfrak{q}quad h^0(\mathcal F \otimes \mathcal Om^1(1))=1.
\end{displaymath}
We equip $X_1$ with the canonical induced reduced structure. The morphism
$$\xymatrix@R=4pt{\rho_1: W_1 \ar[r] & X_1\\ \f \ar@{|-{>}}[r] & [\mathbb Coker(\f)]}$$
is surjective and its fibers are $G$-orbits.
\end{subsub}
\mathcal SEP
\begin{subsub}\label{4.E}{\bf Theorem : }
The morphism $\rho_1:W_1\to X_1$ is a geometric quotient by $G$.
\end{subsub}
\mathbb{P}roof According to \cite{mumf}, prop. 0.2, since the fibers of $\rho_1$ are the
$G$-orbits, we only need to prove that $\rho_1$ is a categorical quotient.
As in the proof of \ref{3.B}, we need to recover resolution
\ref{4.A}(ii) of a sheaf $\mathcal F$ in $X_1$ starting from the Beilinson spectral
sequence for $\mathcal F$. Diagram (2.2.3) is
\begin{displaymath}
\xymatrix
{
2\mathcal O(-2) \ar[r]^{\f_1} & \mathcal O(-1) & 0 \\
0 & \mathcal O(-1) \ar[r]^{\f_4} & 2\mathcal O
}.
\end{displaymath}
$\mathbb Coker(\f_1)$ cannot be of the form $\mathcal O_L$ for a line $L \subset \mathbb{P}^2$ because,
by semistability, $\mathcal F$ cannot surject onto such a sheaf. Thus $\mathbb Coker(\f_1)$ is
supported on a point and $\mathcal Ker(\f_1) \simeq \mathcal O(-3)$.
Clearly $\f_5$ lifts to a morphism $\mathfrak{p}si_5: \mathcal O(-3) \longrightarrow 2\mathcal O$. We have
a resolution
$$\xymatrix{0\ar[r] & \mathcal O(-3) \oplus \mathcal O(-1)\ar[rr]^-{[\mathfrak{p}si_5, \f_4 ]} & &
2\mathcal O\ar[r] & \mathbb Coker(\f_5)\ar[r] & 0 \ .}$$
We now apply the horseshoe lemma to the extension
\begin{displaymath}
0 \longrightarrow \mathbb Coker(\f_5) \longrightarrow \mathcal F \longrightarrow \mathbb Coker(\f_1) \longrightarrow 0,
\end{displaymath}
to the resolution of $\mathbb Coker(\f_5)$ given above and to the resolution
\begin{displaymath}
0 \longrightarrow \mathcal O(-3) \longrightarrow 2\mathcal O(-2) \longrightarrow \mathcal O(-1) \longrightarrow \mathbb Coker(\f_1) \longrightarrow 0.
\end{displaymath}
We obtain the exact sequence
\begin{displaymath}
0 \longrightarrow \mathcal O(-3) \longrightarrow \mathcal O(-3) \oplus \mathcal O(-1) \oplus 2\mathcal O(-2) \longrightarrow 2\mathcal O \oplus \mathcal O(-1)
\longrightarrow \mathcal F \longrightarrow 0.
\end{displaymath}
From the fact that $h^1(\mathcal F)=0$, we see that $\mathcal O(-3)$ can be cancelled to yield a
resolution
as in \ref{4.A}(ii). $\Box$
\end{sub}
\vskip 1cm
\mathcal Ssect{The open stratum}{open3}
Let
\[\mathbb{W} = \text{H}om(2\mathcal O(-2)\oplus\mathcal O(-1),\mathcal O(-1)\oplus 2\mathcal O) .\]
The elements of $\mathbb{W}$ are represented as matrices
\[\begin{pmatrix}X_1 & X_2 & \alpha\\ q_{11} & q_{12} & Y_1\\
q_{21} & q_{22} & Y_2\end{pmatrix} ,\]
where $\alpha\in\mathbb C$, $X_1$, $X_2$, $Y_1$, $Y_2$ are linear forms and
$q_{11}$, $q_{12}$, $q_{21}$, $q_{22}$ are quadratic forms on $V$. The linear algebraic
group
$$G=\mathcal Aut(2\mathcal O(-2) \oplus \mathcal O(-1)) \times \mathcal Aut(\mathcal O(-1) \oplus 2\mathcal O)$$
acts on $\mathbb{W}$ as in \ref{4.E0} and $W_1$ is a locally closed $G$-invariant
subset of $\mathbb{W}$.
We are in the situation of \ref{MSM}, and we will use the polarization
\[\sigma=(\frac{1-\mu}{2},\mu,\mu,\frac{1-\mu}{2}) ,\]
where $\mu$ is a rational number such that $\frac{1}{3}<\mu<\frac{1}{2}$.
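Here $m_1=2$, $m_2=1$, $n_1=1$, $n_2=2$, and $\sigma$ is indeed a polarization in the sense of
\ref{MSM}:
\[2\cdot\frac{1-\mu}{2}+\mu \ = \ \mu+2\cdot\frac{1-\mu}{2} \ = \ 1 .\]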
\mathcal SEP
\begin{subsub}\label{4.E1}{\bf Lemma : } Let $f\in\mathbb{W}$ be an injective morphism.
Then
$\mathbb Coker(f)$ is semi-stable if and only if $f$ is $G$--semi-stable with respect
to $\sigma$.
\end{subsub}
\mathbb{P}roof
Let $m_1,m_2,n_1,n_2$ be integers such that $0\leq m_1\leq 2$, $0\leq m_2\leq
1$, $0\leq n_1\leq 1$, $0\leq n_2\leq 2$. Let $f\in\mathbb{W}$. We say that \ {\em
``$(m_1,m_2)\to(n_1,n_2)$ is forbidden for $f$ ''} if there are no linear subspaces
$M_1\subset\mathbb C^2$, $M_2\subset\mathbb C$, $N_1\subset\mathbb C$, $N_2\subset\mathbb C^2$ with
$\dim(M_i)=m_i$ and $\dim(N_j)=n_j$ such that
\[f((\mathcal O(-2)\otimesimes M_1)\oplus(\mathcal O(-1)\otimesimes M_2))\subset(\mathcal O(-1)\otimesimes N_1)
\oplus(\mathcal O\otimesimes N_2) .\]
Then $f$ is $G$--semi-stable with respect to $\sigma$ if and only if the following
are forbidden for $f$ :
\[(2,0)\to(1,0), (2,0)\to(0,1), (0,1)\to(0,1), (1,1)\to(0,2), (1,1)\to(1,0),\]
and the cases where $n_1=n_2=0$ and $m_1+m_2\not=0$. The result then follows
easily from \ref{open2}.
$\Box$
\mathcal SEP
The elements of the group $G$ can be seen as pairs of matrices
\[(\nu_1,\nu_2) \ = \ \biggl(\begin{pmatrix}\alpha & 0\\ \mathfrak{p}hi & A\end{pmatrix}
, \begin{pmatrix}B & 0\\ \mathfrak{p}si & \beta\end{pmatrix}\biggr)\]
where $A,B\in\mathcal GL(2)$, $\alpha,\beta\in\mathbb C^*$, $\mathfrak{p}hi$ is a column vector
$\begin{pmatrix}\mathfrak{p}hi_1\\ \mathfrak{p}hi_2\end{pmatrix}$ and $\mathfrak{p}si=(\mathfrak{p}si_1,\mathfrak{p}si_2)$ a pair
of linear forms on $V$. As in \ref{4.D0} we consider the space \
$W=\text{H}om(2\mathcal O(-2),2\mathcal O)$ \ on which acts the reductive group \
$G_0=\mathcal GL(2)\times\mathcal GL(2)$ . Let
\[\tau:G\longrightarrow G_0\]
(cf. \ref{4.D0}) be the morphism of groups defined by \ $\tau(\nu_1,\nu_2)=
\alpha\beta^{-1}AB^{-1}$. An easy calculation shows that
\mathcal SEP
\begin{subsub}\label{4.E2}{\bf Lemma : } The morphism
$$\Delta:\mathbb{W}\longrightarrow W$$
defined by
\[\Delta\biggl(\begin{pmatrix}X_1 & X_2 & \alpha\\ q_{11} & q_{12} & Y_1\\
q_{21} & q_{22} & Y_2\end{pmatrix}\biggr) \ = \
\alpha\begin{pmatrix}q_{11} &
q_{12}\\ q_{21} & q_{22}\end{pmatrix}-\begin{pmatrix}Y_1\\Y_2\end{pmatrix}
(X_1,X_2)\]
is compatible with $\tau$, i.e. for every $w\in\mathbb{W}$ and $g\in G$ we have
$\Delta(gw)=\tau(g)\Delta(w)$.
\end{subsub}
\mathcal SEP
Note that the image of $\Delta$ is $W^{ss}$.
The locus in $N(6,2,2)$ of non-injective morphisms is isomorphic to \
$\mathbb{P}^2\times\mathbb{P}^2$. To a pair $(P_0,P_1)$ of points of $\mathbb{P}^2$ corresponds the
$\text{GL}(2)\times\text{GL}(2)$-orbit of the matrix
$\begin{pmatrix}\alpha_0\beta_0 & \alpha_0\beta_1\\ \alpha_1\beta_0 &
\alpha_1\beta_1\end{pmatrix}$, where for $i=0,1$, $(\alpha_i,\beta_i)$ is a
pair of linear forms defining $P_i$. Thus we have an isomorphism
\[X_0 \ \simeq \ N(6,2,2)\setminus(\mathbb{P}^2\times\mathbb{P}^2) .\]
Let \ $\mathfrak{p}i:W^{ss}\to N(6,2,2)$ \ be the quotient morphism. We have
\[\Delta(W_1)=\mathfrak{p}i^{-1}(\mathbb{P}^2\times\mathbb{P}^2)\subset W^s .\]
We have a surjective $G$--invariant morphism
$$\xymatrix@R=4pt{\rho: \mathbb{W} \ar[r] & X\\ \f \ar@{|-{>}}[r] & [\mathbb Coker(\f)]}$$
\mathcal SEP
\begin{subsub}\label{4.E3}{\bf Theorem : }
The morphism $\rho$ is a good quotient by $G$.
\end{subsub}
\mathbb{P}roof Let $\mathbb{W}_0=\rho^{-1}(W^s)$. According to \cite{mumf}, prop. 0.2, the
restriction of $\rho$, $\mathbb{W}_0\to W^s$ is a geometric quotient by $G$. Let \
\[\mathbb{W}_2=(\mathfrak{p}i\circ\Delta)^{-1}\big(N(6,2,2)\setminus(\mathbb{P}^2\times\mathbb{P}^2)\big)=\rho^{-1}(X_0)
.\]
We will show that the restriction of $\rho$, $\mathbb{W}_2\to X_0$ is a good quotient.
Since \ \mM{\mathbb{W}=\mathbb{W}_0\cup\mathbb{W}_2} \ it follows easily from definition \ref{2.L0}
that $\rho$ is a good quotient.
Let \ $H=\mathbb C^*\times(V^*)^4$.
We have an isomorphism
\[\theta:\mathfrak{p}i^{-1}\big(N(6,2,2)\setminus(\mathbb{P}^2\times\mathbb{P}^2)\big)\times H\to\mathbb{W}_2\]
given by
\[\theta(q_0,\mu,X_1,X_2,Y_1,Y_2)=\begin{pmatrix}\vec X & \mu\\Q & \vec Y
\end{pmatrix} \ ,
\]
with
\[{\vec X}=(X_1,X_2) , \mathfrak{q}uad {\vec Y}=\begin{pmatrix}Y_1\\Y_2\end{pmatrix} ,
\mathfrak{q}uad Q=\frac{1}{\mu}\big(q_0+\begin{pmatrix}Y_1\\Y_2\end{pmatrix}(X_1,X_2)
\big) .\]
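A direct computation confirms that $\Delta\circ\theta$ is the projection onto the first factor:
\[\Delta\bigl(\theta(q_0,\mu,X_1,X_2,Y_1,Y_2)\bigr) \ = \ \mu\, Q-{\vec Y}\,{\vec X} \ = \ q_0 .\]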
It is easy to verify that the $G$--orbits of $\mathbb{W}_2$ are of the form $Y\times H$,
where $Y$ is a $G_0$--orbit of $\mathfrak{p}i^{-1}\big(N(6,2,2)\setminus(\mathbb{P}^2\times\mathbb{P}^2)\big)$. The fact
that $\rho$ is a good quotient by $G$ then follows immediately from the fact
that the restriction of $\mathfrak{p}i$,
\[\mathfrak{p}i^{-1}\big(N(6,2,2)\setminus(\mathbb{P}^2\times\mathbb{P}^2)\big)\longrightarrow
N(6,2,2)\setminus(\mathbb{P}^2\times\mathbb{P}^2)\]
is a good quotient by $G_0$.
$\Box$
\mathcal SEP
From $\Delta$ and \ref{4.E3} we get a surjective morphism
\[\delta:X\longrightarrow N(6,2,2)\]
which induces an isomorphism \ $X_0\simeq N(6,2,2)\setminus(\mathbb{P}^2\times\mathbb{P}^2)$.
\mathcal SEP
\begin{subsub}\label{4.E4}\bf The fibers of $\delta$ over points of
$\mathbb{P}^2\times\mathbb{P}^2$ - \rm Let $P_1$, $P_2$ be distinct points of $\mathbb{P}^2$. For $i=1,2$, let $X_i$,
$Z$ be linearly independent linear forms on $V$ vanishing at $P_i$.
Then $\delta^{-1}(P_1,P_2)$ contains the sheaves which are cokernels of
injective morphisms of type
\[\begin{pmatrix}X_1 & Z & 0\\ q_{11} & q_{12} & Z\\ q_{21} & q_{22} &
X_2 \end{pmatrix} .\]
Each $G$--orbit of such a morphism contains a matrix of the following type :
\[\begin{pmatrix}X_1 & Z & 0\\ \alpha X_2^2 & q_{12} & Z\\ q_{21} & \beta X_1^2
& X_2 \end{pmatrix}\]
where the quadratic form $q_{12}$ in $X_1$, $X_2$, $Z$ has no term in $Z^2$,
and all the matrices of this type in the $G$--orbit are obtained by replacing
the submatrix $\begin{pmatrix}\alpha X_2^2 & q_{12}\\ q_{21} & \beta X_1^2
\end{pmatrix}$ by a nonzero multiple. The corresponding morphism is non
injective if and only if the submatrix vanishes. It follows that \
$\delta^{-1}(P_1,P_2)\simeq\mathbb{P}^{12}$ .
Let $P_1$ be a point of $\mathbb{P}^2$, and let $X_1$, $Z$ be linearly independent linear
forms on $V$ vanishing at $P_1$. Let $X_2$ be a linear form such that
$(X_1,X_2,Z)$ is a basis of $V^*$. Then $\delta^{-1}(P_1,P_1)$ contains the
sheaves which are cokernels of injective morphisms of type
\[\begin{pmatrix}X_1 & Z & 0\\ q_{11} & q_{12} & Z\\ q_{21} & q_{22} &
X_1 \end{pmatrix} .\]
Each $G$--orbit of such a morphism contains a matrix of the following type :
\[\begin{pmatrix}X_1 & Z & 0\\ \alpha X_2^2 & q_{12} & Z\\ q_{21} & \beta X_2^2
& X_1 \end{pmatrix}\]
where the quadratic form $q_{12}$ in $X_1$, $X_2$, $Z$ has no term in $Z^2$,
and all the matrices of this type in the $G$--orbit are obtained by replacing
the submatrix $\begin{pmatrix}\alpha X_2^2 & q_{12}\\ q_{21} & \beta X_2^2
\end{pmatrix}$ by a nonzero multiple. The corresponding morphism is non
injective if and only if the submatrix vanishes or is a multiple of
$\begin{pmatrix}X_2^2 & 0\\0 & -X_2^2\end{pmatrix}$. Hence we see that
$\delta^{-1}(P_1,P_1)$ is isomorphic to the complement of a point in $\mathbb{P}^{14}$.
\end{subsub}
\medskip
\begin{subsub}\label{4.E5}{\bf Theorem : } Let $\widetilde{\mathbf{N}}$ be the
blowing-up of
$N(6,2,2)$ along $\mathbb{P}^2\times\mathbb{P}^2$. Then $X$ is isomorphic to an open subset of
$\widetilde{\mathbf{N}}$.
\end{subsub}
{\bf Proof : } We have \ $\delta^{-1}(\mathbb{P}^2\times\mathbb{P}^2)=W_1$ , which is a smooth
hypersurface of $X$. It follows from the universal property of the blowing-up
that $\delta$ factors through $\widetilde{\mathbf{N}}$ : we have a morphism \
$\widetilde{\delta}:X\to\widetilde{\mathbf{N}}$ \ such that \
$\delta=p\circ\widetilde{\delta}$, $p$ being the projection \
$\widetilde{\mathbf{N}}\to N(6,2,2)$. We want to prove that
$\widetilde{\delta}$ induces an isomorphism $X\simeq\widetilde{\delta}(X)$. For
this it suffices to prove that $\widetilde{\delta}$ does not contract $W_1$ to
a subvariety of codimension $\geq 2$ (cf. \cite{sha}, II,4, theorem 2).
The morphism $\widetilde{\delta}$ can be described precisely on $W_1$. Recall
that $p^{-1}(\mathbb{P}^2\times\mathbb{P}^2)$ is the projective bundle
$\mathbb{P}(\mathcal N_{\mathbb{P}^2\times\mathbb{P}^2})$, $\mathcal N_{\mathbb{P}^2\times\mathbb{P}^2}$ being the normal bundle of
$\mathbb{P}^2\times\mathbb{P}^2$ in $N(6,2,2)$. Let $w\in W_1$ and let $C$ be a smooth curve through
$w$ in $X$, not tangent to $W_1$ at $w$. Then $\widetilde{\delta}(w)$ is the
image in $\mathcal N_{\mathbb{P}^2\times\mathbb{P}^2,\delta(w)}$ of the tangent to $C$ at $w$. Suppose
first that $\delta(w)$ is a pair of distinct points $(P_1,P_2)$ and that
$w$ is the cokernel of a morphism
\[\phi_0=\begin{pmatrix}X_1 & Z & 0\\ q_{11} & q_{12} & Z\\ q_{21} & q_{22} &
X_2 \end{pmatrix}\]
(we use the notations of \ref{4.E4}). Then \
$\overline{\phi_0}=\begin{pmatrix}ZX_1 & Z^2\\ X_1X_2 & ZX_2\end{pmatrix}$ \ is
a point of $W^s$ over $w$ and $\mathcal N_{\mathbb{P}^2\times\mathbb{P}^2,\delta(w)}$ is
isomorphic to $\mathcal N_{\Gamma,\overline{\phi_0}}$, where $\Gamma\subset W^s$ is the
inverse image of $\mathbb{P}^2\times\mathbb{P}^2$ and $\mathcal N_\Gamma$ its normal bundle. The vector
space $\mathcal N_{\Gamma,\overline{\phi_0}}$ is a quotient of $W$. Suppose that $C$ is
defined by the family $(\phi_t)$ (for $t$ in a neighbourhood of $0$ in $\mathbb C$),
with
\[\phi_t=\begin{pmatrix}X_1 & Z & t\\ q_{11} & q_{12} & Z\\ q_{21} & q_{22} &
X_2 \end{pmatrix} \ .\]
Then from the formula defining $\Delta$ we deduce that $\widetilde{\delta}(w)$
is the image of $\begin{pmatrix}q_{11} & q_{12}\\ q_{21} & q_{22}\end{pmatrix}$
in $\mathcal N_{\Gamma,\overline{\phi_0}}$. It follows from \ref{4.E4} that
$\mathcal N_{\mathbb{P}^2\times\mathbb{P}^2,\delta(w)}$ is contained in the image of
$\widetilde{\delta}$, and that $\widetilde{\delta}$ does not contract $W_1$ to
a subvariety of codimension $\geq 2$.
$\Box$
\medskip
\begin{subsub}\label{4.E6}{\bf Remark : }\rm Using \ref{4.E4} and the
proof of \ref{4.E5} it is not difficult to prove that \
$\widetilde{\mathbf{N}}\backslash X\simeq\mathbb{P}^2$ .
\end{subsub}
\end{sub}
\vskip 1cm
\subsection{The closed stratum}\label{clos2}
\begin{subsub}\label{4.F}{\bf Theorem : }
A sheaf $\mathcal F$ giving a point in
$\text{M}_{\mathbb{P}^2}(4,2)$ satisfies $h^0(\mathcal F(-1))> 0$
if and only if there is a quartic $C \subset \mathbb{P}^2$ such that $\mathcal F \simeq \mathcal O_C(1)$.
The subvariety of such sheaves in $\text{M}_{\mathbb{P}^2}(4,2)$ is isomorphic to $\mathbb{P}(S^4V^*)$.
\end{subsub}
{\bf Proof : } As explained in the comments before \ref{2.C}, a nonzero morphism
\hbox{$\mathcal O \to \mathcal F(-1)$} \ must factor through an injective morphism \
$\mathcal O_C \to \mathcal F(-1)$ \ for a curve $C\subset \mathbb{P}^2$. From the semistability of $\mathcal F$
we see that $C$ must be a quartic, and the above morphism must be an
isomorphism. $\Box$
\end{sub}
\vskip 1cm
\subsection{Description of the strata}\label{desc2}
We will describe $X_1$.
Let \ $\f:2\mathcal O(-2)\oplus\mathcal O(-1)\to\mathcal O(-1)\oplus 2\mathcal O$ \ be an injective morphism as
in \ref{4.A} (ii) and $\mathcal F=\text{Coker}(\f)$. Let $P$ (resp. $Q$) be the point of
$\mathbb{P}^2$ defined by the linear forms $X_1$, $X_2$ (resp. $Y_1$, $Y_2$). We have a
commutative diagram with exact rows
\[\xymatrix{
0\ar[r] & \mathcal O(-1)\ar[r]\ar@{^{(}->}[d] & 2\mathcal O\ar[r]\ar@{^{(}->}[d] &
\mathcal I_Q(1)\ar[r]\ar[d]^\alpha & 0\\
0\ar[r] & 2\mathcal O(-2)\oplus\mathcal O(-1)\ar[r]^-\f & \mathcal O(-1)\oplus 2\mathcal O\ar[r] &
\mathcal F\ar[r] & 0
}\]
It follows that we have $\text{Ker}(\alpha)\simeq\mathcal O(-3)$ and
$\text{Coker}(\alpha)\simeq\mathcal O_P$ (the structure sheaf of $P$). So we have an exact
sequence
\[0\longrightarrow\mathcal O(-3)\stackrel{\beta}{\longrightarrow}\mathcal I_Q(1)\longrightarrow\mathcal F\longrightarrow\mathcal O_P\longrightarrow 0 .\]
Here $C\subset\mathbb{P}^2$ denotes the quartic curve defined by $\beta$, viewed as a
section of $\mathcal I_Q(4)$.
Up to a scalar multiple there is only one surjective morphism \
$\mathcal O_C(1)\to\mathcal O_Q$ . We denote by $\mathcal O_C(1)(-Q)$ its kernel. If $C$ is not smooth
at $P$ this sheaf can be non locally free. We have \
$\text{Coker}(\beta)\simeq\mathcal O_C(1)(-Q)$, and an exact sequence
\setcounter{subsub}{1}
\begin{equation}\label{4.G2}
0\longrightarrow\mathcal O_C(1)(-Q)\longrightarrow\mathcal F\longrightarrow\mathcal O_P\longrightarrow 0 .
\end{equation}
Hence we could denote $\mathcal F$ by ``$\mathcal O_C(1)(P-Q)$''. This notation is justified if
$P\not=Q$ and if $C$ is smooth at $P$. So we can already state
\medskip
\begin{subsub}\label{4.G}{\bf Proposition : }The generic sheaf in $X_1$ is of
the form
$\mathcal O_C(1)(P-Q)$, where $C$ is a smooth quartic and $P$, $Q$ distinct points of
$C$.
\end{subsub}
\medskip
We want to prove now that the notation ``$\mathcal O_C(1)(P-Q)$'' has a meaning if
$P\not=Q$ even if $C$ is not necessarily smooth. We have to compute
$\text{Ext}^1_{\mathcal O_C}\big(\mathcal O_P,\mathcal O_C(1)(-Q)\big)$. First we note that
\[\text{Ext}^1_{\mathcal O_C}\big(\mathcal O_P,\mathcal O_C(1)(-Q)\big) \ \simeq \
\text{Ext}^1_{\mathcal O_{\mathbb{P}^2}}\big(\mathcal O_P,\mathcal O_C(1)(-Q)\big) \]
(this can be seen using prop. 2.3.1 of \cite{dr4}). We use now the exact
sequence
\[0\to\mathcal O(-3)\to\mathcal I_Q(1)\to\mathcal O_C(1)(-Q)\to 0\]
and get the exact sequence
\[0\longrightarrow\text{Ext}^1_{\mathcal O_{\mathbb{P}^2}}\big(\mathcal O_Q,\mathcal I_P(1)\big)\longrightarrow\text{Ext}^1_{\mathcal O_{\mathbb{P}^2}
} \big(\mathcal O_P,\mathcal O_C(1)(-Q)\big)\longrightarrow\text{Ext}^2_{\mathcal O_{\mathbb{P}^2}}\big(\mathcal O_Q,\mathcal O(-3)\big)\]
\[\longrightarrow\text{Ext}^2_{\mathcal O_{\mathbb{P}^2}}(\mathcal O_Q,\mathcal I_P(1))\longrightarrow\text{Ext}^2_{\mathcal O_{\mathbb{P}^2}}(\mathcal O_P,
\mathcal O_C(1)(-Q))\longrightarrow 0 .\]
Using the exact sequence \ $0\to\mathcal O(-1)\to 2\mathcal O\to\mathcal I_P(1)\to 0$ , we find that
\[\text{Ext}^1_{\mathcal O_{\mathbb{P}^2}}\big(\mathcal O_Q,\mathcal I_P(1)\big)=\lbrace 0\rbrace \ ,\quad
\text{Ext}^2_{\mathcal O_{\mathbb{P}^2}}\big(\mathcal O_Q,\mathcal I_P(1)\big)\simeq\mathbb C\]
if $P\not=Q$ and
\[\text{Ext}^1_{\mathcal O_{\mathbb{P}^2}}\big(\mathcal O_P,\mathcal I_P(1)\big)\simeq\mathbb C \ ,\quad
\text{Ext}^2_{\mathcal O_{\mathbb{P}^2}}\big(\mathcal O_P,\mathcal I_P(1)\big)\simeq\mathbb C^2 .\]
It now follows easily that \
$\text{Ext}^1_{\mathcal O_C}\big(\mathcal O_P,\mathcal O_C(1)(-Q)\big)\simeq\mathbb C$
if \ $P\not=Q$, hence there is only one non trivial extension ({\ref{4.G2}}) and
the notation $\mathcal O_C(1)(P-Q)$ is justified in this case.
If $P=Q$ we first remark that $C$ is never smooth at $P$, and this implies
that the morphism
$$\text{Ext}^2_{\mathcal O_{\mathbb{P}^2}}\big(\mathcal O_Q,\mathcal O(-3)\big)\longrightarrow\text{Ext}^2_{\mathcal O_{\mathbb{P}^2}}
\big(\mathcal O_Q ,\mathcal I_P(1)\big)$$
vanishes: by Serre duality it is the transpose of
\[\text{Hom}(\mathcal I_P(1),\mathcal O_Q)\longrightarrow\text{Hom}(\mathcal O(-3),\mathcal O_Q)\]
which is just the multiplication by an equation of $C$. It follows that we have
\hfil\break $\text{Ext}^1_{\mathcal O_C}\big(\mathcal O_P,\mathcal O_C(1)(-P)\big)\simeq\mathbb C^2$ \ and an
injective
map
\[\xymatrix{\lambda:\mathbb C=\text{Ext}^1_{\mathcal O_{\mathbb{P}^2}}\big(\mathcal O_P,\mathcal I_P(1)\big)\ar@{^{(}->}[r] &
\text{Ext}^1_{\mathcal O_C}\big(\mathcal O_P,\mathcal O_C(1)(-P)\big)\simeq\mathbb C^2 .}\]
Its image corresponds to the extension (\ref{4.G2}) given by $\mathcal F=\mathcal O_C(1)$, and
the other extensions, which are in $X_1$, are defined by the other elements of
$\text{Ext}^1_{\mathcal O_C}\big(\mathcal O_P,\mathcal O_C(1)(-P)\big)$.
\end{sub}
\vskip 1.4cm
\section{Euler Characteristic Four}
\subsection{The open stratum}\label{open4}
Let $X_0$ be the open subset of $\text{M}_{\mathbb{P}^2}(4,4)$ corresponding to sheaves $\mathcal F$ such
that \hfil\break$h^0(\mathcal F(-1))=0$. The complement $\text{M}_{\mathbb{P}^2}(4,4)\backslash X_0$ is the
{\em theta divisor} (cf. \cite{lepotier}). Combining \cite{lepotier}, 4.3 and
\cite{dr2}, th\'eor\`eme 2, we obtain
\medskip
\begin{subsub}{\bf Theorem : }
1 - The sheaves on $\mathbb{P}^2$ with Hilbert polynomial $4m+4$ satisfying
$h^0(\mathcal F(-1))=0$ are precisely the sheaves which are isomorphic to the cokernel
of an injective morphism
\[\f:4\mathcal O(-1)\longrightarrow 4\mathcal O \ .\]
Moreover, $\mathcal F$ is not stable if and only if $\f$ is equivalent,
modulo operations on rows and columns, to a morphism of the form
\begin{displaymath}
\begin{pmatrix}
\f_{11} & 0 \\
\f_{21} & \f_{22}
\end{pmatrix}
\qquad \text{\emph{with}} \quad \f_{22} : m\mathcal O(-1) \longrightarrow m\mathcal O, \quad m=1,
\, 2 \ \text{\emph{or}}\ 3.
\end{displaymath}
2 - Let $\mathbf{N}_0$ denote the open subset of $N(3,4,4)$ corresponding to
injective morphisms. By associating $\mathcal F$ to the
$\big(\text{GL}(4)\times\text{GL}(4)\big)$-orbit of $\f$ we get an isomorphism \
$\mathbf{N}_0\simeq X_0$.
\end{subsub}
\medskip
According to \cite{lepotier} the complement $N(3,4,4)\backslash X_0$ is
isomorphic to $\mathbb{P}^2$, and the inclusion \hfil\break$X_0\subset N(3,4,4)$ can be
extended to a morphism \ $\text{M}_{\mathbb{P}^2}(4,4)\to N(3,4,4)$ \ which is the blowing-up of
$N(3,4,4)$ along $\mathbb{P}^2$.
\end{sub}
\vskip 1cm
\subsection{The closed stratum}\label{clos4}
\begin{subsub}\label{5.C}{\bf Proposition : }
The sheaves $\mathcal F$ in $\text{M}_{\mathbb{P}^2}(4,4)$ satisfying $h^0(\mathcal F(-1))=1$ are
precisely the sheaves with resolution
\begin{displaymath}
0 \longrightarrow \mathcal O(-2) \oplus \mathcal O(-1) \stackrel{\f}{\longrightarrow} \mathcal O \oplus \mathcal O(1) \longrightarrow \mathcal F \longrightarrow 0,
\qquad \f_{12}\neq 0.
\end{displaymath}
Moreover, $\mathcal F$ is not stable if and only if $\f_{12}$ divides
$\f_{11}$ or $\f_{22}$.
\end{subsub}
{\bf Proof : } We assume that $\mathcal F$ has a resolution as above and we need to
show that $\mathcal F$ is semistable. Assume that there is a destabilizing subsheaf $\mathcal E$
of $\mathcal F$. We may assume that $\mathcal E$ is semistable. As $\mathcal F$ is generated by global
sections, we must have $h^0(\mathcal E) < h^0(\mathcal F)=4$. Thus $\mathcal E$ is in $\text{M}_{\mathbb{P}^2}(2,3)$,
$\text{M}_{\mathbb{P}^2}(1,3)$ or in $\text{M}_{\mathbb{P}^2}(1,2)$. In other words, $\mathcal E$ must be isomorphic to $\mathcal O_C(1)$,
$\mathcal O_L(2)$ or $\mathcal O_L(1)$ for a conic $C$ or a line $L$ in $\mathbb{P}^2$. In the first
case we have a commutative diagram
\begin{displaymath}
\xymatrix
{
0 \ar[r] & \mathcal O(-1) \ar[r] \ar[d]^{\b} & \mathcal O(1) \ar[r] \ar[d]^{\a} & \mathcal E \ar[r]
\ar[d] & 0 \\
0 \ar[r] & \mathcal O(-2) \oplus \mathcal O(-1) \ar[r] & \mathcal O \oplus \mathcal O(1) \ar[r] &\mathcal F \ar[r] & 0
}.
\end{displaymath}
Notice that $\a$ is injective, because it is injective on global sections, hence
$\b$ is injective too. We obtain $\f_{12}=0$, which contradicts the hypothesis
on $\f$. The other two cases similarly lead to contradictions: if $\mathcal E \simeq
\mathcal O_L(2)$, then $\a=0$; if $\mathcal E \simeq \mathcal O_L(1)$, then $\b=0$, which is impossible.
Conversely, we are given $\mathcal F$ in $\text{M}_{\mathbb{P}^2}(4,4)$ satisfying the condition
$h^0(\mathcal F(-1))=1$ and we need to construct a resolution as above. As explained in
the comments above \ref{2.C}, there is an injective morphism \ $\mathcal O_C \to \mathcal F(-1)$
\ for a curve $C$ in $\mathbb{P}^2$. From the semistability of $\mathcal F(-1)$ we see that $C$
is a cubic or a quartic. Assume that $C$ is a cubic curve. The quotient
$\mathcal F/\mathcal O_C(1)$ has Hilbert polynomial $P(t)=t+1$ and has no zero-dimensional
torsion. Indeed, if $\mathcal F/\mathcal O_C(1)$ had zero-dimensional torsion $\mathcal T \neq 0$, then
the preimage of $\mathcal T$ in $\mathcal F$ would be a subsheaf which violates the
semistability of $\mathcal F$. We conclude that $\mathcal F/\mathcal O_C(1) \simeq \mathcal O_L$ for a line $L
\subset \mathbb{P}^2$. We apply the horseshoe lemma to the extension
\begin{displaymath}
0 \longrightarrow \mathcal O_C(1) \longrightarrow \mathcal F \longrightarrow \mathcal O_L \longrightarrow 0
\end{displaymath}
and to the standard resolutions for $\mathcal O_C(1)$ and $\mathcal O_L$ to get the desired
resolution for $\mathcal F$. This also proves the statement about properly semistable
sheaves from the claim. Indeed, if $\mathcal F$ is properly semistable, then $\mathcal F$ has
stable factors $\mathcal O_C(1)$ and $\mathcal O_L$ and we can apply the horseshoe lemma as
above. For the rest of the proof we may assume that $\mathcal F$ is stable.
Next we examine the situation when $C$ is a quartic. Notice that $\mathcal F/\mathcal O_C(1)$
has zero-dimensional support and Euler characteristic 2. There is a subsheaf
$\mathcal T \subset \mathcal F/\mathcal O_C(1)$ satisfying $h^0(\mathcal T)=1$. Let $\mathcal E$ be the
preimage of $\mathcal T$ in $\mathcal F$ and $\mathcal G=\mathcal F/\mathcal E$. Notice that $\mathcal E$ is semistable because
any subsheaf ruining the semistability of $\mathcal E$ must contradict the stability of
$\mathcal F$. From the semicontinuity theorem III 12.8 in \cite{hartshorne} we have
$h^0(\mathcal F\otimes \Omega^1(1)) \ge 4$. Indeed, this inequality
is true for all sheaves in the open dense subset $X_0$ of $\text{M}_{\mathbb{P}^2}(4,4)$, so it must
be true for all sheaves giving a point in $\text{M}_{\mathbb{P}^2}(4,4)$. We have
\begin{displaymath}
4 \le h^0(\mathcal F \otimes \Omega^1(1)) \le h^0(\mathcal E \otimes \Omega^1(1)) + h^0(\mathcal G \otimes
\Omega^1(1)) = h^0(\mathcal E \otimes \Omega^1(1)) + 1.
\end{displaymath}
From our results in section 3, and from the duality theorem \ref{2.A},
we see that there is a resolution
\begin{displaymath}
0 \longrightarrow 2\mathcal O(-2) \longrightarrow \mathcal O(-1) \oplus \mathcal O(1) \longrightarrow \mathcal E \longrightarrow 0.
\end{displaymath}
We apply the horseshoe lemma to the extension
\begin{displaymath}
0 \longrightarrow \mathcal E \longrightarrow \mathcal F \longrightarrow \mathcal G \longrightarrow 0,
\end{displaymath}
to the resolution of $\mathcal E$ from above and to the resolution
\begin{displaymath}
0 \longrightarrow \mathcal O(-2) \longrightarrow 2\mathcal O(-1) \longrightarrow \mathcal O \longrightarrow \mathcal G \longrightarrow 0.
\end{displaymath}
The morphism \ $\mathcal O \longrightarrow \mathcal G$ \ lifts to a morphism \ $\mathcal O \longrightarrow \mathcal F$ \ because
$h^1(\mathcal E)=0$. We obtain a resolution
\begin{displaymath}
0 \longrightarrow \mathcal O(-2) \longrightarrow 2\mathcal O(-1) \oplus 2\mathcal O(-2) \longrightarrow \mathcal O \oplus \mathcal O(-1) \oplus \mathcal O(1)
\longrightarrow \mathcal F \longrightarrow 0.
\end{displaymath}
From the condition $h^0(\mathcal F(-1))=1$ we see that the map $\mathcal O(-2) \longrightarrow 2\mathcal O(-2)$
from the above sequence is nonzero. We may cancel $\mathcal O(-2)$ to get the exact
sequence
\begin{displaymath}
0 \longrightarrow \mathcal O(-2) \oplus 2\mathcal O(-1) \longrightarrow \mathcal O(-1) \oplus \mathcal O \oplus \mathcal O(1) \longrightarrow \mathcal F \longrightarrow 0.
\end{displaymath}
The morphism \ $2\mathcal O(-1)\to\mathcal O(-1)$ \ from the above sequence is nonzero,
otherwise $\mathcal F$ would surject onto a sheaf of the form $\mathcal O_L(-1)$ for a line $L
\subset \mathbb{P}^2$, in violation of the semistability of $\mathcal F$. Thus we may cancel
$\mathcal O(-1)$ in the above sequence to get the resolution of $\mathcal F$ from the claim.
$\Box$
\medskip
Let $W_1$ be the set of morphisms from \ref{5.C}, that is, the set of injective
morphisms
\begin{displaymath}
\f: \mathcal O(-2) \oplus \mathcal O(-1) \longrightarrow \mathcal O \oplus \mathcal O(1),
\end{displaymath}
for which $\f_{12} \neq 0$.
The linear nonreductive algebraic group
$$G=\big(\text{Aut}(\mathcal O(-2) \oplus \mathcal O(-1)) \times\text{Aut}(\mathcal O \oplus \mathcal O(1))\big)/\mathbb C^*$$
acts on $W_1$ by conjugation. According to \cite{drezet-trautmann}, there is a
good and projective quotient $W_1/\!\!/G$ which contains a geometric quotient as
a proper open subset. For a polarization $\sigma=(\l_1,\l_2,\m_1,\m_2)$
satisfying $\l_1=\m_2 < 1/4$ (cf. \ref{MSM}) the
set of semistable morphisms is $W^{ss}(\sigma)=W_1$. The open subset of stable
points $W^{s}(\sigma) \subset W^{ss}(\sigma)$ is given by the conditions
$\f_{12}\nmid \f_{11}$ and $\f_{12} \nmid \f_{22}$. The geometric quotient
$W^s(\sigma)/G$ is an open subset of $W_1/\!\!/G$.
Let $X_1$ be the locally closed subset of $\text{M}_{\mathbb{P}^2}(4,4)$ given by the relation
$h^0(\mathcal F(-1))=1$. We equip $X_1$ with the canonical induced reduced structure.
The morphism $\rho: W_1 \to X_1$, $\rho(\f)= [\text{Coker}(\f)]$, is surjective and
$G$-invariant. Since $W_1$ is irreducible, $X_1$ is irreducible, too. From
\ref{5.C} we know that $\rho(\f)$ is the isomorphism class of a stable sheaf if
and only if $\f$ is in $W^s(\sigma)$. Arguments similar to those in the
beginning of \ref{open2} show that $\rho(\f_1)=\rho(\f_2)$ if and only if
$\overline{G\f_1} \cap \overline{G\f_2} \neq \emptyset$. If $\mathcal F$ is stable, then
$\rho^{-1}([\mathcal F])$ is a $G$-orbit, so it has dimension equal to $\text{dim}(G)$.
Indeed, the stabilizer for any stable morphism consists only of the neutral
element of $G$. We have
\begin{displaymath}
\text{dim}(X_1) = \text{dim}(W^s(\sigma)) - \text{dim}(G) = 25-9 =16.
\end{displaymath}
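For the reader's convenience, the two numbers entering this count can be checked
directly:
\[\dim\text{Hom}\big(\mathcal O(-2)\oplus\mathcal O(-1),\mathcal O\oplus\mathcal O(1)\big)=
\dim S^2V^*+\dim S^3V^*+\dim V^*+\dim S^2V^*=6+10+3+6=25 ,\]
of which $W^s(\sigma)$ is an open subset, while
\[\text{dim}(G)=\dim\big(\text{Aut}(\mathcal O(-2)\oplus\mathcal O(-1))\times\text{Aut}(\mathcal O\oplus\mathcal O(1))\big)-1
=5+5-1=9 .\]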
\medskip
\begin{subsub}\label{5.D}{\bf Theorem : }
The good quotient $W_1/\!\!/G$ is isomorphic to $X_1$.
In particular, $X_1$ is a closed hypersurface of $\text{M}_{\mathbb{P}^2}(4,4)$. The closed
subvariety $(W_1 \setminus W^s(\sigma))/\!\!/G$ of $X_1$ is isomorphic to the
subvariety of $\text{M}_{\mathbb{P}^2}(4,4)$ given by non stable sheaves which have a
factor of the form $\mathcal O_C(1)$ in their Jordan-H\"older filtration, for a cubic
curve $C \subset \mathbb{P}^2$, and is isomorphic to $\mathbb{P}(S^3V^*) \times \mathbb{P}(V^*)$.
\end{subsub}
{\bf Proof : } As in the proof of \ref{3.B}, we need to construct resolution \ref{5.C}
starting from the Beilinson spectral sequence for $\mathcal F$. Tableau (2.2.3) takes
the form
\begin{displaymath}
\xymatrix
{
\mathcal O(-2) & 0 & 0 \\
\mathcal O(-2) \ar[r]^{\f_3} & 4\mathcal O(-1) \ar[r]^{\f_4} & 4\mathcal O
}.
\end{displaymath}
The exact sequence (2.2.5) becomes
\begin{displaymath}
0 \longrightarrow \mathcal O(-2) \stackrel{\f_5}{\longrightarrow} \text{Coker}(\f_4) \longrightarrow \mathcal F \longrightarrow 0.
\end{displaymath}
Note that $\f_5$ lifts to a morphism $\mathcal O(-2) \longrightarrow 4\mathcal O$ because
$\text{Ext}^1(\mathcal O(-2),\text{Coker}(\f_3))=\lbrace 0\rbrace$. Combining with the exact
sequence (2.2.4) we obtain the resolution
\[\xymatrix{
0\ar[r] & \mathcal O(-2)\ar[r]^-{\begin{pmatrix}0 \\ \psi\end{pmatrix}} &
\mathcal O(-2) \oplus 4\mathcal O(-1)\ar[r]^-\f & 4\mathcal O\ar[r] & \mathcal F\ar[r] & 0.
}\]
The argument from remark 6.3 in \cite{maican} shows that, up to equivalence,
\begin{displaymath}
\psi^{\text{T}} =
\begin{pmatrix}
0 & X & Y & Z
\end{pmatrix}
\end{displaymath}
Now we distinguish two possibilities: firstly, up to equivalence,
\begin{displaymath}
\f =
\begin{pmatrix}
\f_{11} & 0 \\
\f_{21} & \f_{22}
\end{pmatrix}
\quad \text{for a morphism} \quad \f_{11}:\mathcal O(-2) \oplus \mathcal O(-1) \longrightarrow 2\mathcal O.
\end{displaymath}
We see that $\mathcal F$ surjects onto $\text{Coker}(\f_{11})$, hence this sheaf is supported
on a proper closed subset of $\mathbb{P}^2$, hence $\f_{11}$ is injective, hence
$\text{Coker}(\f_{11})$ has Hilbert polynomial $P(t)=3t+2$. This contradicts the
semistability of $\mathcal F$. The second possibility, in fact the only feasible one, is
that, modulo equivalence,
\begin{displaymath}
\f =
\begin{pmatrix}
\f_{11} & 0 \\
\f_{21} & \f_{22}
\end{pmatrix}
\quad \text{with} \quad \f_{11} =
\begin{pmatrix}
q & \ell
\end{pmatrix}
\quad \text{and} \quad \f_{22} =
\begin{pmatrix}
-Y & X & 0 \\
-Z & 0 & X \\
0 & -Z & Y
\end{pmatrix}
,
\end{displaymath}
\begin{displaymath}
\f_{11}: \mathcal O(-2) \oplus \mathcal O(-1) \longrightarrow \mathcal O, \qquad \f_{22}: 3\mathcal O(-1) \longrightarrow 3\mathcal O.
\end{displaymath}
From the semistability of $\mathcal F$ we see that $\ell \neq 0$. It is also easy to see
that
$\mathcal F$ is properly semistable if and only if $\ell$ divides $q$. Let $E$ denote
the set of pairs $(\psi,\f)$ as above. We can identify $E$ with an open subset
inside the affine space parametrized by the entries of $\f_{11}$ and $\f_{21}$,
so $E$ is irreducible and smooth. The subset given by the condition that $\ell$
divide $q$ has codimension 3, hence the morphism \ $\xi : E \to W_1$ \ can be
defined on its complement and then extended algebraically to $E$. Thus, for the
purpose of constructing $\xi$, we may assume in the sequel that $\ell$ does not
divide $q$. The snake lemma gives the exact sequence
\begin{displaymath}
0 \longrightarrow \text{Ker}(\f_{22}) \longrightarrow \mathcal O(-2) \longrightarrow \text{Ker}(\f_{11}) \longrightarrow \text{Coker}(\f_{22}) \longrightarrow \mathcal F
\longrightarrow \text{Coker}(\f_{11}) \longrightarrow 0 .
\end{displaymath}
As $\text{Ker}(\f_{22})= \mathcal O(-2)$, $\text{Ker}(\f_{11})=\mathcal O(-3)$, $\text{Coker}(\f_{22})= \mathcal O(1)$, we
obtain
the extension
\begin{displaymath}
0 \longrightarrow \mathcal O_C(1) \longrightarrow \mathcal F \longrightarrow \text{Coker}(\f_{11}) \longrightarrow 0.
\end{displaymath}
Here $C$ is the quartic curve given as the zero-set of the polynomial
\begin{displaymath}
f =
\begin{pmatrix}
Z & -Y & X
\end{pmatrix}
\ \f_{21} \
\begin{pmatrix}
-\ell \\ q
\end{pmatrix}
.
\end{displaymath}
From here on we construct a morphism to $W_1$ in the same manner as in the proof
of \ref{5.C}. $\Box$
\medskip
\begin{subsub}\label{5.E}{\bf Remark: }\rm
In the course of the above proof we have rediscovered lemma 4.10
from \cite{lepotier} which states that every stable sheaf $\mathcal F$ satisfying
$h^0(\mathcal F(-1))=1$ occurs as an extension
\begin{displaymath}
0 \longrightarrow \mathcal O_C(1) \longrightarrow \mathcal F \longrightarrow \mathcal O_S \longrightarrow 0
\end{displaymath}
with $C$ a quartic curve and $S$ a zero-dimensional scheme of length 2.
The ideal of $S$ is generated by $q$ and $\ell$.
\end{subsub}
\medskip
\begin{subsub}\label{5.F}{\bf Theorem : }
There are no sheaves $\mathcal F$ in $\text{M}_{\mathbb{P}^2}(4,4)$ satisfying the
condition \hfil\break$h^0(\mathcal F(-1)) \ge 2$.
\end{subsub}
{\bf Proof : }
Let $r$ be an integer such that $1\leq r\leq 3$. It is easy, using the
descriptions of $\text{M}_{\mathbb{P}^2}(r,r)$ given in the Introduction, to see that there are no
sheaves $\mathcal E$ in $\text{M}_{\mathbb{P}^2}(r,r)$ such that \ $h^0(\mathcal E(-1))\geq 2$. It follows that if
$\mathcal F$ is a semistable, non-stable sheaf with Hilbert polynomial $4m+4$, then we have
\ $h^0(\mathcal F(-1))\leq 1$.
According to \ref{open4}, $\text{M}_{\mathbb{P}^2}(4,4)\backslash X_0$ is the exceptional divisor of
the blowing-up \hfil\break$\text{M}_{\mathbb{P}^2}(4,4)\to N(3,4,4)$ \ along $\mathbb{P}^2$. Hence it is an
irreducible hypersurface. Since this hypersurface contains $X_1$ we have \
$\text{M}_{\mathbb{P}^2}(4,4)\backslash X_0=X_1$. The result follows immediately.
$\Box$
\end{sub}
\vskip 2cm
\end{document}
|
\begin{document}
\title{Temperature effects on quantum non-Markovianity via collision models}
\author{Zhong-Xiao Man}
\email{[email protected]}
\affiliation{School of Physics and Physical Engineering, Shandong Provincial Key Laboratory of Laser Polarization and Information Technology, Qufu Normal University, 273165, Qufu, China}
\author{Yun-Jie Xia}
\email{[email protected]}
\affiliation{School of Physics and Physical Engineering, Shandong Provincial Key Laboratory of Laser Polarization and Information Technology, Qufu Normal University, 273165, Qufu, China}
\author{Rosario Lo Franco}
\email{[email protected]}
\affiliation{Dipartimento di Energia, Ingegneria dell'Informazione e Modelli Matematici, Universit\`{a} di Palermo, Viale delle Scienze, Edificio 9, 90128 Palermo, Italy}
\affiliation{Dipartimento di Fisica e Chimica, Universit\`a di Palermo, via Archirafi 36, 90123 Palermo, Italy}
\begin{abstract}
Quantum non-Markovianity represents memory effects in the dynamics of an open system, which are typically weakened by temperature.
We here study the effects of environmental temperature on the non-Markovianity of an open quantum system by virtue of collision models. The environment is simulated by a chain of ancillary qubits that are prepared in thermal states with a finite temperature $T$. Two distinct non-Markovian mechanisms are considered via two types of collision models, one where the system $S$ consecutively interacts with the ancillas and a second where $S$ collides only with an intermediate system $S'$ which in turn interacts with the ancillas. We show that in both models the relation between non-Markovianity and temperature is non-monotonic. In particular, revivals of non-Markovianity may occur as temperature increases. We find that the physical reason behind this behavior can be revealed by examining a peculiar system-environment coherence exchange, leading to ancillary qubit coherence larger than system coherence which triggers information backflow from the environment to the system. These results provide insights on the mechanisms underlying the counterintuitive phenomenon of temperature-enhanced quantum memory effects.
\end{abstract}
\maketitle
\section{Introduction}
In most practical situations a quantum system is open, being coupled to an environment that induces decoherence and dissipation of the system quantum properties \cite{open}. The dynamics of an open quantum system is usually described with a Markov approximation through a family of completely positive trace-preserving reduced dynamical maps and a corresponding quantum master equation with a Lindblad generator \cite{Lindblad1,Lindblad2}. In this case, the memoryless environment is assumed to be able to recover instantly from the interaction, which induces a monotonic one-way flow of information from the system to the environment.
However, due to the increasing capability to manipulate quantum systems, in many scenarios the Markov approximation is no longer valid leading to the occurrence of non-Markovian dynamics \cite{NM1,NM2} and a backflow of information from the environment to the system. The non-Markovian dynamics not only embodies an important physical phenomenon linked to dynamical memory effects but also proves useful to enhance practical procedures, such as quantum-state engineering and quantum control \cite{NMuse1,NMuse2,NMuse3,NMuse4,ManPRA2015,
LoFrancoNatCom,darrigo2012AOP,lofrancoPRB}.
Non-Markovianity has recently attracted considerable attention, particularly concerning the
formulation of its quantitative measures \cite{BLP,measure2,measure3,measure4,measure5,measure6,ACHL}, its experimental demonstration \cite{NMexp1,NMexp2,NMexp3,NMexp4,NMexp5} and the exploration of its origin \cite{origin1,origin2}. Nevertheless, the role of non-Markovianity for the assessment of the properties of non-equilibrium quantum systems has remained so far little explored \cite{NMentropy,cascaded,NMPower,MultiLandauer,Implications}.
Non-Markovian dynamics can lead to a new type of entropy production term which is indispensable to recover the fluctuation relations for entropy \cite{NMentropy}. In a bipartite system interacting dissipatively with a thermal reservoir in a cascaded model, the emerging non-Markovianity of one of the subsystems enables a heat flow with non-exponential time behavior \cite{cascaded}. By means of Landauer's principle, it has been also shown that memory effects are strategical in maintaining work extraction by erasure in realistic environments \cite{NMPower}. Moreover, non-Markovian dynamics can induce the breakdown of the validity of Landauer's principle \cite{MultiLandauer,Implications}.
An efficient tool that makes the study of quantum thermodynamics in the non-Markovian regime possible \cite{cascaded,MultiLandauer,Implications} is the \textit{collision} model \cite{Rau1963,colli1,colli2,colli3,colli4,colli5,colli6,colli7,colli8,colli9,colli10,colli11,
colli12,colli13,colli14,colli15,colli16,colli17,colli18,colli19,colli20,colli21,colli22,cicc2017}. In the collision model, the environment is taken as a collection of $N$ ancillas organized in a chain and the system of interest $S$ interacts, or collides, at each time step with an ancilla. It has been shown that when the ancillas are initially uncorrelated and no correlations are created among them along the process, a Lindblad master equation can be derived \cite{colli1,colli2}. By introducing either correlations in the initial state of the ancillas or inter-ancilla collisions, one can then recover the dynamics of any indivisible, and thus non-Markovian, channel \cite{colli3,colli4,colli5,colli6}.
In other words, the non-Markovian dynamics can be achieved in the collision model when the system-environment interaction is mediated by the ancillary degrees of freedom. In analogy to the well-known situation where the non-Markovian dynamics of a system arises when it is coherently coupled to an auxiliary system in contact with a Markovian bath, a class of Lindblad-type master equations for a bipartite system has also been found through collision models such that the reduced master equation of the system of interest is derived exactly \cite{colli7}. By constructing such composite collision models, one can simulate many known instances of quantum non-Markovian dynamics, such as the emission of an atom into a reservoir with a Lorentzian, or multi-Lorentzian, spectral density or a qubit subject to random telegraph noise \cite{colli16}.
Although it is generally believed that quantum memory effects are more important at low temperatures \cite{Weiss}, the way temperature influences non-Markovianity depends on both quantum thermodynamics and open quantum system dynamics. For a qubit subject to a dephasing bath with an Ohmic-class spectrum, there exists a temperature-dependent critical value of the Ohmicity parameter for the onset of non-Markovianity which increases for high temperatures \cite{NMT1}. For a qubit in contact with a critical Ising spin thermal bath it has then been shown that the non-Markovianity decreases close to the critical point of the system in such a way that the higher the temperature, the higher the decrease \cite{NMT2}. Moreover, it is known that the non-Markovianity of a chromophore qubit in a super-Ohmic bath is reduced when the temperature increases \cite{NMT3}.
However, temperature may also enhance the non-Markovianity in some situations. For an inhomogeneous bosonic finite-chain environment, temperature has been shown to be a crucial factor in determining the character of the evolution and for certain parameter values non-Markovianity can increase with the temperature \cite{NMT4}. In a spin-boson model made of a two-level system which is linearly coupled to an environment of harmonic oscillators, a non-monotonic behavior of non-Markovianity as a function of temperature has been reported, with the system dynamics being strongly non-Markovian at low temperatures \cite{ClosPRA}. Another analysis, studying both entanglement and non-Markovianity measures to reveal how second-order weak-coupling master equations either overestimate or underestimate memory effects, suggests that non-Markovianity can be enriched by temperature \cite{NMT5}.
The above results, limited to specific situations, already show how subtle the effect of temperature on quantum non-Markovianity can be during an open system dynamics. In particular, the occurrence of temperature-enhanced memory effects remains counterintuitive and requires further studies which can unveil the underlying mechanisms. In this work we address this issue by means of suitable collision models, which reveal themselves specially advantageous to unveil the role of environmental elements in ruling the temperature-dependent non-Markovian dynamics of the system. We consider two types of collision models with different non-Markovian mechanisms, finding that in both models the variation of non-Markovianity as a function of temperature is not monotonic and providing the possible physical reason behind this phenomenon.
\section{Measure of non-Markovianity}
The degree of non-Markovianity in a dynamical process can be quantified by different measures, such as the BLP measure based on the distinguishability between the evolutions of two different initial states of the system \cite{BLP}, the LPP measure based on the volume of accessible states of the system \cite{measure2}, the RHP measure \cite{measure3} and the ACHL measure \cite{ACHL} based on the time-behavior of the master equation decay rates.
The trace distance between the evolutions of two different initial states $\rho_{1}(0)$ and $\rho_{2}(0)$ of an open system is one of the most employed quantifiers. Since a Markovian evolution can never increase the trace distance, an increase of the trace distance is a signature of non-Markovian dynamics of the system. Based on this concept, the non-Markovianity can be quantified by the BLP measure $\mathcal{N}$ defined as
\cite{BLP}
\begin{equation}
\mathcal{N}=\max_{\rho _{1}(0),\rho _{2}(0)}\int_{\sigma >0}\sigma [t,\rho
_{1}(0),\rho _{2}(0)]dt, \label{N}
\end{equation}
where $\sigma [t,\rho _{1}(0),\rho _{2}(0)]=dD[\rho _{1}(t),\rho_{2}(t)]/dt$ is the rate of change of the trace distance given by
\begin{equation}
D[\rho _{1}(t),\rho _{2}(t)]=\frac{1}{2}\mathrm{Tr}|\rho _{1}(t)-\rho
_{2}(t)|, \label{Tra-Dis}
\end{equation}
with $|A|=\sqrt{A^{\dag }A}$.
To evaluate the non-Markovianity $\mathcal{N}$, one then has to find a specific pair of optimal initial states to maximize the time derivative of the trace distance. In Ref. \cite{optimal}, it is proved that the pair of optimal states is associated with two antipodal pure states on the surface of the Bloch sphere. We thus adopt, as usual, the pair of optimal initial states $\rho _{1,2}(0)=\left| \psi _{1,2}(0)\right\rangle\left\langle \psi _{1,2}(0)\right|$ with $\left| \psi
_{1,2}(0)\right\rangle =(\left| 0\right\rangle \pm \left| 1\right\rangle )/\sqrt{2}$.
Since the dynamics of the system in the collision model is implemented via $N$ equal discrete time steps, in the following the measure $\mathcal{N}$ shall be computed by substituting $\sigma [t,\rho _{1}(0),\rho _{2}(0)]dt $ with the difference
$\Delta D[n] = D[\rho_{1,n},\rho_{2,n}]-D[\rho_{1,n-1},\rho_{2,n-1}]$ between the trace distances at steps $n$ and $n-1$ and then summing up all the positive contributions, that is
\begin{equation}
\mathcal{N}=\max_{\rho _{1}(0),\rho _{2}(0)} \sum_{n,\Delta D[n]>0}^N \Delta D[n].
\quad (n=1,2,\ldots,N)
\end{equation}
The value of the final collision step $N$ is taken such as to cover all the oscillations of the trace distance during the evolution.
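As an illustration of this discrete prescription, a minimal numerical sketch (in Python, with function names of our own choosing; it assumes that the reduced states of $S$ at the successive collision steps have already been computed) is the following:
\begin{verbatim}
import numpy as np

def trace_distance(rho1, rho2):
    # D = (1/2) Tr|rho1 - rho2|; for Hermitian matrices the singular
    # values coincide with the absolute values of the eigenvalues.
    return 0.5 * np.sum(np.abs(np.linalg.eigvalsh(rho1 - rho2)))

def blp_measure(states1, states2):
    # Sum of the positive increments of the trace distance over the
    # collision steps (discrete version of the BLP measure).
    D = np.array([trace_distance(r1, r2)
                  for r1, r2 in zip(states1, states2)])
    dD = np.diff(D)
    return np.sum(dD[dD > 0.0])
\end{verbatim}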
\section{Non-Markovianity in the direct collision model}\label{DCM}
\begin{figure}
\caption{(Color online) Schematic diagram of the direct collision model. (a) The system $S$ collides with the ancilla qubit $R_{1}$.}
\label{M1}
\end{figure}
In the first model, illustrated in Fig.~\ref{M1}, the system qubit $S$ directly interacts with the environment $\mathcal{R}$ which comprises $N$ identical qubits $R_{1},R_{2},\ldots,R_{N}$. The system qubit and a generic environment qubit are described, respectively, by the Hamiltonians ($\hbar=1$)
\begin{equation}
\hat{H}_{S}=\omega_{S} \hat{\sigma}_{z}^{S}/2,\quad
\hat{H}_{R}\equiv\hat{H}_{R_{n}}=\omega_{R} \hat{\sigma}_{z}^{R_{n}}/2,
\end{equation}
where $\hat{\sigma}_{z}^{\mu}=\left|1\right\rangle_{\mu}\left\langle1\right|-\left|0\right\rangle_{\mu}\left\langle0\right|$ is the Pauli operator and $\{\left|0\right\rangle_{\mu},\left|1\right\rangle_{\mu}\}$ are the logical states of the qubit $\mu=S,$ $R_{n}$ ($n=1,2,...,N$) with transition frequency $\omega_{\mu}$ (hereafter, for simplicity, we take $\omega_{R_{n}}=\omega_{R}=\omega_S = \omega$). The system-bath coupling is assumed to be ``white-noise'' (very large environment) so that the system never collides twice with the same qubit \cite{cicc2017}. As a consequence, at each collision step $n$ the system $S$ collides with a ``fresh" $R_{n}$.
Such a model can emulate, for a suitable combination of parameters and interactions, an atom coupled to a lossy cavity \cite{colli5}.
Among the possible choices for the interaction between $S$ and environment qubit $R_{n}$, here we focus on a Heisenberg-like coherent interaction described by the Hamiltonian
\begin{equation}\label{H}
\hat{H}_\mathrm{int}=g(\hat{\sigma}_{x}^{S}\otimes\hat{\sigma}_{x}^{R_{n}}+\hat{\sigma}_{y}^{S}\otimes\hat{\sigma}_{y}^{R_{n}}+\hat{\sigma}_{z}^{S}\otimes\hat{\sigma}_{z}^{R_{n}}),
\end{equation}
where $\hat{\sigma}_{j}^{\mu}$ ($j=x,y,z$) is the Pauli operator, $g$ denotes a coupling constant and each collision is described by a unitary operator $\hat{U}_{S,R_{n}}=e^{-i\hat{H}_\mathrm{int}\tau}$, $\tau$ being the collision time.
By means of the equality
\begin{equation}\label{eq}
e^{i\frac{\phi}{2}(\hat{\sigma}_{x}\otimes\hat{\sigma}_{x}+\hat{\sigma}_{y}\otimes\hat{\sigma}_{y}+\hat{\sigma}_{z}\otimes\hat{\sigma}_{z})}
=e^{-i\frac{\phi}{2}}(\cos\phi \ \hat{\mathbb{I}} +i\sin\phi \ \hat{\mathcal{S}})
\end{equation}
with $\hat{\mathbb{I}}$ the identity operator and $\hat{\mathcal{S}}$ the two qubit swap operator with the action $\left|\psi_{1}\right\rangle\otimes\left|\psi_{2}\right\rangle\rightarrow\left|\psi_{2}\right\rangle\otimes\left|\psi_{1}\right\rangle$
for all $\left|\psi_{1}\right\rangle,\left|\psi_{2}\right\rangle\in \mathbb{C}^{2}$, the unitary time evolution operator can be written as
\begin{equation}\label{swapSR1}
\hat{U}_{SR_{n}}=(\cos J)\ \hat{\mathbb{I}}_{SR_{n}}+i(\sin J)\ \hat{\mathcal{S}}_{SR_{n}},
\end{equation}
where $J=2g\tau$ is a dimensionless interaction strength between $S$ and $R_{n}$ which is supposed to be the same for any $n=1,2,\ldots,N$.
It is immediate to see that $J=\pi/2$ induces a complete swap between the state of $S$ and that of $R_{n}$. Thus, $0<J<\pi/2$ means a partial swap conveying the intuitive idea that, at each collision, part of the information contained in the state of $S$ is transferred into $R_{n}$.
In the ordered basis $\{\left|00\right\rangle_{SR_{n}},\left|01\right\rangle_{SR_{n}},\left|10\right\rangle_{SR_{n}},\left|11\right\rangle_{SR_{n}}\}$, $\hat{U}_{SR_{n}}$ reads
\begin{equation}\label{swapSR2}
\hat{U}_{SR_{n}}=\left(
\begin{array}{cccc}
e^{iJ} & 0 & 0 & 0 \\
0 & \cos J & i\sin J & 0 \\
0 & i\sin J & \cos J & 0 \\
0 & 0 & 0 & e^{iJ} \\
\end{array}
\right).
\end{equation}
In the present model, the non-Markovian dynamics of the system is introduced via the interactions between two nearest-neighbor qubits $R_{n}$ and $R_{n+1}$. Such interactions are described by an operation similar to that of Eq.~(\ref{swapSR2}), namely
\begin{equation}\label{swapRR}
\hat{V}_{R_{n}R_{n+1}}=\left(
\begin{array}{cccc}
e^{i\Omega} & 0 & 0 & 0 \\
0 & \cos \Omega & i\sin \Omega & 0 \\
0 & i\sin \Omega & \cos \Omega & 0 \\
0 & 0 & 0 & e^{i\Omega} \\
\end{array}
\right),
\end{equation}
where $0\leq\Omega\leq\pi/2$ is the dimensionless $R_{n}$-$R_{n+1}$ interaction strength which is taken to be the same for any $n$.
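Since $\hat{U}_{SR_{n}}$ of Eq.~(\ref{swapSR2}) and $\hat{V}_{R_{n}R_{n+1}}$ of Eq.~(\ref{swapRR}) are partial-swap unitaries of the same form, differing only in the value of the dimensionless strength, a single illustrative routine (our own sketch, written in the ordered two-qubit basis used above) generates both:
\begin{verbatim}
import numpy as np

def partial_swap(theta):
    # Two-qubit partial swap cos(theta)*I + i*sin(theta)*SWAP in the
    # ordered basis {|00>, |01>, |10>, |11>}; theta plays the role of J or Omega.
    swap = np.array([[1, 0, 0, 0],
                     [0, 0, 1, 0],
                     [0, 1, 0, 0],
                     [0, 0, 0, 1]], dtype=complex)
    return np.cos(theta) * np.eye(4, dtype=complex) + 1j * np.sin(theta) * swap
\end{verbatim}
For $\theta=\pi/2$ the routine returns, up to a global phase, the complete swap.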
\begin{figure}
\caption{(Color online) \textbf{(a)} Non-Markovianity $\mathcal{N}$ versus the intracollision strength $\Omega$ for different temperatures $T$. \textbf{(b)} $\mathcal{N}$ versus $T$ for different values of $\Omega$.}
\label{NM-Omiga-T}
\end{figure}
As illustrated in Fig.~\ref{M1} exemplifying the first two steps of collisions, in each step we consider the ordered triplet $(S, R_{n-1}, R_{n})$ in such a way that after the collision between $S$ and $R_{n-1}$ via the unitary operation $\hat{U}_{SR_{n-1}}$, the system shifts by
one site while $R_{n-1}$ collides with $R_{n}$ via $\hat{V}_{R_{n-1}R_{n}}$. Notice that the $R_{n-1}$-$R_n$ collision occurs before the $S$-$R_n$ collision, so that $S$ and $R_n$ are already correlated before they collide with each other. After the two collisions the three qubits can all be correlated, with total state $\rho_{SR_{n-1}R_{n}}$ (the correlations are labeled by the dashed lines in Fig.~\ref{M1}). Then, we trace out the qubit $R_{n-1}$, giving rise to the reduced state $\rho_{SR_{n}}$ of $S$-$R_{n}$, and proceed to the next step with the new ordered triplet $(S, R_{n}, R_{n+1})$.
Under the actions of $\hat{U}_{SR_{n}}$ of Eq.~(\ref{swapSR2}) and $\hat{V}_{R_{n}R_{n+1}}$ of Eq.~(\ref{swapRR}), the total state of $SR_{n}R_{n+1}$ at the step $n$ is obtained from the step $n-1$ as
\begin{eqnarray}\label{state-n}
&\rho_{SR_{n}R_{n+1}}=&\nonumber\\
&\hat{V}_{R_{n}R_{n+1}}\hat{U}_{SR_{n}}\left(\rho_{SR_{n}}\otimes
\rho_{R_{n+1}}\right)\hat{U}_{SR_{n}}^{\dag} \hat{V}_{R_{n}R_{n+1}}^{\dag},&
\end{eqnarray}
where $\rho_{R_{n+1}}\equiv\rho_{R}$ is the pre-collision state of the environmental qubit.
Here, to reveal the effect of environmental temperature on the non-Markovianity, we assume the environmental qubits are initially prepared in the same thermal states $\rho_{R}=e^{-\beta \hat{H}_{R}}/Z$ at temperature $T_R$, where $\beta=1/k_B T_R$ ($k_B$ being the Boltzmann constant) and $Z$ is the partition function. In our analysis, we consider a dimensionless temperature $T$ defined by $T \equiv k_BT_R / (\hbar\omega_0)$, where $\omega_0$ is a reference frequency. We also take values of $J\ll\omega/\omega_0$ so as to have small collision times and a weak interaction between the system and the environment qubits.
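A possible numerical encoding of the thermal ancillas and of the update rule of Eq.~(\ref{state-n}) is sketched below (our own illustrative conventions: qubit ordering $S,R_{n},R_{n+1}$, basis $\{\left|0\right\rangle,\left|1\right\rangle\}$, $\omega$ expressed in units of $\omega_0$, and the routine \texttt{partial\_swap} of the previous sketch is reused):
\begin{verbatim}
import numpy as np

def thermal_qubit(omega, T):
    # rho_R = exp(-H_R/T)/Z with H_R = omega*sigma_z/2 and
    # sigma_z = |1><1| - |0><0|, so |0> is the ground state.
    # omega is in units of omega_0 and T is the dimensionless temperature.
    if T == 0.0:
        return np.diag([1.0, 0.0]).astype(complex)
    w1 = np.exp(-omega / T)          # Boltzmann weight of |1> relative to |0>
    return np.diag([1.0, w1]).astype(complex) / (1.0 + w1)

def collision_step_direct(rho_SR, rho_fresh, J, Omega):
    # One collision step of the direct model: U_{S R_n} on (S, R_n),
    # then V_{R_n R_{n+1}} on (R_n, R_{n+1}); afterwards R_n is traced out.
    rho = np.kron(rho_SR, rho_fresh)                 # state of (S, R_n, R_{n+1})
    U = np.kron(partial_swap(J), np.eye(2))          # U_{S R_n} (x) 1
    V = np.kron(np.eye(2), partial_swap(Omega))      # 1 (x) V_{R_n R_{n+1}}
    rho = V @ U @ rho @ U.conj().T @ V.conj().T
    rho = rho.reshape(2, 2, 2, 2, 2, 2)
    # partial trace over the middle qubit R_n -> state of (S, R_{n+1})
    return np.einsum('ajkbjl->akbl', rho).reshape(4, 4)
\end{verbatim}
Iterating this map from $\rho_{SR_{1}}=\rho_{S}(0)\otimes\rho_{R}$ for the two optimal initial states of $S$, and feeding the reduced states of $S$ into \texttt{blp\_measure} above, yields the discrete trace-distance record from which $\mathcal{N}$ is computed.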
\begin{figure}
\caption{(Color online) Contour plot of the non-Markovianity $\mathcal{N}$ as a function of $\Omega$ and $T$.}
\label{cont-NM}
\end{figure}
In such a model, the system experiences a homogenization process and reaches asymptotically the very same state $\rho_{R}$ \cite{Implications}. The forward transfer of the lost information of the system $S$ via intracollisions of environment qubits triggers dynamical memory effects of the system, so that the non-Markovianity is closely related to the intracollision strength $\Omega$. Fig.~\ref{NM-Omiga-T}(a) shows the dependence of non-Markovianity $\mathcal{N}$ on $\Omega$ for different temperatures $T$ of the environment. In both zero temperature ($T=0$) and thermal environments ($T>0$), the non-Markovianity is activated when $\Omega$ exceeds a given threshold (see the inset of Fig.~\ref{NM-Omiga-T}(a) for a more evident demonstration) and then monotonically increases with $\Omega$. From this first analysis, it emerges that the thermal environment does not affect the monotonic relation between $\mathcal{N}$ and $\Omega$, while the thresholds of $\Omega$ triggering the non-Markovianity depend on the temperature.
On the other hand, the variations of non-Markovianity $\mathcal{N}$ with respect to the temperature $T$ can be rich and non-monotonic, as shown in Fig.~\ref{NM-Omiga-T}(b).
For relatively small values of $\Omega$ (e.g., $\Omega=0.83$), the increase of $T$ can enable the non-Markovianity which maintains nonzero values within a finite region of $T>0$. For larger values of $\Omega$, the system dynamics exhibits non-Markovian character already for a zero-temperature environment. In this case, the non-Markovianity approximately exhibits a plateau for small $T$ and then experiences successive decreasing and increasing behaviors, eventually vanishing at high temperatures. For particular values of the environment qubits interaction strength (e.g., $\Omega=0.9$, $0.95$), when $T$ increases we also observe that the non-Markovianity $\mathcal{N}$ may vanish within a finite interval of $T$ and then revive again. In other words, manipulations of the environment temperature $T$ can induce successive transitions between non-Markovian and Markovian regimes for the system dynamics.
A comprehensive picture for the dependence of $\mathcal{N}$ on $T$ and $\Omega$ is shown in Fig.~\ref{cont-NM}, where we can see the non-Markovianity thresholds of $\Omega$ (identified by the dotted red line) for a given $T$ and the crossovers between non-Markovian and Markovian regimes as $T$ increases for a given $\Omega$.
\begin{figure}
\caption{(Color online) Coherences $C_{S,n}$ and $C_{R,n}$ versus the collision step $n$ for different temperatures $T$, with $\Omega=0.95$.}
\label{M1coh}
\end{figure}
In order to gain a deeper understanding of the temperature effects on the non-Markovianity, we examine the coherence of the system, which is related to the trace distance of this model as $D[\rho_{1,n},\rho_{2,n}]=2C_{S,n}$, where $C_{S,n}=|\left\langle0\right|\rho_{S,n}\left|1\right\rangle|=|\left\langle1\right|\rho_{S,n}\left|0\right\rangle|$ is the coherence degree of the state $\rho_{S,n}$ of $S$ after the $n$-th collision, the initial state being prepared in
$\left| \psi\right\rangle_{S,0} =(\left| 0\right\rangle_{S,0} \pm \left| 1\right\rangle_{S,0} )/\sqrt{2}$. Notice that the latter is a \textit{bona fide} quantifier of coherence, being the half of the so-called $l_1$-norm measure within a resource theory \cite{baumgratzPRL,streltsovRMP}. Therefore, the initial coherence of $S$ has the maximum value $C_{S,0}=0.5$. The temporary growth of $C_{S,n}$ thus serves as a witness for the onset of non-Markovian dynamics. Moreover, to assess the role of the environmental constituents, we consider the coherence $C_{R,n}$ of the environment qubit $R_{n+1}$ transferred from $S$ after the $n$-th collision of $S$-$R_{n}$-$R_{n+1}$.
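Within the numerical sketch given above, both quantities can be read off from the joint state of $S$ and $R_{n+1}$ kept at each step (again an illustrative snippet with our own naming):
\begin{verbatim}
import numpy as np

def coherences(rho_SR):
    # C_S = |<0|rho_S|1>| and C_R = |<0|rho_R|1>| from the 4x4 joint
    # state of (S, R_{n+1}); tensor indices are (i_S, i_R, j_S, j_R).
    r = rho_SR.reshape(2, 2, 2, 2)
    rho_S = np.einsum('ikjk->ij', r)     # trace over the ancilla
    rho_R = np.einsum('kikj->ij', r)     # trace over the system
    return abs(rho_S[0, 1]), abs(rho_R[0, 1])
\end{verbatim}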
In Fig.~\ref{M1coh}(a)-(f), we illustrate the evolution of $C_{S,n}$ and $C_{R,n}$ versus $n$ for different temperatures with $\Omega=0.95$, whose non-Markovian character is plotted in Fig.~\ref{NM-Omiga-T}(b) (blue dot-dashed curve). An overall comparison of the panels in Fig.~\ref{M1coh}(a)-(f), which indicate a temperature range from $T=1$ to $T=7$, verifies the fact that the initial increase of temperature speeds up the decay of the system coherence $C_{S,n}$. Therefore, on the one hand, the increase of temperature suppresses and eventually terminates the non-Markovianity, as seen in Fig.~\ref{M1coh}(a)-(b) for $T=1$ and $T=2$ and already confirmed in Fig.~\ref{NM-Omiga-T}(b). On the other hand, however, the quick decay of $C_{S,n}$ can cause the coherence $C_{R,n}$ of the environment qubit $R_{n+1}$ to approach (see Fig.~\ref{M1coh}(c)) and even to exceed (see Fig.~\ref{M1coh}(d)-(e)-(f)) the coherence $C_{S,n}$ of the system. This behavior in turn induces the information backflow from the environment to the system, namely, a revival of non-Markovian regime. In fact, $C_{R,n}$ overcomes $C_{S,n}$ in correspondence to the recovery of the non-Markovian character of the system dynamics from a Markovian one (compare Fig.~\ref{M1coh}(d)-(e)-(f) and the blue dot-dashed curve of Fig.~\ref{NM-Omiga-T}(b)).
In the high temperature regime, the system coherence decays more quickly and the non-Markovian dynamics will cease if the intracollision strength $\Omega$ is not sufficiently large. For instance, from Fig.~\ref{NM-Omiga-T}(b) one sees that at $T=10$ the non-Markovianity vanishes if $\Omega=0.9$, while it remains nonzero when $\Omega=0.95$. In other words, to get non-Markovian dynamics (quantum memory effects) at high temperatures, one has to increase the interaction strength $\Omega$ between environmental ancillary qubits, which allows a more efficient transfer of environmental quantum coherence.
\section{Non-Markovianity in the indirect collision model}
\begin{figure}
\caption{(Color online) Schematic diagram of the indirect collision model. (a) The system $S$ collides with the intermediate qubit $S'$ and they become correlated, as denoted by the dashed line in panel (b). (b) The qubit $S'$ interacts with $R_{1}$.}
\label{M2}
\end{figure}
We now consider a mechanism of non-Markovian dynamics based on another collision model, where the interaction of the system qubit $S$ with the environment qubit $R_{n}$ is mediated by an intermediate qubit $S'$, as depicted in Fig.~\ref{M2}. Such a scenario implies that the information contained in $S$ is first transferred to $S'$ and then damped into $\mathcal{R}$ via the collisions between $S'$ and $R_{n}$. It is known that, in the absence of environmental intracollisions, this composite model can emulate (for short collision times and Jaynes-Cummings-type interactions) a two-level atom in a lossy cavity, $S'$ playing the role of the cavity mode \cite{colli16}. For straightforward extension, in the presence of environmental intracollisions, this model may represent a two-level atom in a reservoir with a photonic band gap \cite{colli16,laurapseudo,ManSciRep2015}.
We choose the Heisenberg-type coherent interaction between $S$ and $S'$, with interaction strength $0\leq \kappa\leq\pi/2$, represented by the unitary operator $\hat{U}_{SS'}$, analogous to that of Eq.~(\ref{swapSR2}), having the form
\begin{equation}\label{swapSS}
\hat{U}_{SS'}=\left(
\begin{array}{cccc}
e^{i\kappa} & 0 & 0 & 0 \\
0 & \cos \kappa & i\sin \kappa & 0 \\
0 & i\sin \kappa & \cos \kappa & 0 \\
0 & 0 & 0 & e^{i\kappa} \\
\end{array}
\right).
\end{equation}
The unitary operators $\hat{U}_{S'R_{n}}$ and $\hat{V}_{R_{n}R_{n+1}}$ representing, respectively, the $S'$-$R_{n}$ interaction and the interaction between adjacent environment qubits are the same of Eqs.~(\ref{swapSR2}) and (\ref{swapRR}) with interaction strengths $J$ and $\Omega$.
As shown in Fig.~\ref{M2}, in each round of collisions we deal with four qubits $(S, S', R_{n-1}, R_{n})$ in such a way that, after the collisions of $S$-$S'$ and $S'$-$R_{n-1}$, the qubits $S$ and $S'$ shift by one site while $R_{n-1}$ collides with $R_{n}$, which results in the correlated total state $\rho_{SS'R_{n-1}R_{n}}$ (the correlations are indicated by dashed lines). Then, we trace out the qubit $R_{n-1}$ obtaining the reduced state $\rho_{SS'R_{n}}$ of $SS'R_{n}$ and proceed to the next step with the new ordered group $(S, S', R_{n}, R_{n+1})$.
As a consequence, the total state of $SS'R_{n}R_{n+1}$ at the $n$-th collision is determined from the $(n-1)$-th collision as
\begin{eqnarray}\label{state-SSRR}
&\rho_{SS'R_{n}R_{n+1}}=&\nonumber\\
&\hat{V}_{R_{n}R_{n+1}}\hat{U}_{S'R_{n}}\hat{U}_{SS'}\left(\rho_{SS'R_{n}}\otimes \rho_{R_{n+1}}\right)\hat{U}_{SS'}^{\dag}\hat{U}_{S'R_{n}}^{\dag} \hat{V}_{R_{n}R_{n+1}}^{\dag}.&\nonumber\\
\end{eqnarray}
The temperature effects are included in this model by considering the qubit $S'$ and all the environmental qubits prepared in the same thermal states $\rho_{R}=e^{-\beta \hat{H}_{R}}/Z$ with temperature $T$.
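The update rule of Eq.~(\ref{state-SSRR}) can be encoded along the same lines as for the direct model (a sketch with qubit ordering $S,S',R_{n},R_{n+1}$, again reusing the routine \texttt{partial\_swap} introduced in Sec.~\ref{DCM}):
\begin{verbatim}
import numpy as np

def collision_step_indirect(rho_SSpR, rho_fresh, kappa, J, Omega):
    # One collision step of the indirect model: U_{SS'} on (S, S'),
    # then U_{S'R_n} on (S', R_n), then V_{R_n R_{n+1}} on (R_n, R_{n+1});
    # afterwards R_n is traced out, leaving the state of (S, S', R_{n+1}).
    I2 = np.eye(2)
    rho = np.kron(rho_SSpR, rho_fresh)          # state of (S, S', R_n, R_{n+1})
    U_SSp = np.kron(np.kron(partial_swap(kappa), I2), I2)
    U_SpR = np.kron(np.kron(I2, partial_swap(J)), I2)
    V_RR  = np.kron(np.kron(I2, I2), partial_swap(Omega))
    W = V_RR @ U_SpR @ U_SSp
    rho = (W @ rho @ W.conj().T).reshape([2] * 8)
    # partial trace over the third qubit R_n
    return np.einsum('abjcdejf->abcdef', rho).reshape(8, 8)
\end{verbatim}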
\subsection{Absence of collisions between environment qubits}
\begin{figure}
\caption{(Color online) Non-Markovianity $\mathcal{N}$ versus the $S$-$S'$ interaction strength $\kappa$ for different values of $J$, in the absence of intracollisions between environment qubits ($\Omega=0$).}
\label{M2NMkT1}
\end{figure}
In this subsection, we consider the non-Markovianity in the absence of collisions between environment qubits, i.e., for $\Omega=0$. For this indirect collision model, the information of the system $S$ is first transferred to the qubit $S'$ via the coherent interaction and then dissipated to the environment through the collisions between $S'$ and environment qubits.
In this case, the intermediate qubit $S'$ can have the role of a quantum memory leading to the non-Markovian dynamics even without collisions between environment qubits.
The interaction strength $\kappa$ between $S$ and $S'$ is then crucial in activating the non-Markovianity, as verified in Fig.~\ref{M2NMkT1} where $\mathcal{N}$ increases with $\kappa$ for a given $J$ at a fixed temperature. Moreover, the non-Markovianity achieves a nonzero value only when $\kappa$ is greater than a threshold and the larger the value of $J$, the larger the threshold of $\kappa$ required to trigger the non-Markovian regime. From Fig.~\ref{M2NMkT1} one also observes that, for a given $\kappa$, the non-Markovianity $\mathcal{N}$ decreases with $J$, which implies that a strong interaction between $S'$ and the environment qubits weakens the non-Markovianity of the system $S$.
In Fig.~\ref{NM-T-Omiga0}, the effect of the temperature $T$ on the non-Markovianity is taken into account for different values of $\kappa$. We notice that the non-Markovianity as a function of $T$ is very sensitive to the value of $\kappa$, in that it can: decrease directly to zero (e.g., for $\kappa=0.30$), disappear for a finite range of temperature and then revive (e.g., for $\kappa=0.33$), or decrease to a minimum value and then slowly grow for larger $\kappa$. Remarkably, non-Markovianity can persist at high temperatures provided that the values of
$\kappa$ are sufficiently large.
\begin{figure}
\caption{(Color online) Non-Markovianity $\mathcal{N}$ versus the temperature $T$ for different values of $\kappa$, with $\Omega=0$.}
\label{NM-T-Omiga0}
\end{figure}
\begin{figure}
\caption{(Color online) Non-Markovianity $\mathcal{N}$ versus $\Omega$ at zero temperature: (a) for relatively small values of $\kappa$; (b) for larger values of $\kappa$.}
\label{M2_NMOmiga_T0_J}
\end{figure}
\subsection{Presence of collisions between environment qubits}
\begin{figure}
\caption{(Color online) (a) Non-Markovianity $\mathcal{N}$ versus $\Omega$. (b) $\mathcal{N}$ versus the temperature $T$ for different values of $\Omega$.}
\label{M2_NMOmiga_T}
\end{figure}
Now we take the intracollisions between environment qubits $R_{n}$ and $R_{n+1}$ into account so that the two mechanisms of non-Markovian dynamics, namely the interaction
$S$-$S'$ ruled by $\kappa$ and the interaction $R_{n}$-$R_{n+1}$ ruled by $\Omega$, coexist in one and the same model.
We first explore the role of $\Omega$ in enhancing the non-Markovianity at zero temperature. When the coupling between $S$ and $S'$ is weak with relatively small $\kappa$, we know from the previous subsection that the dynamics of $S$ is Markovian ($\mathcal{N}=0$) if
$\Omega=0$. As shown in Fig.~\ref{M2_NMOmiga_T0_J}(a), by introducing the interactions $R_{n}$-$R_{n+1}$ a threshold of $\Omega$ exists which triggers a non-Markovian regime. Such a threshold increases with $\kappa$: namely, the smaller the value of $\kappa$, the larger the threshold of $\Omega$. The subsequent variations of $\mathcal{N}$ with $\Omega$ are non-monotonic. In particular, we find that the activated non-Markovianity $\mathcal{N}$ can disappear within a finite interval of $\Omega$ and then reappear (e.g., for $\kappa=0.04, 0.06$). When the coupling between $S$ and $S'$ is strong with larger $\kappa$, the dynamics may be already non-Markovian even for $\Omega=0$, as seen in Fig.~\ref{NM-T-Omiga0} at $T=0$ and shown more in detail in Fig.~\ref{M2_NMOmiga_T0_J}(b). In this case, the non-Markovianity can be further enriched by introducing the interactions between environmental qubits $R_{n}$-$R_{n+1}$.
The effects of the environment temperature on the non-Markovianity are displayed in Fig.~\ref{M2_NMOmiga_T}. In particular, $\mathcal{N}$ exhibits a non-monotonic variation with respect to the environmental qubit interaction strength $\Omega$ (see Fig.~\ref{M2_NMOmiga_T}(a)), with an initial descent followed by an ascent. Once again, we notice that the non-Markovianity can completely disappear for a finite range of $\Omega$ and then revive. The non-Markovianity $\mathcal{N}$ as a function of $T$ is then shown in Fig.~\ref{M2_NMOmiga_T}(b), where we observe that the non-Markovianity is unavoidably weakened by increasing $T$ from zero for all the given $\Omega$, but it does not necessarily vanish at larger values of the temperature. In fact, $\mathcal{N}$ can increase slowly (e.g., for $\Omega=0.2$), disappear completely (e.g., for $\Omega=0.5$), collapse and revive (e.g., for $\Omega=0.9$), or oscillate (e.g., for $\Omega=1.0$). A comprehensive picture of the variation of $\mathcal{N}$ as a function of both $\Omega$ and $T$, for fixed $\kappa$ and $J$, is given in Fig.~\ref{CPM2NMTOmiga}, where the behaviors detailed above can be retrieved. The values of $\kappa$ and $J$ are such that the system dynamics is non-Markovian for $T=0$ and $\Omega=0$. Such a plot makes it immediately apparent how, in this composite indirect collision model, the temperature affects the system non-Markovianity in a different way from the direct collision model treated in Sec.~\ref{DCM}. As a matter of fact, Fig.~\ref{CPM2NMTOmiga} shows that the temperature has a general detrimental effect on the non-Markovianity, whose value at higher temperatures never exceeds that at $T=0$, in contrast to what happens for the direct collision model (see Fig.~\ref{NM-Omiga-T}(b)). However, there exists a range of values of $\Omega$ for which a temperature threshold can be found that reactivates the dynamical memory effects of the system lost at lower temperatures. In analogy with the direct collision model, such a feature can be traced back to peculiar coherence exchanges from the system $S$ to the environmental components.
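For completeness, we report a minimal numerical sketch of the composite collision model considered in this section. It is not the code used to produce the figures: it assumes partial-swap collisions as in the parameterization recalled above, initializes $S'$ in its ground state, and estimates $\mathcal{N}$ for a single fixed pair of initial states of $S$ (no maximization over state pairs); the collision angles \texttt{th\_k}, \texttt{th\_J} and \texttt{th\_O} play the role of $\kappa$, $J$ and $\Omega$.
\begin{verbatim}
import numpy as np

# Minimal sketch (NOT the code used for the figures) of the composite collision
# model: assumed partial-swap collisions, S' initially in its ground state,
# thermal environment qubits, and a BLP-like estimate of N for one fixed pair
# of initial states of S.

I2 = np.eye(2, dtype=complex)
SWAP = np.array([[1, 0, 0, 0],
                 [0, 0, 1, 0],
                 [0, 1, 0, 0],
                 [0, 0, 0, 1]], dtype=complex)

def pswap(theta):
    """Partial-swap collision unitary U = cos(theta)*1 + i*sin(theta)*SWAP."""
    return np.cos(theta) * np.eye(4, dtype=complex) + 1j * np.sin(theta) * SWAP

def thermal_qubit(omega, T):
    """Gibbs state of a qubit with free Hamiltonian (omega/2)*sigma_z (k_B = 1)."""
    if T == 0:
        return np.diag([0.0, 1.0]).astype(complex)        # ground state of sigma_z
    w = np.array([np.exp(-omega / (2 * T)), np.exp(omega / (2 * T))])
    return np.diag(w / w.sum()).astype(complex)

def ptrace(rho, dims, keep):
    """Partial trace of rho over the subsystems not listed in keep."""
    n = len(dims)
    keep = sorted(keep)
    drop = [i for i in range(n) if i not in keep]
    perm = keep + [i + n for i in keep] + drop + [i + n for i in drop]
    r = rho.reshape(dims + dims).transpose(perm)
    dk = int(np.prod([dims[i] for i in keep]))
    dd = int(np.prod([dims[i] for i in drop]))
    return np.einsum('abkk->ab', r.reshape(dk, dk, dd, dd))

def trace_distance(r1, r2):
    return 0.5 * np.sum(np.abs(np.linalg.eigvalsh(r1 - r2)))

def evolve(rho_S, n_coll, th_k, th_J, th_O, omega=1.0, T=0.0):
    """Reduced state of S after each collision of the composite model."""
    rho_Sp = np.diag([0.0, 1.0]).astype(complex)          # S' in its ground state
    rho = np.kron(np.kron(rho_S, rho_Sp), thermal_qubit(omega, T))  # (S, S', R_n)
    U_SSp = np.kron(pswap(th_k), I2)                      # S-S' collision
    U_SpR = np.kron(I2, pswap(th_J))                      # S'-R_n collision
    U_RR = np.kron(np.eye(4, dtype=complex), pswap(th_O)) # R_n-R_{n+1} intracollision
    states = []
    for _ in range(n_coll):
        rho = U_SSp @ rho @ U_SSp.conj().T
        rho = U_SpR @ rho @ U_SpR.conj().T
        rho = np.kron(rho, thermal_qubit(omega, T))       # append fresh R_{n+1}
        rho = U_RR @ rho @ U_RR.conj().T
        rho = ptrace(rho, [2, 2, 2, 2], [0, 1, 3])        # discard R_n
        states.append(ptrace(rho, [2, 2, 2], [0]))
    return states

def non_markovianity(th_k, th_J, th_O, T, n_coll=200):
    """Sum of the positive increments of the trace distance (no maximization)."""
    plus = 0.5 * np.array([[1, 1], [1, 1]], dtype=complex)
    minus = 0.5 * np.array([[1, -1], [-1, 1]], dtype=complex)
    D = [trace_distance(a, b) for a, b in
         zip(evolve(plus, n_coll, th_k, th_J, th_O, T=T),
             evolve(minus, n_coll, th_k, th_J, th_O, T=T))]
    return sum(max(D[n + 1] - D[n], 0.0) for n in range(len(D) - 1))

if __name__ == "__main__":
    print(non_markovianity(th_k=0.05, th_J=0.5, th_O=0.8, T=0.0))
\end{verbatim}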
\begin{figure}
\caption{(Color online) Contour plot of the non-Markovianity $\mathcal{N}$ as a function of $\Omega$ and $T$, for fixed values of $\kappa$ and $J$.}
\label{CPM2NMTOmiga}
\end{figure}
\section{Conclusion}
In conclusion, we have studied the effects of temperature on the non-Markovian character of the dynamics of an open quantum system by means of two types of collision models, which entail different mechanisms for the occurrence of non-Markovianity.
In the first model, that is, the direct collision model, the system $S$ consecutively interacts with a chain of environment qubits prepared in the same thermal state at temperature $T$, and the non-Markovianity $\mathcal{N}$ is induced by the intracollisions between environment qubits. As expected, the non-Markovian dynamics can be triggered when the intracollision strength is greater than a temperature-dependent threshold. In striking contrast to the usual understanding of the effect of the temperature on non-Markovianity \cite{NMT1,NMT2,NMT3,NMT4,NMT5}, we have found that the behavior of $\mathcal{N}$ as a function of $T$ is non-monotonic, exhibiting reduction followed by enhancement as the temperature increases. In particular, we have shown that the non-Markovianity can vanish within a finite interval of $T$ and then reappear as $T$ increases further. We have given a possible interpretation of this counterintuitive revival of dynamical memory effects by resorting to the exchanges of coherence between the system and the environment qubits. In fact, although the temperature accelerates the decay of the system coherence and suppresses the non-Markovianity up to certain values of $T$, in the high-temperature regime this quick decay of the system coherence can cause the coherence transferred to the environment qubits to exceed that of the system. This mechanism in turn induces a backflow of information from the environment to the system and thus non-Markovian dynamics.
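For reference, a standard quantifier suitable for tracking such coherence exchanges is the $l_{1}$-norm of coherence \cite{baumgratzPRL},
\begin{equation}
C_{l_{1}}(\rho)=\sum_{i\neq j}\bigl|\rho_{ij}\bigr|,
\end{equation}
evaluated in the chosen reference basis for the system and for the environment qubits (whether or not it coincides with the quantifier employed in the preceding analysis); in this language, the high-temperature revival of $\mathcal{N}$ corresponds to the coherence accumulated by the environment qubits overtaking that retained by the system.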
In the second model, that is, the indirect collision model, the system $S$ interacts with the environment qubits only indirectly, through collisions with an intermediate qubit $S'$. In this case, $S'$ serves as a memory for the information transferred from $S$ towards the environment, representing a distinct non-Markovian mechanism. Without intracollisions between environment qubits, non-Markovian dynamics of the system can still arise provided that the $S$-$S'$ interaction strength is sufficiently large. Moreover, the non-monotonic relation between the non-Markovianity measure $\mathcal{N}$ and $T$ is once again observed. When the environmental intracollisions are taken into account, the two mentioned non-Markovian mechanisms coexist in the same model. In this case we have found that the presence of interactions between environmental qubits enriches the non-Markovianity. The temperature now has the general effect of reducing the degree of non-Markovianity with respect to its value at zero temperature. However, the non-Markovianity of the system can once again exhibit revivals as a function of the temperature.
Our findings within collision models are corroborated by realistic composite quantum systems that exhibit a non-monotonic relation between non-Markovianity and temperature \cite{NMT4,ClosPRA}. More generally, our results contribute towards the capability of engineering suitable environments, with optimal temperature conditions, to exploit the dynamical memory effects of an open quantum system, which is strategic for noisy intermediate-scale quantum information processing \cite{NISQ}.
\acknowledgements
R.L.F. acknowledges Francesco Ciccarello for fruitful discussions and comments.
This work is supported by the National Natural Science Foundation of China under Grant Nos.~11574178 and 61675115, and by the Shandong Provincial Natural Science Foundation of China under Grant No.~ZR2016JL005.
\begin{thebibliography}{69}
\makeatletter
\providecommand \@ifxundefined [1]{
\@ifx{#1\undefined}
}
\providecommand \@ifnum [1]{
\ifnum #1\expandafter \@firstoftwo
\else \expandafter \@secondoftwo
\fi
}
\providecommand \@ifx [1]{
\ifx #1\expandafter \@firstoftwo
\else \expandafter \@secondoftwo
\fi
}
\providecommand \natexlab [1]{#1}
\providecommand \enquote [1]{``#1''}
\providecommand \bibnamefont [1]{#1}
\providecommand \bibfnamefont [1]{#1}
\providecommand \citenamefont [1]{#1}
\providecommand \href@noop [0]{\@secondoftwo}
\providecommand \href [0]{\begingroup \@sanitize@url \@href}
\providecommand \@href[1]{\@@startlink{#1}\@@href}
\providecommand \@@href[1]{\endgroup#1\@@endlink}
\providecommand \@sanitize@url [0]{\catcode `\\12\catcode `\$12\catcode
`\&12\catcode `\#12\catcode `\^12\catcode `\_12\catcode `\%12\relax}
\providecommand \@@startlink[1]{}
\providecommand \@@endlink[0]{}
\providecommand \url [0]{\begingroup\@sanitize@url \@url }
\providecommand \@url [1]{\endgroup\@href {#1}{\urlprefix }}
\providecommand \urlprefix [0]{URL }
\providecommand \Eprint [0]{\href }
\providecommand \doibase [0]{http://dx.doi.org/}
\providecommand \selectlanguage [0]{\@gobble}
\providecommand \bibinfo [0]{\@secondoftwo}
\providecommand \bibfield [0]{\@secondoftwo}
\providecommand \translation [1]{[#1]}
\providecommand \BibitemOpen [0]{}
\providecommand \bibitemStop [0]{}
\providecommand \bibitemNoStop [0]{.\EOS\space}
\providecommand \EOS [0]{\spacefactor3000\relax}
\providecommand \BibitemShut [1]{\csname bibitem#1\endcsname}
\let\auto@bib@innerbib\@empty
\bibitem [{\citenamefont {Breuer}\ and\ \citenamefont
{Petruccione}(2002)}]{open}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {H.-P.}\ \bibnamefont
{Breuer}}\ and\ \bibinfo {author} {\bibfnamefont {F.}~\bibnamefont
{Petruccione}},\ }\href@noop {} {\emph {\bibinfo {title} {Theory of Open
Quantum Systems}}}\ (\bibinfo {publisher} {Oxford University Press, New
York},\ \bibinfo {year} {2002})\BibitemShut {NoStop}
\bibitem [{\citenamefont {Lindblad}(1976)}]{Lindblad1}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {G.}~\bibnamefont
{Lindblad}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{Commun. Math. Phys.}\ }\textbf {\bibinfo {volume} {48}},\ \bibinfo {pages}
{119} (\bibinfo {year} {1976})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Gorini}\ \emph {et~al.}(1976)\citenamefont {Gorini},
\citenamefont {Kossakowski},\ and\ \citenamefont {Sudarshan}}]{Lindblad2}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {V.}~\bibnamefont
{Gorini}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Kossakowski}},
\ and\ \bibinfo {author} {\bibfnamefont {E.~C.~G.}\ \bibnamefont
{Sudarshan}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {J.
Math. Phys. (NY)}\ }\textbf {\bibinfo {volume} {17}},\ \bibinfo {pages} {821}
(\bibinfo {year} {1976})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {de~Vega}\ and\ \citenamefont {Alonso}(2017)}]{NM1}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {I.}~\bibnamefont
{de~Vega}}\ and\ \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont
{Alonso}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Dynamics of
non-{Markovian} open quantum systems},}\ }\href {\doibase
10.1103/RevModPhys.89.015001} {\bibfield {journal} {\bibinfo {journal}
{Rev. Mod. Phys.}\ }\textbf {\bibinfo {volume} {89}},\ \bibinfo {pages}
{015001} (\bibinfo {year} {2017})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Breuer}\ \emph {et~al.}(2016)\citenamefont {Breuer},
\citenamefont {Laine}, \citenamefont {Piilo},\ and\ \citenamefont
{Vacchini}}]{NM2}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {H.-P.}\ \bibnamefont
{Breuer}}, \bibinfo {author} {\bibfnamefont {E.-M.}\ \bibnamefont {Laine}},
\bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Piilo}}, \ and\ \bibinfo
{author} {\bibfnamefont {B.}~\bibnamefont {Vacchini}},\ }\bibfield {title}
{\enquote {\bibinfo {title} {{Non-Markovian} dynamics in open quantum
systems},}\ }\href {\doibase 10.1103/RevModPhys.88.021002} {\bibfield
{journal} {\bibinfo {journal} {Rev. Mod. Phys.}\ }\textbf {\bibinfo {volume}
{88}},\ \bibinfo {pages} {021002} (\bibinfo {year} {2016})}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Bellomo}\ \emph {et~al.}(2008)\citenamefont
{Bellomo}, \citenamefont {{Lo Franco}},\ and\ \citenamefont
{Compagno}}]{NMuse1}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {B.}~\bibnamefont
{Bellomo}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {{Lo
Franco}}}, \ and\ \bibinfo {author} {\bibfnamefont {G.}~\bibnamefont
{Compagno}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Entanglement
dynamics of two independent qubits in environments with and without
memory},}\ }\href {\doibase 10.1103/PhysRevA.77.032342} {\bibfield {journal}
{\bibinfo {journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume} {77}},\
\bibinfo {pages} {032342} (\bibinfo {year} {2008})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Bellomo}\ \emph {et~al.}(2007)\citenamefont
{Bellomo}, \citenamefont {{Lo Franco}},\ and\ \citenamefont
{Compagno}}]{NMuse2}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {B.}~\bibnamefont
{Bellomo}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {{Lo
Franco}}}, \ and\ \bibinfo {author} {\bibfnamefont {G.}~\bibnamefont
{Compagno}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Non-{Markovian} effects on the dynamics of entanglement},}\ }\href {\doibase
10.1103/PhysRevLett.99.160502} {\bibfield {journal} {\bibinfo {journal}
{Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {99}},\ \bibinfo {pages}
{160502} (\bibinfo {year} {2007})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Huelga}\ \emph {et~al.}(2012)\citenamefont {Huelga},
\citenamefont {Rivas},\ and\ \citenamefont {Plenio}}]{NMuse3}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {S.~F.}\ \bibnamefont
{Huelga}}, \bibinfo {author} {\bibfnamefont {\'A.}\ \bibnamefont {Rivas}}, \
and\ \bibinfo {author} {\bibfnamefont {M.~B.}\ \bibnamefont {Plenio}},\
}\bibfield {title} {\enquote {\bibinfo {title} {Non-{Markovianity}-assisted
steady state entanglement},}\ }\href {\doibase
10.1103/PhysRevLett.108.160402} {\bibfield {journal} {\bibinfo {journal}
{Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {108}},\ \bibinfo {pages}
{160402} (\bibinfo {year} {2012})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Chin}\ \emph {et~al.}(2012)\citenamefont {Chin},
\citenamefont {Huelga},\ and\ \citenamefont {Plenio}}]{NMuse4}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {A.~W.}\ \bibnamefont
{Chin}}, \bibinfo {author} {\bibfnamefont {S.~F.}\ \bibnamefont {Huelga}}, \
and\ \bibinfo {author} {\bibfnamefont {M.~B.}\ \bibnamefont {Plenio}},\
}\bibfield {title} {\enquote {\bibinfo {title} {Quantum metrology in
non-{Markovian} environments},}\ }\href {\doibase
10.1103/PhysRevLett.109.233601} {\bibfield {journal} {\bibinfo {journal}
{Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {109}},\ \bibinfo {pages}
{233601} (\bibinfo {year} {2012})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Man}\ \emph {et~al.}(2015{\natexlab{a}})\citenamefont
{Man}, \citenamefont {Xia},\ and\ \citenamefont {{Lo Franco}}}]{ManPRA2015}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Z.~X.}\ \bibnamefont
{Man}}, \bibinfo {author} {\bibfnamefont {Y.~J.}\ \bibnamefont {Xia}}, \ and\
\bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {{Lo Franco}}},\
}\bibfield {title} {\enquote {\bibinfo {title} {{Harnessing non-Markovian
quantum memory by environmental coupling}},}\ }\href@noop {} {\bibfield
{journal} {\bibinfo {journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume}
{92}},\ \bibinfo {pages} {012315} (\bibinfo {year}
{2015}{\natexlab{a}})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Xu}\ \emph {et~al.}(2013)\citenamefont {Xu} \emph
{et~al.}}]{LoFrancoNatCom}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {J.-S.}\ \bibnamefont
{Xu}} \emph {et~al.},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Experimental recovery of quantum correlations in absence of
system-environment back-action},}\ }\href@noop {} {\bibfield {journal}
{\bibinfo {journal} {Nat. Comm.}\ }\textbf {\bibinfo {volume} {4}},\
\bibinfo {pages} {2851} (\bibinfo {year} {2013})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {{D'Arrigo}}\ \emph {et~al.}(2014)\citenamefont
{{D'Arrigo}}, \citenamefont {{Lo Franco}}, \citenamefont {Benenti},
\citenamefont {Paladino},\ and\ \citenamefont {Falci}}]{darrigo2012AOP}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont
{{D'Arrigo}}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {{Lo
Franco}}}, \bibinfo {author} {\bibfnamefont {G.}~\bibnamefont {Benenti}},
\bibinfo {author} {\bibfnamefont {E.}~\bibnamefont {Paladino}}, \ and\
\bibinfo {author} {\bibfnamefont {G.}~\bibnamefont {Falci}},\ }\bibfield
{title} {\enquote {\bibinfo {title} {Recovering entanglement by local
operations},}\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{Ann. Phys.}\ }\textbf {\bibinfo {volume} {350}},\ \bibinfo {pages} {211}
(\bibinfo {year} {2014})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {{Lo Franco}}\ \emph {et~al.}(2014)\citenamefont {{Lo
Franco}}, \citenamefont {D'Arrigo}, \citenamefont {Falci}, \citenamefont
{Compagno},\ and\ \citenamefont {Paladino}}]{lofrancoPRB}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {{Lo
Franco}}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {D'Arrigo}},
\bibinfo {author} {\bibfnamefont {G.}~\bibnamefont {Falci}}, \bibinfo
{author} {\bibfnamefont {G.}~\bibnamefont {Compagno}}, \ and\ \bibinfo
{author} {\bibfnamefont {E.}~\bibnamefont {Paladino}},\ }\bibfield {title}
{\enquote {\bibinfo {title} {Preserving entanglement and nonlocality in
solid-state qubits by dynamical decoupling},}\ }\href@noop {} {\bibfield
{journal} {\bibinfo {journal} {Phys. Rev. B}\ }\textbf {\bibinfo {volume}
{90}},\ \bibinfo {pages} {054304} (\bibinfo {year} {2014})}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Breuer}\ \emph {et~al.}(2009)\citenamefont {Breuer},
\citenamefont {Laine},\ and\ \citenamefont {Piilo}}]{BLP}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {H.-P.}\ \bibnamefont
{Breuer}}, \bibinfo {author} {\bibfnamefont {E.-M.}\ \bibnamefont {Laine}}, \
and\ \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Piilo}},\ }\bibfield
{title} {\enquote {\bibinfo {title} {Measure for the degree of
non-{Markovian} behavior of quantum processes in open systems},}\ }\href
{\doibase 10.1103/PhysRevLett.103.210401} {\bibfield {journal} {\bibinfo
{journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {103}},\ \bibinfo
{pages} {210401} (\bibinfo {year} {2009})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Lorenzo}\ \emph {et~al.}(2013)\citenamefont
{Lorenzo}, \citenamefont {Plastina},\ and\ \citenamefont
{Paternostro}}]{measure2}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {S.}~\bibnamefont
{Lorenzo}}, \bibinfo {author} {\bibfnamefont {F.}~\bibnamefont {Plastina}}, \
and\ \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Paternostro}},\
}\bibfield {title} {\enquote {\bibinfo {title} {Geometrical characterization
of non-{Markovianity}},}\ }\href {\doibase 10.1103/PhysRevA.88.020102}
{\bibfield {journal} {\bibinfo {journal} {Phys. Rev. A}\ }\textbf {\bibinfo
{volume} {88}},\ \bibinfo {pages} {020102} (\bibinfo {year}
{2013})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Rivas}\ \emph {et~al.}(2010)\citenamefont {Rivas},
\citenamefont {Huelga},\ and\ \citenamefont {Plenio}}]{measure3}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {\'A.}\ \bibnamefont
{Rivas}}, \bibinfo {author} {\bibfnamefont {S.~F.}\ \bibnamefont {Huelga}}, \
and\ \bibinfo {author} {\bibfnamefont {M.~B.}\ \bibnamefont {Plenio}},\
}\bibfield {title} {\enquote {\bibinfo {title} {Entanglement and
non-{Markovianity} of quantum evolutions},}\ }\href {\doibase
10.1103/PhysRevLett.105.050403} {\bibfield {journal} {\bibinfo {journal}
{Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {105}},\ \bibinfo {pages}
{050403} (\bibinfo {year} {2010})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Laine}\ \emph {et~al.}(2010)\citenamefont {Laine},
\citenamefont {Piilo},\ and\ \citenamefont {Breuer}}]{measure4}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {E.-M.}\ \bibnamefont
{Laine}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Piilo}}, \ and\
\bibinfo {author} {\bibfnamefont {H.-P.}\ \bibnamefont {Breuer}},\ }\bibfield
{title} {\enquote {\bibinfo {title} {Measure for the non-{Markovianity} of
quantum processes},}\ }\href {\doibase 10.1103/PhysRevA.81.062115} {\bibfield
{journal} {\bibinfo {journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume}
{81}},\ \bibinfo {pages} {062115} (\bibinfo {year} {2010})}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Chru\'{s}ci\'{n}ski}\ \emph
{et~al.}(2011)\citenamefont {Chru\'{s}ci\'{n}ski}, \citenamefont
{Kossakowski},\ and\ \citenamefont {Rivas}}]{measure5}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {D.}~\bibnamefont
{Chru\'{s}ci\'{n}ski}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont
{Kossakowski}}, \ and\ \bibinfo {author} {\bibfnamefont {\'A.}\ \bibnamefont
{Rivas}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Measures of
non-{Markovianity}: Divisibility versus backflow of information},}\ }\href
{\doibase 10.1103/PhysRevA.83.052128} {\bibfield {journal} {\bibinfo
{journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume} {83}},\ \bibinfo
{pages} {052128} (\bibinfo {year} {2011})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Chru\'{s}ci\'{n}ski}\ and\ \citenamefont
{Maniscalco}(2014)}]{measure6}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {D.}~\bibnamefont
{Chru\'{s}ci\'{n}ski}}\ and\ \bibinfo {author} {\bibfnamefont
{S.}~\bibnamefont {Maniscalco}},\ }\bibfield {title} {\enquote {\bibinfo
{title} {Degree of non-{Markovianity} of quantum evolution},}\ }\href
{\doibase 10.1103/PhysRevLett.112.120404} {\bibfield {journal} {\bibinfo
{journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {112}},\ \bibinfo
{pages} {120404} (\bibinfo {year} {2014})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Hall}\ \emph {et~al.}(2014)\citenamefont {Hall},
\citenamefont {Cresser}, \citenamefont {Li},\ and\ \citenamefont
{Andersson}}]{ACHL}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {M.~J.~W.}\
\bibnamefont {Hall}}, \bibinfo {author} {\bibfnamefont {J.~D.}\ \bibnamefont
{Cresser}}, \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Li}}, \ and\
\bibinfo {author} {\bibfnamefont {E.}~\bibnamefont {Andersson}},\ }\bibfield
{title} {\enquote {\bibinfo {title} {{Canonical form of master equations and
characterization of non-Markovianity}},}\ }\href {\doibase
10.1103/PhysRevA.89.042120} {\bibfield {journal} {\bibinfo {journal} {Phys.
Rev. A}\ }\textbf {\bibinfo {volume} {89}},\ \bibinfo {pages} {042120}
(\bibinfo {year} {2014})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Chiuri}\ \emph {et~al.}(2012)\citenamefont {Chiuri},
\citenamefont {Greganti}, \citenamefont {Mazzola}, \citenamefont
{Paternostro},\ and\ \citenamefont {Mataloni}}]{NMexp1}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont
{Chiuri}}, \bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Greganti}},
\bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Mazzola}}, \bibinfo
{author} {\bibfnamefont {M.}~\bibnamefont {Paternostro}}, \ and\ \bibinfo
{author} {\bibfnamefont {P.}~\bibnamefont {Mataloni}},\ }\bibfield {title}
{\enquote {\bibinfo {title} {Linear optics simulation of quantum
non-{Markovian} dynamics},}\ }\href@noop {} {\bibfield {journal} {\bibinfo
{journal} {Sci. Rep.}\ }\textbf {\bibinfo {volume} {2}},\ \bibinfo {pages}
{968} (\bibinfo {year} {2012})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Liu}\ \emph {et~al.}(2011)\citenamefont {Liu},
\citenamefont {Li}, \citenamefont {Huang}, \citenamefont {Li}, \citenamefont
{Guo}, \citenamefont {Laine}, \citenamefont {Breuer},\ and\ \citenamefont
{Piilo}}]{NMexp2}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {B.~H.}\ \bibnamefont
{Liu}}, \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Li}}, \bibinfo
{author} {\bibfnamefont {Y.~F.}\ \bibnamefont {Huang}}, \bibinfo {author}
{\bibfnamefont {C.~F.}\ \bibnamefont {Li}}, \bibinfo {author} {\bibfnamefont
{G.~C.}\ \bibnamefont {Guo}}, \bibinfo {author} {\bibfnamefont {E.~M.}\
\bibnamefont {Laine}}, \bibinfo {author} {\bibfnamefont {H.~P.}\ \bibnamefont
{Breuer}}, \ and\ \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont
{Piilo}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Experimental
control of the transition from {Markovian} to non-{Markovian} dynamics of
open quantum systems},}\ }\href@noop {} {\bibfield {journal} {\bibinfo
{journal} {Nat. Phys.}\ }\textbf {\bibinfo {volume} {7}},\ \bibinfo {pages}
{931--934} (\bibinfo {year} {2011})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Bernardes}\ \emph {et~al.}(2016)\citenamefont
{Bernardes}, \citenamefont {Peterson}, \citenamefont {Sarthour},
\citenamefont {Souza}, \citenamefont {Monken}, \citenamefont {Roditi},
\citenamefont {Oliveira},\ and\ \citenamefont {Santos}}]{NMexp3}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {N.~K.}\ \bibnamefont
{Bernardes}}, \bibinfo {author} {\bibfnamefont {J.~P.~S.}\ \bibnamefont
{Peterson}}, \bibinfo {author} {\bibfnamefont {R.~S.}\ \bibnamefont
{Sarthour}}, \bibinfo {author} {\bibfnamefont {A.~M.}\ \bibnamefont {Souza}},
\bibinfo {author} {\bibfnamefont {C.~H.}\ \bibnamefont {Monken}}, \bibinfo
{author} {\bibfnamefont {I.}~\bibnamefont {Roditi}}, \bibinfo {author}
{\bibfnamefont {I.~S.}\ \bibnamefont {Oliveira}}, \ and\ \bibinfo {author}
{\bibfnamefont {M.~F.}\ \bibnamefont {Santos}},\ }\bibfield {title}
{\enquote {\bibinfo {title} {High resolution non-{Markovianity} in NMR},}\
}\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Sci. Rep.}\
}\textbf {\bibinfo {volume} {6}} (\bibinfo {year} {2016})}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Bernardes}\ \emph {et~al.}(2015)\citenamefont
{Bernardes}, \citenamefont {Cuevas}, \citenamefont {Orieux}, \citenamefont
{Monken}, \citenamefont {Mataloni}, \citenamefont {Sciarrino},\ and\
\citenamefont {Santos}}]{NMexp4}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {N.~K.}\ \bibnamefont
{Bernardes}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Cuevas}},
\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Orieux}}, \bibinfo
{author} {\bibfnamefont {C.~H.}\ \bibnamefont {Monken}}, \bibinfo {author}
{\bibfnamefont {P.}~\bibnamefont {Mataloni}}, \bibinfo {author}
{\bibfnamefont {F.}~\bibnamefont {Sciarrino}}, \ and\ \bibinfo {author}
{\bibfnamefont {M.~F.}\ \bibnamefont {Santos}},\ }\bibfield {title}
{\enquote {\bibinfo {title} {Experimental observation of weak
non-{Markovianity}},}\ }\href@noop {} {\bibfield {journal} {\bibinfo
{journal} {Sci. Rep.}\ }\textbf {\bibinfo {volume} {5}} (\bibinfo {year}
{2015})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Orieux}\ \emph {et~al.}(2015)\citenamefont {Orieux},
\citenamefont {D'Arrigo}, \citenamefont {Ferranti}, \citenamefont {{Lo
Franco}}, \citenamefont {Benenti}, \citenamefont {Paladino}, \citenamefont
{Falci}, \citenamefont {Sciarrino},\ and\ \citenamefont {Mataloni}}]{NMexp5}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont
{Orieux}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {D'Arrigo}},
\bibinfo {author} {\bibfnamefont {G.}~\bibnamefont {Ferranti}}, \bibinfo
{author} {\bibfnamefont {R.}~\bibnamefont {{Lo Franco}}}, \bibinfo {author}
{\bibfnamefont {G.}~\bibnamefont {Benenti}}, \bibinfo {author} {\bibfnamefont
{E.}~\bibnamefont {Paladino}}, \bibinfo {author} {\bibfnamefont
{G.}~\bibnamefont {Falci}}, \bibinfo {author} {\bibfnamefont
{F.}~\bibnamefont {Sciarrino}}, \ and\ \bibinfo {author} {\bibfnamefont
{P.}~\bibnamefont {Mataloni}},\ }\bibfield {title} {\enquote {\bibinfo
{title} {Experimental on-demand recovery of entanglement by local operations
within non-{Markovian} dynamics},}\ }\href@noop {} {\bibfield {journal}
{\bibinfo {journal} {Sci. Rep.}\ }\textbf {\bibinfo {volume} {5}},\ \bibinfo
{pages} {8575} (\bibinfo {year} {2015})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Mazzola}\ \emph {et~al.}(2012)\citenamefont
{Mazzola}, \citenamefont {Rodr\'{\i}guez-Rosario}, \citenamefont {Modi},\
and\ \citenamefont {Paternostro}}]{origin1}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {L.}~\bibnamefont
{Mazzola}}, \bibinfo {author} {\bibfnamefont {C.~A.}\ \bibnamefont
{Rodr\'{\i}guez-Rosario}}, \bibinfo {author} {\bibfnamefont {K.}~\bibnamefont
{Modi}}, \ and\ \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont
{Paternostro}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Dynamical
role of system-environment correlations in non-{Markovian} dynamics},}\
}\href {\doibase 10.1103/PhysRevA.86.010102} {\bibfield {journal} {\bibinfo
{journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume} {86}},\ \bibinfo
{pages} {010102} (\bibinfo {year} {2012})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Smirne}\ \emph {et~al.}(2013)\citenamefont {Smirne},
\citenamefont {Mazzola}, \citenamefont {Paternostro},\ and\ \citenamefont
{Vacchini}}]{origin2}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont
{Smirne}}, \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Mazzola}},
\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Paternostro}}, \ and\
\bibinfo {author} {\bibfnamefont {B.}~\bibnamefont {Vacchini}},\ }\bibfield
{title} {\enquote {\bibinfo {title} {Interaction-induced correlations and
non-{Markovianity} of quantum dynamics},}\ }\href {\doibase
10.1103/PhysRevA.87.052129} {\bibfield {journal} {\bibinfo {journal} {Phys.
Rev. A}\ }\textbf {\bibinfo {volume} {87}},\ \bibinfo {pages} {052129}
(\bibinfo {year} {2013})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Kutvonen}\ \emph {et~al.}(2015)\citenamefont
{Kutvonen}, \citenamefont {Ala-Nissila},\ and\ \citenamefont
{Pekola}}]{NMentropy}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont
{Kutvonen}}, \bibinfo {author} {\bibfnamefont {T.}~\bibnamefont
{Ala-Nissila}}, \ and\ \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont
{Pekola}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Entropy
production in a non-{Markovian} environment},}\ }\href {\doibase
10.1103/PhysRevE.92.012107} {\bibfield {journal} {\bibinfo {journal} {Phys.
Rev. E}\ }\textbf {\bibinfo {volume} {92}},\ \bibinfo {pages} {012107}
(\bibinfo {year} {2015})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Lorenzo}\ \emph
{et~al.}(2015{\natexlab{a}})\citenamefont {Lorenzo}, \citenamefont {Farace},
\citenamefont {Ciccarello}, \citenamefont {Palma},\ and\ \citenamefont
{Giovannetti}}]{cascaded}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {S.}~\bibnamefont
{Lorenzo}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Farace}},
\bibinfo {author} {\bibfnamefont {F.}~\bibnamefont {Ciccarello}}, \bibinfo
{author} {\bibfnamefont {G.~M.}\ \bibnamefont {Palma}}, \ and\ \bibinfo
{author} {\bibfnamefont {V.}~\bibnamefont {Giovannetti}},\ }\bibfield
{title} {\enquote {\bibinfo {title} {Heat flux and quantum correlations in
dissipative cascaded systems},}\ }\href {\doibase 10.1103/PhysRevA.91.022121}
{\bibfield {journal} {\bibinfo {journal} {Phys. Rev. A}\ }\textbf {\bibinfo
{volume} {91}},\ \bibinfo {pages} {022121} (\bibinfo {year}
{2015}{\natexlab{a}})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Bylicka}\ \emph {et~al.}(2016)\citenamefont
{Bylicka}, \citenamefont {Tukiainen}, \citenamefont {Chru\'{s}ci\'{n}ski},
\citenamefont {Piilo},\ and\ \citenamefont {Maniscalco}}]{NMPower}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {B.}~\bibnamefont
{Bylicka}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Tukiainen}},
\bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Chru\'{s}ci\'{n}ski}},
\bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Piilo}}, \ and\ \bibinfo
{author} {\bibfnamefont {S.}~\bibnamefont {Maniscalco}},\ }\bibfield {title}
{\enquote {\bibinfo {title} {Thermodynamic power of non-{Markovianity}},}\
}\href {\doibase 10.1038/srep27989} {\bibfield {journal} {\bibinfo
{journal} {Sci. Rep.}\ }\textbf {\bibinfo {volume} {6}} (\bibinfo {year}
{2016}),\ 10.1038/srep27989}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Lorenzo}\ \emph
{et~al.}(2015{\natexlab{b}})\citenamefont {Lorenzo}, \citenamefont
{McCloskey}, \citenamefont {Ciccarello}, \citenamefont {Paternostro},\ and\
\citenamefont {Palma}}]{MultiLandauer}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {S.}~\bibnamefont
{Lorenzo}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {McCloskey}},
\bibinfo {author} {\bibfnamefont {F.}~\bibnamefont {Ciccarello}}, \bibinfo
{author} {\bibfnamefont {M.}~\bibnamefont {Paternostro}}, \ and\ \bibinfo
{author} {\bibfnamefont {G.~M.}\ \bibnamefont {Palma}},\ }\bibfield {title}
{\enquote {\bibinfo {title} {Landauer's principle in multipartite open
quantum system dynamics},}\ }\href {\doibase 10.1103/PhysRevLett.115.120403}
{\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf
{\bibinfo {volume} {115}},\ \bibinfo {pages} {120403} (\bibinfo {year}
{2015}{\natexlab{b}})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Pezzutto}\ \emph {et~al.}(2016)\citenamefont
{Pezzutto}, \citenamefont {Paternostro},\ and\ \citenamefont
{Omar}}]{Implications}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont
{Pezzutto}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont
{Paternostro}}, \ and\ \bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont
{Omar}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Implications of
non-{Markovian} quantum dynamics for the {Landauer} bound},}\ }\href
{\doibase 10.1088/1367-2630/18/12/123018} {\bibfield {journal} {\bibinfo
{journal} {New J. Phys.}\ }\textbf {\bibinfo {volume} {18}},\ \bibinfo
{pages} {123018} (\bibinfo {year} {2016})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Rau}(1963)}]{Rau1963}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {J.}~\bibnamefont
{Rau}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Relaxation
phenomena in spin and harmonic oscillator systems},}\ }\href {\doibase
10.1103/PhysRev.129.1880} {\bibfield {journal} {\bibinfo {journal} {Phys.
Rev.}\ }\textbf {\bibinfo {volume} {129}},\ \bibinfo {pages} {1880--1888}
(\bibinfo {year} {1963})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Scarani}\ \emph {et~al.}(2002)\citenamefont
{Scarani}, \citenamefont {Ziman}, \citenamefont {Stelmachovic}, \citenamefont
{Gisin},\ and\ \citenamefont {Bu\v{z}ek}}]{colli1}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {V.}~\bibnamefont
{Scarani}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Ziman}},
\bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Stelmachovic}}, \bibinfo
{author} {\bibfnamefont {N.}~\bibnamefont {Gisin}}, \ and\ \bibinfo {author}
{\bibfnamefont {V.}~\bibnamefont {Bu\v{z}ek}},\ }\bibfield {title} {\enquote
{\bibinfo {title} {Thermalizing quantum machines: Dissipation and
entanglement},}\ }\href {\doibase 10.1103/PhysRevLett.88.097905} {\bibfield
{journal} {\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo
{volume} {88}},\ \bibinfo {pages} {097905} (\bibinfo {year}
{2002})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Ziman}\ \emph {et~al.}(2002)\citenamefont {Ziman},
\citenamefont {Stelmachovic}, \citenamefont {Bu\v{z}ek}, \citenamefont
{Hillery}, \citenamefont {Scarani},\ and\ \citenamefont {Gisin}}]{colli2}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont
{Ziman}}, \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Stelmachovic}},
\bibinfo {author} {\bibfnamefont {V.}~\bibnamefont {Bu\v{z}ek}}, \bibinfo
{author} {\bibfnamefont {M.}~\bibnamefont {Hillery}}, \bibinfo {author}
{\bibfnamefont {V.}~\bibnamefont {Scarani}}, \ and\ \bibinfo {author}
{\bibfnamefont {N.}~\bibnamefont {Gisin}},\ }\bibfield {title} {\enquote
{\bibinfo {title} {Diluting quantum information: An analysis of information
transfer in system-reservoir interactions},}\ }\href {\doibase
10.1103/PhysRevA.65.042105} {\bibfield {journal} {\bibinfo {journal} {Phys.
Rev. A}\ }\textbf {\bibinfo {volume} {65}},\ \bibinfo {pages} {042105}
(\bibinfo {year} {2002})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Ryb\'{a}r}\ \emph {et~al.}(2012)\citenamefont
{Ryb\'{a}r}, \citenamefont {Filippov}, \citenamefont {Ziman},\ and\
\citenamefont {Bu\v{z}ek}}]{colli3}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {T.}~\bibnamefont
{Ryb\'{a}r}}, \bibinfo {author} {\bibfnamefont {S.~N.}\ \bibnamefont
{Filippov}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Ziman}}, \
and\ \bibinfo {author} {\bibfnamefont {V.}~\bibnamefont {Bu\v{z}ek}},\
}\bibfield {title} {\enquote {\bibinfo {title} {Simulation of indivisible
qubit channels in collision models},}\ }\href
{http://stacks.iop.org/0953-4075/45/i=15/a=154006} {\bibfield {journal}
{\bibinfo {journal} {J. Phys. B}\ }\textbf {\bibinfo {volume} {45}},\
\bibinfo {pages} {154006} (\bibinfo {year} {2012})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Ciccarello}\ and\ \citenamefont
{Giovannetti}(2013)}]{colli4}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {F.}~\bibnamefont
{Ciccarello}}\ and\ \bibinfo {author} {\bibfnamefont {V.}~\bibnamefont
{Giovannetti}},\ }\bibfield {title} {\enquote {\bibinfo {title} {A quantum
non-{Markovian} collision model: incoherent swap case},}\ }\href
{http://stacks.iop.org/1402-4896/2013/i=T153/a=014010} {\bibfield {journal}
{\bibinfo {journal} {Phys. Scr.}\ }\textbf {\bibinfo {volume} {2013}},\
\bibinfo {pages} {014010} (\bibinfo {year} {2013})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Ciccarello}\ \emph {et~al.}(2013)\citenamefont
{Ciccarello}, \citenamefont {Palma},\ and\ \citenamefont
{Giovannetti}}]{colli5}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {F.}~\bibnamefont
{Ciccarello}}, \bibinfo {author} {\bibfnamefont {G.~M.}\ \bibnamefont
{Palma}}, \ and\ \bibinfo {author} {\bibfnamefont {V.}~\bibnamefont
{Giovannetti}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Collision-model-based approach to non-{Markovian} quantum dynamics},}\
}\href {\doibase 10.1103/PhysRevA.87.040103} {\bibfield {journal} {\bibinfo
{journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume} {87}},\ \bibinfo
{pages} {040103} (\bibinfo {year} {2013})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Kretschmer}\ \emph {et~al.}(2016)\citenamefont
{Kretschmer}, \citenamefont {Luoma},\ and\ \citenamefont {Strunz}}]{colli6}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {S.}~\bibnamefont
{Kretschmer}}, \bibinfo {author} {\bibfnamefont {K.}~\bibnamefont {Luoma}}, \
and\ \bibinfo {author} {\bibfnamefont {W.~T.}\ \bibnamefont {Strunz}},\
}\bibfield {title} {\enquote {\bibinfo {title} {Collision model for
non-{Markovian} quantum dynamics},}\ }\href {\doibase
10.1103/PhysRevA.94.012106} {\bibfield {journal} {\bibinfo {journal} {Phys.
Rev. A}\ }\textbf {\bibinfo {volume} {94}},\ \bibinfo {pages} {012106}
(\bibinfo {year} {2016})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Lorenzo}\ \emph {et~al.}(2016)\citenamefont
{Lorenzo}, \citenamefont {Ciccarello},\ and\ \citenamefont {Palma}}]{colli7}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {S.}~\bibnamefont
{Lorenzo}}, \bibinfo {author} {\bibfnamefont {F.}~\bibnamefont {Ciccarello}},
\ and\ \bibinfo {author} {\bibfnamefont {G.~M.}\ \bibnamefont {Palma}},\
}\bibfield {title} {\enquote {\bibinfo {title} {Class of exact memory-kernel
master equations},}\ }\href {\doibase 10.1103/PhysRevA.93.052111} {\bibfield
{journal} {\bibinfo {journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume}
{93}},\ \bibinfo {pages} {052111} (\bibinfo {year} {2016})}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Ziman}\ and\ \citenamefont
{Bu\v{z}ek}(2005)}]{colli8}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont
{Ziman}}\ and\ \bibinfo {author} {\bibfnamefont {V.}~\bibnamefont
{Bu\v{z}ek}},\ }\bibfield {title} {\enquote {\bibinfo {title} {All (qubit)
decoherences: Complete characterization and physical implementation},}\
}\href {\doibase 10.1103/PhysRevA.72.022110} {\bibfield {journal} {\bibinfo
{journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume} {72}},\ \bibinfo
{pages} {022110} (\bibinfo {year} {2005})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Ziman}\ \emph {et~al.}(2005)\citenamefont {Ziman},
\citenamefont {Stelmachovic},\ and\ \citenamefont {Bu\v{z}ek}}]{colli9}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont
{Ziman}}, \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Stelmachovic}},
\ and\ \bibinfo {author} {\bibfnamefont {V.}~\bibnamefont {Bu\v{z}ek}},\
}\bibfield {title} {\enquote {\bibinfo {title} {Description of quantum
dynamics of open systems based on collision-like models},}\ }\href@noop {}
{\bibfield {journal} {\bibinfo {journal} {Open Syst. Inf. Dyn.}\ }\textbf
{\bibinfo {volume} {12}},\ \bibinfo {pages} {81--91} (\bibinfo {year}
{2005})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Benenti}\ and\ \citenamefont
{Palma}(2007)}]{colli10}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {G.}~\bibnamefont
{Benenti}}\ and\ \bibinfo {author} {\bibfnamefont {G.~M.}\ \bibnamefont
{Palma}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Reversible and
irreversible dynamics of a qubit interacting with a small environment},}\
}\href {\doibase 10.1103/PhysRevA.75.052110} {\bibfield {journal} {\bibinfo
{journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume} {75}},\ \bibinfo
{pages} {052110} (\bibinfo {year} {2007})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Gennaro}\ \emph {et~al.}(2008)\citenamefont
{Gennaro}, \citenamefont {Benenti},\ and\ \citenamefont {Palma}}]{colli11}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {G.}~\bibnamefont
{Gennaro}}, \bibinfo {author} {\bibfnamefont {G.}~\bibnamefont {Benenti}}, \
and\ \bibinfo {author} {\bibfnamefont {G.~M.}\ \bibnamefont {Palma}},\
}\bibfield {title} {\enquote {\bibinfo {title} {Entanglement dynamics and
relaxation in a few-qubit system interacting with random collisions},}\
}\href {http://stacks.iop.org/0295-5075/82/i=2/a=20006} {\bibfield {journal}
{\bibinfo {journal} {EPL}\ }\textbf {\bibinfo {volume} {82}},\ \bibinfo
{pages} {20006} (\bibinfo {year} {2008})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Gennaro}\ \emph
{et~al.}(2009{\natexlab{a}})\citenamefont {Gennaro}, \citenamefont
{Benenti},\ and\ \citenamefont {Palma}}]{colli12}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {G.}~\bibnamefont
{Gennaro}}, \bibinfo {author} {\bibfnamefont {G.}~\bibnamefont {Benenti}}, \
and\ \bibinfo {author} {\bibfnamefont {G.~M.}\ \bibnamefont {Palma}},\
}\bibfield {title} {\enquote {\bibinfo {title} {Relaxation due to random
collisions with a many-qudit environment},}\ }\href {\doibase
10.1103/PhysRevA.79.022105} {\bibfield {journal} {\bibinfo {journal} {Phys.
Rev. A}\ }\textbf {\bibinfo {volume} {79}},\ \bibinfo {pages} {022105}
(\bibinfo {year} {2009}{\natexlab{a}})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Gennaro}\ \emph
{et~al.}(2009{\natexlab{b}})\citenamefont {Gennaro}, \citenamefont
{Campbell}, \citenamefont {Paternostro},\ and\ \citenamefont
{Palma}}]{colli13}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {G.}~\bibnamefont
{Gennaro}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Campbell}},
\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Paternostro}}, \ and\
\bibinfo {author} {\bibfnamefont {G.~M.}\ \bibnamefont {Palma}},\ }\bibfield
{title} {\enquote {\bibinfo {title} {Structural change in multipartite
entanglement sharing: A random matrix approach},}\ }\href {\doibase
10.1103/PhysRevA.80.062315} {\bibfield {journal} {\bibinfo {journal} {Phys.
Rev. A}\ }\textbf {\bibinfo {volume} {80}},\ \bibinfo {pages} {062315}
(\bibinfo {year} {2009}{\natexlab{b}})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Bernardes}\ \emph {et~al.}(2017)\citenamefont
{Bernardes}, \citenamefont {Carvalho}, \citenamefont {Monken},\ and\
\citenamefont {Santos}}]{colli14}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {N.~K.}\ \bibnamefont
{Bernardes}}, \bibinfo {author} {\bibfnamefont {A.~R.~R.}\ \bibnamefont
{Carvalho}}, \bibinfo {author} {\bibfnamefont {C.~H.}\ \bibnamefont
{Monken}}, \ and\ \bibinfo {author} {\bibfnamefont {M.~F.}\ \bibnamefont
{Santos}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Coarse graining
a non-{Markovian} collisional model},}\ }\href {\doibase
10.1103/PhysRevA.95.032117} {\bibfield {journal} {\bibinfo {journal} {Phys.
Rev. A}\ }\textbf {\bibinfo {volume} {95}},\ \bibinfo {pages} {032117}
(\bibinfo {year} {2017})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {\c{C}akmak}\ \emph {et~al.}(2017)\citenamefont
{\c{C}akmak}, \citenamefont {Pezzutto}, \citenamefont {Paternostro},\ and\
\citenamefont {M\"ustecaplio\u{g}lu}}]{colli15}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {B.}~\bibnamefont
{\c{C}akmak}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont
{Pezzutto}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont
{Paternostro}}, \ and\ \bibinfo {author} {\bibfnamefont {\"O.~E.}\
\bibnamefont {M\"ustecaplio\u{g}lu}},\ }\bibfield {title} {\enquote
{\bibinfo {title} {Non-{Markovianity}, coherence, and system-environment
correlations in a long-range collision model},}\ }\href {\doibase
10.1103/PhysRevA.96.022109} {\bibfield {journal} {\bibinfo {journal} {Phys.
Rev. A}\ }\textbf {\bibinfo {volume} {96}},\ \bibinfo {pages} {022109}
(\bibinfo {year} {2017})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Lorenzo}\ \emph
{et~al.}(2017{\natexlab{a}})\citenamefont {Lorenzo}, \citenamefont
{Ciccarello},\ and\ \citenamefont {Palma}}]{colli16}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {S.}~\bibnamefont
{Lorenzo}}, \bibinfo {author} {\bibfnamefont {F.}~\bibnamefont {Ciccarello}},
\ and\ \bibinfo {author} {\bibfnamefont {G.~M.}\ \bibnamefont {Palma}},\
}\bibfield {title} {\enquote {\bibinfo {title} {Composite quantum collision
models},}\ }\href {\doibase 10.1103/PhysRevA.96.032107} {\bibfield {journal}
{\bibinfo {journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume} {96}},\
\bibinfo {pages} {032107} (\bibinfo {year} {2017}{\natexlab{a}})}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Lorenzo}\ \emph
{et~al.}(2017{\natexlab{b}})\citenamefont {Lorenzo}, \citenamefont
{Ciccarello}, \citenamefont {Palma},\ and\ \citenamefont
{Vacchini}}]{colli17}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {S.}~\bibnamefont
{Lorenzo}}, \bibinfo {author} {\bibfnamefont {F.}~\bibnamefont {Ciccarello}},
\bibinfo {author} {\bibfnamefont {G.~M.}\ \bibnamefont {Palma}}, \ and\
\bibinfo {author} {\bibfnamefont {B.}~\bibnamefont {Vacchini}},\ }\bibfield
{title} {\enquote {\bibinfo {title} {Quantum non-{Markovian} piecewise
dynamics from collision models},}\ }\href {\doibase
10.1142/S123016121740011X} {\bibfield {journal} {\bibinfo {journal} {Open
Syst. Inf. Dyn.}\ }\textbf {\bibinfo {volume} {24}},\ \bibinfo {pages}
{1740011} (\bibinfo {year} {2017}{\natexlab{b}})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Filippov}\ \emph {et~al.}(2017)\citenamefont
{Filippov}, \citenamefont {Piilo}, \citenamefont {Maniscalco},\ and\
\citenamefont {Ziman}}]{colli18}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {S.~N.}\ \bibnamefont
{Filippov}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Piilo}},
\bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Maniscalco}}, \ and\
\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Ziman}},\ }\bibfield
{title} {\enquote {\bibinfo {title} {Divisibility of quantum dynamical maps
and collision models},}\ }\href {\doibase 10.1103/PhysRevA.96.032111}
{\bibfield {journal} {\bibinfo {journal} {Phys. Rev. A}\ }\textbf {\bibinfo
{volume} {96}},\ \bibinfo {pages} {032111} (\bibinfo {year}
{2017})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Mccloskey}\ and\ \citenamefont
{Paternostro}(2014)}]{colli19}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {R.}~\bibnamefont
{Mccloskey}}\ and\ \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont
{Paternostro}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Non-{Markovianity} and system-environment correlations in a microscopic
collision model},}\ }\href {\doibase 10.1103/PhysRevA.89.052120} {\bibfield
{journal} {\bibinfo {journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume}
{89}},\ \bibinfo {pages} {052120} (\bibinfo {year} {2014})}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Bernardes}\ \emph {et~al.}(2014)\citenamefont
{Bernardes}, \citenamefont {Carvalho}, \citenamefont {Monken},\ and\
\citenamefont {Santos}}]{colli20}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {N.~K.}\ \bibnamefont
{Bernardes}}, \bibinfo {author} {\bibfnamefont {A.~R.~R.}\ \bibnamefont
{Carvalho}}, \bibinfo {author} {\bibfnamefont {C.~H.}\ \bibnamefont
{Monken}}, \ and\ \bibinfo {author} {\bibfnamefont {M.~F.}\ \bibnamefont
{Santos}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Environmental
correlations and {Markovian} to non-{Markovian} transitions in collisional
models},}\ }\href {\doibase 10.1103/PhysRevA.90.032111} {\bibfield {journal}
{\bibinfo {journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume} {90}},\
\bibinfo {pages} {032111} (\bibinfo {year} {2014})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Giovannetti}\ and\ \citenamefont
{Palma}(2012)}]{colli21}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {V.}~\bibnamefont
{Giovannetti}}\ and\ \bibinfo {author} {\bibfnamefont {G.~M.}\ \bibnamefont
{Palma}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Master equations
for correlated quantum channels},}\ }\href {\doibase
10.1103/PhysRevLett.108.040401} {\bibfield {journal} {\bibinfo {journal}
{Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {108}},\ \bibinfo {pages}
{040401} (\bibinfo {year} {2012})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Jin}\ and\ \citenamefont {Yu}(2017)}]{colli22}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {J.}~\bibnamefont
{Jin}}\ and\ \bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Yu}},\
}\bibfield {title} {\enquote {\bibinfo {title} {Non-{Markovianity} in a
collision model with environmental block},}\ }\href@noop {} {\bibfield
{journal} {\bibinfo {journal} {arXiv [quant-ph]: 1710.07142v1}\ } (\bibinfo
{year} {2017})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Ciccarello}(2017)}]{cicc2017}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {F.}~\bibnamefont
{Ciccarello}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Collision
models in quantum optics},}\ }\href@noop {} {\bibfield {journal} {\bibinfo
{journal} {Quantum Measurements and Quantum Metrology}\ }\textbf {\bibinfo
{volume} {4}},\ \bibinfo {pages} {53} (\bibinfo {year} {2017})}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Weiss}(2008)}]{Weiss}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {U.}~\bibnamefont
{Weiss}},\ }\href@noop {} {\emph {\bibinfo {title} {Quantum Dissipative
Systems, Series in Modern Condensed Matter Physics}}}\ (\bibinfo {publisher}
{World Scientific, Singapore},\ \bibinfo {year} {2008})\BibitemShut {NoStop}
\bibitem [{\citenamefont {Haikka}\ \emph {et~al.}(2013)\citenamefont {Haikka},
\citenamefont {Johnson},\ and\ \citenamefont {Maniscalco}}]{NMT1}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {P.}~\bibnamefont
{Haikka}}, \bibinfo {author} {\bibfnamefont {T.~H.}\ \bibnamefont {Johnson}},
\ and\ \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Maniscalco}},\
}\bibfield {title} {\enquote {\bibinfo {title} {Non-{Markovianity} of local
dephasing channels and time-invariant discord},}\ }\href {\doibase
10.1103/PhysRevA.87.010103} {\bibfield {journal} {\bibinfo {journal} {Phys.
Rev. A}\ }\textbf {\bibinfo {volume} {87}},\ \bibinfo {pages} {010103}
(\bibinfo {year} {2013})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Z.D.}\ \emph {et~al.}(2014)\citenamefont {Z.D.},
\citenamefont {Y.X.},\ and\ \citenamefont {Y.Q.}}]{NMT2}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Hu}~\bibnamefont
{Z.D.}}, \bibinfo {author} {\bibfnamefont {Zhang}\ \bibnamefont {Y.X.}}, \
and\ \bibinfo {author} {\bibfnamefont {Zhang}\ \bibnamefont {Y.Q.}},\
}\bibfield {title} {\enquote {\bibinfo {title} {Negativity of quantumness
and non-{Markovianity} in a qubit coupled to a thermal ising spin bath
system},}\ }\href {http://stacks.iop.org/0253-6102/62/i=5/a=01} {\bibfield
{journal} {\bibinfo {journal} {Commun. Theor. Phys.}\ }\textbf {\bibinfo
{volume} {62}},\ \bibinfo {pages} {634} (\bibinfo {year} {2014})}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Liu}\ \emph {et~al.}(2016)\citenamefont {Liu},
\citenamefont {Hu}, \citenamefont {Huang}, \citenamefont {Li}, \citenamefont
{Guo}, \citenamefont {Karlsson}, \citenamefont {Laine}, \citenamefont
{Maniscalco}, \citenamefont {Macchiavello},\ and\ \citenamefont
{Piilo}}]{NMT3}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {B.~H.}\ \bibnamefont
{Liu}}, \bibinfo {author} {\bibfnamefont {X.~M.}\ \bibnamefont {Hu}},
\bibinfo {author} {\bibfnamefont {Y.~F.}\ \bibnamefont {Huang}}, \bibinfo
{author} {\bibfnamefont {C.~F.}\ \bibnamefont {Li}}, \bibinfo {author}
{\bibfnamefont {G.~C.}\ \bibnamefont {Guo}}, \bibinfo {author} {\bibfnamefont
{A.}~\bibnamefont {Karlsson}}, \bibinfo {author} {\bibfnamefont {E.~M.}\
\bibnamefont {Laine}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont
{Maniscalco}}, \bibinfo {author} {\bibfnamefont {C.}~\bibnamefont
{Macchiavello}}, \ and\ \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont
{Piilo}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Efficient
superdense coding in the presence of non-{Markovian} noise},}\ }\href@noop {}
{\bibfield {journal} {\bibinfo {journal} {EPL}\ }\textbf {\bibinfo {volume}
{114}},\ \bibinfo {pages} {10005} (\bibinfo {year} {2016})}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Vasile}\ \emph {et~al.}(2014)\citenamefont {Vasile},
\citenamefont {Galve},\ and\ \citenamefont {Zambrini}}]{NMT4}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {R.}~\bibnamefont
{Vasile}}, \bibinfo {author} {\bibfnamefont {F.}~\bibnamefont {Galve}}, \
and\ \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Zambrini}},\
}\bibfield {title} {\enquote {\bibinfo {title} {Spectral origin of
non-{Markovian} open-system dynamics: A finite harmonic model without
approximations},}\ }\href {\doibase 10.1103/PhysRevA.89.022109} {\bibfield
{journal} {\bibinfo {journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume}
{89}},\ \bibinfo {pages} {022109} (\bibinfo {year} {2014})}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Clos}\ and\ \citenamefont {Breuer}(2012)}]{ClosPRA}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {G.}~\bibnamefont
{Clos}}\ and\ \bibinfo {author} {\bibfnamefont {H.-P.}\ \bibnamefont
{Breuer}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Quantification
of memory effects in the spin-boson model},}\ }\href {\doibase
10.1103/PhysRevA.86.012115} {\bibfield {journal} {\bibinfo {journal} {Phys.
Rev. A}\ }\textbf {\bibinfo {volume} {86}},\ \bibinfo {pages} {012115}
(\bibinfo {year} {2012})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Chen}\ \emph {et~al.}(2015)\citenamefont {Chen},
\citenamefont {Lambert}, \citenamefont {Cheng}, \citenamefont {Chen},\ and\
\citenamefont {Nori}}]{NMT5}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {H.~B.}\ \bibnamefont
{Chen}}, \bibinfo {author} {\bibfnamefont {N.}~\bibnamefont {Lambert}},
\bibinfo {author} {\bibfnamefont {Y.~C.}\ \bibnamefont {Cheng}}, \bibinfo
{author} {\bibfnamefont {Y.~N.}\ \bibnamefont {Chen}}, \ and\ \bibinfo
{author} {\bibfnamefont {F.}~\bibnamefont {Nori}},\ }\bibfield {title}
{\enquote {\bibinfo {title} {Using non-{Markovian} measures to evaluate
quantum master equations for photosynthesis},}\ }\href {\doibase
10.1038/srep12753} {\bibfield {journal} {\bibinfo {journal} {Sci. Rep.}\
}\textbf {\bibinfo {volume} {5}},\ \bibinfo {pages} {12753} (\bibinfo {year}
{2015})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Wi\ss{}mann}\ \emph {et~al.}(2012)\citenamefont
{Wi\ss{}mann}, \citenamefont {Karlsson}, \citenamefont {Laine}, \citenamefont
{Piilo},\ and\ \citenamefont {Breuer}}]{optimal}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {S.}~\bibnamefont
{Wi\ss{}mann}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont
{Karlsson}}, \bibinfo {author} {\bibfnamefont {E.-M.}\ \bibnamefont {Laine}},
\bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Piilo}}, \ and\ \bibinfo
{author} {\bibfnamefont {H.-P.}\ \bibnamefont {Breuer}},\ }\bibfield {title}
{\enquote {\bibinfo {title} {Optimal state pairs for non-{Markovian} quantum
dynamics},}\ }\href {\doibase 10.1103/PhysRevA.86.062108} {\bibfield
{journal} {\bibinfo {journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume}
{86}},\ \bibinfo {pages} {062108} (\bibinfo {year} {2012})}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Baumgratz}\ \emph {et~al.}(2014)\citenamefont
{Baumgratz}, \citenamefont {Cramer},\ and\ \citenamefont
{Plenio}}]{baumgratzPRL}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {T.}~\bibnamefont
{Baumgratz}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Cramer}}, \
and\ \bibinfo {author} {\bibfnamefont {M.~B.}\ \bibnamefont {Plenio}},\
}\bibfield {title} {\enquote {\bibinfo {title} {Quantifying coherence},}\
}\href {\doibase 10.1103/PhysRevLett.113.140401} {\bibfield {journal}
{\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {113}},\
\bibinfo {pages} {140401} (\bibinfo {year} {2014})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Streltsov}\ \emph {et~al.}(2017)\citenamefont
{Streltsov}, \citenamefont {Adesso},\ and\ \citenamefont
{Plenio}}]{streltsovRMP}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont
{Streltsov}}, \bibinfo {author} {\bibfnamefont {G.}~\bibnamefont {Adesso}}, \
and\ \bibinfo {author} {\bibfnamefont {M.~B.}\ \bibnamefont {Plenio}},\
}\bibfield {title} {\enquote {\bibinfo {title} {Colloquium: Quantum
coherence as a resource},}\ }\href {\doibase 10.1103/RevModPhys.89.041003}
{\bibfield {journal} {\bibinfo {journal} {Rev. Mod. Phys.}\ }\textbf
{\bibinfo {volume} {89}},\ \bibinfo {pages} {041003} (\bibinfo {year}
{2017})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Mazzola}\ \emph {et~al.}(2009)\citenamefont
{Mazzola}, \citenamefont {Maniscalco}, \citenamefont {Piilo}, \citenamefont
{Suominen},\ and\ \citenamefont {Garraway}}]{laurapseudo}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {L.}~\bibnamefont
{Mazzola}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Maniscalco}},
\bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Piilo}}, \bibinfo
{author} {\bibfnamefont {K.-A.}\ \bibnamefont {Suominen}}, \ and\ \bibinfo
{author} {\bibfnamefont {B.~M.}\ \bibnamefont {Garraway}},\ }\bibfield
{title} {\enquote {\bibinfo {title} {Pseudomodes as an effective description
of memory: Non-{Markovian} dynamics of two-state systems in structured
reservoirs},}\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{Phys. Rev. A}\ }\textbf {\bibinfo {volume} {80}},\ \bibinfo {pages} {012104}
(\bibinfo {year} {2009})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Man}\ \emph {et~al.}(2015{\natexlab{b}})\citenamefont
{Man}, \citenamefont {Xia},\ and\ \citenamefont {{Lo
Franco}}}]{ManSciRep2015}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Z.~X.}\ \bibnamefont
{Man}}, \bibinfo {author} {\bibfnamefont {Y.~J.}\ \bibnamefont {Xia}}, \ and\
\bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {{Lo Franco}}},\
}\bibfield {title} {\enquote {\bibinfo {title} {Cavity-based architecture to
preserve quantum coherence and entanglement},}\ }\href@noop {} {\bibfield
{journal} {\bibinfo {journal} {Sci. Rep.}\ }\textbf {\bibinfo {volume}
{5}},\ \bibinfo {pages} {13843} (\bibinfo {year}
{2015}{\natexlab{b}})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Preskill}()}]{NISQ}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {J.}~\bibnamefont
{Preskill}},\ }\href@noop {} {\enquote {\bibinfo {title} {Quantum computing
in the {NISQ} era and beyond},}\ }\bibinfo {howpublished} {arXiv:1801.00862
[quant-ph]}\BibitemShut {NoStop}
\end{thebibliography}
\end{document}
\begin{document}
\title{Using continuous measurement to protect a universal set of quantum gates within a perturbed decoherence-free subspace}
\author{Paulo E. M. F. Mendon\c{c}a}
\affiliation{School of Physical Sciences, The University of Queensland, \\
Queensland 4072, Australia \\
E-mail address: [email protected]}
\author{Marcelo A. Marchiolli, Reginaldo d. J. Napolitano}
\affiliation{Instituto de F\'{\i}sica de S\~{a}o Carlos, Universidade de S\~{a}o Paulo, \\
Caixa Postal 369, 13560-970, S\~{a}o Carlos, SP, Brazil \\
E-mail addresses: [email protected] and [email protected]}
\date{\today}
\begin{abstract}
\vspace*{0.1mm}
\begin{center}
\rule[0.1in]{142mm}{0.4mm}
\end{center}
We consider a universal set of quantum gates encoded within a perturbed decoherence-free subspace of four physical qubits. Using
second-order perturbation theory and a measuring device modeled by an infinite set of harmonic oscillators, simply coupled to the
system, we show that continuous observation of the coupling agent induces inhibition of the decoherence due to spurious perturbations.
We thus advance the idea of protecting or even creating a decoherence-free subspace for processing quantum information. \\
\vspace*{0.1mm}
\begin{center}
\rule[0.1in]{142mm}{0.4mm}
\end{center}
\end{abstract}
\maketitle
\section{Introduction}
The reality of entanglement and state superposition inherent to quantum mechanics has opened up astounding possibilities. In particular,
some problems whose known classical solutions require exponential-time algorithms can now, in principle, be solved in polynomial time,
if use is made of these quantum resources \cite{i1,i2,i3}. Unfortunately, the implementation of such an efficient algorithm encounters
an almost unsurmountable obstacle: the degrading and ubiquitous decoherence due to the unavoidable coupling with the environment
\cite{i4,i5}. However, if the agent coupling the quantum computer to its environment is degenerate, any quantum information processed
within the corresponding degenerate subspace does not decohere \cite{i6,i7,i8,i9,i10,i11,i12,i13,i14}. The recent experimental
verification and investigation of decoherence-free subspaces (DFS) \cite{i15,i16} have continuously increased their potential
application in quantum information processing \cite{i17,i18,i19,i20}. In practice, these DFS's are only approximate, because all
observables of a quantum system are ultimately coupling agents to the surroundings, some of these being incompatible with a particular
degenerate one that gives rise to a DFS. In this sense, any proposal intended to further protect such an approximate DFS is relevant to
the effort to make quantum computing a realistic endeavor.
In any physical implementation of a quantum computer, the basic unit of information processed is the qubit, as the bit is the elementary
information processed by a classical computer. Whatever the representation of a qubit in a quantum computer, it should be operated
through unitary transformations, including the identity. Since the subspace spanned by a qubit is two dimensional, the manipulation of a
qubit must be represented by ${\rm SU(2)}$ transformations. The information processed by a quantum computer is, therefore, represented by
states of a tensor product of two-dimensional qubit subspaces. Universal quantum computation requires a set of quantum gates represented
by unitary transformations of two qubits at least \cite{i12,i21,i22}.
An $N$-dimensional decoherence-free subspace, ${\cal H}_{\mbox{\tiny $N$}}$, is possible only when the agent coupling it to its surroundings is
an observable degenerate on ${\cal H}_{\mbox{\tiny $N$}}$ \cite{i9}. If one considers the implementation of a minimal set of universal quantum
gates operating on two qubits only, then there is no nontrivial degenerate observable available to establish a four-dimensional
decoherence-free subspace. Without loss of generality, in a system of four qubits (e.g., four $1/2$ spins), the nontrivial choice of
${\bf J}_{z}$ as the coupling observable allows us to construct two distinct four-dimensional decoherence-free subspaces associated with
the eigenvalues of ${\bf J}_{z}$ equal to $M_{z} = \pm \hbar$. Here we consider the four-dimensional subspace with $M_{z}=+\hbar$,
within which we assume that gate operations are described by the usual spin-boson Hamiltonian:
\begin{equation}
\label{e1}
{\bf H}_{0}(t) = - \frac{\hbar}{2} \sum_{n=1}^{4} \left[ \mathbb{B}^{(n)}(t) \right]^{\dagger} \mbox{\boldmath $\Sigma$}^{(n)} + \frac{\hbar^{2}}{4}
\sum_{n,m=1}^{4} \left[ \mbox{\boldmath $\Sigma$}^{(m)} \right]^{\dagger} \mathbb{G}^{(mn)}(t) \, \mbox{\boldmath $\Sigma$}^{(n)}
\end{equation}
where
\begin{displaymath}
\mbox{\boldmath $\Sigma$}^{(n)} = \left[ \begin{array}{c} \mbox{\boldmath $\sigma$}_{x}^{(n)} \\ \mbox{\boldmath $\sigma$}_{y}^{(n)} \\ \mbox{\boldmath $\sigma$}_{z}^{(n)} \end{array} \right] \qquad
\mathbb{B}^{(n)}(t) = \left[ \begin{array}{c} B_{x}^{(n)}(t) \\ B_{y}^{(n)}(t) \\ B_{z}^{(n)}(t) \end{array} \right] \qquad \mbox{and} \qquad
\mathbb{G}^{(mn)}(t) = \left[ \begin{array}{ccc} G_{xx}^{(mn)}(t) & G_{xy}^{(mn)}(t) & G_{xz}^{(mn)}(t) \\
G_{yx}^{(mn)}(t) & G_{yy}^{(mn)}(t) & G_{yz}^{(mn)}(t) \\
G_{zx}^{(mn)}(t) & G_{zy}^{(mn)}(t) & G_{zz}^{(mn)}(t) \end{array} \right] \; .
\end{displaymath}
The operators $\mbox{\boldmath $\sigma$}_{j}^{(n)}$ with $j=x,y,z$ correspond to Pauli matrices for spin $n$, obeying the well-known ${\rm su}(2)$ commutation
relations. Furthermore, the real $(3 \times 1)$-matrix $\mathbb{B}^{(n)}(t)$ is associated with an external field acting locally on
qubit $n$, and the real $(3 \times 3)$-matrix $\mathbb{G}^{(mn)}(t)$ represents externally-controlled interactions between qubits $m$
and $n$.
Decoherence is universal for real systems \cite{i23}. Essentially all the observables of a system are coupled to the environment,
rendering decoherence unavoidable. However, it has been proposed that continuous measurement of an observable protects (through the
quantum Zeno effect) states defined in the subspace associated to a degenerate eigenvalue of that observable \cite{i14}. In the present
work we show, using the simple system above as a paradigm, that a strong enough coupling of ${\bf J}_{z}$ to the environment effectively
creates ${\cal H}_{4}$. In other words, provided the initial states are chosen within $M_{z} = + \hbar$ and ${\bf H}_{0}(t)$ commutes
with ${\bf J}_{z}$, we show that the environment effects induced by couplings to system observables incompatible with ${\bf J}_{z}$ are
immaterial if the frequency of measurement of ${\bf J}_{z}$ is high enough.
This work is organized as follows. Section II describes the construction of a universal set of quantum gates operating within a DFS. In
Section III we introduce a perturbation that degrades the original DFS and establish a scheme to inhibit the perturbing effects through
continuous measurement. Finally, the conclusion is presented in Section IV.
\section{Quantum-gate construction in a decoherence-free subspace}
Universal quantum computation can be achieved by using any single member of the infinite class of universal sets of quantum gates. Any
gate capable of entangling two qubits, together with a minimal set of one-qubit gates form such a universal set \cite{i12,i21,i22}. For
our purposes, we choose the traditional set composed by the two-qubit controlled-not (CNOT) gate, the Hadamard gate, and the $\pi /8$
gate \cite{i24}.
To obtain the effective Hamiltonians for the mentioned universal set of quantum gates, we start by imposing that the general spin-boson
Hamiltonian ${\bf H}_{0}(t)$ commute with ${\bf J}_{z}$. After some algebra we obtain
\begin{eqnarray}
\label{s1}
{\bf H}_{0}(t) &=& - \frac{\hbar}{2} \sum_{n=1}^{4} B_{z}^{(n)}(t) \, \mbox{\boldmath $\sigma$}_{z}^{(n)} + \frac{\hbar^{2}}{4} \sum_{n=1}^{3}
\sum_{m=n+1}^{4} \left[ G_{zz}^{(mn)}(t) \, \mbox{\boldmath $\sigma$}_{z}^{(m)} \mbox{\boldmath $\sigma$}_{z}^{(n)} + G_{xx}^{(mn)} (t) ( \mbox{\boldmath $\sigma$}_{x}^{(m)} \mbox{\boldmath $\sigma$}_{x}^{(n)} +
\mbox{\boldmath $\sigma$}_{y}^{(m)} \mbox{\boldmath $\sigma$}_{y}^{(n)} ) \right. \nonumber \\
& & + \left. G_{xy}^{(mn)}(t) ( \mbox{\boldmath $\sigma$}_{x}^{(m)} \mbox{\boldmath $\sigma$}_{y}^{(n)} - \mbox{\boldmath $\sigma$}_{y}^{(m)} \mbox{\boldmath $\sigma$}_{x}^{(n)} ) \right]
\end{eqnarray}
where the independent coefficients $B_{z}^{(n)}(t)$, $G_{zz}^{(mn)}(t)$, $G_{xx}^{(mn)}(t)$, and $G_{xy}^{(mn)}(t)$ are arbitrary real
functions of time to ensure that ${\bf H}_{0}(t)$ be Hermitian. Below we show, by explicit construction, that the restriction of this
Hamiltonian to the four-dimensional subspace spanned by the eigenstates of ${\bf J}_{z}$ with eigenvalue $M_{z} = + \hbar$,
${\cal H}_{4}$, is sufficiently general to form all the possible four-by-four Hermitian matrices. In this way, we equivalently prove
that universal quantum computation is possible within the decoherence-free subspace (DFS), ${\cal H}_{4}$, of our four-qubit model,
since the set of all Hermitian matrices produces the set of all unitary matrices through the exponential operation.
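A simple parameter count makes this plausible (the explicit construction below is what actually establishes it): at each instant, Eq.~(\ref{s1}) contains
\begin{displaymath}
\underbrace{4}_{B_{z}^{(n)}} \; + \; \underbrace{6}_{G_{zz}^{(mn)}} \; + \; \underbrace{6}_{G_{xx}^{(mn)}} \; + \; \underbrace{6}_{G_{xy}^{(mn)}} \; = \; 22
\end{displaymath}
independent real coefficients, more than the $16$ real parameters needed to specify an arbitrary $4 \times 4$ Hermitian matrix on ${\cal H}_{4}$.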
It is straightforward to show that the following choices of independent parameters form a complete set of Hermitian matrices within
${\cal H}_{4}$:
\begin{equation}
\label{s2}
{\bf H}_{\mbox{\tiny ${\rm CNOT}$}} = \frac{\pi \hbar}{4 \tau} \, ( \mbox{\boldmath $\sigma$}_{z}^{(3)} + \mbox{\boldmath $\sigma$}_{z}^{(4)} - \mbox{\boldmath $\sigma$}_{x}^{(1)} \mbox{\boldmath $\sigma$}_{x}^{(2)} - \mbox{\boldmath $\sigma$}_{y}^{(1)}
\mbox{\boldmath $\sigma$}_{y}^{(2)} )
\end{equation}
for the controlled-not gate,
\begin{equation}
\label{s3}
{\bf H}_{\mbox{\tiny ${\rm T}_{1}$}} = - \frac{\pi \hbar}{8 \tau } \, ( \mbox{\boldmath $\sigma$}_{z}^{(3)} + \mbox{\boldmath $\sigma$}_{z}^{(4)} )
\end{equation}
for the $\pi /8$ gate for the first qubit,
\begin{equation}
\label{s4}
{\bf H}_{\mbox{\tiny ${\rm T}_{2}$}} = - \frac{\pi \hbar}{8 \tau} \, ( \mbox{\boldmath $\sigma$}_{z}^{(2)} + \mbox{\boldmath $\sigma$}_{z}^{(4)} )
\end{equation}
for the $\pi /8$ gate for the second qubit,
\begin{eqnarray}
\label{s5}
{\bf H}_{\mbox{\tiny ${\rm H}_{1}$}} &=& \frac{\pi \hbar}{8 \tau} \left[ (2 - \sqrt{2}) ( \mbox{\boldmath $\sigma$}_{z}^{(1)} + \mbox{\boldmath $\sigma$}_{z}^{(2)} ) + (2 + \sqrt{2}) ( \mbox{\boldmath $\sigma$}_{z}^{(3)}
+ \mbox{\boldmath $\sigma$}_{z}^{(4)} ) \right. \nonumber \\
& & - \left. \sqrt{2} \, ( \mbox{\boldmath $\sigma$}_{x}^{(1)} \mbox{\boldmath $\sigma$}_{x}^{(3)} + \mbox{\boldmath $\sigma$}_{y}^{(1)} \mbox{\boldmath $\sigma$}_{y}^{(3)} + \mbox{\boldmath $\sigma$}_{x}^{(2)} \mbox{\boldmath $\sigma$}_{x}^{(4)} + \mbox{\boldmath $\sigma$}_{y}^{(2)}
\mbox{\boldmath $\sigma$}_{y}^{(4)} ) \right]
\end{eqnarray}
for the Hadamard gate for the first qubit, and
\begin{eqnarray}
\label{s6}
{\bf H}_{\mbox{\tiny ${\rm H}_{2}$}} &=& \frac{\pi \hbar}{8 \tau} \left[ (2 - \sqrt{2}) ( \mbox{\boldmath $\sigma$}_{z}^{(1)} + \mbox{\boldmath $\sigma$}_{z}^{(3)} ) + (2 + \sqrt{2}) ( \mbox{\boldmath $\sigma$}_{z}^{(2)}
+ \mbox{\boldmath $\sigma$}_{z}^{(4)} ) \right. \nonumber \\
& & - \left. \sqrt{2} \, ( \mbox{\boldmath $\sigma$}_{x}^{(1)} \mbox{\boldmath $\sigma$}_{x}^{(2)} + \mbox{\boldmath $\sigma$}_{y}^{(1)} \mbox{\boldmath $\sigma$}_{y}^{(2)} + \mbox{\boldmath $\sigma$}_{x}^{(3)} \mbox{\boldmath $\sigma$}_{x}^{(4)} + \mbox{\boldmath $\sigma$}_{y}^{(3)}
\mbox{\boldmath $\sigma$}_{y}^{(4)} ) \right]
\end{eqnarray}
for the Hadamard gate for the second qubit, where $\tau $ is a positive and real constant with dimension of time. We note that
${\bf H}_{\mbox{\tiny ${\rm T}_{2}$}}$ and ${\bf H}_{\mbox{\tiny ${\rm H}_{2}$}}$ are related to ${\bf H}_{\mbox{\tiny ${\rm T}_{1}$}}$ and ${\bf H}_{\mbox{\tiny ${\rm H}_{1}$}}$ through the interchange of the
superscripts $2$ and $3$. These Hamiltonians are just one possible choice, since the abundance of independent parameters renders the
associated linear system underdetermined. For this particular set of independent Hamiltonians to work according to conventional two-qubit
quantum computation, we define two abstract qubits spanning ${\cal H}_{4}$ according to the mapping:
\begin{eqnarray}
| 0,0 \rangle & \equiv & | \uparrow, \uparrow, \uparrow, \downarrow \rangle \nonumber \\
| 0,1 \rangle & \equiv & | \uparrow, \uparrow, \downarrow, \uparrow \rangle \nonumber \\
| 1,0 \rangle & \equiv & | \uparrow, \downarrow, \uparrow, \uparrow \rangle \nonumber \\
| 1,1 \rangle & \equiv & | \downarrow, \uparrow, \uparrow, \uparrow \rangle. \nonumber
\end{eqnarray}
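As a quick consistency check, the $M_{z} = + \hbar$ eigenspace of four spin-$1/2$ particles is spanned by the four states with exactly one spin down, which are precisely the kets listed above; for instance,
\begin{displaymath}
{\bf J}_{z} \, | \uparrow, \uparrow, \uparrow, \downarrow \rangle = \frac{\hbar}{2} \, (1+1+1-1) \, | \uparrow, \uparrow, \uparrow, \downarrow \rangle = + \hbar \, | \uparrow, \uparrow, \uparrow, \downarrow \rangle \; .
\end{displaymath}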
Now, by multiplying each of the above time-independent Hamiltonians by $- {\rm i} \tau / \hbar$ and exponentiating, we obtain, in matrix
format with respect to the ordered subspace basis $( | \uparrow, \uparrow, \uparrow, \downarrow \rangle, | \uparrow, \uparrow,
\downarrow, \uparrow \rangle, | \uparrow, \downarrow, \uparrow, \uparrow \rangle, | \downarrow, \uparrow, \uparrow, \uparrow \rangle )$:
\begin{equation}
\label{s7}
\begin{array}{ccc}
{\bf U}_{\mbox{\tiny ${\rm CNOT}$}} = \exp (- {\rm i} \tau {\bf H}_{\mbox{\tiny ${\rm CNOT}$}} / \hbar ) = \left[ \begin{array}{cccc}
1 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 \\
0 & 0 & 0 & 1 \\
0 & 0 & 1 & 0 \end{array} \right]
\end{array}
\end{equation}
for the controlled-not operation,
\begin{equation}
\label{s8}
\begin{array}{cc}
{\bf U}_{\mbox{\tiny ${\rm T}_{1}$}} = \exp (- {\rm i} \tau {\bf H}_{\mbox{\tiny ${\rm T}_{1}$}} / \hbar ) = \left[ \begin{array}{cccc}
1 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 \\
0 & 0 & \exp \left( \frac{{\rm i} \pi}{4} \right) & 0 \\
0 & 0 & 0 & \exp \left( \frac{{\rm i} \pi}{4} \right) \end{array} \right]
\end{array}
\end{equation}
for the $\pi /8$ operation on the first abstract qubit,
\begin{equation}
\label{s9}
\begin{array}{cc}
{\bf U}_{\mbox{\tiny ${\rm T}_{2}$}} = \exp (- {\rm i} \tau {\bf H}_{\mbox{\tiny ${\rm T}_{2}$}} / \hbar ) = \left[ \begin{array}{cccc}
1 & 0 & 0 & 0 \\
0 & \exp \left( \frac{{\rm i} \pi}{4} \right) & 0 & 0 \\
0 & 0 & 1 & 0 \\
0 & 0 & 0 & \exp \left( \frac{{\rm i} \pi}{4} \right) \end{array} \right]
\end{array}
\end{equation}
for the $\pi /8$ operation on the second abstract qubit,
\begin{equation}
\label{s10}
\begin{array}{cc}
{\bf U}_{\mbox{\tiny ${\rm H}_{1}$}} = \exp (- {\rm i} \tau {\bf H}_{\mbox{\tiny ${\rm H}_{1}$}} / \hbar ) = \dfrac{1}{\sqrt{2}} \left[ \begin{array}{cccc}
1 & 0 & 1 & 0 \\
0 & 1 & 0 & 1 \\
1 & 0 & -1 & 0 \\
0 & 1 & 0 & -1 \end{array} \right]
\end{array}
\end{equation}
for the Hadamard operation on the first abstract qubit, and
\begin{equation}
\label{s11}
\begin{array}{cc}
{\bf U}_{\mbox{\tiny ${\rm H}_{2}$}} = \exp (- {\rm i} \tau {\bf H}_{\mbox{\tiny ${\rm H}_{2}$}} / \hbar ) = \dfrac{1}{\sqrt{2}} \left[ \begin{array}{cccc}
1 & 1 & 0 & 0 \\
1 & -1 & 0 & 0 \\
0 & 0 & 1 & 1 \\
0 & 0 & 1 & -1 \end{array} \right]
\end{array}
\end{equation}
for the Hadamard operation on the second abstract qubit. As mentioned above, these unitary transformations form a possible set of
operations suitable for universal computing.
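To illustrate how these matrices arise, here is a short verification sketch for ${\bf H}_{\mbox{\tiny ${\rm CNOT}$}}$ acting on the mapped basis. On $| 0,0 \rangle$ and $| 0,1 \rangle$, where spins $1$ and $2$ are both up, the flip-flop term $\mbox{\boldmath $\sigma$}_{x}^{(1)} \mbox{\boldmath $\sigma$}_{x}^{(2)} + \mbox{\boldmath $\sigma$}_{y}^{(1)} \mbox{\boldmath $\sigma$}_{y}^{(2)}$ annihilates the state and $\mbox{\boldmath $\sigma$}_{z}^{(3)} + \mbox{\boldmath $\sigma$}_{z}^{(4)}$ gives zero, so both states are left unchanged; on the span of $| 1,0 \rangle$ and $| 1,1 \rangle$ one finds
\begin{displaymath}
{\bf H}_{\mbox{\tiny ${\rm CNOT}$}} \, | 1,0 \rangle = \frac{\pi \hbar}{2 \tau} \, ( | 1,0 \rangle - | 1,1 \rangle ) \qquad \mbox{and} \qquad
{\bf H}_{\mbox{\tiny ${\rm CNOT}$}} \, | 1,1 \rangle = \frac{\pi \hbar}{2 \tau} \, ( | 1,1 \rangle - | 1,0 \rangle ) \; ,
\end{displaymath}
so that $( | 1,0 \rangle + | 1,1 \rangle )/\sqrt{2}$ has eigenvalue $0$ and $( | 1,0 \rangle - | 1,1 \rangle )/\sqrt{2}$ has eigenvalue $\pi \hbar / \tau$; exponentiation multiplies the latter by $e^{- {\rm i} \pi} = -1$, which swaps $| 1,0 \rangle$ and $| 1,1 \rangle$, reproducing the controlled-not matrix in (\ref{s7}).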
Inspecting the right-hand sides of Eqs. (\ref{s2})-(\ref{s6}), we notice that the construction of the above model of universal
computation, within a decoherence-free subspace, does not require interactions represented by the tensor product of two spin-$1/2$
operators of different directions. We believe that being able to choose all the $G_{xy}^{(mn)}(t)$ equal to zero, and yet obtain
universal computing, might be relevant in the context of realistic implementation. In this section we have established, therefore, one
of the simplest protections against decoherence for a complete set of quantum-computation gates. In the next section we investigate how
robust this protection can be, by introducing an additional perturbing term to the Hamiltonian of a general quantum computation being
processed within the DFS.
\section{Measurement-induced inhibition of decoherence within a perturbed decoherence-free subspace}
A two-qubit quantum computation can be described as a sequence of quantum-gate operations on an input density matrix $\mbox{\boldmath $\rho$}(0) = | \varphi
(0) \rangle \langle \varphi(0) |$, where $| \varphi(0) \rangle $ is the ket specifying the initial state of the two abstract qubits. If
the input state belongs to ${\cal H}_{4}$, then any computation involving the two abstract qubits can be performed by a sequence of the
universal operations described by (\ref{s2}) to (\ref{s6}), or, in a condensed form, by (\ref{s1}), where the
coefficients vary in time according to the specific computation, reproducing the required gate sequence.
We model the coupling with the environment by the product of ${\bf J}_{z}$ and observables of infinitely many harmonic oscillators representing the environment. By construction, the system Hamiltonian, at all times, satisfies
$[ {\bf H}_{0}(t),{\bf J}_{z} ] = 0$, since the computation is designed to occur entirely in ${\cal H}_{4}$. Now, it is obvious that any
perturbation added to ${\bf H}_{0}$, even if it acts only on the system, that couples ${\cal H}_{4}$ to its complement in the original
four-qubit Hilbert space, triggers the process of decoherence. Therefore, to simulate the possibility of this degradation of the
computation, it suffices to study the dynamics of the following Hamiltonian:
\begin{equation}
\label{t1}
{\bf H}(t) = {\bf H}_{0}(t) + {\bf H}_{\mbox{\tiny ${\rm E}$}} + \lambda {\bf J}_{z} \sum_{k} g_{k} ({\bf a}_{k} + {\bf a}_{k}^{\dagger}) + \epsilon
{\bf J}_{x}
\end{equation}
where $g_{k}$ is a coupling constant to the $k$th environmental degree of freedom, $\lambda$ and $\epsilon$ are positive real constants
satisfying $\lambda \gg \epsilon$, and the environment Hamiltonian is given by
\begin{equation}
\label{t2}
{\bf H}_{\mbox{\tiny ${\rm E}$}} = \sum_{k} \hbar \omega_{k} \, {\bf a}_{k}^{\dagger} {\bf a}_{k}
\end{equation}
where ${\bf a}_{k}$ and ${\bf a}_{k}^{\dagger}$ are the annihilation and creation operators, respectively, of the $k$th quantum harmonic
oscillator, of frequency $\omega_{k}$, representing one of the environmental degrees of freedom. We remark that here the term ``environment'' may also include a measuring apparatus.
Next we show that, as $\lambda$ increases as compared to $\epsilon$, the degradation of the computation dynamics due to decoherence decreases
substantially, reaching a regime in which it can be safely neglected. Our interpretation of this fact parallels the ideas of \cite{i14}:
``the strong coupling to the environment functions as a continuous measurement of whether the perturbation takes the system state out of
${\cal H}_{4}$, protecting the dynamics against the perturbation.'' We are, therefore, benefiting from the quantum Zeno effect to induce
the inhibition of the decoherence process.
For convenience, let us define the unitary time-evolution operator
\begin{equation}
\label{t3}
\mbox{\boldmath ${\cal U}$}(t) \equiv \exp \left[ - \frac{{\rm i} t}{\hbar} \left( {\bf H}_{\mbox{\tiny ${\rm E}$}} + \lambda {\bf J}_{z} \sum_{k} g_{k}
( {\bf a}_{k} + {\bf a}_{k}^{\dagger} ) \right) \right] \; .
\end{equation}
Using the results of \cite{i25} and ${\bf J}_{\pm} = {\bf J}_{x} \pm {\rm i} {\bf J}_{y}$, we deduce that
\begin{eqnarray}
\label{t4}
\mbox{\boldmath ${\cal U}$}^{\dagger}(t)\, {\bf J}_{x} \, \mbox{\boldmath ${\cal U}$}(t) &=& e^{\mbox{\boldmath {\scriptsize $\gamma$}}(t) {\bf J}_{z}} e^{- {\rm i} \alpha(t) {\bf J}_{z}^{2}} \, {\bf J}_{x} \, e^{{\rm i} \alpha(t)
{\bf J}_{z}^{2}} e^{- \mbox{\boldmath {\scriptsize $\gamma$}}(t) {\bf J}_{z}} \nonumber \\
&=& \frac{e^{{\rm i} \alpha(t)}}{2} \left( e^{[\mbox{\boldmath {\scriptsize $\gamma$}}(t) - 2 {\rm i} \alpha(t) {\bf J}_{z} ]} \, {\bf J}_{+} + e^{- [ \mbox{\boldmath {\scriptsize $\gamma$}}(t)- 2 {\rm i} \alpha(t)
{\bf J}_{z} ]} \, {\bf J}_{-} \right)
\end{eqnarray}
where
\begin{displaymath}
\alpha(t) = \sum_{k} \left( \frac{\lambda g_{k}}{\omega_{k}} \right)^{2} \left[ (\omega_{k} t) - \sin(\omega_{k} t) \right] \qquad \qquad
\mbox{\boldmath $\gamma$}(t) = \sum_{k} [ f_{k}(t) {\bf a}_{k}^{\dagger} - f_{k}^{\ast}(t) {\bf a}_{k} ]
\end{displaymath}
with
\begin{displaymath}
f_{k}(t) = - \frac{\lambda g_{k}}{\omega_{k}} [ 1 - e^{{\rm i} (\omega_{k} t)} ] \; .
\end{displaymath}
Furthermore, let us denote by ${\bf U}_{0}(t)$ the unitary operator that satisfies the evolution equation
\begin{displaymath}
{\rm i} \hbar \, \frac{d{\bf U}_{0}(t)}{dt} = {\bf H}_{0}(t) {\bf U}_{0}(t)
\end{displaymath}
with ${\bf U}_{0}(0) = {\bf 1}$, where ${\bf 1}$ is the identity operator on ${\cal H}_{4}$. Because ${\bf H}_{0}(t)$ commutes with
${\bf J}_{z}$, it follows that ${\bf U}_{0}(t)$ commutes with $\mbox{\boldmath ${\cal U}$}(t)$, and the Hamiltonian for the qubits and its environment in the
interaction picture is written as
\begin{equation}
\label{t5}
\mbox{\boldmath ${\cal H}$}(t) = \epsilon \, {\bf U}_{0}^{\dagger}(t) \left[ \mbox{\boldmath ${\cal U}$}^{\dagger}(t) \, {\bf J}_{x} \, \mbox{\boldmath ${\cal U}$}(t) \right] {\bf U}_{0}(t) \; .
\end{equation}
\begin{figure}[!t]
\centering
\begin{minipage}[b]{0.60\linewidth}
\includegraphics[width=\linewidth]{figure1.EPS}
\end{minipage}
\caption{Fidelity as a function of dimensionless time $\epsilon t$ for different values of $\Lambda$. For this particular illustration, we
take $\nu_{c} = 10^{5}$ and consider the following values of $\Lambda$: 2000 (solid line), 1150 (dashed line), and 800 (dotted line).
In this figure, the gate-operation time is $\tau$, $\epsilon$ being such that $\epsilon \tau = 1$. The inset shows the details of the fidelity
as a function of $\epsilon t$, where we can see that this function never exceeds unity, respecting the required upper bound, as it should.}
\end{figure}
The fidelity of the computing process is given by \cite{i24}
\begin{equation}
\label{t6}
{\cal F}(t) = \mbox{${\rm Tr}$}_{\mbox{\tiny ${\rm E}$}} \left[ \langle \varphi (0) | \mbox{\boldmath $\rho$}_{\mbox{\tiny ${\rm I}$}}(t) | \varphi (0) \rangle \right]
\end{equation}
where the trace is taken over the environmental degrees of freedom and $\mbox{\boldmath $\rho$}_{\mbox{\tiny ${\rm I}$}}(t)$ is the density matrix of the qubits and its
environment in the interaction picture, i.e., $\mbox{\boldmath $\rho$} _{\mbox{\tiny ${\rm I}$}}(t) = | \Psi_{\mbox{\tiny ${\rm I}$}}(t) \rangle \langle \Psi_{\mbox{\tiny ${\rm I}$}}(t) |$, with
$| \Psi_{\mbox{\tiny ${\rm I}$}}(t) \rangle$ satisfying the Schr\"{o}dinger equation
\begin{equation}
\label{t7}
{\rm i} \hbar \, \frac{d | \Psi_{\mbox{\tiny ${\rm I}$}}(t) \rangle}{dt} = \mbox{\boldmath ${\cal H}$}(t) | \Psi_{\mbox{\tiny ${\rm I}$}}(t) \rangle \; .
\end{equation}
Equation (\ref{t7}) can be solved iteratively in the usual approach of perturbation theory and it is easy to show that the first-order
term does not contribute to the fidelity. To obtain the second-order contribution, we need to calculate the quantity $\langle \varphi (0)
| \mbox{\boldmath ${\cal H}$}(t_{1}) \mbox{\boldmath ${\cal H}$}(t_{2}) | \Psi_{\mbox{\tiny ${\rm I}$}}(0) \rangle$. Of course, the qubits are initially prepared in a pure state and $| \Psi_{\mbox{\tiny ${\rm I}$}}
(0) \rangle$ is initially factored so that $| \Psi_{\mbox{\tiny ${\rm I}$}}(0) \rangle = | \varphi (0) \rangle \otimes | {\rm E} \rangle$, where
$| {\rm E} \rangle $ is the initial state of the environment. Hence, to evaluate the second-order perturbation contribution to the
fidelity we need to calculate the quantity $\langle \Phi (t_{1}) | \Phi (t_{2}) \rangle$, where
\begin{displaymath}
| \Phi(t) \rangle = \mbox{\boldmath ${\cal H}$}(t) | \varphi (0) \rangle \; .
\end{displaymath}
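Regarding the remark above that the first-order term does not contribute to the fidelity, a minimal way to see this is that the spin part of the corresponding matrix element vanishes,
\begin{displaymath}
\langle \varphi (0) | \, {\bf U}_{0}^{\dagger}(t) \, {\bf J}_{\pm} \, {\bf U}_{0}(t) \, | \varphi (0) \rangle = 0 \; ,
\end{displaymath}
since $[ {\bf H}_{0}(t), {\bf J}_{z} ] = 0$ keeps ${\bf U}_{0}(t) | \varphi (0) \rangle$ inside the $M_{z} = + \hbar$ eigenspace, while ${\bf J}_{\pm}$ maps that eigenspace onto states with a different ${\bf J}_{z}$ eigenvalue, orthogonal to it; the factors $e^{\pm [ \mbox{\boldmath {\scriptsize $\gamma$}}(t) - 2 {\rm i} \alpha(t) {\bf J}_{z} ]}$ appearing in (\ref{t4}) do not alter this conclusion, because functions of ${\bf J}_{z}$ preserve its eigenspaces.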
Now, let us define the auxiliary quantities $| \chi _{\pm}(t) \rangle \equiv {\bf U}_{0}^{\dagger}(t) \, {\bf J}_{\pm} \, {\bf U}_{0}(t)
| \varphi (0) \rangle$. Because $| \varphi (0) \rangle$ belongs to ${\cal H}_{4}$, it follows that ${\bf J}_{z} |\chi _{-}(t)\rangle = 0$
and ${\bf J}_{z} | \chi_{+} (t) \rangle = 2 | \chi_{+}(t) \rangle$. Therefore, it is easy to check that
\begin{eqnarray}
\label{t8}
\langle \Phi (t_{1}) | \Phi (t_{2}) \rangle &=& \Gamma_{++}(t_{1},t_{2}) \, e^{-\mbox{\boldmath {\scriptsize $\gamma$}}(t_{1})} e^{\mbox{\boldmath {\scriptsize $\gamma$}}(t_{2})} +
\Gamma_{+-}(t_{1},t_{2}) \, e^{-\mbox{\boldmath {\scriptsize $\gamma$}}(t_{1})} e^{-\mbox{\boldmath {\scriptsize $\gamma$}}(t_{2})} \nonumber \\
& & + \, \Gamma_{-+}(t_{1},t_{2}) \, e^{\mbox{\boldmath {\scriptsize $\gamma$}}(t_{1})} e^{\mbox{\boldmath {\scriptsize $\gamma$}}(t_{2})} + \Gamma_{--}(t_{1},t_{2}) \, e^{\mbox{\boldmath {\scriptsize $\gamma$}}(t_{1})}
e^{-\mbox{\boldmath {\scriptsize $\gamma$}}(t_{2})}
\end{eqnarray}
where we define the c-number functions as
\begin{eqnarray}
\Gamma_{++}(t_{1},t_{2}) &=& \frac{1}{4} \, e^{3 {\rm i} [ \alpha(t_{1}) - \alpha(t_{2}) ]} \, \langle \chi_{+}(t_{1}) | \chi_{+}(t_{2}) \rangle
\nonumber \\
\Gamma_{+-}(t_{1},t_{2}) &=& \frac{1}{4} \, e^{{\rm i} [ 3 \alpha(t_{1}) + \alpha(t_{2}) ]} \, \langle \chi_{+}(t_{1}) | \chi_{-}(t_{2}) \rangle
\nonumber \\
\Gamma_{-+}(t_{1},t_{2}) &=& \frac{1}{4} \, e^{- {\rm i} [ \alpha(t_{1}) + 3 \alpha(t_{2}) ]} \, \langle \chi_{-}(t_{1}) | \chi_{+}(t_{2})
\rangle \nonumber \\
\Gamma_{--}(t_{1},t_{2}) &=& \frac{1}{4} \, e^{- {\rm i} [ \alpha(t_{1}) - \alpha(t_{2}) ]} \, \langle \chi_{-}(t_{1}) | \chi_{-}(t_{2}) \rangle
\nonumber \; .
\end{eqnarray}
To calculate the contribution of order $\epsilon^{2}$ to the fidelity, the trace over the environmental degrees of freedom requires the
evaluation of expectation values like $\langle {\rm E} | e^{- \mbox{\boldmath {\scriptsize $\gamma$}}(t_{1}) } e^{\mbox{\boldmath {\scriptsize $\gamma$}}(t_{2}) } | {\rm E} \rangle$. For this purpose,
we first observe that $\mbox{\boldmath $\gamma$}(t)$ has a mathematical structure analogous to the argument of the displacement operator ${\bf D}(z_{k}) =
\exp ( z_{k} {\bf a}_{k}^{\dagger} - z_{k}^{\ast} {\bf a}_{k} )$ for a particular $k$-oscillator belonging to the environment. The
second step consists in the expansion of $| {\rm E} \rangle$ in a convenient basis which permits us to include a wide class of
environmental states (e.g., a Fock basis expansion with arbitrary coefficients). Thus, the expectation values can be promptly calculated
and the final results are proportional to $\exp(- \lambda^{2} {\rm F})$, where ${\rm F}$ is a real function of $t_{1}$ and $t_{2}$ \cite{i26}. It
is important to mention that contributions of higher order than $\epsilon^{2}$ present the same factor in the calculations of
expectation values and consequently similar analysis can be applied, implying that the fidelity tends toward unity as $\lambda$ increases.
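To sketch where the factor $\exp(- \lambda^{2} {\rm F})$ comes from in the simplest case, assume the environment starts in its vacuum state, $| {\rm E} \rangle = | 0 \rangle$, write $e^{\mbox{\boldmath {\scriptsize $\gamma$}}(t)}$ as a product of displacement operators, $e^{\mbox{\boldmath {\scriptsize $\gamma$}}(t)} = \prod_{k} {\bf D}(f_{k}(t))$, and use ${\bf D}(z_{1}) {\bf D}(z_{2}) = {\bf D}(z_{1} + z_{2}) \, e^{{\rm i} \, {\rm Im} (z_{1} z_{2}^{\ast})}$; then
\begin{displaymath}
\big| \langle 0 | e^{- \mbox{\boldmath {\scriptsize $\gamma$}}(t_{1})} e^{\mbox{\boldmath {\scriptsize $\gamma$}}(t_{2})} | 0 \rangle \big| =
\exp \Big( - \frac{1}{2} \sum_{k} | f_{k}(t_{1}) - f_{k}(t_{2}) |^{2} \Big) =
\exp \Big( - 2 \lambda^{2} \sum_{k} \frac{g_{k}^{2}}{\omega_{k}^{2}} \, \sin^{2} \frac{\omega_{k}(t_{1}-t_{2})}{2} \Big) \; ,
\end{displaymath}
which is indeed of the form $\exp(- \lambda^{2} {\rm F})$ with ${\rm F}$ real and non-negative, consistent with the statement above that more general environmental states lead to the same $\exp(- \lambda^{2} {\rm F})$ structure.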
To illustrate these calculations, for computational convenience, we have assumed a continuum-mode approximation with a non-ohmic spectral
density $R(\omega) = (\omega^{2} / 2 \omega_{c}^{3}) e^{- \omega / \omega_{c}}$, cutting off exponentially as $\omega$ gets greater than the cut-off
frequency $\omega_{c}$. Furthermore, for the sake of simplicity, we also take a constant coupling $g_{k} = 1$ for all $k$, and a nontrivial
initial superposition state given by $| \varphi (0) \rangle = (1/ \sqrt{2}) (|1,0 \rangle - | 0,0 \rangle)$ in the abstract basis. The
fidelity (\ref{t6}) for this case is shown in figure 1, where we define the dimensionless parameters $\Lambda = \lambda / \epsilon$ and
$\nu_{c} = \omega_{c} / \epsilon$. We confirm that, in this simple illustrative example, as $\Lambda$ increases the fidelity function oscillates
while tending to unity, and this fact corroborates our general model.
\section{Conclusion}
Nowadays, the implementation of quantum gates within decoherence-free subspaces is one of the fundamental strategies in the development
of a realistic quantum-computer technology. The current experimental investigations have only considered existing DFS's, without
attempting to protect the quantum-information processing \cite{i15,i16,i17,i18,i19,i20}. Intending to improve this scenario, our present
proposal advances the idea of inhibiting the degradation of the gate operation within the DFS by continuous measurement. Although we
have illustrated this idea through a simplistic model, within a perturbation-theory context, we strongly believe that our results
are not particular. A non-perturbative generalization of the present approach, independent of the model used to describe the environment
and its coupling to the system, is currently under our scrutiny.
\section*{Acknowledgments}
This work has been supported by Funda\c{c}\~{a}o de Amparo \`{a} Pesquisa do Estado de S\~{a}o Paulo (FAPESP), Brazil, projects $\sharp$
01/11562-2 (PEMFM), $\sharp$ 01/11209-0 (MAM) and $\sharp$ 00/15084-5 (RJN). We also acknowledge support from the Millennium Institute
for Quantum Information - Conselho Nacional de Desenvolvimento Cient\'{\i}fico e Tecnol\'{o}gico (CNPq), Brazil.
\begin{thebibliography}{99}
\bibitem{i1} Shor P W 1994 {\em Proceedings of the 35th Annual Symposium on the Foundations of Computer Science} edited by S Goldwasser
(IEEE Computer Science, Los Alamitos, CA) p. 124
\bibitem{i2} Shor P W 1997 Polynomial-time algorithms for prime factorization and discrete logarithms on a quantum computer {\em SIAM J.
Comput.} {\bf 26} 1484
\bibitem{i3} Grover L K 1997 Quantum Computers Can Search Arbitrarily Large Databases by a Single Query {\em Phys. Rev. Lett.} {\bf 79} 4709
\bibitem{i4} Shor P W 1995 Scheme for reducing decoherence in quantum computer memory {\em Phys. Rev. A} {\bf 52} R2493
\bibitem{i5} Zurek W H 2003 Decoherence, einselection, and the quantum origins of the classical {\em Rev. Mod. Phys.} {\bf 75} 715
\bibitem{i6} Palma G M, Suominen K A and Ekert A K 1996 Quantum computers and dissipation {\em Proc. R. Soc. London Ser. A} {\bf 452} 567
\bibitem{i7} Duan L M and Guo G C 1997 Preserving coherence in quantum computation by pairing quantum bits {\em Phys. Rev. Lett.} {\bf 79}
1953
\bibitem{i8} Zanardi P and Rasetti M 1997 Noiseless quantum codes {\em Phys. Rev. Lett.} {\bf 79} 3306
\bibitem{i9} Lidar D A, Chuang I L and Whaley K B 1998 Decoherence-Free Subspaces for Quantum Computation {\em Phys. Rev. Lett.} {\bf 81}
2594
\bibitem{i10} Zanardi P 1999 Computation on an error-avoiding quantum code and symmetrization {\em Phys. Rev. A} {\bf 60} R729
\bibitem{i11} Bacon D, Lidar D A and Whaley K B 1999 Robustness of decoherence-free subspaces for quantum computation {\em Phys. Rev. A}
{\bf 60} 1944
\bibitem{i12} Kempe J, Bacon D, Lidar D A and Whaley K B 2001 Theory of decoherence-free fault-tolerant universal quantum computation
{\em Phys. Rev. A} {\bf 63} 042307
\bibitem{i13} Braun D, Braun P A and Haake F 2000 Long-lived quantum coherence between macroscopically distinct states in superradiance
{\em Opt. Commun.} {\bf 179} 411
\bibitem{i14} Beige A, Braun D, Tregenna B and Knight P L 2000 Quantum Computing Using Dissipation to Remain in a Decoherence-Free Subspace
{\em Phys. Rev. Lett.} {\bf 85} 1762
\bibitem{i15} Kwiat P G, Berglund A J, Altepeter J B and White A G 2000 Experimental Verification of Decoherence-Free Subspaces {\em Science}
{\bf 290} 498
\bibitem{i16} Altepeter J B, Hadley P G, Wendelken S M, Berglund A J and Kwiat P G 2004 Experimental Investigation of a Two-Qubit
Decoherence-Free Subspace {\em Phys. Rev. Lett.} {\bf 92} 147901
\bibitem{i17} Kielpinski D, Meyer V, Rowe M A, Sackett C A, Itano W M, Monroe C and Wineland D J 2001 A Decoherence-Free Quantum Memory Using
Trapped Ions {\em Science} {\bf 291} 1013
\bibitem{i18} Mohseni M, Lundeen J S, Resch K J and Steinberg A M 2003 Experimental Application of Decoherence-Free Subspaces in an Optical
Quantum-Computing Algorithm {\em Phys. Rev. Lett.} {\bf 91} 187903
\bibitem{i19} Ollerenshaw J E, Lidar D A and Kay L E 2003 Magnetic Resonance Realization of Decoherence-Free Quantum Computation {\em Phys.
Rev. Lett.} {\bf 91} 217904
\bibitem{i20} Bourennane M, Eibl M, Gaertner S, Kurtsiefer C, Cabello A and Weinfurter H 2004 Decoherence-Free Quantum Information Processing
with Four-Photon Entangled States {\em Phys. Rev. Lett.} {\bf 92} 107901
\bibitem{i21} DiVincenzo D P 1995 Two-bit gates are universal for quantum computation {\em Phys. Rev. A} {\bf 51} 1015
\bibitem{i22} Bremner M J, Dawson C M, Dodd J L, Gilchrist A, Harrow A W, Mortimer D, Nielsen M A and Osborne T J 2002 Practical Scheme for
Quantum Computation with Any Two-Qubit Entangling Gate {\em Phys. Rev. Lett.} {\bf 89} 247902
\bibitem{i23} Braun D, Haake F and Strunz W T 2001 Universality of Decoherence {\em Phys. Rev. Lett.} {\bf 86} 2913
\bibitem{i24} Nielsen M A and Chuang I L 2000 {\em Quantum Computation and Quantum Information} (United Kingdom: Cambridge University Press)
\bibitem{i25} Reina J H, Quiroga L and Johnson N F 2002 Decoherence of quantum registers {\em Phys. Rev. A} {\bf 65} 032326
\bibitem{i26} Perelomov A 1986 {\em Generalized Coherent States and Their Applications} (Berlin: Springer-Verlag)
\end{thebibliography}
\end{document}
\begin{document}
\title{Quantum Mechanical Inclusion of the Source in the Aharonov-Bohm Effects}
\author{Philip Pearle}
\email{[email protected]}
\affiliation{Emeritus, Department of Physics, Hamilton College, Clinton, NY 13323}
\author{Anthony Rizzi}
\email{[email protected]}
\affiliation{Institute for Advanced Physics, PO Box 15030, Baton Rouge, Louisiana, 70895}
\date{\today}
\pacs{03.65.-w, 03.65.Vf, 03.65.Ta, 03.65.Ud}
\begin{abstract}
Following semiclassical arguments by Vaidman, we show, for the first time in a fully quantum mechanical way, that the phase shifts arising in the Aharonov-Bohm (A-B) magnetic or electric effects can be treated as due to the electric force of a classical electron, respectively acting on quantized solenoid particles or quantized capacitor plates. This is in contrast to the usual approach, which treats both effects as arising from non-field producing potentials acting on the quantized electron. Moreover, we consider the problems of an interacting quantized electron and quantized solenoid or quantized capacitor to see what phase shift their joint wave function acquires. We show, in both cases, that the net phase shift is indeed the A-B shift (one might have expected twice the A-B shift, given the above two mechanisms for each effect). The solution to the exact Schr\"odinger equation may be treated (approximately for the magnetic A-B effect, which we show using a variational approach, exactly for the electric A-B effect) as the product of two solutions of separate Schr\"odinger equations for each of the two quantized entities, but with an extra phase. The extra phase provides the negative of the A-B shift, while the two separate Schr\"odinger equations without the extra phase each provide the A-B phase shift, so that the product wave function produces the net A-B phase shift. \end{abstract}
\maketitle
\section{Introduction}
The Aharonov-Bohm (A-B) magnetic effect \cite{AB,FR, AR} predicts relative phase shifts of two electron wave packets moving in alternative paths around the outside of a long cylindrical solenoid. Classically, it appears impossible that there should be a measurable effect in such circumstances, where the electron moves in a region of non-field producing potentials, i.e., in a region where there is a vector and/or scalar potential, but no electric or magnetic fields. That such potentials can produce physical effects is considered by many to be a prime example of the marvelous novelty quantum theory reveals vis-a-vis classical theory.
Recently, Vaidman \cite{Vaidman} argued, using a semi-classical analysis of a model of the solenoid that, under the influence of the electric-field-producing vector potential of the electron treated classically, the solenoid produces a relative phase shift (depending upon the direction of traverse of the electron wave packet) exactly equal to the standard magnetic A-B phase shift.
The importance of the A-B effect suggests that Vaidman's result should be carefully considered in the context of a fully quantum mechanical treatment of the solenoid particles. We do this in this paper and obtain Vaidman's result.
However, given the radical interconnectedness implied by quantum mechanics, it is natural to consider the problem where \textit{both} the electron and solenoid
particles are quantized\cite{PTT}. Does one then get twice the A-B phase shift, the sum of the usual and Vaidman results? The answer is no: we consider that problem here, and find
that, indeed, one gets the usual A-B phase shift. This occurs through a consistent treatment of the interaction between the particles of the solenoid and the electron.
We model the solenoid as consisting of $N$ particles. They, and the electron are considered to be described by well-defined wave packets, and the joint wave function is approximated as the product of such wave functions. Putting this approximate wave function into the variational principle for the Schr\"odinger equation results in $N+1$ Schr\"odinger equations, where
each particle evolves under the vector potential of the other particle's mean positions and momenta, which are the positions and momenta of the same classical problem.
However, there is an extra phase term in each Schr\"odinger equation. By a phase transformation which does not change the overall phase of the wave function,
the extra phase term may be removed from all Schr\"odinger equations but one. We choose to put that extra phase term into the electron's Schr\"odinger equation. That extra phase turns out to be
the time integral of the interaction term in the Hamiltonian, when operators are replaced by their classical counterparts. We then suppose
the $N+1$ particles are involved in an interference experiment, and show that the extra phase
is the negative of the A-B phase shift. The Schr\"odinger equation for the electron (evolving in the classical field of the solenoid), without the extra phase term,
gives the usual A-B phase shift. Therefore, the Schr\"odinger equation for the electron with the extra phase term gives 0 phase shift. The Schr\"odinger equation for the $N$
solenoid particles gives the A-B phase shift. Thus, the net result for the product wave function is the A-B phase shift.
Although we do not bother to consider it here, the extra phase might just as well have been eliminated from the electron's Schr\"odinger equation
and distributed in any way among the $N$ solenoid particles. The result then is that the joint wave function of the solenoid particles, including the extra phases, produces zero
phase shift in the interference experiment. Then, the A-B phase shift is attributable solely to the electron's wave function, as in the usual description.
In a companion paper\cite{PR2}, we point out that consideration of \textit{all} relevant quantum objects in this problem requires quantizing the vector potential as well.
A similar analysis of the joint wave function of electron, solenoid and vector potential, approximated as a product wave function and
governed by the separate Schr\"odinger equations which arise from the variational principle, gives the usual A-B phase shift. As in this paper, depending upon which
Schr\"odinger equation(s) get the extra phase, the A-B shift may be attributed solely to the wave functon of the electron, or solely to the solenoid, or additionally solely to the vector potential, or
to a combination thereof.
Our analysis also shows why the usual phase shift due to the electron, and the phase shift due to the solenoid have to be the same. To complete
the story we spell out the details of the model of the solenoid, show that the amplitude of the interference term is essentially 1 for reasonable values of the model parameters, and do the direct calculation of the A-B phase shift arising from the solenoid.
Next, we turn to show that similar considerations apply in the case of the electric A-B effect.
Vaidman proposed a simplified physical setup wherein the electric potential of two classical charges produces a zero-field environment for the electron and, thereby, an electric A-B phase shift.
Then, he argued for an alternative treatment, where the electron is treated classically and the charges are quantized, and showed that the electric
field of the electron produces the same electric A-B phase shift for the two charges.
We shall consider here, not Vaidman's simplified model, but the standard A-B electric phase shift situation, of an electron passing over or under a charged capacitor. The usual calculation attributes the phase shift to the electron packets moving in
regions of different constant scalar potential created by a classical capacitor. We shall show, as Vaidman did in his quasi-classical analysis for his model, that this same A-B phase shift
is obtained when the electron is treated classically
and the capacitor plates (more precisely, the plate's center of mass coordinates) are treated quantum mechanically, thereby acquiring the A-B phase shift in the electric field of the electron.
Again, these are two alternative, mathematically but not conceptually equivalent, ways to calculate the same thing. So, we consider the situation where \textit{both} the electron and the capacitor plates have quantum dynamics. Similarly to the magnetic A-B effect, we show how the joint phase shift may be attributed solely to the electron's motion, or may be attributed solely to the capacitor plate motion, simply by shifting a term in the Hamiltonian to either the electron part of the Hamiltonian or to the capacitor plate part of the Hamiltonian,
or that other splits may be made.
Thus, for both these classic examples of the A-B effects, there is support for an alternative (espoused by Vaidman) to the usual views of
these effects as due to the electron moving in a non-field producing potential. It is that the effects may, with equal justification, be viewed
as due to the motion of charged objects caused by forces exerted by the electron. However, other considerations than ours may lead one to prefer one point of view over the other.
The plan of this paper is as follows.
In Section II, we calculate the amplitude and phase of a wave packet describing a charged particle with well-localized position and momentum on a one-dimensional pre-determined path
(e.g., moving in a tube with high potential walls), under arbitrary time-varying (but, not space-varying) externally applied electric field-producing vector and scalar potentials, for a time interval $T$. We show that the phase can be completely expressed in terms of the classical motion of the particle (which is the mean motion of the wave packet).
In Section III, we generalize the result of the previous section, to calculate the amplitude and phase associated to a wave packet in the more general case where the
external electromagnetic forces experienced by the particle depend arbitrarily not only upon time, but also upon the location of the particle.
The approximate solution we develop is for the case in which the particle's mean position follows the classical path.
In Section IV, we first consider two quantized particles interacting via their mutual vector and scalar potentials. As described above, we approximate
the wave function as the product of localized wave packets for each particle. Using the variational principle
for the exact Schr\"odinger equation, we derive Schr\"odinger equations for each particle.
We show the Schr\"odinger equations describe each particle as moving under the potentials of the
other particle treated classically. This allows us to use the results of Section III to calculate the amplitude and phase of each particle. But, in addition,
each Schr\"odinger equation contains an extra phase term. By adding a phase to one particle and subtracting it from the other, so the overall
phase of the wave function is not affected, the extra phase is removed from one particle and belongs solely to the other.
It is shown that the extra phase is the time integral of the interaction term in the Hamiltonian,
where all operators have been replaced by the corresponding classical variables: call this $-\Phi$. It is shown that the
Schr\"odinger equation for the particle without the extra phase, and the other particle's Schr\"odinger equation
with the extra phase removed, each produce the same phase, $\Phi$. So, the total phase is $\Phi$. And, in an interference experiment involving the two particles
moving on two different paths, it is the value of $\Phi$ on one path minus the value of $\Phi$ on the other path that provides the total phase shift.
We then extend these results to the case of $N$ particles, each interacting with one ``special" particle we call the ``electron."
Again, the exact wave function is approximated as a product of all $N+1$ particle wave functions.
Schr\"odinger equations are obtained for each particle and each equation has an extra phase term which may be eliminated from the $N$ particles,
leaving the electron wave function with all the extra phase.
The resulting phase situation is as follows. The electron's extra phase term is $-\Phi$, so the electron Schr\"odinger equation
produces 0 phase. Then, the net phase is that of the $N$ particles, which is $\Phi$.
Again, in a considered interference experiment where there are two different paths for all particles, the total net phase shift is
the difference of this phase $\Phi$ for the two paths.
Additionally, for this interference experiment, we obtain an expression for the amplitude of the
interference term, expressed as a function of the $N$ particle classical positions and momenta.
In Section V we present a fully quantum mechanical approach to Vaidman's
semi-classical considerations. In particular, we apply the argument given above to a detailed model
of the solenoid as a collection of $N$ well-localized circulating quantized particles, interacting with the electron. The results of the preceding section immediately apply. The total net phase shift, the difference of the net phase of the joint electron-solenoid wave function for two different paths taken by the electron, is the A-B phase shift.
Also, it is explained why the Vaidman scenario calculation \textit{must} produce
the same phase shift as the usual calculation. For further insight, we give an intuitive time-averaged derivation of our result.
Furthermore, we consider the magnitude of the interference term in this A-B experiment, which experimentally\cite{Ton} is 1. In Vaidman's semi-classical calculation of the magnetic A-B effect, it is necessary for the electron at the end of its traverse to
decelerate back to zero speed and thus cancel the initial impulse that started the solenoid cylinders moving. This is in order that each cylinder's final motion be unchanged from its initial (pre-acceleration) motion and its entanglement with the electron
packets be removed, else there would be no interference\cite{Vaidman}. In our case, there is no need for such a caveat. We show in our model, despite the two different displacements of each solenoid particle associated with the two different electron paths, that the magnitude of the interference term is close to 1 because the displacements are very small
(since each contributes only a very tiny amount to the phase shift.)
This completes our presentation of the A-B magnetic effect, showing how the A-B phase shift with interference amplitude 1 is obtained when considering both the quantized electron and quantized solenoid.
Section VI applies the result of Section III to the electric A-B effect.
We show that the electric case is exactly parallel to the magnetic A-B case. We
consider the exact problem wherein we have a quantized electron and quantized capacitor plates (modeled as sheets of glued-together charges, so their centers of mass
are the plate dynamical variables).
We explain how the phase shift expression can either be written as if it were due to the electron moving in the non-field producing potential external to the capacitor or, alternatively, as if it were due to the capacitor plates moving in the electric field of the electron, or in some other equivalent way.
Section VII contains some concluding, summary remarks.
We set $\hbar=1$. In order to avoid superfluous $4\pi$ factors, we employ cgs units in all but Section VI and Appendix D, where we employ rationalized MKS units.
\section{Particle Moving Under Time-Dependent Non-Spatially Varying Forces.}
In this and the next section, we consider the general problem of a particle well-localized in position and momentum and moving in one dimension under scalar and vector potentials, and calculate the magnitude and phase of its wave function.
The problem of a particle moving in one dimension is to be considered as a limiting case of the three dimensional problem of a particle moving in
a three dimensional ``pipe." This is relevant here, because we shall model the solenoid as a collection of particles moving in stacked tubes, i.e., the ``pipe" is a tube of toroidal shape. A bonus is that we can treat the orbiting ``electron" the same way, taking its motion to be in a circular tube
of its own, concentric with and outside the solenoid.
In cylindrical coordinates, the kinetic energy in the Hamiltonian is separable, and so a tube may be modeled as providing, say, a square well potential in the radial and $z$-directions, and supposing that the particle under consideration is
in the ground state in these directions. Thus, it has free motion in the azimuthal direction. If the sides of the tube are allowed to be as small as desired, the separation in energy between the ground and lowest excited states gets as large as desired, so that any external field has negligible probability of exciting the radial or $z$-motion, and just consists in a distortion of the ground state. Thus, the only interesting motion is the free, one-dimensional particle motion.
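As a rough quantitative illustration, take for definiteness the transverse confinement to be an infinite square well of width $a$ (an illustrative choice of parameters on our part); the gap between the transverse ground and first excited levels is then
\begin{displaymath}
\Delta E = \frac{3 \pi^{2}}{2 m a^{2}} \qquad (\hbar = 1),
\end{displaymath}
which grows without bound as $a \rightarrow 0$, so that for a sufficiently narrow tube an external field of fixed strength merely distorts the transverse ground state instead of exciting it.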
Of course, this one dimensional motion approximation to describing a linearly constrained three dimensional motion has greater applicability than to
just circular motion in cylindrical coordinates. Immediately, one notes that this argument is just as readily applied to motion along a constant parameter curve in any of the 13
coordinate systems in which the Laplacian is separable. It should also be applicable to motion along a larger class of curves, along which one can choose
local coordinates so that the Laplacian is effectively separable, at least into the one-dimensional linear motion and two-dimensional orthogonal motion: we shall not pursue here an analysis of the class of curves for which
the one-dimensional motion approximation to constrained three dimensional motion has validity.
Obviously, this is just one possible configuration for the magnetic A-B effect. In the only actually rigorously performed experiment\cite{Ton}, the solenoid was replaced by a small magnetized torus and, after one packet goes through the torus hole and the other passes outside, the electron wave functions were allowed to spread and overlap.
While the results obtained here really only apply to the configuration of our model, they suggest that there should be counterparts when other configurations are considered.
In this section, we consider that the motion of the particle in one dimension is under scalar and vector potentials associated to forces which are solely time-dependent (i.e., space-independent). This is an exactly solvable problem, and we find the expression for the amplitude and phase acquired by the particle's wave function in this case. We do this before tackling, in the next section, the more general but not exactly solvable problem where the forces depend upon the particle's position as well as upon time.
The result from this section, limited to solely time-dependent (non-spatially varying) forces, is however applicable in Section VI, to the case of the quantized capacitor plates moving under the electron's
classical Coulomb field. The plates are assumed to be so massive that they do not
displace significantly during the electron's traverse, which simplifies the calculation.
\subsection {Classical motion}
First, consider the \textit{classical} motion of a particle, of charge $q$ and mass $m$, under solely time-dependent electric forces due to a vector and scalar potential.
If the vector potential is $A(t)$, then
its associated electric force $-\frac{q}{c}\frac{d}{dt}A(t)$ has just time dependence.
Expand the potential energy $qV(x,t)$ for a particle which moves on the classical trajectory $x=x_{cl}(t)$
about the particle's position: $qV(x,t)=qV(x_{cl}(t),t)+(x-x_{cl}(t))qV'(x_{cl}(t),t)+...$, where $x$ is the distance along the path. Since the force felt by the particle is
to have no spatial dependence, we assume no higher power of $(x-x_{cl}(t))$ than the first. Moreover, since all the problems of concern to us are of first order in $q$, and since
$x_{cl}(t)=x_{0}+v_{0}t+o(q)$ ($x_{0}$ is the particle's initial position, $v_{0}$ its initial velocity), we replace $x_{cl}(t)$ by $x_{0}+v_{0}t$. Thus, we write the scalar potential energy
we shall consider as $qV(t)+(x-x_{0}-v_{0}t)qV'(t)\equiv qg(t)+qxV'(t)$ (where $g(t)\equiv V(t)-(x_{0}+v_{0}t)V'(t)$), i.e., we define $V(t)\equiv V(x_{cl}(t),t)$ and $V'(t)\equiv V'(x_{cl}(t),t)$.
The classical Hamiltonian we consider is therefore
\begin{equation}\label{1}
H=\frac{1}{2m}[p-\frac{q}{c}A(t)]^{2}+qxV'(t) +qg(t),
\end{equation}
\noindent where $x$, $p$ are the canonical coordinates.
The equations of motion follow from Hamilton's Poisson bracket equations:
\begin{eqnarray}\label{2}
\frac{d}{dt}x&=&\frac{1}{m}[p-\frac{q}{c}A(t)]\nonumber\\
\frac{d}{dt}[p-\frac{q}{c}A(t)]&=&-qV'(t)-\frac{q}{c}\frac{d}{dt}A(t)
\end{eqnarray}
\noindent and solution
\begin{subequations}\label{3}
\begin{eqnarray}
v_{cl}(t)&=&v_{0}-\frac{q}{mc}[A(t)-A(0)]-\frac{q}{m}U(t),\label{3a}\\
x_{cl}(t)&=&x_{0}+v_{0}t-\frac{q}{mc}[{\cal A}-A(0)t]-\frac{q}{m}[tU(t)-W(t)], \\
p_{cl}(t)&=&mv_{cl}(t)+\frac{q}{c}A(t) =p_{0}-qU(t) \\
\hbox{where }{\cal A}&\equiv&\int_{0}^{t}dt'A(t')\\
\int_{0}^{t}dt'\int_{0}^{t'}dt''V'(t'')&=&\int_{0}^{t}dt'[t-t']V'(t')=tU(t)-W(t),\\
p_{0}&\equiv&mv_{0}+\frac{q}{c}{A}(0),\quad U(t)\equiv \int_{0}^{t}dt'V'(t'), \quad W(t)\equiv \int_{0}^{t}dt't'V'(t').
\end{eqnarray}
\end{subequations}
\noindent (The subscript $cl$ is to distinguish these classical variables from the quantum variables to appear later.)
Note, the electric force only appears in these equations: a magnetic force does not appear for motion constrained to one dimension since that force is in the direction perpendicular to the velocity.
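As a quick consistency check, differentiate the second of Eqs.~(\ref{3}): since $\frac{d}{dt}[tU(t)-W(t)] = U(t) + tV'(t) - tV'(t) = U(t)$, one finds
\begin{displaymath}
\frac{d}{dt}x_{cl}(t) = v_{0} - \frac{q}{mc}[A(t)-A(0)] - \frac{q}{m}U(t) = v_{cl}(t) = \frac{1}{m}[p_{cl}(t)-\frac{q}{c}A(t)] \; ,
\end{displaymath}
while differentiating (\ref{3a}) gives $\frac{d}{dt}[p_{cl}(t)-\frac{q}{c}A(t)] = -qV'(t)-\frac{q}{c}\frac{d}{dt}A(t)$, so Eqs.~(\ref{3}) indeed reproduce Eqs.~(\ref{2}).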
\subsection{Wave function}
The wave function in the momentum representation satisfies
\begin{equation}\label{4}
i\frac{\partial}{\partial t}\psi(p,t)=\bigg[\frac{1}{2m}[p-\frac{q}{c}A(t)]^{2}+qg(t)+qV'(t)i\frac{\partial}{\partial p}\bigg]\psi(p,t),
\end{equation}
(we set $\hbar=1$.) It is initially taken to describe an object of momentum $p_{0}$ located at time 0 at $x_{0}$:
\begin{equation}\label{5}
\psi(p,0)=N e^{-(p-p_{0})^{2}\sigma^{2}}e^{-ipx_{0}}.
\end{equation}
To solve Eq.(\ref{4}) with the initial condition (\ref{5}), we make the ansatz
\begin{equation}\label{6}
\psi(p,t)=Ne^{-(\sigma^{2}+it/2m) p^{2}+\beta(t) p+i\gamma(t)-\beta_{R}^{2}(t)/4\sigma^{2}},
\end{equation}
\noindent where $\beta=\beta_{R}+i\beta_{I}$, and $\gamma$ is real. The last factor in the exponent of (\ref{6}) ensures $\int dp|\psi(p,t)|^{2}=N^{2}$. From (\ref{5},\ref{6}), the initial conditions are $\gamma(0)=0$ and $\beta(0)=2p_{0}\sigma^{2}-ix_{0}.$
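As a quick check, with these initial values the exponent of (\ref{6}) at $t=0$ reduces to that of (\ref{5}):
\begin{displaymath}
-\sigma^{2}p^{2} + (2p_{0}\sigma^{2} - ix_{0})\,p - \frac{(2p_{0}\sigma^{2})^{2}}{4\sigma^{2}} = -(p-p_{0})^{2}\sigma^{2} - ipx_{0} \; .
\end{displaymath}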
In Appendix \ref{A} we put (\ref{6}) into (\ref{4}) and equate coefficients of $p$ and 1, with the results
\begin{eqnarray}\label{7}
\beta_{R}&=&2\sigma^{2}p_{cl}(t), \quad \beta_{I}=-x_{cl}(t)+\frac{p_{cl}(t)t}{m},\quad \gamma=-q\int_{0}^{t}dt'V(t')+v_{0}W(t).
\nonumber\\
\end{eqnarray}
We then go to the position representation by taking the Fourier transform of (\ref{6}). The result, (\ref{A12}), is
\begin{equation}\label{8}
\psi(x,t)=Ne^{-\frac{(x-x_{cl}(t))^{2}}{4\sigma^{2}}}e^{ip_{cl}(t)(x-x_{cl}(t))}e^{ip_{cl}^{2}(t)t/2m}e^{-iq\int_{0}^{t}dt'V(t')+iv_{0}W(t)}.
\end{equation}
\noindent This result holds under the assumptions that the mass of the object is large enough
that there is negligible spreading of the wave packet over the time $t$, and that the packet width $\sigma$ is much larger than the wavelength associated with the momentum,
$\sigma>>h/mv$, so that the momentum is well-defined.
Thus we see that it is a consequence of quantum theory that the mean particle motion follows the classical trajectory, and that
the phase shift is expressed in terms of the classical variables as well.
\section{Particle Moving Under General Forces}
We now consider the motion of a charged particle in one dimension under externally applied potentials with arbitrary space and time dependence.
An exact solution of Schr\"odinger's equation cannot be obtained for this more general case. Instead, we resort
to an approximate method (which, as we shall see, gives the same result for the phase as above when the force is restricted to be solely time-dependent).
We set the stage for the approximation in subsections A and B. The approximation, consisting of expanding the exact Hamiltonian
about the classical trajectory of the particle, is presented in subsection C. For a wave packet whose mean follows the classical trajectory, this is an exactly soluble problem, and the phase of the wave function is obtained in subsection D.
\subsection{Classical Motion}
The motion is governed by the classical Hamiltonian
\begin{equation}\label{9}
H=\frac{1}{2m}[p_{cl}-\frac{q}{c}A(x_{cl},t)]^{2}+qV(x_{cl},t)
\end{equation}
\noindent where $x_{cl}$, $p_{cl}$ are the canonical coordinates.
The equations of motion follow from Hamilton's Poisson bracket equations:
\begin{eqnarray}\label{10}
\frac{d}{dt}x_{cl}(t)&=&\frac{1}{m}[p_{cl}(t)-\frac{q}{c}A(x_{cl}(t),t)]\nonumber\\
\frac{d}{dt}[p_{cl}(t)-\frac{q}{c}A(x_{cl}(t),t)]&=&-qV'(x_{cl}(t),t)-\frac{q}{c}\dot A(x_{cl}(t),t)
\end{eqnarray}
\noindent where the dot means $\partial_{t}$ and the prime means $\partial_{x}$, and we have used $dF(x_{cl}, p_{cl}, t)/dt=[F,H]_{PB}+\partial F(x_{cl}, p_{cl}, t)/\partial t$.
Denoting ${\cal E}(x_{cl}(t),t)\equiv -V'(x_{cl}(t),t)-\frac{1}{c}\dot A(x_{cl}(t),t)$, the solution may formally be written
\begin{eqnarray}\label{11}
v_{cl}(t)&\equiv&\frac{1}{m}[p_{cl}(t)-\frac{q}{c}A(x_{cl}(t),t)]=v_{0}+\frac{q}{m}\int_{0}^{t}dt_{1}{\cal E}(x_{cl}(t_{1}),t_{1}),\nonumber\\
x_{cl}(t)&=&x_{0}+v_{0}t+\frac{q}{m}\int_{0}^{t}dt_{1}\int_{0}^{t_{1}}dt_{2}{\cal E}(x_{cl}(t_{2}),t_{2}).
\end{eqnarray}
\subsection{Quantum Phase}
For the comparable quantum problem, the Hamiltonian is
\begin{equation}\label{12}
H=\frac{1}{2m}[P-\frac{q}{c}A(X,t)]^{2}+qV(X,t) .
\end{equation}
\noindent where $X,P$ are the conjugate operators.
In this case we shall \textit{assume} the wave packet follows the classical trajectory, supposing that it
spreads negligibly over the time interval of interest (i.e., $t/2m<<\sigma^{2}$):
\begin{equation}\label{13}
\psi(x,t)=e^{-[x-x_{cl}(t)]^{2}/4\sigma^{2}}e^{i\theta(x,t)},
\end{equation}
\noindent and see whether the phase $\theta(x,t)$ of the wave function can be obtained from Schr\"odinger's equation in some reasonable approximation.
As is well known from deBroglie-Bohm theory, putting (\ref{13}) into Schr\"odinger's equation results in two equations, the imaginary part yielding conservation
of probability, in the form $\frac{\partial}{\partial t}\rho(x,t)+ \frac{\partial}{\partial x}[\rho(x,t)v(x,t)]=0$, the real part yielding the Hamilton-Jacobi equation, modified by a ``quantum potential":
\begin{eqnarray}\label{14}
0&=&\frac{\partial}{\partial t}e^{-[x-x_{cl}(t)]^{2}/2\sigma^{2}}+\frac{\partial}{\partial x}\Big[e^{-[x-x_{cl}(t)]^{2}/2\sigma^{2}}\frac{1}{m}[\theta'(x,t)-\frac{q}{c}A(x,t)]\Big],\nonumber\\
-\dot\theta(x,t)&=&\frac{1}{2m}[\theta'(x,t)-\frac{q}{c}A(x,t)]^{2}+qV(x,t)-\frac{1}{2m}e^{[x-x_{cl}(t)]^{2}/4\sigma^{2}}\frac{\partial^{2}}{\partial x^{2}}e^{-[x-x_{cl}(t)]^{2}/4\sigma^{2}}.
\end{eqnarray}
The conservation equation, the first of (\ref{14}), becomes
\begin{equation}\label{15}
0=-\frac{1}{\sigma^{2}}[x-x_{cl}(t)]\Big[-v_{cl}(t)+\frac{1}{m}[\theta'(x,t)-\frac{q}{c}A(x,t)]\Big]+\frac{\partial}{\partial x}\frac{1}{m}[\theta'(x,t)-\frac{q}{c}A(x,t)],
\end{equation}
\noindent with solution
\begin{equation}\label{16}
v_{cl}(t)-\frac{1}{m}[\theta'(x,t)-\frac{q}{c}A(x,t)]=Ce^{[x-x_{cl}(t)]^{2}/2\sigma^{2}}.
\end{equation}
\noindent The expectation value of the momentum, $\int dx |\psi(x,t)|^{2}\theta'(x,t)$, will diverge unless $C=0$ (since the integrand goes $\sim C $ for large $x$), so the conservation
of probability provides the condition
\begin{equation}\label{17}
\theta'(x,t)=mv_{cl}(t)+\frac{q}{c}A(x,t).
\end{equation}
The ``quantum potential" part of the second equation of (\ref{14}) is
\begin{equation}\label{18}
-\frac{1}{2m}e^{[x-x_{cl}(t)]^{2}/4\sigma^{2}}\frac{\partial^{2}}{\partial x^{2}}e^{-[x-x_{cl}(t)]^{2}/4\sigma^{2}}=-\frac{1}{8m\sigma^{4}}\Big[[x-x_{cl}(t)]^{2}-2\sigma^{2}\Big].
\end{equation}
\noindent Because the magnitude of the wave function keeps $[x-x_{cl}(t)]^{2}$ of the order of $\sigma^{2}$, (\ref{18}) is of magnitude $1/m\sigma^{2}$. This may be neglected compared
to the kinetic energy term in $(\ref{14})$, $mv_{cl}^{2}/2$, since we assume that there are many wavelengths within $\sigma$, so that the momentum of the packet is well-defined.
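For definiteness, this comparison can be written out (a one-line estimate, with $\hbar=1$ and $\lambda=2\pi/mv_{cl}$ the de Broglie wavelength of the packet):
\[
\frac{1/m\sigma^{2}}{mv_{cl}^{2}/2}=\frac{2}{(mv_{cl}\sigma)^{2}}
=2\Big(\frac{\lambda}{2\pi\sigma}\Big)^{2}\ll 1 ,
\]
so the neglect is controlled precisely by the assumption of many wavelengths within the packet width $\sigma$.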
Thus, we have the classical Hamilton-Jacobi equation,
\begin{equation}\label{19}
-\dot\theta(x,t)=\frac{1}{2m}[\theta'(x,t)-\frac{q}{c}A(x,t)]^{2}+qV(x,t).
\end{equation}
In order that these two equations, (\ref{17}), (\ref{19}), be integrable, it must be that $\partial_{t}\partial_{x}\theta(x,t)=\partial_{x}\partial_{t}\theta(x,t)$. From
(\ref{17}) we obtain:
\begin{eqnarray}\label{20}
\partial_{t}\partial_{x}\theta(x,t)&=&\partial_{t}[mv_{cl}(t)+\frac{q}{c}A(x,t)]=\frac{d}{dt}mv_{cl}(t)+\frac{q}{c}\dot A(x,t)\nonumber\\
&=&-qV'(x_{cl}(t),t)-\frac{q}{c}[\dot A(x_{cl}(t),t)-\dot A(x,t)].
\end{eqnarray}\noindent where, in going from the first line to the second line in (\ref{20}), we have used the second of Eqs.(\ref{10}).
From (\ref{19}), with use of (\ref{17}), we obtain:
\begin{equation}\label{21}
\partial_{x}\partial_{t}\theta(x,t)=-\partial_{x}\frac{m}{2}v_{cl}^{2}(t)-qV'(x,t)=-qV'(x,t).
\end{equation}
We see that the right hand sides of (\ref{20}), (\ref{21}) are not, in general, identical, so these equations are not, in general, integrable.
The reason is that we have assumed the wave packet to have a very specific trajectory, its center at $x=x_{cl}(t)$, with specific initial conditions $x_{0}$ and $v_{0}$,
whereas the Hamiltonian dynamics is general, not specific to this trajectory.
While not integrable for this general case of the electric force depending upon position and time,
they \textit{are} integrable in the special case earlier dealt with, where the electric field only depends upon time. That is, if
$V'(x,t)=V'(t)$, $A(x,t)=A(t)$, the right hand sides of (\ref{20}), (\ref{21}) are identical.
\subsection{Classical Motion: Approximate Hamiltonian}
Our procedure shall be to construct an \textit{approximate} Hamiltonian for which the problem is \textit{exactly} soluble (i.e., for which the phase angle \textit{is} integrable).
The Hamiltonian shall be designed so that the particular classical trajectory $x_{cl}(t)$, with specified initial values of $x_{0}, v_{0}$, which satisfies the exact Hamiltonian's equation of motion, now also satisfies the approximate Hamiltonian's
equation of motion, and that neighboring classical trajectories
stay ``close" (to be made precise at the end of this section) to $x_{cl}(t)$.
The Hamiltonian we propose is
\begin{equation}\label{22}
H'=\frac{1}{2m}[P_{cl}-\frac{q}{c}A(x_{cl}(t),t)]^{2}+qV(x_{cl}(t),t)+ (X_{cl}-x_{cl}(t))qV'(x_{cl}(t),t)-\frac{q}{c}v_{cl}(t)(X_{cl}-x_{cl}(t))A'(x_{cl}(t),t)
\end{equation}
\noindent where now $X_{cl}, P_{cl}$ are the canonical coordinates.
Thus, one particular $x_{cl}(t)$ plays three roles: it is a solution of the exact Hamiltonian dynamics, it is a crucial element in the approximate Hamiltonian (\ref{22}) and, as we shall see,
it is a solution of the approximate Hamiltonian dynamics.
The Poisson bracket equations of motion corresponding to the Hamiltonian (\ref{22}) are then
\begin{eqnarray}\label{23}
\frac{d}{dt}X_{cl}(t)&=&\frac{1}{m}[P_{cl}(t)-\frac{q}{c}A(x_{cl}(t),t)]\nonumber\\
\frac{d}{dt}[P_{cl}(t)-\frac{q}{c}A(x_{cl}(t),t)]&=&-qV'(x_{cl}(t),t)+\frac{q}{c}v_{cl}(t)A'(x_{cl}(t),t)-\frac{d}{dt}\frac{q}{c}A(x_{cl}(t),t)
\end{eqnarray}
\noindent Upon combining these two equations, we obtain
\begin{eqnarray}\label{24}
m\frac{d^{2}}{dt^{2}}X_{cl}(t)&=&-qV'(x_{cl}(t),t)-\frac{q}{c}\dot A(x_{cl}(t),t).
\end{eqnarray}
\noindent $X_{cl}(t)$ is the general solution with arbitrary initial conditions $X_{cl}(0), V_{cl}(0)$. We see from (\ref{10}) that $X_{cl}(t)=x_{cl}(t)$ is a solution of (\ref{24}) (the solution with initial conditions
$X_{cl}(0)=x_{cl}(0), V_{cl}(0)=v_{cl}(0)$). Moreover, $d^{2}[X_{cl}(t)-x_{cl}(t)]/dt^{2}=0$ so, for an arbitrary solution $X_{cl}(t)$,
\[
X_{cl}(t)-x_{cl}(t)=[X_{cl}(0)+V_{cl}(0)t]-[x_{cl}(0)+v_{cl}(0)t]+o(t^{3}).
\]
Thus, two trajectories with the same initial speed and different initial positions only diverge at order $t^{3}$ under this Hamiltonian. This property of neighboring classical trajectories staying close might be expected of a soluble Hamiltonian: a quantum wave packet that holds together as time increases might be expected to have its classical
counterparts hold together, at least for small times.
\subsection{Quantum Phase: Approximate Hamiltonian}
For the comparable quantum problem, the Hamiltonian as usual is obtained by replacing in (\ref{22}) the position and momentum variables $X_{cl}, P_{cl}$ with operators $X,P$:
\begin{equation}\label{25}
H'=\frac{1}{2m}[P-\frac{q}{c}A(x_{cl}(t),t)]^{2}+qV(x_{cl}(t),t)+ (X-x_{cl}(t))qV'(x_{cl}(t),t)-\frac{q}{c}v_{cl}(t)(X-x_{cl}(t))A'(x_{cl}(t),t).
\end{equation}
\noindent Assuming again the wave function form (\ref{13}),
\[
\psi(x,t)=e^{-[x-x_{cl}(t)]^{2}/4\sigma^{2}}e^{i\theta(x,t)},
\]
\noindent we obtain the two equations (with neglect of the ``quantum potential" as before):
\begin{eqnarray}\label{26}
\theta'(x,t)&=&mv_{cl}(t)+\frac{q}{c}A(x_{cl}(t),t),\nonumber\\
-\dot\theta(x,t)&=&\frac{m}{2}v_{cl}^{2}(t)+qV(x_{cl}(t),t)+q(x-x_{cl}(t))V'(x_{cl}(t),t)-\frac{q}{c}v_{cl}(t)(x-x_{cl}(t))A'(x_{cl}(t),t).\nonumber\\
\end{eqnarray}
From the first of Eqs.(\ref{26}), using the second of Eqs.(\ref{10}) again, we have
\begin{eqnarray}\label{27}
\partial_{t}\partial_{x}\theta(x,t)&=&\partial_{t}[mv_{cl}(t)+\frac{q}{c}A(x_{cl}(t),t)]\nonumber\\
&=&-qV'(x_{cl}(t),t)-\frac{q}{c}\dot A(x_{cl}(t),t)+\frac{q}{c}[\dot A(x_{cl}(t),t)+v_{cl}(t)A'(x_{cl}(t),t)]\nonumber\\
&=&-qV'(x_{cl}(t),t)+\frac{q}{c}v_{cl}(t)A'(x_{cl}(t),t).
\end{eqnarray}
\noindent We see from the second of (\ref{26}) that this is equal to $\partial_{x}\partial_{t}\theta(x,t)$, so $\theta(x,t)$ can be found.
By integrating the first of (\ref{26}), we find that $\theta(x,t)$ has the form
\begin{equation}\label{28}
\theta(x,t)=x[mv_{cl}(t)+\frac{q}{c}A(x_{cl}(t),t)]+\alpha(t),
\end{equation}
\noindent where $\alpha(t)$ is an arbitrary function of $t$. Taking the time derivative of this, and comparing it with the second of (\ref{26}), we find:
\begin{eqnarray}\label{29}
\dot\alpha(t)&=&-\frac{m}{2}v^{2}_{cl}(t)+x_{cl}(t)[qV'(x_{cl}(t),t)-\frac{q}{c}v_{cl}(t)A'(x_{cl}(t),t)] -qV(x_{cl}(t),t),\nonumber\\
&=&-\frac{1}{2m}[p_{cl}(t)-\frac{q}{c}A(x_{cl}(t),t)]^{2}-x_{cl}(t)\frac{d}{dt}p_{cl}(t)-qV(x_{cl}(t),t),\nonumber\\
&=&-\frac{1}{2m}p_{cl}^{2}(t)+\frac{q}{mc}p_{cl}(t)A(x_{cl}(t),t) -\Big[\frac{d}{dt}[x_{cl}(t)p_{cl}(t)]-v_{cl}(t)p_{cl}(t)\Big]-qV(x_{cl}(t),t),\nonumber\\
&=&-\frac{1}{2m}p_{cl}^{2}(t)+\frac{q}{mc}p_{cl}(t)A(x_{cl}(t),t) -\frac{d}{dt}[x_{cl}(t)p_{cl}(t)]+\frac{1}{m}[p_{cl}(t)-\frac{q}{c}A(x_{cl}(t),t)]p_{cl}(t)-qV(x_{cl}(t),t),\nonumber\\
&=&\frac{1}{2m}p_{cl}^{2}(t) -\frac{d}{dt}[x_{cl}(t)p_{cl}(t)]-qV(x_{cl}(t),t).
\end{eqnarray}
Putting the integral of (\ref{29}) into (\ref{28}), we obtain the expression for the phase (up to an additive constant, in which $x_{cl}(0)p_{cl}(0)$ has been absorbed):
\begin{equation}\label{30}
\theta(x,t)=p_{cl}(t)[x-x_{cl}(t)]+\int_{0}^{t}dt'\frac{1}{2m}p_{cl}^{2}(t')-q\int_{0}^{t}dt'V(x_{cl}(t'),t').
\end{equation}
(As a check, it is shown in Appendix B that when the electric force only depends upon time, this expression for the phase is equal to the phase in Eq. (\ref{8}).)
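To illustrate how (\ref{30}) is used in practice, the following sketch (purely illustrative: the potentials are hypothetical, and $\hbar=c=1$) integrates the classical equations (\ref{10}) numerically and accumulates the two time integrals that appear in the phase.
\begin{verbatim}
# Sketch: the phase of Eq. (30) evaluated along a numerically integrated
# classical trajectory, for hypothetical potentials V(x,t) and A(x,t).
import numpy as np

m, q = 1.0, 0.2
V  = lambda x, t: 0.5*x*np.cos(t)      # hypothetical scalar potential
Vp = lambda x, t: 0.5*np.cos(t)        # dV/dx
A  = lambda x, t: 0.1*np.sin(x + t)    # hypothetical vector potential
At = lambda x, t: 0.1*np.cos(x + t)    # dA/dt

x, v, t = 0.0, 1.0, 0.0
n, T = 20000, 2.0
dt = T/n
S_kin = S_pot = 0.0                    # the two time integrals in Eq. (30)
for _ in range(n):
    E = -Vp(x, t) - At(x, t)           # the combination calE of Eq. (11)
    p = m*v + q*A(x, t)                # canonical momentum p_cl(t)
    S_kin += dt*p**2/(2.0*m)
    S_pot += dt*q*V(x, t)
    # simple Euler step of Eq. (10); a smaller dt (or RK4) improves accuracy
    x, v, t = x + dt*v, v + dt*q*E/m, t + dt

p_T = m*v + q*A(x, t)
theta = lambda xx: p_T*(xx - x) + S_kin - S_pot   # Eq. (30) at time T
print(theta(x + 0.3))
\end{verbatim}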
\section{ Interference: Extra Phase in Two Particle and N+1 Particle Cases.}
We wish to consider an interference situation governed by a Hamiltonian describing many particles interacting with a single particle.
The situation is such that each particle's wave function
is expected to be well-approximated as a spatially localized packet with well-defined momentum, and the total wave function is expected to be well-approximated
as the product of these wave packets or the linear combination of such products. We turn now to show how to analyze this situation in a consistent fashion.
Our result (\ref{13}) for the amplitude and (\ref{30}) for the phase associated with the motion of a single particle in known space-time dependent potentials,
shall be utilized in this endeavor.
In the next section we shall consider the simplest problem, an interference situation involving just two particles, which however displays all the features of the more complicated problem.
\subsection{ Two Particle Interaction: Wave Function. }
The Schro\"dinger equation for two particles moving under their mutual vector and scalar potentials, has the form
\begin{equation}\label{31}
i\frac{d}{dt}|\Psi,t\rangle=\Bigg[\frac{{\bf p}_{1}^{2}}{2m_{1}}+\frac{{\bf p}_{2}^{2}}{2m_{2}}+V(r_{12})-\frac{q_{1}q_{2}}{m_{1}m_{2}c^{2}}{\cal S}[{\bf p}_{1}\cdot {\bf D}^{\leftrightarrow}({\bf r}_{12})\cdot{\bf p}_{2}
]\Bigg]|\Psi,t\rangle.
\end{equation}
\noindent Here, $r_{12}\equiv|{\bf x}_{1}-{\bf x}_{2}|$, ${\bf D}^{\leftrightarrow}({\bf r}_{12})$ is a dyad and ${\cal S}[\,\cdot\,]$ is an operation that makes the argument suitably Hermitian.
In the Lorenz gauge\footnote{Since the vector potential term is of order $c^{-1}$, one might add the term in the scalar potential to that order. However, since the Lorenz gauge condition ${\bf \nabla}\cdot{\bf A}+c^{-1}\dot V=0$ just uses the $0$th order term in the scalar potential, the extra term is not needed for that, nor does it affect anything in our calculation, so it may be omitted} and the Coulomb gauge, $V(r_{12})= q_{1}q_{2}/r_{12}$, while the dyad for these two gauges (the Darwin Hamiltonian in the case of the Coulomb gauge) is, respectively,
\begin{eqnarray}\label{32}
D^{\leftrightarrow}({\bf r}_{12})&=&\frac{{\bf 1}^{\leftrightarrow}}{r_{12}}, \quad
D^{\leftrightarrow}({\bf r}_{12})=
\frac{1}{2}\Big[\frac{{\bf 1}^{\leftrightarrow}}{r_{12}}
+\frac{{\bf r}_{12}{\bf r}_{12}}{r_{12}^{3}}\Big], \hbox{ so the vector potential e.g., due to particle 2 is } \nonumber\\
{\bf A}_{2}&=&\frac{q_{2}}{m_{2}c}{\cal S}\Big[\frac{{\bf p}_{2}}{r_{12}}\Big], \quad {\bf A}_{2}=\frac{q_{2}}{2m_{2}c}{\cal S}\Big[\frac{{\bf p}_{2}}{r_{12}}+
\frac{{\bf r}_{12} {\bf r}_{12}\cdot{\bf p}_{2}}{r_{12}^{3}} \Big] ,
\end{eqnarray}
\noindent the difference arising from the source of the vector potential being respectively the current and the transverse component of the current.
A general solution of (\ref{31}) will be an entangled state for the two particles. However, for the physical situations we contemplate,
we suppose the state vector is well-approximated as a direct product of state vectors for each particle. The associated wave functions
are to be those we have been considering, well localized wave packets moving under
the vector and scalar potentials due to the other particle, where the other particle's operators are replaced by their classical counterparts.
How do we make a consistent approximation?
Consider the variational principle for the Schr\"odinger equation,
\[
\delta\int_{0}^{T}dt\langle\Psi,t|\Big[i\frac{d}{dt}-H\Big]|\Psi,t\rangle=0.
\]
Inserting the approximate solution $|\Psi,t\rangle=|\psi_{1},t\rangle|\psi_{2},t\rangle$, and varying separately for each particle state vector, we obtain for particle 1
(and similarly for particle 2),
\begin{eqnarray}\label{33}
&&i\frac{d}{dt}|\psi_{1},t\rangle +\Big[\langle\psi_{2},t|\Big[ i\frac{d}{dt}- \frac{{\bf p}_{2}^{2}}{2m_{2}}\Big] |\psi_{2},t\rangle \Big]
|\psi_{1},t\rangle\nonumber\\
&&\quad =\Bigg[\frac{{\bf p}_{1}^{2}}{2m_{1}}+
\langle\psi_{2},t|V(r_{12})|\psi_{2},t\rangle-\frac{q_{1}q_{2}}{m_{1}m_{2}c^{2}}{\cal S}[{\bf p}_{1}\cdot\langle\psi_{2},t|{\bf D}^{\leftrightarrow}({\bf r}_{12})\cdot{\bf p}_{2}
|\psi_{2},t\rangle]\Bigg]|\psi_{1},t \rangle \nonumber\\
&&\quad\approx \Bigg[\frac{{\bf p}_{1}^{2}}{2m_{1}}+
V(|{\bf x}_{1}-{\bf x}_{2 cl}(t)|)-\frac{q_{1}q_{2}}{m_{1}m_{2}c^{2}}{\cal S}[{\bf p}_{1}\cdot{\bf D}^{\leftrightarrow}({\bf x}_{1}-{\bf x}_{2 cl}(t))\cdot{\bf p}_{2cl}(t)]
\Bigg]|\psi_{1},t\rangle,
\end{eqnarray}
\noindent where the approximation in the last step uses the localized nature of packet 2 to replace the operators ${\bf x}_{2 }, {\bf p}_{2 }$
by their mean values ${\bf x}_{2cl }(t) ,{\bf p}_{2cl }(t)$.
Eq.(\ref{33}) is just the equation we have solved for the magnitude and phase of a localized packet moving in an external vector and scalar potential
\textit{except} that there is an extra phase term on the left hand side, which we denote
\begin{equation}\label{34}
\dot\phi_{j}(t)\equiv \langle\psi_{j},t|\Big[ i\frac{d}{dt}- \frac{{\bf p}_{j}^{2}}{2m_{j}}\Big]|\psi_{j},t\rangle, \quad j=1,2.
\end{equation}
\noindent (That this is a real number may be seen by taking its complex conjugate and utilizing $d\langle\psi_{j},t|\psi_{j},t\rangle/dt=0.$)
Now, the equations for $|\psi_{1},t\rangle, |\psi_{2},t\rangle$ are form invariant under the replacements
$|\psi_{1},t\rangle=|\psi_{1},t\rangle'e^{i\beta(t)}, |\psi_{2},t\rangle=|\psi_{2},t\rangle'e^{-i\beta(t)}$: this is to be expected, since the product wave function
is unchanged by this phase transformation. This gives the freedom to choose $\beta(t)$ to remove the extra phase term from, say, the equation for $|\psi_{1},t\rangle$.
Given a solution for $|\psi_{1},t\rangle, |\psi_{2},t\rangle$, choose $\beta(t)=\phi_{2}(t)$, so the transformed equations become
\begin{subequations}
\begin{eqnarray}\label{35}
&&i\frac{d}{dt}|\psi_{1},t\rangle' = \Bigg[\frac{{\bf p}_{1}^{2}}{2m_{1}}+
V(|{\bf x}_{1}-{\bf x}_{2 cl}(t)|)-\frac{q_{1}q_{2}}{m_{1}m_{2}c^{2}}{\cal S}[{\bf p}_{1}\cdot{\bf D}^{\leftrightarrow}({\bf x}_{1}-{\bf x}_{2 cl}(t))\cdot{\bf p}_{2cl}(t)]
\Bigg]|\psi_{1},t\rangle',\label{35a}\\
&&i\frac{d}{dt}|\psi_{2},t\rangle' +\Big[\thinspace'\langle\psi_{1},t|\Big[ i\frac{d}{dt}- \frac{{\bf p}_{1}^{2}}{2m_{1}}\Big]|\psi_{1},t\rangle'\Big]|\psi_{2},t\rangle'\nonumber\\
&&\qquad\qquad =\Bigg[\frac{{\bf p}_{2}^{2}}{2m_{2}}+
V(|{\bf x}_{2}-{\bf x}_{1 cl}(t)|)-\frac{q_{1}q_{2}}{m_{1}m_{2}c^{2}}{\cal S}[{\bf p}_{2}\cdot{\bf D}^{\leftrightarrow}({\bf x}_{2}-{\bf x}_{1 cl}(t))\cdot{\bf p}_{1cl}(t)]
\Bigg]|\psi_{2},t\rangle'.\label{35b}\nonumber\\
\end{eqnarray}
\end{subequations}
\noindent
By subtracting $\frac{{\bf p}_{1}^{2}}{2m_{1}}|\psi_{1},t\rangle'$ from (\ref{35a}), and taking the scalar product with $'\langle\psi_{1},t|$, we see that the extra phase in (\ref{35b}) is given by
\begin{equation}\label{36}
\dot\phi'_{1}(t)\equiv '\negmedspace\negthinspace\langle\psi_{1},t|\Big[ i\frac{d}{dt}- \frac{{\bf p}_{1}^{2}}{2m_{1}}\Big]|\psi_{1},t\rangle'=V(|{\bf x}_{1cl}(t)-{\bf x}_{2 cl}(t)|)-\frac{q_{1}q_{2}}{m_{1}m_{2}c^{2}}{\bf p}_{1cl}(t)\cdot{\bf D}^{\leftrightarrow}({\bf x}_{1cl}(t)-{\bf x}_{2 cl}(t))\cdot{\bf p}_{2cl}(t)
\end{equation}
\noindent That is, \textit{this phase is the time integral of the interaction energy term of the Hamiltonian, where all operators are replaced by their classical values}.
We also note from (\ref{35b}), by subtracting $\frac{{\bf p}_{2}^{2}}{2m_{2}}$ and taking the scalar product with $'\langle\psi_{2},t|$, that
\begin{equation}\label{37}
\thinspace'\negthinspace\langle\psi_{2},t|\Big[ i\frac{d}{dt}- \frac{{\bf p}_{2}^{2}}{2m_{2}}\Big]|\psi_{2},t\rangle'+'\negmedspace\langle\psi_{1},t|\Big[ i\frac{d}{dt}- \frac{{\bf p}_{1}^{2}}{2m_{1}}\Big]|\psi_{1},t\rangle'= \dot\phi'_{1}(t), \hbox{ that is, }\thinspace'\negthinspace\langle\psi_{2},t|\Big[ i\frac{d}{dt}- \frac{{\bf p}_{2}^{2}}{2m_{2}}\Big]|\psi_{2},t\rangle'=0.
\end{equation}
If we write $|\psi_{2},t\rangle'\equiv e^{i\phi'_{1}(t)}|\psi_{2},t\rangle''$, then $|\psi_{2},t\rangle''$ satisfies (\ref{35b}) without the extra phase term.
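Explicitly, this is a one-line check:
\[
i\frac{d}{dt}\Big(e^{i\phi'_{1}(t)}|\psi_{2},t\rangle''\Big)
=e^{i\phi'_{1}(t)}\Big[i\frac{d}{dt}-\dot\phi'_{1}(t)\Big]|\psi_{2},t\rangle'' ,
\]
so the $\dot\phi'_{1}(t)$ term on the left hand side of (\ref{35b}) is cancelled.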
Then it, and the solution of (\ref{35a}), are both
the solutions we have found for
a localized packet moving in external potentials, where those potentials now are internal potentials so to speak, potentials due to the other particle
with operator values replaced by classical ones. So, the wavefunction which solves (\ref{31}) approximately is the product of these two solutions, \textit{multiplied by the
extra phase factor} $e^{i\phi'_{1}(t)}$.
\subsection{Two Particle Interaction: Interference. }
We shall now show, in an interference experiment involving the two interacting particles,
that each particle makes the \textit{same} contribution to the phase shift, each equal to the \textit{negative} of the time integral of (\ref{36}); combined with the extra phase, which is the time integral of (\ref{36}) itself, the
\textit{net} phase shift is the negative of the time integral of (\ref{36}). Thus we see the germ here of what will be fleshed out later,
that the motion of the electron (think particle 1) in the vector potential of the solenoid, and the motion of the solenoid (think particle 2) in the vector potential
of the electron each make identical contributions to the phase shift. That would give twice the expected phase shift except that \textit{the proper approximation generates an additional phase shift which is the negative of these}, and so one ends up with the expected phase shift, the shift due to \textit{either} mechanism.
Consider an interference experiment where, at time 0, packets of the two particles split in two, with equal amplitude both particles going on trajectories labeled $A$,
or on trajectories labeled $B$. The wave function at some time $t<T$ may be written as
\begin{equation}\label{38}
\Psi(x_{1},x_{2}, t)=\frac{1}{\sqrt{2}}[\psi_{1}^{'A} (x_{1},t)\psi_{2}^{''A} (x_{2},t)e^{i\phi_{1}^{'A}(t)}
+\psi_{1}^{'B} (x_{1},t)\psi_{2}^{''B} (x_{2},t)e^{i\phi_{1}^{'B}(t)}].
\end{equation}
\noindent Here, $\psi_{1}^{'}(x_{1},t)=\langle x_{1}|\psi_{1}, t\rangle', \psi_{2}^{''}(x_{1},t)=\langle x_{2}|\psi_{2}, t\rangle''$ satisfy their respective
Schr\"odinger equations, motion under potentials where the position and momentum operators of the other particle have been replaced by their time-dependent classical values, with no extra phase terms. $\phi_{1}^{'A,B}(t)$ is the extra phase factor for trajectory $A$ or $B$.
The packets finally come together at time $T$. We suppose that particle 2's packets
precisely overlap then and thereafter. We suppose that particle 1's packets meet at (the particle analog of) a half-silvered mirror, coming from opposite directions. If particle 1 had been on path $A$, it splits into two packets that go to the right or left with equal amplitude $1/\sqrt{2}$.
If particle 1 had been on path $B$, one packet goes to the right with amplitude $1/\sqrt{2}$ and the other to the left with amplitude $-1/\sqrt{2}$. The probability of particle 1 being detected either at the left or the right is what is measured.
The wave function at time $T$ (trivially extended thereafter) is therefore $\Psi(x_{1},x_{2}, T)=\frac{1}{\sqrt{2}}[\Psi_{+}(x_{1},x_{2}, T)+\Psi_{-}(x_{1},x_{2}, T)]$, with
\begin{equation}\label{39}
\Psi_{\pm}(x_{1},x_{2}, T)=\frac{1}{\sqrt{2}}[\psi_{1}^{'A} (x_{1},T)\psi_{2}^{''A} (x_{2},T)e^{i\phi_{1}^{'A}(T)}
\pm\psi_{1}^{'B} (x_{1},T)\psi_{2}^{''B} (x_{2},T)e^{i\phi_{1}^{'B}(T)}]
\end{equation}
\noindent with the subscript $+$ referring to what is measured to the right and $-$ referring to what is measured to the left, and the superscripts $A,B$ denoting the path.
The individual particle wave functions in (\ref{39}) are of the form (\ref{13}), with phase (\ref{30}), e.g.,
\begin{equation}\label{40}
\psi_{1}^{'A}(x_{1},T)=Ne^{-[x_{1}-x_{1,cl}^{A}(T)]^{2}/4\sigma^{2}}e^{i\big[p_{1,cl}^{A}(T)[x_{1}-x_{1,cl}^{A}(T)]+
\int_{0}^{T}dt\frac{1}{2m}[p_{1,cl}^{A}(t)]^{2}-q_{1}\int_{0}^{T}dtV_{1}(x_{1,cl}^{A}(t),t)\big]}.
\end{equation}
The probabilities of the two outcomes are
\begin{eqnarray}\label{41}
P_{\pm}&\equiv&\int dx_{1}dx_{2}|\Psi_{\pm}
(x_{1},x_{2}, T)|^{2}=\frac{1}{4}\Bigg[2\pm
e^{i(\phi_{1}^{'A}(T)-\phi_{1}^{'B}(T))}\int dx_{1}\psi_{1}^{'A} (x_{1},T)\psi_{1}^{*'B}(x_{1},T)\int dx_{2}\psi_{2}^{''A} (x_{2},T)\psi_{2}^{*''B} (x_{2},T) +cc \Bigg]\nonumber \\
\end{eqnarray}
\noindent Using the expressions (\ref{40}), the integrals in (\ref{41}) are readily performed and give 1, since the packets completely overlap at time $T$ so $x_{cl}^{A}(T)=x_{cl}^{B}(T),p_{cl}^{A}(T)=p_{cl}^{B}(T)$. We are left then with the phase factors.
From (\ref{40}), the factor associated to interference of each particle is (leaving off the primes):
\begin{eqnarray}\label{42}
e^{i(\Phi_{i}^{A}-\Phi_{i}^{B})}\equiv\int_{-\infty}^{\infty}dx_{i}\psi_{i}^{A}(x_{i},T)\psi_{i}^{*B}(x_{i},T)&=&
e^{i\int_{0}^{T}dt\frac{1}{2m_{i}}[p_{i,cl}^{A}(t)]^{2}-i\int_{0}^{T}dt\frac{1}{2m_{i}}[p_{i,cl}^{B}(t)]^{2}}e^{-iq_{i}[\int_{0}^{T}dt(V(x_{i,cl}^{A}(t),t)-V(x_{i,cl}^{B}(t),t)]}.\nonumber\\
\end{eqnarray}
Then, the probabilities are
\begin{equation}\label{43}
P_{\pm}=\frac{1}{2}\Big[1\pm\cos[(\Phi_{1}^{A}+\Phi_{2}^{A}+\phi_{1}^{'A})-( \Phi_{1}^{B}+\Phi_{2}^{B}+\phi_{1}^{'B}) ]\Big].
\end{equation}
In order to evaluate the phase in (\ref{42}), we utilize Eq.(\ref{11}). We find, for trajectory $A$ or $B$,
\begin{subequations}
\begin{eqnarray}\label{44}
p_{i,cl}(t)&=&m_{i}v_{i,0}+\frac{q_{i}}{c}A(x_{i,cl}(t),t)+q_{i}\int_{0}^{t}dt'{\cal E}(x_{i,cl}(t'),t'),\label{44a}\\
x_{i,cl}(t)&=&x_{i,0}+v_{i,0}t+\frac{q_{i}}{m_{i}}\int_{0}^{t}dt'\int_{0}^{t'}dt''{\cal E}(x_{i,cl}(t''),t'').\label{44b}
\end{eqnarray}
\end{subequations}
Since $x_{i,cl}^{A}(T)=x_{i,cl}^{B}(T)$, it follows from (\ref{44b}) that
\begin{equation}\label{45}
\int_{0}^{T}dt\int_{0}^{t}dt'[{\cal E}(x_{i,cl}^{A}(t'),t')-{\cal E}(x_{i,cl}^{B}(t'),t')]=0.
\end{equation}
We may write the momentum dependent phase for each particle that appears in (\ref{42}), using (\ref{44a}), (\ref{45}), as:
\begin{eqnarray}\label{46}
&&\int_{0}^{T}dt\frac{1}{2m_{i}}[p_{i,cl}^{A}(t)]^{2}-\int_{0}^{T}dt\frac{1}{2m_{i}}[p_{i,cl}^{B}(t)]^{2}=\int_{0}^{T}dt\frac{1}{2m_{i}}
[p_{i,cl}^{A}(t)+p_{i,cl}^{B}(t)][p_{i,cl}^{A}(t)-p_{i,cl}^{B}(t)]\nonumber\\
&&\approx\int_{0}^{T}dtv_{i}(0)\Bigg[\frac{q_{i}}{c}[A(x_{i,cl}^{A}(t),t)-A(x_{i,cl}^{B}(t),t)]+q_{i}\int_{0}^{t}dt'[{\cal E}(x_{i,cl}^{A}(t'),t')-{\cal E}(x_{i,cl}^{B}(t'),t')]\Bigg]\nonumber\\
&&=\int_{0}^{T}dtv_{i}(0)\frac{q_{i}}{c}[A(x_{i,cl}^{A}(t),t)-A(x_{i,cl}^{B}(t),t)]\approx\int_{0}^{T}dt\Big[v_{i, cl}^{A}(t)\frac{q_{i}}{c}A(x_{i,cl}^{A}(t),t)
-v_{i, cl}^{B}(t)\frac{q_{i}}{c}A(x_{i,cl}^{B}(t),t)\Big]\nonumber\\
&&=\int_{0}^{T}dt\Big[{\bf v}_{i, cl}^{A}(t)\cdot\frac{q_{i}}{c}{\bf A}(x_{i,cl}^{A}(t),t)
-{\bf v}_{i, cl}^{B}(t)\cdot\frac{q_{i}}{c}{\bf A}(x_{i,cl}^{B}(t),t)\Big]
\end{eqnarray}
In the approximations made in the second and third lines of (\ref{46}), we have used $v_{i}(0)=v_{i,cl}(t)+o(q)$ from (\ref{11}), and dropped terms in the square of the charges, as has been done throughout this paper.
In the last line, we have reintroduced vector notation, since the vector potential in our equations has always been the component parallel to the velocity of the particle.
Thus, from (\ref{42}) and (\ref{46}), we have
\begin{equation}\label{47}
\Phi_{i}^{A}-\Phi_{i}^{B}= \int_{0}^{T}dt\Big[{\bf v}_{i, cl}^{A}(t)\cdot\frac{q_{i}}{c}{\bf A}(x_{i,cl}^{A}(t),t)-q_{i}V(x_{i,cl}^{A}(t),t)\Big]-
\int_{0}^{T}dt\Big[{\bf v}_{i, cl}^{B}(t)\cdot\frac{q_{i}}{c}{\bf A}(x_{i,cl}^{B}(t),t)-q_{i}V(x_{i,cl}^{B}(t),t)\Big].
\end{equation}
That is, $\Phi_{i}^{A},\Phi_{i}^{B}$ are each the \textit{time integral of the negative of the Hamiltonian interaction energy with operators replaced by classical variables}
for that trajectory ($A$ or $B$). Due to the symmetry of the interaction under exchange of particles 1 and 2, these are the \textit{same}, independent of $i$.
\textit{Since this is the negative of the extra phase}
$\phi_{i}^{'A}(T),\phi_{i}^{'B}(T)$, we have for the probabilities (\ref{43}):
\begin{subequations}
\begin{eqnarray}
P_{\pm}&=&\frac{1}{2}\Big[1\pm\cos(\Phi^{A}-\Phi^{B})\Big] \hbox{ where }\label{48a}\\
\Phi^{A,B} &=&\int_{0}^{T}dt\Big[{\bf v}_{i, cl}(t)^{A,B}\cdot\frac{q_{i}}{c}{\bf A}(x_{i,cl}^{A,B}(t),t)-q_{i}V(x_{i,cl}^{A,B}(t),t)\Big]\nonumber\\
&=&-\int_{0}^{T}dtH_{\hbox{int}}(x_{1cl}^{A,B}(t), x_{2,cl}^{A,B}(t),p_{1,cl}^{A,B}(t),p_{2,cl}^{A,B}(t)) \label{48b}.
\end{eqnarray}
\end{subequations}
Thus, we have confirmed the assertion made in the beginning of this section, that the phase shift is the difference for each path of the time integral
of the negative of the interaction Hamiltonian with operators replaced by time-dependent classical variables.
\subsection{$N$ Particles Interacting With a Single Particle: Wave Function.}
We shall extend the result of the previous section since we eventually wish to consider an $N$ particle solenoid interacting with an electron. We shall repeat as closely as possible the steps taken in the discussion of the interaction of two particles.
The Schr\"odinger equation for $N$ identical particles of mass $m$, charge $q$, interacting with a single particle of mass $m_{e}$, charge $e$, under mutual vector and scalar potentials has the form
\begin{equation}\label{49}
i\frac{d}{dt}|\Psi,t\rangle=\Bigg[\frac{{\bf p}_{e}^{2}}{2m_{e}}+\sum_{n=1}^{N}\frac{{\bf p}_{n}^{2}}{2m}+\sum_{n=1}^{N}V(r_{en})-\sum_{n=1}^{N}\frac{eq}{m_{e}mc^{2}}{\cal S}[{\bf p}_{e}\cdot {\bf D}^{\leftrightarrow}({\bf r}_{en})\cdot{\bf p}_{n}
]\Bigg]|\Psi,t\rangle.
\end{equation}
\noindent (We shall call the single particle the ``electron.")
Upon considering the variational principle for the Schr\"odinger equation with
the approximate solution $|\Psi,t\rangle=|\psi_{e},t\rangle\prod_{n=1}^{N}|\psi_{n},t\rangle$, and varying separately for each particle state vector, we obtain
\begin{subequations}
\begin{eqnarray}
&&i\frac{d}{dt}|\psi_{e},t\rangle +\Bigg[\sum_{n=1}^{N}\langle\psi_{n},t|\Big[ i\frac{d}{dt}- \frac{{\bf p}_{n}^{2}}{2m}\Big]|\psi_{n},t\rangle \Bigg]|\psi_{e},t\rangle\nonumber\\
&&\quad =\Bigg[\frac{{\bf p}_{e}^{2}}{2m_{e}}+
\sum_{n=1}^{N}\langle\psi_{n},t|V(r_{en})|\psi_{n},t\rangle-\frac{eq}{m_{e}mc^{2}}{\cal S}[{\bf p}_{e}\cdot\sum_{n=1}^{N}\langle\psi_{n},t|{\cal S}[{\bf D}^{\leftrightarrow}({\bf r}_{en})\cdot{\bf p}_{n}]
|\psi_{n},t\rangle]\Bigg]|\psi_{e},t \rangle \nonumber\\
&&\quad\approx \Bigg[\frac{{\bf p}_{e}^{2}}{2m_{e}}+
\sum_{n=1}^{N}V(|{\bf x}_{e}-{\bf x}_{n cl}(t)|)-\frac{eq}{m_{e}mc^{2}}{\cal S}[{\bf p}_{e}\cdot\sum_{n=1}^{N}{\bf D}^{\leftrightarrow}({\bf x}_{e}-{\bf x}_{n cl}(t))\cdot{\bf p}_{ncl}(t)]
\Bigg]|\psi_{e},t\rangle,\label{50a}\\
&&i\frac{d}{dt}|\psi_{n},t\rangle +\Bigg[\sum_{n'=1, n'\neq n}^{N}\langle\psi_{n'},t|\Big[ i\frac{d}{dt}- \frac{{\bf p}_{n'}^{2}}{2m}\Big]|\psi_{n'},t\rangle
+ \langle\psi_{e},t|\Big[ i\frac{d}{dt}- \frac{{\bf p}_{e}^{2}}{2m_{e}}\Big]|\psi_{e},t\rangle \nonumber\\
&&\qquad \qquad -\sum_{n'=1, n'\neq n}^{N}V(|{\bf x}_{ecl}(t)-{\bf x}_{n' cl}(t)|) +
\frac{eq}{m_{e}mc^{2}}{\bf p}_{ecl}(t)\cdot\sum_{n'=1, n'\neq n}^{N}{\bf D}^{\leftrightarrow}({\bf x}_{ecl}(t)-{\bf x}_{n' cl}(t))\cdot{\bf p}_{n'cl}(t)\Bigg] |\psi_{n},t\rangle\nonumber\\
&&\quad = \Bigg[\frac{{\bf p}_{n}^{2}}{2m}+\langle\psi_{e},t|V({\bf r}_{en})|\psi_{e},t\rangle-\frac{eq}{m_{e}mc^{2}}{\cal S}[{\bf p}_{n}\cdot\langle\psi_{e},t|{\cal S}[
{\bf D}^{\leftrightarrow}({\bf r}_{en})\cdot{\bf p}_{e}]|\psi_{e},t\rangle]
\Bigg]|\psi_{n},t \rangle \nonumber\\
&&\quad \approx\Bigg[\frac{{\bf p}_{n}^{2}}{2m}+V(|{\bf x}_{n}-{\bf x}_{ecl}(t)|)-\frac{eq}{m_{e}mc^{2}}{\cal S}[{\bf p}_{n}\cdot
{\bf D}^{\leftrightarrow}({\bf x}_{n}-{\bf x}_{ecl}(t))\cdot{\bf p}_{ecl}(t)]
\Bigg]|\psi_{n},t \rangle.\label{50b}
\end{eqnarray}
\end{subequations}
\noindent (Note the new terms in the scalar and vector potentials on the second line of (\ref{50b}) that do not appear in the two particle case.)
We can remove the extra phase terms from the $N$ particles by the phase transformation $|\psi_{n},t \rangle=|\psi_{n},t \rangle' e^{i\alpha_{n}(t)},
|\psi_{e},t \rangle=|\psi_{e},t \rangle'e^{-i\sum_{n=1}^{N}\alpha_{n}(t)}$. Eqs.(\ref{50a}),(\ref{50b}) then become
\begin{subequations}
\begin{eqnarray}
&&i\frac{d}{dt}|\psi_{e},t\rangle' +\Bigg[\sum_{n=1}^{N}\negthinspace '\langle\psi_{n},t|\Big[ i\frac{d}{dt}- \frac{{\bf p}_{n}^{2}}{2m}\Big]|\psi_{n},t\rangle '\Bigg]|\psi_{e},t\rangle'\nonumber\\
&&\quad\approx \Bigg[\frac{{\bf p}_{e}^{2}}{2m_{e}}+
\sum_{n=1}^{N}V(|{\bf x}_{e}-{\bf x}_{n cl}(t)|)-\frac{eq}{m_{e}mc^{2}}{\cal S}[{\bf p}_{e}\cdot\sum_{n=1}^{N}{\bf D}^{\leftrightarrow}({\bf x}_{e}-{\bf x}_{n cl}(t))\cdot{\bf p}_{ncl}(t)]
\Bigg]|\psi_{e},t\rangle',\label{51a}\\
&&i\frac{d}{dt}|\psi_{n},t\rangle' \approx\Bigg[\frac{{\bf p}_{n}^{2}}{2m}+V(|{\bf x}_{n}-{\bf x}_{ecl}(t)|)-\frac{eq}{m_{e}mc^{2}}{\cal S}[{\bf p}_{n}\cdot
{\bf D}^{\leftrightarrow}({\bf x}_{n}-{\bf x}_{ecl}(t))\cdot{\bf p}_{ecl}(t)]
\Bigg]|\psi_{n},t \rangle'.\label{51b}
\end{eqnarray}
\end{subequations}
By subtracting $\frac{{\bf p}_{n}^{2}}{2m}|\psi_{n},t \rangle'$ from (\ref{51b}), and taking the scalar product with $'\langle\psi_{n},t|$, we see that the extra phase in (\ref{51a}) is given by
\begin{equation}\label{52}
\dot\phi'_{e}(t)\equiv \sum_{n=1}^{N}\negthinspace'\langle\psi_{n},t|\Big[ i\frac{d}{dt}- \frac{{\bf p}_{n}^{2}}{2m}\Big]|\psi_{n},t\rangle'=\sum_{n=1}^{N}V(|{\bf x}_{ncl}(t)-{\bf x}_{e cl}(t)|)-
\frac{eq}{m_{e}mc^{2}}{\bf p}_{ecl}(t)\cdot \sum_{n=1}^{N}{\bf D}^{\leftrightarrow}({\bf x}_{ecl}(t)-{\bf x}_{ncl}(t))\cdot{\bf p}_{ncl}(t).
\end{equation}
\noindent Again, we find that the phase $\phi'_{e}(t)$ is the time integral of the interaction energy term of the Hamiltonian, where all operators are replaced by their classical values.
If we write $|\psi_{e},t\rangle'\equiv e^{i\phi'_{e}(t)}|\psi_{e},t\rangle''$, then $|\psi_{e},t\rangle''$ satisfies (\ref{51a}) without the extra phase term.
So, the wavefunction which solves (\ref{49}) approximately is the product of $N+1$ wave functions of the type we have considered, for the $N$ particles and the electron,
\textit{multiplied by the
extra phase factor} $e^{i\phi'_{e}(t)}$.
\subsection{$N$ Particles Interacting With a Single Particle: Interference. }
We shall now consider a situation where the electron goes on either path $A$ or path $B$, and so the $N$ interacting particles likewise
move on trajectories $A$ or $B$ determined by their interaction with the electron. The electron packets which traveled by route $A$ and $B$ are brought, at time $T$, to
exactly overlap at a half-silvered mirror. The electron is measured as either going right or left, just as was the case for particle 1 in the two particle example. However, for each of the $N$ particles, the wave packet on trajectory $A$ does not precisely overlap with the wave packet on trajectory $B$ at time $T$ (the overlap is presumed the same thereafter). So, we shall have to
consider the effect of this on the measured interference.
The wave function at time $t<T$ may be written as
\begin{equation}\label{53}
\Psi(x_{e},x_{1}, ... x_{N}, t)=\frac{1}{\sqrt{2}}\Big[e^{i\phi_{e}^{'A}(t)}\psi_{e}^{''A} (x_{e},t)\prod_{n=1}^{N}\psi_{n}^{'A} (x_{n},t)
+e^{i\phi_{e}^{'B}(t)}\psi_{e}^{''B} (x_{e},t)\prod_{n=1}^{N}\psi_{n}^{'B} (x_{n},t)\Big],
\end{equation}
\noindent where $\psi_{e}^{''}(x_{e},t)=\langle x_{e}|\psi_{e}, t\rangle'', \psi_{n}^{'}(x_{n},t)=\langle x_{n}|\psi_{n}, t\rangle'$ satisfy their respective
Schr\"odinger equations, motion under potentials where the position and momentum operators of the other interacting particle have been replaced by their time-dependent classical values. The wave function at time $T$, just after the electron has passed through the half-silvered mirror, is
$\Psi(x_{e},x_{1}, ... x_{N}, T)=\frac{1}{\sqrt{2}}[\Psi_{+}(x_{e},x_{1}, ... x_{N}, T)+\Psi_{-}(x_{e},x_{1}, ... x_{N}, T)]$, with
\begin{equation}\label{54}
\Psi_{\pm}(x_{e},x_{1}, ... x_{N}, T)=\frac{1}{\sqrt{2}}[e^{i\phi_{e}^{'A}(T)}\psi_{e}^{''A} (x_{e},T)\prod_{n=1}^{N}\psi_{n}^{'A} (x_{n},T)
\pm e^{i\phi_{e}^{'B}(T)}\psi_{e}^{''B} (x_{e},T)\prod_{n=1}^{N}\psi_{n}^{'B} (x_{n},T)].
\end{equation}
The individual particle wave functions in (\ref{54}) are of the form (\ref{40}).
The probabilities of the two outcomes are
\begin{eqnarray}\label{55}
P_{\pm}&\equiv&\int dx_{e}dx_{1}\cdots dx_{N}|\Psi_{\pm}
(x_{e},x_{1}, ... x_{N}, T)|^{2}\nonumber\\
&=&\frac{1}{4}\Bigg[2\pm e^{i(\phi_{e}^{'A}(T)-\phi_{e}^{'B}(T))}
\int dx_{e}\psi_{e}^{''A} (x_{e},T)\psi_{e}^{*''B}(x_{e},T )\prod_{n=1}^{N}\int dx_{n}\psi_{n}^{'A} (x_{n},T)\psi_{n}^{*'B} (x_{n},T) +cc \Bigg]
\end{eqnarray}
\noindent We now must perform the integrals in (\ref{55}) using the expressions (\ref{40}). The integral over $x_{e}$ is readily performed, since the
electron wave functions completely overlap at time $T$. The integrals over $x_{n}$ are more complicated, since the wave functions do not overlap.
The factor associated to interference of each particle in (\ref{55}) (leaving off the subscript and the primes) is
\begin{eqnarray}\label{56}
\int_{-\infty}^{\infty}dx\psi^{A}(x,T)\psi^{B*}(x,T)&\sim&\int_{-\infty}^{\infty}dx
e^{-\frac{(x-x_{cl}^{A}(T))^{2}}{4\sigma^{2}}}e^{-\frac{(x-x_{cl}^{B}(T))^{2}}{4\sigma^{2}}}e^{ip_{cl}^{A}(T)(x-x_{cl}^{A}(T))}e^{-ip_{cl}^{B}(T)(x-x_{cl}^{B}(T))}\nonumber\\
&& \cdot e^{i\int_{0}^{T}dt\frac{1}{2m}[p_{cl}^{A}(t)]^{2}-i\int_{0}^{T}dt\frac{1}{2m}[p_{cl}^{B}(t)]^{2}}e^{-iq[\int_{0}^{T}dt(V(x_{cl}^{A}(t),t)-V(x_{cl}^{B}(t),t)]}.
\end{eqnarray}
We may now use:
\begin{eqnarray}\label{57}
\int_{-\infty}^{\infty}dx
e^{-\frac{(x-a)^{2}}{4\sigma^{2}}}e^{-\frac{(x-b)^{2}}{4\sigma^{2}}}e^{ip^{1}(x-a)}e^{-ip^{2}(x-b)}&=&
\int_{-\infty}^{\infty}dx e^{-\frac{x^{2}}{2\sigma^{2}}}e^{\frac{x(a+b)}{2\sigma^{2}}}e^{-\frac{a^{2}}{4\sigma^{2}}} e^{-\frac{b^{2}}{4\sigma^{2}}} e^{i(p^{1}-p^{2})x}e^{-ip^{1}a} e^{ip^{2}b} \nonumber\\
&\sim&e^{\frac{\sigma^{2}}{2}[ \frac{(a+b)}{2\sigma^{2}}+i(p^{1}-p^{2})]^{2} }e^{-\frac{a^{2}}{4\sigma^{2}}} e^{-\frac{b^{2}}{4\sigma^{2}}}e^{-ip^{1}a} e^{ip^{2}b} \nonumber\\
&=&e^{-\frac{(a-b)^{2}}{8\sigma^{2}}} e^{-\frac{\sigma^{2}(p^{1}-p^{2})^{2}}{2}} e^{i\frac{(p^{1}+p^{2})(b-a)}{2}}. \nonumber\\
\end{eqnarray}
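As a quick sanity check (not needed for the argument), the identity (\ref{57}) -- including the constant prefactor $\sqrt{2\pi\sigma^{2}}$ that it drops -- can be verified numerically for arbitrary (hypothetical) parameter values:
\begin{verbatim}
# Numerical check of the Gaussian overlap identity (57), with the constant
# prefactor sqrt(2 pi sigma^2) restored; parameter values are arbitrary.
import numpy as np

sigma, a, b, p1, p2 = 0.7, 0.3, -0.5, 1.1, 0.4
x = np.linspace(-40.0, 40.0, 400001)
f = (np.exp(-(x - a)**2/(4*sigma**2)) * np.exp(-(x - b)**2/(4*sigma**2))
     * np.exp(1j*p1*(x - a)) * np.exp(-1j*p2*(x - b)))
lhs = np.sum(0.5*(f[1:] + f[:-1])*np.diff(x))          # trapezoid rule

rhs = (np.sqrt(2*np.pi*sigma**2)
       * np.exp(-(a - b)**2/(8*sigma**2))
       * np.exp(-sigma**2*(p1 - p2)**2/2)
       * np.exp(1j*(p1 + p2)*(b - a)/2))
print(lhs, rhs)   # the two complex numbers should agree closely
\end{verbatim}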
\noindent The factor in (\ref{56}) is then
\begin{eqnarray}\label{58}
&&\langle\psi^{B},T|\psi^{A},T\rangle\equiv|\langle\psi^{B},T|\psi^{A},T\rangle| e^{i\Phi} \nonumber\\
&=& e^{-\frac{(x_{cl}^{A}(T)-x_{cl}^{B}(T))^{2}}{8\sigma^{2}}}e^{-\frac{\sigma^{2}(p_{cl}^{A}(T)-p_{cl}^{B}(T))^{2}}{2}}e^{i\frac{1}{2}(p_{cl}^{A}(T)+p_{cl}^{B}(T))(x_{cl}^{B}(T)-x_{cl}^{A}(T)) +i\frac{1}{2m}\int_{0}^{T}dt\Big[[p_{cl}^{A}(t)]^{2}-[p_{cl}^{B}(t)]^{2}\Big] -iq\int_{0}^{T}dt[V(x_{cl}^{A}(t),t)-V(x_{cl}^{B}(t),t)]}.\nonumber\\
\end{eqnarray}
For the electron, since $x_{cl}^{A}(T)=x_{cl}^{B}(T), p_{cl}^{A}(T)=p_{cl}^{B}(T)$, the magnitude in (\ref{58}) is 1. The phase analysis is precisely the same as for either particle in
the case of two particles, resulting in the phase difference we shall call $\Phi_{e}^{A}-\Phi_{e}^{B}$. $\Phi_{e}^{A,B }$ is the negative time integral of
the interaction Hamiltonian with all particle operators replaced by time-dependent classical variables, for that trajectory ($A$ or $B$). This cancels the extra phase factor $\phi_{e}^{'A}(T)-\phi_{e}^{'B}(T)$. Therefore, the net phase is that of the $N$ particles.
The phase shift contributed by each particle, according to (\ref{58}), is
\begin{eqnarray}\label{59}
\Phi_{n}&=&\frac{1}{2}(p_{ncl}^{A}(T)+p_{ncl}^{B}(T))(x_{ncl}^{B}(T)-x_{ncl}^{A}(T)) +\frac{1}{2m}\int_{0}^{T}dt\Big[[p_{ncl}^{A}(t)]^{2}-[p_{ncl}^{B}(t)]^{2}\Big]\nonumber\\
&&\qquad\qquad\qquad\qquad\qquad \qquad -q\int_{0}^{T}dt[V(x_{ncl}^{A}(t),t)-V(x_{ncl}^{B}(t),t)].
\end{eqnarray}
\noindent For the electron, the first term vanishes, but here it does not. The analysis is the same as previously, using the
expressions (\ref{11}) for the classical position and momentum in terms of the potentials, recalling that terms proportional to the square of the charges are disregarded:
\begin{subequations}
\begin{eqnarray}\label{60}
&&\frac{1}{2}(p_{cl}^{A}(T)+p_{cl}^{B}(T))(x_{cl}^{B}(T)-x_{cl}^{A}(T))\approx\frac{1}{2}[2mv_{0}]\frac{q}{m}\int_{0}^{T}dt\int_{0}^{t}dt'[{\cal E}(x_{cl}^{B}(t'),t')-{\cal E}(x_{cl}^{A}(t'),t')],\label{60a}\\
&&\frac{1}{2m}\int_{0}^{T}dt\Big[[p_{cl}^{A}(t)]^{2}-[p_{cl}^{B}(t)]^{2}\Big]=\frac{1}{2m}\int_{0}^{T}dt[p_{cl}^{A}(t)+p_{cl}^{B}(t)][p_{cl}^{A}(t)-p_{cl}^{B}(t)]\nonumber\\
&\approx&\frac{1}{2m}\int_{0}^{T}dt[2mv_{0}]\Big[\frac{q}{c}[A(x_{cl}^{A}(t),t)-A(x_{cl}^{B}(t),t)]+q\int_{0}^{t}dt'[{\cal E}(x_{cl}^{A}(t'),t')-{\cal E}(x_{cl}^{B}(t'),t')]\Big].\label{60b}
\end{eqnarray}
\end{subequations}
\noindent Adding (\ref{60a}),(\ref{60b}), the double integral of the electric field vanishes, and putting the sum into (\ref{59}) gives the phase contribution of the $n$th particle,
\begin{eqnarray}\label{61}
\Phi_{n}&=&\int_{0}^{T}dt\frac{qv_{n0}}{c}[A(x_{ncl}^{A}(t),t)-A(x_{ncl}^{B}(t),t)]-q\int_{0}^{T}dt[V(x_{ncl}^{A}(t),t)-V(x_{ncl}^{B}(t),t)]\nonumber\\
&\approx&\int_{0}^{T}dt\frac{qv_{ncl}(t)}{c}[A(x_{ncl}^{A}(t),t)-A(x_{ncl}^{B}(t),t)]-q\int_{0}^{T}dt[V(x_{ncl}^{A}(t),t)-V(x_{ncl}^{B}(t),t)]\nonumber\\
&=&\int_{0}^{T}dt\frac{q{\bf v}_{ncl}(t)}{c}\cdot [{\bf A}(x_{ncl}^{A}(t),t)-{\bf A}(x_{ncl}^{B}(t),t)]-q\int_{0}^{T}dt[V(x_{ncl}^{A}(t),t)-V(x_{ncl}^{B}(t),t)]
\end{eqnarray}
Therefore, the phase contributed by all $N$ particles is
\begin{eqnarray}\label{62}
\Phi_{N}&\equiv &\Phi_{N}^{A}-\Phi_{N}^{B}=\sum_{n=1}^{N}\Phi_{n}\nonumber\\
&=&\int_{0}^{T}dt\sum_{n=1}^{N} \Big[\frac{q{\bf v}_{ncl}(t)}{c}\cdot{\bf A}(x_{ncl}^{A}(t),t)-qV(x_{ncl}^{A}(t),t)\Big]
-\int_{0}^{T}dt\sum_{n=1}^{N} \Big[\frac{q{\bf v}_{ncl}(t)}{c}\cdot{\bf A}(x_{ncl}^{B}(t),t)-qV(x_{ncl}^{B}(t),t)\Big].
\end{eqnarray}
Thus, we see that the phase $\Phi_{N}^{A,B} =\Phi_{e}^{A,B}\equiv \Phi^{A,B}$ is the negative of the time integral of the interaction Hamiltonian with operators replaced
by their classical counterparts for the associated trajectory. \textit{Again we see the crucial effect of the extra phase}: since $\Phi^{A}- \Phi^{B}=- [\phi_{e}^{'A}(T)-\phi_{e}^{'B}(T)]$, we have for the probabilities (\ref{55}),
using (\ref{58}),
\begin{equation}\label{63}
P_{\pm}=\frac{1}{2}\Big[1\pm \prod_{n=1}^{N}e^{-\frac{(x_{ncl}^{A}(T)-x_{ncl}^{B}(T))^{2}}{8\sigma^{2}}}e^{-\frac{\sigma^{2}(p_{ncl}^{A}(T)-p_{ncl}^{B}(T))^{2}}{2}}\cos[\Phi^{A}-\Phi^{B}]\Big].
\end{equation}
\noindent When we apply Eq.(\ref{63}) to the magnetic A-B effect, where the $N$ particles comprise the solenoid, we shall evaluate the magnitude of the
interference term.
A few remarks are in order before we close this section.
Of course, the wave function of a quantized solenoid under the influence of the classical field of an electron (as done by Vaidman, and verified by us in the next section) has a phase, since all wave functions have a phase. But, why should that phase be precisely the same as that acquired by the quantized electron
moving in the classical potentials of the solenoid? We have answered that question here. At heart, it is due to the symmetry of the potentials under
exchange of the electron and solenoid variables.
A concise way to see the equality of these phases is to note that the current of the electron is ${\bf J}_{e}({\bf x},t)=e{\bf v}_{e}(t)\delta ({\bf x}- {\bf x}_{e}(t))$
so the phase contribution of the electron which moved in the classical field of the N particles until time $T$ may be expressed as
\[
\Phi_{e}=\frac{1}{c}\int_{0}^{T} dt\int d{{\bf x}}{\bf J}_{e}({\bf x},t)\cdot {\bf A}_{N}({\bf x},t),
\]
\noindent where ${\bf A}_{N}({\bf x},t)$ is the vector potential due to the $N$ particles.\footnote{ In the Coulomb gauge, either the current or the transverse current ${\bf J}_{eT}({\bf x},t)$
may be put into this equation: $\int d{{\bf x}}{\bf J}_{e}({\bf x},t)\cdot {\bf A}_{N}({\bf x},t)=\int d{{\bf x}}{\bf J}_{eT}({\bf x},t)\cdot {\bf A}_{N}({\bf x},t)$. This is because ${\bf J}_{e}({\bf x},t)-{\bf J}_{eT}({\bf x},t)$ is a gradient, and its contribution to the integral vanishes, since ${\bf \nabla}\cdot{\bf A}_{N}({\bf x},t)=0$.} And, the current of the $N$ particles is
${\bf J}_{N}({\bf x},t)=q\sum_{n=1}^{N}{\bf v}_{n}(t)\delta ({\bf x}- {\bf x}_{n}(t))$, so their phase acquired while moving in the classical field of the electron is
\[
\Phi_{N}=\frac{1}{c}\int_{0}^{T} dt\int d{{\bf x}}{\bf J}_{N}({\bf x},t)\cdot {\bf A}_{e}({\bf x},t),
\]
Then, using ${\bf J}=-(c/4\pi)\nabla^{2}{\bf A}$ (to the order considered) and integrating by parts twice,
\begin{eqnarray}
\Phi_{e}&=&\frac{1}{c}\int_{0}^{T} dt\int d{{\bf x}}\Big[-\frac{c}{4\pi}\nabla^{2}{\bf A}_{e}({\bf x},t)\Big]\cdot{\bf A}_{N}({\bf x},t)=
\frac{1}{c}\int_{0}^{T} dt\int d{{\bf x}}\,{\bf A}_{e}({\bf x},t)\cdot\Big[-\frac{c}{4\pi}\nabla^{2}{\bf A}_{N}({\bf x},t)\Big]\nonumber\\
&=&\frac{1}{c}\int_{0}^{T} dt\int d{{\bf x}}{\bf A}_{e}({\bf x},t)\cdot{\bf J}_{N}({\bf x},t)=\Phi_{N}.\nonumber
\end{eqnarray}
\section{Application to the Magnetic A-B Effect.}
We now apply these considerations to the Aharonov-Bohm magnetic effect, where we model the solenoid as a collection of $N$ particles.
As remarked in the introduction, Vaidman demonstrated by a semi-classical calculation that the phase shift associated to the motion of the solenoid under the vector potential of the electric field of the electron is equal to the usual phase shift associated to the motion of the electron under the vector potential of the solenoid. Using our fully quantum calculation, we have confirmed this in the previous section by a general argument. In this section we shall show this by direct calculation. And, we shall show that
the magnitude of the interference term in (\ref{63}) is essentially 1 for reasonable values of the parameters in our model.
\subsection{Model}
We suppose that the electron moves in from infinity to location $(x=0, y=-R, z=0)\equiv(R,\phi'=-\frac{\pi}{2},z=0)$ at time $t=0$, where it is split by a beam splitter into two packets, each packet then circulating around the solenoid in a half circle (in the $z=0$ plane) of radius $R$ with speed $u$ (Fig. \ref{Fig.1}). We choose the discontinuity in angle at $-\frac{\pi}{2}\leftrightarrow\frac{3\pi}{2}$. The right-side packet goes counterclockwise with angle $\phi'(t)=-\frac{\pi}{2}+\frac{ut}{R}$,
the left-side packet goes clockwise with angle $\phi'(t)=\frac{3\pi}{2}-\frac{ut}{R}$, with the packets meeting again after time $T=\frac{\pi R}{u}$ at $(R, \pi/2, z=0)$ at a second beam splitter, from which the sum of packets emerges from one side and the difference from the other side.
\begin{figure}
\caption{The electron packet trajectories in the $z=0$ plane and a cross-section of the rotating cylindrical shells. The ``splitter" boxes do not represent simply
beam splitters but whatever ``optics" is necessary to
execute the electron behaviors explained in the text.}
\label{Fig.1}
\end{figure}
\begin{figure}
\caption{ Illustrating one of the two superimposed cylindrical shells comprising the solenoid, a hollow ring within the shell containing charged pieces, and the wave packet of a piece.}
\label{Fig.2}
\end{figure}
We model the solenoid as comprised of two concentric cylindrical shells of nearly the same radius (which, for simplicity, we shall consider as superimposed).
Each cylinder consists of a stack of, say square cross-sectioned, hollow rings (see Fig.\ref{Fig.2}), the center of each ring at $x=0, y=0, z$, with the rings extending from $z=-L/2$ to $z=L/2$. Each ring is of radius
$a$, height characterized by coordinate $z$. Each ring contains charged pieces, each of mass $m$ and charge $\pm q$, moving with speed $v_{0}$, each of localized extent and well-defined momentum so that our previous results apply.
As the electron moves in, its Coulomb field alters the speed of these pieces as well as their density, but the current, which is their product, is
unchanged\footnote{
Suppose the electron wave packet moves in adiabatically from infinity to $x=0, y=-R, z=0$. Then,
one can neglect the vector potential of the electron as well as neglect $\dot {\bf E}$. This leaves the electric field as the gradient of the scalar potential. It then follows from Maxwell's equation
$\dot {\bf B}=-c{\bf\nabla}\times {\bf E}=0$ that the magnetic field is unchanged. Thus, we see from $ {\bf J}=(c/4\pi)\bf{\nabla}\times \bf{B}$ that $\dot {\bf J}=0$, so the current is unchanged, $J=J_{0}$. Thus, the current associated to the $n$th piece $J_{0}=\sigma_{n}v_{n}$ is unchanged although the surface charge density $\sigma_{n}$
and the velocity $v_{n}$ have both been changed from $\sigma_{0}$ and $v_{0}$. One may further show that $v_{n}(\phi)\approx v_{0}+\frac{qeaR\sin\phi}{mv_{0}D^{3}}.$
Thus, we may take the particle densities and velocities as having the constant values $\sigma_{0}$ and $v_{0}$
since the effect of the correction term $\sim q$ upon the phase shift is $\sim q^{2}$, which may be ignored.}.
The charge may be written as
\begin{equation}\label{64}
q=dzd\phi Q/2\pi L,
\end{equation}
\noindent where $\phi$ is the angular coordinate of a piece, $Q$ ($M$) is the magnitude of the total charge (total mass) in one cylindrical shell, $dz$ is the height of a ring, and $a\,d\phi$ is the length of each piece. The charge to mass ratio is $q/m=Q/M$ for each piece.
The positively charged pieces move counterclockwise, the negatively charged pieces move clockwise, with uniform surface charge density $\sigma_{0}=Q/2\pi aL$, so there is
linear current density $J_{0}=Qv_{0}/2\pi aL$ in each ring. Thus, the magnetic field due to the two cylinders is
\begin{equation}\label{65}
B_{0}= 2\frac{4\pi}{c}J_{0}=4\frac{v_{0}}{c}\frac{Q}{ a L}
\end{equation}
\subsection{Magnitude of Interference Term}
One expects the magnitude to differ negligibly from 1.
For the electron, in either the magnetic or the electric A-B effect, when the packets recombine at time $T$, they are brought by the beam splitter to have the same position
and momentum,
$x_{ecl}^{1}(T)=x_{ecl}^{2}(T), p_{ecl}^{1}(T)=p_{ecl}^{2}(T)$, which we have seen implies that the electron contributes a factor 1 to the magnitude.
For the electric A-B effect, as discussed in Section VII and Appendix D, the capacitor plate dynamics is that of a single particle
(i.e., the plate wave function describes its center of mass behavior) and, when
the electron is brought to interfere, the two plates then overlap almost completely in position and momentum, and so their contribution to the magnitude is $\approx 1$.
The solenoid in the magnetic A-B effect requires a lot more discussion. We see from (\ref{63}) that there is a dependence upon both position and momentum for each piece.
Qualitatively, the A-B shift is shared among a large number of pieces (as we shall call the charged particles making up the solenoid). Each piece wave function
then has such small relative shift (for the electron packet's two traverses) in position and momentum that the magnitude of overlap is $\approx 1$.
In terms of deBroglie waves, this corresponds to the
need for the deBroglie waves to overlap spatially almost completely, to shift very slightly, and to have nearly the same frequency to get maximal
interference.
In our model, suppose each ring of radius $a$ contains $n_{a}$
pieces of wave packet
size $\sigma$ moving clockwise with speed $v_{0}$, each with $n_{e}$ electrons. Each ring also contains
$n_{a}$ similar sized pieces moving counterclockwise with speed $v_{0}$, each with $n_{e}$ ``positive electrons." We note that $\sigma=2\pi a/n_{a}$, and the mass of a piece is
$m=n_{e}m_{e}$, where $m_{e}$ is the mass of an electron. The solenoid is of length $L$ and
contains $n_{L}\equiv L/\sigma$ rings. The number of pieces is $N=2n_{L}n_{a}$.
We shall give here an example, to illustrate how, for reasonable values of the parameters, the sum of all the exponents in (\ref{63}) corresponding to all $N$ pieces in the solenoid can be negligibly small.
Say $a$ is of order 1 cm, $R$ is of order 10 cm, $L$ is of order 100 cm, $v_{0}$ is of order 1 cm/s (a typical drift velocity of electrons in a conductor), and $u$ is of order 1 m/s.
In terms of the total number of positive or negative electron charges, $N_{e}=n_{e}n_{a}n_{L}$, the A-B shift is $\Phi_{AB}=4\pi N_{e}\frac{e^{2}}{\hbar c}\frac{v_{0}}{c}\frac{a}{L}$ (see Eq.(\ref{67}), below). Say the setup gives a shift $\Phi_{AB}=\pi$. Then, $N_{e}\approx 10^{14}$ electrons.
The strongest constraint on $n_{a}$ is the condition for our model that $\lambda\equiv h/mv_{0}<<\sigma$, i.e., that a solenoid piece wave packet contain many wavelengths so that its momentum is well defined. Using $m=n_{e}m_{e}=\frac{N_{e}}{n_{a}n_{L}}m_{e}=\frac{N_{e}}{n_{a}}\frac{L}{\sigma}m_{e}$ and $\sigma=2\pi a/n_{a}$, this constraint becomes
$n_{a}^{3}<<N_{e}\frac{m_{e}v_{0}a}{\hbar}\frac{2\pi a}{L}\approx 6\times10^{12}$, which we satisfy by choosing $n_{a}\approx 1000$ pieces per ring.
This determines $m$ and so makes $\lambda\approx 10^{-6}$cm and determines $\sigma\approx 6\times 10^{-3}$cm, so there are $\approx 6000$ wavelengths in a wavepacket. Also, then there are $n_{e}\approx 6\times 10^{6}$ electrons per piece, $n_{L}\approx1.5\times 10^{4}$ rings in the solenoid and $n_{p}=1.5\times 10^{7}$ pieces in the solenoid.
For this estimate, we shall treat each piece of the solenoid as participating equally in the phase shift, even though the pieces in the ring in the plane of the electron's motion, being closer to the electron, can receive a larger force and therefore displacement and, also, pieces in the same ring are displaced differently.
Since the A-B shift $\Phi_{AB}=\pi$ corresponds to a total shift of all pieces by a distance $\lambda/2\approx 5\times 10^{-7}$cm, each
individual piece shifts by a distance $\lambda/2n_{p}\approx 3\times 10^{-14}$cm. The squared fractional displacement
appearing in the exponent of the first term in (\ref{63}) for one piece is therefore $\frac{(x_{cl}^{1}(T)-x_{cl}^{2}(T))^{2}}{8\sigma^{2}}\approx 3\times 10^{-24}$, and for all $n_{p}=1.5\times 10^{7}$ pieces the spatial displacement total exponent is $\approx 10^{-16}$.
In order to achieve the displacement of $\approx 3\times 10^{-14}$cm in the time $T=\pi R/u\approx 0.3$sec it takes the electron packets to complete their traverse,
the relative speed change of the pieces is $\Delta v \approx 10^{-13}$cm/s. Therefore, since $\sigma(p_{cl}^{1}(T)-p_{cl}^{2}(T))/\hbar\approx \sigma m \Delta v/\hbar$, using
$m\approx n_{e}m_{e}$, we have $\sigma(p_{cl}^{1}(T)-p_{cl}^{2}(T))/\hbar\approx 10^{-8}$. Thus for one piece, the momentum contribution to the
exponent in
(\ref{63}) is therefore $\approx 0.5\times 10^{-16}$, and for all $n_{p}$ pieces the momentum-dependent total exponent is $\approx 10^{-9}$.
We conclude that the magnitude in (\ref{63}) is 1 to high accuracy.
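The bookkeeping above can be collected in a few lines; the following sketch (CGS units, the same illustrative parameter choices as in the text) reproduces the quoted orders of magnitude.
\begin{verbatim}
# Order-of-magnitude bookkeeping for the solenoid model (a sketch; CGS units).
import numpy as np

hbar, h, me, c = 1.05e-27, 6.63e-27, 9.11e-28, 3.0e10
alpha = 1.0/137.0                       # e^2/(hbar c)
a, R, L, v0, u = 1.0, 10.0, 100.0, 1.0, 100.0   # cm, cm, cm, cm/s, cm/s

Ne = np.pi/(4*np.pi*alpha*(v0/c)*(a/L)) # charges giving Phi_AB = pi, Eq. (67)
n_a = 1000.0                            # chosen pieces per ring
sigma = 2*np.pi*a/n_a                   # packet size
n_L = L/sigma                           # rings in the solenoid
n_e = Ne/(n_a*n_L)                      # electrons per piece
m = n_e*me                              # mass of a piece
lam = h/(m*v0)                          # de Broglie wavelength of a piece
n_p = n_a*n_L                           # pieces, as counted in the text

dx = lam/(2*n_p)                        # displacement of one piece
T = np.pi*R/u                           # traverse time of the electron
dv = dx/T                               # speed change of one piece
exp_x = n_p*dx**2/(8*sigma**2)          # total position exponent in (63)
exp_p = n_p*0.5*(sigma*m*dv/hbar)**2    # total momentum exponent in (63)
print(Ne, sigma, lam, exp_x, exp_p)     # ~1e14, ~6e-3 cm, ~1e-6 cm, <<1, <<1
\end{verbatim}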
\subsection{Direct Calculation of Phase Shift}
\begin{figure}
\caption{Illustration of the notation.}
\label{Fig.3}
\end{figure}
The phase shift of the electron moving in the vector potential of the solenoid, the well-known calculation of the A-B phase shift, is
\begin{equation}\label{66}
\Phi_{AB}=\Phi_{e}^{A}-\Phi_{e}^{B}=\frac{1}{c}\int_{0}^{T} dt\, eu A(R)-\frac{1}{c}\int_{0}^{T} dt\, e(-u) A(R)=2T \frac{e}{c}u \frac{a^{2}B_{0}}{2R}= \frac{e}{c}\pi a^{2}B_{0},
\end{equation}
\noindent where $A(R)=\frac{a^{2}B_{0}}{2R}$ is the magnitude of the solenoid's vector potential, which is parallel to the electron velocity for trajectory $A$
and antiparallel for trajectory $B$, and we have used $uT=\pi R$. (In the previous section's discussion of the magnitude of the interference term, we have used the expression
\begin{equation}\label{67}
\Phi_{AB}= \frac{e}{c}\pi a^{2}B_{0}=\frac{e}{c}\pi a^{2}\Big[4\frac{v_{0}}{c}\frac{N_{e}e}{ a L} \Big]\frac{1}{\hbar},
\end{equation}
\noindent where $B_{0}$ comes from (\ref{65}), $Q=N_{e}e$ and the correct factor of $\hbar$ has been inserted.)
Now we explicitly calculate the phase contribution of the pieces of the solenoid due to the field of the electron.
We shall do this in both the Lorenz gauge
and the Coulomb gauge.
At time $t$, the electron is at the location $(R,\phi'(t),0)$, where $\phi'(t)=-\pi/2+ut/R$, and the solenoid piece is at $(a,\phi_{n}(t),z_{n})$
where $\phi_{n}(t)=\phi_{n}(0)+v_{0}t/a+o(q)$. We identify each piece, formerly labeled by the index $n$, by its initial angular value $\phi_{n}(0)$ and its
$z=z_{n}$ value. According to (\ref{32}), the vector potential caused by the electron at the
location of the piece in the solenoid at time $t$ is
\begin{subequations}
\begin{eqnarray}
{\bf A}(\phi_{n}(0), z_{n},t)&=&\frac{e}{ c}\frac{{\bf u}(t)}{|{\bf r}-{\bf R}|}, \label{68a}\\
{\bf A}(\phi_{n}(0), z_{n},t)&=&\frac{e}{ 2c}\frac{{\bf u}(t)}{|{\bf r}-{\bf R}|}+
\frac{e}{ 2c}[{\bf r}-{\bf R}]\frac{{\bf u}(t)\cdot[{\bf r}-{\bf R}]}{|{\bf r}-{\bf R}|^{3}}, \label{68b}
\end{eqnarray}
\end{subequations}
\noindent in the Lorenz and Coulomb gauges respectively. Here, ${\bf r}={\bf r}(a,\phi_{n}(t),z_{n}), {\bf R}={\bf R}(R,\phi'(t),0)$.
First we treat the Lorenz gauge. We consider the counter-clockwise traverse of the electron (path A) and the phase it gives to the positively charged pieces of the solenoid.
The contribution to the phase for the $n$th piece is
\begin{eqnarray}\label{69}
\Phi_{n}^{A}(z_{n},\phi_{n}(0), T)&=&\int_{0}^{T}dt \frac{q}{c}v_{0}\hat \phi(t)\cdot{\bf A}(z,t)\nonumber\\
&=& \frac{v_{0}equ}{c^{2}}\int_{0}^{T}dt \frac{\cos(\phi_{0}+\frac{v_{0}t}{a} +\pi/2-\frac{ut}{R})}{\sqrt{R^{2}+z^{2}+a^{2}-2aR\cos(\phi_{0}+\frac{v_{0}t}{a} +\pi/2-\frac{ut}{R})}}
\end{eqnarray}
\noindent where we have used $\hat\phi(t)\cdot{\bf u}(t)=u\hat\phi(t)\cdot\hat\phi'(t)=u\cos(\phi(t)-\phi'(t))$ and ${\bf r}\cdot {\bf R}=aR\cos(\phi(t)-\phi'(t))$.
We now use (\ref{64}) to write $q=dzd\phi_{0}Q/2\pi L$ and sum over all pieces, i.e., integrate over $\phi_{0}$ and $z$. Since $\phi_{0}$
is integrated over a $2\pi$ range, the argument of the cosine can be replaced by an angle $\theta$ integrated over that range. Then, we assume that
the solenoid length $L>>a, R$, which simplifies the integral over $z$:
\begin{eqnarray}\label{70}
\Phi_{sol}^{A}&=& \frac{v_{0}Qeu}{2\pi Lc^{2}}\int_{0}^{T}dt\int_{0}^{2\pi}d\theta\int_{-L/2}^{L/2}dz
\frac{\cos\theta}{\sqrt{R^{2}+z^{2}+a^{2}-2aR\cos\theta}} \nonumber\\
&\approx& \frac{v_{0}QeuT}{2\pi Lc^{2}}\int_{0}^{2\pi}d\theta \cos\theta \Big[2 \ln L-\ln[R^{2}+a^{2}-2aR\cos\theta]\Big] \hbox{ and, integrating by parts,}
\nonumber\\
&=& \frac{v_{0}QeuT}{2\pi Lc^{2}}\int_{0}^{2\pi}\sin\theta d\ln[R^{2}+a^{2}-2aR\cos\theta] = \frac{v_{0}QeuT}{2\pi Lc^{2}}2aR\int_{0}^{2\pi}d\theta \frac{\sin^{2}\theta}{R^{2}+a^{2}-2aR\cos\theta} \nonumber\\
&=& \frac{v_{0}QeuT}{2\pi Lc^{2}}2aR\frac{1}{R^{2}}\int_{0}^{2\pi}d\theta \frac{\sin^{2}\theta}{1+(a/R)^{2}-2a/R\cos\theta}\nonumber\\
&=&\frac{v_{0}QeuT}{2\pi Lc^{2}}2aR\frac{1}{R^{2}}\pi=\frac{e}{4c}\pi a^{2}B_{0}
\end{eqnarray}
\noindent where the last integral over $\theta$ is $\pi$ if $a/R<1$, which it is, and we have used $uT=\pi R$ and the expression (\ref{65}) for $B_{0}$.
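The value of the last angular integral over $\theta$ can be checked using the standard result $\int_{0}^{2\pi}d\theta\,\cos n\theta/(1+r^{2}-2r\cos\theta)=2\pi r^{n}/(1-r^{2})$ for $|r|<1$: writing $r=a/R$ and $\sin^{2}\theta=\frac{1}{2}(1-\cos 2\theta)$,
\[
\int_{0}^{2\pi}d\theta\,\frac{\sin^{2}\theta}{1+r^{2}-2r\cos\theta}
=\frac{1}{2}\Big[\frac{2\pi}{1-r^{2}}-\frac{2\pi r^{2}}{1-r^{2}}\Big]=\pi .
\]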
The result (\ref{70}) is $1/4 $ the A-B shift (\ref{66}). The negatively charged rings give the same result ($Q\rightarrow-Q, v_{0}\rightarrow-v_{0}$), so traverse A
gives $1/2 $ the A-B shift. The clockwise traverse (path $B$) also gives $1/2 $ the A-B shift ($u\rightarrow -u$, and the $B$ traverse phase is subtracted). Thus
we get the full A-B shift.
For the Coulomb gauge, the first term in the vector potential expression (\ref{68b}) is 1/2 the Lorenz gauge potential (\ref{68a}), so this term gives 1/8 the A-B shift.
Using $\hat\phi(t)\cdot[{\bf r}-{\bf R}]=- \hat\phi(t)\cdot{\bf R}=R \sin(\phi(t)-\phi'(t))$ and ${\bf u}(t)\cdot[{\bf r}-{\bf R}]= {\bf u}(t)\cdot{\bf r}=ua \sin(\phi(t)-\phi'(t))$, the second term in (\ref{68b}) gives the shift, for large $L$:
\begin{eqnarray}\label{71}
\Phi_{sol}^{A}&=&\frac{1}{2} \frac{v_{0}QeuaR}{2\pi Lc^{2}}\int_{0}^{T}dt\int_{0}^{2\pi}d\theta\int_{-L/2}^{L/2}dz
\frac{\sin^{2}\theta}{[R^{2}+z^{2}+a^{2}-2aR\cos\theta]^{3/2}} \nonumber\\
&\approx& \frac{v_{0}QeuTaR}{4\pi Lc^{2}}\int_{0}^{2\pi}d\theta \frac{2\sin^{2}\theta}{R^{2}+a^{2}-2aR\cos\theta}
\end{eqnarray}
\noindent This is 1/2 the result given by the last term on the third line in Eq.(\ref{70}), i.e., it is 1/8 of the A-B shift, so both vector potential terms combined in the Coulomb gauge give
1/4 the A-B phase shift.
\subsection{A time-average approach to understanding the contribution of a solenoid piece to the phase shift.}
Here is a simple, intuitively appealing, approach which gives the result (\ref{70}).
Suppose each piece of the solenoid moves slowly compared to the speed of the electron over the interval $T$. Then, one might consider the approximation of replacing the vector potential
created by the electron at the site of a piece, during the piece's traverse along its own path, by the time-averaged potential it experiences.
This averaging technique will also apply even if the speed of the solenoid pieces is not slow compared to the electron speed, as in the prior section. Because one piece of the solenoid replaces another during their motion, a piece will always be present at a given place in the solenoid, and thus that place can be considered to experience an average vector potential, and we can add the contributions from all the places.
This approach starts with the following Hamiltonian for a piece:
\begin{equation}\label{72}
H=\frac{1}{2m}[\hat p-\frac{q}{c}\langle A\rangle_{T}]^{2}\hbox{ where } \langle A\rangle_{T}\equiv\frac{1}{T}\int_{0}^{T}dtA(t).
\end{equation}
\noindent Here $A(t)$ is the component of the electron's vector potential along the path of the solenoid piece.
Because $\langle A\rangle_{T}$ is a constant, it immediately follows (as is usual when treating the phase contributed by the electron's motion) that the phase contributed by the piece is the path integral of the vector potential:
\begin{equation}\label{73}
\hbox{Phase}(T)=\int_{path}ds\frac{q}{c}\langle A\rangle_{T}=v_{0}T\frac{q}{c}\langle A\rangle_{T}.
\end{equation}
\noindent But, this is precisely the expression given in (\ref{40}) for the phase contributed by the motion of a piece which, when summed over all positively charged pieces as in the previous section, gives 1/4 the A-B phase.
Including, in a similar way, the contribution of the negatively charged pieces and the contribution of the left traverse, gives the full A-B shift.
Thus, the phase shift can be viewed as due to the action of the \textit{time-averaged} electric-field-producing vector potential
of the electron acting on the charged pieces of the solenoid as they travel their short paths, the small arcs of the ring in which they are constrained to move.
A more detailed analysis from this point of view appears in Appendix C.
\subsection{Electric Force in the Magnetic A-B Effect.}
We have shown that the standard A-B calculation and the solenoid-involved calculation are two alternative ways of calculating the same thing.
It is remarkable how conceptually dissimilar they are. In the usual case, there is the situation of
a non-field-producing potential causing the phase shift. In this subsection we would like to point out that, in the other case, although we have expressed the phase shift in terms of the vector potential, it can be
looked at as due to the electric force of the electron on the solenoid pieces, the view that Vaidman took in his semi-classical calculation \cite{Vaidman}.
It may be noted that Vaidman reasoned the
existence of the electric force on the solenoid using
the Faraday law. Faraday's law encompasses two
distinct effects (see e.g. reference 8), one dealing
with the EMF generated in a conductor traveling
through a magnetic field, the other dealing with the
EMF generated when there is a changing magnetic
field inside a closed path. Vaidman's analysis uses
the latter. He reasoned that, when the electron
accelerates from rest and begins its traverse, it
produces a changing magnetic flux in the solenoid.
Thus, he obtained the electric field induced in the
solenoid (since its integral over the solenoid
circumference is the EMF), whose force changes the
speed of the charged solenoid cylinders. This expression of the second Faraday law effect is equivalent to noting that the accelerating electron causes a time-changing vector potential, whose (negative) time derivative is this
same electric field.
Consider for example the effect of the electron's vector potential, on the positively charged solenoid pieces, during its right-traverse. We shall suppose that the electron starts from rest
so that its initial vector potential is 0 (we ignore the solenoid's self-vector potential). We take the time dependence of the electron's vector potential at the site of each solenoid piece
to be just due to the electron's motion and not the piece's motion, i.e., ${\bf A}({\bf r}_{n},t)$ at the site ${\bf r}_{n}$. This is
because the replacement of solenoid pieces by successive pieces means we need only label the pieces' locations and ignore their translation. Then, the phase associated to these solenoid pieces may be written as
\begin{eqnarray}\label{74}
\Phi^{A}&=&\int_{0}^{T}dt \sum_{n}\frac{q}{c}{\bf v}_{n}\cdot {\bf A}({\bf r}_{n},t)\nonumber\\
&=&-\sum_{n}\int_{0}^{T}d{\bf x}_{n}(t) \cdot\int_{0}^{t}dt'\Big[- \frac{\partial}{\partial t'} \frac{q}{c}{\bf A}({\bf r}_{n},t')\Big]=-\sum_{n}\int_{0}^{T}d{\bf x}_{n}(t) \cdot \int_{0}^{t}dt'{\bf F}_{n}({\bf r}_{n},t').
\end{eqnarray}
\noindent We have written ${\bf v}_{n}dt=d{\bf x}_{n}$ as the displacement of the $n$th piece during $dt$. We have noted that the bracketed expression is the electric force ${\bf F}_{n}({\bf r}_{n},t')$
exerted by the electron on the $n$th particle
in the interval $dt'$.
Thus, this phase shift contribution has been expressed in terms of $dt'{\bf F}_{n}({\bf r}_{n},t')=dp_{n}(t')$, the impulse exerted during $dt'$ by the electron on the solenoid particles.
If we consider that the electron rapidly accelerates to speed $u$ at the beginning of its trajectory, since the vector potential is proportional to $u(t)$,
there is a large initial impulse (followed, as the electron continues its traverse, by a force on each solenoid piece which averages out over all pieces to 0),
causing a sudden initial change of the momentum $\Delta {\bf p}_{n}$ of the positively charged pieces (and the opposite change in momentum for the negatively charged pieces), so
$\Phi^{A}=-\sum_{n}\int_{0}^{T}d{\bf x}_{n}(t) \cdot\Delta {\bf p}_{n}$.
This impulse-induced differential of momentum, occurring oppositely for the electron's left-traverse, causes the phase shift to accumulate over the interval $T$, which was the point of view taken by Vaidman in his semi-classical calculation. We can see that here by writing $\Delta p_{n}=mv_{n}-mv_{0}$, which is constant and close to parallel to
the piece displacement $\int_{0}^{T}d{\bf x}_{n}\equiv \Delta \sigma_{n}$ over time $T$. Tossing out the term $mv_{0}$ which does not contribute to the A-B shift as it is the same for left and right traverses of the electron, we have for the effective $\Phi^{A}=-\sum_{n}\Delta \sigma_{n}mv_{n}=\sum_{n}\Delta \sigma_{n}/\lambda_{n}$, where $\lambda_{n}$ is the deBroglie
wavelength for the piece.
\section{Application to the Electric A-B Effect.}
\begin{figure}
\caption{ Top: Setup for electric A-B effect, when the plates are together, before and after the electron nears the center of the plates. Bottom:
Illustrating the plates with separation $\approx D$ while the electron moves for a time interval T. (Figures are not to scale, the plates and tubes should be greatly stretched in the $\pm x$-direction.)}
\label{Fig.4}
\end{figure}
Lastly, we turn to discuss the electric A-B effect. This has precisely the same dichotomy as the magnetic A-B effect, two different ways of viewing and calculating the
A-B phase shift. However, here it can be shown more simply (because the Coulomb interaction is a direct particle interaction, not an interaction mediated by an intermediate field as is
the interaction through the vector potential) precisely how this involves two alternative views of the identically same calculation.
Using a simplified model, where the
electron sits in a zero-field environment halfway between two identical point charges, again by a semi-classical argument, Vaidman\cite{Vaidman} showed that, while the A-B shift is obtainable as due to the electron's motion in the field of the charges, it can also be obtained as due to the motion of the charges in the electric field of the electron.
Instead of Vaidman's simplified model, we shall apply our considerations to the usually discussed electric A-B effect setup,
where electron packets passing over and under a charged capacitor are brought together to interfere. According to the usual view, the phase shift
$\Phi_{AB}=-e\sigma DT$ ($e$ is the charge on the electron,
$\sigma$ is the surface charge on the plates, $D$ is the plate separation and $T$ is the time of traverse of the electron---in this section we use rationalized MKS units) is due to the quantized electron moving in the constant potential of the classical capacitor. We shall show that the same phase shift is obtained by consideration of the quantized capacitor plates moving in the electric field of the classical electron.
However, we do more. We consider what happens when both electron and plates are quantized. The Hamiltonian is separable, the wave function is the product of wave functions for the electron and plates, and we show how the phase shift of this product wave function, the A-B electric phase shift, can be viewed as either due to the electron motion or due to
the capacitor plate motion, alternative ways of calculating the same thing.
\subsection{Model}
We consider two very large plates containing charges ``glued down," with uniform charge densities $\pm\sigma$ and area $A$, lying initially on top of one another in the $x-y$ plane at $z=0$ (see Fig.(\ref{Fig.4})).
There are two long hollow frictionless tubes parallel to the
$x$-axis, one at $y=0, z=d$, the other at $y=0, z=-d$, with the entrance and exit of each tube at the plate edges. From the left, an electron approaches a beam splitter
which creates two wave packets of equal amplitude, following which
two mirrors direct the packets into the tubes.
After the packets have travelled well away from the capacitor edges, the two plates are separated, rapidly moved by an external field (over a time interval much shorter than $T$) so that the positive and negative plates
are left at rest at $\pm D/2$ at time $t=0$. The plates are free to move thereafter for a time interval $T$, but it is assumed that they are so massive that their displacement is relatively small compared to $D$ over that interval.
At time $T$, while the electron is still far from the capacitor edge, the plates are rapidly returned to the plane $z\approx 0$ by an external field. (While the plates acquire a phase shift during their separation and rejoining motions, we may take the time scale to be brief enough that it may be neglected compared to the phase shift acquired during the interval $T$.) Eventually the electron reaches the edge of the plates and, by means of two more mirrors reflecting the packets toward each other and another beam splitter,
the packets emerge with their sum in one direction and their difference in another direction, each direction containing a detector.
The Hamiltonian over the time interval $(0, T)$ for electron passage above ($H_{+}$) or below ($H_{-}$) the capacitor is
\begin{equation}\label{75}
H_{\pm}=\frac{P_{e}^{2}}{2m}+\frac{P_{U}^{2}}{2M}+\frac{P_{L}^{2}}{2M}+\frac{1}{2}[\sigma^{2}A\pm e\sigma](Z_{U}-Z_{L}).
\end{equation}
\noindent Here $(X_{e}, P_{e})$ , $(Z_{U}, P_{U})$ and $(Z_{L}, P_{L})$ are the conjugate operators for the electron motion in the $x$-direction, upper plate and lower plate motion in the $z-$direction respectively, and $A$ is the plate area. This gives the force on each capacitor plate as due to the other plate and the electron and, of course, no force on the electron.
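As a check on the signs, Hamilton's equations applied to (\ref{75}) give
\[
\dot P_{e}=0,\qquad
\dot P_{U}=-\frac{1}{2}[\sigma^{2}A\pm e\sigma],\qquad
\dot P_{L}=+\frac{1}{2}[\sigma^{2}A\pm e\sigma],
\]
i.e., equal and opposite constant forces on the two plates and no force on the electron, as stated above.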
\subsection{Calculations.}
One can take the point of view that the capacitor is a classical object, and only the electron should be quantized. Then, one may truncate this Hamiltonian by throwing away the plate
potential energy and kinetic energy operator terms and setting $Z_{U}=D/2, Z_{L}=-D/2$, their classical positions to excellent accuracy. The solution of
Schr\"odinger's equation with the Hamiltonian
\begin{equation}\label{76}
H_{\pm electron}=\frac{P_{e}^{2}}{2m}+\frac{1}{2}[\pm e\sigma]D
\end{equation}
\noindent is easily seen to have the form
\begin{equation}\label{77}
\psi_{\pm}(x,T)=\psi'(x,T)e^{ -i\frac{1}{2}[\pm e\sigma]DT},
\end{equation}
\noindent where $\psi'(x,T)$ is a free-particle wave function (this is, of course, what is obtained from Eq.(\ref{8}) or Eqs.(\ref{13}),(\ref{30}) applied to this problem). $\psi_{+}(x,T)$ ($\psi_{-}(x,T)$ )
is the wave function for the electron packet which passes above (below) the capacitor plates. By inspection, the electric A-B phase shift, the difference of phases for the two traverses,
is seen to be $-e\sigma DT$.
However, one may rewrite the Hamiltonian (\ref{75}) as (omitting the self-energy of the capacitor since it makes the same contribution to the phase for the upper and lower electron traverses),
\begin{equation}\label{78}
H_{\pm}=\frac{P_{e}^{2}}{2m}+\Bigg[\frac{P_{U}^{2}}{2M}\pm \frac{1}{2}e\sigma Z_{U}\Bigg] + \Bigg[\frac{P_{L}^{2}}{2M}\mp \frac{1}{2}e\sigma Z_{L}\Bigg].
\end{equation}
In this form, the wave function is the product of the free electron wave function, associated to no phase shift, and the upper and lower plate wave functions which
do have associated phase shifts. We use our solution Eq.(\ref{8}) in Section II (or Eqs.(\ref{13}),(\ref{30}) in Section III) of the problem of finding the wave function for one-dimensional motion in an electric potential to obtain the wave function in terms of the classical variables:
\begin{equation}\label{79}
\psi_{\pm}(x, z_{U}, z_{L}, T)=\psi_{el}(x)Ne^{-\frac{(z_{U}-D/2)^{2}}{4\sigma'^{2}}}e^{\mp i \frac{1}{2}e\sigma T[z_{U}-D/2]}e^{\mp i\frac{1}{4}eD\sigma T}\cdot
e^{-\frac{(z_{L}+D/2)^{2}}{4\sigma'^{2}}}e^{\pm i \frac{1}{2}e\sigma T[z_{L}+D/2]}e^{\mp i\frac{1}{4}eD\sigma T}.
\end{equation}
\noindent In obtaining (\ref{79}) we have used, for the upper traverse, $p_{U, cl}(T)=-\frac{1}{2}e\sigma T, p_{L, cl}(T)=+\frac{1}{2}e\sigma T$ (the upper and lower plates move toward each other during $T$), $z_{U, cl}(T)\approx D/2, z_{L, cl}(T)\approx -D/2$, and e.g., for the upper traverse situation, $-\int_{0}^{T}dt'V(z_{U}(t'))=-\int_{0}^{T}dt'\frac{e\sigma}{2}\frac{D}{2}=-\frac{1}{4}eD\sigma T$.
Because of the narrowness of the gaussian wave packets in (\ref{79}), $ z_{U}\approx D/2, z_{L}\approx -D/2$, so it is only the last factor in each wave function expression that contributes to the phase shift. For the upper traverse, from $\psi_{+}$, the contribution of both plates is then $2\times(-\frac{1}{4}eD\sigma T)=-\frac{1}{2}eD\sigma T$. For the lower traverse, from $\psi_{-}$, the contribution is $+\frac{1}{2}eD\sigma T$, so the phase difference between the two traverses is $-eD\sigma T$.
It might seem from this solution that we have settled the question as to what causes the phase shift: here the phase shift is all due to the capacitor.
However, one can also write this Hamiltonian as
\begin{equation}\label{80}
H_{\pm}=\Bigg[\frac{P_{e}^{2}}{2m}\pm\frac{1}{2}e\sigma D\Bigg]+\Bigg[\frac{P_{U}^{2}}{2M} \pm\frac{1}{2}e\sigma \Big(Z_{U}-\frac{D}{2}\Big)\Bigg]+ \Bigg[\frac{P_{L}^{2}}{2M} \mp\frac{1}{2}e\sigma \Big(Z_{L}+\frac{D}{2}\Big)\Bigg].
\end{equation}
In this case, the phase shift belongs completely to the electron part of the wave function, and the capacitor plate part has no phase shift associated to it (the wave function for each plate is the wave function in (\ref{79}) with additional phase factors that exactly cancel the phase that arose from the dynamics).
Or, one could add and subtract a constant term in such a way that any fraction of the phase shift belongs to the electron wave function and the rest belongs to the capacitor.
It is clear that these are alternative ways of calculating the same phase shift, with no reason apparent here to prefer one over the other.
\subsection{Electric Force in the Electric A-B Effect}
In the example just discussed, the phase shift in both cases was calculated using potentials. In one case there is the electron in the non-field-producing potential of the capacitor. In the other case
there are the
capacitor plates in the electric field of the electron.
The conceptual contrast, no force vs. force, would be even greater if the phase shift calculation for the capacitor plates was expressed, not
in terms of the potential, but in terms of the electron's electric force on the capacitor plates. This could not be done in the above example because the external force,
required to separate the plates and re-unite them, intrudes.
Accordingly, we consider a modified problem such that an external force does not act. Instead, we suppose that, at $t=0$, the plates which coincide at $z=0$ are suddenly given velocities $\pm v_{0}$ in the $z$-direction.
They then move, under their mutual forces and the force of the electron, to a maximum separation which we shall call $D_{\pm}$ ($D_{+}$ for the upper traverse, $D_{-}$
for the lower traverse), and fall back to $z=0$.
Taking $e$ to be positive for definiteness, we have $D_{+}<D_{-}$. Therefore, for the upper traverse, the plates return to $z=0$ at time $T_{+}$, which is slightly earlier than the time of return $T_{-}$ for the lower electron traverse.
The plates encounter a decelerating potential at $z\approx 0$ designed to slow them down to a near stop and keep them close to each other (an idea introduced by Vaidman\cite{Vaidman} in his example of the electric A-B effect) until after the electron is finally detected. Since
the potentials are $\sim z$ and $z\approx 0$, there is negligible phase shift contribution thereafter. The width of the wave packets is chosen to be large enough that the positive plate wave packets from the two traverses finally overlap almost completely (similarly, of course for the negative plate wave packets),
which is necessary if there is to be maximal interference.
The calculation is governed by the same Hamiltonian we have already discussed, and the details of that calculation are in Appendix D. However, they are not essential to the point to be made here. We illustrate it by considering
the phase contributed by the positively charged plate's motion in the electric field of the electron undergoing the upper traverse using Eq.(\ref{39}), namely,
\begin{eqnarray}\label{81}
\Phi&=&-\int_{0}^{T}dtV(z_{U}(t))=-\int_{0}^{T}dt\int_{0}^{t}dt'\frac{d}{dt'}V(z_{U}(t'))\nonumber\\
&=&-\int_{0}^{T}dt\int_{0}^{t}dt'\frac{dz_{U}(t')}{dt'}\frac{d}{dz_{U}(t')}V(z_{U}(t'))\nonumber\\
&=&\int_{0}^{T}dt\int_{0}^{t}dz_{U}(t')F(z_{U}(t')).
\end{eqnarray}
\noindent (we have utilized $V(0)=0$), where $F(z_{U}(t))$ is the force on the capacitor plate exerted by the electron. Thus, this phase contribution
has been expressed in terms of the work done by the electron on the upper plate.
\section{Concluding Remarks}
Here is a summary of the new results in this paper.
1) Exact solution for the magnitude and phase of a localized wave packet describing a particle moving in a vector and scalar potential corresponding to non-spatially varying forces.
2) Approximate solution for the magnitude and phase of a particle moving in an \textit{arbitrary} vector and scalar potential.
3) Approximate solution of the problem where both electron and solenoid are quantized, employing a variational technique, obtaining Schr\"odinger equations for electron and solenoid, showing how an \textit{extra phase} arises.
4) Application of 2) and 3) to an interference experiment, where one localized particle interacts with a second, or interacts with $N>1$ other localized particles, obtaining the magnitude and phase shift of the interference term.
5) A fully quantum mechanical verification of Vaidman's semi-classical argument that the electron acting on the quantized solenoid particles gives the magnetic A-B phase shift.
6) Answer to the vexing question of why this does not result in twice the usual magnetic A-B phase shift, the phase shift from the solenoid acting on the electron added to the equal phase shift from the electron acting on the solenoid. The answer lies in 2), 3), 4): for the problem of the jointly quantized electron and solenoid, the extra phase provides the negative of these phase shifts.
7) A time-average approach to understanding the contribution of a solenoid piece to the phase shift that gives simple intuition into the nature of the build up of the phase in the magnetic AB effect.
8) Treatment of the electric A-B effect, the exactly soluble problem of interference when a quantized electron interacts with quantized capacitor plates. Verification of Vaidman's semi-classical argument that the electric A-B phase shift arises from the electron exerting forces on quantized capacitor plates. Showing how an extra phase is responsible for providing the electric A-B phase shift, and not twice that value.
We have seen, in both effects, two conceptual pictures, that the A-B phase shift arises
from the electron moving in non-field producing potentials and that
the A-B phase shift arises from the electric force exerted by the
electron (on the solenoid particles or on the charged capacitor
plates) as was argued by Vaidman. As far as our examples are concerned,
there is no reason to prefer one of these conceptual pictures over the
other.
\appendix
\section{Wave function in Section II}\label{A}
In this appendix we do the algebra involved in obtaining the wave function for a charged object, moving on a one dimensional path, subject to a time-dependent electric field.
\subsection{Wave function in the momentum representation}
Putting the momentum space wave function (\ref{6}),
\begin{equation}\label{A1}
\psi(p,t)\sim e^{-(\sigma^{2}+it/2m) p^{2}+\beta(t) p+i\gamma(t)-\beta_{R}^{2}(t)/4\sigma^{2}},
\end{equation}
\noindent with initial conditions $\gamma(0)=0$ and $\beta(0)=2p_{0}\sigma^{2}-ix_{0}$ into the momentum space Schr\"odinger equation (\ref{4}), and equating coefficients of $p$ and 1 (the coefficient of $p^{2}$ agrees on both sides of the equation) yields:
\begin{eqnarray}\label{A2}
i\frac{d}{dt}\beta&=&-\frac{q}{mc} A-2iqV' \sigma^{2}+\frac{qt}{m} V' \nonumber\\
-\frac{d}{dt}\gamma&=&\frac{i}{2\sigma^{2}}\beta_{R}\frac{d}{dt}\beta_{R}+ \frac{q^{2}}{2mc^{2}}A^{2}+iqV'\beta +qg(t)\nonumber\\
&=&\frac{i\beta_{R}}{2\sigma^{2}}\Big[\frac{d}{dt}\beta_{R}+qV'2\sigma^{2}\Big]+\frac{q^{2}}{2mc^{2}}A^{2}-qV'\beta_{I} +qg(t)\nonumber\\
&=&\frac{q^{2}}{2mc^{2}}A^{2}-qV'\beta_{I} +qg(t)
\end{eqnarray}
\noindent where the bracketed term in the next to last equation vanishes because of the imaginary part of the first equation.
\noindent Solving for $\beta$, and putting in the initial conditions, results in
\begin{eqnarray}\label{A3}
\beta_{R}&=&2\sigma^{2}[p_{0}-qU]=2\sigma^{2}p_{cl}(t)\nonumber\\
\beta_{I}&=&-x_{0}+\frac{q}{m}[{\cal A}/c-W].
\end{eqnarray}
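\noindent In more detail, writing $\beta=\beta_{R}+i\beta_{I}$, the imaginary and real parts of the first equation in (\ref{A2}) read
\[
\frac{d}{dt}\beta_{R}=-2q\sigma^{2}V'(t),\qquad
\frac{d}{dt}\beta_{I}=\frac{q}{mc}A(t)-\frac{qt}{m}V'(t),
\]
which, integrated with the initial conditions $\beta_{R}(0)=2p_{0}\sigma^{2}$ and $\beta_{I}(0)=-x_{0}$, give (\ref{A3}), with $U(t)=\int_{0}^{t}dt'V'(t')$, $W(t)=\int_{0}^{t}dt'\,t'V'(t')$ and ${\cal A}(t)=\int_{0}^{t}dt'A(t')$ (consistent with their use in (\ref{A4}) and in Appendix B).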
Now, note that
\begin{eqnarray}\label{A4}
x_{cl}(t)&=&x_{0}+\frac{p_{0}}{m}t-\frac{q}{mc}{\cal A}-\frac{q}{m}[tU(t)-W(t)], \nonumber\\
&=&\frac{t}{m}(p_{0}-qU)-\Big[-x_{0}+\frac{q}{m}({\cal A}/c-W)\Big]=
\frac{t}{2m\sigma^{2}}\beta_{R}-\beta_{I},
\end{eqnarray}
\noindent from which follows
\begin{equation}\label{A5}
\beta_{I}=-x_{cl}(t)+\frac{p_{cl}(t)t}{m}.
\end{equation}
In Eq.(\ref{A2}) for $\gamma$, dropping terms quadratic in the potentials leaves only
\begin{equation}\label{A6}
-\frac{d}{dt}\gamma=qx_{0}V'(t)+qg(t)=qV(t)-qv_{0}tV'(t)\hbox{ or }\gamma=-q\int_{0}^{t}dt'V(t')+qv_{0}W(t).
\end{equation}
\noindent where we have used $g(t)\equiv V(t)-(x_{0}+v_{0}t)V'(t)$.
\subsection{Wave function in the position representation}
The Fourier transform of (\ref{6}) is
\begin{eqnarray}\label{A7}
\psi(x,t) &\sim&\int dpe^{ipx}e^{-(\sigma^{2}+it/2m) p^{2}+\beta(t) p+i\gamma(t)-\beta_{R}^{2}(t)/4\sigma^{2}}\nonumber\\
&=&e^{\frac{(ix+\beta)^{2}}{4[\sigma^{2}+it/2m]}}e^{i\gamma}e^{-\beta_{R}^{2}(t)/4\sigma^{2}} \nonumber\\
&=&e^{\frac{[-(x+\beta_{I})^{2}+\beta_{R}^{2}+2i\beta_{R}(x+\beta_{I})][\sigma^{2}-it/2m]-(\beta_{R}^{2}/4\sigma^{2})4[\sigma^{4}+(t/2m)^{2}]}{4[\sigma^{4}+(t/2m)^{2}]}}e^{i\gamma} .
\end{eqnarray}
\noindent We shall assume that the mass of the object is large enough so
that there is negligible spreading of the wave packet over time $T$, so $\sigma^{4}+(t/2m)^{2}\approx \sigma^{4}$.
\subsubsection{Real part of exponent}
We have, for $4\sigma^{4}$ times the real part of the exponent:
\begin{eqnarray}\label{A8}
&&\hbox{Re}\Bigg[[-(x+\beta_{I})^{2}+\beta_{R}^{2}+2i\beta_{R}(x+\beta_{I})][\sigma^{2}-it/2m]-(\beta_{R}^{2}/4\sigma^{2})4[\sigma^{4}+(t/2m)^{2}]\Bigg]\nonumber\\
&=& -(x+\beta_{I})^{2}\sigma^{2} + 2\beta_{R}(x+\beta_{I})t/2m -(\beta_{R}t/2m\sigma)^{2} \nonumber\\
&=&\sigma^{2}\Big[ -x^{2}+2x[\beta_{R}(t /2m\sigma^{2})-\beta_{I}] - [\beta_{R}(t /2m\sigma^{2})-\beta_{I}]^{2} \Big]\nonumber\\
&=&- \sigma^{2}[x-x_{cl}(t)]^{2}.
\end{eqnarray}
\noindent Thus, the mean position follows the classical trajectory.
\subsubsection{Imaginary part of exponent}
The imaginary part of the exponent, using expressions (\ref{A3}) and (\ref{A5}) for $\beta_{R}$ and $\beta_{I}$ (and omitting $\gamma$ for the moment), is:
\begin{eqnarray}\label{A9}
\hbox{Imag. Part}&=&\Big[[-(x+\beta_{I})^{2}+\beta_{R}^{2}](-t/2m\sigma^{2})+2\beta_{R}(x+\beta_{I})\Big]/4\sigma^{2}\nonumber\\
&=&\frac{t}{8m\sigma^{2}}\frac{(x-x_{cl}(t)+p_{cl}(t)t/m)^{2}}{\sigma^{2}}-\frac{p_{cl}^{2}(t)t}{2m}
+p_{cl}(t)[x-x_{cl}(t)+p_{cl}(t)t/m]\nonumber\\
&=&\frac{t}{8m\sigma^{2}}\frac{(x-x_{cl}(t)+p_{cl}(t)t/m)^{2}}{\sigma^{2}}+\frac{p_{cl}^{2}(t)t}{2m}
+p_{cl}(t)(x-x_{cl}(t))
\end{eqnarray}
\noindent That the first term in (\ref{A9}) is negligible compared to the other two terms follows below from the negligible-spread condition $ t/m\sigma^{2}<<1$ and from the fact that the real part of the exponent restricts $(x-x_{cl}(t))^{2}/\sigma^{2}$ to be of order 1:
\begin{eqnarray} \label{A10}
\hbox{Imag. Part}&=&
\frac{t}{8m\sigma^{2}}\frac{(x-x_{cl}(t))^{2}}{\sigma^{2}}+\frac{p_{cl}^{2}(t)t}{2m}\Bigg[1+\Big[\frac{t}{2m\sigma^{2}}\Big]^{2}\Bigg]+p_{cl}(t)(x-x_{cl}(t))
\Bigg[1+\Big[\frac{t}{2m\sigma^{2}}\Big]^{2}\Bigg]\nonumber\\
&&\approx\frac{t}{8m\sigma^{2}}+\frac{p_{cl}^{2}(t)t}{2m}+p_{cl}(t)(x-x_{cl}(t))\nonumber\\
&&\approx\frac{p_{cl}^{2}(t)t}{2m}+p_{cl}(t)(x-x_{cl}(t)).
\end{eqnarray}
The last approximation arises since,
in order that the momentum of the wave packet be well defined, many oscillations of the wave function should lie within the packet width:
\begin{equation}\label{A11}
\frac{1}{8m\sigma^{2}}<<\frac{p_{cl}^{2}(t)}{2m}=\frac{k_{cl}^{2}}{2m}.
\end{equation}
Combining the exponents in (\ref{A6}), (\ref{A8}) and (\ref{A10}), we have finally arrived at the wave packet expression:
\begin{equation}\label{A12}
\psi(x,t)=Ne^{-\frac{(x-x_{cl}(t))^{2}}{4\sigma^{2}}}e^{ip_{cl}(t)(x-x_{cl}(t))}e^{ip_{cl}^{2}(t)t/2m}e^{-iq\int_{0}^{t}dt'V(t')+iqv_{0}W(t)}.
\end{equation}
\section{General Case Phase of Section III Reduces to Special Case Phase of Section II For Solely Time-Dependent Electric Field}\label{B}
If the force depends only upon time, i.e., if
$V'(x,t)=V'(t)$, $A(x,t)=A(t)$, the phase angle is given by Eq. (\ref{8}) in Section II:
\begin{equation}\label{B1}
\theta(x,t)=p_{cl}(t)[x-x_{cl}(t)]+\frac{t}{2m}p_{cl}^{2}(t)-q\int_{0}^{t}dt'V(t')+v_{0}qW(t),
\end{equation}
\noindent (neglecting some terms squared in the potentials (i.e., $\sim q^{2}$), which is the order we employ).
We wish to show here that the phase expression (\ref{30}) in Section III,
\begin{equation}\label{B2}
\theta(x,t)=p_{cl}(t)[x-x_{cl}(t)]+\int_{0}^{t}dt'\frac{1}{2m}p_{cl}^{2}(t')-q\int_{0}^{t}dt'V(x_{cl}(t'),t'),
\end{equation}
\noindent for the general case of the electric field depending on position and time reduces to (\ref{B1}) when there is only time-dependence.
In that case,
\begin{eqnarray}\label{B3}
p_{cl}(t)&=&mv_{cl}(t)+\frac{q}{c}A(x_{cl}(t),t)\rightarrow mv_{cl}(t)+\frac{q}{c}A(t)=mv_{0}-qU(t)\nonumber\\
V(x_{cl}(t'),t')&\rightarrow &V(t').
\end{eqnarray}
\noindent (The latter is the zeroth order term in the expansion of the scalar potential $V(x,t)$ in $x-x_{cl}(t)$.) Then we have:
\begin{eqnarray}\label{B4}
\int_{0}^{t}dt'\frac{1}{2m}p_{cl}^{2}(t')&=&\int_{0}^{t}dt'\frac{1}{2m}[mv_{0}-qU(t')]^{2}=\frac{m}{2}v_{0}^{2}t-v_{0}q\int_{0}^{t}dt'U(t')\nonumber\\
&=&\frac{m}{2}v_{0}^{2}t-v_{0}q[tU(t)-W(t)]=\frac{1}{2m}p_{cl}^{2}(t)t+v_{0}qW(t).
\end{eqnarray}
\noindent When (\ref{B4}) is inserted into (\ref{B2}), we obtain (\ref{B1}).
\section{Details of the calculations involved in the time-average approach}\label{C}
In section VD, we argued that one gets the correct contribution to the phase associated to the motion of a piece, subjected to the vector potential of the electron, by
recognizing that, effectively, there is always a piece at angle $\phi$ that feels the time-averaged vector potential:
\begin{eqnarray}\label{C1}
\langle A\rangle_{T}&\equiv &\frac{1}{T}\int_{0}^{T}dtA(t)= \frac{1}{T}\int_{0}^{T}dt\frac{e}{cS}\hat{\boldsymbol\phi}\cdot{\bf u}\nonumber\\
&=&\frac{u}{\pi R}\int_{0}^{T}dt(\pm u) e\Big[\frac{\cos(\phi-\phi'(t))}{cD}+\frac{aR\cos^{2}(\phi-\phi'(t))}{cD^{3}}\Big]\nonumber\\
&=&\pm\frac{eu}{cD}\Big[\frac{2}{\pi}\cos\phi +\frac{aR}{2D^{2}}\Big]
\end{eqnarray}
\noindent where $S\equiv\sqrt{a^{2}+R^{2}+z^{2}-2aR\cos(\phi-\phi')}$ has been expanded to first order in $a$, $D\equiv \sqrt{R^{2}+z^{2}}$, $uT=\pi R$ has been employed, as has $d\phi'(t)=\pm\frac{u}{R}dt $ (+ for right traverse where
$\phi'(t)=\frac{u}{R}t-\frac{\pi}{2}$, - for left traverse where $\phi'(t)=-\frac{u}{R}t-\frac{\pi}{2}$).
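For reference, the expansion of $1/S$ used in the second line of (\ref{C1}) is, writing $\psi\equiv\phi-\phi'(t)$,
\[
\frac{1}{S}=\frac{1}{\sqrt{D^{2}+a^{2}-2aR\cos\psi}}\approx\frac{1}{D}+\frac{aR\cos\psi}{D^{3}},
\]
valid to first order in $a/D$.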
Now, since the vector potential is constant along the path of the piece, and noting that the distance traveled by each piece during $T$ is
$v_{0}T=v_{0}\frac{\pi R}{u}$, the phase contributed by a piece at angle $\phi$ is given in Eq.(\ref{73}),
\begin{eqnarray}\label{C2}
\Lambda_{\pm}&=&\int_{path}ds\frac{q}{c}\langle A\rangle_{T}\nonumber\\
&=&\pm v_{0}\frac{eq\pi R}{c^{2}D}\Big[\frac{2}{\pi}\cos\phi +\frac{aR}{2D^{2}}\Big],
\end{eqnarray}
Integrating over all the pieces, the number of pieces in $d\phi\, dz$ being $Q\,d\phi\, dz/(2\pi L q)$ according to Eq.(\ref{64}), gives for the total phase for a path and a cylinder:
\begin{eqnarray}\label{C3}
\hbox{Phase}_{\pm}&=&\frac{Q}{q}\int_{0}^{2\pi}\frac{d\phi}{2\pi}\int_{-L/2}^{L/2}\frac{dz}{L}\Lambda_{\pm}\nonumber\\
&=&\frac{Q}{q}\int_{0}^{2\pi}\frac{d\phi}{2\pi}\int_{-L/2}^{L/2}\frac{dz}{L}\frac{1}{D^{3}}(\pm) v_{0}\frac{eq\pi a R^{2}}{2c^{2}}=\pm\frac{Qev_{0}a\pi}{Lc^{2}}.
\end{eqnarray}
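\noindent In passing to the second line of (\ref{C3}), the $\cos\phi$ term of $\Lambda_{\pm}$ integrates to zero over $\phi$, while the remaining $z$-integral is elementary: for $L>>R$,
\[
\int_{-L/2}^{L/2}\frac{dz}{(R^{2}+z^{2})^{3/2}}=\frac{z}{R^{2}\sqrt{R^{2}+z^{2}}}\Bigg|_{-L/2}^{L/2}\approx\frac{2}{R^{2}}.
\]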
The phase difference between the two paths gives twice the magnitude of this. Accounting for both cylinders requires another factor of 2, resulting in the standard A-B phase shift.
This result is the same as obtained in section VC, where the integral over angle was considered first, followed by the integrals over time: here the integrals are taken in reverse order.
\section{Analysis of the phase shift in the electric A-B effect for the setup discussed in Section VIC}\label{D}
To calculate the phase shift with the Hamiltonian (\ref{78}), we must find the classical motion of the electron and the capacitor plates.
The electron feels no force, and the plates feel a constant force with the potential energy $\frac{\sigma}{2}[\sigma A\pm e][z_{U}-z_{L}]$,
where $z_{U}, z_{L}$ are the upper and lower capacitor plate positions. The upper sign in $\pm$ refers to the electron's traverse above the capacitor (trajectory $A$),
the lower sign refers to the electron's traverse below the capacitor (trajectory $B$).
The solutions of the equations of motion are, for the electron and plates:
\begin{eqnarray}\label{D1}
v_{e}&=&u, \quad x_{e}=ut\nonumber\\
v_{U}&=&-v_{L}=v_{0}-\frac{\sigma}{2M}[\sigma A\pm e]t, \quad z_{U}=-z_{L}= v_{0}t-\frac{\sigma}{4M}[\sigma A\pm e]t^{2}.
\end{eqnarray}
\noindent
The largest separation of the plates for the two possible traverses, and the time it takes the plates to return to $z=0$, are
\begin{equation}\label{D2}
D_{\pm}= \frac{1}{2}v_{0}T_{\pm}, \quad T_{\pm}=\frac{4Mv_{0}}{\sigma[\sigma A\pm e]}.
\end{equation}
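\noindent Explicitly, each plate is farthest from the origin at $t=T_{\pm}/2$, where
\[
z_{U}(T_{\pm}/2)=-z_{L}(T_{\pm}/2)=v_{0}\frac{T_{\pm}}{2}-\frac{\sigma}{4M}[\sigma A\pm e]\frac{T_{\pm}^{2}}{4}=\frac{1}{4}v_{0}T_{\pm},
\]
so that the maximum separation is $D_{\pm}=\frac{1}{2}v_{0}T_{\pm}$, as in (\ref{D2}).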
\noindent Taking $e$ to be positive for simplicity, we have $T_{+}<T_{-}$. We shall take $e<<\sigma A$, so that the time difference is small, but we shall see that it nonetheless has an important effect.
The phase shift is then
\begin{eqnarray}\label{D3}
\Phi^{A}-\Phi^{B}&\equiv&-\int_{0}^{T}dt[V^{A}(t)-V^{B}(t)]=-\int_{0}^{T_{+}}dt\frac{\sigma}{2}[\sigma A+ e][z_{U}(t)-z_{L}(t)]_{+}+\int_{0}^{T_{-}}dt\frac{\sigma}{2}[\sigma A- e][z_{U}(t)-z_{L}(t)]_{-}\nonumber\\
&=&-\frac{\sigma e v_{0}}{6}[T_{-}^{2}+T_{+}^{2}]+\frac{\sigma^{2}Av_{0}}{6}[T_{-}^{2}-T_{+}^{2}]\nonumber\\
&\approx& -\frac{1}{3}\sigma e v_{0}{\bar T}^{2}+\frac{2}{3}\sigma e v_{0}{\bar T}^{2}=\frac{1}{3}\sigma e v_{0}{\bar T}^{2}.
\end{eqnarray}
\noindent where we have written ${\bar T}\equiv\frac{4Mv_{0}}{\sigma^{2}A}$ and the approximation is to drop terms $\sim e/\sigma A$ compared to 1.
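Explicitly, $T_{\pm}={\bar T}/(1\pm e/\sigma A)\approx{\bar T}(1\mp e/\sigma A)$, so that to first order in $e/\sigma A$
\[
T_{-}^{2}+T_{+}^{2}\approx 2{\bar T}^{2},\qquad
T_{-}^{2}-T_{+}^{2}\approx \frac{4e}{\sigma A}{\bar T}^{2},
\]
which, inserted into the second line of (\ref{D3}), yields the two terms in its last line.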
The first term in the second line of (\ref{D3}) is the negative phase shift due to the plates moving in the potential of the electron. The second term in that line
is a positive phase shift due to one plate moving in the potential of the other. That phase shift would vanish if the plates spent the same amount of time on their
trajectories when the electron was above them as below them. However, that is not the case, due to the force of the electron on the plates.
Perhaps surprisingly, the
net phase shift is positive. This is because, although the time difference in travel is relatively small, the self-potential energy of the plates is relatively large compared to their potential energy
in the field of the electron, and the phase shift due to the larger potential energy wins out.
Again, as in section VIB, although it appears here that the phase shift is totally due to the motion of the plates under the force of the electron, one can add and subtract a term from the
Hamiltonian (\ref{78}), rewriting it as
\begin{equation}\label{D4}
H_{\pm}=\Bigg[\frac{P_{e}^{2}}{2m}+\frac{\sigma}{2}[\sigma A\pm e][z_{U}(t)-z_{L}(t)]_{\pm}\Bigg]+\Bigg[\frac{P_{U}^{2}}{2M} + \frac{\sigma}{2}[\sigma A\pm e][Z_{U}-z_{U}(t)]_{\pm}\Bigg]+
\Bigg[\frac{P_{L}^{2}}{2M} - \frac{\sigma}{2}[\sigma A\pm e][Z_{L}-z_{L}(t)]_{\pm}\Bigg].
\end{equation}
Now the electron part of the Hamiltonian has in it the time varying, spatially constant potential it sees, and this is completely responsible for the phase shift, while the capacitor plate part of the Hamiltonian
makes no contribution to the phase shift.
\acknowledgments{ We would like to thank Lev Vaidman for very helpful conversations. We are also very grateful to
an anonymous referee who insisted that our previous assertion, that the interacting quantized electron and solenoid results in
twice the A-B phase shift, was wrong, leading us to greater effort and to the resolution presented in this paper.}
\end{document}
\begin{document}
\newcommand{\TV}[1]{\operatorname{TV}(#1)}
\newcommand{\isoto}{\overset{\sim}{\to}}
\newcommand{\ptxset}[2]{(#1,#2)}
\newcommand{\mins}[2]{{\min \langle #1, #2 \rangle}}
\title{Toroidal embeddings and polyhedral divisors}
\author[R. Vollmert]{Robert Vollmert}
\address{Fachbereich Mathematik und Informatik,
Freie Universit\"at Berlin,
Arnimalle 3,
14195 Berlin,
Germany}
\email{[email protected]}
\subjclass[2000]{Primary 14M25,
Secondary 14L30}
\begin{abstract}
Given an effective action of an $(n-1)$-dimensional torus on an
$n$-dimensional normal affine variety, Mumford constructs a
toroidal embedding, while Altmann and Hausen give a description in
terms of a polyhedral divisor on a curve. We compare the fan of the
toroidal embedding with this polyhedral divisor.
\end{abstract}
\maketitle
\section*{Introduction}
Suppose $X$ is an $n$-dimensional normal affine variety over the complex
numbers with an effective action by the $(n-1)$-dimensional torus $T$.
With $T \cong (\mathbb{C}^*)^{n-1}$, we associate the lattice $M \cong \mathbb{Z}^{n-1}$
of characters and the dual lattice $N = \operatorname{Hom}(M, \mathbb{Z})$ of
one-parameter subgroups. The action defines the weight cone $\omega$
in $M$ generated by the degrees of semi-invariant functions on $X$ and
the dual cone $\sigma$ in $N$. Effectivity of the action translates to
the fact that $\omega$ is full-dimensional and $\sigma$ is pointed.
\begin{notation}
A cone $\delta$ ``in'' a lattice $N$ is really a subset of the vector
space $N_\mathbb{Q} = N \otimes \mathbb{Q}$. The toric variety associated with this cone
will be denoted by $\TV\delta$.
\end{notation}
Our goal is to compare two sets of combinatorial data associated with
$X$. Mumford~\cite[Chapter 4, \S 1]{kkms} takes a rational quotient map
$p$ from $X$ to a complete nonsingular curve $C$. He defines $X^{\prime\prime}$
to be the
normalization of the graph of $p$ and shows that for certain open
subsets $U$ of $C$, we obtain a toroidal embedding $(U \times T,
X^{\prime\prime})$. This determines a combinatorial datum, namely the
toroidal fan $\Delta(X,U)$. It is a collection of cones in
\emph{different} lattices $\mathbb{Z} \times N$, one for each point $P \in C
\setminus U$, glued along their common face in $\ptxset{0}{N}$.
Altmann and Hausen~\cite{ppdiv} construct a divisor $\mathcal{D}$ with polyhedral
coefficients on a nonsingular curve $Y$; this divisor determines a
$T$-variety $\widetilde{X}$, affine over $Y$, which contracts to $X$. Here,
$\mathcal{D}$ is of the form $\sum_{P \in Y} \Delta_P \otimes P$, where
the $\Delta_P$ are polyhedra in $N_\mathbb{Q}$ with tail cone
$\sigma$, only finitely many nontrivial.
To compare these data, we note that the curve $Y$ is an open subset of
$C$, namely the image of the map $\pi \colon X^{\prime\prime} \to C$. In
fact, the varieties $\widetilde{X}$ and $X^{\prime\prime}$ agree, which allows us
to describe $\Delta(X,U)$ in terms of $\mathcal{D}$. Defining the
homogenization of a polyhedron $\Delta \subset N_\mathbb{Q}$ with tail
$\sigma$ to be the cone in $\mathbb{Z} \times N$ generated by
$\ptxset{1}{\Delta}$ and $\ptxset{0}{\sigma}$, we obtain the following
result.
\newcommand{\theoremtext}{
The toroidal fan $\Delta(X,U)$ is equal to the fan obtained by
gluing the homogenizations of the coefficient polyhedra $\Delta_P$
of points $P \in Y \setminus U$ along their common face
$\ptxset{0}{\sigma}$.
}
\begin{theoremmain}
\theoremtext
\end{theoremmain}
In Section~\ref{sec:toroidal}, we recall relevant facts about toroidal
embeddings and summarize the construction of the embedding $(U \times
T, X^{\prime\prime})$. Section~\ref{sec:polyhedral} contains some details
about polyhedral divisors on curves. Finally, we present the proof of
Theorem~1 in Section~\ref{sec:comparison}.
\section{Toroidal interpretation} \label{sec:toroidal}
\subsection*{Toroidal embeddings}
A toroidal embedding~\cite[Chapter 2]{kkms} is a pair $(U,X)$ of a
normal variety $X$ and an open subset $U \subset X$ such that for each
point $x \in X$, there exists a toric variety $(H,Z)$ with embedded
torus $H \subset Z$ which is locally formally isomorphic at some
point $z \in Z$ to $(U,X)$ at $x$. We will further assume that the
components $E_1,\dotsc,E_r$ of $X \setminus U$ are normal, i.e., that all
toroidal embeddings are ``without self-intersection''.
The components of the sets
$\cap_{i \in I} E_i \setminus \cup_{i \not\in I} E_i$
for all subsets $I \subset \{1,\dotsc,r\}$ give a stratification of
$X$. The \emph{star} of a stratum $Y$ is defined to be the union of
strata $Z$ with $Y \subset \overline{Z}$. Given a stratum $Y$, we
have the lattice $M_Y$ of Cartier divisors on the star of $Y$ with
support in the complement of $U$. The submonoid of effective divisors
is dual to a polyhedral cone $\sigma_Y$ in the dual lattice $N_Y$.
If $Z \subset \operatorname{star}(Y)$ is a stratum, its cone $\sigma_Z$ is a face of
$\sigma_Y$. The \emph{toroidal fan} of the embedding $(U,X)$ is the union of
the cones $\sigma_Y$ glued along common faces.
\begin{remark}
A toroidal fan differs from a conventional fan only in that it lacks a
global embedding into a lattice.
\end{remark}
Below, we will use the fact that an \'etale morphism $(U,\operatorname{star}(Y)) \to
(H,\TV{\delta})$ induces an isomorphism $\sigma_Y \isoto \delta$
of lattice cones.
\subsection*{Toroidal embeddings for torus actions}
We return to the $T$-variety $X$ and summarize Mumford's
description~\cite[Chapter 4, \S 1]{kkms}. There is a canonically
defined rational quotient map $p \colon X \dashrightarrow C$ to a
complete nonsingular curve $C$. Sufficiently small invariant open
sets $W \subset X$ split as $W \cong U \times T$ for some open set $U
\subset C$, where the first projection $U \times T \to U$ corresponds
to $p$. We will identify $U \times T$ with $W$.
We define $X^\prime$ to be the closure of the graph of the rational
map $p$ in $X \times C$, and $X^{\prime\prime}$ to be its normalization. The
action of $T$ on $X$ lifts to $X^{\prime\prime}$. We may consider $U \times
T$ as an open subset of $X^{\prime\prime}$; the projection to $U$ now
extends to a regular map $\pi \colon X^{\prime\prime} \to C$.
After possibly replacing $U$ by an open subset, we are in the
following situation: Let $P \in C \setminus U$ be a point in the
complement of $U$. The sets $U$, $U^\prime = U \cup \{P\}$ and
$\pi^{-1}(U^\prime)$ are affine with coordinate rings $R$, $R^\prime$
and $S$, respectively. We may regard $S$ as a subring of $R \otimes
\mathbb{C}[M]$ which is generated by homogeneous elements with respect to the
$M$-grading. Denoting by $s$ a local parameter at $P \in C$, the ring
$S$ is generated over $R^\prime$ by a finite number of monomials
$s^k\chi^u$.
The corresponding semigroup in $\mathbb{Z} \times M$ and its dual cone
$\delta_P$ in $\mathbb{Z} \times N$ define a toric variety
$Z = \TV{\delta_P}$. The monomial generators of $S$ define an \'etale map
$\pi^{-1}(U^\prime) \to Z$ which shows that the embedding $(U \times
T, \pi^{-1}(U^\prime))$ is toroidal with cone isomorphic to
$\delta_P$. By considering all points $P \in C \setminus U$, we see
that $(U \times T, X^{\prime\prime})$ is a toroidal embedding.
\begin{theoremcite}[{\cite[Chapter 4, \S 1]{kkms}}] \label{thm:toroidal}
The embedding $(U \times T, X^{\prime\prime})$ is toroidal. Its fan
$\Delta(X,U)$ consists of the cones $\delta_P$ glued along the
common face $\delta_P \cap \ptxset{0}{N_\mathbb{Q}}$.
\end{theoremcite}
\begin{remark} \label{rem:complement}
This common face is $\sigma \subset N_\mathbb{Q}$ and corresponds to
$\pi^{-1}(U)$, an open subset of each $\pi^{-1}(U^\prime)$. For
points $P$ that lie outside the image of $\pi$, we have
$\pi^{-1}(U^\prime) = \pi^{-1}(U)$, hence the cone $\delta_P$ is
equal to $\ptxset{0}{\sigma}$.
\end{remark}
\begin{remark} \label{rem:canonical}
Given $U \subset C$, the constructed toroidal fan $\Delta(X,U)$ is
independent of the choice of equivariant isomorphism $U \times T \cong
W$. It does however depend on the choice of $U$.
If we don't require that there be an \'etale model for the whole of
$\pi^{-1}(U^\prime)$, we can enlarge $U$ to form a canonical
embedding $(V \times T, \widetilde{X})$. Here, $V$ is obtained by adding
to any $U$ as above all points $P$ with a toric model that splits as
$Z = \mathbb{A}^1 \times F$, where $F = \TV{\sigma}$ is the generic fiber of
$\pi$. That is, the points $P$ with cone $\delta_P$ isomorphic to
$\sigma \times \mathbb{Q}_{\ge 0}$.
\end{remark}
\begin{example}
The affine threefold $X = \operatorname{SL}(2,\mathbb{C}) = \operatorname{Spec} \mathbb{C}[a,b,c,d]/(ad-bc-1)$ admits a
two-dimensional torus action by defining
\begin{equation*}
(t_1,t_2) \cdot \begin{pmatrix} a & b \\ c & d \end{pmatrix} =
\begin{pmatrix} t_1a & t_2b \\ t_2^{-1}c & t_1^{-1}d\end{pmatrix}.
\end{equation*}
It admits a quotient morphism $\pi \colon X \to \mathbb{A}^1 = \operatorname{Spec} \mathbb{C}[s]$
with $s \mapsto ad$. Let $W$ be the open subset of matrices with no
vanishing entries. With $U = \mathbb{A}^1 \setminus \{0,1\}$, we get an
isomorphism $W \cong U \times T$ by mapping $t_1 \mapsto a$ and
$t_2 \mapsto b$.
We consider $P = 0$, so $U^\prime = \mathbb{A}^1 \setminus \{1\}$. The
coordinate ring of $\pi^{-1}(U^\prime)$ is generated over
$\mathbb{C}[s]_{(s-1)}$ by $t_1$, $st_1^{-1}$ and $t_2^{\pm1}$. Thus
$\delta_0$ is generated by $(1,0,0)$ and $(1,1,0)$. Similarly,
$\delta_1$ is generated by $(1,0,0)$ and $(1,0,1)$, as shown in
Figure~\ref{fig:sl2}. The fan $\Delta(X,U)$ is obtained by gluing
these two cones at the vertex.
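In coordinates on $\mathbb{Z} \times M$, the monomial generators above correspond to
\begin{equation*}
t_1 = \chi^{(1,0)} \mapsto (0,1,0), \quad
st_1^{-1} \mapsto (1,-1,0), \quad
t_2^{\pm1} \mapsto (0,0,\pm1),
\end{equation*}
and together with $s \mapsto (1,0,0)$ they span a cone in $\mathbb{Z} \times M_\mathbb{Q}$ whose
dual is $\delta_0 = \operatorname{pos}\{(1,0,0), (1,1,0)\}$; the computation of $\delta_1$ at
$P = 1$ is analogous, with the roles of $t_1$ and $t_2$ exchanged.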
\begin{figure}
\caption{Cones and polyhedra for $\operatorname{SL}(2,\mathbb{C})$.}
\label{fig:sl2}
\end{figure}
\end{example}
\section{Polyhedral divisors on curves} \label{sec:polyhedral}
We turn to the construction and relevant properties of proper
polyhedral divisors on curves, restating results of Altmann and
Hausen~\cite{ppdiv} in the setting of codimension one actions.
Given a cone $\sigma$ in $N$, the set of polyhedra with tail cone
$\sigma$
\begin{equation*}
\operatorname{Pol}_\sigma^+ = \{ \Delta \subset N_\mathbb{Q} \mid \Delta = \Pi + \sigma
\text{ for some compact polytope } \Pi \}
\end{equation*}
forms a semigroup under Minkowski addition. It is embedded in the
group of differences $\operatorname{Pol}_\sigma$; the neutral element is $\sigma$. A
divisor $\mathcal{D} \in \operatorname{Pol}_\sigma \otimes \operatorname{CaDiv}(Y)$ on a smooth curve
$Y$ is called a \emph{polyhedral divisor}. Under
certain positivity assumptions ($\sum \Delta_P \subsetneq \sigma$ is
almost the right condition, see~\cite[Example 2.12]{ppdiv}), $\mathcal{D}$
is called \emph{proper}. We may express it as
\begin{equation*}
\mathcal{D} = \sum \Delta_P \otimes P,
\end{equation*}
where the sum ranges over all prime divisors of $Y$, and all but
finitely many of the polyhedra $\Delta_P$ are equal to $\sigma$.
A proper polyhedral divisor defines an affine $T$-variety. Each weight
$u$ in the weight monoid $\omega \cap M$ gives a $\mathbb{Q}$-divisor $\mathcal{D}(u)$
on $Y$ by
\begin{equation*}
\mathcal{D}(u) = \sum \mins{u}{\Delta_P} \cdot P.
\end{equation*}
This allows us to define an $M$-graded sheaf $\mathcal{A}$ of
$\mathcal{O}_Y$-algebras by setting $\mathcal{A}_u = \mathcal{O}_Y(\mathcal{D}(u))$.
We denote by $\widetilde{X}$ the relative
spectrum $\operatorname{Spec}_Y(\mathcal{A})$ and by $X = \mathcal{X}(\mathcal{D})$ its
affine contraction $\operatorname{Spec}\Gamma(Y, \mathcal{A})$.
We summarize the relevant results on proper polyhedral divisors.
\begin{theoremcite}[{\cite[Theorem 3.4]{ppdiv}}] \label{thm:ppdiv:exist}
Given a $T$-variety $X$ as above, there is a curve $Y$ and a proper
polyhedral divisor $\mathcal{D}$ on $Y$ such that the associated
$T$-variety $\mathcal{X}(\mathcal{D})$ is equivariantly isomorphic to
$X$.
\end{theoremcite}
\begin{theoremcite}[{\cite[Theorem 3.1]{ppdiv}}] \label{thm:ppdiv:props}
Let $X$ and $\widetilde{X}$ be given by a proper polyhedral divisor on the
curve $Y$.
\begin{enumerate}
\item The contraction map $\widetilde{X} \to X$ is proper and birational.
\item The map $\pi \colon \widetilde{X} \to Y$ is a good quotient for the
$T$-action on $\widetilde{X}$; in particular, it is affine.
\item \label{thm:ppdiv:affine} There is an affine open subset $U
\subset Y$ such that the contraction map restricts to an
isomorphism on $\pi^{-1}(U)$.
\end{enumerate}
\end{theoremcite}
\begin{example}
A polyhedral divisor for the torus action on $X = \operatorname{SL}(2,\mathbb{C})$ is computed
easily by considering the closed embedding in the toric variety
$\operatorname{Mat}(2 \times 2, \mathbb{C}) \cong \mathbb{A}^4$. The toric
computation~\cite[Section 11]{ppdiv} shows that $\mathbb{A}^4$
with the induced $(\mathbb{C}^*)^2$-action may be described by the
divisor $\mathcal{D}^\prime = \Delta_1 \otimes D_1
+ \Delta_2 \otimes D_2$ on $\mathbb{A}^2$, where
$D_i = \operatorname{div}(x_i)$ are the coordinate axes and
$\Delta_i = \operatorname{conv}\{0,e_i\}$.
The image of $X$ in $\mathbb{A}^2$ is the line through $(1,0)$ and $(0,1)$.
Hence, $\mathcal{D}^\prime$ restricts to the divisor
$\mathcal{D} = \Delta_1 \otimes [0] + \Delta_2 \otimes [1]$
on $\mathbb{A}^1$.
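For instance, evaluating $\mathcal{D}$ at a weight $u = (u_1,u_2) \in M$ gives the
$\mathbb{Q}$-divisor
\begin{equation*}
\mathcal{D}(u) = \min\{0,u_1\} \cdot [0] + \min\{0,u_2\} \cdot [1]
\end{equation*}
on $\mathbb{A}^1$; for example, $\mathcal{A}_{(-1,0)} = \mathcal{O}_{\mathbb{A}^1}(-[0])$ has
global sections $s\,\mathbb{C}[s]$, a free module of rank one over the invariant ring $\mathbb{C}[s]$.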
\end{example}
\section{Comparison} \label{sec:comparison}
We now compare the toroidal and polyhedral data associated with
a $T$-variety $X$. By Theorem~\ref{thm:ppdiv:exist},
we may assume $X$ is given by a polyhedral divisor $\mathcal{D}$ on a
curve $Y$, contained in the complete curve $C$. As above, we have
the $T$-variety $\widetilde{X}$ with the quotient map $\pi$ to $Y$ and
the contraction to $X$.
Denote the open subset of points $P$ with trivial coefficient
$\Delta_P = \sigma$ by $V$. Then for any open subset $U \subset V$, we
have
\begin{equation*}
\pi^{-1}(U) = \operatorname{Spec}_U \mathcal{O}_U \otimes \mathbb{C}[\omega \cap M]
= U \times \TV{\sigma}.
\end{equation*}
In particular, $U \times T$ is an open subset of
$\widetilde{X}$. By part~\ref{thm:ppdiv:affine}
of Theorem~\ref{thm:ppdiv:props}, we may regard $U \times T$ as a
subset of $X$ after possibly shrinking $U$. The projection to $U$
gives the required rational quotient map $X \dashrightarrow C$.
We get varieties $X^\prime$ and $X^{\prime\prime}$ as before and note the
following fact.
\begin{lemma}\label{lemma:XtilXdprime}
$\widetilde{X}$ is canonically isomorphic to $X^{\prime\prime}$.
\end{lemma}
\begin{proof}
It follows from the construction of $X^{\prime\prime}$ that the maps
$\widetilde{X} \to X$ and $\widetilde{X} \to C$ factor through a map $\varphi\colon
\widetilde{X} \to X^{\prime\prime}$. Since both maps to $X$ are proper, so is
$\varphi$. Since both maps to $C$ are affine, so is $\varphi$. Since
$\varphi$ is also birational, it is an isomorphism.
\end{proof}
Now for suitable $U$, we saw above that $(U \times T, \widetilde{X})$ is a
toroidal embedding with fan $\Delta(X,U)$. We recall the statement
of our claim.
\begin{theoremmain}
\theoremtext
\end{theoremmain}
To see this, consider $P \in Y \setminus U$ and $U^\prime = U \cup
\{P\}$ with local parameter $s$ at $P$. Since $\mathcal{D}_{|U}$ is
trivial, we have $\mathcal{D}_{|U^\prime} = \Delta_P \otimes P$. The
graded parts of $\mathcal{A} = \bigoplus_{u \in \omega \cap M} \mathcal{A}_u$ are
thus
\begin{equation*}
\mathcal{A}_u = \mathcal{O}_{U^\prime}\big(\mathcal{D}_{|U^\prime}(u)\big)
= \mathcal{O}_{U^\prime}\big(\mins{u}{\Delta_P} \cdot P\big)
= \mathcal{O}_{U^\prime}\big(\lfloor \mins{u}{\Delta_P}\rfloor \cdot P\big).
\end{equation*}
Hence, we can express the graded parts of the coordinate ring $S$
of $\pi^{-1}(U^\prime)$ as
\begin{equation*}
S_u = \Gamma\big(U^\prime, \mathcal{O}_{U^\prime}(\mathcal{D}(u))\big)
= R^\prime \cdot s^{-\lfloor\mins{u}{\Delta_P}\rfloor}.
\end{equation*}
It follows that
the monomial semigroup of the toric model consists of the pairs
$(k,u) \in \mathbb{Z} \times M$ with $k \ge -\mins{u}{\Delta_P}$. By
Lemma~\ref{lemma:homog} below, we see that $\delta_P$ is the homogenization
of $\Delta_P$. As Remark~\ref{rem:complement} implies that points in the
complement of $Y$ don't contribute to $\Delta(X,U)$, the proof is
complete.
\begin{lemma}\label{lemma:homog}
Let $\Delta$ be a polyhedron in $N$ with tail cone $\sigma$. Let
$\delta$ in $\mathbb{Z} \times N$ be its homogenization, i.e.,
$\delta = \operatorname{pos}\{(0,\sigma), (1,\Delta)\}$. Then the dual cone
$\delta^\vee$ consists of those pairs $(r,u) \in \mathbb{Q} \times M_\mathbb{Q}$
with $u \in \sigma^\vee$ and $r \ge -\mins{u}{\Delta}$.
\end{lemma}
\begin{proof}
By definition, we have $(r,u) \in \delta^\vee$ if and only if $(r,u)$
is non-negative on both $(0,\sigma)$ and $(1,\Delta)$. The first
condition is equivalent to $u \in \sigma^\vee$. The second condition
means that $r \ge -\langle u,v \rangle$ for any $v \in \Delta$, that is,
$r \ge -\mins{u}{\Delta}$.
\end{proof}
\begin{example}
For the example of $\operatorname{SL}(2,\mathbb{C})$, clearly the homogenizations of the
segments $\operatorname{conv}\{0,e_i\}$ give the cones $\delta_0$, $\delta_1$ generated by
$(1,0)$ and $(1,e_i)$. This is illustrated in Figure~\ref{fig:sl2}.
\end{example}
\begin{remark}
Both descriptions generalize to the non-affine case. Mumford treats
this directly, while the polyhedral approach involves the fans of
polyhedral divisors developed by Altmann, Hausen and
S\"u\ss{}~\cite{fansydiv}. It should be straightforward to carry this
result over.
\end{remark}
\end{document}
\begin{document}
\def\spacingset#1{\renewcommand{\baselinestretch}
{#1}\small\normalsize} \spacingset{1}
\if00
{
\title{\bf Combined Analysis of Amplitude and Phase Variations in Functional Data}
\author{Sungwon Lee
and
Sungkyu Jung \\
Department of Statistics, University of Pittsburgh}
\maketitle
} \fi
\if10
{
\bigskip
\bigskip
\bigskip
\begin{center}
{\LARGE\bf Combined Analysis of Amplitude and Phase Variations in Functional Data}
\end{center}
} \fi
\bigskip
\begin{abstract}
When functional data manifest both amplitude and phase variations, a commonly employed framework for analyzing them is to remove the phase variation through function alignment and then to apply standard tools to the aligned functions. A downside of this approach is that the potentially important variation contained in the phases is ignored entirely. To combine the amplitude and phase variations, we propose a variant of principal component analysis (PCA) that captures non-linear components representing the amplitude, the phase and their association simultaneously. The proposed method, which we call functional combined PCA, aims to provide more efficient dimension reduction with interpretable components, in particular when the amplitudes and phases are clearly associated. We model principal components by non-linearly combining time-warping functions and aligned functions. A data-adaptive weighting procedure helps the dimension reduction attain maximal explanatory power for the observed functions. We also discuss an application of functional canonical correlation analysis to investigating the correlation structure between the two variations. We show for two real data sets that the proposed method provides interpretable major non-linear components, which are not typically found by the usual functional PCA.
\end{abstract}
\noindent
{\it Keywords:} Functional data; principal component analysis; amplitude variation; phase variation; manifold; exponential map.
\spacingset{1.45}
\section{Introduction}
\label{sec:intro}
Functional data are frequently encountered in modern sciences \citep{1}. When functional data consist of repeated measurements of a common activity or development over time, they often show a similar pattern of progression, which can be understood as a combination of two types of variations, called amplitude and phase variations. When the phase variation resides in functional data, a naive application of standard tools such as the pointwise mean and variance, and functional principal component analysis (FPCA) tends to yield misleading results \citep{3}. Curve registration (or function alignment) has been routinely performed to disregard the phase differences \citep[\emph{cf}.][]{9}. Recently, several researchers have pointed out that the phase variation also contains important information \citep{14,16,47,48,44}.
A prominent example where the phase variation is commonly observed is growth curves \citep{chen2012nonlinear,park2016clustering}. For example, the growth rate curves
from the well-known Berkeley study \citep{6} share common events such as the pubertal growth spurt and maturity. Visual inspection of this data set reveals that the curves develop these events with varying magnitudes of heights (amplitude variation) and at varying temporal paces (phase variation), as shown in Fig.~\ref{fig:1}.
Moreover, these two types of variations are clearly associated to each other; individuals who reach the phase of pubertal growth spurt (corresponding to the main peak of curves) later in their ages tend to show smaller maximum pubertal growth rates.
This important major association is not captured in the application of FPCA to the original data or to the aligned data (see Fig.~\ref{fig:1}(b) and (c)).
\begin{figure}[t!]
\centering
\includegraphics[width = 1\textwidth]{berkeley_example.png}
\caption{(a) Velocity curves from the Berkeley data set, boys only.
(b) The first component, shown as the mean (black) and $\pm$2 standard deviations (red, blue), from the ordinary functional principal component analysis (FPCA). The resulting mode of variation is not easy to interpret.
(c) The first component of the aligned data contains no information of the apparent phase variation. (d) The first combined PC of FCPCA, the proposed method, applied to the raw data. The non-linear major variation in the data shown in (a) is well-captured by the proposed method.}
\label{fig:1}
\end{figure}
In this paper, we propose a principal component analysis for the original, unregistered data, combining the two types of variations into one. The principal components (PCs) obtained from the proposed method, which we call \emph{functional combined principal component analysis (FCPCA)}, effectively capture all of the amplitude and phase variations, including their associations. In Fig.~\ref{fig:1}, an advantage of FCPCA is exemplified for the growth data, where the dominant association between the amplitude and phase variations is well-captured in the first combined PC.
We assume that the observation $f_i$ is composed of an amplitude function, $y_i$, and a time-warping function, $\gamma_i$, and that the observed functions can be well-aligned by time-warping functions. Our method is developed for a particular class of time-warpings, denoted by $\Gamma$, consisting of orientation-preserving diffeomorphisms of the unit interval $[0, 1]$, as developed and used in \citet{srivastava2007riemannian,13,14,16,22}.
For the combined analysis of amplitude and phase functions, we further define a bijection, denoted by $\phi$, between $\Gamma$ and a convex subset of the function space. This step enables us to apply standard linear functional operations to $x_i := \phi(\gamma_i)$.
In our FCPCA framework, we assume that the \emph{combined} random function $(y_i,x_i)$ can be represented as a linear combination of orthogonal functions. The Karhunen-Lo\`{e}ve transformation of this function is simply the FPCA in the combined function space, the components of which are then mapped back to the original function space (in which $f_i$ lies). For estimation of the functional combined components, we use a function alignment method to obtain predictions of $y_i$ and $\gamma_i$, denoted by $\hat{y}_i$ and $\hat\gamma_i$. The resulting functions $\hat{y}_i$ and $\phi(\hat\gamma_i)$ are then joined together, to which a standard functional PCA is applied.
These two functions are adaptively weighted so that the resulting combined PCs achieve the maximal explaining power of the observed functions.
The result is represented and visualized in the original function space, which can be used to aid interpretation of each principal component.
We also demonstrate a use of the functional canonical correlation analysis in the detection of maximally correlated components between the amplitude and phase functions.
In recent years, there have been a few attempts to analyze the phase variations. In particular, the phase variations are used in segmentation of periodic signals \citep{14}, clustering \citep{15}, functional regression \citep{46,47,48} and classification \citep{16}. In \citet{13,22,14,16}, the Fisher-Rao function alignment is used to obtain time-warping functions, and the authors suggest several different approaches of analyzing the phase variation through the time-warping functions. They, however, did not discuss the association between two types of variations. While we use the Fisher-Rao alignment as used in \citet{16}, the ``composite FPCA'' of \citet{16} is less efficient than our proposal when the amplitude and phase functions are linearly associated.
Analyses combining the phase and amplitude variations have been reported more recently in \cite{47}, where the authors used a log transformation for phase functions (thus making use of compositional data analysis). In contrast, we use the transformation $\phi$ to take advantage of the well-developed tools of conventional functional data analysis. Moreover, \cite{47} used a linear functional model consisting of individual principal component scores from each of amplitude and phase functions, which can be viewed as a two-step approach. In contrast, we directly combine the two functions using data-adaptive weights, for the purpose of dimension reduction through non-linear principal components. Finally, \citet{chen2012nonlinear} proposed a nonparametric dimension reduction using manifold learning. Our model-based approach is conceptually different from the nonparametric approach of finding nonlinear submanifolds in \citet{chen2012nonlinear}.
The rest of the paper is organized as follows. In Section \ref{sec:2 Models}, we formally define a population structure to model the amplitude, phase and their association, and introduce our two models, functional combined PCA and CCA. Estimation of the model parameters and the data-adaptive choice of weights are discussed in Section \ref{sec:estimation}.
In Section~\ref{sec:real data}, the advantages of the proposed methods are demonstrated in analyses of two real data sets, and in Section \ref{sec:numerical studies} several simulation studies are reported.
\section{Models} \label{sec:2 Models}
\subsection{Decomposition into two variations} \label{sec:2.1Decomp}
We consider a smooth random function $f$ that inherently contains amplitude and phase variations and is composed of two random functions $y$ and $\gamma$:
\begin{equation}
f(t) = (y \circ \gamma) (t) = y(\gamma(t)), \quad t \in [0,1].
\label{equ:1}
\end{equation}
We restrict the domain of $f$ to be $[0,1]$ without loss of generality. The amplitude function $y$ is assumed to be a smooth square-integrable function on $[0,1]$, i.e.,
$ y \in L_{2}[0,1] := \{ h: [0,1] \mapsto R \mid E\| h \|_2^2 < \infty \}$.
The time-warping function $\gamma$ is an orientation-preserving diffeomorphism on $[0,1]$ and lies in
\begin{align*}
\Gamma = \{ h: [0,1] \mapsto [0,1] \mid h(0) = 0, \,\, h(1) = 1, \,\, h^{\prime}(t) > 0, \, t \in (0,1) \} \subset L_{2}[0,1].
\end{align*}
In other words, $\Gamma$ is the set of cumulative distribution functions of absolutely continuous random variables with support on $[0,1]$.
Note that the endpoint constraints restrict the warping of $f$ to the given interval, and the positive derivative constraint does not allow the warps to travel back in time. For any $\gamma \in \Gamma$, the inverse function $\gamma^{-1}$ exists and is also a member of $\Gamma$. This implies that $y = f \circ \gamma^{-1}$. We assume that the identity function $\gamma_{\rm id}(t) = t$ is the \emph{center} of the random warping function, where the center is defined later in Section \ref{sec:2.2 Mapping Gamma}.
This assumption formally defines phase variation as the deviation of $\gamma$ from the identity. This choice of center is purely for the sake of simplicity and interpretability; our analysis using the Fisher-Rao function alignment and the transformation of $\gamma$ discussed in Section \ref{sec:2.2 Mapping Gamma} is in fact insensitive to different choices of the center \citep[\textit{cf}. ][]{44}.
\subsection{Simplifying the geometry of $\Gamma$} \label{sec:2.2 Mapping Gamma}
Working directly with warping functions is not desirable since $\Gamma$ is not convex; there exist $\gamma_1, \gamma_2 \in \Gamma$ and $c > 0$ such that $\gamma_1 + \gamma_2 \notin \Gamma$ and $c\gamma_1 \notin \Gamma$. Thus, standard operations based on Euclidean geometry can only be applied with great care. We circumvent this issue by adopting the geometric approach laid out in \citet{13,16}, and introduce a bijection $\phi: \Gamma \to B$, where $B$ is a convex subset of $L_2[0,1]$ containing the origin (i.e., the $0$ function), so that standard operations can be employed. The map $\phi$ is defined below in (\ref{eq:phifunction}), and its inverse in (\ref{equ:5b}). The map $\phi$ is best understood as a composition of two transformations, as elaborated below.
\paragraph{Mapping to the unit sphere:} The level of difficulty in dealing with $\gamma$ is eased by taking the square-root of the derivative of $\gamma$, the operation of which is denoted by $\Theta: \Gamma \to L_2[0,1]$,
\begin{equation}
\Theta(\gamma) := q_{\gamma} = \sqrt{\gamma^{\prime}}.
\label{equ:2}
\end{equation}
This corresponds to the ``square-root velocity function'' of \citet{13}.
Denote by $S_+ = \{h \in L_2[0,1] : \| h \|_2 = 1, \, h(t) > 0 \mbox{ for all } t \in (0,1) \}$ the positive orthant of the unit sphere in $L_2[0,1]$.
It can be checked that for any $\gamma \in \Gamma$, $q_{\gamma} \in S_+$ and that $\Theta: \Gamma \to S_+$ is a bijection.
A significant benefit of using this transformation is that the complicated structure of $\Gamma$ is simplified to that of the well-known unit sphere.
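For readers who wish to experiment, a minimal numerical sketch of $\Theta$ on a discretized warping function is given below (for illustration only; the function and variable names are arbitrary), together with a check that the image has unit $L_2$-norm, since $\int_0^1 \gamma^{\prime}(t)\,dt = \gamma(1)-\gamma(0)=1$.
\begin{verbatim}
import numpy as np

def srvf_of_warp(gamma, t):
    """Discretized Theta(gamma) = sqrt(gamma'), evaluated on the grid t."""
    dgamma = np.gradient(gamma, t)           # numerical derivative of gamma
    return np.sqrt(np.maximum(dgamma, 0.0))  # gamma is increasing, so gamma' >= 0

t = np.linspace(0.0, 1.0, 1001)
q = srvf_of_warp(t**2, t)                    # example warp gamma(t) = t^2
print(np.trapz(q**2, t))                     # approximately 1
\end{verbatim}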
The ``center'' of the random diffeomorphism $\gamma$ is defined through the Karcher mean \citep{karcher1977riemannian} of $\Theta(\gamma)$. Let
$$\mu = \mu(\gamma) = \mathbb{E}[\Theta(\gamma)] = \mbox{arg}\min_{\mu \in S_+} E [d_g^2(\Theta(\gamma), \mu)]$$ be the Karcher mean using the geodesic distance $d_{g}(a,b) = \cos^{-1}(\langle a,b \rangle)$. Then $\Theta^{-1}(\mu(\gamma))$ is the center of $\gamma$.
\paragraph{Mapping to a tangent space:}
The positive unit sphere $S_+$ has been well-studied as a space for random directions and unit-norm random functions. While there are several approaches of modeling random elements in $S_+$ \citep[cf.][]{Mardia2000,JungPNS,16}, we use a linear approximation of $S_+$ by a tangent space. The tangent space approximation is schematically illustrated in Fig. \ref{fig:2}. For simplicity, we use the unit sphere $S$ that includes $S_+$.
\begin{figure}[t!]
\centering
\includegraphics[width = 0.8\textwidth]{Picture5.png}
\caption{Schematic illustration of $S$ and $T_\mu S$. (a) The pointwise mean $\mu^\prime$ of two functions $a, b \in S$ does not lie on $S$. (b) Mapping of $\sqrt{\gamma^{\prime}} \in S$ to a tangent space $T_{\mu}S$ by the log map. An example of $S_\mu$ is given as a ball in $S$ centered at $\mu$. }
\label{fig:2}
\end{figure}
The tangent space of $S$ at a point $\mu \in S$, denoted by $T_{\mu}S$, is the collection of functions in $L_2[0,1]$ orthogonal to $\mu$,
\begin{align*}
T_{\mu}S=\{h(t) \in L_2[0,1] : \langle h, \mu \rangle = 0 \},
\end{align*}
where $\langle \cdot, \cdot \rangle$ is the usual inner product in $L_2[0,1]$. Functions in $S$ will be approximated by functions in $T_{\mu}S$.
Figure \ref{fig:2}(b) schematically illustrates $T_{\mu}S$ and the approximation of the $S$-valued function $\sqrt{\gamma^{\prime}}$ by the function $\mbox{Log}_\mu(\sqrt{\gamma^{\prime}}) \in T_{\mu}S$; see (\ref{equ:3}). To help understand the tangent space approximation, we take the hyperplane $T$ in $L_2[0,1]$ tangent to $S$ at $\mu$. The tangent space $T_{\mu}S$ is obtained by a translation of the hyperplane $T$ so that the tangent point $\mu$ is translated to the origin. Thus, $T_{\mu}S$ is a subspace of $L_2[0,1]$.
Points (i.e., functions) on the tangent space $T_{\mu}S$ can provide good approximations of functions in a subset $S_{\mu} \subset S$ containing $\mu$. In particular, the \emph{log map} is frequently used for such approximation, and is defined as $\text{Log}_{\mu}: {S}_{\mu} \to T_{\mu} S$,
\begin{equation}
\text{Log}_{\mu}(q_\gamma) = \frac{d_{g}(q_\gamma, \mu)}{\text{sin}(d_{g}(q_\gamma, \mu))} (q_\gamma-\text{cos}(d_{g}(q_\gamma, \mu))\mu).
\label{equ:3}
\end{equation}
The geodesic distance $d_{g}(q_\gamma,\mu)$ measures the distance between $q_\gamma\ (=\sqrt{\gamma^{\prime}})$ and $\mu$ by the length of the shortest arc on $S$ that joins $\sqrt{\gamma^{\prime}}$ and $\mu$. When the standard $L_2$-norm is used for $T_{\mu} S$, the geodesic distance between $\mu$ and $q_\gamma$ and the direction in which $q_\gamma$ shoots from $\mu$, for any $q_\gamma \in S_+$, are preserved by the log map.
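For a common grid, the log map (\ref{equ:3}) can be computed directly; a minimal numerical sketch (for illustration only; the function and variable names are arbitrary) is as follows.
\begin{verbatim}
import numpy as np

def log_map(q, mu, t):
    """Log map of q (on the sphere S) onto the tangent space at mu."""
    inner = np.clip(np.trapz(q * mu, t), -1.0, 1.0)   # <q, mu> in L_2[0,1]
    d = np.arccos(inner)                              # geodesic distance d_g(q, mu)
    if d < 1e-12:                                     # q numerically equals mu
        return np.zeros_like(q)
    return (d / np.sin(d)) * (q - np.cos(d) * mu)
\end{verbatim}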
A sensible choice of the tangential point $\mu$ is given by the assumption that $\gamma_{\rm id}$ is the center of $\gamma$. It can be seen that $\Theta(\gamma_{\rm id}(t)) = 1$ for all $t \in [0,1]$. Thus we choose the constant function $1$ as $\mu$.
This entails that the Karcher mean of $\Theta(\gamma)$ is $\mu(\gamma) \equiv 1$, and that
$E[ \log_\mu(\Theta(\gamma))] = 0$.
Note that the center, $\gamma_{\rm id} = \Theta^{-1}(1)$, of $\gamma$ is in general different from the mean of $\gamma$.
\paragraph{Summary:}
The mapping $\phi$ we use for the transformation of the time-warping function $\gamma$ is
$\phi\colon \Gamma \to T_\mu S$,
\begin{equation}\label{eq:phifunction}
\phi(\gamma) = \log_\mu(\Theta(\gamma)),
\end{equation}
where $\mu \equiv 1$.
We call $x = \phi(\gamma)$ the phase function. Since the image of $\phi$ (denoted by $B$) is a convex subset of $T_\mu S$, standard vector operations (e.g., the Gram-Schmidt operations) can be used for the phase function $x$.
\subsection{Construction of $f$ by the amplitude and phase functions} \label{sec:2.3 Construction of f}
Any pair of amplitude and phase functions $(y,x) \in L_2[0,1] \times B$ can be composed into a single function, by reverting the decomposition in Sections \ref{sec:2.1Decomp} and \ref{sec:2.2 Mapping Gamma}. To define this composition, we note that the log map is the inverse of the \emph{exponential map}, $\text{Exp}_{\mu}: T_\mu S \to S$, defined by
\begin{equation}
\text{Exp}_{\mu}(x) = \frac{\text{sin}\|x\|_2}{\|x\|_2}x+\text{cos}\|x\|_2\mu.
\label{equ:4}
\end{equation}
For any phase function $x \in B \subset T_{\mu}S$, the corresponding time-warping is uniquely given by
\begin{align}\label{equ:5b}
\gamma = \phi^{-1}(x) = (\Theta^{-1} \circ {\rm Exp}_\mu) (x),
\end{align}
that is, $\gamma(t) = \int_0^{t} \text{Exp}_{\mu}^2(x)(s) ds$, $t \in [0,1]$.
Altogether, any pair of random functions $(y,x) \in L_2[0,1] \times B$ can be composed to yield a random function $f$ of the form (\ref{equ:1}) as follows.
\begin{equation}
f(t) = (y \circ \gamma) (t) = (y \circ \phi^{-1}(x) ) (t) = y \left ( \int_0^{t} \text{Exp}_{\mu}^2(x)(s) ds \right ), \,\, t \in [0,1].
\label{equ:5}
\end{equation}
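Numerically, (\ref{equ:4}) and (\ref{equ:5b}) translate into a short routine; the sketch below (for illustration only; the names are arbitrary) maps a discretized phase function back to a warping function by integrating the squared exponential map and renormalizing so that $\gamma(1)=1$ holds exactly on the grid.
\begin{verbatim}
import numpy as np
from scipy.integrate import cumulative_trapezoid

def exp_map(x, mu, t):
    """Exponential map from the tangent space at mu back to the sphere."""
    nx = np.sqrt(np.trapz(x**2, t))          # ||x||_2
    if nx < 1e-12:
        return mu.copy()
    return (np.sin(nx) / nx) * x + np.cos(nx) * mu

def warp_from_phase(x, t):
    """gamma = phi^{-1}(x): cumulative integral of Exp_mu(x)^2 over [0, t]."""
    mu = np.ones_like(t)                     # the tangent point mu is the constant 1
    q = exp_map(x, mu, t)
    gamma = cumulative_trapezoid(q**2, t, initial=0.0)
    return gamma / gamma[-1]                 # enforce gamma(1) = 1 on the grid
\end{verbatim}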
\subsection{Models for joint variability of amplitude and phase functions} \label{sec: FCPCA and FCCCA}
In this section, we define the joint population structures of the amplitude and phase functions $(y,x)$. The parameters we aim to estimate are defined in the models we describe below. Recall that the mean of $x$ corresponds to the identity time-warping $\gamma_{\rm id}$ and thus $E(x) = 0$.
\subsubsection{Model for functional combined principal components} \label{sec: FCPCA }
To model the association between $y$ and $x$, we define a random function $g^{C}$ on the extended domain $[0,2]$ for a positive scaling parameter $C>0$,
\begin{equation}
g^{C}(t) =
\begin{cases}
y(t), & t \in [0,1),\\
Cx(t-1), & t \in [1,2].
\end{cases}
\label{equ:6}
\end{equation}
The exclusion of the end point $\{1\}$ of the domain $[0,1]$ of $y$ in the construction of $g^{C}$ does not lose any information since $y$ is assumed to be continuous. Note that for any $y,x,C$, we have $g^{C} \in L_2[0,2]$. The parameter $C$ is introduced to adjust \emph{scaling imbalance} between $y$ and $x$. We will discuss the role of $C$ shortly, but for now we let $C$ be fixed.
For a given $C$, denote the eigen-decomposition of the covariance function $\Sigma_{g^{C}}$ of $g^{C}$ by
\begin{align*}
\Sigma_{g^{C}}(s,t) = \sum_{i=1}^{\infty} \lambda_{i}^{C} \xi_{i}^{C}(s) \xi_{i}^{C}(t), \quad s,t \in [0,2],
\end{align*}
where $\lambda_{i}^{C}$ are the eigenvalues of $\Sigma_{g^{C}}$ in decreasing order ($\lambda_{i}^{C} \ge \lambda_{i+1}^{C} \ge 0$, $i \ge 1$), and $\xi_{i}^{C}$ is the eigenfunction corresponding to $\lambda_i^C$.
The eigenfunctions are orthonormal, i.e., $\| \xi_{i}^{C} \|_2 = 1$ and $\langle \xi_{i}^{C}, \xi_{j}^{C} \rangle = 0$ for $i \neq j$.
The superscript $C$ is used to emphasize the dependence of the decomposition on $C$.
By the Karhunen-Lo\`{e}ve decomposition, we write $g^{C}(t) = \mu(t) + \sum_{i=1}^{\infty} z_{i}^{C} \xi_{i}^{C}(t)$,
$t \in [0,2]$, where the $z_i^C$'s are uncorrelated mean-zero random variables with $E((z_{i}^{C})^2)=\lambda_{i}^{C}$. Note that the mean function $\mu=E(g^{C})$ does not depend on $C$, since $y$ does not involve $C$ and $E(x)=0$. The function $g^C$ is then divided into the amplitude and phase functions as
\begin{equation}
\begin{split}
y^{C}(t) &= \mu(t) + \sum_{i=1}^{\infty} z_{i}^{C} \xi_{i}^{C}(t),\quad t \in [0,1),\\
x^{C}(t) &= \sum_{i=1}^{\infty} \frac{z_{i}^{C}}{C} \xi_{i}^{C}(t+1),\quad t \in [0,1].
\end{split}
\label{equ:8}
\end{equation}
In~(\ref{equ:8}), the joint variation between $y$ and $x$ is paired in eigenfunctions $\xi_{i}^{C}$.
The role of the scaling parameter $C$ in (\ref{equ:6}) becomes clear from (\ref{equ:8}). As opposed to the unit-free $x$, values of $y$ depend on the unit in which measurements of $y$ (or $f$) are made. The overall analysis should not depend on the particular scaling of $y$ (due to, for example, a change from the metric system to US customary units). Since scaling $y$ by $C$ is equivalent to scaling $x$ by $C^{-1}$, we introduce the scaling parameter $C$ applied only to the ``$x$ part'' of $g^{C}$, in order to keep the original unit of the observed $f$ (and $y$). The eigenfunctions $\{ \xi_{i}^{C} \}_{i=1}^{\infty}$ and their eigenvalues $\{ \lambda_{i}^{C} \}_{i=1}^{\infty}$ vary with the choice of $C$: for a small $C$, the first few eigenfunctions $\xi_{i}^{C}$ are bound to capture more of the amplitude variation, while for a large $C$, the leading eigenfunctions reflect more of the phase variation. For any given $f$, or the pair $(y,x)$, there exists a continuum of different sets $\{\xi_{i}^{C}\}_{i=1}^{\infty}$, depending on the value of $C \in (0, \infty)$, which causes an identifiability issue. Toward our aim of succinctly representing the combined variation of $y$ and $x$ in the original function space, we choose $C$ to depend on the original random function $f$, as discussed below.
Let $m$ be a positive integer. From (\ref{equ:5}) and (\ref{equ:8}), for a given $C>0$, we define $A_{m}^{C}(f)$ as a \emph{projection} of $f$ onto the $m$-dimensional eigen-space, spanned by the first $m$ eigenfunctions, by
\begin{equation}
A_{m}^{C}(f)(t) = y^{C}_{m} \left (\int_0^t \text{Exp}_{\mu}^2(x^{C}_{m})(s) ds \right ),\quad t \in [0,1),
\label{equ:9}
\end{equation}
where for $t \in [0,1)$,
\begin{equation}
\begin{split}
y^{C}_{m}(f)(t) &= \mu(t) + \sum_{i=1}^{m} z_{i}^{C} \xi_{i}^{C}(t), \\
x^{C}_{m}(f)(t) &= \sum_{i=1}^{m} \frac{z_{i}^{C}}{C} \xi_{i}^{C}(t+1).
\end{split}
\label{equ:10}
\end{equation}
This projection utilizes the standard orthogonal projection of $g^C$ onto its eigen-space in $L_2[0,2]$, but is \emph{non-linear} in the original function space $L_2[0,1]$. To minimize the approximation error of $A_{m}^{C}(f)$ with respect to $f$, the scaling parameter $C := C_m$ is chosen as follows:
\begin{equation}
\label{equ:18}
C = \text{argmin}_{C >0} E\left[d^{2}(A_{m}^{C}(f), A_{\infty}^{C}(f))\right] = \text{argmin}_{C >0} E\left[d^{2}(A_{m}^{C}(f), f)\right],
\end{equation}
where $d$ is a distance function on $L_2[0,1]$. We use $d(f,g) = \|f-g\|_2$ for fast computation and mathematical convenience. Other distance functions such as $L_1$-distance, Fisher-Rao distance \citep{13}, and the earth-mover's distance \citep{40} can be used as well.
For a chosen $C$, the combined principal component of $y$ and $x$ (or the so-called eigen-mode) can be visualized in the original function space. In particular, the $i$th \emph{mode of variation} of $f$ can be visualized by overlaying the curves $\tilde{f}_{i,z} := \tilde{y}_{i,z} \circ \phi^{-1}(\tilde{x}_{i,z})$ for various values of $z \in \Re$.
Here, $\tilde{y}_{i,z}$ and $\tilde{x}_{i,z}$ are obtained from (\ref{equ:8}) by setting $z_{i}^{C}= z \sqrt{\lambda_{i}^{C}}$ and also setting $z_{j}^{C}=0$ for all $j \not= i$.
Figure \ref{fig:1}(d) shows empirical estimates of $\tilde{f}_{1,z}$, $z=-1,0,1$, for the Berkeley data.
Our estimation procedure is described in Section \ref{sec:estimation}.
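Given estimates of $\mu$, $\lambda_i^C$ and $\xi_i^C$ on a grid, the curves $\tilde f_{i,z}$ can be produced with a few lines of code; the sketch below (for illustration only; it reuses \texttt{warp\_from\_phase} from the sketch in Section~\ref{sec:2.3 Construction of f}) assumes the first $k$ grid values of an eigenvector carry the amplitude part and the last $k$ values the phase part.
\begin{verbatim}
import numpy as np

def mode_of_variation(i, z, mu_amp, lam, xi, C, t):
    """Curve f-tilde_{i,z}: perturb the mean along the i-th combined component.

    mu_amp : mean amplitude on the grid t (length k)
    xi     : list of eigenvectors of length 2k (amplitude part first, phase part last)
    """
    k = len(t)
    score = z * np.sqrt(lam[i])
    y_tilde = mu_amp + score * xi[i][:k]        # amplitude part of the component
    x_tilde = (score / C) * xi[i][k:]           # phase part of the component
    gamma = warp_from_phase(x_tilde, t)         # from the earlier sketch
    return np.interp(gamma, t, y_tilde)         # y_tilde composed with gamma
\end{verbatim}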
We note that one may use approaches of multivariate functional principal component analysis \citep[cf.][]{chiou2014multivariate,Happ2016} instead of gluing the two functions as done in (\ref{equ:6}). While such multivariate approaches may be mathematically more appealing, using (\ref{equ:6}) facilitates our discussion of the adaptive choice of $C$, and is satisfactory in our numerical examples.
\subsubsection{Model for correlation analysis} \label{sec: FCCCA }
As another approach to model the association between $y$ and $x$, we briefly discuss a model for a functional \textit{combined} canonical correlation analysis (CCA).
For a pair of non-random functions $\psi_y, \psi_x \in L_2[0,1]$, write $\rho(\psi_{y},\psi_{x})$ for the correlation coefficient between two random variables $\langle \psi_{y},y \rangle$ and $\langle \psi_{x}, x \rangle$. Here, $y$ and $x$ are the random amplitude and phase functions as defined before.
In functional combined CCA, the association between the amplitude and phase functions is modeled by a few canonical weight function pairs $(\psi_y,\psi_x)$ that sequentially maximize $\rho(\psi_{y},\psi_{x})$.
In general, the $i$th canonical weight function pair $(\psi_{y,i}, \psi_{x,i})$ maximizes $\rho(\psi_{y,i},\psi_{x,i})$, with the constraint that
$\mbox{Cov}(\langle \psi_{y,i}, y \rangle, \langle \psi_{y,j}, y \rangle)
= \mbox{Cov}(\langle \psi_{x,i}, x \rangle, \langle \psi_{x,j}, x \rangle)=0$ for $1 \le j < i$. The correlation coefficient $\rho_{i} := \rho(\psi_{y,i},\psi_{x,i})$ is called the $i$th canonical correlation coefficient.
The joint variation modeled by the $i$th canonical weight functions $\psi_{y,i}$ and $\psi_{x,i}$ can be visualized in the original function space. For $a,b \in \Re$, let
\begin{equation}
\begin{split}
&P_{y,(i,a)}(t) = \mu(t) + a \psi_{y,i}(t),\quad t \in [0,1],\\
&P_{x,(i,b)}(t) = b \psi_{x,i}(t),\quad t \in [0,1].
\end{split}
\label{equ:13}
\end{equation}
Then the $i$th mode of variation given by the functional combined CCA is visualized by overlaying the curves $\tilde{f}_{i,a,b}:= P_{y,(i,a)} \circ \phi^{-1}( P_{x,(i,b)})$ for various values of $(a,b)$. A reasonable choice of $(a,b)$ satisfies $a/b = \beta$, where $\beta$ is the slope from the regression of $\langle \psi_{x,i}, x \rangle$ against $\langle \psi_{y,i},y \rangle$.
\section{Estimation} \label{sec:estimation}
In this section we discuss our procedures for the application of functional combined PCA and CCA to a data set.
\subsection{Decomposition into amplitude and phase functions} \label{sec:3.1decomposition_empirical}
Let $f_{i}$, $i = 1,\ldots,n$, be the $i$th realization of the underlying random function $f$ obtained from $n$ independent experiments. The realizations $f_{i}$'s do not manifest themselves in a direct way. They are usually recorded at discrete time points, leading to observed values $f_{ij}$, at time point index $j = 1,\ldots,n_i$, and sometimes are blurred with measurement errors. We assume that smoothing the observations $\{ f_{ij} \}_{j=1}^{n_{i}}$ with a suitable basis function system gives a close approximation of $f_{i}$. Denote the approximations to $f_i$ by $\hat{f}_i$, $i = 1,\ldots,n$.
Each $\hat{f}_i$ is then decomposed into the amplitude and phase functions by applying the Fisher-Rao function alignment \citep{13} to the whole sample $\{ \hat{f}_{i} \}_{i=1}^{n}$, which iteratively time-warps $\hat{f}_i$ to a template function, resulting in the time-warp $\hat\gamma_i$ and the aligned function $\hat{y}_i$, satisfying
\begin{equation}
\hat{f}_{i}(t) = \hat{y}_{i}(\hat{\gamma}_{i}(t)), \,\, i=1,2,\ldots,n, \,\, t \in [0,1].
\label{equ:15}
\end{equation}
Write $\hat{x}_i = \phi(\hat\gamma_i)$. The Fisher-Rao alignment is known to be invariant to the choice of template, and we choose the template so that $\sum_{i=1}^n \hat{x}_i = 0$, which makes the center of $\{ \hat\gamma_i \}_{i=1}^{n}$ equal to $\gamma_{\rm id}$. Other methods of function alignment may be used here. We use the method of \citet{13} for its good performance \citep{14,44} and invariance to the choice of template.
\subsection{Functional combined PCA} \label{sec:FCPCAestimation}
In the model for the functional combined PCA, the population eigen-structure depends on the unknown parameter $C$. We first discuss the empirical eigen-decomposition for any given $C$, and then present our procedure to obtain a data-adaptive estimate of $C$.
\subsubsection{Estimation of $(\mu, \lambda_{i}^{C}, \xi_{i}^{C})$}
Let the scaling parameter $C$ be given. For easy computation, we evaluate the functions $\hat{y}_{i}$ and $\hat{x}_{i}$ on a fine grid, $0= t_1 <t_2< \cdots<t_k = 1$, to obtain their vector expressions $\hat{\mathbf{y}}_{i}$ and $ \hat{\mathbf{x}}_{i}$. Write
\begin{align*}
\hat{\mathbf{g}}_{i}^{C} =
\begin{bmatrix}
\hat{\mathbf{y}}_{i}\\
C\hat{\mathbf{x}}_{i}
\end{bmatrix},\,\,\,
\hat{\mathbf{y}}_{i}=[\hat{y}_{i}(t_1) \,\, \dots \,\, \hat{y}_{i}(t_k)]^{T}, \,\, \hat{\mathbf{x}}_{i}=[\hat{x}_{i}(t_1) \,\, \dots \,\,\hat{x}_{i}(t_k)]^{T},
\end{align*}
and let $\hat{\boldsymbol{\mu}}=\sum_{i=1}^{n} \hat{\mathbf{g}}_{i}^{C} / n$.
The eigen-decomposition of the sample covariance matrix $\widehat{\Sigma}_{g^{C}}$ obtained from $\{ \hat{\mathbf{g}}_{i}^{C} \}_{i=1}^{n}$ provides $(n-1)$ pairs of eigenvalues and eigenvectors $(\hat{\lambda}_{i}^{C}, \hat{\boldsymbol{\xi}}_{i}^{C})$,
\begin{align*}
\widehat{\Sigma}_{g^{C}}=\sum_{i=1}^{n} [\hat{\mathbf{g}}_{i}^{C}-\hat{\boldsymbol{\mu}}][\hat{\mathbf{g}}_{i}^{C}-\hat{\boldsymbol{\mu}}]^{T}=\sum_{i=1}^{n-1} \hat{\lambda}_{i}^{C} \hat{\boldsymbol{\xi}}_{i}^{C} \left(\hat{\boldsymbol{\xi}}_{i}^{C}\right)^{T},
\end{align*}
where $\hat{\lambda}_{1}^{C} \ge \dots \ge \hat{\lambda}_{n-1}^{C} \ge 0$, $\| \hat{\boldsymbol{\xi}}_{i}^{C} \|_2=1$ and $\langle \hat{\boldsymbol{\xi}}_{i}^{C}, \hat{\boldsymbol{\xi}}_{j}^{C} \rangle=0$ for $i \not= j$.
Estimates $\hat{\mu}$ of $\mu$ and $\hat{\xi}_{i}^{C}$ of $\xi_{i}^{C}$ are obtained by interpolation of the elements of $\hat{\boldsymbol{\mu}}$ and $\hat{\boldsymbol{\xi}}_{i}^{C}$.
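Concretely, the above amounts to a standard eigendecomposition of stacked vectors; a minimal sketch (for illustration only; names are arbitrary, and the covariance is normalized by $n$ here) is:
\begin{verbatim}
import numpy as np

def combined_pca(Y, X, C):
    """Eigen-decomposition of the stacked sample for a given C.

    Y, X : (n x k) arrays of aligned curves and phase functions on a common grid.
    Returns the mean vector, eigenvalues and eigenvectors of the stacked sample.
    """
    G = np.hstack([Y, C * X])                    # rows are g_i^C on the grid
    mu = G.mean(axis=0)
    Gc = G - mu
    cov = Gc.T @ Gc / G.shape[0]                 # sample covariance matrix
    lam, V = np.linalg.eigh(cov)                 # ascending eigenvalues
    order = np.argsort(lam)[::-1]
    return mu, lam[order], V[:, order].T         # eigenvectors returned as rows
\end{verbatim}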
\subsubsection{Estimation of $C$}
The estimates $\{ (\hat{\lambda}_{i}^{C}, \hat{\xi}_{i}^{C}) \}_{i=1}^{n-1}$ are dependent on the value of $C$. We note that the true parameter $C$ depends on the number of principal components, $m$, used in (\ref{equ:18}). For the purpose of exploratory analysis and visualization of the data, $m$ is typically chosen as a small number. For a given $m$, our strategy in the estimation of $C$ is to use an empirical minimizer of (\ref{equ:18}).
For this, let $a_{ij}^{C}=\langle \hat{g}_{i}^{C} - \hat\mu, \hat{\xi}_{j}^{C} \rangle$ be the $j$th score of the $i$th observation. We write $A_{m}^{C}(\hat{f}_{i})$ for an approximation of the $i$th observation $\hat{f}_i$ by the first $m$ empirical principal components, which is defined by (\ref{equ:9}), by replacing $y^{C}_{m}$ and $x^{C}_{m}$ with
\begin{align*}
\hat{y}^{C}_{m}(\hat{f}_{i})(t) &= \hat{\mu}(t) + \sum_{j=1}^{m} a_{ij}^{C} \hat{\xi}_{j}^{C}(t), \,\, t \in [0,1),\\
\hat{x}^{C}_{m}(\hat{f}_{i})(t) &= \sum_{j=1}^{m} \frac{a_{ij}^{C}}{C} \hat{\xi}_{j}^{C}(t+1), \,\, t \in [0,1].
\end{align*}
Our choice of $\hat{C}$ is then
\begin{equation}
\hat{C} = \underset{C >0}{\text{argmin}} \sum_{i=1}^{n} \frac{\| {A}_{m}^{C}(\hat{f}_{i}) - \hat{f}_{i} \|_2^{2}}{n},
\label{equ:16}
\end{equation}
which entails that the first $m$ combined principal components $\hat{\xi}_{i}^{\hat{C}}$ found at $C=\hat{C}$ reconstruct $\{ \hat{f}_{i} \}_{i=1}^{n}$ most faithfully, compared to other values of $C$. In practice, we use a numerical method to solve (\ref{equ:16}), which is almost instantaneous for small values of $m$.
In all of our numerical studies, the minimizer $\hat{C}$ always exists, and neither degenerates to 0 nor diverges to infinity. Heuristically, this is because the observations are assumed to carry both amplitude and phase variations. Large (or small) values of $C$ force the eigenfunctions $\hat{\xi}_{i}^{C}$ to explain only the phase variation (or only the amplitude variation, respectively). For large $C$, the amplitude variation of $\hat{f}_i$ is typically not found in ${A}_m^C(\hat{f}_i)$; for small $C$, the two functions $\hat{f}_i$ and ${A}_m^C(\hat{f}_i)$ exhibit different phases.
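In code, the selection of $\hat{C}$ reduces to a one-dimensional search; a minimal sketch (for illustration only; \texttt{reconstruct\_m} is a hypothetical helper performing the $m$-component reconstruction $A^{C}_{m}$ described above) is:
\begin{verbatim}
import numpy as np

def choose_C(F, Y, X, t, m, C_grid):
    """Grid search for the data-adaptive scaling parameter C.

    F, Y, X : (n x k) arrays of smoothed curves, aligned curves, phase functions.
    reconstruct_m(Y, X, C, m, t) is assumed to return the n reconstructed curves
    A_m^C(f_i) on the grid t (hypothetical helper, not defined here).
    """
    errors = []
    for C in C_grid:
        R = reconstruct_m(Y, X, C, m, t)
        errors.append(np.mean(np.trapz((R - F) ** 2, t, axis=1)))
    return C_grid[int(np.argmin(errors))]
\end{verbatim}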
\subsection{Functional combined CCA}
In the functional combined CCA of the data $\{f_i: i =1,\ldots,n\}$, we again use the decomposed functions $(\hat{y}_i, \hat{x}_i)$, obtained in Section \ref{sec:3.1decomposition_empirical}, to compute estimates of the triple $(\rho_{j}, \psi_{y,j}, \psi_{x,j})$ as defined in Section \ref{sec: FCCCA }.
It is well known that a naive adaptation of the usual CCA procedure to functional data often leads to spurious estimates of the triple, with the estimated canonical correlation coefficient close to one. Following the suggestions in \citet{19}, we use the regularized functional CCA as follows. For a given smoothing parameter $\lambda>0$, the estimates of the canonical weight functions are
\begin{equation}
(\hat{\psi}_{y,1}, \hat{\psi}_{x,1}) = \underset{\psi_{y},\psi_{x} \in L_2[0,1]}{\text{argmax}}\; \widehat{\mbox{Cov}}(\langle \psi_{y},\hat{y}_{i} \rangle, \langle \psi_{x}, \hat{x}_{i} \rangle)
\label{equ:17}
\end{equation}
subject to $\widehat{\mbox{Var}}(\langle \psi_{y},\hat{y}_{i} \rangle) + \lambda \| D^2\psi_{y} \|_2^2 = \widehat{\mbox{Var}}(\langle \psi_{x}, \hat{x}_{i} \rangle) + \lambda \| D^2\psi_{x} \|_2^2 = 1$, where $\widehat{\mbox{Cov}}$ and $\widehat{\mbox{Var}}$ denote the sample covariance and variance and $D^2$ is the second-order differential operator. Subsequent pairs $(\hat{\psi}_{y,j}, \hat{\psi}_{x,j})$ are obtained similarly with the additional orthogonality constraint. The $j$th empirical canonical correlation coefficient $\hat\rho_{j}$ is given by the sample correlation coefficient of the pairs $(\langle\hat{\psi}_{y,j},\hat{y}_i\rangle,\langle\hat{\psi}_{x,j},\hat{x}_i\rangle)$, $i=1,\ldots,n$. We refer to \citet{1} for a detailed procedure of the functional CCA and the choice of $\lambda$ by generalized cross-validation.
\section{Combined analysis of amplitude and phase variations in real data sets} \label{sec:real data}
In this section, we illustrate applications of the proposed methods to two sets of real data.
\subsection{Berkeley growth data} \label{sec:Berkeley}
The Berkeley growth data set \citep{6} consists of the height measurements of 39 boys and 54 girls from age 1 to 18. We present here the results of our analysis for the boy-only data. The analysis for girls' growth leads to a similar conclusion. To highlight periods of slower and faster growths, we use the growth velocity curves, by taking derivatives of the smoothed growth curves. These raw data are shown in Fig.~\ref{fig:1}(a).
The application of the proposed functional combined PCA and CCA results in a succinct dimension reduction of the data, as well as interpretable major modes of variations. In particular, the first two combined principal components (PCs) well explain the association between growth velocities (amplitude variation) and temporal paces (phase variations).
The mode of variation captured by the first combined PC explains the pattern that boys with higher overall growth rates tend to have faster temporal paces (e.g., reaching their pubertal growth spurt earlier than others). In the first row of Fig.~\ref{fig:X and Y}, the red curves represent this pattern. On the other hand, boys with lower growth rates tend to have slower paces, as shown in the figure by the blue curves.
The second combined PC (shown in the second row of Fig.~\ref{fig:X and Y}) captures a contrast, characterized by the growth before and after about 9 years of age. Specifically, the second PC describes a pattern in which the growth rate and pace are positively associated for ages 0--9 and negatively associated for ages 10--18. As mentioned earlier, FCPCA aims to simultaneously capture the amplitude, the phase and their association, and does so for this data set. The interpretable modes of variation shown in Fig.~\ref{fig:X and Y} are not typically found in applications of functional PCA (see e.g. Fig.~\ref{fig:1}).
\begin{figure}[t!]
\centering
\includegraphics[width=1\textwidth]{berkeley_FCPCA5.png}
\caption{First two combined principal component scores from the growth data. Amplitude functions (left column) and phase functions (middle column) are combined in the right column. Colors correspond across columns.}
\label{fig:X and Y}
\end{figure}
\begin{figure}[t!]
\centering
\includegraphics[width=1\textwidth]{berkeley3.png}
\caption{Berkeley growth data. Top row: Major modes of variations captured in the functional combined PCA. Bottom row: Major associations between the amplitude and phase from the functional combined CCA.}
\label{fig:7}
\end{figure}
An application of our functional combined CCA to the data set reveals a difference between our two proposed methods. The reconstructed functions from the most correlated components are shown in the bottom row of Fig.~\ref{fig:7}.
These are visually different from the combined principal components shown in the top row.
The differences in the patterns found by functional combined PCA and CCA should not be surprising. The \emph{internal} variations within each of the amplitude and phase functions affect the combined PCA, while in CCA they are simply ignored.
\subsection{Lip motion data} \label{sec:Lip}
The data set we analyze here is a part of the lip motion data used in \cite{17}. It consists of measurements, at 51 equally-spaced time points from 0 to 340 milliseconds, of the vertical position of the lower lip while the subject speaks the syllable ``bob'' 20 times. The dynamics of lip motion are well captured by its acceleration. The second derivatives plotted in Figure \ref{fig:8}(a) show a common pattern. The lip movement is first accelerated negatively and then passes through a positive acceleration phase during which the descent of the lower lip is stopped. This lip opening phase is followed by a short period of near-zero acceleration when pronunciation of the vowel ``o'' is at its full force, followed by another strong upward acceleration initiating lip closure. The movement is completed by a negative acceleration episode as the lip returns to the closed position \citep{17}.
\begin{figure}[t!]
\centering
\includegraphics[width=1\textwidth]{lip.png}
\caption{(a) 20 acceleration curves of lip movement. (b) Three functions describing a first mode of variation from FCPCA. (c) Three functions describing a combined effect of the most correlated directions from the functional combined CCA.}
\label{fig:8}
\end{figure}
By an application of FCPCA, we found that the first combined PC explains a large portion (58\%) of the total variation. The first mode of variation, shown in Figure \ref{fig:8}(b), reflects a speech habit of the speaker: as he makes the sound of the word louder (or softer), he tends to speak faster (or slower, respectively). For this data set, the findings from the functional combined CCA are similar to those of FCPCA.
\section{Numerical studies} \label{sec:numerical studies}
\subsection{Efficiency of functional combined PCA under non-linear associations} \label{sec:non-linear}
The success of the proposed methods depends on whether a particular type of association between the amplitude and phase variations exists in the data. In particular, our methods are well-suited to linearly dependent amplitude ($y$) and phase ($x$) functions. To elaborate on this point, we present a toy data analysis.
Two sets of data are prepared by sampling from the amplitude and phase function pair $(y,x)$. We set each of $y$ and $x$ to have one major principal component, and the association between the PC score of $y$ and that of $x$ is either nearly linear or severely non-linear (quadratic). The observations are obtained by the composition $f = y \circ \phi^{-1}(x)$ and are displayed in the first column of Fig.~\ref{fig:6}. The types of association, or the degrees of non-linearity, are illustrated in the scatter plots of the two individual PC scores, shown in the second column of Fig.~\ref{fig:6}. The proposed functional combined PCA works well for the first data set, where the association between $y$ and $x$ is nearly linear.
To confirm this and to investigate the sensitivity of our method to the degrees of non-linearity, we evaluate for each data set the mean squared approximation error (MSE) using only the first $m$ components, as a function of $m \ge 1$, computed by $n^{-1} \sum_{i=1}^{n}\| {A}^{\hat{C}}_m(\hat{f}_i) - \hat{f}_i \|_2^2$. These errors are compared with errors from other natural competitors: the usual functional PCA (FPCA) and a \emph{composite} functional PCA, proposed in \cite{16}. The FPCA is applied to the original data (without applying function alignment), and the first $m$ components are used to approximate the observations.
In the composite method, the FPCA is applied to each individual function ($\hat{y}$ and $\hat{x}$). The first $m$ components from both $\hat{y}$ and $\hat{x}$ are used to approximate the observations (thus using $2m$ components). These MSEs are shown in the last column of Fig.~\ref{fig:6}.
\begin{figure}[t!]
\centering
\includegraphics[width=1\textwidth]{linear_example2.png}
\caption{Reconstruction errors of functional combined PCA (black solid), FPCA (green dashed) and the composite method (red dotted) of \citet{16}. The proposed method works well when the amplitude and phase are \emph{linearly} associated.}
\label{fig:6}
\end{figure}
Note that our definition of one-dimensional linear or nearly linear association, as shown in the first row of Fig.~\ref{fig:6}, typically results in a one-dimensional non-linear mode of variation in the original function space. This non-linear variation is not completely captured in a single component of FPCA, and oftentimes needs multiple components.
In contrast, our method efficiently captures the non-linear variation (showing the smallest MSE for $m = 1$), since in fact, the non-linear association becomes linear in $\hat{y}$ and $\hat{x}$.
For this type of association, the usual FPCA needs several components to capture the non-linear variation in the original space, and is less favorable. The composite method, on the other hand, uses $2m$ linear components (compared to only $m$ components in the other two methods), and is thus expected to show better performance than FPCA in general. Note that our functional combined PCA has smaller errors than the composite method for this data set.
As the degree of non-linearity intensifies, the advantage of the functional combined PCA gradually lessens. In the severely non-linear case (shown in the second row of the figure), our method fails to capture the non-linear mode of variation in one component. However, it performs comparably to the other methods when more than one component is used, i.e., for $m>1$.
\subsection{Performance of estimation in functional combined PCA}\label{sec:simPCA}
In this and the next subsection, we demonstrate the good performance of our estimation procedures. The success of our methods depends largely on the quality of the alignment. The Fisher-Rao function alignment we use has been shown to work well in practice \citep{14}, but its theoretical guarantees (e.g., consistency in the estimation of $\mu$) are limited \citep{13}. We therefore use simulated data sets to examine the consistency of the estimators. We have tried a range of parameter settings, and the results are concordant across settings. Below we present representative cases.
We use a four-component model for (\ref{equ:6}), where $g_{i}^{C}(t) = \mu(t) + \sum_{j=1}^{4} z_{ij} \sqrt{\lambda_j} \xi_{j}(t)$, $t \in [0,2]$.
We set $\mu(t) = 20 [ g((t - 0.35)/0.05) + g((t-0.65)/0.05)]$, $t \in [0,1)$, where $g(\cdot)$ is the density function of the standard normal. The eigenfunctions for amplitudes are chosen by the Gram-Schmidt orthogonalization of four functions
$$
\left\{ g\left(\frac{t-0.35}{0.05}\right), g\left(\frac{t-0.65}{0.05}\right), g\left( \frac{t-0.5}{0.1}\right), g\left(\frac{t-0.3}{0.1}\right) + g\left(\frac{t-0.7}{0.1}\right) \right\},
$$
while the eigenfunctions for phases are from
$ \{(t - 0.5)^j : j = 1,\ldots, 4\}$. Figure~\ref{fig:sim_model1} illustrates the mean function, and the eigenfunctions. We set $C = 1$ and
$(\lambda_1,\ldots, \lambda_4) = (3.5,2.6,0.3,0.1)$. The scores $z_{ij}$ are sampled from the standard normal distribution. The observed function $f_i$ is obtained from $g_i^C$ using (\ref{equ:5}) and (\ref{equ:8}). An example of such a random sample is shown in the left panel of Fig.~\ref{fig:sim_model1}.
We observe $f_i$ at each time point $t_j := (j-1)/ 101$, for $j=1,2,\ldots,101$, with measurement error $\epsilon_{ij} \sim N(0,0.1)$. To smooth the $f_{ij}$'s, the B-spline basis system of degree 4 with a roughness penalty on the second derivative is used. Following~\cite{20}, knots are placed at the evaluation points $\{ t_k \}_{k=1}^{101}$ and, following~\cite{21}, the value of the smoothing parameter $\lambda$ is determined by the generalized cross-validation method.
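For reference, the mean function of this simulation model can be evaluated directly; a minimal sketch (for illustration only) is:
\begin{verbatim}
import numpy as np
from scipy.stats import norm

def sim_mean(t):
    """Mean amplitude of the simulation model: two normal-density bumps."""
    return 20.0 * (norm.pdf((t - 0.35) / 0.05) + norm.pdf((t - 0.65) / 0.05))

t = np.linspace(0.0, 1.0, 101)
mu_vals = sim_mean(t)    # peaks near t = 0.35 and t = 0.65
\end{verbatim}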
\begin{figure}[t!]
\centering
\includegraphics[width=1\textwidth]{sim_model1.png}
\caption{Model used in Sections~\ref{sec:simPCA} and \ref{sec:simCCA}. }
\label{fig:sim_model1}
\end{figure}
For sample sizes $n = 30,100$, we generated $f_1,\ldots,f_n$ from the model described above and obtained the estimates $(\hat{C}, \hat{\mu}, \hat{\lambda}_{1}^{\hat{C}}, \hat{\xi}_{1}^{\hat{C}}, \hat{\lambda}_{2}^{\hat{C}}, \hat{\xi}_{2}^{\hat{C}})$, from our procedure discussed in Section~\ref{sec:FCPCAestimation}. For a random sample of size $n = 30$, the analysis result is shown in Fig.~\ref{fig:sim_result}. There, we see that the first two component estimates capture the amplitude, phase and their association rather well; the estimates are very close to the population eigenfunctions, shown in Fig.~\ref{fig:sim_model1}. The third component seems negligible as $\lambda_3$ is small.
\begin{figure}[t!]
\centering
\includegraphics[width=1\textwidth]{sim_resutlt4.png}
\caption{Modes of variations captured in FCPCA for simulated data in Section~\ref{sec:simPCA}.}
\label{fig:sim_result}
\end{figure}
We repeat the experiment 100 times to examine the sampling distributions of the estimators. The results are summarized in Table \ref{tab1}. The estimators approach their population counterparts as the sample size increases.
\begin{table}[t]
\centering
\renewcommand{\arraystretch}{1.4}
\begin{tabular}{c|cc}
& $n = 30$ & $n = 100$ \\
\hline
$ \hat{C} $ ($C=1$) & 1.44 (0.31) & 1.28 (0.29)\\
$ \hat{\lambda}_{1}^{\hat{C}}$ ($\lambda_{1}=3.5$)& 4.12 (0.26)& 3.81 (0.21) \\
$ \hat{\lambda}_{2}^{\hat{C}}$ ($\lambda_{2}=2.6$)& 2.98 (0.37)& 2.74 (0.18) \\
$ \| \mu - \hat{\mu}\|_2 $&2.89 (1.27)&2.15 (0.85)\\
$ \|\xi_{1}-\hat{\xi}_{1}^{\hat{C}}\|_2 $&0.49 (0.24)&0.38 (0.36)\\
$ \|\xi_{2}-\hat{\xi}_{2}^{\hat{C}}\|_2 $&0.71 (0.41)&0.34 (0.50)\\
\end{tabular}
\caption{Simulation results for functional combined PCA. The mean and standard deviation (in parentheses) of scalar estimates and $L_2$-distances of functional estimates to their parameter counterparts are shown for different sample sizes.
\label{tab1}}
\end{table}
\subsection{Performance of estimation in functional combined CCA}\label{sec:simCCA}
As a model for the functional combined CCA, the amplitude and phase functions are each modeled using four principal components, where
$y_{i}(t) = \mu_{y}(t) + \sum_{j=1}^{4} u_{j} \sqrt{\lambda_{y,j}} \xi_{y,j}(t)$,
$x_{i}(t) = \sum_{j=1}^{4} v_{j} \sqrt{\lambda_{x,j}} \xi_{x,j}(t)$, $t \in [0,1]$,
so that the corresponding function $f$ is obtained by the function composition (\ref{equ:5}).
We use $\mu$, $\xi_{y,i}$, $\xi_{x,j}$ as shown in Fig.~\ref{fig:sim_model1}.
We choose to model only one canonical weight function pair by setting $(\psi_{y,1}, \psi_{x,1}) := (\xi_{y,1}, \xi_{x,2})$ with the canonical correlation coefficient 0.8. (That is, only the first ``y'' component and the second ``x'' component are correlated.)
The variances of individual principal components are set to be $(\lambda_{y,1},\ldots, \lambda_{y,4}) = (5,3.5,0.8,0.7)$, and $(\lambda_{x,1},\ldots,\lambda_{x,4}) = (1,0.7,0.16,0.14) / 100$. The scores $(u_i,v_j)$ are independently sampled from $N(0,1)$, except that $\mbox{Cov}(u_1,v_2) = 0.8$.
The random function $f_i$ is observed at a dense grid with a measurement error drawn from $N(0, 0.1)$, and the data are processed as in Section~\ref{sec:simPCA}.
We obtained the empirical sampling distributions of the estimators $(\hat{\rho}_{1}, \hat{\psi}_{y,1}, \hat{\psi}_{x,1})$ for sample sizes $n =30,100$ with 100 repetitions. The results, summarized in Table \ref{tab2}, suggest a good performance of our estimation procedure. Note that we have used the generalized cross validation \citep{1} to choose the smoothing parameter of functional CCA.
\begin{table}[t]
\centering
\renewcommand{\arraystretch}{1.4}
\begin{tabular}{c|cc}
& $n = 30$ & $n = 100$ \\
\hline
$ \hat{\rho}_1 $ ($\rho_1=0.8$) &0.68 (0.21)&0.72 (0.19)\\
$ \|\psi_{y,1}-\hat{\psi}_{y,1}\|_2 $&0.89 (0.31)&0.43 (0.18) \\
$ \|\psi_{x,1}-\hat{\psi}_{x,1}\|_2 $&0.72 (0.28)&0.55 (0.15)\\
\end{tabular}
\caption{Simulation results for functional combined CCA. The mean and standard deviation (in parentheses) of $\hat\rho$ and $L_2$-distances of functional estimates to their parameter counterparts are shown for different sample sizes.
\label{tab2}}
\end{table}
\section{Conclusion}
This paper presents a novel framework for exploring the combined structure of amplitude and phase variations in functional data. Naive applications of standard statistical tools such as functional PCA to this type of data sometimes produce unsatisfactory results, and the commonly employed strategy of analyzing only the aligned functions disregards the relevant phase variation. To overcome these disadvantages, we propose functional combined PCA and CCA to investigate major modes of variation and correlated directions of the data in the underlying space, in which the association between the amplitude and phase variations can be addressed. The analysis results are visually presented in the original form of the observed functions to aid interpretation.
\bibliographystyle{Chicago}
\bibliography{mybibfile}
\end{document}
\begin{document}
\baselineskip 6.1mm
\title
{Divisibility of the class numbers of imaginary quadratic fields}
\author{K. Chakraborty, A. Hoque, Y. Kishi, P. P. Pandey}
\address[K. Chakraborty]{Harish-Chandra Research Institute, HBNI, Chhatnag Road, Jhunsi, Allahabad-211019, India.}
\email{[email protected]}
\address[A. Hoque]{Harish-Chandra Research Institute, HBNI, Chhatnag Road, Jhunsi, Allahabad-211019, India.}
\email{[email protected]}
\address[Y. Kishi]{Department of Mathematics, Aichi University of Education, 1 Hirosawa Igaya-cho, Kariya, Aichi 448-8542, Japan.}
\email{[email protected]}
\address[P. P. Pandey]{Department of Mathematics, IISER Berhampur, Berhampur-760010,
Odisha, India.}
\email{[email protected]}
\subjclass[2010]{11R11; 11R29}
\date{\today}
\keywords{Imaginary Quadratic Field; Class Number; Ideal Class Group}
\begin{abstract}
For a given odd integer $n>1$, we provide some families of imaginary quadratic number fields
of the form $\mathbb{Q}(\sqrt{x^2-t^n})$ whose ideal class group has a subgroup
isomorphic to $\mathbb{Z}/n\mathbb{Z}$.
\end{abstract}
\maketitle{}
\section{Introduction}
The divisibility properties of the class numbers of number fields are very important
for understanding the structure of the ideal class groups of number fields.
For a given integer $n>1$, the Cohen-Lenstra heuristic \cite{CL84} predicts that a
positive proportion of imaginary quadratic number fields have class number divisible by $n$.
Proving this heuristic seems out of reach with the current state of knowledge.
On the other hand, many families of (infinitely many) imaginary quadratic fields
with class number divisible by $n$ are known.
Most of such families are of the type $\mathbb{Q}(\sqrt{x^2-t^n})$ or of the type
$\mathbb{Q}(\sqrt{x^2-4t^n})$, where $x$ and $t$ are positive integers with some
restrictions (for the former see
\cite{AC55, IT11p, IT11, KI09, RM97, RM99, NA22, NA55, SO00, MI12},
and for the latter see \cite{Cohn, GR01, IS11, IT15, LO09, YA70}).
Our focus in this article will be on the family $K_{t,x}=\mathbb{Q}(\sqrt{x^2-t^n})$.
In 1922, T. Nagell~\cite{NA22} proved that for an odd integer $n$,
the class number of the imaginary quadratic field $K_{t,x}$
is divisible by $n$ if $t$ is odd, $(t,x)=1$,
and $q\mid x$, $q^2\nmid x$ for all prime divisors $q$ of $n$.
Let $b$ denote the square factor of $x^2-t^n$, that is,
$x^2-t^n=b^2d$, where $d<0$ is the square-free part of $x^2-t^n$.
Under the condition $b=1$, N. C. Ankeny and S. Chowla \cite{AC55}
(resp.\ M. R. Murty \cite[Theorem~1]{RM97})
considered the family $K_{3,x}$ (resp.\ $K_{t,1}$).
M. R. Murty also treated the family $K_{t,1}$ with $b<t^{n/4}/2^{3/2}$ (\cite[Theorem~2]{RM97}).
Moreover, K. Soundararajan \cite{SO00} (resp.\ A. Ito \cite{IT11p})
treated the family $K_{t,x}$ under the condition that
$b<\sqrt{(t^n-x^2)/(t^{n/2}-1)}$ holds (resp.\ all divisors of $b$ divide $d$).
On the other hand, T. Nagell~\cite{NA55} (resp.\ Y. Kishi~\cite{KI09}, A. Ito~\cite{IT11}
and M. Zhu and T. Wang~\cite{AC55}) studied the family $K_{t,1}$
(resp.\ $K_{3,2^k}$, $K_{p,2^k}$ and $K_{t,2^k}$)
unconditionally for $b$, where $p$ is an odd prime.
In the present paper, we consider the case when both
$t$ and $x$ are odd primes, with no condition on $b$, and prove the following:
\begin{thm}\label{T1}
Let $n\geq 3$ be an odd integer and $p,q$ be distinct odd primes with $q^2<p^n$.
Let $d$ be the square-free part of $q^2-p^n$. Assume that $q \not \equiv \pm 1 \pmod {|d|}$. Moreover, we assume
$p^{n/3}\not= (2q+1)/3, (q^2+2)/3$ whenever both $d \equiv 1 \pmod 4$ and $3\mid n$.
Then the class number of $K_{p,q}=\mathbb{Q}(\sqrt{d})$ is divisible by $n$.
\end{thm}
In Table 1 (respectively Table 2), we list $K_{p,q}$ for small values of $p,q$ for $n=3$ (respectively for $n=5$).
It is readily seen from these tables that the assumptions in Theorem \ref{T1} hold very often. We can easily prove, by reduction modulo $4$, that the condition \enquote{$p^{n/3}\not= (2q+1)/3, (q^2+2)/3$} in Theorem \ref{T1} holds whenever $p \equiv 3 \pmod 4$.
Further, if we fix an odd prime $q$, then the condition \enquote{$q \not\equiv \pm 1 \pmod{|d|}$} in Theorem \ref{T1} holds almost always;
this can be proved using Siegel's celebrated theorem on integral points on affine curves. More precisely, we prove the following theorem in this direction.
\begin{thm}\label{T2}
Let $n\geq 3$ be an odd integer not divisible by $3$. For each odd prime $q$ the class number of $K_{p,q}$ is divisible by $n$ for all but finitely many $p$'s. Furthermore, for each $q$ there are infinitely many fields $K_{p,q}$.
\end{thm}
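The hypotheses of Theorem~\ref{T1} are straightforward to verify numerically for given $p$, $q$ and $n$; the following short script (for illustration only, written with SymPy; the function names are ours) performs checks of the kind reported in Tables 1 and 2.
\begin{verbatim}
from sympy import factorint, isprime

def squarefree_part(N):
    """Square-free part d (with sign) of a nonzero integer N."""
    d = -1 if N < 0 else 1
    for prime, e in factorint(abs(N)).items():
        if e % 2 == 1:
            d *= prime
    return d

def hypotheses_hold(p, q, n):
    """Check the assumptions of Theorem 1 for odd n >= 3 and odd primes p != q."""
    if not (n >= 3 and n % 2 == 1 and isprime(p) and isprime(q)
            and p != q and q * q < p ** n):
        return False
    d = squarefree_part(q * q - p ** n)
    if q % abs(d) in (1 % abs(d), (-1) % abs(d)):   # excludes q = +-1 (mod |d|)
        return False
    if d % 4 == 1 and n % 3 == 0:
        if 3 * p ** (n // 3) in (2 * q + 1, q * q + 2):
            return False
    return True

print(hypotheses_hold(7, 3, 3))   # True: d = -334 for K_{7,3} = Q(sqrt(9 - 343))
\end{verbatim}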
\section{Preliminaries}
In this section we collect some results which are needed for the proof of Theorem \ref{T1}. First we state a basic result from algebraic number theory.
\begin{prop}\label{P1}
Let $d \equiv 5 \pmod 8$ be an integer and $\ell$ be a prime. For odd integers $a,b$ we have
$$\left(\frac{a+b\sqrt{d}}{2}\right)^{\ell} \in \mathbb{Z}[\sqrt{d}] \mbox{ if and only if } \ell=3.$$
\end{prop}
\begin{proof}
This can be proved easily by reducing modulo a suitable power of two.
\end{proof}
We now recall a result of Y. Bugeaud and T. N. Shorey \cite{BS01} on Diophantine equations, which is one of the main ingredients in the proof of Theorem \ref{T1}.
Before stating the result of Y. Bugeaud and T. N. Shorey, we need to introduce some definitions and notations.
Let $F_k$ denote the $k$th term in the Fibonacci sequence defined by $F_0=0, \ F_1= 1$
and $F_{k+2}=F_k+F_{k+1}$ for $k\geq 0$. Similarly $L_k$ denotes the $k$th term in the Lucas
sequence defined by $L_0=2, \ L_1=1$ and $L_{k+2}=L_k+L_{k+1}$ for $k\geq 0$.
For $\lambda\in \{1, \sqrt{2}, 2\}$, we define the subsets $\mathcal{F}, \ \mathcal{G_\lambda},
\ \mathcal{H_\lambda}\subset \mathbb{N}\times\mathbb{N}\times\mathbb{N}$ by
\begin{align*}
\mathcal{F}&:=\{(F_{k-2\varepsilon},L_{k+\varepsilon},F_k)\,|\,
k\geq 2,\varepsilon\in\{\pm 1\}\},\\
\mathcal{G_\lambda}&:=\{(1,4p^r-1,p)\,|\,\text{$p$ is an odd prime},r\geq 1\},\\
\mathcal{H_\lambda}&:=\left\{(D_1,D_2,p)\,\left|\,
\begin{aligned}
&\text{$D_1$, $D_2$ and $p$ are mutually coprime positive integers with $p$}\\
&\text{an odd prime and there exist positive integers $r$, $s$ such that}\\
&\text{$D_1s^2+D_2=\lambda^2p^r$ and $3D_1s^2-D_2=\pm\lambda^2$}
\end{aligned}\right.\right\},
\end{align*}
except when $\lambda =2$, in which case the condition ``odd''
on the prime $p$ should be removed in the definitions of $\mathcal{G_\lambda}$
and $\mathcal{H_\lambda}$.
\begin{thma}\label{A1}
Given $\lambda\in \{1, \sqrt{2}, 2\}$, a prime $p$ and positive co-prime integers $D_1$ and $D_2$, the number of positive integer solutions $(x, y)$ of the Diophantine equation
\begin{equation}\label{E1}
D_1x^2+D_2=\lambda^2p^y
\end{equation}
is at most one except for $$
(\lambda,D_1,D_2,p)\in\mathcal{E}:=\left\{\begin{aligned}
&(2,13,3,2),(\sqrt 2,7,11,3),(1,2,1,3),(2,7,1,2),\\
&(\sqrt 2,1,1,5),(\sqrt 2,1,1,13),(2,1,3,7)
\end{aligned}\right\}
$$
and $(D_1, D_2, p)\in
\mathcal{F}\cup \mathcal{G_\lambda}\cup \mathcal{H_\lambda}$.
\end{thma}
We recall the following result of J. H. E. Cohn \cite{Cohn1} about appearance of squares in the Lucas sequence.
\begin{thma}\label{A2}
The only perfect squares appearing in the Lucas sequence are $L_1=1$ and $L_3=4$.
\end{thma}
\section{Proofs}
We begin with the following crucial proposition.
\begin{prop}\label{P2}
Let $n,q,p,d$ be as in Theorem \ref{T1} and let $m$ be the positive integer with
$q^2-p^n=m^2d$. Then the element $\alpha =q+m\sqrt{d}$ is not an $\ell^{th}$ power of an element in the ring of integers of $K_{p,q}$ for any prime divisor $\ell $ of $n$.
\end{prop}
\begin{proof}
Let $\ell$ be a prime divisor of $n$. Since $n$ is odd, so is $\ell$.
We first consider the case when $d \equiv 2 \mbox{ or }3 \pmod 4$.
If $\alpha$ is an $\ell^{th}$ power, then there are integers $a,b$ such that
$$q+m \sqrt{d}=\alpha =(a+b\sqrt{d})^{\ell}.$$
Comparing the real parts, we have
$$q=a^{\ell}+\sum_{i=0}^{(\ell-1)/2} \binom{\ell}{2i} a^{\ell-2i}b^{2i}d^i.$$
This gives $a\mid q$ and hence $a=\pm q$ or $a=\pm 1$.\\
Case (1A): $a= \pm q$.\\
We have $q+m\sqrt{d}=(\pm q+b\sqrt{d})^{\ell}$. Taking norm on both sides we obtain
$$p^n=(q^2-b^2d)^{\ell}.$$
Writing $D_1=-d>0$, we obtain
\begin{equation*}
D_1b^2+q^2=p^{n/ \ell}.
\end{equation*}
Also, we have
\begin{equation*}
D_1m^2+q^2=p^n.
\end{equation*}
As $\ell$ is a prime divisor of $n$ so $(x,y)=(|b|,n/ \ell)$ and $(x,y)=(m,n)$ are
distinct solutions of (\ref{E1}) in positive integers for $D_1=-d>0,D_2=q^2, \lambda=1$.
Now we verify that $(1, D_1,D_2,p) \not \in \mathcal{E}$ and $(D_1,D_2,p) \not \in \mathcal{F}\cup \mathcal{G_\lambda}\cup \mathcal{H_\lambda}$. This will give a contradiction.
Clearly $(1,D_1,D_2,p) \not \in \mathcal{E}$. Further, as $D_1>3$, we see that $(D_1,D_2,p) \not \in \mathcal{G}_1$. From Theorem \ref{A2}, we see that $(D_1,D_2,p) \not \in \mathcal{F}$. Finally, if $(D_1,D_2,p) \in \mathcal{H}_1$ then there are positive integers $r,s$ such that
\begin{equation}\label{eq:1}
3D_1s^2-q^2=\pm 1
\end{equation}
and
\begin{equation}\label{eq:2}
D_1s^2+q^2=p^r.
\end{equation}
By (\ref{eq:1}), we have $q\not= 3$, so $q^2\equiv 1\pmod 3$ and hence $3D_1s^2-q^2=-1$.
From this together with (\ref{eq:2}), we obtain
$$4q^2=3p^r+1,$$
that is $$(2q-1)(2q+1)=3p^r.$$
This leads to $2q-1=1 \mbox{ or } 2q-1=3$, but this is not possible as $q$ is an odd prime. Thus $(D_1,D_2,p) \not \in \mathcal{H}_1$.\\
Case (1B): $a=\pm 1$.\\
In this case we have $q+m\sqrt{d}=(\pm 1+b \sqrt{d})^{\ell}$.
Comparing the real parts on both sides, we get $q \equiv \pm 1 \pmod{|d|}$, which contradicts the assumption \enquote{$q \not\equiv \pm 1 \pmod{|d|}$}.
Next we consider the case when $d \equiv 1 \pmod 4$.
If $\alpha$ is an $\ell^{th}$ power of some integer in $K_{p,q}$,
then there are rational integers $a,b$ such that
$$q+m\sqrt{d}=\left( \frac{a+b\sqrt{d}}{2} \right)^{\ell},\ \ a\equiv b\pmod 2.$$
If both $a$ and $b$ are even, then we can proceed as in the case
$d \equiv 2 \mbox{ or }3 \pmod 4$ and obtain a contradiction under the assumption
$q \not\equiv \pm 1 \pmod{|d|}$. Thus we may assume that both $a$ and $b$ are odd.
Again, taking norm on both sides we obtain
\begin{equation}\label{E4}
4p^{n/\ell}=a^2-b^2d.
\end{equation}
Since $a,b$ are odd and $p \neq 2$, reading modulo 8 in (\ref{E4}) we get
$d \equiv 5 \pmod 8$. As $\left( \frac{a+b\sqrt{d}}{2} \right)^{\ell}=q+m\sqrt{d} \in \mathbb{Z}[\sqrt{d}]$, by Proposition \ref{P1} we obtain $\ell=3$.
Thus we have
$$q+m\sqrt{d}=\left( \frac{a+b\sqrt{d}}{2} \right)^3.$$
Comparing the real parts, we have
\begin{align}\label{E44}
8q&=a(a^2+3b^2d).
\end{align}
Since $a$ is odd and $a$ divides $8q$, we have $a\in\{\pm 1,\pm q\}$.\\
Case (2A): $a=q$.\\
By (\ref{E44}), we have $8=q^2+3b^2d$, and hence, $2\equiv q^2\pmod{3}$.
This is not possible.\\
Case (2B): $a=-q$.\\
By (\ref{E4}) and (\ref{E44}), we have
$$4p^{n/3}=q^2-b^2d\ \text{and}\ 8=-(q^2+3b^2d).$$
From these, we have $3p^{n/3}=q^2+2$, which violates our assumption.\\
Case (2C): $a=1$.\\
By (\ref{E44}) and $d<0$, we have $8q=1+3b^2d<0$.
This is not possible.\\
Case (2D): $a=-1$.\\
By (\ref{E4}) and (\ref{E44}), we have
$$4p^{n/3}=1-b^2d\ \text{and}\ 8q=-(1+3b^2d).$$
From these, we have $3p^{n/3}=2q+1$, which violates our assumption.
This completes the proof.
\end{proof}
We are now in a position to prove Theorem \ref{T1}.
\begin{proof}[\bf Proof of Theorem~$\ref{T1}$]
Let $m$ be the positive integer with $q^2-p^n=m^2d$ and put $\alpha =q+m\sqrt{d}$.
We note that $\alpha$ and $\bar{\alpha}$ are co-prime and $N(\alpha)=\alpha \bar{\alpha}=p^n$.
Thus we get $(\alpha)= \mathfrak{a}^n$ for some integral ideal $\mathfrak{a}$ of $K_{p,q}$.
We claim that the order of $[\mathfrak{a}]$ in the ideal class group of $K_{p, q} $ is $n$.
If this is not the case, then we obtain an odd prime divisor $\ell$ of $n$ and an integer
$\beta $ in $K_{p,q}$ such that $(\alpha)=(\beta)^{\ell}$.
As $q$ and $p$ are distinct odd primes, the condition \enquote{$q \not \equiv \pm 1 \pmod{|d|}$}
ensures that $d<-3$. Also $d$ is square-free, hence the only units in the ring of integers of
$K_{p,q}=\mathbb{Q}(\sqrt{d})$ are $\pm1$. Thus we have $\alpha =\pm \beta^{\ell}$.
Since $\ell$ is odd, we obtain $\alpha= \gamma^{\ell}$ for some integer
$\gamma$ in $K_{p,q}$, which contradicts Proposition \ref{P2}.
\end{proof}
We now give a proof of Theorem \ref{T2}. This is obtained as a consequence of a well known theorem of Siegel (see \cite{ES, LS}).
\begin{proof}[\bf Proof of Theorem $\ref{T2}$]
Let $n>1$ be as in Theorem \ref{T2} and $q$ be an arbitrary odd prime. For each odd prime $p \neq q$, from Theorem \ref{T1}, the class number of $K_{p,q}$ is divisible by $n$ unless $q\equiv \pm 1 \pmod {|d|}$.
If $q\equiv \pm 1 \pmod {|d|}$, then $|d|\leq q+1$.
For any positive integer $D$, the curve
\begin{equation}\label{E5}
DX^2+q^2=Y^n
\end{equation}
is an irreducible algebraic curve (see \cite{WS}) of genus greater than $0$. From Siegel's theorem (see \cite{LS}) it follows that there are only finitely many integral points $(X,Y)$ on the curve (\ref{E5}). Thus, for each $d<0$ there are at most finitely many primes $p$ such that
$$q^2-p^n=m^2d.$$
Since $K_{p,q}=\mathbb{Q}(\sqrt{d})$ and each $d$ arises from only finitely many primes $p$, it follows that there are infinitely many fields $K_{p,q}$ for each odd prime $q$. Further, as there are only finitely many $d$ with $|d|\leq q+1$, if $p$ is large enough then $q^2-p^n=m^2d$ forces $|d|>q+1$. Hence, by Theorem \ref{T1}, the class number of $K_{p,q}$ is divisible by $n$ for $p$ sufficiently large.
\end{proof}
\section{Concluding remarks}
We remark that the strategy of the proof of Theorem \ref{T1} can be adapted, together with the following result of W. Ljunggren \cite{LJ43}, to prove Theorem \ref{T4}.
\begin{thma}\label{TE}
For an odd integer $n$, the only solution of the Diophantine equation
\begin{equation}
\frac{x^n-1}{x-1}=y^2
\end{equation}
in positive integers $x,y,n$ with $x>1$ is $n=5$, $x=3$, $y=11$.
\end{thma}
\begin{thm}\label{T4}
For any positive odd integer $n$ and any odd prime $p$, the class number of the imaginary quadratic field $\mathbb{Q}(\sqrt{1-p^n})$ is divisible by $n$ except for the case $(p, n)=(3, 5)$.
\end{thm}
Theorem \ref{T4} alternatively follows from the work of T. Nagell (Theorem 25 in \cite{NA55}), which was elucidated by J. H. E. Cohn (Corollary 1 in \cite{CO03}). M. R. Murty gave a proof of Theorem \ref{T4} under the condition that either \enquote{$1-p^n$ is square-free with $n>5$} or \enquote{$m<p^{n/4}/2^{3/2}$ whenever $m^2\mid 1-p^n$ for some integer $m$ with odd $n>5$} holds (Theorem 1 and Theorem 2 in \cite{RM97}; see also \cite{RM99}).
We now give some numerical illustrations of Theorem \ref{T1}; a minimal sketch of how the table entries can be checked is given below. All the computations in this paper were done using PARI/GP (version 2.7.6). Table 1 gives the list of imaginary quadratic fields $K_{p,q}$ corresponding to $n=3$, $p \leq 19$ (and hence discriminant not exceeding $19^3$). Note that the list does not exhaust all the imaginary quadratic fields $K_{p,q}$ of discriminant not exceeding $19^3$. Table 2 lists the fields $K_{p,q}$ for $n=5$ and $p \leq 7$.\\
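For reproducibility, the following minimal Python sketch (our own illustration, not the PARI/GP scripts used to generate the tables; it relies on sympy's \texttt{factorint}) computes the square-free part $d$ of $q^2-p^n$ and checks the hypotheses of Theorem~\ref{T1} for a given triple $(p,q,n)$; the class numbers $h(d)$ themselves were computed with PARI/GP.
\begin{verbatim}
from sympy import factorint

def squarefree_part(N):
    """Square-free part d of the (negative) integer N = q^2 - p^n."""
    d = -1 if N < 0 else 1
    for prime, exp in factorint(abs(N)).items():
        if exp % 2 == 1:
            d *= prime
    return d

def theorem1_hypotheses(p, q, n):
    """Check the assumptions of Theorem 1 for the triple (p, q, n)."""
    d = squarefree_part(q**2 - p**n)
    if q % abs(d) in (1, abs(d) - 1):          # excluded: q = +-1 (mod |d|)
        return False
    if d % 4 == 1 and n % 3 == 0:              # extra exclusions of Theorem 1
        if 3 * p**(n // 3) in (2*q + 1, q**2 + 2):
            return False
    return True

# (3,5,3) fails the congruence condition (marked * in Table 1),
# while (13,3,3) satisfies all hypotheses.
print(theorem1_hypotheses(3, 5, 3), theorem1_hypotheses(13, 3, 3))
\end{verbatim}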
\begin{center}
\begin{longtable}{|l|l|l|l|l|l|l|l|l|l|}
\caption{Numerical examples of Theorem 1 for $n=3$.} \label{tab:long1} \\
\hline \multicolumn{1}{|c|}{$p$} & \multicolumn{1}{c|}{$q$} & \multicolumn{1}{c|}{$q^2-p^n$}& \multicolumn{1}{c|}{$d$} & \multicolumn{1}{c|}{$h(d)$}& \multicolumn{1}{|c|}{$p$} & \multicolumn{1}{c|}{$q$} & \multicolumn{1}{c|}{$q^2-p^n$}& \multicolumn{1}{c|}{$d$} & \multicolumn{1}{c|}{$h(d)$}\\ \hline
\endfirsthead
\multicolumn{10}{c}
{{\bfseries \tablename\ \thetable{} -- continued from previous page}} \\
\hline \multicolumn{1}{|c|}{$p$} & \multicolumn{1}{c|}{$q$} & \multicolumn{1}{c|}{$q^2-p^n$}& \multicolumn{1}{c|}{$d$} & \multicolumn{1}{c|}{$h(d)$}& \multicolumn{1}{|c|}{$p$} & \multicolumn{1}{c|}{$q$} & \multicolumn{1}{c|}{$q^2-p^n$}& \multicolumn{1}{c|}{$d$} & \multicolumn{1}{c|}{$h(d)$}\\ \hline
\endhead
\hline \multicolumn{10}{|r|}{{Continued on next page}} \\ \hline
\endfoot
\hline
\endlastfoot
3&5&-2&-2&1*& 5&3&-116&-29&6\\
5&7&-76&-19&1**& 7&3&-334&-334&12\\
7&5&-318&-318&12&7&11&-222&-222&12\\
7&13&-174&-174&12& 7&17&-54&-6&2*\\
11&3&-1322&-1322&42&11&5&-1306&-1306&18\\
11&7&-1282&-1282&12&11&13&-1162&-1162&12\\
11&17&-1042&-1042&12&11&19&-970&-970&12\\
11&23&-802&-802&12&11&29&-490&-10&2*\\
11&31&-370&-370&12&11&37&-38&-38&6*\\
13&3&-2188&-547&3&13&5&-2172&-543&12\\
13&7&-2148&-537&12&13&11&-2076&-519&18\\
13&17&-1908&-53&6&13&19&-1836&-51&2**\\
13&23&-1668&-417&12&13&29&-1356&-339&6\\
13&31&-1236&-309&12&13&37&-828&-23&3\\
13&41&-516&-129&12&13&43&-348&-87&6\\
13&47&-12&-3&1*&17&3&-4904&-1226&42\\
17&5&-4888&-1222&12&17&7&-4864&-19&1**\\
17&11&-4792&-1198&12&17&13&-4744&-1186&24\\
17&19&-4552&-1138&12&17&23&-4384&-274&12\\
17&29&-4072&-1018&18&17&31&-3952&-247&6\\
17&37&-3544&-886&18&17&41&-3232&-202&6\\
17&43&-3064&-766&24&17&47&-2704&-1&1*\\
17&53&-2104&-526&12&17&59&-1432&-358&6\\
17&61&-1192&-298&6&17&67&-424&-106&6\\
19&3&-6850&-274&12&19&5&-6834&-6834&48\\
19&7&-6810&-6810&48&19&11&-6738&-6738&48\\
19&13&-6690&-6690&72&19&17&-6570&-730&12\\
19&23&-6330&-6330&48&19&29&-6018&-6018&48\\
19&31&-5898&-5898&48&19&37&-5490&-610&12\\
19&41&-5178&-5178&48&19&43&-5010&-5010&48\\
19&47&-4650&-186&12&19&53&-4050&-2&1*\\
19&59&-3378&-3378&24&19&61&-3138&-3138&24\\
19&67&-2370&-2370&24&19&71&-1818&-202&6\\
19&73&-1530&-170&12&19&79&-618&-618&12\\
\end{longtable}
\end{center}
\begin{center}
\begin{longtable}{|l|l|l|l|l|l|l|l|l|l|}
\caption{Numerical examples of Theorem 1 for $n=5$.} \label{tab:long2} \\
\hline \multicolumn{1}{|c|}{$p$} & \multicolumn{1}{c|}{$q$} & \multicolumn{1}{c|}{$q^2-p^n$}& \multicolumn{1}{c|}{$d$} & \multicolumn{1}{c|}{$h(d)$}& \multicolumn{1}{|c|}{$p$} & \multicolumn{1}{c|}{$q$} & \multicolumn{1}{c|}{$q^2-p^n$}& \multicolumn{1}{c|}{$d$} & \multicolumn{1}{c|}{$h(d)$}\\ \hline
\endfirsthead
\multicolumn{10}{c}
{{\bfseries \tablename\ \thetable{} -- continued from previous page}} \\
\hline \multicolumn{1}{|c|}{$p$} & \multicolumn{1}{c|}{$q$} & \multicolumn{1}{c|}{$q^2-p^n$}& \multicolumn{1}{c|}{$d$} & \multicolumn{1}{c|}{$h(d)$}& \multicolumn{1}{|c|}{$p$} & \multicolumn{1}{c|}{$q$} & \multicolumn{1}{c|}{$q^2-p^n$}& \multicolumn{1}{c|}{$d$} & \multicolumn{1}{c|}{$h(d)$}\\ \hline
\endhead
\hline \multicolumn{10}{|r|}{{Continued on next page}} \\ \hline
\endfoot
\hline
\endlastfoot
3&5&-218&-218&10&3&7&-194&-194&20\\
3&11&-122&-122&10&3&13&-74&-74&10\\
5&3&-3116&-779&10&5&7&-3076&-769&20\\
5&11&-3004&-751&15&5&13&-2956&-739&5\\
5&17&-2836&-709&10&5&19&-2764&-691&5\\
5&23&-2596&-649&20&5&29&-2284&-571&5\\
5&31&-2164&-541&5&5&37&-1756&-439&15\\
5&41&-1444&-1&1*&5&43&-1276&-319&10\\
5&47&-916&-229&10&5&53&-316&-79&5\\
7&3&-16798&-16798&60&7&5&-16782&-16782&100\\
7&11&-16686&-206&20&7&13&-16638&-16638&80\\
7&17&-16518&-16518&60&7&19&-16446&-16446&100\\
7&23&-16278&-16278&80&7&29&-15966&-1774&20\\
7&31&-15846&-15846&160&7&37&-15438&-15438&80\\
7&41&-15126&-15126&120&7&43&-14958&-1662&20\\
7&47&-14598&-1622&30&7&53&-13998&-13998&100\\
7&59&-13326&-13326&100&7&61&-13086&-1454&60\\
7&67&-12318&-12318&60&7&71&-11766&-11766&120\\
7&73&-11478&-11478&60&7&79&-10566&-1174&30\\
7&83&-9918&-1102&20&7&89&-8886&-8886&60\\
7&97&-7398&-822&20&7&101&-6606&-734&40\\
7&103&-6198&-6198&40&7&107&-5358&-5358&40\\
7&109&-4926&-4926&40&7&113&-4038&-4038&60\\
7&127&-678&-678&20&&&&& \\
\end{longtable}
\end{center}
In both tables we use $*$ in the class number column to indicate the failure of the condition \enquote{$q\not\equiv \pm 1 \pmod{|d|}$} of Theorem \ref{T1}. The appearance of $**$ in the class number column indicates that the condition \enquote{$p^{n/3}\ne (2q+1)/3, (q^2+2)/3$} fails to hold.
For $n=3$, the number of imaginary quadratic number fields obtained from the family provided by T. Nagell (namely $K_{t,1}$ with $t$ any odd integer) with class number divisible by $3$ and discriminant not exceeding $19^3$ is at most $9$, whereas Table 1 contains 59 imaginary quadratic fields $K_{p,q}$ with class number divisible by $3$ and discriminant not exceeding $19^3$ (and Table 1 does not exhaust all such $K_{p,q}$). Out of these 59 fields in Table 1, the conditions of Theorem~\ref{T1} hold for 58. This phenomenon holds for all values of $n$.
\noindent\textbf{Acknowledgements.}
The third and fourth authors gratefully acknowledge the hospitality of the Harish-Chandra Research Institute, Allahabad, where the main part of this work was done. The authors would like to thank the anonymous referee for valuable comments which improved the presentation of this paper.
\end{document}
\begin{document}
\flushbottom
\title{Superradiant Quantum Heat Engine}
\thispagestyle{empty}
Superradiance (SR) was originally introduced by Dicke in 1954~\cite{dicke1954coherence} as
the cooperative emission of light from an ensemble of excited two-level atoms
confined to a volume small compared to the emission wavelength. The atoms radiate in a
synchronized (coherent) manner, at a rate that scales quadratically
with the number of atoms. Experimental verification of SR has been accomplished in diverse systems~\cite{skribanowitz1973observation,gross1976observation,scheibner2007superradiance,
rohlsberger2010collective,devoe1996observation,eschner2001light,mlynek2014observation}. Due to fast
dephasing, typical observations are limited to pulsed or transient regimes, although modern experiments
achieve SR in the steady state as well~\cite{baumann_dicke_2010}.
Nature herself benefits from SR processes, in particular in
light-harvesting complexes, for the
efficient collection and transfer of solar energy~\cite{meier1997polarons,
celardo2012superradiance,scholes2002designing
}. We explore whether we can follow
nature's example and use SR
to enhance the work harvesting capability of a quantum heat engine (QHE).
Typical QHEs are analogs of classical heat engines, whose working fluid or heat reservoirs are replaced by
quantum systems~\cite{PhysRevLett.2.262,
PhysRevLett.93.140403,PhysRevE.76.031105,
PhysRevLett.112.150602,PhysRevLett.112.076803,
scully2003extracting,
scully2002extracting,rostovtsev2003extracting,scully2011quantum,scully2010quantum,altintas_rabi_2015}.
While the recognition
of a quantum system, the three-level maser, as a heat engine goes back to 1959~\cite{PhysRevLett.2.262}, many
modern studies discuss the effects of
quantum coherence on the performance of QHEs.
A particularly intriguing QHE consists of an optical cavity pumped by a beam
of atoms prepared in coherent superposition states.
After repeated passages of the atoms one by one~\cite{scully2002extracting}, or two by two~\cite{li2014quantum},
the cavity field can be thermalized to an effective temperature which can be
higher than that of the atoms.
Using such a quantum coherent thermalization procedure in a Carnot cycle, a QHE with unique properties
has been proposed~\cite{scully2002extracting,scully2003extracting,rostovtsev2003extracting}.
This photonic Carnot engine can operate with a single thermal reservoir and can beat the Carnot efficiency.
Taking into account the coherence reservoir and the cost of coherence generation brings these feats back within the boundaries
of the second law of thermodynamics. The work output of the engine is determined by the radiation pressure on
the cavity mirrors and is proportional to the cavity field intensity~\cite{scully2003extracting}.
We propose to enhance the work harvesting capabilities of a photonic QHE using SR enhancement of the cavity field
intensity. We choose to operate our QHE
in an Otto cycle instead of a Carnot cycle, as it is more experimentally feasible.
We consider clusters of
$N$ two-level atoms that interact with the cavity at regular intervals in the ignition stage of the Otto cycle. Steady-state SR in an overdamped cluster micromaser~\cite{temnov2001superradiant,temnov2005superradiance}
is similar to our set up, though we consider the weak-damping regime. The atoms are assumed to be prepared in a low-temperature coherent superposition state and the cavity field is assumed to be in a thermal state initially.
We find that after a number of interactions, the cavity field reaches an equilibrium state, which is a coherent thermal
state~\cite{barnett_thermofield_1985,emch_new_1986,bishop_coherent_1987,fearn_representations_1988}.
The corresponding work output of the QHE can be determined from the
steady state photon number, which is enhanced by the SR. Accordingly it becomes proportional to $N^{2}$.
Quantum coherence in the cluster is used here as an effective catalyst~\cite{aberg_catalytic_2014},
increasing the energy transfer rate from the cluster to the field. To call it a true catalyst, one would have to prove that the
quantum coherence can be completely recycled. As there are fundamental and practical challenges against
that~\cite{horodecki_fundamental_2013,lostaglio_quantum_2014,lostaglio_description_2015,rodriguez-rosario_thermodynamics_2013,cwiklinski2014limitations},
we leave the question of whether and how quantum coherence can be recovered, partially or completely,
as an intriguing open problem for future contributions. Our focus here will be to utilize quantum coherence
to enhance the rate of energy transfer.
A coherent cluster acts like a fuel which burns quadratically faster than an incoherent one. The time to reach equilibrium
increases as energy is kicked into the cavity in rapid bursts,
which allows quadratically more energy to be harvested from a resource before equilibrium is established. Coherence
is partially transferred from a set of clusters to the cavity field. The field can be
described as a thermal coherent state in equilibrium~\cite{barnett_thermofield_1985,emch_new_1986,bishop_coherent_1987,fearn_representations_1988}.
In contrast to energy, which is harvested as work output, coherence is not consumed and it
remains within the field. More technically, quantum coherence is determined by the off diagonal elements of the
density matrix of the field; and work
is harvested in a quantum adiabatic stage, where the off diagonal
elements are preserved up to a geometric and dynamic
phase factor~\cite{berry_quantal_1984,aguiar_pinto_adiabatic_2002}.
The diagonal elements, and hence the number of photons,
cannot change, in accordance with the adiabatic theorem. The coherence could in principle be transferred from the cavity field back
to the clusters in the exhaust stage. The cluster
resonance frequency should be altered to match the adiabatic changes in the photon frequency. While one could envision a hybrid
system where coherence is exchanged between clusters and photons in a completely or partially reversible manner, and work is
extracted locally from the photon subsystem, there
are serious obstacles against its realization. Complete recovery of the initial coherence may not be possible, first due to fundamental
constraints imposed by time-translation symmetry~\cite{lostaglio_quantum_2014,lostaglio_description_2015,rodriguez-rosario_thermodynamics_2013}, and second due to practical constraints imposed by the rapid
dephasing of coherence. In practice one can take precautions against dephasing using time-dependent control methods~\cite{erez_thermodynamic_2008}, and be
content with partial recovery of coherence within the fundamental bounds to reduce the production cost of quantum coherent fuel~\cite{scully2003extracting}.
Instead, coherence can be repeatedly reestablished externally in the clusters before every cycle of the engine operation. This is an effective
use of catalytic coherence.
\section*{The model and dynamical algorithm}\label{sec:model}
We consider a dissipative, but high-finesse, single-mode optical cavity, whose field interacts repeatedly
with a cluster of $N$ two-level atoms for a time $t_{\mathrm{int}}$ at a rate $r<1/t_{\mathrm{int}}$.
A beam of clusters passing through the cavity in a time of $t_{\mathrm{int}}$ one at a time at regular intervals
of $1/r> t_{\mathrm{int}}$ is an equivalent alternative scenario.
The interaction is described
by the Tavis-Cummings Hamiltonian (in units of $\hbar=1$)
\begin{equation}\label{eq:tc}
H_\mathrm{sys}=\omega_f a^{\dagger}a+\omega_a S_z+g(aS^{+}+a^{\dagger}S^{-}),
\end{equation}
where $\omega_f$ is the cavity photon frequency, $\omega_a$ is the transition frequency of the
atoms, and $g$ is the uniform interaction strength. The photon annihilation and creation operators
obey the boson algebra and are denoted by $a$ and $a^\dag$, respectively. The atomic cluster is represented by
the collective spin operators $(S^{\pm}, S_z)=(\sum_i\sigma_i^{\pm},\tfrac{1}{2}\sum_i\sigma_i^{z})$, which
obey the $SU(2)$ spin algebra, where $\sigma_i^{\pm}$ and $\sigma_i^{z}$ are the Pauli spin matrices, corresponding to the
transition and population inversion operators for the $i$th atom, respectively. The multi-photon generalization of the Tavis-Cummings model has been considered
in a quantum Otto cycle from the perspective of the interplay between the dynamical Stark shift, thermal entanglement,
and the engine efficiency \cite{chotorlishvili2011thermal}.
The dynamics of the state of the system, $\rho_{\mathrm{sys}}$, during the interaction, is determined by the master equation
\begin{equation}\label{eq:m1}
\frac{\partial\rho_{\mathrm{sys}}}{\partial t}=-i[H_\mathrm{sys},\rho_{\mathrm{sys}}]+\frac{\kappa}{2}(2a\rho_{\mathrm{sys}}a^{\dagger}-a^{\dagger}a\rho_{\mathrm{sys}}-\rho_{\mathrm{sys}}a^{\dagger}a),
\end{equation}
where $\kappa$ is the cavity dissipation rate.
Between the interactions, the cavity field evolves freely by the master equation
\begin{equation}\label{eq:m2}
\frac{\partial\rho_f}{\partial t}=-i[\omega_f a^{\dagger}a,\rho_{f}]+
\frac{\kappa}{2}(2a\rho_{f}a^{\dagger}-a^{\dagger}a\rho_{f}-\rho_{f}a^{\dagger}a),
\end{equation}
where $\rho_f=\mathrm{Tr}_{a}(\rho_{\mathrm{sys}})$ is the reduced density matrix of the field,
and $\mathrm{Tr}_{a}$ is the partial trace over the atomic states.
Every interaction starts with an initial state of the system of the form $\rho_{\mathrm{sys}}=\rho_a(0)\otimes\rho_f$,
where $\rho_a(0)$ is the externally prepared state of the cluster, which is the same at the beginning
of every interaction, and $\rho_f$ denotes the state of the cavity field, which changes from one
interaction to the next according to the dynamics described by Eqs.~(\ref{eq:m1}) and (\ref{eq:m2}).
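For concreteness, the following is a minimal QuTiP sketch of Eqs.~(\ref{eq:tc}) and (\ref{eq:m1}); it restricts the cluster to the symmetric Dicke subspace of collective spin $j=N/2$, truncates the Fock space, and uses our own variable names and an assumed value of $t_{\mathrm{int}}$ (the text only fixes $rt_{\mathrm{int}}=1/6$). The joint space is ordered as field $\otimes$ atoms.
\begin{verbatim}
import numpy as np
from qutip import destroy, qeye, tensor, jmat, mesolve

nfock, N = 20, 2                    # Fock cutoff and cluster size
wf = wa = 1.0                       # resonance; energies in units of omega
g, kappa, t_int = 0.19, 0.03, 1.0   # g, kappa from the text; t_int assumed

j = N / 2                           # collective spin of the symmetric subspace
a  = tensor(destroy(nfock), qeye(int(2*j + 1)))
Sp = tensor(qeye(nfock), jmat(j, '+'))
Sm = tensor(qeye(nfock), jmat(j, '-'))
Sz = tensor(qeye(nfock), jmat(j, 'z'))

# Tavis-Cummings Hamiltonian and cavity damping (collapse operator sqrt(kappa)*a)
H_sys = wf * a.dag() * a + wa * Sz + g * (a * Sp + a.dag() * Sm)
c_ops = [np.sqrt(kappa) * a]
\end{verbatim}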
\section*{Results} \label{sec:Results}
\subsection*{Thermalization}
In the usual set up, the atomic ensemble first comes into contact with a hot reservoir at temperature $T_h$. After a sufficient
period of time, the state of the individual atoms $\rho_{a}^{i}$ thermalizes, i.e.,
$\rho_{a}^{i}(0)=(1/Z_a)\sum_n\exp{(-\beta E_{n}^{i})}|\psi_n\rangle^{i}\langle\psi_n|$. Here, $E_n^{i}$ and $|\psi_n\rangle^{i}$ are
the eigenvalues and the corresponding eigenvectors of the single-atom Hamiltonian $H^{i}=(\omega_a/2)\sigma_{z}^{i}$,
$\beta=1/T_h$ $(k_B=1)$ is the inverse temperature and $Z_a=\mathrm{Tr}(\exp{(-\beta H^{i})})$ is the partition function.
The state of the cluster, assuming there are no interatomic interactions, is given by $\rho_a(0)^{\prime}=\bigotimes_i\rho_a^{i}(0)$.
The initial state of the cavity field is a thermal state at temperature $T_c$, i.e.,
$\rho_f(T_c) = (1/Z_f)\exp{(-H_f/T_c)}$ with $H_f=\omega_f a^{\dagger}a$ and $Z_f=\mathrm{Tr}(\exp{(-H_f/T_c)})$.
We assume that $T_h$ is sufficiently low, such that most of the atoms are in their ground states. Prior to their interaction with the
cavity field, the atoms are transformed into coherent superposition states by a rotation operation
\begin{equation}\label{eq:state_a}
\rho_a(0)=R(\zeta)\rho_a(0)^{\prime}R(\zeta)^{\dagger},
\end{equation}
where $R(\zeta)=\exp{(\zeta S^{+}-\zeta^* S^{-})}$ with $\zeta=(\phi/2)e^{i\varphi}$. In our calculations we set $\varphi=0$ and take $\phi=-\pi/2$, i.e., the atomic states are moved from the pole of the Bloch sphere to the equator. The initial state of the system is given by $\rho_{\mathrm{sys}} = \rho_a(0)\otimes\rho_f$. Our choice of the initial state is not arbitrary. It is well known that the collective atomic coherent states are closely related to the Dicke states and are superradiant for the choices $\varphi=0$ and $\phi=\pm\pi/2$~\cite{arecchi1972atomic}.
The initial state of the atomic cluster $\rho_a(0)$ can be called a thermal coherent spin state.
We assume that the field and the atoms are in resonance, i.e., $\omega_f=\omega_a$, which will be used to scale energy, time and
temperature parameters in the numerical simulations. We take $g = 0.19$, $\kappa=0.03$, $T_h=0.001$, $T_c=0.5$
and $rt_{\mathrm{int}}=1/6$.
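Continuing the sketch above, the initial states can be prepared as follows (since $T_h=0.001$ is effectively zero in units of $\omega_a$, the cold cluster is approximated by the collective ground state $|j,-j\rangle$; this shortcut is ours):
\begin{verbatim}
from qutip import thermal_dm, basis, ket2dm

T_c = 0.5
n_th = 1.0 / (np.exp(wf / T_c) - 1.0)        # ~0.156, matching <n_c> in the text
rho_f0 = thermal_dm(nfock, n_th)             # thermal cavity field at T_c

Sp_a, Sm_a = jmat(j, '+'), jmat(j, '-')
rho_cold = ket2dm(basis(int(2*j + 1), int(2*j)))   # m = -j is the last basis state

zeta = (-np.pi / 4) * np.exp(1j * 0.0)       # zeta = (phi/2)e^{i varphi}, phi = -pi/2
R = (zeta * Sp_a - np.conj(zeta) * Sm_a).expm()
rho_a0 = R * rho_cold * R.dag()              # thermal coherent spin state

rho_sys0 = tensor(rho_f0, rho_a0)            # field (x) atoms, as ordered above
\end{verbatim}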
Since $g/\kappa>1$, our system is not in the overdamped micromaser regime and we cannot use the corresponding
superradiant micromaser master equation~\cite{temnov2001superradiant,temnov2005superradiance}. We solve Eqs.~(\ref{eq:m1}) and (\ref{eq:m2}) instead to determine the cavity field density matrix after a number of interactions with the clusters of $N$ atoms. Our typical
results are shown in Fig.~\ref{fig:fig2}.
\begin{figure}
\caption{\textbf{Dynamics of the effective temperature and the mean photon number.}}
\label{fig:fig2}
\end{figure}
In Fig.~\ref{fig:fig2}(a) and~\ref{fig:fig2}(b), we present the time dependence of the effective temperature $T_{\mathrm{eff}}$ and
the mean number of photons in the cavity $\langle n\rangle= \mathrm{Tr}(\rho_f a^{\dagger}a)$, respectively. The initial number of
photons in the cavity is calculated to be $\langle n_c\rangle\sim0.156$ at
$T_c=0.5$. The effective temperature is defined by the relation
$\langle n\rangle=1/[\exp{(1/T_ {\mathrm{eff}})}-1]$. It can be interpreted as a temperature only in a steady state which is an approximate
thermal equilibrium state. If the coherence is large, the probability distribution of the photons in the steady-state
SR phase becomes closer to that of a coherent state with increasing $N$~\cite{PhysRevA.81.063827}. More rigorously,
it can be described by a thermal coherent state~\cite{barnett_thermofield_1985,emch_new_1986,bishop_coherent_1987,fearn_representations_1988}.
It can be written as a thermal state $\rho_f(T)$ subject to a Glauber displacement transformation, $\rho_f(T,\alpha)=D(\alpha)\rho_f(T)D^\dag(\alpha)$, with the displacement operator $D(\alpha)$, where $\alpha$
is the coherence parameter.
The results are shown for the cases of clusters with $N=2,3,4$ atoms. In each case the time range allows for
$100$ interactions between the clusters and the cavity field. We see that the cavity field takes more time to reach steady state with
larger clusters. This observation in fact ensures that the catalytic use of coherence is consistent with energy conservation. Each cluster
stores an energy of $N\omega_a/2$, which scales linearly with the number of atoms. If $M$ clusters are needed to bring the cavity
field to equilibrium, then the energy delivered from the clusters to the cavity will be a fraction of $MN\omega_a/2$. The change in the
energy of the
cavity field scales quadratically with $N$. Accordingly, if $M$ did not increase with $N$, we could in principle
transfer more energy than the stored amount by simply increasing $N$ to extreme values. Hence, the increase of $M$ with $N$
is required for consistency with energy conservation.
Fig.~\ref{fig:fig2}(c) shows the steady state values of the effective temperature of the cavity field $T_{\mathrm{eff},ss}$ and the
mean photon number in the cavity $\langle n\rangle_{ss}$ that are calculated via time-averaging over a period of
$t = 1500$ corresponding to $250$ successive interactions of $N=2,3,4,5,6$ atom clusters. Curve fitting yields the
relations $T_{\mathrm{eff},ss} =
T_c + 0.1N^2$ and $\langle n\rangle_{ss}=\langle n\rangle_c+0.095N^2$. The proportionality constant $\xi\sim0.1$, which appears in front of the $N^2$ scaling, is due to the single-atom micromaser emission intensity. In micromasers with random arrival times of pumped atoms, the analytical expression for the emission intensity is given by $I_{1}=rg^2t_{\mathrm{int}}^2P_e/\kappa$~\cite{liao2010single}, where $P_e$ is the probability of finding the two-level atom in its excited state. Inserting our parameters into this formula indeed confirms that the incoherent single-atom emission is
$\sim0.1$. On the other hand, we cannot immediately conclude that this formula is applicable to our set up of regular injection of clusters, for which the analytical verification of such a relation proves to be a difficult problem.
Nevertheless, we further test these relations via numerical investigations and find that
$\xi\approx4g^{2.3}$ for $\kappa=0.03$ and $\xi\approx0.008\kappa^{-0.7}$ for $g=0.19$. These numerical
fits comply with the analytical estimate.
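The quoted scaling can be extracted with a simple least-squares fit on a log-log scale; the sketch below uses synthetic stand-in values generated from the quoted law only so that it runs, the actual inputs being the steady-state photon numbers returned by the master-equation solver.
\begin{verbatim}
import numpy as np

N_vals = np.arange(2, 7)                 # cluster sizes of Fig. 2(c)
n_c = 0.156
n_ss = n_c + 0.095 * N_vals**2           # placeholder steady-state data

# fit <n>_ss - <n>_c = xi * N^alpha and read off the SR exponent
alpha, log_xi = np.polyfit(np.log(N_vals), np.log(n_ss - n_c), 1)
print(f"alpha = {alpha:.2f}, xi = {np.exp(log_xi):.3f}")   # ~2.00 and ~0.095
\end{verbatim}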
The mean number of photons in a thermal coherent
state (TCS) is given by $\langle n\rangle_\mathrm{TCS}=\langle n\rangle_\mathrm{th}+|\alpha|^2$. The effective temperature
description is not essential to comprehend or operate the thermalization. We recognize
that the physical temperature of the cavity remains the same and the atoms only transfer coherence to the field. The clusters therefore
act as a pure coherence reservoir. By fitting a TCS to the numerically determined density matrix in equilibrium, with fidelity $\sim 1$, we verify that $|\alpha|^2\sim 0.1N^2$. This quadratic enhancement of coherence transfer
into the cavity field can be translated into
useful work output in a photonic QHE, by associating the number of photons with the radiation pressure.
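As a quick numerical illustration of the TCS description (the Fock dimension and the value of $\alpha$ below are illustrative choices of ours), displacing a thermal state in QuTiP indeed adds $|\alpha|^2$ photons on top of the thermal occupation:
\begin{verbatim}
from qutip import thermal_dm, displace, expect, num

dim, n_th = 40, 0.156
alpha = (0.1 * 4**2) ** 0.5              # |alpha|^2 ~ 0.1 N^2 with N = 4
D = displace(dim, alpha)
rho_tcs = D * thermal_dm(dim, n_th) * D.dag()
print(expect(num(dim), rho_tcs))         # ~ 0.156 + 1.6
\end{verbatim}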
\subsection*{Superradiant quantum Otto engine}
We consider a four-stroke quantum Otto cycle for our photonic QHE. The working fluid, which is the photon gas inside the
cavity, is described by the
Hamiltonian $H_{f}=\omega a^{\dagger}a$ with eigenvalues $E_{n}$ $(n=0,1,2,\ldots)$. The corresponding
eigenstates $|n\rangle$ have occupation probabilities $P_{n}(T)=\exp{(-\beta E_{n})}/Z$.
In the ignition
stroke, the photon gas is heated in a quantum isochoric process to temperature $T_H$ and the occupation probabilities
change to $P_n(T_{H})$.
The eigenvalues remain constant and are denoted by $E_{n}^H$. There is only heat intake and no work is done.
This process is assumed to happen by the superradiant thermalization procedure where the photons interact
with coherent atomic clusters. For simulations we choose the starting temperature of the cavity field as
$T_c=0.5$, in units of $\omega_H$, which is the frequency of the atoms and the field.
After the thermalization, the photon gas effective temperature becomes $T_H\equiv T_{\mathrm{eff},\mathrm{ss}}$.
Both the energy and coherence
of the clusters are partially transferred to the photon gas, which can be described approximately by a thermal state
in the case of weak coherence. More generally, we do not need an effective temperature description and one can consider larger
coherence transfer by thermalization into a thermal coherent state. The clusters and the cavity field come into equilibrium
in terms of coherence, rather than temperature. This vision is in parallel with the recent ideas on the thermodynamics
of quantum coherence~\cite{rodriguez-rosario_thermodynamics_2013,lostaglio_quantum_2014,lostaglio_description_2015,cwiklinski2014limitations}
as well as with other generalizations of the second law and of thermodynamical principles~\cite{aberg_truly_2013,brandao_second_2015,skrzypczyk_work_2014}.
If we denote the initial thermal density matrix of the field
as $\rho_f(T_c)$, it evolves into a coherent thermal state $\rho_f(T_c,\alpha)$
in the steady state, where $|\alpha|^2\sim 0.1N^2$.
The second stage is the expansion stroke, which is a quantum adiabatic process
where work is done by the photon gas and the eigenvalues change to $E_n^L$ as the frequency is changed to
$\omega_L$; while there is no heat
exchange and the occupation probabilities remain the same, the physical temperature drops from $T_c$
by expansion. The off-diagonal elements remain the same as well, up
to a dynamical and a geometrical phase factor. The
diagonal elements of the density matrix, and hence the photon number,
remain the same and the magnitude of the coherence is preserved. Ideally the coherence would not vanish in the
adiabatic process, but in practice decoherence and dephasing would reduce it. The density matrix of the field at the end of the expansion can be written as
$\rho_f(T^\prime,\alpha^\prime)$.
In the subsequent exhaust stroke, the state of the photon gas should be transformed into a thermal one, $\rho_f(T_L)$,
in another quantum isochoric process; the occupation probabilities change to $P_n(T_L)$,
but the eigenvalues remain constant. An amount of heat is released, but no work is
done. Coherence can be transferred to the environment by dephasing or, in principle,
to the clusters. If the same clusters were used, it would be necessary to alter their
frequencies to $\omega_L$.
Due to conservation of the photon number and by variation of the atomic frequency, the coherence transfer is energetically allowed,
though dephasing and time-translation symmetry constraints make it limited~\cite{lostaglio_quantum_2014,lostaglio_description_2015,rodriguez-rosario_thermodynamics_2013}.
Accordingly, one could choose more practical methods than using clusters to
cool the cavity. The thermal density matrix of the field at the end of the exhaust stroke can be written as
$\rho_f(T_L)$.
The compression stroke is the last stage, where the eigenvalues change back to $E_n^{H}$ by variation of the photon frequency
from $\omega_L$ to $\omega_H$ in another quantum adiabatic process in which work is done on the cavity field without any
heat exchange, and the temperature rises back to $T_c$. The density matrix of the cavity field is set back to $\rho_f(T_c)$.
If the same clusters are used in the cooling stage, their frequency should be changed back to $\omega_H$.
Due to dephasing and fundamental
constraints on coherence transformations~\cite{lostaglio_quantum_2014,lostaglio_description_2015,rodriguez-rosario_thermodynamics_2013}, coherence would need to be
induced in the clusters at the beginning of every engine cycle, so that their state is set back to $\rho_a(0)$. The coherence production costs can be reduced by optimizing time-dependent control methods to reduce dephasing~\cite{erez_thermodynamic_2008} and by using schemes to recover
coherence as much as possible.
In our calculations, we employ the quantum mechanical interpretation of the first law of thermodynamics, where the heat absorbed $Q_{\mathrm{in}}$, the heat released $Q_{\mathrm{out}}$, and the net work done
$W$ are given by the relations~\cite{PhysRevLett.93.140403}
\begin{eqnarray}\label{eq:hwe}
Q_{\mathrm{in}}&=&\sum_nE_n^H\left[P_n(T_H)-P_n(T_L)\right],\nonumber\\
Q_{\mathrm{out}}&=&\sum_nE_n^L\left[P_n(T_L)-P_n(T_H)\right],\nonumber\\
W&=&Q_{\mathrm{in}}+Q_{\mathrm{out}}
=\sum_n\left[E_n^H-E_n^L\right]\left[P_n(T_H)-P_n(T_L)\right],
\end{eqnarray}
where $E_n^H$ ($E_n^L$) are the energy levels during the isochoric stages.
The efficiency $\eta$ is defined by $\eta=W/Q_{\mathrm{in}}$.
Throughout our analysis, we only consider
positive work extraction which obeys the relation $Q_{\mathrm{in}}>-Q_{\mathrm{out}}>0$ in accordance with the
second law of thermodynamics.
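As an illustration of Eq.~(\ref{eq:hwe}), the sketch below evaluates the (truncated) sums for a single-mode photon gas with $E_n=n\omega$ and thermal occupations; $\omega_H=1$ as in the text, while the values of $T_H$ and $\omega_L$ are illustrative choices of ours.
\begin{verbatim}
import numpy as np

def occupations(omega, T, nmax=200):
    """Thermal occupation probabilities P_n for E_n = n*omega at temperature T."""
    n = np.arange(nmax)
    p = np.exp(-n * omega / T)
    return n, p / p.sum()

omega_H, omega_L = 1.0, 0.2
T_H, T_L = 1.0, omega_L * 0.5            # T_L = omega_L * T_c with T_c = 0.5

n, P_H = occupations(omega_H, T_H)
_, P_L = occupations(omega_L, T_L)

Q_in  = np.sum(n * omega_H * (P_H - P_L))
Q_out = np.sum(n * omega_L * (P_L - P_H))
W     = Q_in + Q_out
print(W, W / Q_in, 1 - omega_L / omega_H)   # efficiency equals 1 - omega_L/omega_H
\end{verbatim}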
\begin{figure}
\caption{\textbf{Work output ($W$) of the photonic quantum Otto engine.}}
\label{fig:fig3}
\end{figure}
Using these definitions, it is straightforward to show that the work output is proportional to the
difference in the mean number of photons~\cite{PhysRevE.76.031105}
$W=\eta(\langle n\rangle_{\mathrm{ss}}-\langle n\rangle_L)$, where $\langle n\rangle_L=\langle n\rangle_c$ and
$\eta = 1-\omega_L$ (with $\omega_H=1$ in our units). These relations are plotted in Fig.~\ref{fig:fig3}. The work output $W$ is shown in
Fig.~\ref{fig:fig3}(a) for atomic clusters of
$N=2,3,4$ atoms. The positive work condition is independent of $N$ and given by $\omega_L<\omega_H$.
In the simulations, we find the temperature of the photon gas before the compression stroke $T_L$ for each $\omega_L$,
using the relation $T_L = \omega_L T_c$ which gives $T_L= \omega_L/2$ for $T_c=0.5$.
This relation is a consequence of the requirement to retain same
occupation probabilities in the quantum adiabatic process.
The positive work attains its maximum at maximum efficiency, when $\omega_L\ll1$; accordingly, at low $\omega_L$,
the photonic quantum Otto engine is able to translate the SR enhancement of the cavity intensity into its work output, as shown in
Fig.~\ref{fig:fig3}(b). We find that the harvested work is maximal
at maximum efficiency and obeys a power law, $W_{\mathrm{max}}\sim0.085 N^{2}$ at $\eta_{\mathrm{max}}\sim0.99$.
\section*{Discussion}
We considered a photonic QHE that undergoes a four-stroke Otto cycle. The working substance is taken as a
photon gas in an optical cavity. The coherent thermalization of the photon gas has been accomplished via
coupling two-level atom clusters, acting as quantum coherent fuel, with the cavity at regular time intervals.
The cluster atoms have initially been prepared in a thermal coherent spin state. The cavity field reaches an
equilibrium coherent thermal state. We find that the mean number of cavity photons
and the corresponding effective temperature scale with the square of the number of atoms in the clusters, due to SR.
We argued that the coherence of the quantum fuel functions as an effective catalyst for the engine cycle. It makes the
energy transfer to the working substance faster. It cannot be called a true catalyst due to fundamental and practical
constraints on its complete recovery,
imposed by time-translation symmetry as well as by rapid dephasing. Instead, it can be restored externally before every interaction to
imitate an effective catalytic use of coherence for enhancing the energy transfer rate.
The SR-enhanced thermalization is translated into the work output of the photonic Otto engine, which therefore scales quadratically with
the number of atoms in the cluster. Such a scaling law is a quantum coherent effect, which cannot be realized with classical fuels,
and hence exhibits a profound difference between quantum fuels and their classical counterparts.
Our proposed engine could work at higher temperatures, though still lower than the atomic and photon frequencies. In such
a case our model should be generalized to the Dicke Hamiltonian, which includes counter-rotating terms.
Instead of preparing the clusters at low temperatures, they could be prepared at higher temperatures, including $T_h>T_c$.
Beyond a critical coupling strength and below a critical temperature, such a model predicts a superradiant phase.
However, the critical coupling is in the ultrastrong interaction regime, $g/\omega>1$~\cite{wang1973phase},
and is challenging experimentally. Instead, if we keep inducing coherence externally in the clusters, we find that SR could
still occur at lower coupling strengths. Specifically, at $g/\omega\sim0.36$, which is in the SR regime, numerical simulations
give $T_{\mathrm{eff},\mathrm{ss}}=T_c+N^2$ at finite $T_a,T_c<1$. The fluctuations around the steady-state values are smaller in the Dicke model case.
Another advantage of inducing coherence is that the SR emission relies less on the critical time $t_{\mathrm{crt}}$~\cite{andreev1980collective}, which is required for the atoms to build up collective coherence inside the cavity.
It is inversely proportional to the injection rate of the atoms in non-dissipative systems~\cite{andreev1980collective}. By externally
inducing coherence, we do not need to increase the injection rate to initiate SR earlier. Combining noise-induced coherence
schemes~\cite{scully2011quantum} with the coherence reservoir can be an attractive extension of the superradiant QHE from the
perspective of the cost of coherence generation.
In contrast to the infinitely slow photonic Carnot engine, the superradiant QHE in an Otto cycle can produce finite power.
A dynamical simulation of the full cycle is necessary to examine the power output. We predict that the power scales
linearly with the number of atoms in the cluster, since the work output scales quadratically while the thermalization time increases with the cluster population.
We have recently examined a single multilevel atom as a quantum fuel for a photonic Carnot engine~\cite{turkpencce2015quantum}.
The atom was assumed to have multiple ground
states in coherent superposition, which are coupled to a common excited state by the cavity field. This effective model allows
for the analytical determination of the steady-state photon number and related thermodynamical properties in the Carnot cycle. It is
found that the work output scales quadratically with the number of quantum coherent ground states. Rapid dephasing
of the multilevel coherence, as well as the effective nature of the proposed level scheme and the vanishing power of the Carnot cycle,
severely limit the benefits of quantum coherence in a single multilevel atom used as a quantum fuel.
An intriguing question is then whether the quadratic enhancement in work harvesting capability, obtained both
for a single multilevel atom and for many atoms as quantum fuels, is a fundamental limiting power law associated
with the number of quantum resources, or
whether there can be more advantageous power laws, which can be translated into power output as well. Indeed,
one can imagine combining multilevel and SR enhancements.
There are proposals that quantum entangled initial preparations for SR from multilevel systems could give
a faster-than-quadratic increase in intensity~\cite{agarwal2011quantum}. Alternatively, SR emission in
photonic crystals can also yield higher power laws~\cite{john_localization_1995}.
Our superradiant photonic QHE could be further improved by
such extensions.
Atomic clusters with effective catalytic quantum coherence can be used in other QHE cycles and systems.
Their enhanced energy transfer rates make them fundamentally distinct from classical fuels,
and allow for technologically appealing power laws in the work output of QHEs.
\section*{Methods}\label{sec:Methods}
\subsection*{Computational algorithm}
We run our simulations using scientific Python packages and some key libraries of QuTiP~\cite{johansson2012qutip}. The numerical algorithm is prepared in one-to-one correspondence with the algorithm of the physical model. The inability to adiabatically eliminate the field mode ($g^2N/\kappa^2\gg1$), the requirement to take partial traces over
the atomic degrees of freedom after each injection, and the exponential increase of the Hilbert space dimension with the number of atoms in the cluster set a limit on the maximum number of atoms that can be used to thermalize the working fluid. The requirement to take partial traces over the atomic degrees of freedom also makes quantum Monte-Carlo trajectory methods unsuitable and forces us to use the full master equation approach.
Moreover, the requirement of ordered injection of atomic clusters makes parallel programming over multiple cores almost impossible. On the other hand, we use multi-threading (parallel computing over a single core) to accelerate the code and reduce the run time, though parallelization has no effect on the required dimension of the Hilbert space. A schematic version of the resulting injection loop is sketched below.
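A schematic version of the injection loop, reusing the objects defined in the model and initial-state sketches above (the number of injections $M$ and the time grids are illustrative):
\begin{verbatim}
r = 1.0 / (6.0 * t_int)                  # r * t_int = 1/6, as quoted in the text
a_f = destroy(nfock)                     # field-only operators for the free stage
H_free = wf * a_f.dag() * a_f
c_free = [np.sqrt(kappa) * a_f]

M = 100                                  # number of injected clusters
rho_f = rho_f0
for _ in range(M):
    rho_sys = tensor(rho_f, rho_a0)                          # fresh coherent cluster
    rho_sys = mesolve(H_sys, rho_sys,
                      np.linspace(0.0, t_int, 50), c_ops).states[-1]
    rho_f = rho_sys.ptrace(0)                                # trace out the atoms
    rho_f = mesolve(H_free, rho_f,
                    np.linspace(0.0, 1.0 / r - t_int, 50), c_free).states[-1]

n_ss = (rho_f * a_f.dag() * a_f).tr().real   # steady-state photon number, cf. Fig. 2
\end{verbatim}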
\subsection*{Effects of atomic decoherence}
The decoherence of the atomic clusters contributes to the dynamics through spontaneous emission and dephasing. The most general Lindblad dissipators read:
\begin{eqnarray}\label{eq:at_dec}
\nonumber L(x)\rho &=& \frac{\gamma}{2}(2x\rho x^{\dagger}-x^{\dagger}x\rho-\rho x^{\dagger}x),\\
L(y_i)\rho &=& \frac{\gamma}{2}\sum_{i=1}^{N} (2y_i\rho y_i^{\dagger}-y_i^{\dagger}y_i\rho-\rho y_i^{\dagger}y_i)
\end{eqnarray}
where $x\in \{S^{-}, S_{z}\}$ if the contributions are collective and $y_i\in \{\sigma_i^{-}, \sigma_{z}^{i}\}$ if the contributions are individual with a dissipation coefficient of $\gamma$.
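In the symmetric-subspace sketch used above, the collective dissipators of Eq.~(\ref{eq:at_dec}) simply become extra collapse operators (the rate chosen below is illustrative); the individual dissipators instead require the full $2^N$-dimensional atomic space:
\begin{verbatim}
gamma = 6 * kappa                          # upper end of the range explored below
c_ops_dec = c_ops + [np.sqrt(gamma) * Sm,  # collective emission  L(S^-)
                     np.sqrt(gamma) * Sz]  # collective dephasing L(S_z)
\end{verbatim}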
Our simulations revealed that the superradiant emission profile prevails, with slightly smaller effective temperatures,
for decoherence rates ranging from $0$ to $6\kappa$, with the same behaviour as in Fig.~\ref{fig:fig2}(a) and~\ref{fig:fig2}(b). This result follows from the fact that the passage times of the
atomic clusters are too small for the atomic decoherence to kick in and the transfer rate of the quantum coherence is robust ($\sim N^2$).
The most detrimental effect is found to be pure dephasing, and
hence we consider the pure dephasing rate as the main limiting factor
in determining the most suitable experimental set up.
In a typical microwave resonator with a frequency of $\omega\sim51$~GHz, the atomic dephasing time $T_2\sim116$~$\mu$s leads to the dephasing rate $\gamma_{\phi}\sim10^4$~Hz~\cite{blais2004cavity}. The magnitude of the dissipation is about $\kappa\sim10^3$~Hz~\cite{blais2004cavity}, therefore $\gamma_{\phi}\sim10\kappa$. Instead, if we consider an optical resonator system with a frequency of $\omega\sim350$~THz, we have $\gamma_{\phi}\sim\kappa\sim10^8$~Hz, though the coupling strength is about $g\sim220$~MHz and therefore $g\sim\kappa$~\cite{blais2004cavity}. Thus, in microwave and optical resonator systems, the superradiance conditions are not satisfied. A circuit quantum electrodynamics set up, on the other hand, with $\omega\sim10$~GHz, $g\sim100$~MHz, $\gamma_{\phi}/\omega\sim5\times10^{-6}$ and $\kappa/\omega\sim6\times10^{-4}$~\cite{blais2004cavity}, seems to be the most suitable modern resonator set up to satisfy the conditions for a superradiant heat engine.
Using circuit QED parameters, our numerical simulations give a thermalization time in the range of $30-60$~ns for the numbers of atoms considered in the manuscript. The interaction time is taken to be $\sim0.1$~ns. An optimization of the interaction time relative to the atomic decoherence times requires further examination, to test whether the available control methods for atom-field coupling and uncoupling in circuit QED can be sufficiently fast. In addition, the thermalization time is much larger than the typical time of the adiabatic stages, which should be longer than $1/\omega\sim0.1$~ns. The power output of the engine is therefore limited by the thermalization time.
\subsection*{Cost of coherence}
Let us consider a two-level atom which is in its ground state $| g\rangle$. To create the coherent superposition state $|\psi\rangle=(1/\sqrt{2})(| g\rangle+| e\rangle)$, we may
apply a square pulse with pulse duration $\tau_p$ and pulse amplitude $E_p$. The area of the pulse is given by the relation $A_p = dE_p\tau_p/\hbar$, with $d$ being the magnitude of the dipole-matrix element, so that the Rabi frequency is given by $\Omega_R=dE_p/\hbar$. We can approximate the pulse area by the tipping angle on the Bloch sphere, i.e., $A_p\sim\theta$, which must equal $\pi/2$ to obtain a coherent superposition state. If we denote the spontaneous emission rate of the two-level atom by $\gamma$, Fermi's golden rule leads to ($n\approx1$)
\begin{equation}
d^2=\frac{3\pi\epsilon_0\hbar c^3\gamma}{\omega},
\end{equation}
where $\epsilon_0$ is the vacuum permittivity, $c$ is the speed of light and $\omega$ is the resonant frequency of the field. We, then, obtain for the pulse amplitude that
\begin{equation}
E_p=\frac{\hbar\pi}{2\tau_p}\sqrt{\frac{\omega^3}{3\pi\epsilon_0\hbar c^3\gamma}},
\end{equation}
and thus, the pulse intensity reads
\begin{equation}
I_p=\frac{c\epsilon_0}{2}|E_p|^2=\frac{\pi\hbar\omega^3}{24c^2\tau_p^2\gamma}.
\end{equation}
For a beam with a width of $\delta$, the pulse energy is given by
\begin{equation}
U_p=I_p\pi(\delta/2)^2\tau_p=\hbar\omega\frac{\pi^2\omega^2\delta^2}{96c^2\tau_p\gamma}.
\end{equation}
We make use of the relations $\omega=2\pi c/\lambda$ and $\zeta=\lambda/\pi\delta$, with $\lambda$ and $\zeta$ being the wavelength of the field and the radial beam divergence, respectively. We obtain
\begin{equation}
U_p=\hbar\omega\frac{\pi^2}{24}\frac{1}{\tau_p\gamma}\frac{1}{\zeta^2}\sim\frac{\pi^2}{3}\hbar\omega\sim3\hbar\omega,
\end{equation}
where we set $1/(\tau_p\gamma)\sim2$ and $\zeta\sim0.5$. For an $N$-atom cluster, the cost of coherence is found to be $U_{\text{cost}}^{\prime}=NU_p$. Thus, if we need $m$ clusters to obtain superradiant thermalization of the working fluid, the total cost of coherence is given by $U_{\text{cost}}=mU_{\text{cost}}^{\prime}$.
If we use $N=2$ atom ensembles $m=250$ times to thermalize the working fluid, the maximum work output obtained from the superradiant heat engine is found to be $W_{\text{out}}\sim0.35\omega\sim10^{-25}$~J for a $\omega/2\pi=5$~GHz resonator. This output is then at least three orders of magnitude smaller than the cost, $U_{\text{cost}}=1500\omega\sim5\times10^3 W_{\text{out}}$.
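A back-of-the-envelope check of these numbers, in units of $\hbar\omega$, using the values quoted above:
\begin{verbatim}
import math

U_p = (math.pi**2 / 24) * 2 / 0.5**2   # 1/(tau_p*gamma) ~ 2, zeta ~ 0.5: ~3.3 per atom
N, m = 2, 250
U_cost = m * N * U_p                   # ~1.6e3, i.e. the quoted ~1500 omega
W_out = 0.35
print(U_p, U_cost, U_cost / W_out)     # ratio ~5e3: three orders of magnitude
\end{verbatim}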
It is crucial to distinguish the cost paid to generate the coherent atoms from the cost to maintain coherence in the working substance. The housekeeping cost to maintain coherence in the working fluid can reduce the thermodynamic efficiency~\cite{gardas2015thermodynamic}. In our case the atoms are the fuel and their coherence is the catalyst. The photon gas in the cavity is the working substance. Coherence does not need to be maintained in the working substance, the photon gas, in the operation cycle of the engine. The generation costs associated with the fuel are not included in the thermodynamical efficiency of engines. Accordingly, we do not include the cost of coherence in the determination of the engine efficiency.
Our calculation of the cost of coherence produces a result larger than the work output of the engine and is consistent with thermodynamical constraints. One could consider a round-trip efficiency as another figure of merit, namely the energy harvested relative to the total energy spent on the engine. From this perspective, the cost of quantum coherence generation must be reduced to make the proposed engine more appealing for certain applications.
\begin{thebibliography}{10}
\expandafter\ifx\csname url\endcsname\relax
\def\url#1{\texttt{#1}}\fi
\expandafter\ifx\csname urlprefix\endcsname\relax\def\urlprefix{URL }\fi
\providecommand{\bibinfo}[2]{#2}
\providecommand{\eprint}[2][]{\url{#2}}
\bibitem{dicke1954coherence}
\bibinfo{author}{Dicke, R.}
\newblock \bibinfo{title}{Coherence in spontaneous radiation processes}.
\newblock \emph{\bibinfo{journal}{Phys. Rev.}} \textbf{\bibinfo{volume}{93}},
\bibinfo{pages}{99} (\bibinfo{year}{1954}).
\bibitem{skribanowitz1973observation}
\bibinfo{author}{Skribanowitz, N.}, \bibinfo{author}{Herman, I.},
\bibinfo{author}{MacGillivray, J.} \& \bibinfo{author}{Feld, M.}
\newblock \bibinfo{title}{Observation of dicke superradiance in optically
pumped hf gas}.
\newblock \emph{\bibinfo{journal}{Phys. Rev. Lett.}}
\textbf{\bibinfo{volume}{30}}, \bibinfo{pages}{309} (\bibinfo{year}{1973}).
\bibitem{gross1976observation}
\bibinfo{author}{Gross, M.}, \bibinfo{author}{Fabre, C.},
\bibinfo{author}{Pillet, P.} \& \bibinfo{author}{Haroche, S.}
\newblock \bibinfo{title}{Observation of near-infrared dicke superradiance on
cascading transitions in atomic sodium}.
\newblock \emph{\bibinfo{journal}{Phys. Rev. Lett.}}
\textbf{\bibinfo{volume}{36}}, \bibinfo{pages}{1035} (\bibinfo{year}{1976}).
\bibitem{scheibner2007superradiance}
\bibinfo{author}{Scheibner, M.} \emph{et~al.}
\newblock \bibinfo{title}{Superradiance of quantum dots}.
\newblock \emph{\bibinfo{journal}{Nat. Phys.}} \textbf{\bibinfo{volume}{3}},
\bibinfo{pages}{106--110} (\bibinfo{year}{2007}).
\end{thebibliography}
\section*{Acknowledgments}
We thank G.~S.~Agarwal, A.~Imamoglu, H.~Tureci, I.~Adagideli and J.~Vaccaro for illuminating discussions.
A.~\"{U}.~C.~H.~acknowledges the COST Action
MP1209. A.~\"{U}.~C.~H.~and \"{O}.~E.~M.~gratefully
acknowledge hospitality of Princeton University Electrical Engineering Department
where early phases of this work are developed.
A.~\"{U}.~C.~H.~and \"{O}.~E.~M.~acknowledge the support
from Ko\c{c} University and Lockheed Martin Corporation Research Agreement. .
\section*{Author contributions statement}
\"{O}.~E.~M.~conceived the idea and developed the theory. A.~\"{U}.~C.~H.~derived the technical results and
carried out numerical simulations. A.~\"{U}.~C.~H.~and \"{O}.~E.~M.~wrote the manuscript.
\section*{Additional information}
Competing financial interests: The authors declare no competing financial interests.
\end{document}
\begin{document}
\title{Uniform convergence of the Fleming-Viot process in a hard killing metastable case}
\abstract{We study the long-time convergence of a Fleming-Viot process, in the case where the underlying process is a metastable diffusion killed when it reaches some level set. Through a coupling argument, we establish the long-time convergence of the Fleming-Viot process toward some stationary measure at an exponential rate independent of $N$, the size of the system, as well as uniform in time propagation of chaos estimates. }
\section{Introduction}
Given some open bounded domain $D\subset\mathbb R^d$, and some potential $U:\mathbb R^d\to \mathbb R_+$, we are interested in the process:
\begin{equation}\label{descente}
\text{d} X_t = -\nabla U(X_t)\text{d} t + \sqrt{2\varepsilon}\text{d} B_t
\end{equation}
with small $\varepsilon>0$, killed when it reaches $\partial D$ the boundary of $D$. More precisely, write:
\begin{equation}\label{deathtime}
\tau_{\partial D} =\inf\left\{t\geqslant 0, X_t\notin D\right\}.
\end{equation}
Denote by $\mathcal M^1(D)$ the set of probability measures on $D$, and $\mathbb P_\mu$ the law of the process~\eqref{descente}, with initial condition $\mu\in \mathcal M^1(D)$. Then we say that $\nu\in\mathcal M^1(D)$ is a quasi-stationary distribution (QSD) of the process \eqref{descente} if for all $t\geqslant 0$:
\[\procz{\nu}{X_t\in\cdot}{\tau_{\partial D}>t}=\nu.\]
It is shown in \cite{LelievreLeBris} that, under some mild assumptions on $U$ and $D$, the process \eqref{descente} admits a unique QSD, which we will denote by $\nu_{\infty}^{\varepsilon}$. It is also proven there that, for every initial condition, the law of the process conditioned on its survival converges toward this QSD, namely, for all $\mu\in\mathcal M^1(D)$,
\[\procz{\mu}{X_t\in\cdot}{\tau_{\partial D}>t}\underset{t\rightarrow+\infty}\longrightarrow \nu_{\infty}^{\varepsilon}.\]
The fact that the process is killed when it exits a domain is classically referred to as a \emph{hard killing} case, by contrast with the \emph{soft killing} case where the process is killed at a smooth rate, as in \cite{journel20}.
The present work is concerned with the question of sampling the QSD $\nu_{\infty}^{\varepsilon}$. More precisely, in practice, the QSD is approximated by the empirical measure, at stationarity, of a system of interacting particles called a Fleming-Viot (FV) process. This FV process is defined informally as follows: for a given $N$, let $X^1,\dots,X^N$ be $N$ independent diffusions until one of them reaches $\partial D$. The diffusion that has been killed then branches onto one of the $N-1$ remaining ones, chosen uniformly at random, and the procedure starts afresh. In very general settings, it is known that if the initial condition consists of $N$ independent random variables distributed according to a common law $\mu$, then for any time $t\geqslant 0$, we have:
\begin{equation}
\label{eq:propchaos}
a.s.\qquad
\pi^N(X^1_t,\dots,X^N_t)\overset{weak}{\underset{N\rightarrow\infty}\longrightarrow} \procz{\mu}{X_t\in\cdot}{\tau_{\partial D}>t},
\end{equation}
(see Section~\ref{RelWor} below) where
\begin{equation}\label{empiri}
\pi^N(x)= \frac{1}{N}\sum_{i=1}^N \delta_{x_i}
\end{equation}
stands for the empirical measure of a vector $(x_1,\dots,x_N)\in D^N$. This would simply be the law of large numbers if the particles were independent, which they are not due to the resurrection mechanism. For mean-field interacting particle systems as the FV process, such a convergence is known as a propagation of chaos phenomenon.
Two questions are addressed in this work. First, the long-time relaxation of the FV process toward its invariant measure: a quantitative convergence in the total variation distance sense at a rate independent from $N$ is stated in Theorem~\ref{thm}. Second, the propagation of chaos: Theorem~\ref{thm2} gives a quantitative version of \eqref{eq:propchaos}, with a bound uniform in time. Combining both results yields a quantitative estimate for the convergence of the empirical measure of the FV process toward the QSD as $N,t\rightarrow \infty$.
As detailed below, these results are established under the condition that $D$ is a metastable state for the diffusion \eqref{descente}, in the sense that the mixing time of \eqref{descente} within $D$ is shorter than the typical exit time from $D$. Mathematically speaking, this is reflected by the fact that $c^*$, the critical height within $D$, is smaller than $U_0$, the height of the boundary $\partial D$ (see below for the definition of $c^*$ and $U_0$), and the temperature $\varepsilon$ is small enough. This metastable context is typically the one where QSD are of interest, since in that case the (non-conditional) law of the process is close to the QSD for times in intermediate scales between the mixing time and the extinction time. Moreover, it is exactly the context of some algorithms in molecular dynamics, such as the parallel replica algorithm presented in \cite{LelievreLeBris}, which involves the sampling of the QSD. In fact, for technical reasons, we will work under a stronger condition than simply $c^*<U_0$ (see Assumption \ref{assu3} below), which is also related to the metastability of $D$. Although we have not been able to prove it, we believe that the proof could be modified to work under the condition $c^*<U_0$ alone, without the additional assumption.
The paper is organized as follows. In the rest of this introduction, the main results are stated in Section~\ref{main} and discussed in view of previous related works in Section~\ref{RelWor}. Some preliminary properties of the FV process are studied in Section~\ref{sec:proof}, and the main theorems are proven in Section~\ref{conclu}. Finally, we prove in Section~\ref{technique} the technical lemma which involves the additional condition.
\subsection{Main Result}\label{main}
Define the critical height $ c^* =c^*(U)$ of $U$ as $c^{\ast} = \sup_{x_1,x_2\in D}c(x_1,x_2)$ with
\[c(x_1,x_2) = \inf\left\{\max_{0\leqslant t \leqslant1}U(\xi(t))-U(x_1)-U(x_2) \right\},\]
where the infimum runs over $\left\{ \xi\in\mathcal{C}\left(\left[0,1\right], D\right),\xi(0)=x_1, \xi(1)=x_2 \right\}$. The critical height $c^*$ represents the largest energy barrier the process has to cross in order to go from any local minimum to any global one (within $D$).
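For instance (an illustrative example of ours, not used in the sequel), in dimension $d=1$ with $U(x)=(x^2-1)^2$ and $D=(-r,r)$ for some $r>\sqrt 2$, the two local minima $\pm1$ are separated by the saddle at $0$ with $U(0)=1$, so that $c^*=1$; since $U_0=(r^2-1)^2>1$, the metastability condition $U_0>c^*$ of Assumption~\ref{assu1} below holds.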
The following conditions are enforced throughout all this work.
\begin{assu}\label{assu1}
\begin{itemize}
\item $D\subset\mathbb R^d$ is open, bounded, connected and its boundary is $\mathcal C^2$.
\item $U:\mathbb R^d \to \mathbb R_+$ is smooth on some neighborhood of $\overline{D}$.
\item $\min_{D}U=0$.
\item $U|_{\partial D}$ is constant, and \begin{equation}\label{u0}
U_0=U(\partial D) >c^* .
\end{equation}
\item For $x\in\partial D$, denote by $n(x)$ the outward normal to $D$. For all $x\in \partial D$,
\begin{equation}\label{return}
n(x) \cdot \nabla U(x) > 0.
\end{equation}
\end{itemize}
\end{assu}
The condition $\min_{D}U=0$ is just a choice of normalisation since the process is unchanged if a constant is added to $U$. Under Assumption~\ref{assu1}, neglecting sub-exponential terms, for small $\varepsilon$, the mixing time of the non-killed process~\eqref{descente} is known to be of order $e^{c^*/\varepsilon}$ (see \cite{HoStKu}) while, according to the theory of Freidlin-Wentzell (see \cite{Freidlin-Wentzell}), the exit time $\tau_{\partial D}$ is of order $e^{U_0/\varepsilon}$. As already mentioned, the condition $U_0>c^*$ thus describes a difference of timescales between the mixing time and the death time. More precisely, it is known that for any neighborhood $B_1$ of $\partial D$ and any $a<U_0$,
$\sup_{x\in D\setminus B_1}\mathbb P_x(\tau_{\partial D}<e^{a/\varepsilon})$ vanishes with $\varepsilon$.
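To fix ideas, if for instance $c^*=1$ and $U_0=2$, then at temperature $\varepsilon=0.1$ the mixing time scale is $e^{c^*/\varepsilon}\approx 2.2\times10^{4}$ while the exit time scale is $e^{U_0/\varepsilon}\approx 4.9\times10^{8}$, so the process typically equilibrates inside $D$ long before it exits (these numerical values are given only as an illustration).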
We will need an even stronger uniformity in terms of the initial condition. For now, let us state it as an assumption:
\begin{assu}\label{assu2}
There exist $a>c^*$ and a neighborhood $B_1$ of $\partial D$ such that the following holds:
\begin{itemize}
\item $\min_{B_1}U>a$,
\item $\min_{x\in B_1}|\nabla U(x)|>0$,
\item for a fixed Brownian motion $(B_t)_{t\geqslant 0}$, denoting by $(X_t^x)_{t\geqslant 0}$ the solution of ~\eqref{descente} with initial condition $x$ for all $x\in D$ and by $\tau_{\partial D}(X^x)$ its first exit time from $D$, we have
\begin{equation}\label{strongdeathprob}
\bar p_\varepsilon:=\mathbb P\left(\exists x\in D\setminus B_1, \tau_{\partial D}(X^x)<e^{a/\varepsilon}\right)\underset{\varepsilon\rightarrow 0}{ \longrightarrow} 0.
\end{equation}
\end{itemize}
\end{assu}
We are able to prove that this is implied by the following condition, which in dimension $d>1$ strengthens~\eqref{u0}:
\begin{assu}\label{assu3}
One of the following is satisfied:
\begin{itemize}
\item $d=1.$
\item There exists a neighborhood $\tilde B_1$ of $\partial D$ such that $\inf_{\tilde B_1}|\nabla U| >0$ and
\[c^*<\frac{U_0-U_1}{2}\,,\qquad \text{where}\qquad U_1 = \min_{\tilde B_1} U\,.\]
\end{itemize}
\end{assu}
As stated in Lemma~\ref{unifexitevent} below, Assumptions \ref{assu1} and \ref{assu3} together imply Assumption~\ref{assu2}. However, we do not believe that Assumption~\ref{assu3} is sharp, and thus we state our main results in terms of Assumption~\ref{assu2}. Notice that Assumption~\ref{assu3} is always implied by Assumption~\ref{assu1} when $c^*=0$.
We define the semi-group $(P_t)_{t\geqslant0}$ associated to a Markov process $(X_t)_{t\geqslant 0}$ in $\mathbb R^d$ by:
\[P_tf(x)=\mathbb E_x(f(X_t))\]
for any bounded measurable function $f:\mathbb R^d\rightarrow \mathbb R$ and any $t\geqslant 0$, where $\mathbb E_x$ stands for the expectation under $\mathbb{P}_x=\mathbb{P}_{\delta_x}$.
Now, let us define rigorously the FV process, starting from some initial condition $\mu\in\mathcal M^1(D^N)$. Let $(I^i_n)_{1\leqslant i\leqslant N,n\in\mathbb N}$ be a family of independent random variables, where for $1\leqslant i\leqslant N$, $I^i_n$ is uniform on $\left\{1,\dots,N\right\}\setminus\left\{i\right\}$. Let $(B^i)_{1\leqslant i \leqslant N}$ be $N$ independent Brownian motions, and $\textbf{X}_0=(X_0^1,\dots,X_0^N)$ be distributed according to $\mu$. Define $\bar X^i$ as the solution to:
\[\bar X^i_t = X_0^i -\int_0^t \nabla U(\bar X^i_s) \text{d} s + \sqrt{2\varepsilon}B^i_t\]
and set
\[\tau_1=\min_i\inf\left\{t\geqslant 0, \bar X^i_t\notin D\right\}.\]
Then, denote by $i_1$ the index of the particle which exits the domain at time $\tau_1$. It is uniquely defined almost surely because, since the hitting time of the boundary has a density on $\mathbb R_+$, the probability that two particles hit the boundary at the same time is zero (this is true for the Brownian motion, and the general case follows from an application of the Girsanov theorem). For $i\neq i_1$ and $0\leqslant t \leqslant \tau_1$, or $i=i_1$ and $0\leqslant t < \tau_1$, simply let:
\[X^i_t = \bar X^i_t \qquad \text{and}\qquad X^{i_1}_{\tau_1}= \bar X^{I_1^{i_1}}_{\tau_1}.\]
This defines the process between times $0$ and $\tau_1$. The process is then defined on $\left(\tau_1,\infty\right)$ by induction: if the process is defined up to time $\tau_{n-1}$, we define it between times $\tau_{n-1}$ and $\tau_n$ in the same way, with $\textbf{X}_0$ replaced by $\textbf X_{\tau_{n-1}}$, $i_1$ by $i_n$ the index of the particle that hits $\partial D$ at time $\tau_n$, and $I_1^{i_1}$ by $I_n^{i_n}$. Thus, $(\tau_n)_n$ is the sequence of branching times of the process.
Under Assumption~\ref{assu1}, the FV process $\textbf X = (X^1,\dots,X^N)$ is well-defined and does not explode in finite time, meaning that $\sup_n \tau_n =\infty$ almost surely, see \cite[Theorem 2.1]{villemonais2010interacting}. This defines a Markov process, and we denote by $P^N=(P_t^N)_{t\geqslant 0}$ the associated semi-group.
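To help the reader visualize the dynamics, the following minimal Python sketch simulates a time-discretized FV process with an Euler--Maruyama scheme; the double-well potential $U(x)=(x^2-1)^2$, the domain $D=(-1.5,1.5)$, the temperature and all numerical parameters are hypothetical choices made only for this illustration, and the discretization itself is not analysed in the present paper, which deals with the continuous-time process.
\begin{verbatim}
import numpy as np

def simulate_fv(N=200, T=50.0, dt=1e-3, eps=0.3, seed=0):
    """Euler-Maruyama sketch of the FV process for U(x) = (x^2-1)^2 on D = (-1.5, 1.5)."""
    rng = np.random.default_rng(seed)
    grad_U = lambda x: 4.0 * x * (x ** 2 - 1.0)      # U'(x)
    a, b = -1.5, 1.5                                 # boundary of D
    X = rng.uniform(-0.5, 0.5, size=N)               # i.i.d. initial positions inside D
    for _ in range(int(T / dt)):
        X += -grad_U(X) * dt + np.sqrt(2.0 * eps * dt) * rng.standard_normal(N)
        dead = np.flatnonzero((X <= a) | (X >= b))   # particles killed during this step
        if dead.size:                                # each killed particle branches onto a
            alive = np.flatnonzero((X > a) & (X < b))    # uniformly chosen survivor
            X[dead] = X[rng.choice(alive, size=dead.size)]
    return X                                         # pi^N(X_T) approximates the QSD

print(simulate_fv().mean())
\end{verbatim}
Resampling among the survivors of the same time step is a standard simplification of the continuous-time rule, in which the killed particle jumps onto one of the other $N-1$ particles at the exact exit time.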
A law $\mu\in\mathcal M^1(D^N)$ is said to be exchangeable if it is invariant under any permutation of the particles, i.e. $(X^{\sigma(i)})_{i\in\llbracket 1,N\rrbracket} \sim \mu$ if $(X^{i})_{i\in\llbracket 1,N\rrbracket} \sim \mu$ for all permutations $\sigma$ of $\llbracket 1,N\rrbracket$. For $k\in\llbracket 1,N\rrbracket$, we denote by $\mu^k\in\mathcal M^1(D^k)$ the marginal law of the first $k$ particles under $\mu$ (which, for exchangeable laws, is thus the marginal law of any subset of $k$ particles).
Our first main result concerns the long time behavior of the FV process.
\begin{thm}\label{thm}
Under Assumptions~\ref{assu1} and \ref{assu2}, there exist $\varepsilon_0 ,c,C>0$ such that, for all $\varepsilon\in(0,\varepsilon_0]$, $N\in\mathbb{N}$, $t\geqslant 0$, setting $t_{\varepsilon} = e^{a/\varepsilon}$, the following holds.
\begin{enumerate}
\item For all $\mu,\nu\in\mathcal{M}^1(D^N)$,
\[\|\mu P^N_{t}-\nu P^N_{t}\|_{TV} \leqslant CN(1-c)^{t/t_\varepsilon}. \]
\item The semi-group $P^N$ admits a unique invariant measure $\nu^{N,\varepsilon}_\infty$, which is exchangeable.
\item For all exchangeable $\mu,\nu\in\mathcal M^1(D^N)$, for all $k\in\llbracket 1,N\rrbracket$,
\[\|(\mu P^{N}_{t})^k-(\nu P^{N}_{t})^k\|_{TV} \leqslant Ck(1-c)^{t/t_\varepsilon}. \]
\end{enumerate}
\end{thm}
Our second result is a uniform in time propagation of chaos estimate.
\begin{thm}\label{thm2}
Under Assumptions~\ref{assu1} and \ref{assu2}, there exists $\varepsilon_0>0$ such that for every compact set $K\subset D$ and all $\varepsilon\in(0,\varepsilon_0]$, there exist $C_\varepsilon,\eta_\varepsilon>0$ such that for all $\mu_0\in \mathcal{M}^1(D)$ satisfying $\mu_0(K)\geqslant 1/2$, all bounded $f:D\to\mathbb R_+$ and all $N\in\mathbb N$,
\begin{equation*}
\sup_{t\geqslant0} \mathbb E\left( \left|\int_D f\text{d} \pi^N(\mathbf{X}_t) - \mathbb E_{\pi^N(\mathbf X_0)}\left(f\left(X_t\right)\middle|\tau_{\partial D} >t\right) \right| \right) \leqslant \frac{C_\varepsilon\|f\|_{\infty}}{N^{\eta_\varepsilon}},
\end{equation*}
where $X$ solves \eqref{descente}, $\tau_{\partial D}$ is defined in \eqref{deathtime}, $\pi^N$ in \eqref{empiri}, and $\textbf X$ is a FV-process with initial condition $\mu_0^{\otimes N}$.
\end{thm}
In Theorem~\ref{thm}, the dependency in $N$ of the speed of convergence is the same as for $N$ independent diffusion processes. Moreover, the dependency in $\varepsilon$ is also sharp. Indeed, in Assumption~\ref{assu2}, we can take $a$ arbitrarily close to $c^*$, which means that we get a mixing time smaller than $e^{(c^*+\delta)/\varepsilon}$ for any $\delta>0$, which is the order of the mixing time for the non-killed process. However, as far as Theorem~\ref{thm2} is concerned, for independent processes, one would get from the Bienaym{\'e}-Chebyshev inequality the explicit rate $1/\sqrt N$. This is indeed what is proven in \cite[Theorem 1]{Villemonais} for the FV process, but with a bound that depends on time. In other words, we improve the result of \cite{Villemonais} to a uniform in time bound, but at the cost of a loss in the rate in $N$. Notice that $\eta_{\varepsilon}$ may be made explicit by carefully following the proofs.
\subsection{Related works}\label{RelWor}
Fleming-Viot processes were first introduced in the works of Fleming and Viot \cite{FlemingViot} and of Moran \cite{Moran}, in the study of population genetics models. Their use for the approximation of a QSD dates back to \cite{Burdzy1}, where the authors study the case of a Brownian motion in a rectangle. Since then, many results have been proven in different cases and for different questions (long-time convergence, propagation of chaos, existence of the FV process\dots).
In the case of a process in a countable state space, the study began with \cite{ferrari2007}. The FV process is well defined here as soon as the death rate is bounded, and the authors showed under several conditions the uniqueness of the QSD, the convergence toward this QSD, the ergodicity of the FV process, and the propagation of chaos for finite time and at equilibrium. In \cite{groisman2012simulation}, the authors improved the propagation of chaos with a quantitative rate, introducing the $\pi$-return process. In \cite{CloezThai}, the rate of convergence of the FV process is proven to be independent of $N$ under strong assumptions, using coupling arguments similar to those of the present work or of \cite{journel20}. As soon as the state space is finite, the existence of the FV process is immediate. In \cite{Asselah}, the propagation of chaos is proven for all times and for the stationary measure, with a stronger convergence. In \cite{LelievreReygner}, the convergence as $N\rightarrow\infty$ was refined with a central limit theorem.
For processes in a general space, many results are available. Finite time propagation of chaos is addressed in \cite{Burdzy2,GrigoKang,Villemonais,MicloDelMoral2}, with central limit theorems as $N\rightarrow\infty$ in \cite{Guyadersoft,Guyaderhard}. Then, uniform in time propagation of chaos and long-time convergence are established in \cite{MicloDelMoral,DelMoralGuionnet,Rousset}. The long-time convergence is established when the underlying process is a Brownian motion in \cite{Burdzy1}. If the killing rate is smooth and bounded, then the FV process is clearly well defined, but in the hard killing case non-explosion has been studied in \cite{bieniek2009nonextinction,bieniek2011extinction}, and, together with long-time convergence, in \cite{villemonais2010interacting}.
Other methods of approximation of a QSD have been developed in discrete and continuous cases in \cite{benaim2015,BenaimCloezPanloup,BCV}, based on self-interacting processes. The conditioned process and its long-time limit have also been studied over the last decade by Champagnat, Villemonais and coauthors, in \cite{ChampVilK2018,CV2020,Bansaye,DelMoralVillemonais2018}.
The coupling method used in the present work has been applied in several works about interacting particles systems (not only FV processes) such as \cite{Monmarche,CloezThai,journel20}. These works are based on a perturbation approach where the interaction is assumed to be small enough with respect to the mixing properties of the underlying Markov process. In particular, in our framework, we do not know if the conclusion of Theorem~\ref{thm} is true for any $\varepsilon>0$ and not only in the low temperature regime.
Our work follows the similar study \cite{journel20} devoted to the soft killing case. In fact, as mentioned there, the main motivation of that first work was to set up, in a simpler setting, a method that could then be used to tackle the hard killing case, which was the main objective from the beginning. Besides, \cite{journel20} also addresses the question of the time-discretization of \eqref{descente}, but in the present work we focus on the convergence in $N$ and $t$ and thus, for clarity, we only consider the continuous-time dynamics.
\section{Preliminary results}
\label{sec:proof}
For any two probability measures $\mu, \nu$, we call $(X,Y)$ a coupling of $\mu$ and $\nu$ if the law of $X$ (resp. $Y$) is $\mu$ (resp. $\nu$). For any distance $\mathbf d$ on a set $E$ (here $E=D$ or $E=D^N$), the associated Kantorovich distance on $\mathcal M^1(E)$ is defined by
\[W_{\mathbf d}(\mu,\nu) = \inf\left\{\mathbb E\left( \mathbf d(X,Y)\right), (X,Y) \text{ coupling of }\mu \text{ and }\nu \right\}.\]
We say that $(X,Y)$ is an optimal coupling if $W_{\mathbf d}(\mu,\nu) = \mathbb E\left( \mathbf d(X,Y)\right)$. The existence of an optimal coupling results from \cite[Theorem 4.1]{villani}. Given a Markov semi-group $P$, we call a coupling of $(\mu P_t)_{t\geqslant 0}$ and $(\nu P_t)_{t\geqslant 0}$ a stochastic process $(X_t,Y_t)_{t\geqslant 0}$ such that $(X_t)_{t\geqslant 0}$ and $(Y_t)_{t\geqslant 0}$ are Markov processes of semi-group $(P_t)_{t\geqslant 0}$ and initial condition $\mu$ for $X$ and $\nu$ for $Y$. In particular, we have that for such a coupling and all $t\geqslant 0$, \[W_{\mathbf d}(\mu P_t,\nu P_t) \leqslant \mathbb E\left( \mathbf d(X_t,Y_t) \right).\]We also say that the processes $X$ and $Y$ have coupled at time $t\geqslant 0$ if $X_t=Y_t$. Finally, in the case where $\mathbf d(x,y)=2\mathbbm{1}_{x\neq y}$, we recover the total variation distance, which we write $W_{\mathbf d}(\mu,\nu)=\|\mu-\nu \|_{TV}$.
The proof of our theorems relies on the construction of a coupling of $(\mu P_t^N)_{t\geqslant 0}$ and $(\nu P^N_t)_{t\geqslant 0}$. This coupling will yield that $P^N_t$ is a contraction for some particular distance defined in Section~\ref{conclu}.
In order to do this, we first need some preliminary results, which are the subject of this section. We start by studying the mixing properties of the non-killed process in Subsection~\ref{coupnonkil} by embedding $D$ into a torus. In Subsection~\ref{Lyapunov}, we construct a Lyapunov functional for each particle. In Subsection~\ref{partnearbound}, using the Lyapunov functional, we study the number of particles that may stay near the boundary of the domain.
In the rest of the paper, we fix some $a\in\left(c^*,U_0\right)$ satisfying Assumption~\ref{assu2} and set $t_\varepsilon=e^{a/\varepsilon}$. Furthermore, bold letters will always denote particle systems, in the sense that $\textbf{X}$ can always be written $\textbf{X}=(X^1,\dots,X^N)$ where for all $1\leqslant i\leqslant N$, $X^i\in D$.
\subsection{Coupling of the non-killed diffusion}\label{coupnonkil}
In this section, we show that we are able to couple two diffusions solution of \eqref{descente} on a torus in total variation distance in a time $t_\varepsilon$ with a probability that goes to 1 as $\varepsilon$ goes to 0, uniformly on $D$. Since we are studying a process killed at the boundary of $D$, we are not interested in what the potential might look like outside of $D$. Consider some torus $\mathbb T^d=\left(\mathbb R/2L\mathbb Z\right)^d$, with $L$ large enough so that, seeing $\mathbb T^d$ as $\left[-L,L\right[^d\subset\mathbb R^d$, we have $D\subset \mathbb T^d$. Then consider some periodic potential $\tilde U:\mathbb R^d\rightarrow\mathbb R_+$, equal to $U$ on $D$, and such that $c^*(\tilde U)=c^*(U)$, where $c^*(\tilde U)$ is defined as $c^*(U)$ with $U$ replaced by $\tilde U$. Such a function exists, as shown in \cite[Section 4]{FourTar}. We still denote by $\tilde U$ the associated function on $\mathbb T^d$, and this potential defines a diffusion on $\mathbb T^d$ as:
\begin{equation}\label{descente2}
\text{d} \tilde X_t = -\nabla \tilde U(\tilde X_t)\text{d} t + \sqrt{2\varepsilon}\text{d} B_t.
\end{equation}
We denote by $\tilde P$ its semi-group. If we see $\tilde X$ as a process in $\mathbb R^d$, then we have that $X_t=\tilde X_t$ for all $t\leqslant \tau_{\partial D}$, where $\tau_{\partial D}$ is the death time \eqref{deathtime}.
Now, we construct a coupling for the process $\tilde X$, for any initial condition $(x,y)\in (\mathbb T^d)^2$. To do this, we use Sobolev and Poincar\'e inequalities. The Sobolev inequality is used for ultra-contractivity, whereas the Poincar\'e inequality is used to get an optimal convergence rate for the process \eqref{descente2}. Let $\mu_\varepsilon$ denote the probability measure on $\mathbb T^d$:\[\mu_\varepsilon(\text{d} x) = \mathcal Z^{-1} e^{-\tilde U(x)/\varepsilon}\text{d} x,\qquad\text{where}\qquad \mathcal Z=\int_{\mathbb T^d}e^{-\tilde U(x)/\varepsilon}\text{d} x\] is the normalization constant. Recall these inequalities:
\begin{lem}\label{Sob}
$\mu_\varepsilon$ satisfies a Poincar\'e and a Sobolev inequality: there exist $p>2$, $C,\lambda_\varepsilon>0$, such that:
\[\varepsilon\ln\left(\lambda_\varepsilon\right)\rightarrow -c^*\]
as $\varepsilon\rightarrow 0$, and for all smooth $f:\mathbb T^d\mapsto\mathbb R$ with $\int_{\mathbb T^d} f \text{d} \mu_\varepsilon = 0$:
\[\lambda_{\varepsilon}\int_{\mathbb T^d} f^2 \text{d} \mu_\varepsilon \leqslant\int_{\mathbb T^d} |\nabla f|^2 \text{d} \mu_\varepsilon\ \text{ (PI)}\]
\[\left(\int_{\mathbb T^d} f^p \text{d} \mu_\varepsilon\right)^{\frac{2}{p}} \leqslant Ce^{\|\tilde U\|_{\infty}/\varepsilon}\left(\int_{\mathbb T^d} f^2 \text{d} \mu_\varepsilon + \int_{\mathbb T^d} |\nabla f|^2 \text{d} \mu_\varepsilon\right)\ \text{ (SI)}.\]
Moreover, for all $t>0$, the law of $\tilde X_t$ with initial condition $\delta_x$ has a density $h^\varepsilon_t(x,\cdot)$ with respect to $\mu_\varepsilon$, and both inequalities together imply the existence of some constant $\tilde C>0$ such that, for all $t\geqslant 1$ and $\varepsilon>0$,
\begin{equation}\label{TVCONV}
\|h^\varepsilon_t(\cdot,\cdot)-1\|_{\infty} \leqslant \tilde C e^{\tilde C \varepsilon^{-1} -\lambda_{\varepsilon}t}.
\end{equation}
\end{lem}
\begin{proof}
The Poincar{\'e} inequality, as well as the asymptotics of $\lambda_\varepsilon$, have been proven in \cite{HoStKu}.
The uniform measure on $\mathbb T^d$ satisfies a Sobolev inequality, see \cite[Section 6]{BakryGentilLedoux}. Then we can write:
\begin{align*}
\left(\int_{\mathbb T^d} f^p \text{d} \mu_\varepsilon\right)^{\frac{2}{p}} &\leqslant \mathcal{Z}^{-\frac{2}{p}} \left(\int_{\mathbb T^d} f^p\right)^{\frac{2}{p}} \\ & \leqslant C\mathcal{Z}^{-\frac{2}{p}} \left( \int_{\mathbb T^d} f^2 + \int_{\mathbb T^d} |\nabla f|^2 \right) \\ &\leqslant C\mathcal{Z}^{1-\frac{2}{p}} e^{\|\tilde U\|_{\infty}/\varepsilon} \left( \int_{\mathbb T^d}f^2\text{d} \mu_\varepsilon + \int_{\mathbb T^d} |\nabla f|^2 \text{d}\mu_\varepsilon \right).
\end{align*}
Since $\tilde U\geqslant 0$, $\mathcal{Z}$ is bounded by the volume of $\mathbb T^d$, hence $\mathcal{Z}^{1-\frac{2}{p}}$ is bounded uniformly in $\varepsilon$ because $p>2$. Thus, we get the Sobolev inequality with the stated constant.
The last two points are \cite[Theorem 6.3.1 and Proposition 6.3.4]{BakryGentilLedoux}.
\end{proof}
\begin{lem}\label{coupopt}
Under Assumption~\ref{assu1}, there exists $\varepsilon_0>0$, such that for all $0<\varepsilon<\varepsilon_0$, there exists $c_{\varepsilon}>0$ such that for all $x,y\in D$, there exists a coupling $(\tilde X_t,\tilde Y_t)_{t\geqslant 0}$ of $(\delta_x\tilde P_t)_{t\geqslant 0}$ and $(\delta_y\tilde P_t)_{t\geqslant 0}$ such that: \[\pro{\tilde X_{ t_\varepsilon}= \tilde Y_{ t_\varepsilon}}\geqslant c_\varepsilon.\]Moreover, as $\varepsilon\rightarrow0$, \[c_\varepsilon\rightarrow 1.\]
\end{lem}
\begin{proof}
We start by bounding the total variation distance between the law of $\tilde X_t$ and the equilibrium $\mu_\varepsilon$. Recall that $h^\varepsilon_t(x,\cdot)$ denotes the density of the law of $\tilde X_t$ with respect to $\mu_\varepsilon$. For $x\in D$, using \eqref{TVCONV}, we have:
\[\|\delta_x\tilde P_t - \mu_\varepsilon\|_{TV} = \int_{\mathbb T^d}|h^\varepsilon_t(x,\cdot)-1| \text{d} \mu_\varepsilon \leqslant \|h^\varepsilon_t(\cdot,\cdot)-1\|_{\infty} \leqslant \tilde C e^{\tilde C \varepsilon^{-1} -\lambda_{\varepsilon}t}.\]
Since $t_\varepsilon=e^{a/\varepsilon}$, and $\varepsilon\ln(\lambda_\varepsilon)\rightarrow -c^*$ as $\varepsilon\rightarrow0$, we have that $\lambda_\varepsilon t_\varepsilon\geqslant e^{b/\varepsilon}$, for $\varepsilon$ small enough and all $b<a-c^*$. Then, we may fix some $b<a-c^*$ and let:
\[c_\varepsilon = 1-2\tilde C e^{\tilde C \varepsilon^{-1} -e^{b/\varepsilon}},\]so that $\lim_{\varepsilon\rightarrow0}c_\varepsilon= 1$ and for all $x,y\in D$, \[\|\delta_x\tilde P_{t_\varepsilon} - \delta_y \tilde P_{t_\varepsilon}\|_{TV}\leqslant \|\delta_x\tilde P_{t_\varepsilon} - \mu_\varepsilon\|_{TV}+\|\delta_y\tilde P_{t_\varepsilon} - \mu_\varepsilon\|_{TV} \leqslant 1-c_\varepsilon.\]
The existence of the coupling of the trajectories follows from standard properties of the total variation distance, see for example \cite[Lemma 9]{Monmarche}.
\end{proof}
\subsection{Lyapunov functional}\label{Lyapunov}
In order to show the convergence of the particle system, we first need the construction of a Lyapunov function for each particle, that is to say a function of one particle that decreases on average over time, as long as it starts large enough. This is the part where we need $U$ to be constant on the boundary of $D$. This part relies on a result first shown in \cite{GrigoKang}, similar to It\^o's formula. The process~\eqref{descente} has a generator $\mathcal L$ defined for all smooth functions $f:\mathbb R^d\to\mathbb R$ with compact support as:
\begin{equation}
\mathcal L f= \varepsilon \Delta f - \nabla U\cdot \nabla f.
\end{equation}
For a smooth function $f:D^N\to\mathbb R_+$, and $1\leqslant i\leqslant N$, write $\mathcal L_{x_i}$ for the generator $\mathcal L$, acting only on the i-th variable:
\begin{equation*}
\mathcal L_{x_i}f = \sum_{j=1}^d \varepsilon \partial^2_{(x_i)_j}f - \partial_j U(x_i) \partial_{(x_i)_j}f.
\end{equation*}
Write as well $(\tau_n^i)_n$ for the sequence of death times of particle $i$:
\begin{equation}\label{eqf:deftauni}
\tau^i_{n+1} = \inf\left\{t> \tau^i_n, X_{t-}^i\in\partial D\right\},
\end{equation}
and $R_i$ for the point process corresponding to the jumps of this particle:
\[R_i(t)=\sum_{n=1}^{\infty}\mathbbm{1}_{\tau^i_n\leqslant t}.\]
For all $\textbf x\in D^N$, $1\leqslant i\neq j\leqslant N$, write:
\[ x^{ij}_k = \left\{ \begin{matrix} x_k\text{ if }k\neq i \\ x_j \text{ else }\end{matrix} \right..\]
\begin{prop}[\cite{GrigoKang}, Proposition 1]\label{Ito}
Let $N\in\mathbb N$, $f\in\mathcal C^0\left(\bar{D}^N\right)\bigcap\mathcal C^{\infty}\left(D^N\right)$. Denote by \[\mathbf{R}(f)(t)=\sum_{i=1}^N\frac{1}{N-1}\sum_{j\neq i}\int_0^t \left(f(\mathbf{X}^{ij}_{s-})-f(\mathbf{X}_{s-})\right)\text{d} R_i(s)\] the finite variation process of the jump part, and \[Q(f)(t)=\int_0^t \sum_{i=1}^N \mathcal L_{x_i}f(\mathbf{X}_s)\text{d} s\] the finite variation process of the diffusion part. Then there exists a martingale $\mathcal M(f)$ such that almost surely for all $t\geqslant 0$: \[f(\mathbf{X}_t)-f(\mathbf{X}_0)= \mathbf R(f)(t) + Q(f)(t) + \mathcal M(f)_t. \]
Moreover,
\[\mathbf R(f)(t)+\mathcal M(f)_t = \sum_{i=1}^N\int_0^t \nabla_{x_i}f(\textbf X_s)\cdot \text{d} B^i_s + \sum_{n,\tau_n\leqslant t}f(\textbf X_{\tau_n})-f(\textbf X_{\tau_n-}),\]
where the $(B^i)$'s are the Brownian motions used in the definition of the FV process, and the $(\tau_n)$'s are the death times.
\end{prop}
This decomposition will allow us to prove the existence of the Lyapunov functional. The idea is the following: the function $V$ will be equal to $U$ at the center of the domain, and near its boundary, $V$ will be such that $\nabla V$ is proportional to $\nabla U$, but with $|\nabla V|$ possibly greater than $|\nabla U|$. Since $n(x)\cdot \nabla U(x)>0$ on the boundary, we will also have $n(x)\cdot \nabla V(x)>0$, which means that, near the boundary, $V$ tends to decrease along~\eqref{descente}. If in addition $V$ is maximal at the boundary, then it can only decrease at a jump time.
\begin{lem}\label{Lyapu}
Under Assumption~\ref{assu1}, there exists $C_1>0$ such that for all $V_0>0$ with
\[V_0 \geqslant 4\sup_D U +1,\] there exist $\varepsilon_0>0$ and a smooth function $V:\bar D\mapsto\mathbb R_+$ such that $V$ is constant equal to $V_0$ on $\partial D$, $\sup_D V=V_0$, $4C_1<V_0$, and for all $N\in\mathbb N$, $0<\varepsilon<\varepsilon_0$, $\textbf x\in D^N$, and $1\leqslant i\leqslant N$, we have:
\begin{equation}\label{lyapu}
\mathbb E_{\textbf x}\left(V\left(X^i_{t_\varepsilon}\right)\right) \leqslant \gamma_{\varepsilon}V(x_i) + C_1(1-\gamma_{\varepsilon})
\end{equation}
where $\gamma_\varepsilon \in(0,1)$ is independent from $\textbf{x}$ and vanishes as $\varepsilon\rightarrow 0$.
\end{lem}
The value $V_0$ of $V$ on the boundary is a fixed parameter that will be chosen in Section~\ref{conclu}.
\begin{proof}
Fix some $V_0\geqslant 4\sup_DU+1=:4\theta +1$. Under Assumption~\ref{assu1}, there exists some set $F\subset D$ such that $\mathbb R^d\setminus F$ is a neighborhood of $\partial D$, there are no critical points of $U$ on $D\setminus F$ and \[\sup_{D\setminus F}U\leqslant U_0.\]
Write $U_i=\min_{D\setminus F} U$.
Write as well \[\zeta =\inf_{x\in D\setminus F}|\nabla U(x)| > 0.\]Let $f:\left[0,U_0\right]\mapsto \mathbb R$ be some smooth function satisfying the following conditions:
\begin{itemize}
\item for all $u\leqslant U_i$, $f(u)=u$,
\item $f(U_0)=V_0$,
\item $\min_{U_i \leqslant u\leqslant U_0}f'(u)>0$.
\end{itemize}
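One admissible choice (given here only for concreteness; any function satisfying the three conditions above works) is $f(u)=u+(V_0-U_0)\chi(u)$, where $\chi:[0,U_0]\to[0,1]$ is smooth, non-decreasing, equal to $0$ on $[0,U_i]$ and to $1$ at $U_0$: then $f(U_0)=V_0$ and, since $V_0\geqslant 4\sup_DU+1>U_0$, we get $f'\geqslant 1$ on $[U_i,U_0]$.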
Then, set $V(x) = U(x)$ for all $x\in F$, and $V(x)=f(U(x))$ for all $x\notin F$. The function $V$ is smooth, constant equal to $V_0=f(U_0)$ on $\partial D$, bounded above by $V_0$, and we have for all $x\in D\setminus F$:
\[\mathcal{L}V = f'(U)\left(\varepsilon\Delta U-|\nabla U|^2\right) + \varepsilon f''(U)|\nabla U|^2.\]
Recall that $|\nabla U|$ is positive on $D\setminus F$. We consider $\varepsilon_0$ small enough so that on $D\setminus F$ we have, for all $\varepsilon<\varepsilon_0$: \[\varepsilon f'(U)|\Delta U|+\varepsilon |f''(U)||\nabla U|^2 \leqslant \frac{1}{2} f'(U)|\nabla U|^2.\] We then have for all $x\in D\setminus F$:
\[\mathcal LV(x)\leqslant -\omega V(x)\]
where
\[\omega = \frac{\zeta^2}{4V_0}\min_{U_i \leqslant u\leqslant U_0}f'(u)\]
is independent of $\varepsilon$. Now, for $x\in F$:
\[\mathcal L V(x) \leqslant \varepsilon \sup_D|\Delta U| + \omega \theta - \omega V(x).\]
This inequality is thus true for all $x\in D$. Hence, we may plug it into the formula of Proposition~\ref{Ito} with $f(\textbf x)=V(x_i)$. Using the fact that $V$ is maximal on the boundary of $D$, $\textbf R(V) \leqslant 0$, and hence for all $0\leqslant s\leqslant t$:
\[\mathbb E\left(V\left(X^i_t\right)\right) - \mathbb E\left(V\left(X^i_s\right)\right) \leqslant \int_s^t \left( -\omega \mathbb E\left(V\left(X^i_u\right)\right) + \varepsilon\|\Delta U\|_{\infty}+\omega \theta \right)\text{d} u.\]
Writing $g(t)=\mathbb E\left(V\left(X^i_t\right)\right) - \theta - \varepsilon\|\Delta U\|_{\infty}/\omega$, we have for $t\geqslant 0$ and $h>0$:
\[g(t+h)-g(t)\leqslant -\omega\int_t^{t+h} g(u)\text{d} u. \]We now use the fact that $g$ is continuous, which is proven at the end of this proof. Fix $\eta>0$ and write:
\[T_\eta=\min\left\{t\geqslant 0, g(t)\geqslant g(0)e^{-\omega t}+\eta\right\}.\]Suppose that $T_\eta<\infty$ and write:
\[s_\eta=\max\left\{ 0\leqslant s\leqslant T_\eta, g(s)\leqslant g(0)e^{-\omega s}\right\}.\]Then by continuity we have:
\begin{multline*}g(0)e^{-\omega T_\eta } + \eta - g(0)e^{-\omega s_\eta} = g(T_\eta) - g(s_\eta) \leqslant -\omega\int_{s_\eta}^{T_\eta}g(u)\text{d} u \\< -\omega\int_{s_\eta}^{T_\eta}g(0)e^{-\omega u}\text{d} u = g(0)e^{-\omega T_\eta } - g(0)e^{-\omega s_\eta},
\end{multline*}hence necessarily $T_\eta=\infty$, for all $\eta>0$, and thus for all $t\geqslant 0$, \[g(t)\leqslant g(0)e^{-\omega t},\]and
\[\mathbb E\left(V\left(X^i_{t_\varepsilon}\right)\right) \leqslant \gamma_{\varepsilon}V(x_i) + \frac{\varepsilon\|\Delta U\|_{\infty}+\omega \theta}{\omega}\left(1-\gamma_{\varepsilon}\right),\]
with $\gamma_{\varepsilon}=e^{-\omega t_\varepsilon}$. Since $V_0>4\theta+1$, we may take $\varepsilon_0$ small enough so that, for all $\varepsilon < \varepsilon_0$: \[4\frac{\varepsilon\|\Delta U\|_{\infty}+\omega \theta}{\omega} < 4\sup_D U+1 \leqslant V_0,\]and hence the result with $C_1=\sup_FU+1/4$.
We are left to show that $t\mapsto \mathbb E(V(X^i_t))$ is continuous. Write $V^i(\textbf x)=V(x_i)$. Then from Proposition~\ref{Ito}, we get that:
\[\mathbb E(V(X^i_t))=\mathbb E(V(X^i_0)) + \mathbb E(Q(V^i)(t)) + \mathbb E(\textbf R(V^i)(t)).\]
Since $\mathcal L_{x_i}V$ is bounded, $t\mapsto \mathbb E(Q(V^i)(t))$ is continuous. Fix $t\geqslant 0$, $\eta>0$. The FV process does not explode in finite time, hence we may fix $M>0$ such that:
\[\mathbb P(\text{the number of death events before time }2t\text{ is greater than }M)\leqslant \eta.\]For $0\leqslant s\leqslant t$, we have:
\[\left|\mathbb E\left(\int_s^t \left(V(X^j_{u-}) - V(X^i_{u-}) \right) \text{d} R_i(u)\right) \right| \leqslant \eta + 2M\|V\|_{\infty}\mathbb P(X^i\text{ dies between times }s\text{ and }t ).\]This last probability goes to $0$ as $s\rightarrow t$, and hence we may take $s$ close enough to $t$ so that:
\[2M\|V\|_{\infty}\mathbb P(X^i\text{ dies between time }s\text{ and }t )\leqslant \eta.\]The same goes if $s\geqslant t$, and this implies the continuity of $t\mapsto \mathbb E(\textbf R(V^i)(t))$.
\end{proof}
\begin{rem}\label{remarque}
When Assumption~\ref{assu2} also holds, we will additionally suppose that $V$ is such that
\[B_1\subset \left\{V>m\right\}\]
for some $m>3C_1$, where $B_1$ is the set from Assumption~\ref{assu2}. Indeed, in the proof of Lemma~\ref{Lyapu}, we may choose $F$ such that $U_i<\min_{B_1}U$. In this case, since $V_0>4C_1$, we may impose on $f$ that $f(\min_{B_1} U)>3C_1$.
\end{rem}
We are now interested in the death probability of a particle.
\begin{prop}\label{prop:couplage}
Under Assumption~\ref{assu1}, denote by $(X_t)$ the diffusion \eqref{descente}, and
\[\tau_{\partial D}=\inf\left\{ t\geqslant 0; X_t \notin D\right\}.\]
Consider any $C_2\in\left(2C_1,4C_1\right)$, where $C_1$ is given in Lemma~\ref{Lyapu}. Then
we have:
\begin{equation}\label{deathprob}
p_{\varepsilon} :=\sup_{x\in\left\{V\leqslant C_2\right\}}\mathbb{P}_x(\tau_{\partial D} <t_\varepsilon) \rightarrow0,
\end{equation}
as $\varepsilon\rightarrow0$.
\end{prop}
\begin{proof}
Since $t_\varepsilon = e^{a/\varepsilon}$ with $a<U_0$, this follows from the theory of Freidlin and Wentzell, see \cite[Chapter 6, Theorem 6.2]{Freidlin-Wentzell}. Freidlin and Wentzell did not state the uniformity over the initial condition, but it follows from their proof.
\end{proof}
\subsection{Particles near the boundary}\label{partnearbound}
We want to control the number of particles which are close to the boundary of $D$ after a time $t_\varepsilon$. Consider the neighborhood \[B=\left\{V>3C_1\right\}\] of $\partial D$, where $C_1$ is the constant from Lemma~\ref{Lyapu}.
For $\textbf x=(x_1,\cdots,x_N)$, write:
\begin{equation}\label{numbbound}
A(\mathbf x)= \#\left\{i\in \llbracket1,N\rrbracket; x_i\in B\right\},
\end{equation}
where $\#$ stands for the cardinality of a set. We show that at time $t_{\varepsilon}$, the number of particles close to the boundary, $A(\mathbf{X}_{t_\varepsilon})$, is a small fraction of $N$ with high probability as $N$ goes to infinity or as $\varepsilon$ goes to $0$.
\begin{lem}\label{boundary}
For all $\alpha>0$, there exists $\varepsilon_0>0$ such that for all $\varepsilon<\varepsilon_0$, there exists $q_\varepsilon>0$ such that for all $N\in \mathbb N$ and $\mathbf x\in D^N$:
\begin{equation}\label{probbound}
\mathbb{P}_{\mathbf{x}} (A(\mathbf X_{t_\varepsilon})>\alpha N) \leqslant q_\varepsilon^N
\end{equation}
and $q_\varepsilon \rightarrow0$ as $\varepsilon\rightarrow0$.
\end{lem}
\begin{proof}
The idea of the proof is the following: we want to compare the evolution of $V(X^i_t)$ with Ornstein-Uhlenbeck processes with small variance. If we had $N$ independent diffusions, the result would follow from a simple enumeration, and the interaction through jumps can only make the Lyapunov functional decrease.
From Proposition~\ref{Ito} and the proof of Lemma~\ref{Lyapu}, we have that almost surely for all $1\leqslant i \leqslant N$ and $t\geqslant 0$:
\[V\left(X^i_t\right) \leqslant V(x_i) + \int_0^t \left(-\omega V\left(X^i_s\right)+ \omega C_1 \right) \text{d} s+ \sqrt{2\varepsilon}\int_0^t \nabla V\left(X^i_s\right)\cdot \text{d} B_s^i\]
for some $\omega>0$ (independent from $\varepsilon$). Now introduce for $1\leqslant i \leqslant N$ the process:
\[K_t^i = V(x_i)e^{- \omega t} + C_1(1-e^{-\omega t}) + \sqrt{2\varepsilon}\int_0^t e^{ \omega (s-t)} \nabla V(X^i_s)\cdot\text{d} B_s^i,\]
which solves
\[K_t^i = V(x_i) + \int_0^t \left(-\omega K_s^i + \omega C_1 \right) \text{d} s+ \sqrt{2\varepsilon}\int_0^t \nabla V\left(X^i_s\right)\cdot \text{d} B_s^i.\]
From Proposition~\ref{Ito}, we have that:
\[V(X^i_t)-K^i_t = \int_0^t \left(\mathcal LV(X^i_s) +\omega K^i_s-\omega C_1\right) \text{d} s + \sum_{n,\tau_n^i\leqslant t} V(X^i_{\tau_n^i})-V(X^i_{\tau_n^i-}),\]
where $(\tau_n^i)$ are the death times of particle number $i$ as defined in~\eqref{eqf:deftauni}. Moreover, $K^i$ is a continuous process, and $V(X^i)$ is continuous between death times. Hence, $V(X^i)-K^i$ is $\mathcal C^1$ between death times.
Let $f(x,y)=((x-y)_+)^2$, so that $f$ is differentiable, and is non-decreasing in the variable $x$. By construction of $V$, for all $n\in\mathbb N$, $V(X^i_{\tau_n^i})\leqslant V(X^i_{\tau_n^i-})$, hence we have for all $t\geqslant 0$:
\[\left(\left(V(X^i_{t})-K^i_{t}\right)_+\right)^2 \leqslant 2\int_0^{t} \omega\left(K^i_s-V\left(X^i_{s}\right)\right)\left(V\left(X^i_{s}\right)-K^i_s\right)_+\text{d} s \leqslant 0.\]
Thus, almost surely, $V\left(X^i_{t}\right)$ is bounded by $K^i_t$ for all $t\geqslant 0$, and we are left to show that with high probability, there are only a few $K^i$'s which are greater than $3C_1$ at time $t_\varepsilon$. Write:
\[G^i_t = \sqrt{2\varepsilon}\int_0^t e^{\omega (s-t)}\nabla V(X^i_s)\cdot \text{d} B^i_s. \]Fix some family of pairwise distinct indices $(i_1,\dots,i_k)\in \{1,\dots,N\}^k$. The $G^i$'s are $L^2$-martingales, hence for any $\xi \in\mathbb R$, $\xi\sum_{j=1}^k G^{i_j}$ is an $L^2$-martingale, and:
\[\exp \left( \xi\sum_{j=1}^k G^{i_j} - \xi^2 \left\langle \sum_{j=1}^k G^{i_j} \right\rangle\right)\]is a local-martingale. We have that $\left\langle G^i,G^j \right\rangle = 0$ for all $i\neq j$ because the Brownian motions are independent, hence \[\left\langle \sum_{j=1}^k G^{i_j} \right\rangle_t = \sum_{j=1}^k 2\varepsilon \int_0^t e^{2\omega (s-t)}|\nabla V(X^{i_j}_s)|^2 \text{d} s \leqslant \frac{\varepsilon k \|\nabla V\|_{\infty}^2}{\omega} ,\]and using Fatou's Lemma: \[\mathbb E\left(\exp \left( \xi\sum_{j=1}^k G^{i_j}_t\right) \right)\leqslant \exp\left(\frac{\varepsilon \xi^2 k \|\nabla V\|_{\infty}^2}{\omega}\right),\]for all $t\geqslant 0$.
Now we can write, using the Markov inequality:
\[\mathbb P\left( G^{i_1}_{t_\varepsilon} > C_1,\cdots,G^{i_k}_{t_\varepsilon} > C_1 \right) \leqslant \mathbb P\left( \exp \left( \xi\sum_{j=1}^k G^{i_j}\right) > e^{\xi k C_1} \right) \leqslant e^{-\xi k C_1}e^{\frac{\varepsilon \xi^2 k\|\nabla V\|_{\infty}^2}{\omega}}. \]
Taking $\xi = C_1 \omega/(2\varepsilon \|\nabla V\|_{\infty}^2)$, one gets:
\[\mathbb P\left( G^{i_1}_{t_\varepsilon} > C_1,\cdots,G^{i_k}_{t_\varepsilon} > C_1 \right) \leqslant \exp\left( -C_1^2 \omega/(4\varepsilon \|\nabla V\|_{\infty}^2) \right)^k =: \tilde q_\varepsilon^k.\]
We choose $\varepsilon_0$ small enough so that:\[V_0e^{- \omega t_{\varepsilon_0}} + C_1(1-e^{-\omega t_{\varepsilon_0}})<2C_1,\]and hence the same bound holds for all $\varepsilon<\varepsilon_0$, since $t_\varepsilon\geqslant t_{\varepsilon_0}$.
For all $1\leqslant i\leqslant N$, we then have:
\[ \left\{K^i_{t_\varepsilon}>3C_1\right\}\subset \left\{ G^i_{t_\varepsilon}> C_1 \right\},\]
and for every family of indices $(i_1,\dots,i_k)$ we have:
\begin{equation*}
\mathbb{P}\left(X^{i_1}_{t_\varepsilon}\in B,\dots,X^{i_k}_{t_\varepsilon}\in B\right) \leqslant \mathbb{P}\left(G^{i_1}_{t_\varepsilon}>C_1,\dots,G^{i_k}_{t_\varepsilon}>C_1\right)\leqslant \tilde q_\varepsilon^k.
\end{equation*}
Finally, we conclude with:
\begin{align*}
\mathbb{P}_x \left(A\left(\textbf X_{t_\varepsilon}\right)>\alpha N\right) &\leqslant \mathbb{P}\left(\text{There exist at least }\alpha N\text{ indices }i\text{ such that } X^i_{t_\varepsilon}\in B\right) \\ &\leqslant \sum_{\alpha N\leqslant k\leqslant N}\binom{N}{k} \tilde q_\varepsilon^k \\ &\leqslant \left(2\left(\tilde q_\varepsilon\right)^{\alpha}\right)^N =: q_\varepsilon^N.
\end{align*}
\end{proof}
\section{Proofs of the main theorems}\label{conclu}
Our goal is to construct a coupling of $\delta_{\mathbf{x}}P^N$ and $\delta_{\mathbf{y}}P^N$ for all $\mathbf{x},\mathbf{y}\in D^N$ in such a way that some distance $\mathbf{d}(\mathbf{x},\mathbf{y})$ is contracted on average by this coupling along time. The basic idea of the coupling is the following: particles are coupled in pairs, namely we want particle $i$ of the system starting at $\mathbf{x}$ to merge, after a time $t_\varepsilon$, with particle $i$ of the system starting at $\mathbf{y}$. However, contrary to the case of independent particles, even if two paired particles start at the same position (namely $x_i=y_i$), they have a positive probability to decouple before time $t_\varepsilon$. This can be particularly bad for some initial conditions: for instance, if most of the pairs start merged but close to the boundary while a decoupled pair sits in the middle of the domain, then a lot of decoupling will typically occur, as coupled pairs are reborn on the uncoupled pair. This issue will be tackled through the definition of the distance $\mathbf{d}$.
\subsection{Long time convergence}
We now construct the coupling of $(\delta_{\mathbf{x}} P^N_{t})_{t\geqslant 0}$ and $(\delta_{\mathbf{y}} P^N_{t})_{t\geqslant 0}$ for all $\textbf x,\textbf y\in D^N$, which will yield a bound on the distance between $\delta_{\mathbf{x}} P^N_{t_\varepsilon}$ and $\delta_{\mathbf{y}} P^N_{t_\varepsilon}$. Fix $\textbf x,\textbf y\in D^N$, and a sequence $(I^{i}_n)$ of independent random variables, where $I_n^i$ is uniform on $\llbracket 1,N \rrbracket\setminus\left\{i\right\}$.
For all $1\leqslant i \leqslant N$, consider a coupling $(\tilde X^i_t,\tilde Y^i_t)$ of the diffusion~\eqref{descente2} starting from $(x_i,y_i)$, such as the one constructed in Lemma~\ref{coupopt} (with these processes being independent for two different values of the index $i$). Recall that $\mathbb T^d= (\mathbb R/2L\mathbb Z)^d$, where $L$ is large enough so that we may consider that $D\subset \mathbb T^d$. We may then write \[\tilde \tau_1=\inf\left\{ t\geqslant 0, \exists i\in\llbracket 1,N\rrbracket, \tilde X^i_t \notin D \text{ or }\tilde Y^i_t \notin D \right\}.\]
Denote by $i_1$ the index of the particle that exits the domain at time $\tilde \tau_1$. For $i\neq i_1$ and $0\leqslant t \leqslant \tilde \tau_1$, as well as for $i=i_1$ and $0\leqslant t <\tilde \tau_1$, let:
\[X^i_t=\tilde X^i_t \qquad \text{and} \qquad Y_t^i= \tilde Y_t^i, \]
in the sense that $X^i_t$ (resp. $Y^i_t$) is the only point of $D$ whose projection is $\tilde X^i_t$ (resp. $\tilde Y^i_t$).
Finally, if $\tilde X^{i_1}_{\tilde\tau_1}\notin D$, then set $X^{i_1}_{\tilde\tau_1} = X^{I_1^{i_1}}_{\tilde\tau_1}$, else set $X^{i_1}_{\tilde\tau_1}=\tilde X^{i_1}_{\tilde\tau_1}$. The same goes for $Y^{i_1}_{\tilde \tau_1}$: if $\tilde Y^{i_1}_{\tilde\tau_1}\notin D$, then set $Y^{i_1}_{\tilde\tau_1} = Y^{I_1^{i_1}}_{\tilde\tau_1}$, else set $Y^{i_1}_{\tilde\tau_1}=\tilde Y^{i_1}_{\tilde\tau_1}$.
The coupling can then be constructed by induction, just as for the construction of the FV processes in the introduction.
In short, the coupling works as follows: two particles with the same index evolve as the coupling of the diffusion constructed in Lemma~\ref{coupopt} as long as neither of them dies, and if they die while coupled, then they are resurrected using the same index. By this we mean that the uniform variable $I_n^i$ used in the construction of the process in Section~\ref{main} is the same for both systems.
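The construction above is algorithmic, and a schematic implementation may help to visualize it. The following Python sketch is only an illustration: it works in dimension one, uses an Euler scheme, and replaces the optimal coupling of Lemma~\ref{coupopt} by a synchronous coupling (the same Brownian increments for both systems); the potential, the domain and all numerical parameters are placeholders and are not taken from the present paper.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)

# --- placeholders (assumptions of this sketch only) ---
D = (-1.0, 1.0)                        # the domain, here an interval
U_prime = lambda x: 4*x**3 - 2*x       # derivative of a double-well potential
eps, dt, N, T = 0.2, 1e-3, 50, 2.0     # temperature, step, particles, horizon

def in_D(x):
    return D[0] < x < D[1]

def step(x, dB):
    """One Euler step of dX = -U'(X) dt + sqrt(2*eps) dB."""
    return x - U_prime(x)*dt + np.sqrt(2*eps)*dB

def coupled_fv(x0, y0):
    """Two N-particle FV systems coupled as in the text: particles with the
    same index share their Brownian increments, and the SAME uniform rebirth
    index is used by both systems at each death event."""
    X, Y = np.array(x0, float), np.array(y0, float)
    for _ in range(int(T/dt)):
        dB = rng.normal(0.0, np.sqrt(dt), size=N)   # shared noise
        for i in range(N):
            xi, yi = step(X[i], dB[i]), step(Y[i], dB[i])
            if not in_D(xi) or not in_D(yi):
                # death event: one rebirth index, shared by the two systems
                j = rng.choice([k for k in range(N) if k != i])
                xi = xi if in_D(xi) else X[j]
                yi = yi if in_D(yi) else Y[j]
            X[i], Y[i] = xi, yi
    return X, Y

x0 = rng.uniform(-0.5, 0.5, size=N)
y0 = x0.copy()
y0[0] = -x0[0]                          # one pair starts uncoupled
X, Y = coupled_fv(x0, y0)
print("decoupled pairs at the end:", int(np.sum(X != Y)))
\end{verbatim}
With the synchronous coupling used in this sketch, the only way for a pair to decouple is to be resurrected on the initially uncoupled pair, which is exactly the mechanism discussed above.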
We will show that this coupling yields a contraction for the Wasserstein distance associated to a particular distance on $D^N$, namely:
\begin{equation}\label{distance}
\mathbf d(\mathbf{x},\mathbf{y}) = \sum_{i=1}^N\mathbbm{1}_{x_i\neq y_i}\left(1 + \beta V(x_i) + \beta V(y_i)\right) + \left(1+V_0\right)N\left(\mathbbm{1}_{A(\mathbf{x})>\alpha N}+\mathbbm{1}_{A(\mathbf{y})>\alpha N}\right)\mathbbm{1}_{\mathbf{x}\neq \mathbf{y}},
\end{equation}
where $\beta,\alpha>0$ are parameters that will be chosen small enough, and $A(\mathbf{x})$ has been defined in \eqref{numbbound}. We define as well:
\[d^1(x_i,y_i)=\mathbbm{1}_{x_i\neq y_i}\left(1 + \beta V(x_i) + \beta V(y_i) \right).\]
The meaning of this distance, which follows the construction of Hairer and Mattingly in \cite{HairerMattingly2008}, is the following: if $x_i\neq y_i$ and $V(x_i)+V(y_i) <C_2$, where $C_2$ is as in Proposition~\ref{prop:couplage}, then both particles of index $i$ are in the center of the domain at the initial time, and we are able to couple $X^i$ and $Y^i$ before time $t_\varepsilon$, and before they die, with high probability. If $x_i\neq y_i$ and $V(x_i)+V(y_i) \geqslant C_2$, then we may not be able to couple them, but the Lyapunov functional will decrease on average. In any case, if $x_i\neq y_i$, then $\mathbb E\left(d^1(X^i_t,Y^i_t)\right)$ will decrease between the initial time and time $t_\varepsilon$. If $x_i=y_i$, then we cannot expect any contraction of $\mathbb E\left(d^1(X^i_t,Y^i_t)\right)$, since it is equal to zero at the initial time, and the probability that $X^i$ and $Y^i$ decouple is positive (if they die and are resurrected on an uncoupled pair). In this case, if $x_i$ is in the center of the domain, then, as we will see below, the probability of decoupling is very small and will not be an issue. But if many particles are coupled at $t=0$ close to the boundary, many of them will get separated. This is why we added the additional term $N\left(\mathbbm{1}_{A(\mathbf{x})>\alpha N}+\mathbbm{1}_{A(\mathbf{y})>\alpha N}\right)\mathbbm{1}_{\mathbf{x}\neq \mathbf{y}}$ in the definition of $\mathbf{d}$. In this situation, this term is initially non-zero but, according to Lemma~\ref{boundary}, it will most likely vanish at time $t_\varepsilon$, which compensates for the non-zero terms appearing in the other parts of the distance. In other words, this term plays the role of a global Lyapunov function, by contrast with the pairwise Lyapunov function $V(x_i)+V(y_i)$.
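To make the role of the different terms concrete, here is a small Python helper evaluating $\mathbf d$ and $d^1$; the Lyapunov function, the set $B$ and the numerical constants below are placeholders standing for the objects fixed above, so this is only an illustration of the definition.
\begin{verbatim}
def make_distance(V, in_B, alpha, beta, V0):
    """Return callables d and d1 implementing the distance defined above.

    V    : the (bounded) Lyapunov function
    in_B : indicator of the region B close to the boundary
    """
    def d1(xi, yi):
        return 0.0 if xi == yi else 1.0 + beta*V(xi) + beta*V(yi)

    def A(x):                     # number of particles in B
        return sum(in_B(xi) for xi in x)

    def d(x, y):
        N = len(x)
        if all(xi == yi for xi, yi in zip(x, y)):
            return 0.0
        pairwise = sum(d1(xi, yi) for xi, yi in zip(x, y))
        global_term = (1.0 + V0)*N*((A(x) > alpha*N) + (A(y) > alpha*N))
        return pairwise + global_term

    return d, d1

# toy usage with placeholder choices
V = lambda x: min(x*x, 1.0)       # a bounded Lyapunov-like function
in_B = lambda x: abs(x) > 0.9     # "close to the boundary"
d, d1 = make_distance(V, in_B, alpha=0.1, beta=0.05, V0=1.0)
x = [0.0, 0.2, 0.95]
y = [0.0, -0.2, 0.95]
print(d(x, y), d1(x[1], y[1]))
\end{verbatim}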
We start by bounding from above the probability that a coupled pair decouples. This is the part where we use Assumption~\ref{assu2}.
\begin{lem}\label{deathprobability}
Under Assumptions~\ref{assu1} and \ref{assu2}, there exists $C_3$ such that for all $0<\alpha<1/4$, there exists $\varepsilon_0>0$ such that for all $0<\varepsilon<\varepsilon_0$, there exists $m_\varepsilon>0$, such that for all $N\in\mathbb{N}$, $\mathbf{x},\mathbf{y}\in D^N$ with $A(\mathbf{x}),A(\mathbf{y})\leqslant \alpha N$, and all $i\in\llbracket 1,N\rrbracket$ such that $x_i=y_i$, we have:
\[\mathbb{P}_{\mathbf{x},\mathbf{y}}\left(X^i_{t_\varepsilon}\neq Y^i_{t_\varepsilon}\right)\leqslant \left\{ \begin{matrix}
m_\varepsilon C_3\bar d(\mathbf{x},\mathbf{y})/N &\text{ if }x_i \notin B \\ C_3\bar d(\mathbf{x},\mathbf{y})/N &\text{ if }x_i\in B
\end{matrix} \right.\]where $\bar d(\mathbf{x},\mathbf{y}) = \sum_{i=1}^N \mathbbm{1}_{x_i\neq y_i}$, and $\lim_{\varepsilon\rightarrow0}m_\varepsilon =0$.
\end{lem}
An intermediate lemma is needed, whose goal is to bound the number of death events; it is here that we use the neighborhood $B_1$ of $\partial D$ from Assumption~\ref{assu2}.
\begin{lem}\label{deathnumber}
Under Assumptions~\ref{assu1} and~\ref{assu2}, let $B_1$ be the neighborhood of $\partial D$ from Assumption~\ref{assu2}.
Write the event:
\[\mathcal A=\left\{\#\left\{i\in \llbracket 1,N\rrbracket, \exists t\leqslant t_\varepsilon, X^i_t\in B_1 \right\} \geqslant 2\alpha N\right\}.\]
\begin{enumerate}
\item There exists $\varepsilon_0>0$ such that for all $0<\varepsilon<\varepsilon_0$, there exists $\tilde p_\varepsilon>0$, such that for all $0<\alpha<1/4$, $N\in\mathbb N$, $\mathbf{x}\in D^N$ with $A(\mathbf x)\leqslant \alpha N$,
\[\mathbb P_{\mathbf{ x}}(\mathcal A)\leqslant \left(2\tilde p_{\varepsilon}^{\alpha}\right)^N,\]and $\lim_{\varepsilon\rightarrow0}\tilde p_\varepsilon=0$.
\item Moreover, if $T$ denotes the number of rebirths in the system before time $t_\varepsilon$, there exist $\varepsilon_0,\sigma>0$ and $0<q<1$ such that for all $0<\varepsilon<\varepsilon_0$ and $0<\alpha<1/4$:
\[\mathbb P(T>\sigma N,\mathcal A^c)\leqslant q^N.\]
\item Write $T^i$ for the number of rebirths of particle $i$ before time $t_\varepsilon$. We also have that there exist $C,\varepsilon_0>0$ such that for all $0<\varepsilon<\varepsilon_0$, all $0<\alpha<1/4$ and all $\mathbf{x}\in D^N$ satisfying $A(\mathbf{x}) <\alpha N$:
\[\mathbb E_{\mathbf{x}}\left( T^i\mathbbm{1}_{\mathcal A^c}\right) \leqslant \left\{ \begin{matrix} C\bar p_\varepsilon\text{ if }x_i\notin B \\ C\text{ if }x_i\in B , \end{matrix} \right. \]
\end{enumerate}
where $\bar p_\varepsilon$ is given in~\eqref{strongdeathprob}.
\end{lem}
\begin{proof}
\begin{enumerate}
\item
At time $t=0$, the condition on $\mathbf{x}$ implies that there are less than $\alpha N$ particles in $B$. Under Assumption~\ref{assu2}, we assumed in Remark~\ref{remarque} that \[\min_{B_1} U > \max_{D\setminus B} U.\] Hence $B_1\subset B$, and this means that for $\mathcal A$ to happen, at least $\alpha N$ particles that were initially in $D\setminus B$ must have reached $B_1$ before time $t_{\varepsilon}$. Write:
\[\tilde p_\varepsilon = \sup_{x\in D\setminus B}\mathbb{P}_x\left(\tau_{B_1}<t_\varepsilon \right),\]
where $\tau_{B_1}$ is the first hitting time of the set $B_1$ for the diffusion~\eqref{descente}. Under Assumption~\ref{assu2}, $a< \min_{B_1}U$. Together with the fact that $\min_{B_1} U > \max_{D\setminus B} U$, \cite[Chapter 6, Theorem 6.2]{Freidlin-Wentzell} then yields that $\tilde p_\varepsilon\rightarrow 0$ as $\varepsilon\rightarrow 0$. The fact that a particle reaches $B_1$ only depends on the Brownian motion driving it, hence we have:
\[\mathbb P(\mathcal A)\leqslant \sum_{k\geqslant \alpha N} \binom{(1-\alpha)N}{k}\tilde p_\varepsilon^k \leqslant (2\tilde p^{\alpha}_{\varepsilon})^N.\]
\item In order to control the number of deaths of the $i^{th}$ particle up to time $t_\varepsilon$, we are going to distinguish two types of rebirth events: either the particle is resurrected on a particle which we know never reaches $B_1$ (i.e. stays away from the boundary), in which case we can bound the probability that the $i^{th}$ particle dies again, or it is resurrected on a particle for which we have no information, in which case it can be arbitrarily close to the boundary and the time before the next death of the $i^{th}$ particle can be arbitrarily small.
For convenience, we consider in the rest of the proof that the FV process has been defined through a construction similar to the one presented in Section~\ref{main}, except that the Brownian motions driving the SDEs are changed at each death event: along with the variables $(I_n^i)_{n\geqslant 0,i\in\llbracket 1,N\rrbracket}$, we consider a family of independent $d$-dimensional Brownian motions $((B^{n,i}_t)_{t\geqslant0})_{n\geqslant 0,i\in\llbracket 1,N\rrbracket}$, so that after its $n^{th}$ death and up to its $(n+1)^{th}$ death the position of particle $i$ is given by $X_{\tau_n^i+t}^i = \bar X_t^i$, where $\bar X^i$ is the solution of \eqref{descente} driven by $B^{n,i}$ with initial condition $\bar X_0^i = X^i_{\tau_n^i}$ (recall the notation $\tau_n^i$ from \eqref{eqf:deftauni}). Of course, the law of the process is unchanged by this construction.
Denote:
\[\textbf S=\left\{i\in \llbracket 1,N\rrbracket, \exists t<t_\varepsilon, X_t^i \in B_1\right\}.\]
Then the Markov inequality yields:
\begin{align*}
\mathbb P\left( T >\sigma N,\mathcal A^c\right) \leqslant e^{-\sigma N}\mathbb E\left(e^{T}\mathbbm{1}_{\mathcal A^c}\right) = e^{-\sigma N}\sum_{\underset{\#S< 2\alpha N}{S\in \mathcal P(\llbracket 1,N\rrbracket)}}\mathbb E\left(e^{\sum_{i=1}^N T^i}\mathbbm{1}_{\textbf S=S}\right).
\end{align*}
Fix $S\in \mathcal P(\llbracket 1,N\rrbracket)$ such that $\#S<2\alpha N$, and recall the definition of the variables $I^i_n$ used in the construction of the FV process, which are independent uniform variables on $\llbracket 1,N\rrbracket \setminus\left\{i\right\}$. We define by induction $P^i_0=0$ and:
\[P^i_k = \inf\left\{n>P_{k-1}^i, I_n^i\notin S\right\}.\]
Notice that, under the event $\{\mathbf{S}=S\}$, if $I_n^i\notin S$, it means that at its $n^{th}$ rebirth the particle $i$ is resurrected on a particle which never reaches $B_1$ before time $t_\varepsilon$.
Setting $k_0(i)=1$ if $x_i \in B_1$ and $k_0(i)=0$ otherwise, we define as well
\[P^i=\inf\left\{k \geqslant k_0(i), \forall x\in D\setminus B_1, \tau_D(X^{x,i,P_k^i}) >t_\varepsilon \right\},\]
where for $n\in\mathbb N$ the family of processes $(X^{x,i,n})_{x\in D\setminus B_1}$ is as in Assumption~\ref{assu2} and is driven by the Brownian motion $B^{n,i}$. Since we have already observed that, for all $k>0$, at its $(P_k^i)^{th}$ death, the particle $i$ is resurrected at a position in $D\setminus B_1$,
the event $\{\forall x\in D\setminus B_1, \tau_D(X^{x,i,P_k^i}) >t_\varepsilon\}$, which is measurable with respect to the Brownian motion $B^{P_k^i,i}$, implies that the particle does not die again before time $t_\varepsilon$. For $k=0$, it depends on whether initially $x_i \in B_1$: if $x_i \notin B_1$ (which is in particular the case if $x_i\notin B$) then, again, the event $\{\forall x\in D\setminus B_1, \tau_D(X^{x,i,0}) >t_\varepsilon\}$ implies that the particle does not die before time $t_\varepsilon$. This is not the case if $x_i\in B_1$. As a consequence, in any case, under the event $\{\mathbf{S}=S\}$, we can bound the total number of deaths of the $i^{th}$ particle by
\[T^i \leqslant \sum_{k=1}^{P^i} (P_k^i - P_{k-1}^i).\]
The variables $(P^i_{k}-P^i_{k-1})_{k\geqslant 1,i}$ are independent geometric random variables of parameter $1-\#S/N>1-2\alpha$. Under Assumption~\ref{assu2}, if $x_i \in B_1$ (resp. if $x_i\notin B_1$) then $P^i$ (resp. $P^i+1$) is a geometric random variable of parameter $1-\bar p_\varepsilon$. Moreover, $(P^i)_{1\leqslant i\leqslant N}$ is a family of independent random variables, independent from $(P_k^i)_{k\geqslant 1, 1\leqslant i\leqslant N}$. We have:
\[\mathbb E\left(e^{\sum_{i=1}^N T^i}\mathbbm{1}_{\mathbf S=S}\right) \leqslant \mathbb E\left( e^{\sum_{i=1}^N \sum_{k=1}^{P^i}(P^i_k-P^i_{k-1})} \right) = \left( \mathbb E\left( e^{\sum_{k=1}^{P^i}(P^i_k-P^i_{k-1})}\right)\right)^N.\]
We are left to show that $\mathbb E\left( \exp\left(\sum_{k=1}^{P^i}(P^i_k-P^i_{k-1})\right)\right)$ is finite and bounded uniformly in $\varepsilon<\varepsilon_0$; a small numerical illustration of this bound is sketched after the proof. Conditioning with respect to $P^i$ we get:
\begin{align*}
\mathbb E\left( e^{\sum_{k=1}^{P^i}(P^i_k-P^i_{k-1})}\right) = \mathbb E \left( \mathbb E\left( e^{P^i_1} \right)^{P^i}\right) \leqslant \mathbb E\left( \left( \frac{e}{1-e\alpha}\right)^{P^i}\right),
\end{align*}
hence the result, provided $\varepsilon_0$ satisfies $\bar p_{\varepsilon_0} < \frac{1-e\alpha}{e}$, since we can then bound
\begin{align*}
\mathbb P\left( T >\sigma N,\mathcal A^c\right) \leqslant \left( 2 e^{-\sigma } \mathbb E\left( \left( \frac{e}{1-e\alpha}\right)^{P^i}\right)\right)^N.
\end{align*}
\item In the same spirit, fix $i\in \llbracket 1,N\rrbracket$,
and write now:
\[P^i_k = \inf\left\{n>P_{k-1}^i, I_n^i\notin \textbf S^i\right\},\]
where
\[\textbf S^i=\left\{j\in \llbracket 1,N\rrbracket\setminus \left\{i\right\}, \exists t<t_\varepsilon, X_t^{j} \in B_1\right\}
,\]
and
\[\mathcal A^i=\left\{\#\left\{j\in \llbracket 1,N\rrbracket\setminus \left\{i\right\}, \exists t\leqslant t_\varepsilon, X^{j}_t\in B_1 \right\} \geqslant 2\alpha N\right\},\]
and the definition of $P^i$ does not change.
We have that $(P^i_k)_k$ and $P^i$ are independent random variables, and that $P^i$ is independent of $\mathcal A^i$ and $\textbf S^i$. Indeed, $\mathcal A^i$ and $\textbf S^i$ only depend on the Brownian motions that drive $(X^j)_{j\neq i}$. Under the event $(\mathcal A^i)^c$, the cardinality of $\mathbf{S}^i$ is less than $2\alpha N$. Furthermore, we have that $\mathcal A^i\subset\mathcal A$, and hence,
as in the previous step,
\[\mathbb E\left(T^i\mathbbm{1}_{\mathcal A^c}\right) \leqslant \mathbb E\left( \sum_{k=1}^{P^i}\left(P_k^i -P_{k-1}^i\right) \mathbbm{1}_{(\mathcal A^i)^c} \right) \ \leqslant \ \mathbb E\left( P^i\mathbb E(P_1^i|\textbf S^{i},\mathcal A^i,P^i)\right) \leqslant \frac{1}{1-2\alpha}\mathbb E(P^i),\]
and we conclude by bounding $\mathbb E(P^i) \leqslant \bar p_\varepsilon (1-\bar p_\varepsilon)^{-1}$
if $x_i\notin B$ (since, then, $x_i\notin B_1$) and, otherwise, $\mathbb E(P^i) \leqslant (1-\bar p_\varepsilon)^{-1}$.
\end{enumerate}
\end{proof}
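As announced in item (2) of the proof above, the key quantitative input is an exponential moment of a geometric sum of geometric variables. The following Python sketch (with placeholder values of $\alpha$ and $\bar p_\varepsilon$; not part of the argument) simply evaluates the closed-form bounds used above and exhibits a value of $\sigma$ making the resulting quantity $q$ smaller than $1$.
\begin{verbatim}
import math

# placeholder values (assumptions of this sketch): alpha < 1/4, pbar small
alpha, pbar = 0.05, 0.01

m = math.e / (1.0 - math.e*alpha)        # bound on E[exp(P_k - P_{k-1})]
# P (or P+1) is geometric of parameter 1 - pbar, so
# E[m**P] <= (1 - pbar)*m / (1 - pbar*m), finite as soon as pbar*m < 1
# (this is the condition on eps_0 appearing in the proof)
E_mP = (1.0 - pbar)*m / (1.0 - pbar*m)

# choose sigma large enough so that q := 2*exp(-sigma)*E[m**P] < 1
sigma = math.log(2.0*E_mP) + 1.0
q = 2.0*math.exp(-sigma)*E_mP
print(m, E_mP, q)                        # q = exp(-1) < 1 by construction
\end{verbatim}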
\begin{proof}[Proof of Lemma~\ref{deathprobability}]
Define the sets:
\begin{align*}
&U_1(0) = \left\{i\in\llbracket 1,N\rrbracket,\ x_i\neq y_i\right\},\\
&U_2(0) = \left\{i\in\llbracket 1,N\rrbracket,\ x_i=y_i\right\}.
\end{align*}
Now, for $t\geqslant 0$, we want to define some sets $U_1(t)$, $U_2(t)$, such that if $X^i$ and $Y^i$ decouple at some time $s\geqslant 0$, then for all $t\geqslant s$, $i\in U_1(t)$. For $i\in U_2(0)$, $n\in\mathbb N$, write:
\[\tau^i_n=\inf\left\{ t > \tau_{n-1}^i,\ X_{t-}^i=Y^i_{t-}\in \partial D \right\},\]
as in~\eqref{eqf:deftauni}, and \[\bar \tau^i_d = \inf\left\{t\geqslant 0,\ X^i_t\neq Y_t^i\right\}.\]
Since the FV process is well-defined, almost surely there is only a finite number of such events before time $t_\varepsilon$, for all $1\leqslant i\leqslant N$; we denote by $(\tau_k)_{k\geqslant 1}$ the increasing enumeration of all these times (with $\tau_0=0$). Then define the sets $U_1(t)$ and $U_2(t)$ for $t\in (\tau_{k-1},\tau_k]$ by induction on $k\geqslant 1$. Assume that the sets have been defined up to time $\tau_{k-1}$ for some $k\geqslant 1$. Set $U_j(t)=U_j( \tau_{k-1})$ for all $t\in( \tau_{k-1}, \tau_k)$. Let $i\in\llbracket 1,N\rrbracket$ be the index such that $\tau_k\in \cup_{n\in\mathbb N}\{\tau^i_n\}\cup\{\bar \tau^i_d\}$. Now we distinguish two cases. If $\tau_k\neq \bar\tau^i_d$, then $U_j(\tau_k)=U_j(\tau_{k-1})$ for $j=1,2$. Else set:
\[U_1(\tau_k) = U_1(\tau_{k-1}) \cup \{i\}\,,\quad U_2(\tau_k) = U_2(\tau_{k-1}) \cap \{i\}^c \,.\]
It is immediate to check that $U_1(t)$ and $U_2(t)$ form a partition of $\llbracket 1,N\rrbracket$ for all $t\geqslant 0$, and that $U_1(t)$ is non-decreasing with $t$ and such that $\{i\in\llbracket 1,N\rrbracket,\ X_t^i \neq Y_t^i\} \subset U_1(t)$ for all $t\geqslant 0$.
Recall from Lemma~\ref{deathnumber} the event:
\[\mathcal A=\left\{\#\left\{i\in \llbracket 1,N\rrbracket, \exists t\leqslant t_\varepsilon, X^i_t=Y_t^i\in B_1 \right\} \geqslant 2\alpha N\right\}.\]
For $k\in\mathbb N$ and $j=1,2$, write $u_k^j = \# U_j(\tau_k)$. At each event time, the probability that a particle goes from $U_2$ to $U_1$ is less than the current proportion $u^1/N$ of pairs in $U_1$. Hence, we have that for all $k\geqslant 0$:
\[\mathbb E\left(u^1_{k+1}|\mathcal F_{\tau_{k}}\right) \leqslant u^1_k\left(1+\frac{1}{N}\right),\]
and thus
\[\mathbb E\left(u^1_k\right) \leqslant \bar d(x,y)\left(1+\frac{1}{N}\right)^k.\]
Using the notations of Lemma~\ref{deathnumber}, in particular $T$ for the total number of death events before time $t_\varepsilon$,
and using that $k\mapsto u^1_k$ is non-decreasing, we bound
\begin{align*}
\mathbb E(u^1_T\mathbbm{1}_{\mathcal A^c}) &\leqslant \mathbb E(u^1_{\sigma N}) + N\mathbb P(T>\sigma N,\mathcal A^c) \\& \leqslant e^\sigma \bar d(\mathbf x,\mathbf y) + Nq^N,
\end{align*}
which is bounded, uniformly in $N\geqslant 1$ and $\varepsilon$ small enough, by $\tilde C_3\bar d(\mathbf x,\mathbf y)$ for some $\tilde C_3>0$, as soon as $\bar d(\mathbf x,\mathbf y)\geqslant 1$ (while, if $\bar d(\mathbf x,\mathbf y)=0$, the two processes remain equal for all times and the result is trivial).
We get from all of this:
\[\mathbb E\left(\sup_{t\leqslant t_\varepsilon} \bar d\left(\textbf X_{t},\textbf Y_{t}\right)\mathbbm{1}_{\mathcal{A}^c}\right)\leqslant \mathbb E(u^1_T\mathbbm{1}_{\mathcal{A}^c}) \leqslant \tilde C_3\bar d(x,y).\]
Now we can bound the probability to decouple starting from any $x_i=y_i\in D$, for a fixed $i$ (recall the notation $T^i$ from Lemma~\ref{deathnumber}):
\begin{align*}
\mathbb P\left(\exists 0<t<t_\varepsilon, X^i_t\neq Y_t^i\right) &\leqslant \sum_{n\geqslant 1} \mathbb{P}\left(\bar\tau_d^i=\tau^i_n, \mathcal A^c, T^i\geqslant n \right) + \mathbb P(\mathcal A)\\ &\leqslant \sum_{n\geqslant 1} \mathbb E\left(\bar d(\textbf X_{\tau^i_n},\textbf Y_{\tau^i_n})\mathbbm{1}_{\mathcal A^c}\right)/N\, \mathbb P\left( T^i\geqslant n,\mathcal A^c\right) + (2\tilde p_\varepsilon^{\alpha})^N \\ &\leqslant \sum_{n\geqslant 1} \mathbb E\left(\sup_{t\leqslant t_\varepsilon} \bar d\left(\textbf X_{t},\textbf Y_{t}\right)\mathbbm{1}_{\mathcal A^c}\right)/N\, \mathbb P\left( T^i\geqslant n,\mathcal A^c\right) + (2\tilde p_\varepsilon^{\alpha})^N \\ &\leqslant \tilde C_3\bar d(x,y)\, \mathbb E(T^i\mathbbm 1_{\mathcal A^c})/N + (2\tilde p_\varepsilon^{\alpha})^N.
\end{align*}
We conclude using Lemma~\ref{deathnumber}.
\end{proof}
We need to choose the parameters involved in the definition of the distance $\mathbf d$. There are three of them: $\alpha$, $\beta$, and $V_0$. We choose them in order to have:
\begin{equation}\label{cond1}
\left(\frac{1+2\beta C_1}{1+\beta C_2}\vee 2\beta C_1\right) + \alpha C_3(1+2\beta V_0)<1,
\end{equation}
and
\begin{equation}\label{cond2}
\frac{1+2\beta V_0}{1+V_0}<1.
\end{equation}
This is possible by fixing first some small $\beta$, then taking $V_0$ large enough, and finally $\alpha$ small enough.
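The compatibility of \eqref{cond1} and \eqref{cond2} with this order of choice can be checked on a toy set of constants; the values of $C_1$, $C_2$, $C_3$ below are placeholders (the actual constants come from the previous lemmas), so the following Python lines are only a sanity check of the inequalities, not part of the proof.
\begin{verbatim}
# placeholder constants (the true ones come from the lemmas above)
C1, C2, C3 = 1.0, 5.0, 2.0
beta  = 0.05     # first, a small beta
V0    = 50.0     # then V0 large
alpha = 0.001    # finally, alpha small

cond1 = max((1 + 2*beta*C1)/(1 + beta*C2), 2*beta*C1) + alpha*C3*(1 + 2*beta*V0)
cond2 = (1 + 2*beta*V0)/(1 + V0)
print(cond1 < 1, cond2 < 1)   # both True with these choices
\end{verbatim}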
\begin{lem}\label{coupkilled}
Let $\mathbf{x},\mathbf{y}\in D^N$ and $1\leqslant i\leqslant N$ such that $x_i\neq y_i$ and $V(x_i)+V(y_i)\leqslant C_2$. Then with $\kappa_{1,\varepsilon} = \gamma_\varepsilon\vee \left(1-c_\varepsilon+2p_\varepsilon + 2\beta C_1(1-\gamma_{\varepsilon})\right)$, where $\gamma_\varepsilon$ has been defined in Lemma~\ref{Lyapu}, we have:
\[\mathbb E(d^1(X^i_{t_\varepsilon},Y^i_{t_\varepsilon}))\leqslant \kappa_{1,\varepsilon} d^1(x_i,y_i).\]
\end{lem}
\begin{proof}
Let $(\tilde X^{i}_t,\tilde Y_t^{i})$ be the coupling of the diffusion \eqref{descente2} from Lemma~\ref{coupopt}, used in the construction of our coupling. Then, $(\tilde X^{i}_t,\tilde Y_t^{i})=(X^i_t,Y_t^i)$ until $X^i$ or $Y^i$ reaches $\partial D$. We have:
\begin{align*}
\pro{X^i_{t_\varepsilon} = Y^i_{t_\varepsilon}} &\geqslant \pro{X^i_{t_\varepsilon} = Y^i_{t_\varepsilon},\tau_{x_i}>t_\varepsilon,\tau_{y_i}>t_\varepsilon} \\ &=\pro{\tilde X^{x_i}_{t_\varepsilon} = \tilde Y^{y_i}_{t_\varepsilon},\tau_{x_i}>t_\varepsilon,\tau_{y_i}>t_\varepsilon} \\ &\geqslant \pro{\tilde X^{x_i}_{t_\varepsilon} = \tilde Y^{y_i}_{t_\varepsilon}} - \pro{\tau_{x_i}\leqslant t_\varepsilon} - \pro{\tau_{y_i}\leqslant t_\varepsilon} \\ &\geqslant c_{\varepsilon} - 2p_\varepsilon.
\end{align*}
Using the property of the Lyapunov function described in \eqref{lyapu}, we then have:
\begin{align*}
\mathbb E\left(d^1(X^i_{t_\varepsilon},Y^i_{t_\varepsilon})\right) &\leqslant 1 - c_{\varepsilon} + 2p_\varepsilon + 2\beta C_1(1-\gamma_{\varepsilon}) + \gamma_\varepsilon \beta \left( V(x_i)+V(y_i) \right)\\
& \leqslant \kappa_{1,\varepsilon} d^1(x_i,y_i).
\end{align*}
\end{proof}
Now we focus on the particles near the boundary that are not coupled:
\begin{lem}\label{decreaselya}
Let $\mathbf{x},\mathbf{y}\in D^N$ and $1\leqslant i\leqslant N$ such that $V(x_i)+V(y_i)\geqslant C_2$ and $x_i\neq y_i$. Then with $\kappa_{2,\varepsilon} = \gamma_{\varepsilon} + (1-\gamma_{\varepsilon})\frac{1+2\beta C_1}{1+\beta C_2}$, we have:
\[\mathbb E(d^1(X^i_{t_\varepsilon},Y^i_{t_\varepsilon}))\leqslant \kappa_{2,\varepsilon} d^1(x_i,y_i).\]
\end{lem}
\begin{proof}
Using the Lyapunov property and the fact that $\gamma_{\varepsilon}\leqslant\kappa_{2,\varepsilon}$, we have:
\begin{multline*}
\mathbb E\left(d^1(X^i_{t_\varepsilon},Y^i_{t_\varepsilon})\right) \leqslant 1 + 2\beta C_1(1-\gamma_{\varepsilon}) + \beta\gamma_{\varepsilon}\left(V(x_i) + V(y_i) \right) \\ \leqslant \kappa_{2,\varepsilon}d^1(x_i,y_i) + 1 + 2\beta C_1(1-\gamma_{\varepsilon}) - \kappa_{2,\varepsilon} + \beta (\gamma_{\varepsilon}-\kappa_{2,\varepsilon})\left(V(x_i)+V(y_i)\right).
\end{multline*}
The fact that $V(x_i)+V(y_i) \geqslant C_2$ implies that
\[ 1 + 2\beta C_1(1-\gamma_{\varepsilon}) - \kappa_{2,\varepsilon} + \beta (\gamma_{\varepsilon}-\kappa_{2,\varepsilon})\left(V(x_i)+V(y_i)\right)\leqslant 0,\]and thus the result.
\end{proof}
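The algebra behind the choice of $\kappa_{2,\varepsilon}$ in the previous proof is elementary but easy to get wrong; the following sympy lines (an illustration only, not part of the argument) check that the bracket appearing in the last display vanishes at $V(x_i)+V(y_i)=C_2$ and is non-increasing in this quantity.
\begin{verbatim}
import sympy as sp

beta, C1, C2, gamma, S = sp.symbols('beta C1 C2 gamma S', positive=True)
kappa2 = gamma + (1 - gamma)*(1 + 2*beta*C1)/(1 + beta*C2)

# the bracket of the last display, with S = V(x_i) + V(y_i)
bracket = 1 + 2*beta*C1*(1 - gamma) - kappa2 + beta*(gamma - kappa2)*S

print(sp.simplify(bracket.subs(S, C2)))   # 0: the bracket vanishes at S = C2
print(sp.factor(kappa2 - gamma))          # non-negative for gamma <= 1, so the
                                          # bracket is non-increasing in S
\end{verbatim}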
\begin{proof}[Proof of Theorem~\ref{thm}]
Let $\mathbf{x},\mathbf{y}\in D^N$, $\kappa_\varepsilon=\kappa_{1,\varepsilon}\vee\kappa_{2,\varepsilon}$. First suppose that $\mathbbm{1}_{A(\mathbf{x})>\alpha N}=\mathbbm{1}_{A(\mathbf{y})>\alpha N}=0$. We decompose:
\begin{multline}\label{prouf}
\mathbb E\left(\mathbf d\left(\mathbf{X}_{t_\varepsilon},\mathbf{Y}_{t_\varepsilon}\right)\right) \\= \sum_{i/x_i\neq y_i}\mathbb E\left(d^1\left(X^i_{t_\varepsilon},Y^i_{t_\varepsilon}\right)\right) + \sum_{i/x_i=y_i \notin B}\mathbb E\left(d^1\left(X^i_{t_\varepsilon},Y^i_{t_\varepsilon}\right)\right) + \sum_{i/x_i=y_i \in B}\mathbb E\left(d^1\left(X^i_{t_\varepsilon},Y^i_{t_\varepsilon}\right)\right) \\+ N\left(1+V_0\right)\left( \mathbb{P}\left(A(\mathbf X_{t_\varepsilon})>\alpha N\right) + \mathbb{P}\left(A(\mathbf Y_{t_\varepsilon})>\alpha N\right) \right)\,.
\end{multline}
Thanks to Lemmas~\ref{coupkilled} and \ref{decreaselya}, we have that the first sum is less than $\kappa_\varepsilon \mathbf d(\mathbf{x},\mathbf{y})$. From Lemma~\ref{deathprobability}, the second term is less than: \[C_3m_{\varepsilon}(1+2\beta V_0)\mathbf d(\mathbf{x},\mathbf{y}),\]
and the third term is less than:
\[\alpha C_3(1+2\beta V_0)\mathbf d(\mathbf{x},\mathbf{y}).\]
Finally, thanks to Lemma~\ref{boundary}, the last term is less than: \[2N\left(1+V_0\right)q_{\varepsilon}^N\mathbf{d}(\mathbf{x},\mathbf{y})\leqslant \frac{-2\left(1+ V_0\right)}{e\ln(q_\varepsilon)}\mathbf d(\mathbf{x},\mathbf{y}),\]using that $\mathbf d(\mathbf{x},\mathbf{y})\geqslant 1$ when $\mathbf x\neq\mathbf y$ and that $\sup_{N\geqslant 1}Nq_\varepsilon^N\leqslant -1/(e\ln q_\varepsilon)$ since $q_\varepsilon<1$.
Putting all of this together we get:
\[\mathbb E(\mathbf d(\mathbf{X}_{t_\varepsilon},\mathbf{Y}_{t_\varepsilon}))\leqslant s_\varepsilon \mathbf d(\mathbf{x},\mathbf{y})\]where \[s_{\varepsilon}=\kappa_\varepsilon + C_3\left(1+2\beta V_0\right)m_\varepsilon + \alpha C_3(1+2\beta V_0)+ \frac{-2\left(1+ V_0\right)}{e\ln(q_\varepsilon)}.\]As $\varepsilon$ goes to $0$, $s_\varepsilon$ goes to $\left(\frac{1+2\beta C_1}{1+\beta C_2}\vee 2\beta C_1\right) + \alpha C_3(1+2\beta V_0)<1$ because of our choice of constants \eqref{cond1}. \par
Now, consider the case where $\mathbbm{1}_{A(\mathbf{x})>\alpha N}+\mathbbm{1}_{A(\mathbf{y})>\alpha N}>0$. Assume that $\mathbf{x}\neq\mathbf{y}$, the result being trivial otherwise since the processes stay equal for all times. In that case, $\mathbf d(\mathbf{x},\mathbf{y}) \geqslant N(1+ V_0)$ and we simply bound
\[\mathbb E\left(\mathbf d\left(\mathbf{X}_{t_\varepsilon},\mathbf{Y}_{t_\varepsilon}\right)\right) \leqslant N(1+2\beta V_0 + (1+V_0)q_\varepsilon)\leqslant \left(\frac{1+2\beta V_0}{1+V_0}+q_\varepsilon\right)\mathbf d(\mathbf{x},\mathbf{y}),\]
for $\varepsilon$ small enough.
Since $r_\varepsilon := \frac{1+2\beta V_0}{1+V_0} + q_\varepsilon$ is strictly less than $1$ for $\varepsilon$ small enough, by our choice of constants \eqref{cond2}, we conclude that, with $c=1-\sup_{\varepsilon<\varepsilon_0}\left(s_\varepsilon\vee r_\varepsilon\right)>0$ where $\varepsilon_0$ is small enough, we have for all $\mathbf{x},\mathbf{y}\in D^N$:
\[\mathbb E_{\mathbf{x},\mathbf{y}}\left(\mathbf d\left(\mathbf{X}_{t_\varepsilon},\mathbf{Y}_{t_\varepsilon}\right)\right) \leqslant (1-c) \mathbf d(\mathbf{x},\mathbf{y}).\]
By conditioning with respect to the initial condition we get that:
\[W_{\mathbf d}\left(\mu P_{t_\varepsilon}^N , \nu P_{t_\varepsilon}^N\right) \leqslant ( 1-c )W_{\mathbf d}\left(\mu , \nu \right),\]
for all probability measures $\mu,\nu$ in $\mathcal{M}^1(D^N)$, and by iteration:
\begin{align*}W_{\mathbf d}\left(\mu P_{t}^N , \nu P_{t}^N\right)&\leqslant (1-c)^{\left\lfloor t/t_\varepsilon \right\rfloor} W_{\mathbf d}\left(\mu P_{t-\left\lfloor t/ t_\varepsilon\right\rfloor t_\varepsilon}^N , \nu P_{t-\left\lfloor t/ t_\varepsilon\right\rfloor t_\varepsilon}^N\right) \\ &\leqslant (1 + 2(\beta+1)V_0)N (1-c)^{\left\lfloor t/t_\varepsilon \right\rfloor}.
\end{align*}
We conclude the first point of the theorem using $\mathbbm{1}_{\mathbf{x}\neq \mathbf{y}} \leqslant \mathbf d(\mathbf{x},\mathbf{y})$.
Let us now prove the second point of Theorem~\ref{thm}. The space $\mathcal{M}^1\left(D^N\right)$ endowed with the Wasserstein distance $W_{\mathbf d}$ is complete. The contraction of $P_{t_\varepsilon}^N$ yields the existence and uniqueness of the stationary measure $\nu^{N,\varepsilon}_{\infty}$, as well as the exponential convergence of $\mu P_t^N$ towards $\nu^{N,\varepsilon}_{\infty}$. If $\mu$ is exchangeable, then $\mu P_t^N$ is exchangeable for all $t\geqslant 0$. The convergence of $\mu P_t^N$ toward $\nu^{N,\varepsilon}_{\infty}$ then implies that $\nu^{N,\varepsilon}_{\infty}$ is exchangeable. Now consider an optimal coupling for the distance $\mathbf d$ of $\mu P^N_t$ and $\nu^{N,\varepsilon}_{\infty} P_t^N$. Using the exchangeability property we have:
\begin{align*}
\|\mu P^{N,k}_{t}-\nu^{N,\varepsilon,k}_{\infty}\|_{TV} &\leqslant \mathbb E\left(\mathbbm{1}_{\left(X_t^1,\dots,X^k_t\right)\neq \left(Y_t^1,\dots,Y^k_t\right)}\right) \\ &\leqslant \sum_{i=1}^k\mathbb E\left(\mathbbm{1}_{X^i_t\neq Y^i_t}\right) \\ &= k\mathbb E\left(\mathbbm{1}_{X^1_t\neq Y^1_t}\right) \\&\leqslant \frac{k}{N}\sum_{i=1}^N\mathbb E\left(\mathbbm{1}_{X^i_t\neq Y^i_t}\right),
\end{align*}
and we conclude with the first point of the theorem.
\end{proof}
\subsection{Propagation of chaos}\label{proof2}
Recall the definition of the empirical measure $\pi^N$ in \eqref{empiri}.
As said in the introduction, the goal is to obtain a propagation of chaos result which is uniform in time. We start from a propagation of chaos result with a time dependency, taken from \cite{Villemonais}, which reads as follows:
\begin{prop}[\cite{Villemonais}, Theorem 1]\label{propag}
For all $\mu_0\in \mathcal M^1(D^N)$, let $(X_t^i)_{t\geqslant 0}$ be the FV process with initial condition $(X_0^i)$, a random variable of law $\mu_0$, and let $(X_t)$ be the diffusion \eqref{descente}. Then, for all bounded $f:D\to\mathbb R_+$, all $\varepsilon>0$ and all $t\geqslant 0$:
\[\mathbb E\left(\left|\int_D f\text{d}\pi^N(\textbf{X}_t) - \mathbb E_{\pi^N(\textbf X_0)}\left(f(X_t)|\tau_{\partial D}>t\right) \right|\right)\leqslant\frac{2(1+\sqrt{2})\|f\|_{\infty}}{\sqrt{N}}\sqrt{\mathbb E\left(\frac{1}{\mathbb{P}_{\pi^N(\textbf X_0)}\left(\tau_{\partial D}>t\right)^2}\right)},\]where $\tau_{\partial D}$ is defined in \eqref{deathtime}.
\end{prop}
We also need a result on the convergence of the law of the diffusion \eqref{descente}, conditioned on survival, towards the QSD $\nu_{\infty}^{\varepsilon}$. This is from \cite{champagnat2018general}, although the statement is slightly modified to fit our setting.
\begin{prop}\label{convnonlin}
Under Assumption~\ref{assu1}, there exists $\varepsilon_0>0$ such that for all compact $K\subset D$, and all $0<\varepsilon<\varepsilon_0$, there exists $C_\varepsilon,\tilde{C}_\varepsilon,\lambda_\varepsilon,\chi_\varepsilon>0$ such that for all $\mu_0\in\mathcal M^1(D)$ satisfying $\mu_0(K)\geqslant 1/4$:
\[\|\mathbb{P}_{\mu_0}\left(X_t\in \cdot|\tau_{\partial D}>t\right)-\nu_{\infty}^{\varepsilon}\|_{TV}\leqslant C_\varepsilon e^{-\chi_\varepsilon t},\]and
\[\mathbb{P}_{\mu_0}\left(\tau_{\partial D}>t\right)\geqslant \tilde C_\varepsilon e^{-\lambda_\varepsilon t}.\]
\end{prop}
\begin{proof}
The process~\eqref{descente} satisfies equation~(4.7) of \cite{champagnat2018general}, with $D_0=F$, where $F$ was defined in the proof of Lemma~\ref{Lyapu}, some $\lambda_1$ independent of $\varepsilon$, and $\varphi=V$. The constant $\lambda_0$ defined in equation~(4.4) of \cite{champagnat2018general} goes to zero as $\varepsilon$ goes to zero, hence we may choose $\varepsilon_0$ such that for all $\varepsilon<\varepsilon_0$, $\lambda_1>\lambda_0$, and the assumptions of \cite[Corollary 4.3]{champagnat2018general} hold true. From \cite[Theorem 4.1]{champagnat2018general}, this yields the existence of the QSD $\nu_{\infty}^{\varepsilon}$ and of some function $\phi:D\to\mathbb R_+^*$, uniformly bounded away from $0$ on all compact subsets of $D$, such that for all $\mu_0\in\mathcal M^1(D)$:
\[\|\mathbb{P}_{\mu_0}\left(X_t\in \cdot|\tau_{\partial D}>t\right)-\nu_{\infty}^{\varepsilon}\|_{TV}\leqslant C_\varepsilon e^{-\chi_\varepsilon t}\mu_0(V)/\mu_0(\phi).\]
Since $V$ is bounded, if $\mu_0(K)\geqslant 1/4$, then $\mu_0(\phi)\geqslant 1/4\min_{K}\phi$, and we get that
\[\|\mathbb{P}_{\mu_0}\left(X_t\in \cdot|\tau_{\partial D}>t\right)-\nu_{\infty}^{\varepsilon}\|_{TV}\leqslant C_\varepsilon e^{-\chi_\varepsilon t}\frac{4\|V\|_{\infty}}{\inf_{K}\phi}.\]
For the second point, write:
\[\mathbb{P}_{\mu_0}\left(\tau_{\partial D}>t\right)\geqslant \frac{1}{4}\inf_{x\in K}\mathbb P_x(\tau_{\partial D}>t).\]
Now fix $0<\tilde U_1<\tilde U_0$ such that $K\subset F\cup \left\{U\leqslant \tilde U_0\right\}=:\tilde F$, and \[\tilde \omega := \inf_{(F\cup \left\{U\leqslant \tilde U_1\right\})^c}|\nabla U|>0.\]
Fix $T>0$ such that $\tilde U_0-\tilde\omega^2 T< \tilde U_1$, and some $\delta >0$ such that:
\[\delta < \min\left(dist \left(F\cup \left\{U\leqslant \tilde U_1\right\},(F\cup \left\{U\leqslant \tilde U_0\right\})^c \right), dist\left(F\cup \left\{U\leqslant \tilde U_0\right\},\mathbb R^d\setminus D\right) \right).\]
Write:
\[\mathcal E = \left\{ \tau_{\partial D}>T, X_T \in \tilde F \right\},\]
as well as $\varphi_t(x)$ for the flow
\[\left\{ \begin{matrix} \varphi'_t = -\nabla U(\varphi_t)\\ \varphi_0(x)=x \end{matrix} \right. .\]
With our choice of $T$, $\tilde U_0$ and $\tilde U_1$, we have that $\varphi_T(x) \in F\cup \left\{U\leqslant \tilde U_1\right\}$ for all $x\in \tilde F$.
We get using Gronwall's Lemma:
\[\sup_{0\leqslant t\leqslant T}|X_t-\varphi_t| \leqslant e^{\|\nabla^2U\|_{\infty}T}\sqrt{2\varepsilon}\sup_{0\leqslant t\leqslant T}|B_t|.\]
If $W$ is a one-dimensional Brownian motion, $\sup_{0\leqslant t\leqslant T}W_t$ has the law of $\sqrt{T}|G|$ where $G$ is a standard normal random variable. Hence:
\[\mathbb P\left(\sup_{0\leqslant t\leqslant T}|X_t-\varphi_t|\geqslant \delta \right) \leqslant 4d\,\mathbb P\left(G\geqslant \frac{\delta e^{-\|\nabla^2U\|_{\infty}T}}{\sqrt{2\varepsilon T}} \right) \leqslant 4de^{-\frac{b}{\varepsilon}}, \]where $b=\frac{\delta^2 e^{-2\|\nabla^2U\|_{\infty}T}}{4T}$.
Now write
\[\mathcal E_i = \left\{\tau_{\partial D} >(i+1)T \text{ and }X_{(i+1)T} \in \tilde F \right\}.\]
We showed that for $\varepsilon$ small enough, $\mathbb P\left( \mathcal E_{i+1}|\mathcal E_{i}\right) \geqslant 1-4de^{-b/\varepsilon}$. We also have, by definition of the events $\mathcal E_i$:\[\left\{ \tau_{\partial D}\leqslant t \right\} \subset \mathcal{E}_{\left\lceil t/T\right\rceil}^c.\]Hence for all $x\in\tilde F$:
\[\mathbb P_{x}\left( \tau_{\partial D} >t \right) \geqslant
\left(1-4de^{-b/\varepsilon}\right)^{\lceil t/T\rceil+1},\] and thus the result.
\end{proof}
In our metastable setting, this already yields propagation of chaos at equilibrium. Indeed, if the FV process starts from its stationary measure, then its law does not change in time. From Proposition~\ref{propag}, the empirical measure of this process at time $t$ is then close to the law at time $t$ of the diffusion \eqref{descente} conditioned on survival, started from the initial empirical measure, which is itself close to the QSD if $t$ is large enough.
\begin{lem}\label{propeq}
Under Assumptions~\ref{assu1} and~\ref{assu2}, there exists $\varepsilon_0>0$ such that for all $0<\varepsilon<\varepsilon_0$, there exist $C_\varepsilon,\eta_{\varepsilon,1}>0$ such that if $\textbf{X}_{\infty}$ is a random vector of law $\nu^{N,\varepsilon}_{\infty}$ on $D^{N}$, then for all bounded functions $f:D\to\mathbb R_+$:
\[\mathbb E\left(\left|\int_D f \text{d}\pi^N(\textbf{X}_{\infty}) - \int_D f\text{d}\nu_{\infty}\right|\right)\leqslant \frac{C_\varepsilon\|f\|_{\infty}}{N^{\eta_{\varepsilon,1}}}.\]
\end{lem}
\begin{proof}
Assumptions~\ref{assu1} and~\ref{assu2} yield the existence of $\nu_{\infty}^{N,\varepsilon}$. Introduce the FV process $(\textbf{X}_t)_{t\geqslant0}$ with initial condition $\textbf X_{\infty}$. By definition, the law of $\textbf{X}_t$ is $\nu_\infty^{N,\varepsilon}$ for all $t\geqslant 0$. Recall the definition of $B$ and $A$ from \eqref{numbbound}, and set $K=D\setminus B$. Since $\nu^{N,\varepsilon}_\infty$ is the stationary measure of the FV process, we get from Lemma~\ref{boundary} applied with $\alpha=1/2$ that for all $t\geqslant 0$:
\[\mathbb{P}\left(A\left(\textbf{X}_{t}\right)>N/2\right)\leqslant q_{\varepsilon}^N,\]where $q_\varepsilon>0$ goes to zero as $\varepsilon$ goes to zero. Let $t=b\ln(N)$ for some $b>0$ to be fixed below, and write $\mathcal{A}=\left\{ A\left(\textbf{X}_{\infty}\right) >N/2\right\}$. By stationarity,
\[\mathbb E\left(\left|\int_D f \text{d}\pi^N(\textbf{X}_{\infty}) - \int_D f\text{d}\nu_{\infty}\right|\right)=\mathbb E\left(\left|\int_D f \text{d}\pi^N(\textbf{X}_{t}) - \int_D f\text{d}\nu_{\infty}\right|\right),\]
and by the triangle inequality,
\begin{multline*}
\mathbb E\left(\left|\int_D f \text{d}\pi^N(\textbf{X}_{t}) - \int_D f\text{d}\nu_{\infty}\right|\right) \leqslant \mathbb E\left(\left|\int_D f \text{d}\pi^N(\textbf{X}_{t})-\mathbb E_{\pi^N(\textbf{X}_{\infty})}(f(X_t)|\tau_{\partial D}>t)\right|\right) \\+ \mathbb E\left(\left|\mathbb E_{\pi^N(\textbf{X}_{\infty})}(f(X_t)|\tau_{\partial D}>t)-\int_D f \text{d}\nu_{\infty}\right|\right).
\end{multline*}
On $\mathcal A^c$, we have that $\pi^N(\textbf{X}_{\infty})(K)\geqslant 1/2$. Hence, from Propositions~\ref{propag} and \ref{convnonlin}, there exists $C>0$ such that, for $N$ large enough:
\begin{equation*}
\mathbb E\left( \left| \int_D f\text{d}\pi^N(\textbf{X}_t) - \mathbb E_{\pi^N(\textbf{X}_{\infty})}(f(X_t)|\tau_{\partial D}>t)\right|\right) \leqslant \frac{C\|f\|_{\infty}}{\sqrt{N}}\frac{1}{N^{-b\lambda_\varepsilon}-q_\varepsilon^N} \leqslant\frac{C\|f\|_{\infty}}{N^{1/2-b\lambda_\varepsilon}}.
\end{equation*}
From Proposition~\ref{convnonlin}, we get that:
\[\mathbb E\left(\left|\mathbb E_{\pi^N(\textbf{X}_{\infty})}(f(X_t)|\tau_{\partial D}>t)-\int_Df \text{d}\nu_{\infty}\right|\right)\leqslant \left(Ce^{-\chi_{\varepsilon}t}+q_\varepsilon^N\right)\|f\|_{\infty} \leqslant \frac{C\|f\|_{\infty}}{N^{b \chi_\varepsilon}},\]for $N$ large enough (up to enlarging $C$).
We thus have the result, with for instance $\eta_{\varepsilon,1} = \min\left(1/2-b\lambda_\varepsilon,\, b\chi_\varepsilon\right)$, provided $b<1/(2\lambda_\varepsilon)$ and $\varepsilon$ is small enough so that $q_\varepsilon<1$.
\end{proof}
\begin{proof}[Proof of Theorem~\ref{thm2}]
Fix some $\varepsilon>0$, some compact $K\subset D$, $\mu_0\in\mathcal M^1(D)$ such that $\mu_0(K)\geqslant 1/2$, and a random variable $\textbf X_0$ of law $\mu_0^{\otimes N}$. Write:
\[\mathcal{A} = \left\{\pi^N(\textbf X_0)(K) \geqslant 1/4 \right\}.\]
We have that $\mathbb E(\pi^N(\textbf X_0)(K)) = \mu_0(K)$, and $\text{Var} (\pi^N(\textbf X_0)(K)) = \mu_0(K)(1-\mu_0(K))/N$. Hence we have:
\[\mathbb P(\mathcal A^c) \leqslant 4/N.\]
We now fix $0<b<1/(2\lambda_\varepsilon)$. For all $t\leqslant b\ln(N)$, from Propositions~\ref{propag} and \ref{convnonlin}, we get that there exists $C>0$ such that:
\begin{align*}
\mathbb E\left( \left|\int_D f\text{d} \pi^N(\textbf{X}_t) - \mathbb E_{\pi^N(\textbf X_0)}\left(f\left(X_t\right)\middle|\tau_{\partial D} >t\right) \right| \right) \leqslant \frac{C\|f\|_{\infty}}{\sqrt{N}}\sqrt{\frac{1}{N^{-b\lambda_\varepsilon}-CN^{-1}}}\leqslant \frac{C\|f\|_{\infty}}{N^{1/2-b\lambda_\varepsilon/2}}.
\end{align*}
If $t\geqslant b\ln(N)$, we consider a random variable $(\textbf X_t,\textbf{X}_\infty)$ which is an optimal coupling of $\mu_0^{\otimes N}P_t^N$ and $\nu_{\infty}^{N,\varepsilon}$ for the distance $\mathbf d$ defined in~\eqref{distance}. We then bound as follows:
\begin{align*}
\mathbb E\left( \left|\int_D f\text{d} \pi^N(\textbf{X}_t) - \mathbb E_{\pi^N(\textbf X_0)}\left(f\left(X_t\right)\middle|\tau_{\partial D} >t\right) \right| \right) &\leqslant \mathbb E\left( \left|\int_D f\text{d} \pi^N(\textbf{X}_t) -\int_D f\text{d} \pi^N(\textbf{X}_\infty) \right| \right) \\ &+ \mathbb E\left( \left|\int_D f\text{d} \pi^N(\textbf{X}_\infty) - \int_D f \text{d}\nu_\infty \right| \right) \\ &+ \mathbb E\left( \left|\int_D f \text{d}\nu_\infty - \mathbb E_{\pi^N(\textbf X_0)}\left(f\left(X_t\right)\middle|\tau_{\partial D} >t\right) \right| \right).
\end{align*}
From the proof of Theorem~\ref{thm}, the first term is bounded by:
\[\frac{\mathbb E(\mathbf d(\textbf X_t,\textbf{X}_\infty))}{N}\|f\|_{\infty}\leqslant C(1-c)^{t/t_\varepsilon}\|f\|_{\infty}\leqslant CN^{b\ln(1-c)/t_\varepsilon}\|f\|_{\infty},\]since $t\geqslant b\ln(N)$.
From Lemma~\ref{propeq}, the second term is bounded by:
\[\frac{C}{N^{\eta_{\varepsilon,1}}}\|f\|_{\infty}.\]
Finally, by Proposition~\ref{convnonlin}, the third term is bounded by:
\[\left(Ce^{-\chi_\varepsilon t}+2\mathbb P(\mathcal A^c)\right)\|f\|_{\infty}\leqslant C\left(N^{-\chi_\varepsilon b}+N^{-1}\right)\|f\|_{\infty},\]and thus the result.
\end{proof}
\section{Establishing Assumption~\ref{assu2}}\label{technique}
This section is devoted to the proof of the following:
\begin{lem}\label{unifexitevent}
Under Assumptions~\ref{assu1} and \ref{assu3}, Assumption~\ref{assu2} holds.
\end{lem}
\begin{proof}
\begin{enumerate}
\item Suppose first that $d=1$. Under Assumption~\ref{assu1}, $D=(x_1,x_2)$ for some $x_1<x_2$. Set $B_1 = (x_1,x_1+\theta)\cup (x_2-\theta,x_2)$ with $\theta>0$ sufficiently small so that $U'\neq 0$ on $B_1$ (as a consequence of Assumption~\ref{assu1}, $U$ is thus decreasing (resp. increasing) on $(x_1,x_1+\theta)$ (resp. $(x_2-\theta,x_2)$)) and so that $\min(U(x_1+\theta),U(x_2-\theta))>c^*$ (which is possible since $U(x_1)=U(x_2)=U_0>c^*$). Take any $a\in (c^*,\min(U(x_1+\theta),U(x_2-\theta)))$. Using the uniqueness of the solution to equation~\eqref{descente} we have:
\[\left\{\exists x\in D\setminus B_1, \tau_{\partial D}(X^x) <t_\varepsilon \right\} = \left\{ \tau_{\partial D}(X^{x_1+\theta}) <t_\varepsilon \right\}\cup \left\{ \tau_{\partial D}(X^{x_2-\theta}) <t_\varepsilon \right\}.\]
We can conclude with \cite[Chapter 6, Theorem 6.2]{Freidlin-Wentzell}.
\item We now suppose the second condition of Assumption~\ref{assu3}.
We set \[B_1= \left\{ U > (U_0 + U_1)/2 \right\}\cap \tilde B_1,\] where $\tilde B_1$ is the set from Assumption~\ref{assu3}, and we may choose any $a\in (c^*,(U_0-U_1)/2)$.
Fix some
\begin{align*}
&U_2\in \left( a + (U_0+U_1)/2,U_0 \right), \\ &U_3\in \left(U_1+a,(U_0+U_1)/2\right),\\ &0<\gamma<\min(U_3-U_1-a,U_2-(U_0+U_1)/2-a),
\end{align*}
see Figure~\ref{Figure1}.
\begin{figure}
\caption{First, $U_0$ is the energy level on the boundary of $\mathcal D$. Then $U_1$ is taken as low as possible under the constraint of Assumption~\ref{assu3}.}
\label{Figure1}
\end{figure}
Fix as well some $T>0$ (to be chosen large enough below, independently of $\varepsilon$) and for all $x\in D\setminus B_1$ write the event:
\[\mathcal E^x = \left\{U(X^x_T)<U_3 \text{ and } \forall 0<t<T, U(X^x_t)<U_2 \right\}.\]
Let us first show that there exists $C>0$ such that $\mathbb{P}((\mathcal E^x)^c) \leqslant Ce^{-(a+\gamma)/\varepsilon}$ for $\varepsilon$ small enough and $T$ large enough, using large deviations estimates. In other words, on a fixed time interval, with high probability, starting from a medium energy level of at most $(U_1+U_0)/2$, the process stays below the high energy level $U_2$ and ends below the medium energy level $U_3$. According to \cite[Chapter 4, Theorem 1.1]{Freidlin-Wentzell}, the action functional of the process~\eqref{descente} is:
\begin{eqnarray}
I(\varphi) &=& \frac{1}{4\varepsilon}\int_0^T |\varphi'_s + \nabla U(\varphi_s)|^2\text{d} s \nonumber\\
&= & \frac{1}{4\varepsilon}\int_0^T \left(|\varphi_s'|^2 + |\nabla U(\varphi_s)|^2\right)\text{d} s + \frac{U(\varphi_T)-U(\varphi_0)}{2\varepsilon}\label{Iphi1}\\
& =& \frac{1}{4\varepsilon}\int_0^T |\varphi'_s - \nabla U(\varphi_s)|^2\text{d} s + \frac{U(\varphi_T)-U(\varphi_0)}{\varepsilon}.\label{Iphi2}
\end{eqnarray}
For every function $\varphi:[0,T]\to D$ such that $\varphi_0\notin B_1$ and such that there exists $t\in(0,T)$ with $U(\varphi_t)\geqslant U_2$, using \eqref{Iphi2} on the interval $[0,t]$, we have:
\[\varepsilon I(\varphi) \geqslant \frac{1}{4}\int_0^t |\varphi'_s + \nabla U(\varphi_s)|^2\text{d} s \geqslant U(\varphi_t) - U(\varphi_0) \geqslant U_2-(U_0+U_1)/2 >a+\gamma.\]
From this we deduce that for $\varepsilon$ small enough, for all $x\notin B_1$:\[\mathbb P\left(\exists t\in [0,T], U(X_t^x) \geqslant U_2 \right)\leqslant e^{-(a+\gamma)/\varepsilon}.\]
Second, to bound the probability that $U(X_T^x) \geqslant U_3$, we consider two possible events: either the process stays in $\tilde B_1$ during the whole interval $[0,T]$ (which is unlikely, since $\nabla U$ does not vanish there), or it leaves $\tilde B_1$ at some point but then climbs back above the energy level $U_3$ in a time less than $T$ (which is also unlikely).
More precisely, for every function $\varphi:[0,T]\to D$ such that $\varphi_t\in\tilde B_1$ for all $t\in[0,T]$, using \eqref{Iphi1}, we have that
\[\varepsilon I(\varphi) \geqslant T \inf_{\tilde B_1}|\nabla U|^2/4 - U_3/2.\]
We choose $T$ large enough so that for all such functions, $\varepsilon I(\varphi)> a+\gamma$. Then for $\varepsilon$ small enough we have that for all $x\notin B_1$,
\[\mathbb P\left( \exists t\in[0,T], X^x_t\notin \tilde B_1 \right) \geqslant 1-e^{-(a+\gamma)/\varepsilon}.\]
Next, for every function $\varphi:[0,T]\to D$ such that $\varphi_0\notin \tilde B_1$ and $U(\varphi_T)>U_3$, we have \[\varepsilon I(\varphi)>U_3-U_1.\] Hence, for all $x\notin \tilde B_1$ and $\varepsilon$ small enough:
\[\mathbb P(\exists t\in [0,T], U(X^x_t)> U_3) \leqslant e^{-(a+\gamma)/\varepsilon}.\]
From those last two bounds we get for all $x\notin B_1$:
\begin{multline*}
\mathbb P\left( U(X^x_T) > U_3 \right) \\ \leqslant \mathbb{P} \left( \forall t<T, X^x_t \in \tilde B_1 \right)
+ \mathbb P\left( \exists T>s>t>0, X^x_t\notin \tilde B_1, U(X^x_s)>U_3 \right) \\
\leqslant 2e^{-(a+\gamma)/\varepsilon}.
\end{multline*}
Finally we get for all $x\notin B_1$:
\[\mathbb P\left( (\mathcal E^x)^c\right) \leqslant 3e^{-(a+\gamma)/\varepsilon}.\]
Up to now, we only have a control for a fixed initial condition $x$. To tackle simultaneously all initial conditions in $D\setminus B_1$, we use that, over a time interval of length $T$, two processes driven by the same Brownian motion and starting close to each other stay close (deterministically). More precisely, fix
\[
\delta
<
\min\left(dist(\left\{ U\leqslant U_2\right\},\mathbb R^d\setminus D),dist(\left\{ U\leqslant U_3\right\},B_1)\right),
\]
and $\delta'>0$ such that $\delta'e^{\|\nabla^2U\|_{\infty} T}<\delta$. Fix a family of points $z_1,\dots,z_k \notin B_1$ such that $D\setminus B_1\subset \cup_{i=1}^k B(z_i,\delta')$, where $B(z,r)$ is the ball of center $z$ and radius $r$. Write the event:
\[\mathcal E=\left\{ \forall x\notin B_1, \tau_{\partial D}(X^x)>T \text{ and }X^x_T\notin B_1 \right\}.\]
If $x\notin B_1$, there exists $i$ such that $|x-z_i|<\delta'$. Gronwall's lemma then classically yields that \[\sup_{0\leqslant t\leqslant T} |X^x_t-X^{z_i}_t| \leqslant \delta'e^{\|\nabla^2U\|_{\infty}T} <\delta.\]
In particular, $\tau_{\partial D}(X^x) < T$ implies that $U(X_t^{z_i}) \geqslant U_2$ for some $t\in[0,T]$, and $X^x_T\in B_1$ implies that $U(X^{z_i}_T)\geqslant U_3$.
Hence we have that:
\[ \bigcap_{i=1}^k\mathcal E^{z_i}\subset \mathcal E,\]and for $\varepsilon$ small enough \[\mathbb P(\mathcal E^c)\leqslant 3ke^{-(a+\gamma)/\varepsilon}.\]
Now write
\[\mathcal E_i = \left\{\forall x\notin B_1,\tau_{\partial D}(X^x) >(i+1)T \text{ and }X^x_{(i+1)T} \notin B_1 \right\}.\]
We showed that for $\varepsilon$ small enough, $\mathbb P\left( \mathcal E_{i+1}|\mathcal E_{i}\right) \geqslant 1-3ke^{-(a+\gamma)/\varepsilon}$. We also have that \[\left\{\exists x\in D\setminus B_1, \tau_{\partial D}(X^x)< t_\varepsilon \right\} \subset \mathcal{E}_{\left\lfloor t_\varepsilon/T\right\rfloor}^c.\]Hence:
\[\mathbb P\left( \forall x\in D\setminus B_1, \tau_{\partial D}(X^x) >t_\varepsilon \right) \geqslant
\left(1-3ke^{-(a+\gamma)/\varepsilon}\right)^{e^{a/\varepsilon}/T},\] and thus this probability goes to $1$ as $\varepsilon$ goes to $0$.
\end{enumerate}
\end{proof}
\end{document}
\begin{document}
\renewcommand{\thefootnote}{\fnsymbol{footnote}}
\title{Wiener integrals with respect to the Hermite random field and applications to the wave equation}
\author{Jorge Clarke De la Cerda $^{1,2,3}$ \thanks{Partially supported by BECAS CHILE 2011 and the CONICYT-ECOS program C10E03}. \quad Ciprian A. Tudor
$^{3,4}$ \footnote{Supported by the CNCS grant PN-II-ID-PCCE-2011-2-0015. Associate member of the team Samm, Universit\'e de Panth\'eon-Sorbonne Paris 1. Support from the ANR grant ``Masterie'' BLAN 012103 is also acknowledged.}\vspace*{0.1in}\\
$^{1}$ Departamento de Matem\'atica, Facultad de Ciencias, Universidad del Bio-Bio, \\ Concepci\'on, Chile. \\
$^{2}$ Departamento de Ingenier\'ia Matem\'atica, Universidad de Concepci\'on,\\
Casilla 160-C, Concepci\'on, Chile. \\
[email protected] \vspace*{0.1in} \\
$^{3}$ Laboratoire Paul Painlev\'e, Universit\'e de Lille 1\\
F-59655 Villeneuve d'Ascq, France. \vspace*{0.1in} \\
$^{4}$
Academy of Economical Studies, Bucharest\\
Piata Romana, nr. 6, Bucharest, Romania.\\
\quad [email protected]\vspace*{0.1in}}
\date{}
\maketitle
\begin{abstract}
The Hermite random field has been introduced as a limit of some weighted Hermite variations of the fractional Brownian sheet. In this work we define it as a multiple integral with respect to the standard Brownian sheet and introduce Wiener integrals with respect to it. As an application we study the wave equation driven by the Hermite sheet. We prove the existence of the solution and we study the regularity of its sample paths, the existence of the density and of its local times.
\end{abstract}
\vskip0.3cm
{\bf 2000 AMS Classification Numbers: }60F05, 60H05, 60G18.
\vskip0.2cm
{\bf Key words: } Hermite process, Hermite sheet, Wiener integral, stochastic wave equation.
\section{Introduction}
\hspace*{1.1cm} Random fields, or multiparameter stochastic processes, have attracted a significant amount of attention due to their wide range of applications. In particular, self-similar random fields find applications in various kinds of phenomena, from hydrology and surface modeling to network traffic analysis and mathematical finance, to name a few. On the other hand, this type of processes is also quite interesting when they appear as solutions to stochastic partial differential equations (SPDEs) in several dimensions, such as the wave or heat equations.\\
\hspace*{1.1cm} A class of processes that lies in the family described above is that of the Hermite random fields, or Hermite sheets (from now on). Inside this class we can find the well-studied fractional Brownian sheet and the Rosenblatt process, among others.\\
The Hermite processes of order $q\geq 1$ are self-similar with stationary increments and live in the $q$th Wiener chaos, that is, they can be expressed as $q$-fold iterated integrals with respect to the Wiener process. The class of Hermite processes includes the fractional Brownian motion (fBm), which is the only Gaussian process in this family.
Their practical aspects are striking: they provide a wide class of processes that allow one to model long memory, self-similarity and H\"{o}lder regularity, while deviating significantly from the fBm and other Gaussian processes. Since they are non-Gaussian and self-similar with stationary increments, the Hermite processes can also serve as an input in models where self-similarity is observed in empirical data which appear to be non-Gaussian.
The Hermite sheet of order $q$ has so far only been known through its representation as a non-central limit of suitably normalized Hermite variations of the fractional Brownian sheet, see \cite{RST10} for the two-parameter case and \cite{Breton11} for the general $d$-parametric case. In both cases the authors also prove self-similarity, stationarity of the increments and H\"older continuity.\\
\hspace*{1.1cm} In the present work we deal directly with the multi-parametric case, building the Hermite sheet as a natural extension of the expression for the Hermite process studied as a non-central limit in \cite{DoMa} and \cite{Taqqu79}.
Fix $d\in \mathbb{N} \backslash \left\lbrace0\right\rbrace$, define $\mathbf{t}=(t_{1},t_{2}, \ldots ,t_{d}) \in \mathbb{R}^{d}$ and let $\mathbf{H}=(H_{1},H_{2}, \ldots ,H_{d}) \in (\frac{1}{2},1)^{d}$ be a Hurst multi-index, and set:
\begin{eqnarray}
\label{hermite-sheet}
\nonumber
Z^{q}_{\mathbf{H}}(\mathbf{t}) &=& c(\mathbf{H},q) \int_{\mathbb{R}^{d\cdot q}}
\int_{0}^{t_{1}}
\ldots \int_{0}^{t_{d}} \left( \prod _{j=1}^{q} (s_{1}-y_{1,j})_{+} ^{-\left( \frac{1}{2} + \frac{1-H_{1}}{q} \right) }
\ldots (s_{d}-y_{d,j})_{+} ^{-\left( \frac{1}{2} + \frac{1-H_{d}}{q} \right) } \right) \\
\nonumber
& & ds_{d} \ldots ds_{1} \quad
dW(y_{1,1},\ldots ,y_{d,1} )\ldots dW(y_{1,q},\ldots ,y_{d,q}) \\
&=&
c(\mathbf{H},q) \int_{\mathbb{R}^{d\cdot q}}
\int_{0}^{\mathbf{t}} \quad \prod _{j=1}^{q} (\mathbf{s}-\mathbf{y}_{j})_{+} ^{-\mathbf{\left( \frac{1}{2} + \frac{1-H}{q} \right)} } d\mathbf{s} \quad dW(\mathbf{y}_{1})\ldots dW(\mathbf{y}_{q}),
\end{eqnarray}
where the bold characters denote multidimensional quantities as indicated below in Section 2.
The above integrals are Wiener-It\^o multiple integrals of order $q$ with respect to the $d$-parametric standard Brownian sheet $(W(\mathbf{y}))_{\mathbf{y}\in \mathbb{R}^{d}}$ (see \cite{N} for the definition) and $c(\mathbf{H},q)$ is a positive
normalization constant depending only on $\mathbf{H}$ and $q$. We designate the process $\left( Z^{q}_{\mathbf{H}}(\mathbf{t}) \right)_{\mathbf{t} \in \mathbb{R}^{d} }$ as the {\em Hermite sheet} or {\em Hermite random field}.
From expression (\ref{hermite-sheet}) it is possible to note that for $d=1$ we recover the {\em Hermite process}, a family that has recently been studied in \cite{CTV11-Hp}, \cite{MT07} and \cite{PT10-Hp}. As a particular case ($q=1$) we recover the best known element of this family, the {\em fractional Brownian motion}, which has been largely studied due to its various applications. Recently, a rich theory of stochastic integration with respect to this process has been introduced and stochastic differential equations driven by the
fractional Brownian motion have been considered for several purposes. The process obtained in (\ref{hermite-sheet}) for $d=1, q=2$ is known as the {\em Rosenblatt process}; it was introduced by Rosenblatt in \cite{Rose} and was given this name by Taqqu in \cite{Taqqu75}. Lately, this process has been increasingly studied for its various interesting aspects, such as wavelet-type expansions, extremal properties, parameter estimation, discrete approximations and other potential applications (see \cite{Abry1}, \cite{Abry2}, \cite{BT10}, \cite{CTV09}, \cite{Tudor08}). \\
As far as we know, the only well-known multiparameter process that can be obtained from (\ref{hermite-sheet}) is the {\em fractional Brownian sheet} ($d>1$ and $q=1$). This process has recently been studied as a driving noise for stochastic differential equations, and stochastic calculus with respect to it has been developed. We refer to \cite{ALP2002}, \cite{KP2009}, \cite{TV1} for only a few works on various aspects of the fractional Brownian sheet.
On the one hand, the purpose of this article is to study the basic properties of the multiparameter Hermite process and then to introduce Wiener integrals with respect to the Hermite sheet, in order to generalize and continue the line of research initiated in \cite{MT07}, putting a new brick in the construction of a stochastic calculus driven by this class of processes in several dimensions. As in \cite{Breton11}, the covariance structure of the Hermite sheet coincides with that of the fractional Brownian sheet, which enables the use of the same classes of deterministic integrands as for the fractional Brownian sheet, profiting from its well-known properties.
On the other hand, this work also aims at approaching the study of stochastic partial differential equations in several dimensions driven by non-Gaussian noises, giving a specific expression for the driving noise which allows one to better exploit the properties of the equations by taking advantage of results already existing in the literature. It is in this sense that, inspired by the works \cite{BT}, \cite{CT12} or \cite{DaSa1} and exploiting these, we present a stochastic wave equation driven by the Hermite sheet in spatial dimension $d\geq 1$ and we study the existence, regularity and other properties of the solution, including the existence of local times and of the joint density.\\
We organize our paper as follows. Section 2 presents the necessary notations and prove several properties of the Hermite sheet. In Section 3, we construct
Wiener integrals with respect to this process. Section 4 is devoted to present the wave equation and discuss the existence and regularity of the solution and other properties.
\\
\section{Notation and the Hermite sheet}
\hspace*{1.1cm} Throughout this work we use the notation introduced in \cite{Breton11}. Fix $d\in \mathbb{N} \backslash \left\lbrace 0\right\rbrace$ and consider multi-parametric processes indexed by $\mathbb{R}^{d}$. We shall use bold notation for multi-indexed quantities, i.e., $\mathbf{a}=(a_{1},a_{2},\ldots ,a_{d})$, $\mathbf{ab}=(a_{1}b_{1},a_{2}b_{2},\ldots ,a_{d}b_{d})$, $\mathbf{a/b}=(a_{1}/b_{1},a_{2}/b_{2},\ldots ,a_{d}/b_{d})$, $[\mathbf{a},\mathbf{b}]= \displaystyle \prod_{i=1}^{d}[a_{i},b_{i}]$, $(\mathbf{a},\mathbf{b})=\displaystyle \prod_{i=1}^{d}(a_{i},b_{i})$, $\displaystyle\sum_{\mathbf{i}\in [\mathbf{0},\mathbf{N}]} a_{\mathbf{i}} = \displaystyle\sum_{i_{1}=0}^{N_{1}} \displaystyle\sum_{i_{2}=0}^{N_{2}} \ldots \displaystyle\sum_{i_{d}=0}^{N_{d}} a_{i_{1},i_{2},\ldots ,i_{d}}$, $\mathbf{a}^{\mathbf{b}}=\displaystyle \prod_{i=1}^{d} a_{i}^{b_{i}}$, and $\mathbf{a} < \mathbf{b}$ iff $a_{1} < b_{1}, a_{2} < b_{2},\ldots ,a_{d} < b_{d}$ (analogously for the other inequalities).\\
Before introducing the {\em Hermite sheet} we briefly recall the {\em fractional Brownian sheet} and the {\em standard Brownian sheet}.\\
The {\em $d$-parametric anisotropic fractional Brownian sheet} is the centered Gaussian process $\{B_{\mathbf{t}}^{\mathbf{H}}:\mathbf{t}=(t_{1},\ldots ,t_{d}) \in \mathbb{R}^{d}\}$ with Hurst multi-index $\mathbf{H}=(H_{1},\ldots ,H_{d}) \in (0,1)^{d}$. It is equal to zero on the hyperplanes $\{\mathbf{t}:t_{i}=0\}$, $1\leq i \leq d$, and its covariance function is given by
\begin{eqnarray}\label{cov-fra}
\nonumber
R_{\mathbf{H}}(\mathbf{s},\mathbf{t}) &=& \mathbb{E}[B_{\mathbf{s}}^{\mathbf{H}}B_{\mathbf{t}}^{\mathbf{H}}] \\
&=& \displaystyle \prod_{i=1}^{d} R_{H_{i}}(s_{i},t_{i}) = \prod_{i=1}^{d} \frac{s_{i}^{2H_{i}} + t_{i}^{2H_{i}} - |t_{i} - s_{i}|^{2H_{i}} }{2}.
\end{eqnarray}
The {\em $d$-parametric standard Brownian sheet} is the Gaussian process $\{W_{\mathbf{t}}:\mathbf{t}=(t_{1},\ldots ,t_{d}) \in \mathbb{R}^{d}\}$ which is equal to zero on the hyperplanes $\{\mathbf{t}:t_{i}=0\}$, $1\leq i \leq d$, and whose covariance function is given by
\begin{eqnarray}\label{cov-brow}
R(\mathbf{s},\mathbf{t}) = \mathbb{E}[W_{\mathbf{s}}W_{\mathbf{t}}] = \displaystyle \prod_{i=1}^{d} R(s_{i},t_{i}) = \prod_{i=1}^{d} s_{i} \wedge t_{i}.
\end{eqnarray}
Let $q\in \mathbb{N}$ and let the Hurst multi-index be $\mathbf{H}=(H_{1},H_{2}, \ldots ,H_{d}) \in (\frac{1}{2},1)^{d}$. The {\em Hermite sheet of order $q$} is given by
\begin{eqnarray}
\label{hermite-sheet-1}
\nonumber
Z^{q}_{\mathbf{H}}(\mathbf{t}) &=& c(\mathbf{H},q) \int_{\mathbb{R}^{d\cdot q}}
\int_{0}^{t_{1}}
\ldots \int_{0}^{t_{d}} \left( \prod_{j=1}^{q} (s_{1}-y_{1,j})_{+}^{-\left( \frac{1}{2} + \frac{1-H_{1}}{q} \right)}
\ldots (s_{d}-y_{d,j})_{+}^{-\left( \frac{1}{2} + \frac{1-H_{d}}{q} \right)} \right) \\
\nonumber
& & ds_{d} \ldots ds_{1} \quad
dW(y_{1,1},\ldots ,y_{d,1}) \ldots dW(y_{1,q},\ldots ,y_{d,q}) \\
&=&
c(\mathbf{H},q) \int_{\mathbb{R}^{d\cdot q}}
\int_{0}^{\mathbf{t}} \quad \prod_{j=1}^{q} (\mathbf{s}-\mathbf{y}_{j})_{+}^{-\left( \frac{1}{2} + \frac{1-\mathbf{H}}{q} \right)} d\mathbf{s} \quad dW(\mathbf{y}_{1})\ldots dW(\mathbf{y}_{q})
\end{eqnarray}
where $x_{+}=\max(x,0)$. For more background on multiple stochastic integrals we refer to \cite{N}. As pointed out before, when $q=1$, (\ref{hermite-sheet-1}) is the fractional Brownian sheet with Hurst multi-index $\mathbf{H}=(H_{1},H_{2}, \ldots ,H_{d}) \in (\frac{1}{2},1)^{d}$. For $q \geq 2$ the process $Z^{q}_{\mathbf{H}}(\mathbf{t})$ is not Gaussian, and for $q=2$ we call it the {\em Rosenblatt sheet}.
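To fix ideas, let us spell out the simplest non-Gaussian case: specializing (\ref{hermite-sheet-1}) to $d=1$ and $q=2$ (so that $\mathbf{H}=H\in(\frac{1}{2},1)$ is a scalar) gives the familiar kernel of the Rosenblatt process,
\begin{equation*}
Z^{2}_{H}(t) = c(H,2) \int_{\mathbb{R}^{2}} \int_{0}^{t} (s-y_{1})_{+}^{-\left( \frac{1}{2} + \frac{1-H}{2} \right)} (s-y_{2})_{+}^{-\left( \frac{1}{2} + \frac{1-H}{2} \right)} ds \, dW(y_{1}) \, dW(y_{2}),
\end{equation*}
that is (up to the choice of normalization) its time-domain representation; compare \cite{Tudor08}.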
Let us now calculate the covariance $R_{\mathbf{H}}^{q}(\mathbf{s},\mathbf{t})$ of the Hermite sheet. Using the isometry of multiple Wiener-It\^o integrals and the Fubini theorem one gets
\begin{eqnarray*}
\nonumber
R_{\mathbf{H}}^{q}(\mathbf{s},\mathbf{t}) &=& \mathbb{E}[Z^{q}_{\mathbf{H}}(\mathbf{s})Z^{q}_{\mathbf{H}}(\mathbf{t})] \\
\nonumber
&=& \mathbb{E} \left\lbrace c(\mathbf{H},q)^{2} \int_{\mathbb{R}^{d\cdot q}}
\int_{0}^{\mathbf{s}} \quad \prod_{j=1}^{q} (\mathbf{u}-\mathbf{y}_{j})_{+}^{-\left( \frac{1}{2} + \frac{1-\mathbf{H}}{q} \right)} d\mathbf{u} \quad dW(\mathbf{y}_{1})\ldots dW(\mathbf{y}_{q}) \right. \\
\nonumber
& & \cdot \left. \int_{\mathbb{R}^{d\cdot q}}
\int_{0}^{\mathbf{t}} \quad \prod_{j=1}^{q} (\mathbf{v}-\mathbf{y}_{j})_{+}^{-\left( \frac{1}{2} + \frac{1-\mathbf{H}}{q} \right)} d\mathbf{v} \quad dW(\mathbf{y}_{1})\ldots dW(\mathbf{y}_{q}) \right\rbrace \\
\nonumber
&=& c(\mathbf{H},q)^{2} \int_{\mathbb{R}^{d\cdot q}} \left\lbrace \int_{0}^{s_{1}} \ldots \int_{0}^{s_{d}} \prod_{j=1}^{q} \prod_{i=1}^{d} (u_{i}-y_{i,j})_{+}^{-\left( \frac{1}{2} + \frac{1-H_{i}}{q} \right)} du_{d} \ldots du_{1} \right. \\
\nonumber
& & \cdot \left. \int_{0}^{t_{1}} \ldots \int_{0}^{t_{d}} \prod_{j=1}^{q} \prod_{i=1}^{d} (v_{i}-y_{i,j})_{+}^{-\left( \frac{1}{2} + \frac{1-H_{i}}{q} \right)} dv_{d} \ldots dv_{1} \right\rbrace dy_{1,1}\ldots dy_{d,1}\ldots dy_{1,q} \ldots dy_{d,q} \\
\nonumber
&=& c(\mathbf{H},q)^{2} \prod_{i=1}^{d} \int_{0}^{t_{i}} \int_{0}^{s_{i}} \int_{\mathbb{R}^{q}} \prod_{j=1}^{q} (u_{i}-y_{i,j})_{+}^{-\left( \frac{1}{2} + \frac{1-H_{i}}{q} \right)} (v_{i}-y_{i,j})_{+}^{-\left( \frac{1}{2} + \frac{1-H_{i}}{q} \right)} dy_{i,1} \ldots dy_{i,q} \, du_{i} \, dv_{i},
\end{eqnarray*}
but
\begin{eqnarray}\label{prop1}
\nonumber
&&\int_{\mathbb{R}^{q}} \prod_{j=1}^{q} (u-x_{j})_{+}^{-\left( \frac{1}{2} + \frac{1-H}{q} \right)} (v-x_{j})_{+}^{-\left( \frac{1}{2} + \frac{1-H}{q} \right)} dx_{1} \ldots dx_{q} \\
&=&
\left[ \int_{\mathbb{R}} (u-x)_{+}^{-\left( \frac{1}{2} + \frac{1-H}{q} \right)} (v-x)_{+}^{-\left( \frac{1}{2} + \frac{1-H}{q} \right)} dx \right]^{q},
\end{eqnarray}
so
\begin{eqnarray*}
R_{\mathbf{H}}^{q}(\mathbf{s},\mathbf{t}) &=& c(\mathbf{H},q)^{2} \prod_{i=1}^{d} \int_{0}^{t_{i}} \int_{0}^{s_{i}} \left[ \int_{\mathbb{R}} (u_{i}-y_{i})_{+}^{-\left( \frac{1}{2} + \frac{1-H_{i}}{q} \right)} (v_{i}-y_{i})_{+}^{-\left( \frac{1}{2} + \frac{1-H_{i}}{q} \right)} dy_{i} \right]^{q} du_{i} \, dv_{i}.
\end{eqnarray*}
Recalling that the Beta function $\beta(p,q)= \int_{0}^{1} z^{p-1}(1-z)^{q-1} dz$, $p,q>0$, satisfies the identity
\begin{eqnarray}\label{beta}
\int_{\mathbb{R}} (u-y)_{+}^{a-1} (v-y)_{+}^{a-1}
dy& =&\beta(a,1-2a) \vert u-v\vert^{2a-1}
\end{eqnarray}
whenever $a\in (0, \frac{1}{2})$, and noting that $H_{k}> \frac{1}{2}$ for every $k=1,\ldots,d$, we see that
\begin{eqnarray*}
R_{\mathbf{H}}^{q}(\mathbf{s},\mathbf{t}) &=& c(\mathbf{H},q)^{2} \prod_{i=1}^{d} \beta\left( \frac{1}{2} - \frac{1-H_{i}}{q}, \frac{2(1-H_{i})}{q} \right)^{q} \int_{0}^{t_{i}} \int_{0}^{s_{i}} \vert u_{i}-v_{i}\vert^{2(H_{i}-1)} du_{i} \, dv_{i} \\
\nonumber
&=& c(\mathbf{H},q)^{2} \prod_{i=1}^{d} \beta\left( \frac{1}{2} - \frac{1-H_{i}}{q},\frac{2(1-H_{i})}{q} \right)^{q} \frac{1}{2H_{i}(2H_{i}-1)} \left( s_{i}^{2H_{i}} + t_{i}^{2H_{i}} - \vert t_{i} - s_{i} \vert^{2H_{i}} \right).
\end{eqnarray*}
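Let us spell out, for the reader's convenience, the exponent bookkeeping behind this step: in the $i$-th factor the identity (\ref{beta}) is applied with $a=\frac{1}{2} - \frac{1-H_{i}}{q}$, which lies in $(0,\frac{1}{2})$ because $\frac{1}{2}<H_{i}<1$, so that $1-2a=\frac{2(1-H_{i})}{q}$ and, by (\ref{prop1}),
\begin{equation*}
\left[ \beta(a,1-2a)\, \vert u_{i}-v_{i}\vert^{2a-1} \right]^{q}
= \beta\left( \tfrac{1}{2} - \tfrac{1-H_{i}}{q},\tfrac{2(1-H_{i})}{q} \right)^{q} \vert u_{i}-v_{i}\vert^{2(H_{i}-1)}.
\end{equation*}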
We now choose
\begin{equation}\label{constant}
c(\mathbf{H},q)^{2} = \left( \frac{\beta\left( \frac{1}{2} - \frac{1-H_{1}}{q},\frac{2(1-H_{1})}{q} \right)^{q}}{H_{1}(2H_{1}-1)} \right)^{-1} \ldots \left( \frac{\beta\left( \frac{1}{2} - \frac{1-H_{d}}{q},\frac{2(1-H_{d})}{q} \right)^{q}}{H_{d}(2H_{d}-1)} \right)^{-1};
\end{equation}
in this way we get $\mathbb{E} \left( Z^{q}_{\mathbf{H}}(\mathbf{t})^{2} \right) = \mathbf{t}^{2\mathbf{H}} = t_{1}^{2H_{1}} \ldots t_{d}^{2H_{d}}$, and finally
\begin{eqnarray}\label{cov-hermite}
\nonumber
R_{\mathbf{H}}^{q}(\mathbf{s},\mathbf{t}) &=& \displaystyle \prod_{i=1}^{d} \frac{s_{i}^{2H_{i}} + t_{i}^{2H_{i}} - |t_{i} - s_{i}|^{2H_{i}} }{2} \\
&=& \prod_{i=1}^{d} R_{H_{i}}(s_{i},t_{i}) = R_{\mathbf{H}}(\mathbf{s},\mathbf{t}).
\end{eqnarray}
\begin{remark}
From the previous development we see that:
\begin{itemize}
\item The covariance structure is the same for all $q\geq 1$; in particular it coincides with the covariance of the fractional Brownian sheet.
\item In order for all the quantities above to be well defined, the condition $H_{k} \in (\frac{1}{2},1)$, $k=1, \ldots , d$, must be satisfied.
\end{itemize}
\end{remark}
We will next prove the basic properties of the Hermite sheet: self-similarity, stationarity of the increments and H\"older continuity.
Let us first recall the concept of self-similarity for multiparameter stochastic processes.
\begin{definition}\label{self}
A stochastic process $(X_{\mathbf{t}})_{\mathbf{t}\in T}$, where $T\subset \mathbb{R}^{d}$, is called self-similar with self-similarity order $\mathbf{\alpha}=(\alpha_{1},\ldots , \alpha_{d})>0$ if for any $\mathbf{h}=(h_{1},\ldots , h_{d})>0$ the stochastic process $(\hat{X}_{\mathbf{t}})_{\mathbf{t} \in T}$ given by
\begin{equation*}
\hat{X}_{\mathbf{t}} =\mathbf{h}^{\mathbf{\alpha}} X_{\frac{\mathbf{t}}{\mathbf{h}}} = h_{1}^{\alpha_{1}} \ldots h_{d}^{\alpha_{d}} X_{\frac{t_{1}}{h_{1}},\ldots , \frac{t_{d}}{h_{d}}}
\end{equation*}
has the same finite dimensional distributions as the process $X$.
\end{definition}
\begin{prop}
The Hermite sheet is self-similar of order $\mathbf{H}=(H_{1},\ldots , H_{d})$.
\end{prop}
{\bf Proof: } The scaling property of the Wiener sheet implies that for every $0<\mathbf{c}= (c_{1},\ldots , c_{d}) \in \mathbb{R}^{d}$ the processes $\left( W(\mathbf{c}\mathbf{t}) \right)_{\mathbf{t} \geq 0}$ and $\left(\sqrt{\mathbf{c}}\, W(\mathbf{t}) \right)_{\mathbf{t} \geq 0}$ have the same finite dimensional distributions.
Therefore, if $\mathbf{1}=(1,\ldots ,1) \in \mathbb{R}^{d}$, using obvious changes of variables in the integrals with respect to $d\mathbf{s}$ and $dW$,
\begin{eqnarray*}
\hat{Z}^{q}_{\mathbf{H}}(\mathbf{t}) &=&\mathbf{h}^{\mathbf{H}} Z^{q}_{\mathbf{H}}\left(\frac{\mathbf{t}}{\mathbf{h}}\right)\\
&=&
c(\mathbf{H},q)\mathbf{h}^{\mathbf{H}} \int_{\mathbb{R}^{d\cdot q}}
\int_{0}^{\frac{\mathbf{t}}{\mathbf{h}}} \quad \prod_{j=1}^{q} (\mathbf{s}-\mathbf{y}_{j})_{+}^{-\left( \frac{1}{2} + \frac{1-\mathbf{H}}{q} \right)} d\mathbf{s} \quad dW(\mathbf{y}_{1})\ldots dW(\mathbf{y}_{q})\\
&=&c(\mathbf{H},q)\mathbf{h}^{\mathbf{H}-\mathbf{1}} \int_{\mathbb{R}^{d\cdot q}}
\int_{0}^{\mathbf{t}} \quad \prod_{j=1}^{q} \left(\frac{\mathbf{s}}{\mathbf{h}} -\mathbf{y}_{j}\right)_{+}^{-\left( \frac{1}{2} + \frac{1-\mathbf{H}}{q} \right)} d\mathbf{s} \quad dW(\mathbf{y}_{1})\ldots dW(\mathbf{y}_{q})\\
&=&c(\mathbf{H},q)\mathbf{h}^{\mathbf{H}-\mathbf{1}} \int_{\mathbb{R}^{d\cdot q}}
\int_{0}^{\mathbf{t}} \quad \prod_{j=1}^{q} \left(\frac{\mathbf{s}}{\mathbf{h}} - \frac{\mathbf{y}_{j}}{\mathbf{h}}\right)_{+}^{-\left( \frac{1}{2} + \frac{1-\mathbf{H}}{q} \right)} d\mathbf{s} \quad dW(\mathbf{h}^{-1}\mathbf{y}_{1})\ldots dW(\mathbf{h}^{-1}\mathbf{y}_{q})\\
&=&c(\mathbf{H},q)\mathbf{h}^{\mathbf{H}-\mathbf{1}}\mathbf{h}^{q\left( \frac{1}{2} + \frac{1-\mathbf{H}}{q} \right)}
\int_{\mathbb{R}^{d\cdot q}}
\int_{0}^{\mathbf{t}} \quad \prod_{j=1}^{q} (\mathbf{s}-\mathbf{y}_{j})_{+}^{-\left( \frac{1}{2} + \frac{1-\mathbf{H}}{q} \right)} d\mathbf{s} \quad dW(\mathbf{h}^{-1}\mathbf{y}_{1})\ldots dW(\mathbf{h}^{-1}\mathbf{y}_{q})\\
&=^{(d)}& c(\mathbf{H},q)\mathbf{h}^{\mathbf{H}-\mathbf{1}}\mathbf{h}^{q\left( \frac{1}{2} + \frac{1-\mathbf{H}}{q} \right)}\mathbf{h}^{-\frac{q}{2}}
\int_{\mathbb{R}^{d\cdot q}}
\int_{0}^{\mathbf{t}} \quad \prod_{j=1}^{q} (\mathbf{s}-\mathbf{y}_{j})_{+}^{-\left( \frac{1}{2} + \frac{1-\mathbf{H}}{q} \right)} d\mathbf{s} \quad dW(\mathbf{y}_{1})\ldots dW(\mathbf{y}_{q})\\
&=&Z^{q}_{\mathbf{H}}(\mathbf{t}),
\end{eqnarray*}
where $=^{(d)}$ means equality of finite dimensional distributions.
\qed
\\
Let us recall the notion of the increment of a $d$-parameter process $X$ on a rectangle $[\mathbf{s}, \mathbf{t}] \subset \mathbb{R}^{d}$, $\mathbf{s}= (s_{1},\ldots , s_{d})$, $\mathbf{t}=(t_{1},\ldots ,t_{d})$, with $\mathbf{s} \leq \mathbf{t}$. This increment is denoted by $\Delta X_{[\mathbf{s}, \mathbf{t}]}$ and it is given by
\begin{equation}\label{mi}
\Delta X_{[\mathbf{s}, \mathbf{t}]}= \displaystyle\sum_{\mathbf{r}\in \{0,1\}^{d}}(-1)^{d- \sum_{i=1}^{d} r_{i}} X_{\mathbf{s} + \mathbf{r} \cdot (\mathbf{t}-\mathbf{s})}.
\end{equation}
When $d=1$ one obtains $\Delta X_{[\mathbf{s}, \mathbf{t}]}=X_{t}-X_{s}$, while for $d=2$ one gets $\Delta X_{[\mathbf{s}, \mathbf{t}]}=X_{t_{1}, t_{2}} -X_{t_{1}, s_{2}} -X_{s_{1}, t_{2}} + X_{s_{1}, s_{2}}$.
\begin{definition}\label{stationary}
A process $(X_{\mathbf{t}}, \mathbf{t} \in \mathbb{R}^{d})$ has stationary increments if for every $\mathbf{h} >0$, $\mathbf{h}\in \mathbb{R}^{d}$, the stochastic processes $(\Delta X_{[0, \mathbf{t}]}, \mathbf{t} \in \mathbb{R}^{d})$ and $(\Delta X_{[\mathbf{h}, \mathbf{h}+ \mathbf{t}]},\mathbf{t} \in \mathbb{R}^{d})$ have the same finite dimensional distributions.
\end{definition}
\begin{prop}
The Hermite sheet $(Z^{q}_{\mathbf{H}}(\mathbf{t}))_{\mathbf{t} \geq 0}$ has stationary increments.
\end{prop}
{\bf Proof: } Developing the increments of the process using the definition of the Hermite sheet and proceeding as in the proof of Proposition 1 with the change of variables $\mathbf{s}' = \mathbf{s} - \mathbf{h}$, it is immediate to see that for every $\mathbf{h} >0$, $\mathbf{h}\in \mathbb{R}^{d}$,
$$\Delta Z^{q}_{[\mathbf{h}, \mathbf{h}+ \mathbf{t}]} =^{(d)} \Delta Z^{q}_{[0, \mathbf{t}]}$$
for every $\mathbf{t}$.
\qed
\begin{prop}
The trajectories of the Hermite sheet $(Z^{q}_{\mathbf{H}}(\mathbf{t}), \mathbf{t} \geq 0)$ are H\"older continuous of any order $\mathbf{\delta} = (\delta_{1},\ldots , \delta_{d}) \in [0, \mathbf{H})$ in the following sense: for every $\omega \in \Omega$ there exists a constant $C_{\omega} >0$ such that for every $\mathbf{s}, \mathbf{t} \in \mathbb{R}^{d}$, $\mathbf{s}, \mathbf{t}\geq 0$,
\begin{equation*}
\vert \Delta Z^{q}_{[\mathbf{s}, \mathbf{t}]} \vert \leq C_{\omega} \vert t_{1}-s_{1}\vert^{\delta_{1}} \cdots \vert t_{d}-s_{d}\vert^{\delta_{d}}= C_{\omega} \vert \mathbf{t} -\mathbf{s}\vert^{\mathbf{\delta}}.
\end{equation*}
\end{prop}
{\bf Proof: } Using Cencov's criterion (see \cite{Cencov}) and the fact that the process $Z^{q}_{\mathbf{H}}$ is almost surely equal to $0$ when $t_{i}=0$, it suffices to check that
\begin{equation}
\label{i1}
\mathbb{E} \left| \Delta Z^{q}_{[\mathbf{s}, \mathbf{t}]} \right|^{p} \leq C\left( \vert t_{1}-s_{1}\vert \cdots \vert t_{d}-s_{d}\vert \right)^{1+\gamma}
\end{equation}
for some $p\geq 2$ and $\gamma >0$. From the self-similarity and the stationarity of the increments of the process $Z^{q}_{\mathbf{H}}$, we have for every $p\geq 2$
\begin{equation*}
\mathbb{E} \left| \Delta Z^{q}_{[\mathbf{s}, \mathbf{t}]} \right|^{p} =\mathbb{E} \left| Z^{q}_{\mathbf{H}}(\mathbf{1}) \right|^{p} \, \vert t_{1}-s_{1}\vert^{pH_{1}} \cdots \vert t_{d}- s_{d}\vert^{pH_{d}},
\end{equation*}
which implies (\ref{i1}) by taking $p$ large enough (any $p$ with $pH_{i}\geq 1+\gamma$ for every $i$, e.g. $p>2(1+\gamma)$, works since $H_{i}>\frac{1}{2}$).
\qed
\\
\begin{remark}
In the one-parameter case there exist several representations of the Hermite process (spectral domain representation, finite interval representation, positive half-axis representation, time domain representation; see \cite{PT10-Hp}). It has been shown in \cite{PT10-Hp} that all these representations of the (one-parameter) Hermite process have the same finite dimensional distributions. It would be interesting to generalize this study to the multi-parameter case.
\end{remark}
\section{Wiener integrals with respect to the Hermite sheet}
\hspace*{1.1cm} We are now in a position to introduce Wiener integrals with respect to the $d$-parametric Hermite sheet. Let us consider a Hermite sheet $\left( Z^{q}_{\mathbf{H}}(\mathbf{t})\right)_{\mathbf{t}\in \mathbb{R}^{d}}$. Denote by $\mathscr{E}$ the family of elementary functions on $\mathbb{R}^{d}$ of the form
\begin{eqnarray}\label{elem-func}
f(\mathbf{u}) &=&\sum_{l=1}^{n} a_{l}1_{(\mathbf{t}_{l}, \mathbf{t}_{l+1}]}(\mathbf{u}) \\
\nonumber
&=& \sum_{l=1}^{n} a_{l} 1_{({t}_{1,l}, {t}_{1,l+1}] \times \ldots \times ({t}_{d,l}, {t}_{d,l+1}]} (u_{1}, \ldots ,u_{d}),
\hskip0.5cm \mathbf{t}_{l} < \mathbf{t}_{l+1}, \hskip0.2cm a_{l} \in \mathbb{R},\hskip0.2cm l=1,\ldots ,n.
\end{eqnarray}
For an elementary function $f$ as above we can naturally define its Wiener integral with respect to the Hermite sheet $Z^{q}_{\mathbf{H}}$ as
\begin{eqnarray} \label{Her-int-1}
\int_{\mathbb{R}^{d}} f(\mathbf{u}) dZ_{\mathbf{H}}^{q}(\mathbf{u})&=& \displaystyle\sum_{l=1}^{n} a_{l} (\Delta Z^{q}_{\mathbf{H}})_{[\mathbf{t}_{l}, \mathbf{t}_{l+1}]},
\end{eqnarray}
where $(\Delta Z^{q}_{\mathbf{H}})_{[\mathbf{t}_{l}, \mathbf{t}_{l+1}]}$ (see (\ref{mi})) stands for the generalized increment of $Z^{q}_{\mathbf{H}}$ on the rectangle
\begin{equation*}
\Delta_{\mathbf{t}_{l}} := \left[ \mathbf{t}_{l}, \mathbf{t}_{l+1} \right] = \displaystyle \prod_{i=1}^{d} \left[t_{i,l}, t_{i,l+1} \right].
\end{equation*}
In the case $d=1$ we simply have
$$(\Delta Z^{q}_{\mathbf{H}})_{[\mathbf{t}_{l}, \mathbf{t}_{l+1}]}= Z^{q}_{\mathbf{H}}(t_{1,l+1}) - Z^{q}_{\mathbf{H}}(t_{1,l}),$$
while for $d=2$
$$(\Delta Z^{q}_{\mathbf{H}})_{[\mathbf{t}_{l}, \mathbf{t}_{l+1}]}= Z^{q}_{\mathbf{H}}(t_{1,l+1}, t_{2, l+1}) - Z^{q}_{\mathbf{H}}(t_{1,l}, t_{2, l+1})- Z^{q}_{\mathbf{H}}(t_{1,l+1}, t_{2, l})+ Z^{q}_{\mathbf{H}}(t_{1,l}, t_{2, l}).$$
In order to extend the definition (\ref{Her-int-1}) to a larger family of integrands, let us first make some observations. Consider the mapping $J$ which takes a function $f:\mathbb{R}^{d} \rightarrow \mathbb{R}$ to the function $J(f):\mathbb{R}^{d\cdot q} \rightarrow \mathbb{R}$ given by
\begin{eqnarray}\label{J}
J(f)(\mathbf{y}_{1}, \ldots ,\mathbf{y}_{q}) &=& c(\mathbf{H},q) \int_{\mathbb{R}^{d}} f(\mathbf{u}) \prod_{j=1}^{q} (\mathbf{u} - \mathbf{y}_{j})_{+}^{-\left( \frac{1}{2} + \frac{1-\mathbf{H}}{q} \right)} d\mathbf{u} \\
\nonumber
&=&
c(\mathbf{H},q) \int_{\mathbb{R}^{d}} f(u_{1}, \ldots ,u_{d}) \prod_{j=1}^{q} \displaystyle \prod_{i=1}^{d} (u_{i}-y_{i,j})_{+}^{-\left( \frac{1}{2} + \frac{1-H_{i}}{q} \right)} du_{1} \ldots du_{d}.
\end{eqnarray}
Using the mapping $J$ we see that definition (\ref{hermite-sheet-1}) can be re-expressed as
\begin{eqnarray}\label{hermite-sheet-2}
Z^{q}_{\mathbf{H}}(\mathbf{t})
&=& \int_{\mathbb{R}^{d\cdot q}}
J(1_{[0,t_{1}]\times \ldots \times[0,t_{d}]})(\mathbf{y}_{1}, \ldots ,\mathbf{y}_{q}) \, dW(\mathbf{y}_{1})\ldots dW(\mathbf{y}_{q}).
\end{eqnarray}
Since $J$ is clearly linear, definition (\ref{Her-int-1}) can be rewritten as
\begin{eqnarray}\label{Her-int-2}
\nonumber
\int_{\mathbb{R}^{d}} f(\mathbf{u}) dZ_{\mathbf{H}}^{q}(\mathbf{u})&=& \displaystyle\sum_{l=1}^{n} a_{l} \left( \Delta Z^{q}_{\mathbf{H}}\right)_{[\mathbf{t}_{l},\mathbf{t}_{l+1}]} \\
\nonumber
&=&
\displaystyle\sum_{l=1}^{n} a_{l} \left( \displaystyle\sum_{\mathbf{\xi} \in \{0,1\}^{d}} (-1)^{d- \sum_{i=1}^{d} \xi_{i}} Z^{q}_{\mathbf{H}}(t_{1,l+\xi_{1}}, \ldots , t_{d,l+\xi_{d}}) \right) \\
\nonumber
&=&
\displaystyle\sum_{l=1}^{n} a_{l} \displaystyle\sum_{\mathbf{\xi} \in \{0,1\}^{d}} (-1)^{d- \sum_{i=1}^{d} \xi_{i}} \int_{\mathbb{R}^{d\cdot q}} J(1_{[0,t_{1,l+\xi_{1}}]\times \ldots \times[0,t_{d,l+\xi_{d}}]})(\mathbf{y}_{1}, \ldots ,\mathbf{y}_{q}) \, dW(\mathbf{y}_{1})\ldots dW(\mathbf{y}_{q}) \\
\nonumber
&=& \displaystyle\sum_{l=1}^{n} a_{l} \int_{\mathbb{R}^{d\cdot q}} J(1_{[t_{1,l},t_{1,l+1}]\times \ldots \times[t_{d,l},t_{d,l+1}]})(\mathbf{y}_{1}, \ldots ,\mathbf{y}_{q}) \, dW(\mathbf{y}_{1})\ldots dW(\mathbf{y}_{q}) \\
&=& \int_{\mathbb{R}^{d\cdot q}} J(f)(\mathbf{y}_{1}, \ldots ,\mathbf{y}_{q}) \, dW(\mathbf{y}_{1})\ldots dW(\mathbf{y}_{q}).
\end{eqnarray}
In this way we introduce the space
\begin{equation}\label{Integrands-space-1}
\mathcal{H} = \left\lbrace f:\mathbb{R}^{d} \rightarrow \mathbb{R} : \int_{\mathbb{R}^{d\cdot q}} \left( J(f)(\mathbf{y}_{1}, \ldots ,\mathbf{y}_{q}) \right)^{2} d\mathbf{y}_{1} \ldots d\mathbf{y}_{q} < \infty \right\rbrace
\end{equation}
equipped with the norm
\begin{equation}\label{norm-integrands-space-1}
\Vert f\Vert_{\mathcal{H}}^{2} = \int_{\mathbb{R}^{d\cdot q}}\left( J(f)(\mathbf{y}_{1}, \ldots ,\mathbf{y}_{q}) \right)^{2} d\mathbf{y}_{1} \ldots d\mathbf{y}_{q}.
\end{equation}
Expanding the expression for the norm we see that
\begin{eqnarray}
\nonumber
\Vert f\Vert_{\mathcal{H}}^{2} &=& c(\mathbf{H},q)^{2} \int_{\mathbb{R}^{d\cdot q}} \left\lbrace \left( \int_{\mathbb{R}^{d}} f(\mathbf{u}) \prod_{j=1}^{q} (\mathbf{u} - \mathbf{y}_{j})_{+}^{-\left( \frac{1}{2} + \frac{1-\mathbf{H}}{q} \right)} d\mathbf{u} \right) \right. \\
\nonumber
& & \cdot \left. \left( \int_{\mathbb{R}^{d}} f(\mathbf{v}) \prod_{j=1}^{q} (\mathbf{v} - \mathbf{y}_{j})_{+}^{-\left( \frac{1}{2} + \frac{1-\mathbf{H}}{q} \right)} d\mathbf{v} \right) \right\rbrace d\mathbf{y}_{1} \ldots d\mathbf{y}_{q}.
\end{eqnarray}
Using (\ref{prop1}), (\ref{beta}) and (\ref{constant}) we get that
\begin{eqnarray} \label{norm-integrands-space-2}
\nonumber
\Vert f\Vert_{\mathcal{H}}^{2} &=& c(\mathbf{H},q)^{2} \int_{\mathbb{R}^{d}} \int_{\mathbb{R}^{d}} f(u_{1}, \ldots ,u_{d}) f(v_{1}, \ldots ,v_{d}) \\
\nonumber
& & \left\lbrace \displaystyle \prod_{i=1}^{d} \int_{\mathbb{R}^{q}} \prod_{j=1}^{q} (u_{i}-y_{i,j})_{+}^{-\left( \frac{1}{2} + \frac{1-H_{i}}{q} \right)} (v_{i}-y_{i,j})_{+}^{-\left( \frac{1}{2} + \frac{1-H_{i}}{q} \right)} dy_{i,1} \ldots dy_{i,q} \right\rbrace du_{1} \ldots du_{d} \ \ dv_{1} \ldots dv_{d} \\
\nonumber
&=& c(\mathbf{H},q)^{2} \int_{\mathbb{R}^{d}} \int_{\mathbb{R}^{d}} f(u_{1}, \ldots ,u_{d}) f(v_{1}, \ldots ,v_{d}) \\
\nonumber
& & \cdot \displaystyle \prod_{i=1}^{d} \left( \int_{\mathbb{R}} (u_{i}-y)_{+}^{-\left( \frac{1}{2} + \frac{1-H_{i}}{q} \right)} (v_{i}-y)_{+}^{-\left( \frac{1}{2} + \frac{1-H_{i}}{q} \right)} dy \right)^{q} du_{1} \ldots du_{d} \ \ dv_{1} \ldots dv_{d} \\
\nonumber
&=& \int_{\mathbb{R}^{d}} \int_{\mathbb{R}^{d}} f(u_{1}, \ldots ,u_{d}) f(v_{1}, \ldots ,v_{d}) \displaystyle \prod_{i=1}^{d} H_{i}(2H_{i}-1) \vert u_{i} - v_{i}\vert^{2H_{i}-2} du_{1} \ldots du_{d} \ \ dv_{1} \ldots dv_{d} \\
&=&
\mathbf{H(2H-1)} \int_{\mathbb{R}^{d}} \int_{\mathbb{R}^{d}} f(\mathbf{u}) f(\mathbf{v}) \vert \mathbf{u} - \mathbf{v}\vert^{\mathbf{2H-2}} d\mathbf{u} \, d\mathbf{v},
\end{eqnarray}
hence
\begin{equation}\label{Integrands-space-2}
\mathcal{H}=\left\lbrace f:\mathbb{R}^{d} \rightarrow \mathbb{R}: \int_{\mathbb{R}^{d}} \int_{\mathbb{R}^{d}} f(\mathbf{u}) f(\mathbf{v}) \vert \mathbf{u} - \mathbf{v}\vert^{\mathbf{2H-2}} d\mathbf{u} \, d\mathbf{v} < + \infty \right\rbrace
\end{equation}
and
\begin{equation*}
\Vert f\Vert_{\mathcal{H}}^{2} = \mathbf{H(2H-1)}\int_{\mathbb{R}^{d}} \int_{\mathbb{R}^{d}} f(\mathbf{u}) f(\mathbf{v}) \vert \mathbf{u} - \mathbf{v}\vert^{\mathbf{2H-2}} d\mathbf{u} \, d\mathbf{v}.
\end{equation*}
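As a simple consistency check (an elementary computation with the norm above, using the double-integral identity already employed in Section 2), for $\mathbf{t}>0$ the indicator $f=1_{[\mathbf{0},\mathbf{t}]}$ satisfies
\begin{equation*}
\Vert 1_{[\mathbf{0},\mathbf{t}]}\Vert_{\mathcal{H}}^{2} = \prod_{i=1}^{d} H_{i}(2H_{i}-1)\int_{0}^{t_{i}}\int_{0}^{t_{i}} \vert u_{i}-v_{i}\vert^{2H_{i}-2}\, du_{i}\, dv_{i} = \prod_{i=1}^{d} t_{i}^{2H_{i}} = \mathbf{t}^{2\mathbf{H}} = \mathbb{E}\left( Z^{q}_{\mathbf{H}}(\mathbf{t})^{2} \right),
\end{equation*}
which is consistent with the variance computed after (\ref{constant}).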
The mapping
\begin{equation}
f \rightarrow \int_{\mathbb{R}^{d}} f(\mathbf{u}) dZ^{q}_{\mathbf{H}}(\mathbf{u})
\end{equation}
provides an isometry from $\mathscr{E}$ to $L^{2}(\Omega)$. Indeed, for $f$ as in (\ref{elem-func}) it holds that
\begin{eqnarray}\label{Isometry-1}
\nonumber
&&\mathbb{E}\left\lbrace \left( \int_{\mathbb{R}^{d}} f(\mathbf{u}) dZ^{q}_{\mathbf{H}}(\mathbf{u}) \right)^{2} \right\rbrace \\
&=&
\displaystyle \sum_{k,l=1}^{n} a_{k}a_{l} \, \mathbb{E}\left( \left( \Delta Z^{q}_{\mathbf{H}} \right)_{[\mathbf{t}_{k},\mathbf{t}_{k+1}]} \cdot \left( \Delta Z^{q}_{\mathbf{H}}\right)_{[\mathbf{t}_{l},\mathbf{t}_{l+1}]} \right) \\
\nonumber
&=&
\displaystyle\sum_{k,l=1}^{n} a_{k}a_{l} \displaystyle\sum_{\mathbf{\xi} \in \{0,1\}^{d}} (-1)^{d- \sum_{i=1}^{d} \xi_{i}} \displaystyle\sum_{\mathbf{\rho} \in \{0,1\}^{d}} (-1)^{d- \sum_{j=1}^{d} \rho_{j}} \, \mathbb{E} \left\lbrace Z^{q}_{\mathbf{H}}(\mathbf{t}_{k+\xi}) Z^{q}_{\mathbf{H}}(\mathbf{t}_{l+\rho}) \right\rbrace \\
\nonumber
&=&
\displaystyle\sum_{k,l=1}^{n} a_{k}a_{l} \displaystyle\sum_{\mathbf{\xi} \in \{0,1\}^{d}} (-1)^{d- \sum_{i=1}^{d} \xi_{i}} \displaystyle\sum_{\mathbf{\rho} \in \{0,1\}^{d}} (-1)^{d- \sum_{j=1}^{d} \rho_{j}} R_{\mathbf{H}}(\mathbf{t}_{k+\xi},\mathbf{t}_{l+\rho}) \\
\nonumber
&=& \displaystyle\sum_{k,l=1}^{n} a_{k}a_{l} H_{1}(2H_{1}-1) \ldots H_{d}(2H_{d}-1) \int_{t_{1,k}}^{t_{1,k+1}} \ldots \int_{t_{d,k}}^{t_{d,k+1}} \int_{t_{1,l}}^{t_{1,l+1}} \ldots \int_{t_{d,l}}^{t_{d,l+1}} \\
\nonumber
& & \vert u_{1} - v_{1} \vert^{2H_{1}-2} \ldots \vert u_{d} - v_{d} \vert^{2H_{d}-2} du_{1} \ldots du_{d} \, dv_{1} \ldots dv_{d} \\
\nonumber
&=& \displaystyle\sum_{k,l=1}^{n} a_{k}a_{l} \langle 1_{[t_{1,k},t_{1,k+1}]\times \cdots \times [t_{d,k},t_{d,k+1}]},1_{[t_{1,l},t_{1,l+1}]\times \cdots \times [t_{d,l},t_{d,l+1}]}\rangle_{\mathcal{H}} \\
&=& \langle f,f\rangle_{\mathcal{H}},
\end{eqnarray}
where we have made a slight abuse of notation, writing $\mathbf{t}_{k+\xi} = (t_{1,k+\xi_{1}}, \ldots , t_{d,k+\xi_{d}})$.
\vspace*{0.5cm}
On the other hand, from what is shown in \cite{PiTa1}, Section 4, it follows that the set of elementary functions
${\cal{E}}$ is dense in ${\cal{H}}$. As a consequence the
mapping $f\mapsto \int_{\mathbb{R}^{d}} f(\mathbf{u})\, dZ^{q}_{\mathbf{H}}(\mathbf{u})$ can be extended to an isometry from
${\cal{H}}$ to $L^{2}(\Omega)$, and relation (\ref{Her-int-2}) still
holds for every $f\in {\cal{H}}$.
\vskip0.1cm
\begin{remark}
The elements of ${\cal{H}}$ may not be functions
but distributions; it is therefore more practical to work with
subspaces of ${\cal{H}}$ that are sets of functions. Such a subspace
is
\begin{eqnarray*} \left| {\cal{H}}\right| &=&\left\{ f:\mathbb{R}^{d}\to
\mathbb{R} \,\,\Big| \int_{\mathbb{R}^{d}} \int_{\mathbb{R}^{d}} \vert f(\mathbf{u})\vert
\vert f(\mathbf{v})\vert \vert \mathbf{u}-\mathbf{v}\vert^{\mathbf{2H-2}} d\mathbf{v}\,d\mathbf{u} <\infty \right\}.
\end{eqnarray*}
Then $\left| {\cal{H}}\right|$ is a strict subspace of $
{\cal{H}}$ and we actually have the inclusions
\begin{eqnarray}
\label{inclu1} L^{\mathbf{2}}(\mathbb{R}^{d}) \cap L^{\mathbf{1}}(\mathbb{R}^{d})\subset
L^{\frac{1}{\mathbf{H}}}(\mathbb{R}^{d}) \subset \left| {\cal{H}}\right|
\subset {\cal{H}},
\end{eqnarray}
where $L^{\mathbf{p}}$ denotes $L^{p_{1}} \otimes \ldots \otimes L^{p_{d}}$. \\
The space $\left| {\cal{H}}\right|$ is not complete with respect to the norm $\Vert \cdot
\Vert_{{\cal{H}}}$, but it is a Banach space with respect to the
norm
\begin{eqnarray*}
\Vert f\Vert^{2}_{\left| {\cal{H}}\right|
}&=& \int_{\mathbb{R}^{d}} \int_{\mathbb{R}^{d}} \vert f(\mathbf{u})\vert
\vert f(\mathbf{v})\vert \vert \mathbf{u}-\mathbf{v}\vert^{\mathbf{2H-2}} d\mathbf{v}\,d\mathbf{u}.
\end{eqnarray*}
\end{remark}
\begin{remark}
Expression (\ref{Her-int-2}) provides a useful interpretation of the Wiener integrals with respect to the Hermite sheet: they are elements of the $q$-th Wiener chaos generated by the $d$-parametric standard Brownian sheet.
\end{remark}
\section{Application: The Hermite stochastic wave equation}
\hspace*{1.1cm} In this section we present the linear stochastic wave equation as an example of an equation driven by a Hermite sheet. We show the existence of the solution and study some of its properties, using the Wiener integrals with respect to the Hermite sheet defined above.\\
Consider the linear stochastic wave equation driven by a Hermite sheet $Z^{q}_{\mathbf{H}}$ with Hurst multi-index $\mathbf{H}\in (1/2,1)^{d+1}$, that is,
\begin{eqnarray}
\label{wave}
\frac{\partial^2 u}{\partial t^2}(t,\mathbf{x})&=& \Delta u
(t,\mathbf{x}) +\dot Z^{q}_{\mathbf{H}}(t,\mathbf{x}), \quad t>0, \ \mathbf{x} \in \mathbb{R}^{d}, \\
\nonumber
u(0,\mathbf{x})&=& 0, \quad \mathbf{x} \in \mathbb{R}^{d}, \\
\nonumber
\frac{\partial u}{\partial t}(0,\mathbf{x}) &=& 0, \quad \mathbf{x} \in \mathbb{R}^{d}.
\end{eqnarray}
Here $\Delta$ is the Laplacian on $\mathbb{R}^{d}$ and $Z^{q}_{\mathbf{H}}=\{Z^{q}_{\mathbf{H}}(t,\mathbf{x}); t \geq 0, \mathbf{x} \in \mathbb{R}^{d}\}$ is the $(d+1)$-parametric Hermite sheet whose covariance is given by
\begin{equation*}
\mathbb{E} \left\lbrace Z^{q}_{\mathbf{H}}(s,\mathbf{x}) Z^{q}_{\mathbf{H}}(t,\mathbf{y}) \right\rbrace =R_{H}(t,s) R_{\mathbf{H}_{0}}(\mathbf{x}, \mathbf{y}),
\end{equation*}
where $\mathbf{H} = (H, H_{1},\ldots , H_{d})$, $\mathbf{H}_{0}= (H_{1},\ldots , H_{d})$, and $\dot Z^{q}_{\mathbf{H}}$ stands for the formal derivative of $Z^{q}_{\mathbf{H}}$. Equivalently we can write
\begin{equation}
\mathbb{E} \left\lbrace \dot Z^{q}_{\mathbf{H}}(s,\mathbf{x}) \dot Z^{q}_{\mathbf{H}}(t,\mathbf{y}) \right\rbrace = H(2H-1)|t-s|^{2H-2}\displaystyle \prod_{i=1}^{d}\left(H_{i}(2H_{i}-1) \, |x_i-y_i|^{2H_i-2}\right).
\end{equation}
Let $G_1$ be the fundamental solution of $\frac{\partial^2 u}{\partial t^2}-\Delta u=0$. It is known that $G_1(t, \cdot)$ is a distribution in ${\cal{S}'}(\mathbb{R}^d)$ with
rapid decrease, and
\begin{equation}
\label{Fourier-G-wave} {\cal F}
G_1(t,\cdot)(\xi)=\frac{\sin(t|\xi|)}{|\xi|},
\end{equation}
for any $\xi \in \mathbb{R}^d$, $t>0$ and $d \geq 1$, where ${\cal F}$ denotes the Fourier transform (see e.g. \cite{treves75}). In particular,
\begin{eqnarray*}
G_1(t,\mathbf{x})&=&\frac{1}{2}1_{\{|\mathbf{x}|<t\}}, \quad \mbox{if} \ d=1, \\
G_1(t,\mathbf{x})&=&\frac{1}{2 \pi}\frac{1}{\sqrt{t^2-|\mathbf{x}|^2}}1_{\{|\mathbf{x}|<t\}},
\quad \mbox{if} \ d=2, \\
G_1(t,\mathbf{x})&=&c_{d}\frac{1}{t}\sigma_t, \quad \mbox{if} \ d=3,
\end{eqnarray*}
where $\sigma_t$ denotes the surface measure on the 3-dimensional
sphere of radius $t$.
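As an elementary illustration of (\ref{Fourier-G-wave}), included here only as a sanity check and using the convention ${\cal F}g(\xi)=\int_{\mathbb{R}}e^{-i\xi x}g(x)\,dx$ (the sign convention is immaterial since $G_1(t,\cdot)$ is even), in dimension $d=1$ one has
\begin{equation*}
{\cal F} G_1(t,\cdot)(\xi)=\int_{\mathbb{R}} \frac{1}{2}1_{\{|x|<t\}}\, e^{-i \xi x}\, dx = \frac{1}{2}\int_{-t}^{t} e^{-i\xi x}\, dx = \frac{\sin(t|\xi|)}{|\xi|}.
\end{equation*}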
The {\it mild} solution of (\ref{wave}) is a square-integrable process
$u=\{u(t,\mathbf{x}); t \geq 0, \mathbf{x} \in \mathbb{R}^{d}\}$ defined by
\begin{equation} \label{def-sol-wave} u(t,\mathbf{x})=\int_{0}^{t}
\int_{\mathbb{R}^{d}}G_1(t-s,\mathbf{x}-\mathbf{y})Z^{q}_{\mathbf{H}}(ds,d\mathbf{y}).
\end{equation}
The above integral is a Wiener integral with respect to the Hermite sheet, as introduced in Section 3.
\subsection{Existence and regularity of the solution}
By definition, $u(t,\mathbf{x})$ exists if and only if the stochastic integral above is well defined, i.e. $g_{t,\mathbf{x}}:=G_1(t-\cdot,\mathbf{x}-\cdot)
\in \mathcal{H}$. In this case, $\mathbb{E}|u(t,\mathbf{x})|^2 = \|g_{t,\mathbf{x}}\|_{{\cal H}}^2$.
We now state the result on the existence and the regularity of the solution of (\ref{wave}).
\begin{prop}
Let $Z^{q}_{\mathbf{H}}(t,\mathbf{x})$ be the $(d+1)$-parametric Hermite sheet of order $q$. Denote by
\begin{equation}
\label{beta1}
\beta = d-\displaystyle\sum_{i=1}^{d} (2H_{i}-1).
\end{equation}
Then the stochastic wave equation (\ref{wave}) admits a unique mild solution $(u(t,\mathbf{x}))_{t\geq 0, \mathbf{x} \in \mathbb{R}^{d}}$ if and only if
\begin{equation}\label{exist-sol}
\beta < 2H + 1.
\end{equation}
If in addition we have
\begin{equation}\label{beta-1}
\beta \in (2H-1, \, d\wedge (2H+1)),
\end{equation}
then, for fixed $0<t_{0}<T$, the following statements are true:
\begin{description}
\item[a.-] Let $\mathbf{x} \in \mathbb{R}^{d}$ be fixed. Then there exist positive constants $c_{1}, c_{2}$ such that for every $s,t\in [t_{0}, T]$
\begin{equation*}
c_{1} \vert t-s\vert^{2H+1-\beta} \leq \mathbb{E} \left| u(t,\mathbf{x})-u(s,\mathbf{x}) \right|^{2} \leq c_{2} \vert t-s\vert^{2H+1-\beta}.
\end{equation*}
Moreover, for every fixed $\mathbf{x} \in \mathbb{R}^{d}$ the mapping
$$t\to u(t,\mathbf{x})$$
is almost surely H\"older continuous of any order $\delta \in \left( 0, \frac{2H+1-\beta}{2}\right)$. \\
\item[b.-] Fix $M>0$ and $t\in [t_{0}, T]$. Then there exist positive constants $c_{3}, c_{4}$ such that for any $\mathbf{x}, \mathbf{y} \in [-M,M]^{d}$
\begin{equation*}
c_{3} \vert \mathbf{x} - \mathbf{y} \vert^{2H+1-\beta} \leq \mathbb{E} \left| u(t,\mathbf{x}) -u(t,\mathbf{y}) \right|^{2} \leq c_{4} \vert \mathbf{x} - \mathbf{y} \vert^{2H+1-\beta}.
\end{equation*}
Moreover, for any $t\in [t_{0}, T]$ the mapping
$$ \mathbf{x} \to u(t,\mathbf{x})$$
is almost surely H\"older continuous of any order $\delta \in \left(0, \left( \frac{2H+1-\beta}{2} \right) \wedge 1 \right)$.
\item[c.-] Denote by $\Delta$ the following metric on $[0,T] \times \mathbb{R}^{d}$:
\begin{equation}
\label{metric}
\Delta \left( (t,\mathbf{x}); (s,\mathbf{y}) \right)= \vert t-s\vert^{2H+1-\beta} + \vert \mathbf{x}-\mathbf{y}\vert^{2H+1-\beta}.
\end{equation}
Fix $M>0$ and assume (\ref{exist-sol}). For every $t,s \in [t_{0}, T]$ and $\mathbf{x}, \mathbf{y} \in [-M,M]^{d}$ there exist positive constants $C_{1}, C_{2}$ such that
\begin{equation}\label{increm}
C_{1}\Delta \left( (t,\mathbf{x}); (s,\mathbf{y}) \right) \leq \mathbb{E} \left| u(t,\mathbf{x}) -u(s,\mathbf{y}) \right|^{2} \leq C_{2} \Delta \left( (t,\mathbf{x}); (s,\mathbf{y}) \right).
\end{equation}
\end{description}
\end{prop}
{\bf Proof: } By the isometry of the Wiener integral with respect to the Hermite sheet, the $L^{2}$ norm is
\begin{eqnarray*}
\mathbb{E}\, u(t, \mathbf{x})^{2}&=& \alpha_{H} \int_{0}^{t} du \int_{0}^{t} dv \, \vert u-v \vert^{2H-2}\int_{\mathbb{R}^{d}} \int_{\mathbb{R}^{d}}d\mathbf{y} \, d\mathbf{z} \, G_{1}(t-u, \mathbf{x} -\mathbf{y}) G_{1}(t-v, \mathbf{x} -\mathbf{z})\\
&&\times \displaystyle \prod_{i=1}^{d} H_{i}(2H_{i}-1) \vert y_{i} -z_{i} \vert^{2H_{i} -2}\\
&=& \alpha_{H} \int_{0}^{t} du \int_{0}^{t} dv \, \vert u-v \vert^{2H-2} \int_{\mathbb{R}^{d}} \frac{\sin(u\vert \mathbf{\xi} \vert) \sin(v\vert \mathbf{\xi} \vert)}{\vert \mathbf{\xi} \vert^{2}} \mu(d\mathbf{\xi}),
\end{eqnarray*}
where
\begin{equation}
\label{mu1}
\mu(d\mathbf{\xi})= c_{\mathbf{H}} \displaystyle \prod_{i=1}^{d}\vert \xi_{i} \vert^{-(2H_{i} -1)} \, d\mathbf{\xi},
\end{equation}
with $\mathbf{\xi} = (\xi_{1},\ldots , \xi_{d})$, $\alpha_{H}= H(2H-1)$ and $c_{\mathbf{H}}=\mathbf{H}(2\mathbf{H}-1)$. That is, $u(t,\mathbf{x})$ has the same $L^{2}$ norm as in the case $q=1$, i.e. when the noise of the equation is a fractional Brownian sheet. It therefore follows from \cite{BT}, Theorem 3.1, that the above integral is finite if and only if
\begin{equation*}
\int_{\mathbb{R}^{d}} \left( \frac{1}{1+ \vert \mathbf{\xi} \vert^{2}}\right)^{H+ \frac{1}{2}} \mu(d\mathbf{\xi}) < \infty
\end{equation*}
with $\mu$ given by (\ref{mu1}). The above condition is equivalent to $\beta < 2H+1$, see Example 3.4 in \cite{BT}.
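For the reader's convenience, here is the heuristic behind this equivalence (the rigorous statement being the cited Example 3.4 in \cite{BT}): near the origin the density $\prod_{i=1}^{d}|\xi_i|^{-(2H_i-1)}$ is integrable since each exponent $2H_i-1<1$, so only the behaviour at infinity matters; there the integrand behaves like $|\mathbf{\xi}|^{-(2H+1)}\prod_{i=1}^{d}|\xi_i|^{-(2H_i-1)}$, and
\begin{equation*}
\int_{|\mathbf{\xi}|>1} |\mathbf{\xi}|^{-(2H+1)} \prod_{i=1}^{d}|\xi_i|^{-(2H_i-1)}\, d\mathbf{\xi} < \infty
\ \Longleftrightarrow \
2H+1+\sum_{i=1}^{d}(2H_i-1) > d
\ \Longleftrightarrow \
\beta < 2H+1.
\end{equation*}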
The proof of the remaining items relies heavily on the covariance structure of the Hermite sheet, which is the same as that of the fractional Brownian sheet. A careful inspection of the proofs of Theorem 3.1 in \cite{BT} and of Propositions 1, 2, 3 and Corollary 1 in \cite{CT12} shows that the computations remain valid for any process with a covariance structure like the one considered in those articles, in particular in our case.
\begin{description}
\item{$\bullet$} The bounds for the increments in time are a consequence of Proposition 1 in \cite{CT12}, and the H\"older regularity in time comes from Corollary 1 in \cite{CT12}; this proves \textbf{a}.
\item{$\bullet$} The bounds for the increments in the space variable are deduced from Proposition 2 in \cite{CT12}, and the space H\"older regularity follows directly from Proposition 3 in \cite{CT12}; this proves \textbf{b}.
\item{$\bullet$} Point \textbf{c} follows from \textbf{a} and \textbf{b} by following the lines of the proof of Theorem 2 in \cite{CT12}.
\end{description}
\qed
\\
\subsection{Existence of local times}
\hspace*{1cm} We will show that the solution of (\ref{wave}), viewed as a process in $(t,\mathbf{x})$, admits a square integrable local time.
Let us define the local time of a stochastic process $(X_{\mathbf{t}})_{\mathbf{t}\in J}$, where $J$ denotes a subset of $\mathbb{R}^{d}$. For any Borel set $I\subset J$ the occupation measure of $X$ on $I$ is defined as
$$\mu_{I}(A)= \lambda \left( \left\{ \mathbf{t} \in I : X_{\mathbf{t}} \in A\right\}\right), \hskip0.5cm A \in {\cal{B}}(\mathbb{R}),$$
where $\lambda$ denotes the Lebesgue measure. If $\mu_{I}$ is absolutely continuous with respect to the Lebesgue measure, we say that $X$ has a local time on $I$. The local time is defined as the Radon-Nikodym derivative of $\mu_{I}$:
\begin{equation*}
L(I, x) = \frac{ d\mu_{I}}{d\lambda}(x), \hskip0.5cm x\in \mathbb{R}.
\end{equation*}
We will use the notation
\begin{equation*}
L(\mathbf{t},x):= L([0,\mathbf{t}], x), \hskip0.5cm \mathbf{t}\in \mathbb{R}^{d}_{+}, \ x\in \mathbb{R}.
\end{equation*}
The local time satisfies the occupation time formula
\begin{equation}
\label{oc}
\int_{I} f(X_{\mathbf{t}}) d\mathbf{t} = \int_{\mathbb{R}} f(y) L(I, y) dy
\end{equation}
for any Borel set $I\subset J$ and for any measurable function $f: \mathbb{R} \to \mathbb{R}$.
\begin{prop}
Let $u(t,\mathbf{x})$, $t\geq 0$, $\mathbf{x} \in \mathbb{R}^{d}$, be the solution to (\ref{wave}) and assume (\ref{beta-1}), where $\beta$ is given by (\ref{beta1}). Then on each set $[a,b] \times [A,B] \subset [0, \infty) \times \mathbb{R}^{d}$ the process $\left( u(t,\mathbf{x}), t\geq 0, \mathbf{x} \in \mathbb{R}^{d}\right)$ admits a local time $\left( L([a,b] \times [A,B], y), y\in \mathbb{R}\right)$
which is square integrable with respect to $y$:
\begin{equation*}
\mathbb{E} \int_{\mathbb{R}} L([a,b] \times [A,B], y)^{2}dy <\infty.
\end{equation*}
\end{prop}
{\bf Proof: } It is well known from \cite{B2} (see also Lemma 8.1 in \cite{Xiao1}) that, for a jointly measurable zero-mean stochastic process $X=(X(\mathbf{t}), \mathbf{t}\in[0,\mathbf{T}])$ (where $\mathbf{T}$ belongs to $\mathbb{R}^{d}$) with bounded variance, the condition
$$\int_{[0, \mathbf{T}]}\int_{[0, \mathbf{T}]}(\mathbb{E}[X(\mathbf{t})-X(\mathbf{s})]^{2})^{-1/2}d\mathbf{s} \, d\mathbf{t}<\infty$$
is sufficient for the local time of $X$ to exist on $[0,\mathbf{T}]$ almost surely and to be square integrable as a function of $y$. \\
According to the inequality (\ref{increm}), for all $I=[a,b]\times [A,B]$ contained in $[0, \infty) \times \mathbb{R}^{d}$ we have
$$\int_{I} \int_{I} (\mathbb{E} \left[ u(t,\mathbf{x})-u(s, \mathbf{y}) \right]^{2})^{-1/2}dt\,d\mathbf{x} \, ds \, d\mathbf{y} \leq C \int_I \int_I \left( |t-s|^{2H+1-\beta}+ \vert \mathbf{x}-\mathbf{y}\vert^{2H+1 -\beta} \right)^{-\frac{1}{2}}dt\,d\mathbf{x} \, ds \, d\mathbf{y},$$
and this is finite for $\beta >2H-1$ (indeed, the integrand is bounded by $|t-s|^{-(2H+1-\beta)/2}$, with exponent $(2H+1-\beta)/2<1$). Thus almost surely the local time of $u$ exists and is square integrable. \qed
\begin{remark}
It follows as a consequence of Lemma 8.1 in \cite{Xiao1} that the local time of the solution $u$ admits the following $L^{2}$ representation:
$$ L([a,b] \times [A,B], x)=\frac{1}{2\pi} \int_{\mathbb{R}} dz \, e^{-izx} \int_{[a,b] \times [A, B]} ds \, d\mathbf{y} \, e^{iu(s, \mathbf{y}) z} $$
for every $x \in \mathbb{R}$.
\end{remark}
\subsection{Existence of the joint density for the solution in the Rosenblatt case}
It is possible to obtain the existence of the joint density of the random vector $\left( u(t,\mathbf{x}), u(s,\mathbf{y}) \right)$ with $s\not=t$ or $\mathbf{x}\not= \mathbf{y}$ when the wave equation (\ref{wave}) is driven by a Hermite sheet of order $q=2$ (the Rosenblatt sheet). The result is based on a criterion for the existence of densities for vectors of multiple integrals which has recently been proven in \cite{NNP}.
Let us state our result.
\begin{prop}
Let $u(t,\mathbf{x})$, $t\geq 0$, $\mathbf{x} \in \mathbb{R}^{d}$, be the mild solution to (\ref{wave}) with $q=2$. Then for every $(t, \mathbf{x}) \not= (s, \mathbf{y})$, $(t, \mathbf{x}), (s, \mathbf{y}) \in (0, \infty)\times \mathbb{R}^{d}$, the random vector
$$\left( u(t,\mathbf{x}), u(s, \mathbf{y}) \right)$$
admits a density.
\end{prop}
{\bf Proof: } Note that for every $t\geq 0$ and $\mathbf{x} \in \mathbb{R}^{d}$, the random variable $u(t, \mathbf{x})$ is a multiple integral of order 2 with respect to the $d$-parametric Brownian sheet. A result in \cite{NNP} states that a two-dimensional vector of multiple integrals of order 2 admits a density if and only if the determinant of its covariance matrix is strictly positive. Denote by $C(t,s, \mathbf{x}, \mathbf{y})$ the covariance matrix of $\left( u(t,\mathbf{x}), u(s, \mathbf{y}) \right)$. By the covariance structure of the Hermite sheet, this determinant is the same for every $q\geq 1$. For $q=1$ the determinant $\det C(t,s, \mathbf{x}, \mathbf{y})$ is strictly positive, since in that case $\left( u(t,\mathbf{x}), u(s, \mathbf{y}) \right)$ is a Gaussian vector which admits a density when $(t, \mathbf{x}) \not= (s, \mathbf{y})$. This implies that $\det C(t,s, \mathbf{x}, \mathbf{y})$ is also strictly positive for $q=2$, and so the vector $\left( u(t,\mathbf{x}), u(s, \mathbf{y}) \right)$ admits a density for $q=2$ as well.
\qed
\begin{thebibliography}{99}
\bibitem{Abry1} Albin J.M.P. (1998): {\em A note on the Rosenblatt distributions.} Statistics and Probability Letters, 40(1), 83-91.
\bibitem{Abry2} Albin J.M.P. (1998): {\em On extremal theory for self similar processes.} Annals of Probability, 26(2), 743-793.
\bibitem{ALP2002} Ayache A., Leger S. \& Pontier M. (2002): {\em Drap brownien fractionnaire. (French) [The fractional Brownian sheet].} Potential Anal. {\bf 17}, no. 1, 31-43.
\bibitem{BT} Balan R.M. \& Tudor C. A. (2010): {\em The stochastic wave equation with fractional noise: A random field approach.} Stoch. Proc. Appl. {\bf 120}, 2468-2494.
\bibitem{BT10} Bardet J.-M. \& Tudor C. A. (2010): {\em A wavelet analysis of the Rosenblatt process: chaos expansion and estimation of the self-similarity parameter.} Stochastic Process. Appl. {\bf 120} 12, 2331-2362.
\bibitem{B1} Berman S. M. (1973): {\em Local nondeterminism and local times of Gaussian processes.} Indiana Univ. Math. J. 23, 69-94.
\bibitem{B2} Berman S. M. (1969): {\em Local times and sample function properties of stationary Gaussian processes.} Trans. Amer. Math. Soc. 137, 277-299.
\bibitem{Breton11} Breton J.-C. (2011): {\em On the rate of convergence in non-central asymptotics of the Hermite variations of fractional Brownian sheet.} Prob. Math. Stats. {\bf 31}(2), 301-311.
\bibitem{Cencov} Cencov N.N. (1956): {\em Wiener random fields depending on several parameters.} Dokl. Akad. Nauk SSSR 106, 607-609.
\bibitem{CT12} Clarke De la Cerda J. \& Tudor C. A. (2012): {\em Hitting times for the stochastic wave equation with fractional-colored noise.} Revista Matematica Iberoamericana, to appear, http://arxiv.org/abs/1203.3921v1.
\bibitem{CTV09} Chronopoulou A., Tudor C. A. \& Viens F. G. (2009): {\em Variations and Hurst index estimation for a Rosenblatt process using longer filters.} Electron. J. Stat. {\bf 3}, 1393-1435.
\bibitem{CTV11-Hp} Chronopoulou A., Tudor C. A. \& Viens F. G. (2011): {\em Self-similarity parameter estimation and reproduction property for non-Gaussian Hermite processes.} Commun. Stoch. Anal. {\bf 5} 1, 161-185.
\bibitem{DaSa1} Dalang R. \& Sanz-Sol\'e M. (2010): {\em Criteria for hitting probabilities with applications to systems of stochastic wave equations.} Bernoulli {\bf 16}(4), 1343-1368.
\bibitem{DoMa} Dobrushin R. L. \& Major P. (1979): {\em Non-central limit theorems for non-linear functionals of Gaussian fields.} Zeitschrift f\"ur Wahrscheinlichkeitstheorie und verwandte Gebiete, {\bf 50}, 27-52.
\bibitem{KP2009} Kim Y. T. \& Park H. S. (2009): {\em Stratonovich calculus with respect to fractional Brownian sheet.} Stoch. Anal. Appl. {\bf 27}, no. 5, 962-983.
\bibitem{MT07} Maejima M. \& Tudor C. A. (2007): {\em Wiener integrals with respect to the Hermite process and a non-central limit theorem.} Stoch. Anal. Appl. {\bf 25} 5, 1043-1056.
\bibitem{NNP} Nourdin I., Nualart D. \& Poly G. (2012): {\em Absolute continuity and convergence of densities for random vectors on Wiener chaos.} arXiv:1207.5115.
\bibitem{N} Nualart D. (1995): {\em Malliavin Calculus and Related Topics.} Springer.
\bibitem{PiTa1} Pipiras V. \& Taqqu M. S. (2001): {\em Integration questions related to the fractional Brownian motion.} Probability Theory and Related Fields, {\bf 118}, 251-281.
\bibitem{PT10-Hp} Pipiras V. \& Taqqu M. S. (2010): {\em Regularization and integral representations of Hermite processes.} Statist. Probab. Lett. {\bf 80} 23-24, 2014-2023.
\bibitem{RST10} R\'eveillac A., Stauch M. \& Tudor C. A. (2011): {\em Hermite variations of the fractional Brownian sheet.} Stochastics and Dynamics, 12 (3), 21pp.
\bibitem{Rose} Rosenblatt M. (1960): {\em Independence and dependence.} Proceedings of the 4th Berkeley Symposium on Mathematical Statistics, Vol. II, 431-443.
\bibitem{ST2008} Sottinen T. \& Tudor C. A. (2008): {\em Parameter estimation for stochastic equations with additive fractional Brownian sheet.} Stat. Inference Stoch. Process. {\bf 11}, no. 3, 221-236.
\bibitem{Taqqu75} Taqqu M. S. (1975): {\em Weak convergence to the fractional Brownian motion and to the Rosenblatt process.} Zeitschrift f\"ur Wahrscheinlichkeitstheorie und verwandte Gebiete, {\bf 31}, 287-302.
\bibitem{Taqqu79} Taqqu M. S. (1979): {\em Convergence of integrated processes of arbitrary Hermite rank.} Zeitschrift f\"ur Wahrscheinlichkeitstheorie und verwandte Gebiete, {\bf 50}, 53-83.
\bibitem{treves75} Treves F. (1975): {\em Basic Linear Partial Differential Equations.} Academic Press, New York.
\bibitem{Tudor08} Tudor C. A. (2008): {\em Analysis of the Rosenblatt process.} ESAIM Probab. Stat. {\bf 12}, 230-257.
\bibitem{TV1} Tudor C. A. \& Viens F. G. (2003): {\em It\^o formula and local time for the fractional Brownian sheet.} Electronic Journal of Probability, {\bf 8}(14), 1-31.
\bibitem{Xiao1} Xiao Y. (2009): {\em Sample path properties of anisotropic Gaussian random fields: A minicourse on stochastic partial differential equations.} Lecture Notes in Math. 1962, 145-212, Springer, Berlin.
\end{thebibliography}
\varepsilonnd{document}
\begin{document}
\maketitle
\begin{abstract}
We consider random walks on the set of all words over a finite alphabet such
that in each step only the last two letters of the current word may be modified
and only one letter may be adjoined or deleted. We assume that the transition
probabilities depend only on the last two letters of the current
word. Furthermore, we consider also the special case of random walks on free
products by amalgamation of finite
groups which arise in a natural way from random walks on the
single factors. The aim of this paper is to compute several equivalent formulas
for the rate of escape with respect to natural length functions for these random walks using different techniques.
\end{abstract}
\section{Introduction}
Let $A$ be a finite alphabet and let $A^\ast$ be the set of all finite words
over the alphabet $A$, where $\varepsilon$ is the empty
word. Furthermore, let $l:A \to [0;\infty)$ be a function representing a
`letter length'. The extension of $l$ to $A^\ast$ defined by $l(a_1\dots
a_n)=\sum_{i=1}^n l(a_i)$ then gives a suitable `word length'.
We consider a transient Markov chain $(X_n)_{n\in\mathbb{N}_0}$ on $A^\ast$
with $X_0=\varepsilon$ such that the transition probabilities depend only on the
last two letters of the current word, and in each step only the last two letters
may be modified and only one letter may be
adjoined or deleted. We are interested in whether
the sequence of random variables $l(X_n)/n$ converges almost surely to a
constant, and if so, to compute this constant. If the limit exists, it is called the
\textit{rate of escape}, or the \textit{drift with respect to $l$}. In this
paper, we study this question for random walks on regular languages and on free
products by amalgamation of finite groups, which form
special cases of regular languages and are a generalization of free
products of groups.
\par
It is well-known that the rate of escape w.r.t. the natural word length exists
for a random walk on a finitely generated group, which is governed by a
probability measure on the group. This follows from \textit{Kingman's subadditive
ergodic theorem}; see Kingman \cite{kingman}, Derriennic \cite{derrienic} and
Guivarc'h \cite{guivarch}.
There
are many detailed results for random walks on free products by
amalgamation:
Picardello and Woess \cite{picardello-woess} showed that a locally compact free
product by amalgamation of compact groups acts naturally on a tree. They also derived the
behaviour of the $n$-step transition probabilities.
Cartwright and Soardi \cite{cartwright-soardi} investigated random walks on
free products by amalgamation, where the amalgamating subgroup is finite and normal. They derived a
formula for the Green function $G(z)=\sum_{n\geq 0} p^{(n)}(e,e)z^n$, where
$p^{(n)}(e,e)$ is the $n$-step return probability from the identity $e$, of the
random walk on the amalgamated product in terms of
the Green functions of the single factors that is essentially the same as in
Woess \cite{woess3}. For random walks on
free products of finite groups Mairesse and Math\'eus \cite{mairesse1} have
developed a specific technique for the computation of the rate of
escape. For this purpose, they have to solve a more elegant system of algebraic equations than we have to solve, but
our results will be more general.
Three different formulas for the rate of escape of random walks on free
products of graphs and groups are derived in Gilch \cite{gilch}. The techniques used in \cite{gilch} were the
starting point for the computation of the rate of escape in this paper.
An important link
between drifts and harmonic analysis was obtained by Varopoulos
\cite{varopoulos}. He proved that for symmetric finite range random walks on groups the existence of non-trivial bounded
harmonic functions is equivalent to a non-zero rate of escape. The recent work
of Karlsson and Ledrappier \cite{karlsson-ledrappier07} generalizes this result to random walks with finite first moment of the step lengths. This leads to a
link between the rate of escape and the entropy of random walks; compare
e.g. with Kaimanovich and Vershik \cite{kaimanovich-vershik} and Erschler
\cite{erschler2}.
\par
We also consider random walks on regular languages which can be seen as a generalization
of free products by amalgamation. Random walks on this class of structures have been
investigated by several authors:
\mbox{Malyshev \cite{malyshev},} \cite{malyshev-inria} and Gairat, Malyshev, Menshikov, Pelikh \cite{malyshev2}
stated criteria for transience, null-recurrence and positive
recurrence. More\-over, \mbox{Malyshev} proved limit theorems concerning existence
of the stationary distribution and speed in the transient case and convergence
of conditional distributions in the ergodic case; in particular, he
showed that the rate of escape w.r.t. the natural word length (that is, $l(\cdot)=1$) is constant and it is strictly
positive if and only if the random walk is transient. Yambartsev and Zamyatin
\cite{yambartsev-zamyatin} proved limit theorems for random walks on two
semi-infinite strings over a finite alphabet.
Lalley \cite{lalley} also investigated random walks on regular languages. He found out that the $n$-step return probabilities must obey one of three
different types of power laws. His analysis is based on a finite algebraic
system of generating functions related to the Green \mbox{function.} This
algebraic system is also used in this paper to compute explicit formulas for
the rate of escape.
The rate of escape has also been studied on trees, which may be seen as a
special case of our context:
Nagnibeda and Woess \cite[Section 5]{woess2} proved that the
rate of escape of random walks on trees with finitely many cone types is
non-zero and gave a formula for it. One of the techniques used in this paper
for the computation of the rate of escape was motivated by their approach.
\par
Our aim is to compute formulas for the rate of escape of random walks on
regular languages and free products by amalgamation of finite groups. In Section \ref{reglanguages}
we compute the rate of escape of random walks on regular languages, while in
Section \ref{amalgams} we compute it for random walks on free products by amalgamation. In Section
\ref{exit-time} we compute the rate of escape analogously to Section
\ref{rate-of-escape} and in Section \ref{dgf} we compute it by an application of a theorem
of Sawyer and Steger \cite{sawyer}. In Section \ref{limitprocesses} we use the
algebraic group structure of free products by amalgamation to compute the rate of escape with
respect to the natural word length. This
approach is based on a technique
which was already used by Ledrappier \cite{ledrappier} and Furstenberg
\cite{furstenberg}. Finally,
in Section \ref{sample} we give sample computations.
\section{Rate of Escape of Random Walks on Regular Languages}
\label{reglanguages}
\subsection{Regular Languages and Random Walks}
Let $A$ be a finite alphabet and $\varepsilon$ be the empty word. A \textit{random walk on a regular language} is a Markov
chain on the set $A^\ast:=\bigcup_{n\geq 1} A^n\cup \{\varepsilon\}$ of all
finite words over the alphabet $A$, whose transition probabilities obey the
following rules:
\begin{enumerate}
\item[(i)] Only the last two letters of the current word may be modified.
\item[(ii)] Only one letter may be adjoined or deleted at one instant of time.
\item[(iii)] Adjunction and deletion may only be done at the end of the current word.
\item[(iv)] Probabilities of modification, adjunction or deletion depend only on the
last two letters of the current word.
\end{enumerate}
Compare with Lalley \cite{lalley}. The hypothesis that transition probabilities
depend only on the last two letters of the current word can be weakened to
dependence of the last $K\geq 2$ letters by a ``recoding trick'', which is also
described by Lalley. In general, a regular language is a subset of $A^\ast$
whose words are accepted by a finite-state automaton. It is required that each
modification of a word of the regular language in one single step again yields a
word of the regular language. The results below, however, are general enough
that w.l.o.g. -- for ease and better readability -- we may
assume that the regular language consists of the whole \mbox{set $A^\ast$.}
\par
The random walk on $A^\ast$ is described by the sequence of random variables
$(X_n)_{n\in\mathbb{N}_0}$. Initially, we have $X_0=\varepsilon$.
For two words $w,w'\in A^\ast$ we write $ww'$ for the concatenated word.
We use the following abbreviations for the transition probabilities: for $w\in
A^\ast$, $a,a',b\in A$, $b',c'\in A\cup \{\varepsilon\}$, $n\in\mathbb{N}_0$, let
$$
\begin{array}{c}
\mathbb{P}[X_{n+1}=wa'b'c'\mid X_n=wab] = p(ab,a'b'c'),\\[0.5ex]
\mathbb{P}[X_{n+1}=b'c'\mid X_n=a] = p(a,b'c'),\\[0.5ex]
\mathbb{P}[X_{n+1}=b'\mid X_n=\varepsilon] = p(\varepsilon,b').
\end{array}
$$
If we want to start the random walk at $w\in A^\ast$ instead of $\varepsilon$,
we write for short $\mathbb{P}_w[\,\cdot\,]:=\mathbb{P}[\,\cdot \mid X_0=w]$.
Suppose
we are given a function $l:A\to [0;\infty)$. We extend $l$ to $A^\ast$ by defining
$l(a_1a_2\dots a_n):=\sum_{i=1}^n l(a_i)$ for \mbox{$a_1a_2\dots a_n\in A^n$.} Additionally, we set $l(\varepsilon):=0$. If $l(a)=1$ for each $a\in A$,
then $l$ is just the \textit{natural word length} which is denoted by $|\cdot
|$. If there is a non-negative constant $\ell$ such that
$$
\lim_{n\to\infty}\frac{l(X_n)}{n} = \ell \quad \textrm{ almost surely,}
$$
then $\ell$ is called the \textit{rate of escape} with respect to $l$.
Malyshev \cite{malyshev} proved that the rate of escape w.r.t. the natural word length
exists. Furthermore, it follows from Malyshev's results that the rate of escape w.r.t. $l$ is zero if and only if $(X_n)_{n\in\mathbb{N}_0}$ is recurrent. Our aim is
to compute a formula for $\ell$ in the transient case. Therefore, we assume
from now on that $(X_n)_{n\in\mathbb{N}_0}$ is transient.
\par
Moreover, we assume that the random walk on $A^\ast$ is \textit{suffix-irreducible},
that is, for all $w\in A^\ast$ with $\mathbb{P}[X_m=w]>0$ for some $m\in\mathbb{N}$ and for
all $ab\in A^2$ there is some $n\in\mathbb{N}$ such that
$$
\mathbb{P}\Bigl[ \exists w_1\in A^\ast : X_n=ww_1ab, \forall k<n: |X_k|\geq |w| \,\Bigl|\, X_0=w\Bigr]>0.
$$
If suffix-irreducibility is dropped, then the rate of escape may be
non-deterministic; e.g., if $A=\{a,b\}$ with $l(a)=l(b)=1$ and
$p(aa,aaa)=p>1/2$, $p(aa,a)=p(a,\varepsilon)=1-p$,
$p(\varepsilon,a)=p(\varepsilon,b)=1/2$,
$p(bb,bbb)=q>1/2$, $p(bb,b)=p(b,\varepsilon)=1-q$ with $p\neq
q$, then $l(X_n)/n$ converges to a non-deterministic limit, depending on whether the walk eventually escapes through words consisting of $a$'s or of $b$'s.
\subsection{The Rate of Escape}
\label{rate-of-escape}
The technique we use to compute $\ell$ was motivated by Nagnibeda and Woess
\cite[Section 5]{woess2}. For $k\in\mathbb{N}_0$ we define the \textit{$k$-th exit time} as
$$
\mathbf{e}_k:=\sup \bigl\lbrace m\in\mathbb{N}_0 \,\bigl|\, |X_m|=k\bigr\rbrace.
$$
As the alphabet $A$ is finite
and the random walk on $A^\ast$ is assumed to be transient, we have
$\mathbf{e}_k<\infty$ almost surely for every $k\in\mathbb{N}_0$. Furthermore, we write $\mathbf{W}_k:=X_{\mathbf{e}_k}$ and
$\mathbf{i}_k:=\mathbf{e}_k-\mathbf{e}_{k-1}$ with $\mathbf{e}_{-1}:=0$. We
first show that $(\mathbf{W}_k,\mathbf{i}_k)_{k\geq 3}$ is a Markov chain. For this purpose, we
introduce some useful functions: for $a,b,c\in A$ and real $z>0$ define
\begin{eqnarray*}
H(ab,c|z) & := & \sum_{n=1}^\infty \mathbb{P}_{ab}\bigl[X_n=c,\forall m<n:
|X_m|>1\bigr]\, z^n,\\
\xi(abc) & :=& \sum_{a'b'c'\in A^3} p(bc,a'b'c')\cdot \Bigl(1- \sum_{d\in A} H(b'c',d|1)\Bigr).
\end{eqnarray*}
Observe that
$$
\mathbb{P}_{abc}\bigl[ X_n=ab', \forall m<n:|X_m|>2\bigr]
= \mathbb{P}_{bc}\bigl[ X_n=b',\forall m<n:|X_m|>1\bigr],
$$
as the transition probabilities depend only on the last two letters of the
current word and in each step only one letter may be deleted. Thus, the number $\xi(abc)$ is the probability of starting at $abc\in A^3$ such that
$|X_n|\geq 4$ for all $n\geq 1$, and it does not depend on the letter
``$a$''. Furthermore, for $a_1\dots a_n\in A^\ast$ with $n\geq 3$, let $[a_1\dots
a_n]_3:=a_{n-2}a_{n-1}a_n$.
With this notation we get:
\begin{Prop}\label{prop-e_k}
The stochastic process $(\mathbf{W}_k,\mathbf{i}_k)_{k\geq 3}$ is a Markov chain with transition probabilities
\begin{eqnarray*}
&& \mathbb{P}\bigl[\mathbf{W}_{k+1}=x_{k+1},\mathbf{i}_{k+1}=n_{k+1}\, \bigl|\,
\mathbf{W}_k=x_k,\mathbf{i}_k=n_k\bigr] \\[2ex]
& = & \frac{\xi([x_{k+1}]_3)}{\xi([x_{k}]_3)}\cdot \mathbb{P}_{x_k}\bigl[X_{n_{k+1}}=x_{k+1}, \forall i\in\{1,\dots,n_{k+1}\}: |X_i|>k\bigr]
\end{eqnarray*}
for $n_k,n_{k+1}\in\mathbb{N}$, $x_k,x_{k+1}\in A^\ast$ with $|x_k|=k$,
$|x_{k+1}|=k+1$ and \mbox{$\mathbb{P}\bigl[\mathbf{W}_k=x_k,\mathbf{i}_k=n_k\bigr]>0$.}
\end{Prop}
\begin{proof}
Let $n_0,n_1,\dots,n_{k+1}\in\mathbb{N}$ and $x_0,x_1,\dots,x_{k+1}\in A^\ast$ with
$|x_j|=j$ for \mbox{$j\in\{0,1,\dots,k+1\}$.} Define the event
$$
\bigl[\mathbf{W}_0^m=x_0^m,\mathbf{i}_0^m=n_0^m\bigr] :=
\bigl[\forall j\in\{0,1,\dots,m\}:\mathbf{W}_j=x_j,\mathbf{i}_j=n_j\bigr],
$$
where $m\in\{k,k+1\}$. With this notation we get
\begin{eqnarray*}
\mathbb{P}\bigl[\mathbf{W}_0^k=x_0^k,\mathbf{i}_0^k=n_0^k\bigr]
&=& \mathbb{P}\left[
\begin{array}{c}
\forall j\in\{0,\dots,k\} \, \forall \lambda\in\{0,\dots,n_j\}:\\
|X_{n_1+\dots+n_{j-1}+\lambda}|\geq j, X_{n_1+\dots+n_{j}}= x_j
\end{array}
\right]
\cdot \mathbb{P}_{x_k}\bigl[\forall n\geq 1: |X_n|>k\bigr]\\
&=& \mathbb{P}\left[
\begin{array}{c}
\forall j\in\{0,\dots,k\} \, \forall \lambda\in\{0,\dots,n_j\}:\\
|X_{n_1+\dots+n_{j-1}+\lambda}|\geq j, X_{n_1+\dots+n_{j}}= x_j
\end{array}
\right]
\cdot \xi([x_k]_3).
\end{eqnarray*}
Analogously,
\begin{eqnarray*}
&&\mathbb{P}\bigl[\mathbf{W}_0^{k+1}=x_0^{k+1},\mathbf{i}_0^{k+1}=n_0^{k+1}\bigr]\\
&=& \mathbb{P}\left[
\begin{array}{c}
\forall j\in\{0,\dots,k\} \, \forall \lambda\in\{0,\dots,n_j\}:\\
|X_{n_1+\dots+n_{j-1}+\lambda}|\geq j, X_{n_1+\dots+n_{j}}= x_j
\end{array}
\right]\\[1ex]
&&\
\cdot \mathbb{P}_{x_k}\bigl[\forall i\in\{1,\dots,n_{k+1}\}:|X_i|>k,X_{n_{k+1}}=x_{k+1}\bigr]
\cdot \xi([x_{k+1}]_3).
\end{eqnarray*}
Thus, under the assumption that
$\mathbb{P}\bigl[\mathbf{W}_0^k=x_0^k,\mathbf{i}_0^k=n_0^k\bigr]>0$ we obtain
\begin{eqnarray*}
&&\mathbb{P}\bigl[\mathbf{W}_0^{k+1}=x_0^{k+1},\mathbf{i}_0^{k+1}=n_0^{k+1}\, \bigl|\,
\mathbf{W}_0^k=x_0^k,\mathbf{i}_0^k=n_0^k\bigr] \\
&=&
\frac{\xi([x_{k+1}]_3)}{\xi([x_{k}]_3)}\cdot
\mathbb{P}_{x_k}\bigl[\forall i\in\{1,\dots,n_{k+1}\}:|X_i|>k,X_{n_{k+1}}=x_{k+1}\bigr].
\end{eqnarray*}
\end{proof}
Observe that $\mathbb{P}_{x_k}\bigl[\forall
i\in\{1,\dots,n_{k+1}\}:|X_i|>k,X_{n_{k+1}}=x_{k+1}\bigr]$ depends only on
$n_{k+1}$, $[x_k]_3$ and $[x_{k+1}]_3$. We use this observation to construct a new
Markov chain on the state space
$$
\mathcal{Z} := \bigl\lbrace
(abc,n)\in \overline{A}^3\times \mathbb{N} \, \bigl| \, \exists de\in A^2:
\mathbb{P}_{de}[X_n=abc, \forall m\in\{1,\dots,n\}:|X_m|>2]>0
\bigr\rbrace,
$$
where $\overline{A}^3 :=\{abc \in A^3 \mid \xi(abc)>0\}$
with the following transition probabilities:
$$
q\bigl( (abc,n),(a'b'c',n')\bigr) =
\frac{\xi(a'b'c')}{\xi(abc)}\cdot
\mathbb{P}_{abc}\bigl[ X_{n'}=aa'b'c', \forall i\in\{1,\dots,n'\}: |X_i|\geq 4\bigr].
$$
Observe that
$$
\mathbb{P}\bigl[\mathbf{W}_{k+1}=x_{k+1},\mathbf{i}_{k+1}=n_{k+1}\, \bigl|\,
\mathbf{W}_k=x_k,\mathbf{i}_k=n_k\bigr] = q\bigl(([x_k]_3,n_k),([x_{k+1}]_3,n_{k+1})\bigr)
$$
for $k\geq 3$ and that the transition probabilities do not depend on
$n_k$. This implies that $\bigl( [\mathbf{W}_k]_3\bigr)_{k\geq 3}$ is also a
Markov chain on $\overline{A}^3$ with transition probabilities
$$
\tilde{q}(abc,a'b'c') =
\sum_{n'\in\mathbb{N}} q\bigl( (abc,n_{abc}),(a'b'c',n')\bigr),
$$
where the $n_{abc}$'s on the right hand side of the equation may be chosen
arbitrarily. Observe that $[\mathbf{W}_k]_3$ can take only a finite number of values,
since the alphabet $A$ is finite and $|[\mathbf{W}_k]_3| = 3$. At this point we
need the assumption of suffix-irreducibility made above; it ensures that
$\bigl([\mathbf{W}_k]_3\bigr)_{k\geq 3}$ is irreducible and therefore has an invariant probability measure
$\nu$.
\begin{Lemma}
Let $abc\in A^3$ and $n\in\mathbb{N}$, and define
$$
\pi(abc,n):=\sum_{def \in \overline{A}^3} \nu(def)\, q\bigl((def,n_{def}),(abc,n)\bigr),
$$
where $n_{def}$ can be chosen arbitrarily. Then $\pi$ is the unique invariant probability
measure of $\bigl([\mathbf{W}_k]_3,\mathbf{i}_k\bigr)_{k\geq 3}$.
\end{Lemma}
\begin{proof}
It is a straightforward computation to prove the lemma:
\begin{eqnarray*}
&& \sum_{(ghi,s)\in\mathcal{Z}} \pi(ghi,s)\ q\bigl((ghi,s),(abc,n)\bigr)\\
&=& \sum_{(ghi,s)\in\mathcal{Z}} \sum_{def\in \overline{A}^3} \nu(def)\
q\bigl((def,n_{def}),(ghi,s)\bigr) \ q\bigl((ghi,s),(abc,n)\bigr) \\
&=& \sum_{ghi\in \overline{A}^3} q\bigl((ghi,n_{ghi}),(abc,n)\bigr) \sum_{def\in \overline{A}^3}
\nu(def) \sum_{s\in\mathbb{N}} q\bigl((def,n_{def}),(ghi,s)\bigr)\\
&=& \sum_{ghi\in \overline{A}^3} q\bigl((ghi,n_{ghi}),(abc,n)\bigr)\ \nu(ghi)
= \pi(abc,n).
\end{eqnarray*}
\end{proof}
Define $g:\mathcal{Z}\to\mathbb{N}: (abc,n) \mapsto n$. An application of the
\textit{ergodic theorem for positive recurrent Markov chains} yields
\begin{equation}\label{integral}
\frac{1}{k} \sum_{i=3}^k g\bigl([\mathbf{W}_i]_3,\mathbf{i}_i\bigr) = \frac{\mathbf{e}_k-
\mathbf{e}_2}{k}\ \xrightarrow{k\to\infty}\ \int g(abc,n)\,d\pi \quad
\textrm{almost surely},
\end{equation}
if the integral exists. Our next aim is to ensure finiteness of this integral
and to compute a formula for it. For this purpose, we define
\begin{eqnarray*}
\overline{G}(ab,cd|z) &:=& \sum_{n=0}^\infty \mathbb{P}_{ab}\bigl[ X_n=cd,\forall
m\leq n:|X_m|\geq 2\bigr]\, z^n,\\
\mathcal{K}(ab,cde|z) & :=& \sum_{n=1}^\infty \mathbb{P}_{ab}\bigl[ X_n=cde, \forall
m \in\{1,\dots,n\}:|X_m|\geq 3\bigr]\, z^n\\
&=& \sum_{fg\in A^2} p(ab,cfg)\cdot z \cdot \overline{G}(fg,de|z),
\end{eqnarray*}
where $a,b,c,d,e\in A$ and $z>0$.
We have the following linear system of equations:
\begin{eqnarray}\label{g-system}
\overline{G}(ab,cd|z) &=& \delta_{ab}(cd) + \sum_{c'd'\in A^2}
p(ab,c'd') \cdot z\cdot \overline{G}(c'd',cd|z) +\nonumber\\
&& \ + \sum_{c'd'e'\in A^3} p(ab,c'd'e')\cdot
z\cdot \sum_{f'\in A} H(d'e',f'|z)\cdot \overline{G}(c'f',cd|z).
\end{eqnarray}
Moreover, we also have the following finite system of equations:
\begin{eqnarray}\label{h-equations}
H(ab,c|z) &=& p(ab,c)\cdot z + \sum_{de\in
A^2} p(ab,de)\cdot z\cdot H(de,c|z)\nonumber\\
&& \ + \sum_{def\in A^3} p(ab,def)\cdot z \cdot \sum_{g\in A} H(ef,g|z)\cdot H(dg,c|z);
\end{eqnarray}
compare with Lalley \cite{lalley}.
The system (\ref{h-equations}) consists of quadratic equations, and
thus the functions $H(\cdot,\cdot|z)$ are algebraic if the transition
probabilities are algebraic. If one
has solved this system, then the linear system of equations (\ref{g-system})
can be solved easily. In particular, the functions
$\overline{G}(\cdot,\cdot|z)$ are also algebraic for algebraic transition probabilities. Observe that we can write
$$
\tilde q(abc,a'b'c') = \frac{\xi(a'b'c')}{\xi(abc)}\ \mathcal{K}(bc,a'b'c'|1),
$$
so $\nu$ can be computed once (\ref{h-equations}) has been solved. Turning back to our integral in (\ref{integral}) we can
now compute:
\begin{Prop}\label{exit-th}
We have $\lim_{k\to\infty} \mathbf{e}_k/k=\Lambda$ almost surely, where
\begin{eqnarray*}
\Lambda:=
\sum_{abc,def\in \overline{A}^3} \nu(def)\cdot \frac{\xi(abc)}{\xi(def)}\cdot
\frac{\partial}{\partial z}\biggl[ \sum_{gh\in A^2} p(ef,agh)\cdot z \cdot \overline{G}(gh,bc|z)\biggr]\Biggl|_{z=1}.
\end{eqnarray*}
\end{Prop}
\begin{proof}
We compute directly:
\begin{eqnarray*}
&& \int g(abc,n)\,d\pi\\
&=& \sum_{(abc,n)\in\mathcal{Z}} n\cdot \sum_{def\in \overline{A}^3} \nu(def)\cdot q\bigl(
(def,n_{def}),(abc,n)\bigr)\\
&=& \sum_{def\in \overline{A}^3} \nu(def) \sum_{(abc,n)\in\mathcal{Z}} n\cdot
\frac{\xi(abc)}{\xi(def)}\cdot \mathbb{P}_{def}\bigl[ X_n=dabc, \forall
m\in\{1,\dots,n\}:|X_m|\geq 4\bigr]\\
&=& \sum_{abc,def\in \overline{A}^3} \nu(def)\cdot \frac{\xi(abc)}{\xi(def)}\cdot
\sum_{n\in\mathbb{N}} n\cdot \mathbb{P}_{def}\bigl[X_n=dabc, \forall
m\in\{1,\dots,n\}:|X_m|\geq 4\bigr]\\
&=& \sum_{abc,def\in \overline{A}^3} \nu(def)\cdot \frac{\xi(abc)}{\xi(def)}\cdot
\frac{\partial}{\partial z}\bigl[\mathcal{K}(ef,abc|z) \bigr]\Bigl|_{z=1}.
\end{eqnarray*}
Finiteness of the integral is ensured if all functions $H(\cdot,\cdot |z)$ and
$\overline{G}(\cdot,\cdot|z)$ have radii of convergence bigger than $1$. But
this follows from Lalley \cite{lalley}: he proved that the Green functions of
random walks on regular languages have radii of convergence bigger \mbox{than $1$.}
\end{proof}
Now we can state an explicit formula for the rate of escape:
\begin{Th}
\label{reglang-roe}
There is some non-negative constant $\ell$ such that
$$
\ell = \lim_{n\to\infty} \frac{l(X_n)}{n} = \frac{\Delta}{\Lambda}>0 \quad
\textrm{almost surely,}
$$
where
$$
\Delta := \sum_{abc,def\in \overline{A}^3} \nu(def)\,l(a)\,\frac{\xi(abc)}{\xi(def)} \mathcal{K}(ef,abc|1).
$$
In particular, $\lim_{n\to\infty} |X_n|/n = 1/\Lambda$ almost surely.
\end{Th}
\begin{proof}
With $h:\mathcal{Z}\to[0;\infty)$ defined by $h(abc,n):=l(a)$ we obtain
$$
\frac{1}{n}\sum_{k=3}^n h\bigl([\mathbf{W}_k]_3,\mathbf{i}_k\bigr)
\xrightarrow{n\to\infty} \int h\,d\pi = \lim_{m\to\infty} \frac{l(X_{\mathbf{e}_m})}{m}.
$$
Simple computations lead to the following formula for this limit:
$$
\Delta := \int h\,d\pi = \sum_{abc,def\in \overline{A}^3} \nu(def)\cdot l(a)\cdot \frac{\xi(abc)}{\xi(def)}\cdot \mathcal{K}(ef,abc|1).
$$
Defining \mbox{$\mathbf{k}(n):=\max\{k\in\mathbb{N}_0 \mid \mathbf{e}_k\leq n\}$} we
obtain analogously to Nagnibeda and Woess \cite[Proof of Theorem D]{woess2}
$$
\ell = \lim_{n\to\infty} \frac{l(X_n)}{n} = \lim_{n\to\infty} \frac{l(X_{\mathbf{e}_{\mathbf{k}(n)}})}{\mathbf{k}(n)}\frac{\mathbf{k}(n)}{\mathbf{e}_{\mathbf{k}(n)}}
= \frac{\Delta}{\Lambda}>0.
$$
\end{proof}
Observe that for algebraic transition probabilities the rate of escape is
obtained by solving the algebraic system of equations (\ref{h-equations}). This yields that the rate
of escape is also algebraic, if the transition probabilities are algebraic and
$l(\cdot)$ takes only algebraic values.
\section{Rate of Escape of Random Walks on Free Products by Amalgamation}
\label{amalgams}
In this section we compute three formulas for the rate of escape of random
walks on free products by amalgamation of finite groups. This class of
structures forms a special case of regular languages.
\subsection{Free Products by Amalgamation}
Let $r\in\mathbb{N}$ with $r\geq 2$. Consider finite groups
$\Gamma_1,\dots,\Gamma_r$ with identities $e_1,\dots,e_r$ and subgroups $H_1\subset
\Gamma_1$, $\dots,H_r\subset \Gamma_r$. We assume that $H_1,\dots,H_r$ are
isomorphic, that is, there is a finite group $H$ such that
there are isomorphisms
$\varphi_1:H\to H_1,\dots,\varphi_r:H\to H_r$. Thus, we identify in the
following each $H_i$ with $H$. To explain the concept of free products
by amalgamation, we give at first a simple example: consider $\Gamma_1=\Gamma_2=\mathbb{Z}/d\mathbb{Z}$, $d\in\mathbb{N}$ even,
and the subgroup $H=\mathbb{Z}/2\mathbb{Z}$. Let $\Gamma_1$ be generated by an element $a$, and
$\Gamma_2$ by an element $b$.
The free product by amalgamation $\mathbb{Z}/d\mathbb{Z} \ast_{\mathbb{Z}/2\mathbb{Z}}
\mathbb{Z}/d\mathbb{Z}$ consists then of all finite words over the alphabet
$\{a,b\}$, where we have the relations $a^{d/2}=b^{d/2}$ and
$a^d=b^d=\varepsilon$. That is, any two words which can be deduced from each
other with these relations represent the same element. The relation $a^{d/2}=b^{d/2}$ means that the subgroups $\mathbb{Z}/2\mathbb{Z}$ in the two copies of
$\mathbb{Z}/d\mathbb{Z}$ are identified. E.g., for $d=4$ it is $a^3bab^2=ab^3a^3=aba$.
To help visualize the concept of free products
by amalgamation, we may also think of the Cayley graphs $X_i$ of $\Gamma_i$. We
connect the graphs $X_i$ by identifying the subgroups $H=H_i$; at each
non-trivial coset of $H$ in all graphs $X_i$ we attach copies of $X_j$, $j\neq i$,
where the coset is identified with $H$ of the copy of $X_j$. This construction
is then iterated.
\par
We explain below free products by amalgamation in more detail. The quotient $\Gamma_i / H$ consists of all cosets $yH=\{yh \mid h\in H\}$, where $y\in\Gamma_i$. We fix
representatives $x_{i,1}=e_i,x_{i,2},\dots,x_{i,n_i}$ for the elements of
$\Gamma_i / H$, that is, for each $y\in\Gamma_i$ there is a unique
$x_{i,k}$ with $y\in x_{i,k}H$. We write $\Gamma_i^\times :=\Gamma_i\setminus H$ and $R_i:=\{x_{i,2},\dots,x_{i,n_i}\}$
with $n_i=[\Gamma_i:H]$. For any element $x\in\bigcup_{i=1}^r \Gamma_i$ we set
$\tau(x):=i$, if $x\in\Gamma_i^\times$, and $\tau(x):=0$, if $x\in H$.
\par
The free product of $\Gamma_1,\dots,\Gamma_r$
by amalgamation with respect to $H$ is given by
$$
\Gamma := \Gamma_1\ast_H \Gamma_2 \ast_H \dots \ast_H \Gamma_r,
$$
which consists of all finite words of the form
\begin{eqnarray}\label{word-form}
x_1x_2\dots x_n h,
\end{eqnarray}
where $h\in H$, $n\in\mathbb{N}_0$ and $x_1,\dots,x_n\in \bigcup_{i=1}^r R_i$ such that $\tau(x_i)\neq
\tau(x_{i+1})$. In the following we will always use this representation of
words. Suppose we are given a function \mbox{$l:\bigcup_{i=1}^r R_i\to
[0;\infty)$.} Then we extend $l$ to a length function on $\Gamma$ by setting
$l(x_1\dots x_nh):=\sum_{i=1}^n l(x_i)$.
The \textit{natural word length} is defined to be $\Vert x_1\dots x_nh
\Vert:=n$. In particular, $l(h)=\Vert h\Vert=0$ for all $h\in H$.
For two words \mbox{$w_1=x_1x_2\dots x_m h$,} $w_2=y_1y_2\dots y_nh' \in\Gamma$
a group operation is defined in the following way: first, concatenate the two
words, then replace $hy_1$ in the middle by $y_1'h_1$ such that $y_1'$ is a
representative for the class of $hy_1$. Iterate the last step with $h_1y_2$ and
so on. Finally, we get a word of the form $x_1\dots x_my_1'\dots y_n'h_n$ with
$h_n\in H$, that is, we get the requested equivalent
form (\ref{word-form}) for the concatenated word $w_1\circ w_2$. Note also that
$w_1^{-1}=h^{-1}x_m^{-1}\dots x_1^{-1}$ is the inverse of $w_1$ and can be
written in the form of (\ref{word-form}). The empty word $e$ is the identity of
this group operation. Observe that each $\Gamma_i$ is a subset of $\Gamma$.
\par
Suppose we are given probability measures $\mu_i$ on
$\Gamma_i$. Let $\alpha_1,\dots,\alpha_r$ be strictly positive real numbers such that
$\sum_{i=1}^r \alpha_i=1$. A probability measure on $\Gamma$ is given by
$$
\mu(x) :=
\begin{cases}
\alpha_{\tau(x)} \mu_{\tau(x)}(x), & \textrm{if } x\in
\bigcup_{i=1}^r\Gamma_i^\times\\
\sum_{i=1}^r \alpha_i \mu_i(x), & \textrm{if } x\in H\\
0, & \textrm{otherwise}
\end{cases}.
$$
The $n$-th convolution power of $\mu$ is denoted by $\mu^{(n)}$.
The random walk $(X_n)_{n\in\mathbb{N}_0}$ on $\Gamma$ is then governed by the transition
probabilities $p(w_1,w_2):=\mu(w_1^{-1}w_2)$, where $w_1,w_2\in\Gamma$. Initially, $X_0:=e$.
\begin{Lemma}
The random walk on $\Gamma$ is recurrent if and only if $r=2=[\Gamma_1:H]=[\Gamma_2:H]$.
\end{Lemma}
\begin{proof}
Assume $r=2=[\Gamma_1:H]=[\Gamma_2:H]$. This implies $H\unlhd
\Gamma_1,\Gamma_2$, that is, $(\Gamma_1 \ast_H \Gamma_2)/ H \simeq (\Gamma_1 /
H)\ast (\Gamma_2/H)$ and $\Gamma_1 / H \simeq \mathbb{Z}/ 2\mathbb{Z} \simeq
\Gamma_2 / H$. Since it is well-known that each random walk on the free product $(\mathbb{Z}/2\mathbb{Z}) \ast (\mathbb{Z}/ 2\mathbb{Z})$, which arises from
a convex combination of probability measures on the single factors, is
recurrent, the random walk on $\Gamma$ also must be recurrent.
\par
Assume now that $r=2=[\Gamma_1:H]=[\Gamma_2:H]$ does not hold. Then either
$r\geq 3$ or w.l.o.g. $[\Gamma_1:H]\geq 3$. In both cases, $\Gamma$ is
non-amenable (for further details see e.g. Woess \cite[Th.10.10]{woess}). With Woess
\cite[Cor.12.5]{woess} we get that the random walk on $\Gamma$ must be transient.
\end{proof}
From now on we exclude the case $r=2=[\Gamma_1:H_1]=[\Gamma_2:H_2]$.
In the following three subsections we want to compute three explicit formulas for the
rate of escape of our random walk on $\Gamma$. The first approach uses the
technique from the previous section, while the second approach arises from an
application of a theorem of Sawyer and Steger \cite{sawyer}. The third
technique uses the group
structure of $\Gamma$, but is restricted to the computation of the rate of
escape w.r.t. the natural word length.
\subsection{Exit Time Technique}
\label{exit-time}
We use the technique developed in Section \ref{rate-of-escape} to compute
$\ell$. Notice that $\Gamma$ is a special case of a regular language and our
random walk on $\Gamma$ fulfills the assumptions of our investigated random
walks on regular languages: starting from a word $x_1\dots x_nh\in \Gamma$ we
can only move in one step with positive probability to a word of the form
\begin{itemize}
\item $x_1\dots x_{n-1}x_n'h'$ with $x_n'h'\in \Gamma_{\tau(x_n)}^\times$,
namely with probability $\mu(h^{-1}x_n^{-1}x_n'h')$, or
\item $x_1\dots x_{n}x_{n+1}h'$ with $x_{n+1}h'\in \bigcup_{i=1,i\neq
\tau(x_n)}^r\Gamma_i^\times$, namely with probability \mbox{$\mu(h^{-1}x_{n+1}h')$,} or
\item $x_1\dots x_{n-1}h'$ with $h'\in H$, namely with probability $\mu(h^{-1}x_n^{-1}h')$,
\end{itemize}
where $x_1,\dots,x_{n+1},x_n'\in \bigcup_{i=1}^r R_i$ and $h,h'\in H$.
\par
We may now apply the technique of Section \ref{rate-of-escape} with some
slight modifications and simplifications. The exit-times are now given by
$$
\mathbf{e}_k:=\sup \bigl\lbrace m\in \mathbb{N}_0 \,\bigl|\, \Vert X_m\Vert=k \bigr\rbrace.
$$
Analogously, $\mathbf{W}_k:=X_{\mathbf{e}_k}$ and
$\mathbf{i}_k:=\mathbf{e}_k-\mathbf{e}_{k-1}$. We define for any $x,y\in R_i$, $i\in\{1,\dots,r\}$, $h,h'\in H$,
\begin{eqnarray*}
H(xh,h'|z) & := & \sum_{n=1}^\infty \mathbb{P}_{xh}\bigl[X_n=h',\forall m<n:
\Vert X_m\Vert \geq 1\bigr]\, z^n,\\
\xi(i) & :=& \sum_{gh_1\in \bigcup_{j=1,j\neq i}^r \Gamma_j^\times} \mu(gh_1)\cdot
\Bigl(1- \sum_{h_2\in H} H(gh_1,h_2|1)\Bigr)>0,\\
\overline{G}(xh,yh'|z) &:=& \sum_{n=0}^\infty \mathbb{P}_{xh}\bigl[ X_n=yh',\forall
m \leq n:\Vert X_m\Vert\geq 1\bigr]\, z^n.
\end{eqnarray*}
The functions $H(xh,h'|z)$ and $\overline{G}(xh,yh'|z)$ can be computed by
solving a finite system of non-linear equations; compare with (\ref{g-system}) and (\ref{h-equations}).
Analogously to Proposition \ref{prop-e_k}, it is easy to see that
$\bigl(\mathbf{W}_k,\mathbf{i}_k\bigr)_{k\in\mathbb{N}}$ is a Markov chain. The state
space $\mathcal{Z}$ can now be restricted to
$$
\mathcal{Z}_\Gamma := \biggl\lbrace (xh,n) \, \biggl|\, x \in \bigcup_{i=1}^r R_i, h\in H, n\in\mathbb{N}\biggr\rbrace.
$$
Define $[x_1\dots x_nh]:=x_nh$. Then $([\mathbf{W}_k])_{k\in\mathbb{N}}$ is also an
irreducible Markov chain on a finite state space with invariant probability
measure $\nu$. Thus, we get
$$
\Lambda = \sum_{\substack{xh,yh'\in\bigcup_{i=1}^r \Gamma_i^\times,\\ \tau(xh)\neq \tau(yh')}} \nu(yh')\cdot \frac{\xi\bigl(\tau(xh)\bigr)}{\xi\bigl(\tau(yh')\bigr)}\cdot
\frac{\partial}{\partial z}\biggl[ \sum_{w\in
\Gamma_{\tau(x)}^\times} p(yh',yh'w)\cdot z \cdot \overline{G}(h'w,xh|z)\biggr]\Biggl|_{z=1}
$$
and
$$
\Delta = \sum_{\substack{xh,yh'\in\bigcup_{i=1}^r \Gamma_i^\times,\\ \tau(xh)\neq
\tau(yh')}} \nu(yh')\cdot
\frac{\xi\bigl(\tau(xh)\bigr)}{\xi\bigl(\tau(yh')\bigr)}\cdot l(x)
\cdot \sum_{w\in \Gamma_{\tau(x)}^\times} p(yh',yh'w) \cdot \overline{G}(h'w,xh|1).
$$
Finally, we obtain:
\begin{Cor}\label{exittime-formula}
$$
\lim_{n\to\infty}\frac{l(X_n)}{n}=\frac{\Delta}{\Lambda} \quad \textrm{almost surely.}
$$
\end{Cor}
\subsection{Computation by Double Generating Functions}
\label{dgf}
In this section we derive another formula for the rate of escape with the help
of a theorem of Sawyer and Steger \cite[Theorem 2.2]{sawyer}, which we
reformulate adapted to our situation:
\begin{Th}[Sawyer and Steger]
\label{sawyer}
Suppose we can write for some $\delta>0$
$$
\mathcal{E}(w,z):=\mathbb{E}\biggl( \sum_{n\geq 0} w^{l(X_n)}\,z^{n}\biggr) = \frac{C(w,z)}{g(w,z)}
\quad \textrm{ for } w,z\in (1-\delta;1),
$$
where $C(w,z)$ and $g(w,z)$ are analytic for $|w-1|,|z-1|<\delta$
and \mbox{$C(1,1)\neq 0$}.
Then
$$
\frac{l(X_n)}{n} \xrightarrow{n\to\infty} \ell=\frac{\frac{\partial}{\partial w}g(1,1)}{\frac{\partial}{\partial z}g(1,1)} \quad
\textrm{ almost surely.}
$$
Moreover, if $(X_n)_{n\in\mathbb{N}_0}$ is a reversible Markov chain, then with $\bar g(r,s):=g(e^{-r},e^{-s})$
$$
\frac{l(X_n)-n \ell}{\sqrt{n}} \xrightarrow{n\to\infty} N(0,\sigma^2)\
\textrm{ in law, where }\
\sigma^2 = \frac{-\frac{\partial^2}{\partial r^2}\bar g(0,0)+2\ell
\frac{\partial^2}{\partial s \partial r} \bar g(0,0) -\ell^2 \frac{\partial^2}{\partial s^2}\bar g(0,0)}{\frac{\partial}{\partial s}\bar g(0,0)}.
$$
\end{Th}
We remark that the central limit theorem above is also part of \cite[Theorem 2.2]{sawyer}.
Similar limit theorems are well-known in analytical
combinatorics, see e.g. Bender and
Richmond \cite{bender-richmond} and Drmota \cite{drmota94}, \cite{drmota97}.
We show now how to write the expectation in the theorem in the required way.
Let $s_H$ be the stopping time of the first return to $H$ after start at
$e$, that is, $s_H=\inf\{1\leq m \in\mathbb{N} \mid
X_m\in H\}$.
For $h\in H$, $i\in\{1,\dots,r\}$, $x\in\Gamma_i\setminus H$ and $z\in\mathbb{C}$ we define
$$
L(h,x|z) := \sum_{n\geq 0} \mathbb{P}_h\bigl[X_n=x,s_H>n\bigr]\,z^n
=\sum_{y\in \Gamma_i^\times} p(h,y)\cdot z\cdot \overline{G}(y,x|z).
$$
Additionally, we set $L(h,h|z):=1$ and $L(h,h'|z):=0$ for $h'\in H\setminus\{h\}$.
With this notation we have
$$
\mathcal{E}(w,z) = \sum_{x\in \Gamma} \sum_{n\in\mathbb{N}_0}
p^{(n)}(e,x)\,z^n\,w^{l(x)}
= \sum_{x\in \Gamma}\sum_{h\in H} G(e,h|z)\, L(h,x|z) \,w^{l(x)}.
$$
Setting
\begin{eqnarray*}
\mathcal{L}_i^+(w,z) & := & \sum_{x\in \Gamma_i^\times} L(e,x|z)\,w^{l(x)}
\quad \textrm{ and}\\
\mathcal{L}_i(w,z) & := & \sum_{n\geq 1} \sum_{\substack{x_1\dots x_nh\in \Gamma,\\
x_1\in \Gamma_i^\times}} L(e,x_1\dots x_nh|z)\,w^{l(x_1\dots x_nh)},
\end{eqnarray*}
we have
\begin{equation}\label{l-formula}
\mathcal{L}(w,z) := \sum_{x\in \Gamma} L(e,x|z)\,w^{l(x)} = 1 + \sum_{i=1}^r \mathcal{L}_i(w,z).
\end{equation}
We now rewrite $\mathcal{L}_i(w,z)$:
\begin{eqnarray}
\mathcal{L}_i(w,z)& = &\mathcal{L}_i^+(w,z) \cdot \Bigl( 1 + \sum_{n\geq 2}
\sum_{\substack{x_2\dots x_nh\in\Gamma \setminus H,\\ x_2\notin \Gamma_i}}
L(e,x_2\dots x_nh|z)\,w^{l(x_2\dots x_nh)}\Bigr)\nonumber\\
& = & \mathcal{L}_i^+(w,z) \cdot \Bigl( 1 + \sum_{j=1, j\neq i}^r
\mathcal{L}_j(w,z)\Bigr)
= \mathcal{L}_i^+(w,z) \cdot \Bigl( \mathcal{L}(w,z) -
\mathcal{L}_i(w,z)\Bigr) \label{li-formula}.
\end{eqnarray}
From (\ref{l-formula}) and (\ref{li-formula}) we obtain
$$
\mathcal{L}(w,z) = 1 + \sum_{i=1}^r \frac{\mathcal{L}_i^+(w,z)\mathcal{L}(w,z) }{1+\mathcal{L}_i^+(w,z)},
$$
yielding
$$
\mathcal{L}(w,z) = \frac{1}{1-\sum_{i=1}^r \frac{\mathcal{L}_i^+(w,z)}{1+\mathcal{L}_i^+(w,z)}}.
$$
Now we can write the expectation of Theorem \ref{sawyer} in the requested way:
\begin{eqnarray*}
\mathcal{E}(w,z)& =& \sum_{h\in H} G(e,h|z) \sum_{x\in\Gamma} L(e,h^{-1}x|z)\,
w^{l(x)}\\
&=& \sum_{h\in H} G(e,h|z) \sum_{x\in\Gamma} L(e,x|z)\,w^{l(x)}
= \frac{\sum_{h\in H} G(e,h|z)}{1-\sum_{i=1}^r \frac{\mathcal{L}_i^+(w,z)}{1+\mathcal{L}_i^+(w,z)}}.
\end{eqnarray*}
Thus, we can apply Theorem \ref{sawyer} with $C(w,z)=\sum_{h\in H} G(e,h|z)$ and
$$
g(w,z) = 1-\sum_{i=1}^r \frac{\mathcal{L}_i^+(w,z)}{1+\mathcal{L}_i^+(w,z)}.
$$
\begin{Cor}\label{dgf-formula}
The rate of escape w.r.t. $l(\cdot)$ is
$$
\lim_{n\to\infty} \frac{l(X_n)}{n} = \frac{\Upsilon_1}{\Upsilon_2} \textrm{
almost surely,}
$$
where
$$
\Upsilon_1 = \sum_{i=1}^r \frac{\sum_{x\in\Gamma_i^\times}
l(x)\,L(e,x|1)}{\bigl(1+\sum_{x\in\Gamma_i^\times} L(e,x|1)\bigr)^2} \
\textrm{ and } \
\Upsilon_2 = \sum_{i=1}^r \frac{\sum_{x\in\Gamma_i^\times}
L'(e,x|1)}{\bigl(1+\sum_{x\in\Gamma_i^\times} L(e,x|1)\bigr)^2}.
$$
\end{Cor}
\begin{proof}
Computing the derivatives of $g(w,z)$ w.r.t. $w$ and $z$ leads to the proposed formula.
\end{proof}
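To illustrate how the formula of Corollary \ref{dgf-formula} can be evaluated in practice, we give a minimal \textsc{Python} sketch. It assumes that the quantities $l(x)$, $L(e,x|1)$ and $L'(e,x|1)$ have already been obtained (e.g.\ by solving the finite systems of equations discussed above); the numerical values in the example input are placeholders only and do not come from this paper.
\begin{verbatim}
# Hedged sketch: evaluate the drift formula of Corollary (dgf-formula).
# 'factors' maps each factor i to a list of triples (l(x), L(e,x|1), L'(e,x|1))
# for x in Gamma_i^x; the numbers below are placeholders.
factors = {
    1: [(1.0, 0.30, 0.55), (1.0, 0.10, 0.25)],
    2: [(1.0, 0.20, 0.40)],
}

def rate_of_escape(factors):
    ups1 = ups2 = 0.0
    for triples in factors.values():
        L_sum  = sum(L for _, L, _ in triples)         # sum_x L(e,x|1)
        lL_sum = sum(lx * L for lx, L, _ in triples)   # sum_x l(x) L(e,x|1)
        Ld_sum = sum(Ld for _, _, Ld in triples)       # sum_x L'(e,x|1)
        denom  = (1.0 + L_sum) ** 2
        ups1  += lL_sum / denom                        # contribution to Upsilon_1
        ups2  += Ld_sum / denom                        # contribution to Upsilon_2
    return ups1 / ups2

print(rate_of_escape(factors))
\end{verbatim}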
\subsection{Computation via the Limit Process}
\label{limitprocesses}
In this section we derive another formula for the rate of escape w.r.t. the
natural word length $\Vert \cdot \Vert$. First,
$$
\mathbb{E}[\Vert X_{n}\Vert] = \sum_{\bar g\in \Gamma} \Vert \bar g\Vert\,
\mu^{(n)}(\bar g)\quad \textrm{
and }\quad
\mathbb{E}[\Vert X_{n+1}\Vert] = \sum_{g,\bar g\in \Gamma} \Vert g\bar g\Vert\,
\mu(g)\, \mu^{(n)}(\bar g).
$$
Thus, we have
$$
\mathbb{E}[\Vert X_{n+1}\Vert] -\mathbb{E}[\Vert X_{n}\Vert] = \sum_{g\in
\Gamma} \mu(g) \int_\Gamma \bigl( \Vert gX_n\Vert
-\Vert X_n\Vert\bigr)\, d\mu^{(n)}.
$$
Since $\mathbb{E}[\Vert X_n\Vert]/n$ converges to $\ell=\lim_{n\to\infty} \Vert
X_n\Vert/n$, it is sufficient to prove that this difference of expectations converges; the limit must
then equal $\ell$.
The process $(X_n)_{n\in\mathbb{N}_0}$ converges to some random element $X_\infty$
valued in
$$
\Gamma_\infty := \biggl\lbrace
x_1x_2\dots \in\Gamma^\mathbb{N} \,\biggl|\, x_i\in \bigcup_{j=1}^r R_j, \tau(x_i)\neq \tau(x_{i+1})
\biggr\rbrace
$$
in the sense that the length of the common prefix of $X_n$ and $X_\infty$ goes to
infinity. We denote by $X_\infty^{(1)}$ the first letter of $X_\infty$ and for
$g\in \bigcup_{i=1}^r \Gamma_i$ we define
$$
Y_{g} := \lim_{n\to\infty} \Vert gX_n\Vert - \Vert X_n \Vert
=
\begin{cases}
1,& \textrm{if } X_{\infty}^{(1)}\notin \Gamma_{\tau(g)}\\
-1, & \textrm{if } X_{\infty}^{(1)}\in g^{-1}H \\
0, & \textrm{otherwise}
\end{cases}
$$
At this point we need the equation $\Vert hx\Vert=\Vert x\Vert$ for $h\in H$
and $x\in\Gamma$. This equation is, in general, not satisfied for other length functions.
The Green functions $G(x,y)=\sum_{n\geq 0} \mathbb{P}_x[X_n=y]$, where
$x,y\in \Gamma_i$ for any $i\in\{1,\dots,r\}$, satisfy the following linear recursive
equations:
\begin{displaymath}
G(x,y) = \delta_{x}(y) + \sum_{w\in \Gamma_i} p(x,w)\, G(w,y) +
\sum_{\substack{xwh\in\Gamma,\\ \Vert xwh\Vert=2}} p(x,xwh) \sum_{h'\in H} H(wh,h'|1)\,G(xh',y).
\end{displaymath}
This system of Green functions can be solved once the functions $H(wh,h'|1)$
have been obtained by solving (\ref{h-equations}). We now define
$$
\varrho(i) := \mathbb{P}\bigl[X_\infty^{(1)}\in \Gamma_i\bigr] =
\sum_{h\in H} G(e,h|1) \sum_{g\in\Gamma_i^\times} \mu(g) \cdot \Bigl(1-
\sum_{h'\in H} H(hg,h'|1)\Bigr).
$$
By transience, $\sum_{i=1}^r \varrho(i)=1$. Furthermore,
$\mathbb{P}[Y_{g}=1] = 1-\varrho\bigl( \tau(g)\bigr)$ and
$$
\mathbb{P}[Y_{g}=-1] = \sum_{h\in H} F(e,g^{-1}h) \cdot \bigl(1-\varrho\bigl(
\tau(g)\bigr)\bigr)
= \frac{1-\varrho\bigl(\tau(g)\bigr)}{G(e,e)} \sum_{h\in H} G(e,g^{-1}h).
$$
By Lebesgue's Dominated Convergence Theorem,
$$
\mathbb{E}\bigl[\Vert X_{n+1}\Vert \bigr] - \mathbb{E}\bigl[\Vert X_{n}\Vert\bigr]
\xrightarrow{n\to\infty} \sum_{i=1}^r \sum_{g\in\Gamma_i^\times} \mu(g)
\Bigl( \mathbb{P}[Y_{g}=1]- \mathbb{P}[Y_{g}=-1]\Bigr).
$$
But this limit must be the rate of escape $\ell$. Thus:
\begin{Cor}\label{lp-formula}
$$
\ell =
\lim_{n\to\infty} \frac{\Vert X_n\Vert}{n}=
\sum_{i=1}^r \biggl[\mu(\Gamma_i^\times) \bigl(1-\varrho(i)\bigr)
- \frac{1-\varrho(i)}{G(e,e)} \sum_{g\in\Gamma_i^\times}
\sum_{h\in H} \mu(g) G(e,g^{-1}h)\biggr].
$$
\end{Cor}
As a final remark observe that the formulas of Corollaries \ref{exittime-formula},
\ref{dgf-formula} and \ref{lp-formula} have complexities in decreasing order:
while the computation of the rate of escape by Corollary \ref{exittime-formula}
needs three systems of equations to be solved and derivatives to be calculated,
the computation by Corollaries \ref{dgf-formula} or \ref{lp-formula} needs
only two systems of equations to be solved, while the formula in Corollary
\ref{dgf-formula} deals also with derivatives.
\section{Sample Computations}
\label{sample}
\subsection{A Regular Language}
Let $A=\{a,b,c\}$ and set $l(a)=l(b)=l(c)=1$. We consider the set $\mathcal{L}$ of all words over the
alphabet $A$ such that in each $w\in\mathcal{L}$ the letter $b$ is either the first
letter of $w$ or follows the letter $a$, and the letter $c$ may only appear after the letter
$b$; e.g., $abcaba \in \mathcal{L}$, but $abcba \notin \mathcal{L}$. Consider the random walk on $\mathcal{L}$ given by the following
transition probabilities:
\begin{eqnarray*}
&&p(aa,aaa)=\frac{1}{3},\ p(aa,aab)=\frac{1}{3},\ p(aa,a)=\frac{1}{3},\ p(ab,aba)=\frac{1}{6},\ p(ab,abc)=\frac{1}{3},\ p(ab,a)=\frac{1}{2},\\
&&p(ba,baa)=\frac{1}{4},\ p(ba,bca)=\frac{1}{4},\ p(ba,bab)=\frac{1}{4},\ p(ba,a)=\frac{1}{4},\\
&&p(bc,bca)=\frac{1}{2},\ p(bc,a)=\frac{1}{2},\ \
p(ca,caa)=\frac{1}{4},\ p(ca,cab)=\frac{1}{2},\ p(ca,a)=\frac{1}{4}.
\end{eqnarray*}
Note that it is not necessary to specify any further transition probabilities,
as the formula for the rate of escape does not depend on the transition probabilities of the form
\mbox{$\mathbb{P}[X_{n+1}=w' \mid X_n=w]$,} where $w\in \{\varepsilon,a,b,c\}$. The
system of equations (\ref{h-equations}) is then
\begin{eqnarray*}
H(aa,a|z) & = & \frac{z}{3} \bigl( H(aa,a|z)\cdot H(aa,a|z) + H(ab,a|z)\cdot
H(aa,a|z)+1\bigr),\\
H(ab,a|z) & = & \frac{z}{3} H(bc,a|z)\cdot H(aa,a|z) + \frac{z}{6} H(ba,a|z)\cdot
H(aa,a|z) + \frac{z}{2},\\
H(ba,a|z) & = & \frac{z}{4} \bigl( H(aa,a|z)\cdot H(ba,a|z) + H(ca,a|z)\cdot
H(ba,a|z)
+ H(ab,a|z)\cdot H(ba,a|z) +1 \bigr),\\
H(bc,a|z) & = & \frac{z}{2} H(ca,a|z)\cdot H(ba,a|z) + \frac{z}{2},\\
H(ca,a|z) & = & \frac{z}{4} H(aa,a|z)\cdot H(ca,a|z) + \frac{z}{2}
H(ab,a|z)\cdot H(ca,a|z) + \frac{z}{4}.
\end{eqnarray*}
This system in the unknown variables $H(aa,a|z)$, $H(ab,a|z)$, $H(ba,a|z)$,
$H(bc,a|z)$ and $H(ca,a|z)$, where $z$ appears as a parameter, can be solved
with the help of \textsc{Mathematica}. With these solutions we can compute the
modified Green functions $\overline{G}(\cdot,\cdot|z)$ by solving the linear
system (\ref{g-system}). Note that only $\overline{G}(aa,aa|z)$, $\overline{G}(ab,aa|z)$, $\overline{G}(ba,ba|z)$, $\overline{G}(bc,ba|z)$, $\overline{G}(ca,ca|z)$ are non-zero functions.
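The minimal non-negative solution of the system above at $z=1$, i.e. the values $H(aa,a|1),\dots,H(ca,a|1)$, can also be approximated numerically by a monotone fixed-point iteration starting from $0$; the following \textsc{Python} sketch (the function name is ours) may serve as a cross-check of the solution obtained with \textsc{Mathematica}.
\begin{verbatim}
# Hedged sketch: fixed-point iteration for the system (h-equations) at z=1,
# specialised to the sample transition probabilities above.  Starting from 0,
# the iterates increase towards the minimal non-negative solution.
def solve_H(tol=1e-12, max_iter=100000):
    H = {k: 0.0 for k in ("aa", "ab", "ba", "bc", "ca")}
    for _ in range(max_iter):
        new = {
            "aa": (H["aa"]**2 + H["ab"]*H["aa"] + 1.0) / 3.0,
            "ab": H["bc"]*H["aa"]/3.0 + H["ba"]*H["aa"]/6.0 + 0.5,
            "ba": (H["aa"]*H["ba"] + H["ca"]*H["ba"] + H["ab"]*H["ba"] + 1.0)/4.0,
            "bc": H["ca"]*H["ba"]/2.0 + 0.5,
            "ca": H["aa"]*H["ca"]/4.0 + H["ab"]*H["ca"]/2.0 + 0.25,
        }
        if max(abs(new[k] - H[k]) for k in H) < tol:
            return new
        H = new
    return H

print(solve_H())
\end{verbatim}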
Moreover, we get
\begin{eqnarray*}
\xi(aaa) & = & \xi(baa)=\xi(caa) =\frac{1}{3} \bigl( 1- H(aa,a|1)\bigr) + \frac{1}{3} \bigl( 1-
H(ab,a|1)\bigr),\\
\xi(aab) & = & \xi(bab)=\xi(cab)=\frac{1}{6} \bigl( 1- H(ba,a|1)\bigr) + \frac{1}{3} \bigl( 1-
H(bc,a|1)\bigr),\\
\xi(aba) & = & \frac{1}{4} \bigl( 1- H(aa,a|1)\bigr) + \frac{1}{4} \bigl( 1-
H(ca,a|1)\bigr) + \frac{1}{4} \bigl(1- H(ab,a|1)\bigr),\\
\xi(abc) & = & \frac{1}{2} \bigl( 1- H(ca,a|1)\bigr),\
\xi(bca) = \frac{1}{4} \bigl( 1- H(aa,a|1)\bigr) + \frac{1}{2} \bigl( 1-
H(ab,a|1)\bigr).
\end{eqnarray*}
Since $\nu(abc)=\sum_{def\in \overline{A}^3} \nu(def)\ \tilde q(def,abc)$, we can compute
the invariant measure as
$$
\begin{array}{c}
\nu(aaa)=0.32475,\, \nu(aab)=0.13194,\, \nu(aba)=0.12597, \,
\nu(abc)=0.08021, \, \nu(baa)=0.05350,\\[1ex]
\nu(bca)=0.13095, \,
\nu(bab)=0.02174, \, \nu(caa)=0.07844, \, \nu(cab)=0.05251.
\end{array}
$$
Now we have all necessary ingredients to compute
$\Lambda =3.78507$, and finally we get the rate of escape as $\ell=0.264196$.
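The value of $\ell$ can also be checked by direct simulation of the random walk. In the following \textsc{Python} sketch the behaviour on words of length at most $1$, which is not specified above and (as noted) does not influence the rate of escape, is completed arbitrarily by adjoining the letter $a$.
\begin{verbatim}
import random

# Hedged simulation sketch for the sample random walk on the regular language.
# RULES maps the last two letters to the list of (replacement of these two
# letters, probability); words of length <= 1 are completed arbitrarily.
RULES = {
    "aa": [("aaa", 1/3), ("aab", 1/3), ("a", 1/3)],
    "ab": [("aba", 1/6), ("abc", 1/3), ("a", 1/2)],
    "ba": [("baa", 1/4), ("bca", 1/4), ("bab", 1/4), ("a", 1/4)],
    "bc": [("bca", 1/2), ("a", 1/2)],
    "ca": [("caa", 1/4), ("cab", 1/2), ("a", 1/4)],
}

def estimate_drift(steps=2_000_000, seed=1):
    rng = random.Random(seed)
    word = ""
    for _ in range(steps):
        if len(word) < 2:
            word += "a"                      # arbitrary completion
            continue
        repls, probs = zip(*RULES[word[-2:]])
        word = word[:-2] + rng.choices(repls, probs)[0]
    return len(word) / steps                 # l(X_n)/n with l = natural length

print(estimate_drift())                      # expect a value close to 0.264
\end{verbatim}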
\subsection{$\mathbb{Z}/d\mathbb{Z} \ast_{\mathbb{Z}/2\mathbb{Z}} \mathbb{Z}/d\mathbb{Z}$}
Consider the free product by amalgamation $\mathbb{Z}/d\mathbb{Z} \ast_{\mathbb{Z}/2\mathbb{Z}} \mathbb{Z}/d\mathbb{Z}$,
$d\in\mathbb{N}$ even, over the common subgroup $\mathbb{Z}/2\mathbb{Z}$. Suppose that $\mathbb{Z}/d\mathbb{Z}$ is
generated by some element $a$ with $a^d$ equal to the identity. Setting
$\mu_1(a)=\mu_2(a)=1$ and $\alpha_1=\alpha_2=1/2$ we get the following values
for the rate of escape $\ell$ w.r.t. $\|\cdot \|$:
$$
\begin{array}{c|c|c|c|c}
d & 6 & 8 & 10 & 12\\
\hline
\ell & 0.24749 & 0.40859 & 0.46144 & 0.47543
\end{array}
$$
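As a cross-check of the table, the rate of escape can also be estimated by simulating the random walk directly, keeping the elements of $\Gamma$ in the normal form (\ref{word-form}). The following \textsc{Python} sketch (all names are ours) uses the coset representatives $\{1,\dots,d/2-1\}$ in each factor and $H=\{0,d/2\}$.
\begin{verbatim}
import random

# Hedged simulation sketch for Z/dZ *_{Z/2Z} Z/dZ with mu_1(a)=mu_2(b)=1 and
# alpha_1=alpha_2=1/2: every step multiplies on the right by the generator of
# a uniformly chosen factor.  Elements are kept in normal form x_1...x_n h.
def simulate(d, steps=1_000_000, seed=1):
    half = d // 2
    rng = random.Random(seed)
    word, h = [], 0                      # word = list of (factor, representative)
    for _ in range(steps):
        i = rng.randint(1, 2)            # factor of the generator used in this step
        v = (h + 1) % d                  # h * generator, computed inside Gamma_i
        if word and word[-1][0] == i:    # merge with the last letter of the word
            v = (word[-1][1] + v) % d
            word.pop()
        if v in (0, half):               # result lies in the amalgamated subgroup H
            h = v
        else:                            # split off the coset representative
            rep, h = (v, 0) if v < half else (v - half, half)
            word.append((i, rep))
    return len(word) / steps             # estimate of the drift w.r.t. ||.||

for d in (6, 8, 10, 12):
    print(d, simulate(d))                # compare with the table above
\end{verbatim}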
\end{document}
\begin{document}
\title{On the limit of large girth graph sequences\footnote{AMS
Subject Classification: 05C99}}
\begin{abstract} Let $d\geq 2$ be given and let $\mu$ be an
involution-invariant probability measure on
the space of trees $T\in\mathcal{T}_d$ with maximum
degree at most $d$. Then $\mu$ arises as the local limit of some sequence
$\{G_n\}^\infty_{n=1}$ of graphs with all degrees at most $d$.
This answers Question 6.8 of Bollob\'as and Riordan \cite{Bol}.
\end{abstract}
\section{Introduction}
Let $\textrm{\textbf{G}}d$ denote the set of all finite simple
graphs $G$ (up to isomorphism)
for which $\deg(x) \leq d$ for every $x \in V(G)$.
For a graph $G$ and $x,y \in V(G)$ let $d_G(x,y)$ denote the distance
of $x$ and $y$, that is the length of the shortest path from $x$ to
$y$. A rooted $(r,d)$-ball is a graph $G \in \textrm{\textbf{G}}d$ with a marked
vertex $x \in V(G)$ called the root such that $d_G(x,y) \leq r$ for
every $y \in V(G)$. By $U^{r,d}$ we shall denote the set of rooted
$(r,d)$-balls.
If $G \in \textrm{\textbf{G}}d$ is a graph and $x\in V(G)$ then $B_r(x) \in U^{r,d}$
shall denote the rooted $(r,d)$-ball around $x$ in $G$.
For any $\alpha \in U^{r,d}$ and $G \in \textrm{\textbf{G}}d$ we define the set
$T(G,\alpha) := \{x \in V(G): B_r(x) \cong \alpha\}$ and let
$p_G(\alpha) := \frac{|T(G,\alpha)|}{|V(G)|}$.
A graph sequence $\textrm{\textbf{G}} = \{G_n\}_{n=1}^{\infty} \subset \textrm{\textbf{G}}d$ is
{\bf weakly convergent} if $\lim_{n \to \infty} |V(G_n)| = \infty$
and for every $r$ and every $\alpha \in U^{r,d}$ the limit
$\lim_{n\to\infty}p_{G_n}(\alpha)$ exists (see \cite{BS}).
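For a concrete finite graph $G$ these statistics are easy to compute. The following \textsc{Python} sketch (based on the \texttt{networkx} package; all function names are ours) extracts the rooted $r$-ball around every vertex and groups the balls into rooted isomorphism classes, which yields the numbers $p_G(\alpha)$.
\begin{verbatim}
import networkx as nx

# Hedged sketch: empirical r-ball statistics p_G(alpha) of a finite graph G.
def ball_statistics(G, r):
    balls = []
    for x in G.nodes:
        B = nx.Graph(nx.ego_graph(G, x, radius=r))   # standalone copy of B_r(x)
        nx.set_node_attributes(B, {v: (v == x) for v in B.nodes}, "root")
        balls.append(B)
    # group the rooted balls into rooted isomorphism classes
    classes = []                                      # list of (representative, count)
    match = nx.algorithms.isomorphism.categorical_node_match("root", False)
    for B in balls:
        for i, (rep, cnt) in enumerate(classes):
            if nx.is_isomorphic(B, rep, node_match=match):
                classes[i] = (rep, cnt + 1)
                break
        else:
            classes.append((B, 1))
    n = G.number_of_nodes()
    return [(rep, cnt / n) for rep, cnt in classes]

# e.g. a long cycle: every 1-ball is a path on three vertices rooted in the middle
print([p for _, p in ball_statistics(nx.cycle_graph(100), 1)])   # -> [1.0]
\end{verbatim}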
\noindent
Let $\textrm{\textbf{G}}rd$ denote the set of all countable, connected rooted
graphs
$G$ for which $\deg(x) \leq d$ for every $x \in V(G)$.
If $G,H\in\textrm{\textbf{G}}rd$ let $d_g(G,H)=2^{-r}$, where
$r$ is the maximal number such that the $r$-balls around the roots
of $G$ resp. $H$ are rooted isomorphic. The distance $d_g$ makes
$\textrm{\textbf{G}}rd$ a compact metric space. Given an
$\alpha \in U^{r,d}$ let $T(\textrm{\textbf{G}}rd,\alpha) = \{(G,x) \in \textrm{\textbf{G}}rd : B_r(x)
\cong\alpha\}$. The sets $T(\textrm{\textbf{G}}rd,\alpha)$ are closed-open sets.
A convergent graph sequence $\{G_n\}^\infty_{n=1}$ defines a
{\bf local limit measure} $\mu_{\bf G}$ on $\textrm{\textbf{G}}rd$,
where $\mu_{\bf G}(T(\textrm{\textbf{G}}rd,\alpha)) =\lim_{n\to\infty} p_{G_n}(\alpha)$.
However, not all the probability measures on $\textrm{\textbf{G}}rd$ arise as local
limits. A necessary condition for a measure $\mu$ being a local limit
is its {\bf involution invariance} (see Section \ref{invol}).
The goal of this paper is to answer a question of
Bollob\'as and Riordan (Question 6.8 \cite{Bol}):
\begin{theorem}\label{theorem1}
Any involution-invariant measure $\mu$ on $\textrm{\textbf{G}}rd$ concentrated on
trees arises as a local limit of some convergent graph sequence.
\end{theorem}
As was pointed out in \cite{Bol},
such graph sequences are asymptotically treelike, thus
$\mu$ must arise as the local limit of a convergent large girth
sequence.
\section{Involution invariance} \label{invol}
Let $\vec{\Grd}$ be the compact space of all connected
countable rooted graphs $\vec{G}$ (up to isomorphism)
of vertex degree bound $d$
with a distinguished directed edge pointing out from the root.
Note that $\vec{G}$ and $\vec{H}$ are considered isomorphic if there
exists a rooted isomorphism between them mapping distinguished
edges into each other.
Let $\vec{U}^{r,d}$ be the isomorphism classes
of all rooted $(r,d)$-graphs $\vec{\alpha}$ with a distinguished edge
$e(\vec{\alpha})$ pointing out from the root.
Again, $T(\vec{\Grd},\vec{\alpha})$ is well-defined for any $\vec{\alpha}\in\vec{U}^{r,d}$ and defines
a closed-open set in $\vec{\Grd}$. Clearly,
the forgetting map $\mathcal{F}:\vec{\Grd}\to\textrm{\textbf{G}}rd$ is continuous.
Let $\mu$ be a probability measure on $\textrm{\textbf{G}}rd$. Then we define
a measure $\vec{\mu}$ on $\vec{\Grd}$ the following way.
\noindent
Let $\vec{\alpha}\in \vec{U}^{r,d}$ and let $\mathcal{F}(\vec{\alpha})=\alpha\in U^{r,d}$ be the
underlying rooted ball. Clearly,
$\mathcal{F}(T(\vec{\Grd},\vec{\alpha}))=T(\textrm{\textbf{G}}rd,\alpha)$. Let
$$\vec{\mu}(T(\vec{\Grd},\vec{\alpha})):=l\,\mu(T(\textrm{\textbf{G}}rd,\alpha))\,,$$ where
$l$ is the number of edges $e$ pointing out from the root such that there
exists a rooted automorphism of $\alpha$ mapping $e(\vec{\alpha})$ to $e$.
Observe that
$$\vec{\mu}(\mathcal{F}^{-1}(T(\textrm{\textbf{G}}rd,\alpha)))=\deg(\alpha)\,\mu(T(\textrm{\textbf{G}}rd,\alpha))\,.$$
We define the map $T:\vec{\Grd}\to\vec{\Grd}$ as follows. Let $T(\vec{G})=\vec{H}$,
where :
\begin{itemize}
\item the underlying graphs of $\vec{G}$ and $\vec{H}$ are the same,
\item the root of $\vec{H}$ is the endpoint of $e(\vec{G})$,
\item the distinguished edge of $\vec{H}$ is pointing
to the root of $\vec{G}$.
\end{itemize}
Note that $T$ is a continuous involution.
Following Aldous and Steele \cite{AS}, we
call $\mu$ {\bf involution-invariant} if $T_*(\vec{\mu})=\vec{\mu}$.
It is important to note \cite{AS}, \cite{AL} that the limit measures
of convergent graph sequences are always involution-invariant.
\noindent
We need to introduce the notion of {\bf edge-balls}.
Let $\vec{G}\in\vec{\Grd}$. The edge-ball $B^e_r(\vec{G})$ of radius
$r$ around the root of $\vec{G}$ is the following spanned rooted
subgraph of $\vec{G}$:
\begin{itemize}
\item The root of $B^e_r(\vec{G})$ is the same as the root of $\vec{G}$.
\item $y$ is a vertex of $B^e_r(\vec{G})$ if
$d(x,y)\leq r$ or $d(x',y)\leq r$, where $x$ is the root of $\vec{G}$
and $x'$ is the endpoint of the directed edge $e(\vec{G})$.
\item The distinguished edge of $B^e_r(\vec{G})$ is $(\vec{x,x'})$.
\end{itemize}
Let $\vec{E}^{r,d}$ be the set of all edge-balls of radius $r$ up to isomorphism.
Then if $\vec{\phi}\in\vec{E}^{r,d}$, let
$s(\vec{\phi})\in \vec{U}^{r,d}$ be the rooted ball around the root of $\vec{\phi}$.
Also, let $t(\vec{\phi})\in\vec{U}^{r,d}$ be the $r$-ball around $x'$ with
distinguished edge $(\vec{x',x})$.
\noindent
The involution $T^{r,d}:\vec{E}^{r,d}\to\vec{E}^{r,d}$ is
defined the obvious way and $t(T^{r,d}(\vec{\phi}))=s(\vec{\phi})$,
$s(T^{r,d}(\vec{\phi}))=t(\vec{\phi})$.
Since $\vec{\mu}$ is a measure we have
\begin{equation} \label{e1}
\vec{\mu}(T(\vec{\Grd},\vec{\alpha}))=\sum_{\vec{\phi},s(\vec{\phi})=\vec{\alpha}}\vec{\mu}(T(\vec{\Grd},\vec{\phi})).
\end{equation}
Also, by the involution-invariance
\begin{equation} \label{e3}
\vec{\mu}(T(\vec{\Grd},\vec{\phi}))=\vec{\mu}(T(\vec{\Grd},T^{r,d}(\vec{\phi}))),
\end{equation}
since $T(T(\vec{\Grd},\vec{\phi}))=T(\vec{\Grd},T^{r,d}(\vec{\phi}))$.
Therefore by (\ref{e1}),
\begin{equation} \label{e2}
\vec{\mu}(T(\vec{\Grd},\vec{\alpha}))=\sum_{\vec{\phi},t(\vec{\phi})=\vec{\alpha}}\vec{\mu}(T(\vec{\Grd},\vec{\phi}))\,.
\end{equation}
\section{Labeled graphs}
Let $\vec{\Grd}n$ be the isomorphism classes of
\begin{itemize}
\item connected countable rooted graphs with vertex degree bound $d$
\item with a distinguished edge pointing out from the root
\item with vertex labels from the set $\{1,2,\dots,n\}$.
\end{itemize}
Note that if $\vec{G}_*$ and $\vec{H}_*$ are such graphs then
they are called isomorphic if there exists a map $\rho:V(\vec{G}_*)\to
V(\vec{H}_*)$ preserving both the underlying $\vec{\Grd}$-structure and
the vertex labels.
The labeled $r$-balls $\vec{U}^{r,d}_n$ and the labeled $r$-edge-balls
$\vec{E}^{r,d}_n$ are defined accordingly.
Again, $\vec{\Grd}n$ is a compact metric space and
$T(\vec{\Grd}n,\vec{\alpha}_*)$,\,$T(\vec{\Grd}n,\vec{\phi}_*)$ are closed-open sets,
where $\vec{\alpha}_*\in \vec{U}^{r,d}_n$, $\vec{\phi}_*\in \vec{E}^{r,d}_n$.
Now let $\mu$ be an involution-invariant probability
measure on $\textrm{\textbf{G}}rd$ with induced measure $\vec{\mu}$. The associated
measure $\vec{\mu}_n$ on $\vec{\Grd}n$ is defined the following way.
\noindent
Let $\vec{\alpha}\in\vec{U}^{r,d}$ and $\kappa_1,\kappa_2$ be vertex labelings of $\vec{\alpha}$ by
$\{1,2,\dots,n\}$. We say that $\kappa_1$ and $\kappa_2$ are equivalent
if there exists a rooted automorphism of $\vec{\alpha}$ preserving the
distinguished edge and mapping $\kappa_1$ to $\kappa_2$.
Let $C(\kappa)$ be the equivalence class of the vertex labeling $\kappa$
of $\vec{\alpha}$. Then we define
$$\vec{\mu}_n(T(\vec{\Grd}n,[\kappa])):=\frac{|C(\kappa)|}{n^{|V(\vec{\alpha})|}}
\vec{\mu}(T(\vec{\Grd},\vec{\alpha}))\,.$$
\begin{lemma} \label{borel}
\begin{enumerate}
\item $\vec{\mu}_n$ extends to a Borel-measure.
\item $\vec{\mu}(T(\vec{\Grd},\vec{\alpha}))=\sum_{\vec{\alpha}_*,\,\mathcal{F}(\vec{\alpha}_*)=\vec{\alpha}}
\vec{\mu}_n(T(\vec{\Grd}n,\vec{\alpha}_*))\,.$
\end{enumerate} \end{lemma}
\noindent{\it Proof.}
The second equation follows directly from the definition. In order to prove
that $\vec{\mu}_n$ extends to a Borel-measure it is enough to prove that
$$\vec{\mu}_n(T(\vec{\Grd}n,\vec{\alpha}_*))=
\sum_{\vec{\beta}_*\in N_{r+1}(\vec{\alpha}_*)}
\vec{\mu}_n(T(\vec{\Grd}n,\vec{\beta}_*))\,,$$
where $\vec{\alpha}_*\in\vec{U}^{r,d}_n$ and $N_{r+1}(\vec{\alpha}_*)$ is the
set of elements $\vec{\beta}_*$ in $\vec{U}^{r+1,d}_n$ such that the
$r$-ball around the root of $\vec{\beta}_*$ is isomorphic to $\vec{\alpha}_*$.
Let $\vec{\alpha}=\mathcal{F}(\vec{\alpha}_*)\in\vec{U}^{r,d}$ and let
$N_{r+1}(\vec{\alpha})\subset \vec{U}^{r+1,d}$ be the set of elements $\vec{\beta}$ such that
the $r$-ball around the root of $\vec{\beta}$ is isomorphic to $\vec{\alpha}$.
Clearly
\begin{equation} \label{gyors}
\vec{\mu}(T(\vec{\Grd},\vec{\alpha}))=\sum_{\vec{\beta}\in N_{r+1}(\vec{\alpha})} \vec{\mu}(T(\vec{\Grd},\vec{\beta}))\,.
\end{equation}
Let $\kappa$ be a labeling of $\vec{\alpha}$ by $\{1,2,\dots,n\}$ representing
$\vec{\alpha}_*$. For $\vec{\beta}\in N_{r+1}(\vec{\alpha})$ let $L(\vec{\beta})$ be the set of labelings
of $\vec{\beta}$ that extend some labeling of $\vec{\alpha}$ that is equivalent to $\kappa$.
\noindent
Note that
$$\vec{\mu}_n(T(\vec{\Grd}n,\vec{\alpha}_*))=\vec{\mu}(T(\vec{\Grd},\vec{\alpha}))\frac{|C(\kappa)|}{n^{|V(\vec{\alpha})|}}\,.$$
Also,
$$\sum_{\vec{\beta}_*\in N_{r+1}(\vec{\alpha}_*)} \vec{\mu}_n (T(\vec{\Grd}n,\vec{\beta}_*))=
\sum_{\vec{\beta}\in N_{r+1}(\vec{\alpha})} \vec{\mu}(T(\vec{\Grd},\vec{\beta}))
\frac{|L(\vec{\beta})|}{n^{|V(\vec{\beta})|}}\,.$$
Observe that
$|L(\vec{\beta})|=|C(\kappa)|\, n^{|V(\vec{\beta})|-|V(\vec{\alpha})|}$.
Hence
$$\sum_{\vec{\beta}_*\in N_{r+1}(\vec{\alpha}_*)} \vec{\mu}_n (T(\vec{\Grd}n,\vec{\beta}_*))=
\sum_{\vec{\beta}\in N_{r+1}(\vec{\alpha})}\vec{\mu}(T(\vec{\Grd},\vec{\beta}))
\frac{|C(\kappa)|}{n^{|V(\vec{\alpha})|}}\,.$$
Therefore, using equation (\ref{gyors}), our lemma follows. \qed
\vskip0.2in
\noindent
The following proposition shall be crucial in our construction.
\begin{propo}\label{master}
For any $\vec{\alpha}_*\in\vec{U}^{r,d}_n$ and $\vec{\psi}_*\in\vec{E}^{r,d}_n$
\begin{itemize}
\item
$\vec{\mu}_n(T(\vec{\Grd}n,\vec{\alpha}_*))=\sum_{\vec{\phi}_*\in\vec{E}^{r,d}_n,\,s(\vec{\phi}_*)=\vec{\alpha}_*}
\vec{\mu}_n(T(\vec{\Grd}n,\vec{\phi}_*))$
\item
$\vec{\mu}_n(T(\vec{\Grd}n,\vec{\alpha}_*))=\sum_{\vec{\phi}_*\in\vec{E}^{r,d}_n,\,t(\vec{\phi}_*)=\vec{\alpha}_*}
\vec{\mu}_n(T(\vec{\Grd}n,\vec{\phi}_*))$
\item
$\vec{\mu}_n(T(\vec{\Grd}n,\vec{\psi}_*))=
\vec{\mu}_n(T(\vec{\Grd}n,T^{r,d}_n(\vec{\psi}_*)))\,.$
\end{itemize}
\end{propo}
\noindent{\it Proof.}
The first equation follows from the fact that $\vec{\mu}_n$ is a Borel-measure.
Thus the second equation will be an immediate corollary of the third one.
So, let us turn to the third equation.
Let $\mathcal{F}(\vec{\psi}_*)=\vec{\psi}\in\vec{E}^{r,d}$ and let $\kappa$ be a vertex-labeling
of $\vec{\psi}$ representing $\vec{\psi}_*$. It is enough to prove that
$$\vec{\mu}_n(T(\vec{\Grd}n,\vec{\psi}_*))=
\frac{|C(\kappa)|}{n^{|V(\vec{\psi})|}} \vec{\mu}(T(\vec{\Grd},\vec{\psi}))\,,$$
where $C(\kappa)$ is the set of labelings of $\vec{\psi}$ equivalent to $\kappa$.
Let $N_{r+1}(\vec{\psi})\subset\vec{U}^{r+1,d}$ be the set of elements $\vec{\beta}$ such that the
edge-ball of radius $r$ around the root of $\vec{\beta}$ is isomorphic to $\vec{\psi}$.
Then
\begin{equation} \label{gyors2}
\vec{\mu}(T(\vec{\Grd},\vec{\psi}))=\sum_{\vec{\beta}\in N_{r+1}(\vec{\psi})} \vec{\mu}(T(\vec{\Grd},\vec{\beta}))\,.
\end{equation}
Observe that
$$\vec{\mu}_n(T(\vec{\Grd}n,\vec{\psi}_*))=\sum_{\vec{\beta}\in N_{r+1}(\vec{\psi})} \vec{\mu}(T(\vec{\Grd},\vec{\beta}))
\frac{k(\vec{\beta},\vec{\psi}_*)}{n^{|V(\vec{\beta})|}}\,,$$
where $k(\vec{\beta},\vec{\psi}_*)$ is the number of labelings
of $\vec{\beta}$ extending an element that is equivalent to $\kappa$.
Notice that $k(\vec{\beta},\vec{\psi}_*)=|C(\kappa)|\,n^{|V(\vec{\beta})|-|V(\vec{\psi})|}\,.$
Hence by (\ref{gyors2})
$\vec{\mu}_n(T(\vec{\Grd}n,\vec{\psi}_*))=
\frac{|C(\kappa)|}{n^{|V(\vec{\psi})|}} \vec{\mu}(T(\vec{\Grd},\vec{\psi}))\,,$ thus our
proposition follows. \qed
\section{Label-separated balls}
Let $\Grd^n$ be the isomorphism classes of
\begin{itemize}
\item connected countable rooted graphs with vertex degree bound $d$
\item with vertex labels from the set $\{1,2,\dots,n\}$.
\end{itemize}
Again, we define the space of labeled $r$-balls $U^{r,d}_n$. Then
$\Grd^n$ is a compact space with closed-open sets $T(\Grd^n,M), M\in\urd_n$.
Similarly to the previous section we define an associated probability measure
$\mu_n$, where $\mu$ is an involution-invariant probability measure on $\textrm{\textbf{G}}rd$.
\noindent
Let $M\in\urd_n$ and let $R(M)$ be the set of elements of
$\vec{U}^{r,d}_n$ with underlying graph $M$.
If $A\in R(M)$, then the multiplicity of $A$, $l_A$ is the number of
edges $e$ pointing out from the root of $A$ such that
there is a label-preserving rooted automorphism of $A$ moving the
distinguished edge to $e$.
Now let
$$\mu_n(M):=\frac{1}{\deg(M)}\sum_{A\in R(M)} l_A\vec{\mu}_n(A)\,.$$
The following lemma is an immediate consequence of Lemma \ref{borel}.
\begin{lemma} \label{egyenletek}
$\mu_n$ is a Borel-measure on $\Grd^n$ and
$\sum_{M\in M(\alpha)}\mu_n(M)=\mu(\alpha)$ if
$\alpha\in U^{r,d}$ and $M(\alpha)$ is the set of labelings of $\alpha$ by
$\{1,2,\dots,n\}$.
\end{lemma}
\begin{defin}
$M\in\urd_n$ is called label-separated
if all the labels of $M$ are different.
\end{defin}
\begin{lemma}
For any $\alpha\in U^{r,d}$ and $\delta>0$ there exists an $n>0$
such that
$$|\sum_{M\in M(\alpha),\,M\,\,\mbox{ is label-separated}}
\mu_n(T(\textrm{\textbf{G}}rd,M))-\mu(T(\textrm{\textbf{G}}rd,\alpha))|<\delta\,.$$
\end{lemma}
\noindent{\it Proof.}
Observe that
$$\sum_{M\in M(\alpha),\,M\,\,\mbox{ is label-separated}}
\mu_n(T(\textrm{\textbf{G}}rd,M))=\frac{T(n,\alpha)}{n^{|V(\alpha)|}}\mu(T(\textrm{\textbf{G}}rd,\alpha))\,,$$
where $T(n,\alpha)$ is the number of
$\{1,2,\dots,n\}$-labelings of $\alpha$ with
different labels.
Clearly, $\frac{T(n,\alpha)}{n^{|V(\alpha)|}}\to 1$ as $n\to\infty$. \qed
\section{The proof of Theorem \ref{theorem1}}
Let $\mu$ be an involution-invariant probability measure on $\textrm{\textbf{G}}rd$
supported on trees.
It is enough to prove that for any $r\geq 1$ and $\varepsilon >0$ there exists a
finite graph $G$ such that for any $\alpha\in U^{r,d}$
$$|p_G(\alpha)-\mu(T(\Grd,\alpha))|<\varepsilon\,.$$
The idea we follow is close to the one used by Bowen in \cite{Bow}.
First, let $n>0$ be a natural number such that
\begin{equation} \label{becs1}
|\sum_{M\in M(\alpha),\,M\,\,\mbox{ is label-separated}}
\mu_n(T(\Grd,M))-\mu(T(\Grd,\alpha))|<\frac{\varepsilon}{10}\,.
\end{equation}
\noindent
Then we define a directed labeled finite graph $H$ to encode some
information on $\vec{\mu}_n$. If $A\in\vec{U}^{r+1,d}_n$ then let $L_A$ be the
unique element of $\vec{E}^{r,d}_n$ contained in $A$.
\noindent
The set of vertices of $H$ is $V(H):=\vec{U}^{r+1,d}_n$.
If $A,B\in \vec{U}^{r+1,d}_n$ and $L_A=L^{-1}_B$ (we use
the inverse notation instead of writing out the involution operator) then
there is a directed edge $(A,L_A,B)$ from $A$ to $B$ labeled by $L_A$ and
a directed edge $(B,L_B,A)$ from $B$ to $A$ labeled by $L_B=L^{-1}_A$.
Note that we might have loops.
We define the weight function $w$ on $H$ by
\begin{itemize}
\item $w(A)=\vec{\mu}_n(T(\vec{\Grd}n,A))$.
\item $w(A,L_A,B)=\vec{\mu}_n(T(\vec{\Grd}n,L_{A,B}))\,,$
where $L_{A,B}\in\vec{E}^{r+1,d}_n$ is the unique element such that
$s(L_{A,B})=A, t(L_{A,B})=B$.
\end{itemize}
By Proposition \ref{master} we have the following equation for
all $A,B$ that are connected in $H$:
\begin{equation} \label{d1}
w(A,L_A,B)=w(B,L^{-1}_A,A)\,.
\end{equation}
Also,
\begin{equation} \label{d2}
w(A)=\sum_{(A,L_A,B)\in E(H)} w(A,L_A,B)
\end{equation}
and
\begin{equation} \label{d3}
w(A)=\sum_{(B,L^{-1}_A,A)\in E(H)} w(B,L^{-1}_A,A)\,.
\end{equation}
Also if $M\in U^{r+1,d}_n$ then
\begin{equation} \label{d4}
\mu_n(M)=\frac{1}{\deg(M)}\sum_{A\in R(M)} l_A w(A),
\end{equation}
where $l_A$ is the multiplicity of $A$.
\vskip0.2in
\noindent
Since the equations (\ref{d1}), (\ref{d2}), (\ref{d3}) have rational
coefficients we also have weight functions $w_{\delta}$ on $H$
\begin{itemize}
\item taking only rational values
\item satisfying equations (\ref{d1}), (\ref{d2}), (\ref{d3})
\item such that $|w_{\delta}(A)-w(A)|<\delta $ for any $A\in V(H)$, where
the exact value of $\delta$ will be given later.
\end{itemize}
Now let $N$ be a natural number such that
\begin{itemize}
\item $\frac{Nw_{\delta}(A)}{l_A}\in\mathbb{N}$ if $A\in V(H)$.
\item $Nw_{\delta}(A,L_A,B)\in\mathbb{N}$ if $(A,L_A,B)\in E(H)$.
\end{itemize}
\vskip0.2in
\noindent
{\bf Step 1.} We construct an edge-less graph $Q$ such that:
\begin{itemize}
\item $V(Q)=\cup_{A\in V(H)} Q(A)$\quad (disjoint union)
\item $|Q(A)|=N w_{\delta}(A)\,$
\item each $Q(A)$ is partitioned into
$\cup_{(A,L_A,B)\in E(H)} Q(A,L_A,B)$ such that $|Q(A,L_A,B)|=
Nw_{\delta}(A,L_A,B)$.
\end{itemize}
Since $w_{\delta}$ satisfies our equations, such a $Q$ can be constructed.
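Indeed, the prescribed part sizes in the third item are consistent because $w_{\delta}$ satisfies (\ref{d2}):
$$\sum_{(A,L_A,B)\in E(H)} |Q(A,L_A,B)|=\sum_{(A,L_A,B)\in E(H)} Nw_{\delta}(A,L_A,B)=Nw_{\delta}(A)=|Q(A)|\,.$$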
\vskip0.2in
\noindent
{\bf Step 2.} We add edges to $Q$ in order to obtain the
graph $R$. For each pair $A,B$ that is connected in the graph $H$,
form a bijection $Z_{A,B}:Q(A,L_A,B)\to Q(B,L_B,A)$.
If there is a loop in $H$, consider a bijection $Z_{A,A}$.
Then draw an edge between $x\in Q(A,L_A,B)$ and $y\in Q(B,L_B,A)$
if $Z_{A,B}(x)=y$.
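Such bijections exist since the two sets have the same size: the edge from $B$ to $A$ carries the label $L_B=L^{-1}_A$, so by (\ref{d1})
$$|Q(A,L_A,B)|=Nw_{\delta}(A,L_A,B)=Nw_{\delta}(B,L_B,A)=|Q(B,L_B,A)|\,.$$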
\vskip0.2in
\noindent
{\bf Step 3.} Now we construct our graph $G$.
If $M\in U^{r+1,d}_n$ is a rooted labeled tree
such that $\mu_n(M)\neq 0$ let $Q(M)=\cup_{A\in R(M)} Q(A)$. We partition
$Q(M)$ into $\cup^{s_M}_{i=1} Q_i(M)$ in such a way that each
$Q_i(M)$ contains exactly $l_A$ elements from the set $Q(A)$.
By the definition of $N$, we can make such a partition.
\noindent
The elements of $V(G)$ will be the sets $\{Q_i(M)\}_{M\in
U^{r+1,d}_n\,,1\leq i \leq s_M}$. We draw one edge between
$Q_i(M)$ and $Q_j(M')$ if there exists $x\in Q_i(M), y\in Q_j(M')$
such that $x$ and $y$ are connected in $R$. We label the
vertex $Q_i(M)$ by the label of the root of $M$.
Let $Q_i(M)$ be a vertex of $G$ such that $M$ is a label-separated
tree. Note that if $M$ is not a rooted tree then $\mu_n(M)=0$.
It is easy to see that the $(r+1)$-ball around $Q_i(M)$ in the graph $G$
is isomorphic to $M$ as a rooted labeled ball.
Also, if $M$ is not label-separated then the $(r+1)$-ball around $Q_i(M)$
cannot be a label-separated tree.
Therefore
\begin{align} \label{becs2}
\sum_{L\in U^{r,d}_n\,,\,\mbox{ $L$ is not a label-separated tree}}
p_G(L)
&=\sum_{L\in U^{r,d}_n\,,\,\mbox{ $L$ is not a label-separated tree}}
\sum_{A\in R(L)}w_{\delta}(L) \nonumber \\
&\leq \frac{\varepsilon} {10} +\delta d |U^{r,d}_n|\,.
\end{align}
Also, if $M$ is a label-separated tree then
\begin{equation} \label{becs3}
|p_G(M)-\mu_n(T(\Grd,M))|\leq |R(M)|\delta\leq d\delta\,.
\end{equation}
Thus by (\ref{becs1}), (\ref{becs2}), (\ref{becs3}),
if $\delta$ is chosen small enough then for any $\alpha\in U^{r+1,d}$
$$|p_G(\alpha)-\mu(T(\Grd,\alpha))|<\varepsilon\,.$$
Thus our Theorem follows. \qed
\begin{thebibliography}{99}
\bibitem{AL} {\sc D. Aldous and R. Lyons},
{\em Processes on Unimodular Random Networks},
{\sl Electron. J. Probab.} {\bf 12} (2007), no. 54, 1454--1508.
\bibitem{AS} {\sc D. Aldous and J. M. Steele},
{\em The objective method: probabilistic combinatorial optimization and local
weak convergence},
Probability on discrete structures, 1--72, {\sl Encyclopaedia Math. Sci.},
110, Springer, Berlin, 2004.
\bibitem{BS} {\sc I. Benjamini and O. Schramm},
{\em Recurrence of distributional limits of finite planar graphs},
{\sl Electron. J. Probab.} {\bf 6} (2001), no. 23, 13 pp. (electronic).
\bibitem{Bol} {\sc B. Bollob\'as and O. Riordan},
{\em Sparse graphs: metrics and random models},
preprint, http://arxiv.org/abs/0708.1919.
\bibitem{Bow} {\sc L. Bowen},
{\em Periodicity and circle packings of the hyperbolic plane},
{\sl Geom. Dedicata} {\bf 102} (2003), 213--236.
\end{thebibliography}
\end{document}
\begin{document}
\title[A uniform bijection between ${\rm NN}$ and ${\rm NC}$.]{A uniform bijection between nonnesting and noncrossing partitions}
\author{Drew Armstrong}
\address{Department of Mathematics, University of Miami, Coral Gables, FL, 33146}
\email{[email protected]}
\author{Christian Stump}
\address{LaCIM, Universit\'e du Qu\'ebec \`a Montr\'eal, Montr\'eal (Québec), Canada}
\email{[email protected]}
\author{Hugh Thomas}
\address{Department of Mathematics and Statistics, University of New Brunswick, Fredericton NB, E3B 5A3}
\email{[email protected]}
\subjclass[2000]{Primary 05A05; Secondary 20F55}
\date{\today}
\keywords{Weyl groups, Coxeter groups, noncrossing partitions, nonnesting partitions, cyclic sieving phenomenon, bijective combinatorics}
\begin{abstract}
In 2007, D.I.~Panyushev defined a remarkable map on the set of nonnesting partitions (antichains in the root poset of a finite Weyl group). In this paper we identify Panyushev's map with the Kreweras complement on the set of noncrossing partitions, and hence construct the first uniform bijection between nonnesting and noncrossing partitions. Unfortunately, the proof that our construction is well-defined is case-by-case, using a computer in the exceptional types. Fortunately, the proof involves new and interesting combinatorics in the classical types. As consequences, we prove several conjectural properties of the Panyushev map, and we prove two cyclic sieving phenomena conjectured by D.~Bessis and V.~Reiner.
\end{abstract}
\maketitle
\section{Introduction}
To begin we will describe the genesis of the paper.
\subsection{Panyushev complementation}
Let $\Delta\subseteq\Phi^+\subseteq\Phi$ be a triple of {\sf simple roots}, {\sf positive roots}, and a {\sf crystallographic root system} corresponding to a {\sf finite Weyl group} $W$ of \Dfn{rank} $r$. We think of $\Phi^+$ as a poset in the usual way, by setting $\alpha\leq\beta$ whenever $\beta-\alpha$ is in the nonnegative span of the simple roots $\Delta$. This is called the {\sf root poset}. The set of \Dfn{nonnesting partitions} ${\rm NN}(W)$ is defined to be the set of antichains (sets of pairwise-incomparable elements) in $\Phi^+$. This name is based on a pictorial presentation of antichains in the classical types. It is well known that the number of nonnesting partitions is equal to the \Dfn{Catalan number}
\begin{equation*}
{\rm Cat}(W):=\prod_{i=1}^r \frac{d_i+h}{d_i},
\end{equation*}
where $d_1\leq d_2\leq \cdots \leq d_r = h$ are the degrees of a fundamental system of polynomial invariants for $W$ (called the {\sf degrees} of $W$), and where $h$ is the \Dfn{Coxeter number}. This formula was first conjectured by Postnikov~\cite[Remark~2]{Rei1997} and at least two uniform proofs are known \cite{Ath2004,Hai1994}. These enumerations were established in somewhat different contexts; the link to the combinatorics of antichains is supplied in both cases by \cite{CP2002}.
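For instance, in type $A_2$ the degrees are $d_1=2$, $d_2=3$ and $h=3$, so
\begin{equation*}
{\rm Cat}(A_2)=\frac{2+3}{2}\cdot\frac{3+3}{3}=5,
\end{equation*}
matching the five antichains $\emptyset$, $\{\alpha_1\}$, $\{\alpha_2\}$, $\{\alpha_1+\alpha_2\}$ and $\{\alpha_1,\alpha_2\}$ in the root poset of type $A_2$.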
In 2007, Panyushev defined a remarkable map on nonnesting partitions~\cite{Pan2008}. To describe it, we first note that an antichain $I\subseteq\Phi^+$ corresponds bijectively to the {\sf order ideal} $\langle I\rangle\subseteq\Phi^+$ that it generates. The \Dfn{Panyushev complement} is defined as follows.
\begin{definition}
Given an antichain of positive roots $I\subseteq\Phi^+$, define ${\sf Pan}(I)$ to be the antichain of minimal roots in $\Phi^+\setminus\langle I\rangle$.
\end{definition}
\begin{figure}\label{fig:panorbit}
\end{figure}
For example, Figure \ref{fig:panorbit} displays a single orbit of the Panyushev complement acting on the root poset of type $A_3$. The antichain in each picture corresponds to the maximal black dots in the order ideal given by the shaded area. In \cite[Conjecture~2.1]{Pan2008} Panyushev made several conjectures about the Panyushev complementation, which have remained open even in type $A$. Even though ${\sf Pan}$ can be defined on any poset, those conjectures provide strong evidence that the Panyushev complementation behaves in a very special way for root posets, and that it has a particular meaning in this case which has not yet been explained.
\begin{panyushevconjecture}
Let $W$ be a finite Weyl group of rank $r$, with $h$ its Coxeter number, and ${\sf Pan}$ the Panyushev complement on antichains in the associated root poset $\Phi^+$. Moreover, let $\omega_0$ be the unique longest element in $W$.
\begin{itemize}
\item[(i)] ${\sf Pan}^{2h}$ is the identity map on ${\rm NN}(W)$,
\item[(ii)] ${\sf Pan}^h$ acts on ${\rm NN}(W)$ by the involution induced by $-\omega_0$,
\item[(iii)] For any orbit $\mathcal{O}$ of the Panyushev complement acting on ${\rm NN}(W)$, we have
$$\frac{1}{|\mathcal{O}|}\sum_{I \in \mathcal{O}}|I| = r/2.$$
\end{itemize}
\end{panyushevconjecture}
For example, in type $A_3$ we have $2h=8$, and the Panyushev complement has three orbits, of sizes $2$, $4$, and $8$ (the one pictured). In type $A$, $\omega_0$ acts by $\alpha_i \mapsto -\alpha_{n-i}$ where $\alpha_i$ denotes the $i$-th simple root in the linear ordering of the Dynkin diagram. It can easily be seen in the pictured orbit that ${\sf Pan}^h$ acts by ``flipping'' the root poset (this corresponds to reversing the linear ordering of the Dynkin diagram), and
that ${\sf Pan}^{2h}$ is the identity map. Moreover, the average number of elements in this orbit is $\frac{1}{8}(2+1+1+2+2+1+1+2) = 3/2$.
In this paper we will prove the following.
\begin{theorem}
The Panyushev Conjectures are true.
\end{theorem}
However, the proof of this theorem is not the main goal of the paper. Instead, we will use the Panyushev complement as inspiration to solve an earlier open problem: to find a uniform bijection between the antichains in $\Phi^+$ and a different sort of Catalan object, the {\sf noncrossing partitions}. We will then use the combinatorics we have developed to prove the Panyushev Conjectures.
\subsection{Kreweras complementation} There is also a notion of {\sf noncrossing partitions} for root systems, which we now describe.
Let $T$ be the set of all \Dfn{reflections} in a \Dfn{finite Coxeter group} $W$. Those are given by the reflections defined by the positive roots in a (not necessarily crystallographic) finite root system $\Phi$. Let $c \in W$ be a \Dfn{Coxeter element} (i.e., the product of the \Dfn{simple reflections} $S$ defined by the simple roots in some order). Then the set of {\sf noncrossing partitions} is
\begin{equation*}
{\rm NC}(W,c):=\{ w\in W: \ell_T(w)+\ell_T(cw^{-1})=r\}\subseteq W,
\end{equation*}
where $r$ is the rank of $W$. For a full exposition of this object and its history, see~\cite{Arm2006}. It turns out that ${\rm NC}(W,c)$ is also counted by the Catalan number ${\rm Cat}(W)$, but in this case {\bf no uniform proof is known} (the only proof is case-by-case, using a computer for the exceptional types). In this paper we will (partially) remedy the situation by constructing a uniform bijection between antichains in $\Phi^+$ and the noncrossing partitions ${\rm NC}(W,c)$. It is only a partial remedy because our proof that the construction is well-defined remains case-by-case.
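For instance, for $W$ of type $A_2$ realized as the symmetric group on $\{1,2,3\}$ with Coxeter element $c=(1\,2\,3)$, we have
\begin{equation*}
{\rm NC}(W,c)=\bigl\{\mathbf{1},\,(1\,2),\,(2\,3),\,(1\,3),\,c\bigr\},
\end{equation*}
since each transposition $t$ satisfies $\ell_T(t)=\ell_T(ct^{-1})=1$, while $\ell_T(c^{-1})+\ell_T(c\cdot c)=4>2$; this again gives ${\rm Cat}(A_2)=5$ elements.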
Our bijection relies on the Panyushev complement and a certain map on noncrossing partitions, which we now describe. The type $A$ noncrossing partitions were first studied in detail by Kreweras~\cite{Kre1972}, as pictures of ``noncrossing partitions'' of vertices around a circle. He noticed that the planarity of these pictures yields a natural automorphism, which we call the {\sf Kreweras complement}.
\begin{definition}
Given a noncrossing partition $w\in{\rm NC}(W,c)\subseteq W$, let ${\sf Krew}(w):=cw^{-1}$. Since the reflection length $\ell_T$ is invariant under conjugation it follows that ${\sf Krew}(w)$ is also in ${\rm NC}(W,c)$.
\end{definition}
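For instance, in the type $A_2$ example above, ${\sf Krew}$ interchanges $\mathbf{1}$ and $c$, while the three transpositions form a single $\langle{\sf Krew}\rangle$-orbit: ${\sf Krew}^2(w)=cwc^{-1}$ is conjugation by the $3$-cycle $c$, which already permutes the transpositions cyclically. Thus $\langle{\sf Krew}\rangle$ has exactly two orbits on ${\rm NC}(A_2,c)$, of sizes $2$ and $3$.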
\begin{figure}\label{fig:kreworbit}
\end{figure}
In type $A_{n-1}$, the set ${\rm NC}(W,c)$ consists of partitions of the vertices $\{1,2,\ldots,n\}$ placed around a circle, such that the convex hulls of its equivalence classes are nonintersecting (``noncrossing''). To describe the classical Kreweras map, we place vertices $\{1',1,2',2,\ldots,n',n\}$ around a circle; if $\pi$ is a noncrossing partition of $\{1,2,\ldots,n\}$ then ${\sf Krew}(\pi)$ is defined to be the {\bf coarsest} partition of $\{1',2',\ldots,n'\}$ such that $\pi\cup{\sf Krew}(\pi)$ is noncrossing. For example, Figure \ref{fig:kreworbit} shows a single orbit of ${\sf Krew}$ acting on the noncrossing partitions of a square (given by the black vertices). Note here that ${\sf Krew}^2$ rotates the square by $90^\circ$.
For a general root system we have ${\sf Krew}^2(w)=cwc^{-1}$; that is, ${\sf Krew}^2$ is conjugation by the Coxeter element. Since any Coxeter element $c$ has order $h$ (indeed this is an equivalent definition of the Coxeter number $h$) we conclude that ${\sf Krew}^{2h}$ is the identity map. Thus we prove part (i) of the Panyushev conjectures by finding a bijection from antichains to noncrossing partitions that sends ${\sf Pan}$ to ${\sf Krew}$.
\subsection{Panyushev complementation $=$ Kreweras complementation} Since no uniform bijection currently exists, we will create one, essentially by {\em declaring} that ${\sf Pan}={\sf Krew}$. The key observation is the following.
Since a Dynkin diagram of finite type is a tree, we may partition the simple reflections $S$ into sets $S=L\sqcup R$ such that the elements of $L$ commute pairwise, as do the elements of $R$. Let $c_L$ denote the product of the reflections $L$ (in any order) and similarly let $c_R$ denote the product of the reflections $R$. Thus, $c_L$ and $c_R$ are involutions in $W$ and $c=c_Lc_R$ is a special Coxeter element, called a {\sf bipartite Coxeter element}.
The data for ${\sf Pan}$ consists of a choice of simple system $\Delta$, which from now on we will partition as $\Delta=\Delta_L\sqcup\Delta_R$; and the data for ${\sf Krew}$ consists of a Coxeter element, which from now on we will assume to be $c=c_Lc_R$. With this in mind, Panyushev observed that his map has two distinguished orbits: one of size $h$ which consists of the sets of roots at each rank of the root poset; and one of size 2, namely $\{\Delta_L,\Delta_R\}$. Similarly, the Kreweras map on ${\rm NC}(W,c_Lc_R)$ has two distinguished orbits: one of size $h$ consisting of
\begin{equation*}
c_L, c_Lc_Rc_L, \ldots, c_Rc_Lc_R, c_R;
\end{equation*}
and one of size 2, namely $\{{\bf 1},c\}$. The attempt to match these orbits was the genesis of our Main Theorem.
To understand its statement, we must first discuss parabolic recursion. Let $W_J\subseteq W$ denote the {\sf parabolic subgroup} generated by some subset $J\subseteq S$ of simple reflections, and let $\Delta_J\subseteq\Phi_J^+\subseteq\Phi^+$ be the corresponding simple and positive roots. Antichains and noncrossing partitions may be restricted to $W_J$ as follows. Let $I\subseteq\Phi^+$ be an antichain and define its support ${\sf supp}(I)=\langle I\rangle\cap\Delta$ to be the simple roots below it. If ${\sf supp}(I)\subseteq J$ then $I$ is also an antichain in the parabolic sub-root system $\Phi^+_J$. Similarly, the set $J$ induces a unique partition of the diagram $J=L_J\sqcup R_J$ with $L_J\subseteq L$ and $R_J\subseteq R$, and we may discuss the {\sf parabolic noncrossing partitions}
\begin{equation*}
{\rm NC}(W_J,c_{L_J}c_{R_J})\subseteq{\rm NC}(W,c_Lc_R).
\end{equation*}
With these notions in mind, we state our main theorem.
\begin{maintheorem}\label{th:main}
Let $S=L\sqcup R$ be a bipartition of the simple reflections with corresponding bipartition $\Delta=\Delta_L\sqcup\Delta_R$ of the simple roots and bipartite Coxeter element $c=c_Lc_R$. Then {\bf there exists a (unique) bijection} $\alpha$ from nonnesting partitions ${\rm NN}(W)$ to noncrossing partitions ${\rm NC}(W,c)$ satisfying the following three properties:
\begin{itemize}
\item $\alpha(\Delta_L)={\bf 1}$,
{\em (initial condition)}
\item $\alpha\circ{\sf Pan}={\sf Krew}\circ\alpha$,
{\em (${\sf Pan}={\sf Krew}$)}
\item $\alpha(I)=\left(\prod_{s\in L\setminus{\sf supp}(I)} s\right) \alpha |_{{\sf supp}(I)}(I).$
{\em (parabolic recursion)}
\end{itemize}
\end{maintheorem}
That is, to compute $\alpha$ of an antichain $I$, let $J={\sf supp}(I)$. If $J\subsetneq S$ then we think of $I$ as an antichain in the proper subsystem $\Phi^+_J$. We compute $\alpha|_J(I)$, which is an element of
\begin{equation*}
{\rm NC}(W_J,c_{L_J}c_{R_J})\subseteq W_J,
\end{equation*}
and then multiply on the left by the simple $L$-reflections {\bf not} in $J$. As $J$ gets smaller, we will reach the initial condition $\alpha(\Delta_{L_J})={\bf 1}$. If $J=S$ then we apply the map ${\sf Pan}$ $k$ times until we have ${\sf supp}({\sf Pan}^k I)\subsetneq S$. Finally, we apply $\alpha$ and then ${\sf Krew}^{-k}$. That this process works is the content of the theorem.
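As a small worked illustration, consider type $A_2$ with $\Delta_L=\{\alpha_1\}$, $\Delta_R=\{\alpha_2\}$, $c=s_1s_2$, and highest root $\theta=\alpha_1+\alpha_2$. The three properties force
\begin{equation*}
\alpha(\{\alpha_1\})=\mathbf{1},\quad \alpha(\emptyset)=s_1,\quad \alpha(\{\alpha_2\})=s_1s_2=c,\quad \alpha(\{\theta\})=s_2,\quad \alpha(\{\alpha_1,\alpha_2\})=s_1s_2s_1.
\end{equation*}
For example, ${\sf supp}(\{\theta\})=S$ and ${\sf Pan}(\{\theta\})=\emptyset$, so $\alpha(\{\theta\})={\sf Krew}^{-1}(\alpha(\emptyset))=\alpha(\emptyset)^{-1}c=s_1\,s_1s_2=s_2$; one checks directly that the resulting map is a bijection onto ${\rm NC}(W,c)$ and intertwines ${\sf Pan}$ with ${\sf Krew}$.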
\begin{remark}
The statement of the Main Theorem is {\bf uniform}. (That is, it is expressed purely in terms of root systems.) Unfortunately, we will prove the theorem in a case-by-case way. Fortunately, the proof involves new and interesting combinatorics in the classical types, which is new and interesting even in type $A$.
\end{remark}
We note that the interaction between the ``nonnesting'' and ``noncrossing'' properties is a subtle phenomenon, even in type $A_{n-1}$ alone (see \cite{CDDSY2007}). There has also been earlier progress on the problem for general finite root systems: A.~Fink and B.I.~Giraldo \cite{FG2009} and M.~Rubey and the second author~\cite{RS2010} have both constructed bijections which work for the classical types. These bijections have an advantage over ours in that they both preserve the ``parabolic type'' of noncrossing and nonnesting partitions. However, our bijection has the advantage of being uniform for root systems, as well as proving the Panyushev conjectures and a cyclic sieving phenomenon as described in the following section.
\subsection{Cyclic Sieving} The cyclic sieving phenomenon was introduced by V.~Reiner, D.~Stanton, and D.~White in \cite{RSW2004} as follows: let $X$ be a finite set, let $X(q) \in \mathbb{Z}[q]$ and let $\mathcal{C}_d = \langle c \rangle$ be a cyclic group of order $d$ acting on $X$. The triple $(X,X(q),\mathcal{C}_d)$ exhibits the \Dfn{cyclic sieving phenomenon (CSP)} if
$$ [X(q)]_{q = \zeta^k} = \big| X^{c^k} \big|,$$
where $\zeta$ denotes a primitive $d$-th root of unity and $X^{c^k} := \{x \in X : c^k(x) = x\}$ is the fixed-point set of $c^k$ in $X$. Let
\begin{align}
X(q) &\equiv a_0 + a_1 q + \ldots + a_{d-1} q^{d-1} \mod (q^d-1). \label{eq:CSPorbitlengths}
\end{align}
An equivalent way to define the CSP is to say that $a_i$ equals the number of $\mathcal{C}_d$-orbits in $X$ whose stabilizer order divides $i$ \cite[Proposition 2.1]{RSW2004}.
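As a toy example, take $X=\{1,2,3,4\}$ with $\mathcal{C}_4$ acting by $x\mapsto x+1 \pmod 4$ and $X(q)=[4]_q=1+q+q^2+q^3$. Then $[X(q)]_{q=1}=4=|X|$, while $[X(q)]_{q=\zeta^k}=0$ for $k=1,2,3$ (with $\zeta$ a primitive fourth root of unity), matching the fact that no element is fixed by a nontrivial rotation; equivalently, there is a single free orbit, whose stabilizer order $1$ divides every $i$, so $a_0=a_1=a_2=a_3=1$ in (\ref{eq:CSPorbitlengths}).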
Bessis and Reiner recently showed that the action of the Coxeter element on noncrossing partitions together with a remarkable $q$-extension of the Catalan numbers ${\rm Cat}(W)$ exhibits the CSP: define the \Dfn{$q$-Catalan number}
\begin{equation*}
{\rm Cat}(W;q):=\prod_{i=1}^r \frac{[d_i+h]_q}{[d_i]_q},
\end{equation*}
where $[k]_q=1+q+q^2+\cdots +q^{k-1}$ is the usual {\sf $q$-integer}. It is not obvious, but it turns out (see Berest, Etingof, and Ginzburg \cite{BEG2003}) that this number is a polynomial in $q$ with nonnegative coefficients. In type $A_{n-1}$, the formula reduces to the classical $q$-Catalan number of F\"urlinger and Hofbauer \cite{FH1985}. That is, we have
\begin{equation*}
{\rm Cat}(A_{n-1};q)=\frac{1}{[n+1]_q}\begin{bmatrix} 2n\\ n \end{bmatrix}_q,
\end{equation*}
where $\left[\begin{smallmatrix} a \\ b \end{smallmatrix}\right]_q=\frac{[a]_q!}{[b]_q![a-b]_q!}$ is the {\sf Gaussian binomial coefficient} and $[k]_q!:=[1]_q[2]_q\cdots [k]_q$ is the {\sf $q$-factorial}.
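For example, in type $A_2$ this gives
\begin{equation*}
{\rm Cat}(A_2;q)=\frac{1}{[4]_q}\begin{bmatrix} 6\\ 3 \end{bmatrix}_q=1+q^2+q^3+q^4+q^6,
\end{equation*}
a polynomial with nonnegative coefficients that specializes to ${\rm Cat}(A_2)=5$ at $q=1$.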
For a Coxeter element $c\in W$, it follows directly from the definition that the map ${\sf conj}(w) = cwc^{-1}$ is a permutation of the set ${\rm NC}(W,c)$ of noncrossing partitions. In classical types, this corresponds to a ``rotation'' of the pictorial presentation.
\begin{theorem}[Bessis and Reiner \cite{BR2007}] \label{th:BR}
The triple $\big({\rm NC}(W),{\rm Cat}(W;q), \langle{\sf conj}\rangle\big)$ exhibits the CSP for any finite Coxeter group $W$.
\end{theorem}
Actually, they proved this result in the greater generality of {\sf finite complex reflection groups}; we will restrict the current discussion to {\sf (crystallographic) finite real reflection groups} --- that is, finite Coxeter groups and finite Weyl groups, respectively. At the end of their paper, Bessis and Reiner \cite{BR2007} conjectured several other examples of cyclic sieving, two of which we will prove in this paper.
\begin{theorem}
Let $W$ be a finite Coxeter group respectively finite Weyl group.
\begin{itemize}
\item[(i)] The triple $\big({\rm NC}(W),{\rm Cat}(W;q), \langle {\sf Krew} \rangle\big)$ exhibits the CSP.
\item[(ii)] The triple $\big({\rm NN}(W),{\rm Cat}(W;q), \langle {\sf Pan} \rangle\big)$ exhibits the CSP.
\end{itemize}
\label{th:CSPNC}
\end{theorem}
Note that (i) is a generalization of Theorem \ref{th:BR} since ${\sf Krew}^2$ is the same as conjugation by the Coxeter element. The type $A$ version of (i) has been proved by D.~White (see \cite{BR2007}) and independently by C.~Heitsch \cite{Hei}; C.~Krattenthaler has announced a proof of a more general version for complex reflection groups, which appeared for the exceptional types in \cite{KM2008} and will appear for the groups $G(r,p,n)$ in \cite{kra}. In this paper we find it convenient to present an independent proof, on the way to proving our Main Theorem. Combining (i) and the Main Theorem then yields (ii) as a corollary.
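For instance, in type $A_2$ part (i) can be verified by hand: here $2h=6$, the orbits of ${\sf Krew}$ on ${\rm NC}(A_2,c)$ have sizes $2$ and $3$ as noted above, and
\begin{equation*}
{\rm Cat}(A_2;q)=1+q^2+q^3+q^4+q^6\equiv 2+q^2+q^3+q^4 \mod (q^6-1),
\end{equation*}
whose values at $q=1,\zeta,\zeta^2,\zeta^3,\zeta^4,\zeta^5$ (for $\zeta$ a primitive sixth root of unity) are $5,0,2,3,2,0$, exactly the fixed-point counts of ${\sf Krew}^0,\ldots,{\sf Krew}^5$.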
\subsection{Outline} The paper is organized as follows.
In {\bf Section 2}, we introduce a notion of {\sf noncrossing handshake configurations} for the classical types, and define a bijection $\phi_W$ from noncrossing handshake configurations $\mathcal{T}_W$ to noncrossing partitions ${\rm NC}(W,c)$. We establish the cyclic sieving phenomenon for noncrossing partitions using these bijections in classical type, and via a computer check for the exceptional types.
In {\bf Section 3}, we define a bijection $\psi_W$ from the nonnesting partitions of $W$ to $\mathcal{T}_W$ in the classical types. Using this, we establish the cyclic sieving phenomenon for nonnesting partitions in the classical types, and again via a computer check for the exceptional types.
In {\bf Section 4}, we show that the bijection from the nonnesting partitions of $W$ to $\mathcal{T}_W$ in the classical types satisfies a suitable notion of parabolic induction.
In {\bf Section 5}, we put together the bijections from sections two and three to prove the Main Theorem. The calculations for the exceptional types were done using \texttt{Maple} code, which is available from the first author.
In the final section, {\bf Section 6}, we use the combinatorics describing the Panyushev and the Kreweras complementation to prove the {\bf Panyushev conjectures}.
\section{The Kreweras CSP for noncrossing partitions}\label{sectiontwo}
In this section, we prove Theorem~\ref{th:CSPNC}(i) for every type individually. For type $A_{n-1}$, C.~Heitsch proved the theorem by connecting noncrossing partitions of type $A_{n-1}$ to noncrossing set partitions of $[n] := \{1,\ldots,n\}$ and moreover to noncrossing handshake configurations of $[2n]$ and to rooted plane trees. For the classical types, we will explore a connection which is related to the construction of C.~Heitsch as described in Remark~\ref{re:heitschbijection}.
\subsection{Type $A$}
Fix the linear Coxeter element $c$ to be the long cycle $(1,2,\ldots,n)$. Here, \emph{linear} refers to the fact that it comes from a linear ordering of the Dynkin diagram. It is well-known that the set of noncrossing partitions ${\rm NC}_n := {\rm NC}(A_{n-1})$ can be identified with the set of noncrossing handshake configurations. The ground set consists of $2$ copies of $[n]$ colored by $0$ and $1$ drawn on a circle in the order $1^{(0)},1^{(1)},\ldots,n^{(0)},n^{(1)}$. A \Dfn{noncrossing handshake configuration} is defined to be a noncrossing matching of those $2$ copies of $[n]$, see Figure~\ref{fig:handshakeA}. As shown in the figure, they are in natural bijection with rooted plane trees.
\begin{figure}\label{fig:handshakeA}
\end{figure}
The bijection $\phi_{A_{n-1}} : \mathcal{T}_n \longrightarrow {\rm NC}_n$ is then, for $w = \phi_{A_{n-1}}(T)$, given by
$$ \big(i^{(1)},j^{(0)}\big) \in T \Leftrightarrow w(i) = j.$$
For a direct description of noncrossing partitions in terms of rooted plane trees see e.g. \cite[Figure 6]{Ber2007}.
\begin{remark}\label{re:CoxeterElements}
Observe that the described construction does not require the choice of the linear Coxeter element. As the Coxeter elements in type $A_{n-1}$ are exactly the long cycles, one obtains analogous constructions by labelling the vertices of $\mathcal{T}_n$ by any given long cycle. This corresponds to the natural isomorphism between ${\rm NC}(W,c)$ and ${\rm NC}(W,c')$ given by conjugation sending $c$ to the Coxeter element $c'$. We will make use of this flexibility later on in this paper.
\end{remark}
The following proposition follows immediately from the definition.
\begin{proposition} \label{prop:KrewerasA}
The Kreweras complementation on ${\rm NC}_n$ can be described in terms of $\mathcal{T}_n$ by clockwise rotation of all edges by one, or, equivalently, by counterclockwise rotation of all vertex labels by one. I.e., for $T \in \mathcal{T}_n$, we have
$$
\big(i^{(1)},j^{(0)}\big) \in T \Leftrightarrow \big(j^{(1)},(i+1)^{(0)}\big) \in {\sf Krew}(T).
$$
\end{proposition}
\begin{remark}\label{re:heitschbijection}
One can easily deduce the proposition as well from O.~Bernardi's description \cite[Figure~6]{Ber2007} and the definition of the Kreweras complementation of a set partition to be its coarsest complementary set partition. C.~Heitsch obtains analogous results in \cite{Hei} by directly considering a bijection $\phi'$ between $\mathcal{T}_n$ and ${\rm NC}_n$ which is related to the bijection $\phi$ described above by $\phi'(w) = \phi({\sf Krew}(w))$.
\end{remark}
For more readability, we set ${\rm Cat}_n(q) := {\rm Cat}(A_{n-1};q)$, and ${\rm Cat}_n := {\rm Cat}_n(1)$.
\begin{theorem} \label{th:CSPNCA}
The triple $\big({\rm NC}_n,{\rm Cat}_n(q),\langle {\sf Krew} \rangle \big)$ exhibits the CSP.
\end{theorem}
\begin{proof}
The theorem follows immediately from \cite[Theorem 8]{Hei2}: let $d$ be an integer such that $d \big| 2n$ and let $\zeta$ be a primitive $d$-th root of unity. Then it follows e.g. from \cite[Lemma 3.2]{EF2008} that ${\rm Cat}_n(q)$ reduces for $q = \zeta$ to
\begin{eqnarray}
\big[{\rm Cat}_n(q)\big]_{q=\zeta} =
\left\{ \begin{array}{cl}
{\displaystyle{\rm Cat}_n} &\mbox{if $d=1$} \\[6pt]
n {\displaystyle {\rm Cat}_{\frac{n-1}{2}}} & \mbox{if $d=2$ and $n$ odd} \\[6pt]
{\displaystyle \binom{2n/d}{n/d}} &\mbox{if $d \geq 2$, $d\big|n$} \\[6pt]
{\displaystyle 0} & \mbox{otherwise}
\end{array} \right. \label{eq:CatA}
\end{eqnarray}
In \cite[Theorem 8]{Hei2}, C.~Heitsch proved that noncrossing handshake configurations of $[2n]$ which are invariant under a $d$-fold rotation, i.e., for which ${\sf Krew}^{2n/d}(T) = T$, are counted by those numbers.
\end{proof}
\subsection{Types $B$ and $C$}
As the reflection groups of types $B$ and $C$ coincide, the notions of noncrossing partitions do as well. Therefore we restrict our attention to type $C$. In this case, we fix the linear Coxeter element $c$ to be the long cycle $(1,\ldots,n,-1,\ldots,-n)$ and keep in mind that we could replace $c$ by any long cycle of analogous form. ${\rm NC}(C_n)$ can be seen as the subset of ${\rm NC}(A_{2n-1})$ containing all elements for which $i \mapsto j$ if and only if $-i \mapsto -j$, where $n+i$ and $-i$ are identified.
$\mathcal{T}_{C_n}$ is
defined to be the set of all noncrossing handshake configurations $T$ of $[\pm n]$ for which $(i^{(1)},j^{(0)}) \in T$ if and only if $(-i^{(1)},-j^{(0)}) \in T$. The Kreweras complementation on ${\rm NC}(C_n)$ is again the clockwise rotation of all edges by $1$. Observe that the symmetry property is expressed in terms of the Kreweras complementation by ${\sf Krew}^{2n}(T) = T$ for $T \in \mathcal{T}_{C_n}$. In particular, we see that the Kreweras map of order $4n$ on $\mathcal{T}_{C_n}$ is never free. By construction, the bijection $\phi_{A_{2n-1}} : \mathcal{T}_{2n} \tilde{\longrightarrow} {\rm NC}_{2n}$ restricts to a bijection
$$\phi_{C_n} : \mathcal{T}_{C_n} \tilde{\longrightarrow} {\rm NC}(C_n),$$
which is compatible with the Kreweras complementation, i.e.,
$$\phi_{C_n}({\sf Krew}(T)) = {\sf Krew}(\phi_{C_n}(T)).$$
For the proof of Theorem~\ref{th:CSPNC}(i) in type $C$, we need the following observation.
\begin{lemma}
Let $d_1,d_2 \big| 2n$ and let $d_3 = \lcm{d_1,d_2}$. $T \in \mathcal{T}_n$ is invariant both under $d_1$- and $d_2$-fold rotation if and only if $T$ is invariant under $d_3$-fold rotation.
\end{lemma}
\begin{proposition}
The triple $\big({\rm NC}(C_n),{\rm Cat}(C_n;q),\langle {\sf Krew} \rangle \big)$ exhibits the CSP.
\end{proposition}
The proof in type $C$ is a simple corollary of the proof in type $A$.
\begin{proof}
The $q$-Catalan number ${\rm Cat}(W;q)$ reduces for $W = C_n$ to
\begin{align*}
{\rm Cat}(C_n,q) &= \begin{bmatrix} 2n\\ n \end{bmatrix}_q.
\end{align*}
Let $d$ be an integer such that $d \big| 4n$ and let $\zeta$ be a primitive $d$-th root of unity. Then it follows again from \cite[Lemma 3.2]{EF2008} that ${\rm Cat}(C_n,q)$ reduces for $q = \zeta$ to
\begin{eqnarray*}
\big[{\rm Cat}(C_n,q)\big]_{q=\zeta} = \left\{ \begin{array}{cl} {\displaystyle \binom{4n/d}{2n/d}} &\mbox{if $d$ even and $d\big|2n$} \\[10pt] {\displaystyle \binom{2n/d}{n/d}} &\mbox{if $d$ odd} \\[6pt] {\displaystyle 0} & \mbox{otherwise} \end{array} \right.
\end{eqnarray*}
Let $d\big|4n$. Then by the previous lemma, the elements in $\mathcal{T}_{C_n}$ which are invariant under $d$-fold rotation, i.e., for which ${\sf Krew}^{4n/d}(T) = T$, are exactly those elements in $\mathcal{T}_{2n}$ which are invariant under $\lcm{d,2}$-fold rotation. The proposition follows.
\end{proof}
\subsection{Type $D$}
In this case, we fix the linear Coxeter element $c$ to be $(1,\ldots,n-1,-1,\ldots,-n+1)(n,-n)$. As in types $A$ and $C$, the noncrossing handshake configuration in type $D$ comes from noncrossing set partitions of type $D$ as defined in \cite{AR2004} by replacing every point $i$ by the two points $i^{(0)}$ and $i^{(1)}$, together with the appropriate restrictions, as described below.
Define a matching of
$$\{\pm 1^{(0)},\pm 1^{(1)},\ldots,\pm n^{(0)},\pm n^{(1)}\}$$
to be noncrossing of type $D_n$ if the points $\{\pm 1^{(0)},\pm 1^{(1)},\ldots,\pm (n-1)^{(0)},\pm (n-1)^{(1)}\}$ are arranged clockwise on a circle as in type $C_{n-1}$ and the points $\{\pm n^{(0)},\pm n^{(1)}\}$ form a small counterclockwise oriented square in the center of the circle, and the matching does not cross in this sense. A \Dfn{noncrossing handshake configuration} $T$ \Dfn{of type} $D_n$ is a noncrossing matching $T$ of type $D_n$, with the additional properties that $(i^{(1)},j^{(0)}) \in T$ if and only if $(-i^{(1)},-j^{(0)}) \in T$ and that the size of
$$M_\pm := \{(i^{(1)},j^{(0)}) \in T : \mbox{$i$ and $j$ have opposite signs}\}$$
is divisible by $4$. See Figure~\ref{fig:handshakeD} for examples of noncrossing handshake configurations of type $D_3$.
\begin{figure}\label{fig:handshakeD}
\end{figure}
As in the other types, we keep in mind that we could replace the linear Coxeter element by any Coxeter element to obtain labellings for the vertices of a noncrossing handshake configuration of type $D$.
Define the Kreweras complementation ${\sf Krew}$ on $D_n$ by rotating the labels of the outer circle counterclockwise and the labels of the inner circle clockwise; more precisely, let $\kappa(i^{(0)}) := i^{(1)}$ and
\begin{eqnarray}
\kappa(i^{(1)}) :=
\left\{ \begin{array}{cl}
(i+1)^{(0)} & \mbox{if $i \in [n-2]$} \\
(i-1)^{(0)} & \mbox{if $i \in [-n+2]$} \\
(-1)^{(0)} & \mbox{if $i = n-1$} \\
1^{(0)} & \mbox{if $i = -n+1$} \\
(-n)^{(0)} & \mbox{if $i = n$} \\
n^{(0)} & \mbox{if $i = -n$.}
\end{array} \right. \label{eq:krewerasD}
\end{eqnarray}
Then $(i^{(1)},j^{(0)}) \in T$ if and only if $\big(\kappa(j^{(0)}),\kappa(i^{(1)})\big) \in {\sf Krew}(T)$. To see this, observe that the only outer vertices changing sign are $\pm (n-1)^{(1)}$, and the only two inner vertices are $\pm n^{(1)}$. Thus, the size of $M_\pm$ for ${\sf Krew}(T)$ is again divisible by $4$. As an immediate consequence of the construction in \cite{AR2004}, we obtain that the map $\phi_{D_n} : \mathcal{T}_{D_n} \tilde{\longrightarrow} {\rm NC}(D_n)$ defined in the same way as for ${\rm NC}_n$ is well-defined and a bijection between noncrossing handshake configurations of type $D_n$ and ${\rm NC}(D_n)$.
\begin{proposition} \label{prop:KrewerasD}
The bijection $\phi_{D_n} : \mathcal{T}_{D_n} \tilde{\longrightarrow} {\rm NC}(D_n)$ is compatible with the Kreweras complementation, i.e., for $T \in \mathcal{T}_{D_n}$,
$$\phi_{D_n}({\sf Krew}(T)) = {\sf Krew}(\phi_{D_n}(T)).$$
\end{proposition}
\begin{proof}
Let $\big(i^{(1)},j^{(0)}\big) \in T$. This implies that $\big(\kappa(j^{(0)}),\kappa(i^{(1)})\big) \in {\sf Krew}(T)$. Therefore, by checking the different cases in (\ref{eq:krewerasD}), we obtain $\phi_{D_n}({\sf Krew}(T))\phi_{D_n}(T) = c$,
and moreover, $\phi_{D_n}({\sf Krew}(T)) = c \phi_{D_n}(T)^{-1} = {\sf Krew}(\phi_{D_n}(T))$.
\end{proof}
\begin{proposition} \label{prop:CSPNCD}
The triple $\big({\rm NC}(D_n),{\rm Cat}(D_n;q),\langle {\sf Krew} \rangle \big)$ exhibits the CSP.
\end{proposition}
\begin{proof}
The $q$-Catalan number ${\rm Cat}(D_n;q)$ is given by
\begin{align*}
{\rm Cat}(D_n,q) = \begin{bmatrix} 2n-1 \\ n \end{bmatrix}_{q^2} + q^n \begin{bmatrix} 2n-2\\ n \end{bmatrix}_{q^2}.
\end{align*}
Let $d$ be an integer such that $d \big| 4(n-1)$ and let $\zeta$ be a primitive $d$-th root of unity. Then it follows again from \cite[Lemma 3.2]{EF2008} that ${\rm Cat}(D_n,q)$ reduces for $q = \zeta$ to
\begin{eqnarray*}
\big[{\rm Cat}(D_n,q)\big]_{q=\zeta} =
\left\{ \begin{array}{cl}
{\displaystyle{\rm Cat}(D_n)} &\mbox{if $d=1$} \\[6pt]
{\displaystyle{\rm Cat}(D_n)} &\mbox{if $d=2$, $n$ even} \\[6pt]
{\displaystyle {\rm Cat}(C_{n-1})} &\mbox{if $d=2$, $n$ odd} \\[6pt]
{\displaystyle {\rm Cat}(C_{n/2})} &\mbox{if $d=4$, $4\big|n$} \\[6pt]
{\displaystyle {\rm Cat}(C_{2(n-1)/d})} &\mbox{if $d \geq 4$ even, $d\big|2(n-1)$} \\[6pt]
{\displaystyle {\rm Cat}(C_{(n-1)/d})} &\mbox{if $d \geq 3$ odd} \\[6pt]
{\displaystyle 0} & \mbox{otherwise}
\end{array} \right. \label{eq:CatD}
\end{eqnarray*}
\begin{figure}\label{fig:handshakeDsym}
\end{figure}
For $d=1$, this is obvious.
For $d=2$, $n$ even, the symmetry property implies that ${\sf Krew}^{2(n-1)}(T) = T$ for all $T \in \mathcal{T}_{D_n}$.
For $d=2$, $n$ odd, observe that $T \in \mathcal{T}_{D_n}$ is invariant under $2$-fold symmetry, i.e., ${\sf Krew}^{2(n-1)}(T) = T$ if and only if $\{\pm n^{(0)},\pm n^{(1)}\}$ forms a sub-matching of $T$. Therefore, those are counted by ${\rm Cat}(C_{n-1})$.
For $d=4\big|n$, we want that ${\sf Krew}^{n-1}(T) = T$ and therefore, $\{\pm n^{(0)},\pm n^{(1)}\}$ must not form a sub-matching of $T$ and we are in a situation as indicated in Figure~\ref{fig:handshakeDsym}. This gives
\begin{align*}
\big|\{T \in \mathcal{T}_{D_n} : {\sf Krew}^{n-1}(T) = T\}\big| &= 2(n-1){\rm Cat}(A_{(n-2)/2}) \\
&= \frac{4(n-1)}{n}\binom{n-2}{(n-2)/2} = \binom{n}{n/2},
\end{align*}
where the first $2$ comes from the $2$-fold rotation of the inner square, the $n-1$ is the number of possible connections between the inner square and the circle, and ${\rm Cat}(A_{(n-2)/2})$ is the number of noncrossing handshake configurations of the $n-2$ free points on the outer circle.
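For instance, for $n=4$ this gives $2\cdot 3\cdot 1=6$: the inner square can be rotated in $2$ ways, there are $n-1=3$ possible connections to the outer circle, and the remaining $n-2=2$ free outer points admit a single noncrossing matching; this agrees with $\binom{n}{n/2}=\binom{4}{2}=6$.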
For $d \geq 4$ even, $d\big|2(n-1)$, we have again that $\{\pm n^{(0)},\pm n^{(1)}\}$ forms a sub-matching of $T$ and we have immediately that
$$\big|\{T \in \mathcal{T}_{D_n} : {\sf Krew}^{4(n-1)/d}(T) = T\}\big| = {\rm Cat}(C_{2(n-1)/d}).$$
For $d \geq 3$ odd, it follows that $d\big|n-1$ and the same argument as in the previous case applies.
The only otherwise case which is left is the case $d \geq 4$ even, $d \nmid 2(n-1)$. In this case, we see that $4 \big| d$ and it follows together with the symmetry property that there does not exist a $T \in \mathcal{T}_{D_n}$ such that ${\sf Krew}^{4(n-1)/d}(T) = T$.
\end{proof}
\subsection{Type $I_2(k)$}
For the dihedral groups, we obtain the theorem by straightforward computations. Let $I_2(k) = \langle a,b \rangle$ for two given simple reflections $a,b$ and fix the linear Coxeter element $c := ab$. Then ${\rm NC}(I_2(k))$ consists of $\mathbf{1}$, $c$, and the $k$ reflections contained in $I_2(k)$.
\begin{proposition}
The triple $\big({\rm NC}(I_2(k)),{\rm Cat}(I_2(k);q),\langle {\sf Krew} \rangle \big)$ exhibits the CSP.
\end{proposition}
\begin{proof}
The Kreweras complementation ${\sf Krew}$ on ${\rm NC}(I_2(k))$ has $2$ orbits, one is $\{\mathbf{1},c\}$ and the other contains all $k$ reflections. On the other hand,
\begin{align*}
{\rm Cat}(I_2(k);q) &= \frac{[k+2]_q[2k]_q}{[2]_q[k]_q} \\
&=
\left\{ \begin{array}{cl}
(1+q^2+\dots+q^k)(1+q^k) &\mbox{if $k$ even} \\[6pt]
1+q^2+\dots+q^{k-1}+q^k+q^{k+1}+\dots+q^{2k} &\mbox{if $k$ odd},
\end{array} \right.
\end{align*}
and the proposition follows.
\end{proof}
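For instance, for $k=3$ we get ${\rm Cat}(I_2(3);q)=\frac{[5]_q[6]_q}{[2]_q[3]_q}=1+q^2+q^3+q^4+q^6$, which agrees with ${\rm Cat}(A_2;q)$ above, as it must since $I_2(3)=A_2$.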
\subsection{Exceptional types} \label{sec:exceptionaltypes}
For the exceptional Coxeter groups,
$${\rm Cat}(W;q) \mod(q^{2h}-1)$$
can easily be computed, and by (\ref{eq:CSPorbitlengths}) we need to find the following orbit lengths, where $i*j$ is shorthand for $i$ orbits of length $j$:
\begin{align*}
F_4 &: 8*12, 1*4, 1*3, 1*2, \\
H_3 &: 3*10, 1*2, \\
H_4 &: 9*30, 1*5, 1*3, 1*2, \\
E_6 &: 30*24, 8*12, 1*8, 1*4, 1*3, 1*2, \\
E_7 &: 230*18, 3*6, 1*2, \\
E_8 &: 832*30, 5*15, 3*10, 2*5, 1*3, 1*2.
\end{align*}
Those orbit lengths were verified with a computer; as mentioned above, they can be deduced as well from \cite{KM2008}.
\section{The Panyushev CSP for nonnesting partitions} \label{sec:CSPNN}
In this section, we prove Theorem~\ref{th:CSPNC}(ii) for every type individually by providing a bijection between nonnesting partitions and noncrossing handshake configurations which maps the Panyushev complementation to the Kreweras complementation. We consider the same noncrossing handshake configurations as before,
but we use a different labelling to refer to the vertices. In type $A_{n-1}$,
we label the vertices on the outer circle by $\{1^{(0)},\ldots,n^{(0)},n^{(1)},\ldots,1^{(1)}\}$ in clockwise order. E.g., the noncrossing handshake configuration shown in Figure~\ref{fig:handshakeA} is relabeled as shown in Figure~\ref{fig:handshakeNNA}.
\begin{figure}\label{fig:handshakeNNA}
\end{figure}
\subsection{Type $A$}
Let $\Phi^+ := \{ (i,j) = e_i - e_j : 1 \leq i < j \leq n\}$ be the set of all transpositions identified with a set of positive roots for $A_{n-1}$. The root poset structure on $\Phi^+$ is given by
\begin{align}
(i,j) \leq (i',j') \Leftrightarrow i' \leq i < j \leq j', \label{eq:rootrelationA}
\end{align}
see Figure~\ref{fig:rootposets}(a) for an example.
\begin{figure}\label{fig:rootposets}
\end{figure}
Let $I = \{(i_1,j_1),\ldots,(i_k, j_k)\} \in {\rm NN}(A_{n-1})$ such that $i_1 < \dots < i_k$. Observe that (\ref{eq:rootrelationA}) implies $j_1 < \dots < j_k$ as well. Define a map
$$\psi_{A_{n-1}} : {\rm NN}(A_{n-1}) \longrightarrow \mathcal{T}_n$$
as follows: for $1 \leq \ell \leq k$, mark the vertex $j_\ell^{(0)}$ with $i_\ell$ and for $i \in [n] \setminus \{i_1,\ldots,i_k\}$ mark the vertex $i^{(1)}$ with $i$. Now, for $1 \leq i \leq n$, in increasing order, match the vertex marked with $i$ with the first non-matched vertex, where first is interpreted counterclockwise from the marked vertex if $i \in \{i_1,\ldots,i_k\}$ and clockwise
from the marked vertex if $i \notin \{i_1,\ldots,i_k\}$. For example, for the antichain
$$I = \big\{(1,2),(4,5),(5,6) \big\} \in {\rm NN}(A_5)$$
considered in Figure~\ref{fig:rootposets}(a), we have $\psi_{A_{n-1}}(I) = T$, where $T \in \mathcal{T}_6$ is the noncrossing handshake configuration shown in Figures~\ref{fig:handshakeA} and \ref{fig:handshakeNNA}.
To show that $\psi_{A_{n-1}}$ is a bijection, we now define its inverse map $\psi'_{A_{n-1}}: \mathcal{T}_n \longrightarrow {\rm NN}(A_{n-1})$. Let $T \in \mathcal{T}_n$. Mark all $j^{(\beta)}$ for which $(i^{(\alpha)},j^{(\beta)}) \in T$ with $i < j$, or with $i = j$ and $(\alpha,\beta) = (0,1)$. Next, label all marks $i^{(1)}$ with $i$, and then label all marks $i^{(0)}$ clockwise with the remaining labels in $[n]$. The antichain $\psi'_{A_{n-1}}(T)$ is then given by
$$\psi'_{A_{n-1}}(T) = \big\{(i,j) : \text{ vertex } j^{(0)} \text{ is marked by } i \big\}.$$
\begin{proposition}
The map $\psi'_{A_{n-1}}$ is well-defined and the inverse of $\psi_{A_{n-1}}$. In particular, $\psi_{A_{n-1}}: {\rm NN}(A_{n-1}) \tilde{\longrightarrow} \mathcal{T}_n$ is a bijection.
\end{proposition}
\begin{proof}
To see that $\psi'_{A_{n-1}}$ is well-defined, we have to check that any marked vertex $j^{(0)}$ is marked with some $i<j$. Assume that $j^{(0)}$ is marked with $j$. This implies that the set $\{1^{(0)},\ldots,(j-1)^{(0)},(j-1)^{(1)},\ldots,1^{(1)}\}$ contains $j-1$ marked vertices and forms therefore a sub-matching -- a contradiction to the fact that $j$, as it is marked, is matched to some element in this set.
As in the process of applying $\psi'_{A_{n-1}}$ and of applying $\psi_{A_{n-1}}$ the same vertices get marked, $\psi'_{A_{n-1}}$ is in fact the inverse of $\psi_{A_{n-1}}$.
\end{proof}
\begin{theorem} \label{th:PanyushevKreweras}
The bijection $\psi_{A_{n-1}}$ is compatible with the Panyushev respectively the Kreweras complementation. For $I \in {\rm NN}(A_{n-1})$, we have
$${\sf Krew}(\psi_{A_{n-1}}(I)) = \psi_{A_{n-1}}({\sf Pan}(I)).$$
\end{theorem}
To prove this theorem, we first have to understand how the Panyushev complementation behaves in type $A$. Recall that the support ${\rm supp}(I)$ of some antichain $I \in {\rm NN}(A_{n-1})$ is given by ${\rm supp}(I) := \bigcup_{(i,j) \in I} \{s_i,\ldots,s_{j-1}\}$. Next, set
$$\hat{I} = \big\{(i'_1,j'_1),\ldots,(i'_k,j'_k)\big\}:= I \cup \big\{(i,i) : s_{i-1},s_i \notin {\rm supp}(I) \big\}$$
such that $i'_1 < \ldots < i'_k$, where the dummies $s_0,s_n$ are supposed not to be in ${\rm supp}(I)$. The Panyushev complementation is then given by
$${\sf Pan}(I) = \big\{(i'_2-1,j'_1+1),\ldots,(i'_k-1,j'_{k-1}+1)\big\} \in {\rm NN}(A_{n-1}).$$
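Continuing the example from Figure~\ref{fig:rootposets}(a): for $I=\{(1,2),(4,5),(5,6)\}\in{\rm NN}(A_5)$ we have ${\rm supp}(I)=\{s_1,s_4,s_5\}$, hence $\hat{I}=\{(1,2),(3,3),(4,5),(5,6)\}$ and
$${\sf Pan}(I)=\{(2,3),(3,4),(4,6)\},$$
which is indeed the set of minimal roots outside the order ideal generated by $I$.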
\begin{proposition} \label{prop:supporthandshake}
Let $I$ be a nonnesting partition. Then $s_k \notin {\rm supp}(I)$ if and only if $\{i^{(0)},i^{(1)} : 1 \leq i \leq k \}$ defines a submatching of $\psi_{A_{n-1}}(I)$. In particular,
$$(i^{(0)},i^{(1)}) \in \psi_{A_{n-1}}(I) \Leftrightarrow (i,i) \in \hat{I}.$$
\end{proposition}
\begin{proof}
The proposition follows directly from the definition.
\end{proof}
\begin{example}\label{ex:NNsupport}
The noncrossing handshake configuration $T$ in Figure~\ref{fig:handshakeNNA} is the image of $I = \{(1,2),(4,5),(5,6)\} \in {\rm NN}(A_5)$ under $\psi_{A_5}$. The complement of the support of $I$ is $S \setminus {\rm supp}(I) = \{s_2,s_3\}$. The submatchings guaranteed by the Proposition are those of the form $\{1^{(0)},1^{(1)},\ldots,k^{(0)},k^{(1)}\}$ for $k \in \{ 2, 3\}$.
\end{example}
\begin{proof}[Proof of Theorem~\ref{th:PanyushevKreweras}]
Since it is easier to see, we describe the analogous statement for $\psi'_{A_{n-1}}$. $\psi'_{A_{n-1}}({\sf Krew}(T))$ can be described in terms of $\psi'_{A_{n-1}}(T)$ as follows: a marked $i^{(0)}$ is turned to a marked $(i+1)^{(0)}$ (unless $i=n$ when the mark disappears), and for a marked $i^{(1)}$, we obtain a marked $(i-1)^{(1)}$ (unless $i=1$ when the mark disappears). If $(i^{(0)},i^{(1)}) \in T$, the marked $i^{(1)}$ is replaced by a marked $(i+1)^{(0)}$. The theorem follows with Proposition~\ref{prop:supporthandshake} and the description of ${\sf Pan}(I)$ in terms of $\hat{I}$.
\end{proof}
\subsection{Types $B$ and $C$}
In contrast to the situation for the reflection groups, the notions of root systems do not coincide for types $B$ and $C$. The resulting root posets turn out to be isomorphic (as posets) but not equal.
Thus, it suffices to study the Panyushev complementation on one of the two.
As the connection between the root poset of type $C_n$ and the root poset of type $A_{2n-1}$ is straightforward, whereas there is a little more work to do in type $B_n$, we will study nonnesting partitions of type $C_n$. This corresponds to the fact that the type $C_n$ Dynkin diagram can be obtained from the type $A_{n-1}$ Dynkin diagram through a \lq\lq folding process\rq\rq.
The set of reflections identified with a set of positive roots in type $C_n$ is given by
$$\Phi^+ := \{ (i,j) = e_i - e_j : 1 \leq i < j \leq n \} \cup \{(i,\overline{j}) = e_i + e_j : 1 \leq i \leq j \leq n\}.$$
See Figure~\ref{fig:rootposets}(b) for the root poset of type $C_3$ as an example.
To understand nonnesting partitions of type $C_n$, observe that an antichain in $\Phi^+$ can be identified with a symmetric antichain in the root poset of type $A_{2n-1}$: there is an involution $\delta$ on ${\rm NN}(A_{n-1})$ by horizontally flipping the root poset of type $A_{n-1}$, i.e., replacing the positive root $(i,j)$ by $(n+1-j,n+1-i)$. In other words, $\delta$ is the induced map coming from the involution on the Dynkin diagram sending one linear ordering to the other. Define an antichain $I \in {\rm NN}(A_{n-1})$ to be \Dfn{symmetric} if it is invariant under this involution.
It is well-known that ${\rm NN}(C_n)$ can be seen as the set of all antichains $I \in {\rm NN}(A_{2n-1})$ which are symmetric,
$${\rm NN}(C_n) \cong \big\{I \in {\rm NN}(A_{2n-1}) : \delta(I) = I \big\}.$$
Moreover, this identification is compatible with the Panyushev complementation,
$$\delta(I) = I \Leftrightarrow \delta({\sf Pan}(I)) = {\sf Pan}(I).$$
This allows us to study this complementation on nonnesting partitions of type $C_n$ in terms of symmetric nonnesting partitions of type $A_{2n-1}$.
On the other hand, we have seen above that the bijection $\phi_{A_{2n-1}} : \mathcal{T}_{2n} \longrightarrow {\rm NC}(A_{2n-1})$ restricts to a bijection $\phi_{C_n} : \mathcal{T}_{C_n} \longrightarrow {\rm NC}(C_n)$. Therefore, we want to show that the bijection $\psi_{A_{2n-1}} : {\rm NN}(A_{2n-1}) \longrightarrow \mathcal{T}_{2n}$ gives rise to a bijection $\psi_{C_n} : {\rm NN}(C_n) \longrightarrow \mathcal{T}_{C_n}$ which is again compatible with the Panyushev and the Kreweras complementation.
\begin{lemma} \label{lem:Binvolution}
The involution $\delta$ on $I$ for $I \in {\rm NN}(A_{n-1})$ can be described in terms of the Kreweras complementation as
$$\psi_{A_{n-1}}(\delta(I)) = {\sf Krew}^{n}(\psi_{A_{n-1}}(I)).$$
\end{lemma}
\begin{proof}
For $T \in \mathcal{T}_n$, we have
$$(i^{(\alpha)},j^{(\beta)}) \in T \Leftrightarrow \big((n+1-j)^{(\beta^c)},(n+1-i)^{(\alpha^c)}\big) \in {\sf Krew}^n(T),$$
where $\alpha,\beta \in \{0,1\}$ and $\alpha^c$ (resp. $\beta^c$) denotes the complement of $\alpha$ (resp. $\beta$) in $\{0,1\}$. It is straightforward to check that this observation implies that
$$\psi'_{A_{n-1}}\big({\sf Krew}^{n}(\psi_{A_{n-1}}(I))\big) = \delta(I).$$
\end{proof}
\begin{theorem}
$\psi_{A_{2n-1}}$ restricts to a well-defined bijection $\psi_{C_n} : {\rm NN}(C_n) \longrightarrow \mathcal{T}_{C_n}$.
\end{theorem}
\begin{proof}
The statement of the theorem is equivalent to the statement that
$$\delta(I) = I \Leftrightarrow {\sf Krew}^n(\psi_{A_{2n-1}}(I)) = \psi_{A_{2n-1}}(I).$$
This follows directly from the previous lemma.
\end{proof}
\subsection{Type $D$}
Fix the numbering of the Dynkin diagram of type $D_n$ so that $n-2$ is adjacent to $n-1$, $n$, and $n-3$.
We consider the involution
$\delta$
of this diagram which interchanges $n$ and $n-1$. It acts on ${\rm NN}(D_n)$, ${\rm NC}(D_n)$, and $\mathcal{T}_{D_n}$. On $\mathcal{T}_{D_n}$, it acts by rotating the inner four
vertices by a half turn. It is convenient to define a new type of
noncrossing handshake configuration, which we denote $\mathcal{T}_{D_n/\delta}$: this consists of
$4n-4$ external vertices, labelled as in a $C_{n-1}$ noncrossing handshake configuration, such
that either all the vertices participate in a $180^{\sf conj}irc$-rotationally symmetric
noncrossing matching
(in which case we simply have a $C_{n-1}$ noncrossing handshake configuration) or else all
but four vertices participate in a $180^{\sf conj}irc$-rotationally symmetric
noncrossing matching, while the four remaining vertices are isolated but
have the property that any two of them could be attached without creating
any crossings.
It is clear that elements of $\mathcal{T}_{D_n/\delta}$ correspond to $\delta$-orbits in $\mathcal{T}_{D_n}$.
\subsubsection{Defining a map from ${\rm NN}(D_n)/\delta$ to $\mathcal T_{D_n/\delta}$}
Note that ${\sf Krew}$ acts naturally on $\mathcal{T}_{D_n/\delta}$, while ${\sf Pan}$ acts naturally
on $\delta$-orbits in ${\rm NN}(D_n)$. We will begin by showing that
$( \mathcal{T}_{D_n/\delta},{\sf Krew} )$ and $( {\rm NN}(D_n)/\delta,{\sf Pan} )$ are isomorphic as sets
with a cyclic action.
In this subsection, we will define a cardinality-preserving
bijection from $\delta$-orbits in
${\rm NN}(D_n)$ to $\mathcal{T}_{D_n/\delta}$ which we will denote by
$\psi_{D_n/\delta}$. (In fact, for notational convenience, we will write
$\psi_{D_n/\delta}$ as a map from ${\rm NN}(D_n)$ to $\mathcal{T}_{D_{n}/\delta}$
which is constant on $\delta$-orbits.)
We will then show that it is possible to refine $\psi_{D_n/\delta}$ to a
bijection from ${\rm NN}(D_n)$ to $\mathcal{T}_{D_n}$.
{\em Singleton $\delta$-orbits in $\mathcal{T}_{D_n/\delta}$.} Such an element consists
of a type $C_{n-1}$ noncrossing handshake configuration on $4n-4$ external vertices
$1^{(0)},\dots,(2n-2)^{(0)},(2n-2)^{(1)},\dots,1^{(1)}$.
{\em Singleton $\delta$-orbits in ${\rm NN}(D_n)$.} Such an element of
${\rm NN}(D_n)$ corresponds to a single element of ${\rm NN}(B_{n-1})$. We
reinterpret this as an element of ${\rm NN}(C_{n-1})$, which corresponds
(as we have already seen) to an element of ${\rm NN}(A_{2n-3})$ fixed under
the involution of the $A_{2n-3}$ diagram.
{\em Map from singleton $\delta$-orbits in ${\rm NN}(D_n)$ to $\mathcal{T}_{D_n}$.}
We define $\psi_{D_n/\delta}$ on a singleton $\delta$-orbit by sending
the type
$A_{2n-3}$ antichain to an $A_{2n-3}$ noncrossing handshake configuration, using
$\psi_{A_{2n-3}}$.
Now we consider the doubleton $\delta$-orbits.
Write $H$ for the $2n-2$ vertices
$\{(n-2)^{(1)},\dots,1^{(1)},1^{(0)},\dots,n^{(0)}\}$, and $H^c$ for the
other $2n-2$ vertices on the boundary.
{\em Doubleton $\delta$-orbits in $\mathcal{T}_{D_n}$.} These correspond to
elements of $\mathcal{T}_{D_n/\delta}$ which have four vertices of degree zero.
{\em Doubleton $\delta$-orbits in ${\rm NN}(D_n)$.} Let $I$ be an antichain
in such an orbit. Write $\overline I$ for the collection of type
$A_{2n-3}$ roots obtained by taking each root in $I$, passing first to
$B_{n-1}$, identifying the root poset of $B_{n-1}$ with that of $C_{n-1}$,
and then unfolding to one or two roots in $A_{2n-3}$.
Note that $\overline I$ is typically not an antichain.
\begin{example}
Consider the $D_n$ antichain consisting of $\alpha_n+\alpha_{n-2}$
and $\alpha_{n-1}$. The former contributes elements $(n-1,n+1)$ and
$(n-2,n)$, while the latter contributes $(n-1,n)$. This does not form
an antichain. There will often be two elements in $\overline I$ with
first co-ordinate $n-1$, and two elements with second co-ordinate $n$.
\end{example}
We also associate to $I$ an antichain in $\Phi_{A_{2n-3}}$, defined as follows.
Consider the elements of
$\overline I$ which lie in the square with opposite corners at $(1,2n-2)$ and
$(n-1,n)$. (We call this square $R$.)
Record the first coordinates of these as $i_1,\dots,i_r$,
and the second coordinates as $j_1,\dots,j_r$.
Note that $j_1=j_2$ and $i_r=i_{r-1}$ are
possible (occurring when $\overline I$ is not an antichain).
Define $\widehat I$ by
replacing these $r$ elements of $\overline I$ by the $r-1$ elements
$(i_1,j_2), (i_2,j_3),\dots, (i_{r-1},j_r)$. (In the case that $r=1$,
the result is that $\widehat I\cap R=\emptyset$.)
{\em The map from doubleton $\delta$-orbits in ${\rm NN}(D_n)$ to
doubleton $\delta$-orbits in ${\rm NC}(D_n)$.} We define
$\psi_{D_n/\delta}(I)$ in several steps. Using Lemma \ref{widehatisgood},
below, we know that $\widehat I\in {\rm NN}(C_{n-1})$. Therefore, we can
consider $\psi_{C_{n-1}}(\widehat I)\in \mathcal T_{C_{n-1}}$.
Lemma \ref{lem3} below guarantees that there
are at least two edges in this diagram which run from
vertices in $H$ to vertices in $H^c$. Remove the two such edges which are closest
to the center. The result is a noncrossing handshake configuration of type $D_n/\delta$
as defined above. This is $\psi_{D_n/\delta}(I)$.
\subsubsection{Defining $\psi_{D_n}$}
We now consider refining $\psi_{D_n/\delta}$ to a map from ${\rm NN}(D_n)$ to
$\mathcal T_{D_n}$.
We use the convention that a type
$D$ noncrossing handshake configuration has the same outside labels as for
type $D/\delta$ noncrossing handshake configurations, with four internal vertices which
are numbered by congruence classes modulo 4,
increasing in counter-clockwise order.
We count as ``positive'' the external vertices with label $(0)$ and
the internal vertices $0$ and $3$, and as ``negative'' the external vertices
with label $(1)$ and the internal vertices $1$ and $2$.
In a noncrossing handshake configuration, the number of edges that connect a positive
vertex to a negative vertex must be divisible by 4.
If a noncrossing handshake configuration $T$ of type $D_n/\delta$ has no isolated vertices,
this requirement means that there is a unique way of completing $T$
to a type $D_n$ configuration, while if $T$ has four isolated vertices,
then there are two ways of completing $T$ to a type $D_n$ configuration.
For $a,b$ outer vertices, write $d(a,b)$ for the clockwise distance from
$a$ to $b$. Write $e_I(a,b)$ for the number of vertices in the clockwise
interval from $a$ to $b$, including $b$ but not $a$, and which are not
on the clockwise end of an edge in $\psi_{D_n/\delta}(I)$.
For $I$ an antichain in ${\rm NN}(D_n)$ in a doubleton $\delta$-orbit,
define $s(I)$ to be $0$ if the
root of $I$ whose image in $\overline I$ is $(i,n)$ with $i$ as small
as possible, has $\alpha_{n-1}$ in its support; otherwise, set $s(I)=1$.
We now define $\psi_{D_n}(I)$. If $I$ is in a singleton $\delta$-orbit,
then define $\psi_{D_n}(I)$ to be $\psi_{D_n/\delta}(I)$ together with
edges connecting the internal vertices in the unique possible way.
If $I$ is in a doubleton $\delta$-orbit, define $\psi_{D_n}(I)$ by starting
with $\psi_{D_n/\delta}(I)$ and, for each singleton external vertex $v$,
attach it to the internal vertex whose number
is given by:
$n-d(v,(n-1)^{(0)})+2s(I)+2e_I(v,(n-1)^{(0)})$.
\begin{example}
For the root poset of type $D_3$ with simple roots
$$\alpha_1 = e_1 - e_2, \alpha_2 = e_2 - e_3, \alpha_3 = e_2 + e_3,$$
the four antichains $\emptyset, \{\alpha_1,\alpha_2,\alpha_3\}, \{\alpha_2\}, \{\alpha_1,\alpha_3\}$ are mapped by $\psi_{D_n}$ to the four noncrossing handshake configurations in $\mathcal{T}_{D_3}$ shown in Figure~\ref{fig:handshakeD} from left to right.
\end{example}
\subsubsection{Proof that $\psi_{D_n}$ is well-defined and is a bijection}
There are several lemmas
which must be established to show that the definition given above makes sense, and yields a bijection.
\begin{lemma} \label{widehatisgood}
$\widehat I$ is in ${\rm NN}(A_{2n-3})$. Further, the map from
$I$ to $\widehat I$ is injective, and its image consists of all the antichains
in ${\rm NN}(C_{n-1})$ (thought of as a subset of ${\rm NN}(A_{2n-3})$)
except those containing $(n-1,n)$.
\end{lemma}
\begin{proof}
The inverse map is clear, since $i_r$ must be $n-1$ and $j_1$ must be
$n$. This inverse map can be applied to any antichain in
${\rm NN}(C_{n-1})$ except those containing $(n-1,n)$.
\end{proof}
Now, since $\widehat I$ is in ${\rm NN}(C_{n-1})$, its image under the
bijection $\psi_{A_{2n-3}}$ is a type $C_{n-1}$ noncrossing handshake configuration.
The following lemma is useful.
\begin{lemma}\label{lem2}
The image of $\psi_{C_{n-1}}$ applied to antichains with no roots in $R$,
consists exactly of those type $C_{n-1}$ noncrossing handshake configurations with no edges from
$\{(n-1)^{(1)},\dots,1^{(1)}, 1^{(0)},\dots,(n-1)^{(0)}\}$ to the other vertices.
\end{lemma}
\begin{proof}
The first $n-1$ edges in the noncrossing handshake configuration will all connect
vertices in $\{(n-1)^{(1)},\dots,1^{(1)}$, $1^{(0)},\dots,(n-1)^{(0)}\}$,
which uses up
all those vertices.
\end{proof}
\begin{lemma} \label{lem3}
The image of $\psi_{C_{n-1}}$ applied to $\widehat I$ for $I \in {\rm NN}(D_n)$,
consists of exactly
those type $C_{n-1}$
noncrossing handshake configurations with the property that there is at least
one edge (and therefore at least two edges) from $H$ to $H^c$.
\end{lemma}
\begin{proof}
We have already shown that as $I$ runs through
${\rm NN}(D_n)$, we have that $\widehat I$ runs through those antichains
in ${\rm NN}(C_{n-1})$ not containing $(n-1,n)$.
The image under ${\sf Pan}^{-1}$ of type $C_{n-1}$ antichains not containing
$(n-1,n)$ is exactly the $C_{n-1}$ antichains whose intersection with
$R$ is non-empty.
Now apply Lemma \ref{lem2} to ${\sf Pan}^{-1}(\widehat I)$, together
with the fact that ${\sf Krew} \circ \psi_{C_{n-1}} = \psi_{C_{n-1}}\circ {\sf Pan}$.
\end{proof}
We now have the pieces in place to establish the following proposition:
\begin{proposition} The map $\psi_{D_n/\delta}$ is a bijection from
${\rm NN}(D_n/\delta)$ to $\mathcal{T}_{D_n/\delta}$.
\end{proposition}
\begin{proof} It is clear that $\psi_{D_n/\delta}$ takes singleton $\delta$
orbits in ${\rm NN}(D_n)$ bijectively to the noncrossing handshake configurations in
$\mathcal T_{D_n/\delta}$ which contain no isolated vertices. It is also
clear that $\psi_{D_n/\delta}$ is an injection from doubleton orbits in
${\rm NN}(D_n)$ into the $\mathcal T_{D_n/\delta}$ noncrossing handshake configurations with
four isolated vertices. Finally, given such a diagram, there is a
unique way to reattach the isolated vertices to obtain a
$\mathcal{T}_{C_{n-1}}$ noncrossing handshake configuration such that the reattached edges cross
from $H$ to $H^c$. It follows that $\psi_{D_n/\delta}$ is a bijection.
\end{proof}
We now proceed to show that $\psi_{D_n}$, as defined above, is a bijection
from ${\rm NN}(D_n)$ to $\mathcal{T}_{D_n}$. To begin with, we need the following
lemma which gives a condition equivalent to the parity condition on
the number of edges in a type $D_n$ noncrossing handshake configuration which connect
positive and negative vertices.
\begin{lemma} \label{restatement}
The condition that the number of edges joining a positive vertex
to a negative vertex be divisible by four, is equivalent to the condition that
a positive, even-numbered singleton vertex must
be connected to an internal vertex of odd parity, and similarly for
the other possible choices of singleton vertex, where changing either
``positive'' or ``even-numbered'' reverses the parity of the internal
vertex.
\end{lemma}
We are now ready to prove that $\psi_{D_n}$ is a bijection.
\begin{lemma} $\psi_{D_n}$ is a bijection from ${\rm NN}(D_n)$ to
$\mathcal T_{D_n}$.
\end{lemma}
\begin{proof} We must show that if $v$ and $v'$ are singleton
vertices in $\psi_{D_n/\delta}(I)$, such that the next singleton vertex after
$v$ in counter-clockwise order is $v'$, then
the vertex to which $v'$ is
attached is one step counter-clockwise from that to which $v$ is attached.
We evaluate $-d(v',(n-1)^{(0)})+d(v,(n-1)^{(0)})=-d(v',v)$ by
counting the vertices between $v'$ and $v$ (including $v$ but not
$v'$).
Each edge on the outer rim between $v$ and $v'$ contributes
$-2$ to $-d(v',v)$ (one for each of its endpoints),
and also contributes 2 to $2e_I(v',(n-1)^{(0)})-2e_I(v,(n-1)^{(0)})=2e_I(v',v)$.
The only other contribution to $2e_I(v',v)$ is an
additional 2 coming from the vertex $v$, and also
$-d(v',v)$ has an additional $-1$ coming from $v$. Thus
the total effect is that $v'$ is attached one step counter-clockwise from
the vertex to which $v$ is attached.
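In summary, if there are $k$ edges of the outer rim between $v$ and $v'$, then
$$-d(v',v)+2e_I(v',v)=(-2k-1)+(2k+2)=1,$$
so the quantity appearing in the defining formula for the attachment increases by exactly one in passing from $v$ to $v'$.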
The condition provided by Lemma \ref{restatement} is also clear from
the definition. (Note that the complicated terms don't have any effect
on the parity of the vertex to which we connect $v$.)
Bijectivity follows from bijectivity for $\psi_{D_n/\delta}$ together with
the fact that the two elements of a doubleton $\delta$ orbit in
${\rm NN}(D_n)$ will be mapped to different noncrossing handshake configurations.
\end{proof}
\subsubsection{Compatibility between Panyushev complementation and rotation}
We will first prove that $\psi_{D_n/\delta}$ expresses the compatibility
between Panyushev complementation for ${\rm NN}(D_n)/\delta$ and rotation of
$D_n/\delta$ noncrossing handshake configurations, and then we will prove the similar
result for $\psi_{D_n}$.
\begin{proposition}\label{modsigmacomp}
For $I \in {\rm NN}(D_n)$, we have that
$$\psi_{D_n/\delta}({\sf Pan}(I))={\sf Krew}(\psi_{D_n/\delta}(I)).$$
\end{proposition}
\begin{proof}
We consider three cases separately. The first case is the case that
$I$ is in a singleton $\delta$-orbit,
in which case the result follows immediately
from the analogous result for type $C_{n-1}$.
The second case is when $\widehat I\cap R \ne \emptyset$.
\begin{lemma} If $\widehat I\cap R \ne \emptyset$, then
$\widehat{{\sf Pan}(I)}={\sf Pan}(\widehat I)$ and
$\psi_{C_{n-1}}({\sf Pan}(\widehat I)) ={\sf Krew}(\psi_{C_{n-1}}(\widehat I))=
{\sf Krew}(\psi_{D_n/\delta}(I))$.
\end{lemma}
\begin{proof} The fact that {$\widehat{{\sf Pan}(I)}={\sf Pan}(\widehat I)$} in this
case follows from the definitions.
The compatibility of ${\sf Pan}$ and ${\sf Krew}$ in type $C$ implies that $\psi_{C_{n-1}}({\sf Pan}(\widehat I)) ={\sf Krew}(\psi_{C_{n-1}}(\widehat I))$.
Finally, we wish to show that ${\sf Krew}(\psi_{C_{n-1}}(\widehat I))=
{\sf Krew}(\psi_{D_n/\delta}(I))$.
The result which has to be established is that the pair of innermost edges
in ${\sf Krew}(\psi_{C_{n-1}}(\widehat I))$ is the rotation of the innermost edges of
$\psi_{C_{n-1}}(\widehat I)$. This is true because, in order for the
innermost edges no longer to be innermost, they must no longer run between
the two sides of the diagram. But this would then imply that there were
no edges between $H$ and $H^c$ in $\psi_{C_{n-1}}(\widehat{{\sf Pan}(I)})$,
contrary to Lemma \ref{lem3}.
\end{proof}
We now consider the case that $\widehat I\cap R= \emptyset$. In this
case, in contrast to the previous one, the proof does not pass
through the similar statement in type $C$.
Let $\widehat X={\sf Pan}(\widehat I)$. It is immediate from
the definition of Panyushev complementation that $\widehat X\cap R=
(n-1,n)$.
By Lemma \ref{lem3} it follows that
$\psi_{C_{n-1}}(\widehat X)$ has no edges from $H$ to $H^c$.
By the compatibility of ${\sf Pan}$ and ${\sf Krew}$ in type $C$, we have that
$\psi_{C_{n-1}}(\widehat X)={\sf Krew} (\psi_{C_{n-1}}(\widehat I))$.
The innermost edges of $\psi_{C_{n-1}}(\widehat I)$ connecting
$H$ to $H^c$, after rotation, no longer connect $H$ to $H^c$. Thus,
in $\psi_{C_{n-1}}(\widehat X)$, those edges connect
$(n+1)^{(0)}$ to some
$z'$ in $H^c$ and
$(n-2)^{(1)}$ to some (symmetrical) $z$ in $H$.
\begin{lemma} $\psi_{C_{n-1}}(\widehat{{\sf Pan}(I)})$ can be obtained from $\psi_{C_{n-1}}
(\widehat X)$ by removing the edges connected to $(n+1)^{(0)}$ and
$(n-2)^{(1)}$ and replacing them by the other possible pair of symmetrical
edges.\end{lemma}
\begin{proof}
$\overline I \cap R$ necessarily equals $(n-1,n)$.
Let $Y={\sf Pan}(I)$. There are two possibilities for $\overline Y\cap R$:
it equals either $\{(n-2,n),(n-1,n),(n-1,n+1)\}$ or
$\{(n-1,n)\}$,
depending on whether or not $\overline I$ has any entries on the
$(n-1)$-th row (or equivalently the $(n+1)$-th column).
The corresponding values of $\widehat Y\cap R$ are $\{(n-2,n),(n-1,n+1)\}$ and
$\emptyset$.
Now consider applying $\psi_{C_{n-1}}$ to $\widehat X$ and $\widehat Y$.
Suppose first that we are in the case that $\widehat Y\cap R=\emptyset$. This
means that the $(n-1)$-th row is empty in $\overline I$, so in $\widehat I$, both
$R$ and the row below $R$ are empty. We have seen already that the fact
that $R$ is empty means that there are no edges between vertices numbered
at most $n-1$ and those numbered at least $n$. A similar argument
shows that the absence of roots in the $(n-1)$-th row implies that the
vertices numbered at most $n-2$ are connected to other vertices in that
set. It follows that $(n-1)^{(0)}$ and $(n-1)^{(1)}$ are connected in
$\psi_{A_{2n-3}}(\widehat I)$.
By symmetry, $n^{(0)}$ and $n^{(1)}$ are also.
In determining $\psi_{C_{n-1}}(\widehat X)$, $n^{(0)}$ gets the label $n-1$.
In determining $\psi_{C_{n-1}}(\widehat Y)$, the label $n-1$ goes to $(n-1)^{(1)}$, the
symmetrically opposite vertex. We know that $\psi_{C_{n-1}}(\widehat Y)$ has no
edges connecting vertices $\leq n-1$ with those $\geq n$, so the
result of adding the $n-1$-th edge is to complete the matchings among
the vertices $\leq n-1$. It follows that when we evaluate $\psi_{C_{n-1}}(\widehat X)$
instead, vertex $n^{(0)}$ will necessarily be connected to the
same vertex as $(n-1)^{(1)}$ was in $\psi_{C_{n-1}}(\widehat Y)$. This means that,
while $n^{(0)}$ and $(n-2)^{(1)}$ are connected in
$\psi_{C_{n-1}}(\widehat X)$, we have that $n^{(0)}$ and $(n+1)^{(0)}$ are connected
in $\psi_{C_{n-1}}(\widehat Y)$, establishing the claim.
Now consider the case that $\widehat Y\cap R=\{(n-2,n),(n-1,n+1)\}$.
In determining $\psi_{C_{n-1}}(\widehat Y)$, we have that $n^{(0)}$ receives the label
$n-2$ and $(n+1)^{(0)}$ receives the label $n-1$. Since $\widehat X$ and $\widehat Y$
only differ inside $R$, we have that the $n-2$-th column is empty
in $\widehat X$, so $(n-2)^{(1)}$ receives the $n-2$ label; and we also
have that $n^{(0)}$ receives the label $n-1$.
Let us write $b$ for the vertex joined to $n^{(0)}$ in $\widehat X$,
and $a$ for the vertex joined to $(n-2)^{(1)}$ in $\widehat X$. Note
that in $\widehat X$, there are no edges between $H$ and $H^c$, so,
prior to the $(n-2)$-th edge being drawn, the four available vertices
in $H$ are $(n-2)^{(1)},a,b,n^{(0)}$ (in clockwise order).
Now consider what happens when we evaluate $\psi_{C_{n-1}}(\widehat Y)$. When
adding the $(n-2)$-th edge, we connect $n^{(0)}$ to the next available
vertex counter-clockwise from it, which is $b$. Next, we connect to
$(n+1)^{(0)}$ the next available vertex counter-clockwise from it, which
is $a$.
The result is that $n^{(0)}$ is attached to the same vertex in
$\widehat X$ and $\widehat Y$, but the vertex attached to $(n+1)^{(0)}$ in
$\widehat Y$ is attached to $(n-2)^{(1)}$ in $\widehat X$. This suffices to
establish the claim. \end{proof}
The final case of the proposition now follows,
because the only edges between $H$ and $H^c$ in
$\psi_{C_{n-1}}(\widehat {{\sf Pan}(I)})$ are the new edges identified above,
whose four end-vertices
are the result of rotating clockwise the four degree zero
vertices of $\psi_{D_n/\delta}(I)$.
\end{proof}
In order to show the compatibility between $\psi_{D_n}$ and Panyushev
complementation,
we must study the relationship between $s(I)$ and $s({\sf Pan}(I))$.
It is straightforward
to check that $s(I)$ and $s({\sf Pan}(I))$ are the same iff $I$ contains a root
supported over vertex $n-2$ but neither $n-1$ nor $n$. This is equivalent
to saying that $\widehat I$ includes some root $(j,n-1)$ (i.e., a root
on the row just below $R$).
This can also be described in terms of $\psi_{D_n}(I)$, as in the lemma below.
\begin{lemma} For $I$ an $A_{2n-3}$-antichain, $I$ contains a root
$(j,n-1)$ iff $\psi_{A_{2n-3}}(I)$ contains an edge joining $(n-1)^{(0)}$ to some
$k$ in $\{(n-3)^{(1)},\dots,1^{(1)},1^{(0)},\dots,(n-2)^{(0)}\}$.
\end{lemma}
\begin{proof} If $I$ has such a root, then the $j$-th edge which is
added will be an edge joining $(n-1)^{(0)}$ to such a $k$. (Since $j\leq n-2$,
at the $j$-th step, at least one of the vertices in
$\{(n-3)^{(1)},\dots,(n-2)^{(0)}\}$ will be available.)
On the other hand, if $\psi_{A_{2n-3}}(I)$ contains such an edge with $k=k^{(0)}$,
the only possibility is that there was a root $(j,n-1)$ in $I$. If
$k=k^{(1)}$ then an edge from $k$ could have been added at the $k$-th
step, but this edge would not have been joining $k^{(1)}$ to $(n-1)^{(0)}$
as there would have been an available vertex with a smaller label.
\end{proof}
We say that a vertex is the clockwise end of an edge if the vertex is
not degree zero, and the vertex to which it is attached
is closer to it counter-clockwise than clockwise.
\begin{lemma}\label{schange} $s(I)=s({\sf Pan}(I))$ iff $(n-1)^{(0)}$ is on the
clockwise end of an edge in $\psi_{D_n/\delta}(I)$.
\end{lemma}
\begin{proof}
It follows from the previous lemma that
$s(I)=s({\sf Pan}(I))$ iff $(n-1)^{(0)}$ is
attached to some $k$ in $\{(n-3)^{(1)},\dots,(n-2)^{(0)}\}$ in
$\psi_{A_{2n-3}}(\widehat I)$.
Suppose
$(n-1)^{(0)}$ is attached to some $k$ in $\{(n-3)^{(1)},\dots,(n-2)^{(0)}\}$
in $\psi_{A_{2n-3}}(\widehat I)$. Observe that $(n-1)^{(0)}$ cannot be degree
zero in $\psi_{D_n/\delta}(I)$, because the edge from $(n-1)^{(0)}$ to $k$
is entirely within $H$. Therefore $(n-1)^{(0)}$ is on the clockwise end
of its edge.
Conversely, if $(n-1)^{(0)}$ is on the clockwise end of an edge in
$\psi_{D_n/\delta}(I)$, either
it is attached to $k$ in $\{(n-3)^{(1)},\dots,(n-2)^{(0)}\}$, or else it
is attached to $(n-1)^{(1)}$. In fact, though, it cannot be attached
to $(n-1)^{(1)}$ in $\psi_{D_n/\delta}$. If it were the case that
$(n-1)^{(0)}$ and $(n-1)^{(1)}$ were attached in $\psi_{A_{2n-3}}(\widehat I)$,
this edge would have been removed in $\psi_{D_n/\delta}(I)$.
Thus $s(I)=s({\sf Pan}(I))$.
\end{proof}
We are now ready to prove the following result:
\begin{lemma} For $I \in {\rm NN}(D_n)$, we have that $\psi_{D_n}({\sf Pan}(I))=
{\sf Krew}(\psi_{D_n}(I))$.
\end{lemma}
\begin{proof} By Proposition \ref{modsigmacomp}, we know that
$\psi_{D_n/\delta}({\sf Pan}(I))={\sf Krew}(\psi_{D_n/\delta}(I))$. If $I$ lies in a singleton
$\delta$-orbit, this is sufficient.
Now suppose $I$ lies in a doubleton $\delta$-orbit. By Proposition
\ref{modsigmacomp}, we know that ${\sf Krew}(\psi_{D_n}(I))$ and
$\psi_{D_n}({\sf Pan}(I))$ differ, if at all, only in the way that the
singleton vertices are connected to the internal vertices.
Let $v$ be a singleton vertex in $\psi_{D_n/\delta}(I)$. We know that
${\sf Krew}(v)$ is a singleton vertex in $\psi_{D_n/\delta}({\sf Pan}(I))$. In
$\psi_{D_n}(I)$, suppose that $v$ is connected to $i$. We then see
that ${\sf Krew}(v)$ is connected to $i+1$, since the last two terms
in the formula cancel each other out by Lemma \ref{schange}.
\end{proof}
\subsection{Exceptional types}
As for noncrossing partitions in Section~\ref{sec:exceptionaltypes}, the exceptional types (which, since we consider only crystallographic reflection groups, here include the dihedral group $G_2$) were verified using a computer.
\section{Parabolic induction in the classical types}
In this section, we define the notion of parabolic induction
for a collection of maps from ${\rm NN}(W)$
to $\mathcal T_W$, for $W$ a reflection group of classical type,
and we show that
the previously defined bijections $\psi_W$ satisfy this notion of
parabolic induction. Further, we show that they are uniquely characterized
by this property together with their compatibility with Panyushev
complementation and rotation.
\subsection{Type $A_{n-1}$}
First, consider the case of $W=A_{n-1}$. Pick $i$, with $1\leq i \leq n-1$.
Removing the node $i$ from the Dynkin diagram, we obtain two Dynkin diagrams,
of types $A_{i-1}$ and $A_{n-1-i}$. Given noncrossing handshake configurations
$U\in \mathcal T_{A_{i-1}}$ and $V\in \mathcal T_{A_{n-1-i}}$,
we can assemble them into a single noncrossing handshake configuration $U\ast V$ of type $A_{n-1}$,
by adding $i$ to the labels of the vertices of
$V$. (In order for this
to work if $i=1$ or $i=n-1$, we define the unique
noncrossing handshake configuration associated to
type $A_0$ to consist of two vertices, numbered $1^{(0)}$ and $1^{(1)}$,
connected by an edge.)
Suppose that $I \in {\rm NN}(A_{n-1})$ does not have $\alpha_i$ in its support.
We can
then write $I$ as a union of $I_1$ supported over a subset of
$\alpha_1,\dots,\alpha_{i-1}$, and $I_2$ supported over a subset of
$\alpha_{i+1},\dots,\alpha_{n-1}$.
We say that a collection of maps
$F_{A_{n-1}} : {\rm NN}(A_{n-1}) \longrightarrow \mathcal T_{A_{n-1}}$
satisfies parabolic induction if, whenever $I \in {\rm NN}(A_{n-1})$ satisfies that
the simple root $\alpha_i$ is not in the support of $I$, then
$$F_{A_{n-1}}(I)=F_{A_{i-1}}(I_{1}) \ast F_{A_{n-1-i}}(I_{2}).$$
\begin{proposition} The maps $\psi_{A_{n-1}}$ satisfy parabolic induction.
\end{proposition}
\begin{proof} This is an immediate corollary of
Proposition~\ref{prop:supporthandshake}. \end{proof}
\subsection{Type $C_n$}
Similarly, if we remove a simple root $\alpha_i$ from a $C_n$ Dynkin diagram,
we obtain a diagram of type $A_{i-1}$ and one of type $C_{n-i}$.
For convenience, we use $C_1$ as a pseudonym for $A_1$ here. In particular,
the noncrossing handshake configurations of type $C_1$ are just the
noncrossing handshake configurations of type $A_1$. By convention, the empty diagram is the
unique noncrossing handshake configuration of type $C_0$.
Given
noncrossing handshake configurations $U\in \mathcal T_{A_{i-1}}$ and
$V\in\mathcal T_{C_{n-i}}$, define $U\ast V$ to consist of: \begin{itemize}
\item $U$,
\item $V$ with its labels increased by $i$,
\item $U$ with each label $j$ replaced by $2n+1-j$, and superscripts
$(0)$ and $(1)$ interchanged.
\end{itemize}
Again, if $I \in {\rm NN}(C_n)$ and $\alpha_i$ is not in the support of $I$, we
can divide $I$ into antichains $I_1$ and $I_2$.
A collection of maps $F_{W}:{\rm NN}(W)\rightarrow \mathcal T_W$ for
$W$ of type $A$ or $C$ is said to satisfy parabolic induction if the
collection $F_{A_n}$ satisfies type $A$ parabolic induction and for
$I \in {\rm NN}(C_n)$, whenever
$\alpha_i$ is not in the support of $I$, we have
$$F_{C_n}(I)= F_{A_{i-1}}(I_1)\ast F_{C_{n-i}}(I_2).$$
We have the following corollary of the previous proposition:
\begin{corollary}
The maps $\psi_{A_{n}}, \psi_{C_n}$ satisfy parabolic induction.
\end{corollary}
\subsection{Type $D_n$} If we remove a simple root $\alpha_i$
from a Dynkin diagram
of type $D_n$, for $i\ne n-1, n$ (the two antennae), then we obtain
a Dynkin diagram of type $A_{i-1}$ and a Dynkin diagram of type
$D_{n-i}$. Given two noncrossing handshake configurations $U\in \mathcal T_{A_{i-1}}$ and
$V\in \mathcal T_{D_{n-i}}$, we write $U\ast V$ for the diagram consisting of:
\begin{itemize}
\item The diagram $U$,
\item The diagram $V$ with its labels increased by $i$ (including the
central ones, where the increase is taken modulo 4),
\item The diagram $U$ with label $j$ replaced by $2n-1-j$, and the
superscripts $(0)$ and $(1)$ interchanged.
\end{itemize}
(We let $D_2$ refer to the reducible root system consisting of
two orthogonal simple roots and their negatives, and let $D_3=A_3$. We interpret ``noncrossing handshake configuration of type $D_n$'' for $n=2,3$, using the type $D$ definition of
noncrossing handshake configuration.)
If we remove a simple root $\alpha_i$ from a Dynkin diagram of type $D_n$,
where $i=n-1$ or $n$, then we obtain a Dynkin diagram of type $A_{n-1}$.
We will define a pair of maps
$\operatorname{Ind}_i:\mathcal T_{A_{n-1}}\rightarrow \mathcal T_{D_n}$, as follows.
$\operatorname{Ind}_n(U)$ is defined to consist of the type $A$ diagram, with vertices
$n^{(0)}$ and $n^{(1)}$ moved to the center and renamed $n$ and $n+1$, together
with the 180 degree rotation of this diagram.
This is a type $D_n$ noncrossing handshake configuration by Lemma \ref{restatement}.
$\operatorname{Ind}_{n-1}(U)$ is obtained by adding 2 to each of the labels of the central
vertices of $\operatorname{Ind}_n(U)$.
Again, if $1\leq i \leq n-2$, and $I \in {\rm NN}(D_n)$ does not have $\alpha_i$
in its support, we can define $I_1\in {\rm NN}(A_{i-1})$ and $I_2\in{\rm NN}(D_{n-i})$.
If $i=n-1,n$, and $I$ does not have $\alpha_i$ in its support, we can
simply view $I$ as an antichain in ${\rm NN}(A_{n-1})$.
A collection of maps $F_W : {\rm NN}(W) \longrightarrow \mathcal{T}_W$ for $W=A_n,D_n$ is said to satisfy
parabolic induction if the collection
$F_{A_n}$ satisfies type $A$ parabolic induction, and:
\begin{itemize}
\item[(i)] for $1\leq i \leq n-2$, if $I \in {\rm NN}(D_n)$ does not have
$\alpha_i$ in its support, then
$$F_{D_n}(I)=F_{A_{i-1}}(I_1)\ast
F_{D_{n-i}}(I_2),$$ and
\item[(ii)] for $i=n-1,n$, if $I \in {\rm NN}(D_n)$ does not have $\alpha_i$ in its
support, then
$$F_{D_n}(I)=\operatorname{Ind}_i(F_{A_{n-1}}(I)).$$
\end{itemize}
\begin{proposition}
The maps $\psi_{D_n}$, $\psi_{A_n}$ satisfy parabolic induction.
\end{proposition}
\begin{proof}
Condition (i) follows as in the previous cases.
For condition (ii), we divide into cases.
{\it $I \in {\rm NN}(D_n)$ has neither
$\alpha_n$ nor $\alpha_{n-1}$ in its support.}
In this case, $\overline I$ does not intersect $R$. The result in
this case follows as in type $C_{n-1}$.
{\it $I \in {\rm NN}(D_n)$ has exactly one of $\alpha_n, \alpha_{n-1}$ in its support}.
In this case, $\overline I \cap R$ consists of either one root
$(n-1,n)$ or two roots $(j,n)$ and $(n-1,2n-1-j)$. It follows that
$\widehat I \cap R$ consists of either zero roots or one root.
In the former case, in the type $A_{2n-3}$ noncrossing handshake configuration associated to
$\widehat I$, there are no edges from vertices with labels at most $n-1$
to those with labels at least $n$. It follows that the innermost edges
from $H$ to $H^c$ are connected to $n^{(0)}$ and to $(n-1)^{(1)}$, and thus
that in the $D_n/\delta$ noncrossing handshake configuration, $(n-1)^{(1)}$
is a singleton vertex. The other singleton vertex with label at most
$n-1$, call it $a$, is the one that is connected to $(n-1)^{(1)}$ in the
type $A_{2n-3}$ noncrossing handshake configuration.
Now, suppose $I$ is supported over $\alpha_{n-1}$, so $s(I)=0$. We deduce that
$(n-1)^{(1)}$ is attached to $n-(2n-3)+2(n-1)=n+1$.
On the other hand, if $I$ is supported over $\alpha_n$,
$(n-1)^{(1)}$ is attached to $(n+1)+2$.
Now consider the calculation of $\psi_{A_{n-1}}(I)$. Up to the $n-1$-th
step, the same thing happens. At the $n-1$-th step, there now {\it is}
an entry in the $n-1$ column (namely, $(n-1,n)$), so we mark
$n^{(0)}$ with label $n-1$, and thus on turn $n-1$, we
connect $n^{(0)}$ to the nearest available vertex, which must be $a$,
since it and $(n-1)^{(1)}$ are the only unmatched vertices on the
left-hand side. On the final step, we join $n^{(1)}$ and $(n-1)^{(1)}$.
We see that $\psi_{D_n}(I)=\operatorname{Ind}_{n-s(I)}(\psi_{A_{n-1}}(I))$.
Next, consider the case that $\widehat I \cap R$ contains exactly one root,
say $(i,2n+1-i)$.
Consider the calculation of $\psi_{A_{2n-3}}(\widehat I)$ and of
$\psi_{A_{n-1}}(I)$ in parallel.
The same thing happens in both up to the $i$-th step.
On the $i$-th step of the $A_{n-1}$ calculation,
the label $i$ goes onto the node $n^{(0)}$, so we connect $n^{(0)}$
to $(n-1)^{(0)}$ at this point, while for the $A_{2n-3}$ calculation, we connect
$(2n+1-i)^{(0)}$ to $(2n-i)^{(0)}$. From here on, the calculations
run the same way up to and through the $n-1$-th step. In both the
calculations, there is no entry in the $(n-1)$-th column, so we
connect $(n-1)^{(1)}$ to some vertex on the left-hand side.
After this step, in the calculation of $\psi_{A_{2n-3}}(\widehat I)$,
there
are two remaining unmatched vertices whose labels are at most $n-1$. One
of them is $(n-1)^{(0)}$, while we call the other one $a$.
It follows that
the four vertices in $H$ which will eventually be matched to vertices
in $H^c$ are, in clockwise order, the vertex attached to $(n-1)^{(1)}$,
$a$, $(n-1)^{(0)}$, and (by symmetry) $n^{(0)}$. The two innermost edges
are therefore the ones attached to $a$ and $(n-1)^{(0)}$. It follows that
we will connect $a$ and $(n-1)^{(0)}$ to the internal vertices, and
$(n-1)^{(0)}$ will be connected to $n$ if $s(I)=0$ and $n+2$ if $s(I)=1$.
On the $n$-th
step of the $\psi_{A_{n-1}}(I)$ calculation, we connect $n^{(1)}$ to the
only available vertex, $a$. We therefore see that
$\operatorname{Ind}_{n-s(I)}(\psi_{A_{n-1}}(I))=\psi_{D_n}(I)$, as desired.
\end{proof}
\subsection{Uniqueness of $\psi$ in the classical types}
Finally, we show that parabolic induction determines $\psi$ uniquely
in the classical cases. In this section,
we show that:
\begin{theorem} \label{th:classicalparabolicinduction}
The only collection of bijections $F_W:{\rm NN}(W)\rightarrow \mathcal{T}_{W}$, for $W$ running over all classical irreducible reflection groups, that satisfy:
\begin{itemize}
\item[(i)] $F_W\circ {\sf Pan}={\sf Krew} \circ F_W$, and
\item[(ii)] classical parabolic induction, as defined previously,
\end{itemize}
are the maps $F_W=\psi_W$.
\end{theorem}
\begin{proof}
We have already shown that the maps $\psi_W$ do satisfy the two
properties mentioned in the theorem; we need only show that these two
properties are sufficient to characterize these functions uniquely.
By property (i), it suffices to know that, for any ${\sf Pan}$ orbit in
${\rm NN}(W)$, there is some antichain to which some parabolic induction
applies. Expressed in those terms, it is not obvious that this is true.
However,
thanks to the bijections $\psi_W$, it is sufficient to show that for
any ${\sf Krew}$ orbit in $\mathcal{T}_W$, there is a noncrossing handshake configuration which could
have arisen by parabolic induction. This is quite clear. Let $T$ be
a noncrossing handshake configuration of type $W$. Pick some
edge joining two external vertices. After applying a suitable power of
${\sf Krew}$ to $T$, the chosen edge connects $i^{(0)}$ to $i^{(1)}$.
In type $A_{n-1}$, this implies that $T$ comes from a parabolic induction
$A_{i-1}\ast A_1 \ast A_{n-i}$, where at most one of these is zero. A
completely similar approach works in type $C$ or $D$, except in the case of
$D_2$, since in that case there is a ${\sf Krew}$ orbit with no edge connecting
a pair of external vertices. However, it is easy to check that
both the elements of that orbit arise via $\operatorname{Ind}$. This completes
the proof.
\end{proof}
\section{A uniform bijection}
In this section, we prove the Main Theorem. We will begin
with the classical types.
Let $W$ be a reflection group of classical type,
and let $L,R$ be a bipartition of its simple
roots.
For each of the three classical families, we define a certain bijection
$\phi_{(L,R)}:\mathcal{T}_W\rightarrow {\rm NC}(W,c_Lc_R)$, which will be
a mild variant of $\phi_W$ as defined in Section \ref{sectiontwo}. Then
we define $\alpha_{(L,R)}:{\rm NN}(W)\rightarrow {\rm NC}(W,c_Lc_R)$ by setting
$\alpha_{(L,R)}(I)= \phi_{(L,R)}\psi_W(I)$. We then check that this bijection
satisfies the properties demanded by the Main Theorem.
Next, we show for any reflection group, classical or not, that
a bijection satisfying the conditions
of the Main Theorem is unique, if it exists. This completes
the proof for the classical types.
Our uniqueness
result also gives us an explicitly computable condition to verify
whether or not there exists a bijection satisfying the conditions of the Main Theorem for a given $W$, assuming that the bijections
are known for all parabolic subgroups. This condition was verified by
computer for the exceptional cases,
thus establishing the result for all types.
\subsection{Type $A_{n-1}$}
Let $\{s_1,\ldots,s_{n-1}\}$ with $s_i = (i,i+1)$ be the generators in type $A_{n-1}$, and let $c_L c_R$ be a bipartite Coxeter element. As mentioned in Remark~\ref{re:CoxeterElements}, we can cyclically label the vertices of the noncrossing handshake configurations in $\mathcal{T}_n$ by the Coxeter element $c_L c_R$. If $s_1 \in L$, the cyclic labelling for $\phi_{(L,R)}$ is given by \begin{align}
2^{(0)},2^{(1)},4^{(0)},4^{(1)},\ldots,3^{(0)},3^{(1)},1^{(0)},1^{(1)}, \label{eq:sL}
\end{align}
and if $s_1 \in R$, the cyclic labelling for $\phi_{(L,R)}$ is given by
\begin{align}
1^{(1)},3^{(0)},3^{(1)},\ldots,4^{(0)},4^{(1)},2^{(0)},2^{(1)},1^{(0)}. \label{eq:sR}
\end{align}
\begin{theorem}\label{th:conjectureA}
The bijections
\begin{align*}
\alpha_{A_{n-1},(L,R)}: {\rm NN}(A_{n-1}) &\tilde{\longrightarrow} {\rm NC}(A_{n-1},c_L c_R), \\
\alpha_{A_{n-1},(R,L)}: {\rm NN}(A_{n-1}) &\tilde{\longrightarrow} {\rm NC}(A_{n-1},c_R c_L)
\end{align*}
satisfy the conditions in the Main Theorem in type $A$.
\end{theorem}
\begin{proof}
We will only check the first statement; the proof of the second is identical. We must check the three properties of the Main Theorem. The initial condition is easily verified. The ${\sf Pan}={\sf Krew}$ condition follows from the facts that $\psi_{A_{n-1}}\circ {\sf Pan} = {\sf Krew}\circ\psi_{A_{n-1}}$ and $\phi_{(L,R)}\circ{\sf Krew}={\sf Krew}\circ \phi_{(L,R)}$.
As we have proved the parabolic recursion for $\psi_{A_{n-1}}$ in the previous section, it is left to prove the analogous statement for $\phi_{(L,R)}$. Let $T \in \mathcal{T}_n$ be a noncrossing handshake configuration such that $T_1 = \{i^{(0)},i^{(1)} : 1 \leq i \leq k \}$ and $T_2 = \{i^{(0)},i^{(1)} : k < i \leq n \}$ define submatchings of $T$ with vertices being labelled as in Proposition~\ref{prop:supporthandshake}. We have to show that
$$\phi_{(L,R)}(T) =
\begin{cases}
\hspace{12pt} \phi_{(L_1,R_1)}(T_1) \hspace{3pt} \phi_{(L_2,R_2)}(T_2) & \text{if } s_k \in R \\
s_k \hspace{3pt} \phi_{(L_1,R_1)}(T_1) \hspace{3pt} \phi_{(R_2,L_2)}(T_2) & \text{if } s_k \in L, \\
\end{cases}
$$
where $L_{1/2} = L \cap S_{1/2}$ and $R_{1/2} = R \cap S_{1/2}$ with $S_1 = \{s_1,\ldots,s_{k-1}\}$ and $S_2 = \{s_{k+1},\ldots,s_{n-1}\}$. This results in $4$ different cases.
\begin{itemize}
\item[Case 1:] $s_1 \in L, s_k \in R$. In this case, the labelling is as in \eqref{eq:sL} and $k$ is even. The statement follows as the labelling of $T_1$ is given by
$$2^{(0)},2^{(1)},\ldots,k^{(0)},k^{(1)},(k-1)^{(0)},(k-1)^{(1)},\ldots,1^{(0)},1^{(1)},$$
and the labelling of $T_2$ is given by the remaining labels. These are exactly the labellings obtained as well for $\phi_{(L_1,R_1)}(T_1)$ and $\phi_{(L_2,R_2)}(T_2)$.
\item[Case 2:] $s_1 \in L, s_k \in L$. In this case, the labelling is as in \eqref{eq:sL} and $k$ is odd. The labelling of $T_1$ is now given by
$$2^{(0)},2^{(1)},\ldots,(k+1)^{(0)},k^{(1)},\ldots,1^{(0)},1^{(1)},$$
and the labelling of $T_2$ is given by the remaining labels. It is a straightforward check that this differs from the labelling for $\phi_{(L_1,R_1)}(T_1)$ and $\phi_{(R_2,L_2)}(T_2)$ by having the labels $(k+1)^{(0)}$ and $k^{(0)}$ interchanged. This corresponds exactly to the additional factor $s_k$.
\end{itemize}
The remaining two cases for $s_1 \in R$ are solved in the analogous way.
\end{proof}
\subsection{Type $C_n$}
As above, the bipartite Coxeter elements in type $C_n$ can be obtained from bipartite Coxeter elements in type $A_{2n-1}$, where $-i$ and $2n+1-i$ are identified. The bijection in type $C$ then follows as a simple corollary from the construction in type $A$.
\begin{corollary}
The bijections
\begin{align*}
\alpha_{C_n,(L,R)}: {\rm NN}(C_n) &\tilde{\longrightarrow} {\rm NC}(C_n,c_L c_R), \\
\alpha_{C_n,(R,L)}: {\rm NN}(C_n) &\tilde{\longrightarrow} {\rm NC}(C_n,c_R c_L)
\end{align*}
satisfy the conditions in the Main Theorem in type $C$.
\end{corollary}
\subsection{Type $D$}
Exactly the same argument as in type $A_{n-1}$
applies to the bipartite Coxeter elements in type $D_n$. Those are obtained from the bipartite Coxeter element in type $A_{n-1}$ by adding $s_n = (n-1,-n)$ to $L$ if $n$ is even and to $R$ if $n$ is odd. E.g., in type $D_4$, we obtain the cyclic labelling on the outer circle for $c_L c_R$ given by
$$2^{(0)}, 2^{(1)}, -3^{(0)}, -3^{(1)}, -1^{(0)}, -1^{(1)}, -2^{(0)}, -2^{(1)}, 3^{(0)}, 3^{(1)}, 1^{(0)}, 1^{(1)},$$
and the inner circle labelling by $4^{(0)},4^{(1)},-4^{(0)},-4^{(1)}$. The labellings for $c_R c_L$ are again given by reflecting the labels at the diagonal through $1^{(1)}$.
\begin{corollary}
The bijections
\begin{align*}
\alpha_{D_n,(L,R)}: {\rm NN}(D_n) &\tilde{\longrightarrow} {\rm NC}(D_n,c_L c_R), \\
\alpha_{D_n,(R,L)}: {\rm NN}(D_n) &\tilde{\longrightarrow} {\rm NC}(D_n,c_R c_L)
\end{align*}
satisfy the conditions in the Main Theorem in type $D$.
\end{corollary}
\begin{proof}
The proof follows the same lines as the proof in type $A$, with the additional check for the cases in which $s_{n-1}$ or $s_n$ are not contained in the support of an antichain $I \in {\rm NN}(D_n)$. Using Theorem~\ref{th:classicalparabolicinduction} in type $D_n$, this check is straightforward.
\end{proof}
\subsection{Uniqueness}
We now establish uniqueness of the bijections satisfying the conditions of
the Main Theorem.
For $I$ with less than full support, $\alpha_{(L,R)}(I)$ is determined by
parabolic induction. By ${\sf Pan}={\sf Krew}$, there is likewise no choice for
$\alpha_{(L,R)}(J)$ for any $J$ in the ${\sf Pan}$-orbit of $I$. We saw in the
classical types,
in the proof of Theorem~\ref{th:classicalparabolicinduction}, that
every ${\sf Pan}$-orbit in ${\rm NN}(W)$ contains an antichain which does not have full support.
This fact can also easily be checked (by computer) for the exceptional types.
Therefore, there is at most one $\alpha_{(L,R)}$ satisfying the
conditions of the Main Theorem.
\subsection{Exceptional types}
The argument above for uniqueness actually proves more: it essentially
gives a candidate bijection.
Suppose that bijections as in the Main Theorem
have already been defined for all proper parabolic subgroups of $W$.
For each ${\sf Pan}$-orbit $\mathcal O$ in ${\rm NN}(W)$, pick an antichain $I_{\mathcal O}\in \mathcal O$
which does not have full support, and define
$\alpha_{(L,R)}(I_{\mathcal O})$ by parabolic induction. Now extend
the definition of $\alpha_{(L,R)}$ to all of $\mathcal O$ by ${\sf Pan}={\sf Krew}$.
We now have a candidate for a map satisfying the Main Theorem's conditions and,
as in the uniqueness argument above, if there is any map satisfying
the conditions of the Main Theorem for $W$, it must be this one.
The fact that this map really is a bijection satisfying all three
of the properties of the Main Theorem can now be verified by
computer (and has been verified) in the exceptional types. This completes the proof of
the Main Theorem.
\section{A proof of the Panyushev conjectures}
In this final section of the paper, we will use combinatorial results described in the previous sections to prove the Panyushev conjectures. The first proposition follows directly from the uniform description of the bijection.
\begin{proposition}
Part (i) of the Panyushev conjectures holds: ${\sf Pan}^{2h}$ is the identity map on ${\rm NN}(W)$.
\end{proposition}
\begin{proof}
This follows from the connection to the Kreweras complementation and the fact that ${\sf Krew}^{2h}$ is the identity map on ${\rm NC}(W)$.
\end{proof}
For all remaining proofs, we use the combinatorics obtained for the classical types, and computer checks for the exceptionals. To prove (ii) of the Panyushev conjectures, it remains to show that ${\sf Krew}^h$ acts on ${\rm NN}(W)$ by the involution induced by $-\omega_0$. Thus, we have two cases, depending on how
$-\omega_0$ acts on Dynkin diagrams:
\begin{itemize}
\item[(iia)] ${\sf Krew}^h$ acts trivially on $\Phi$ in type $C_n, D_{2n}, F_4, E_7,$ and $E_8$.
\item[(iib)] In the remaining types $A_{n-1}, D_{2n+1},$ and $E_6$, the action of ${\sf Krew}^h$ is induced by the involution on the Dynkin diagram (called $\delta$ in types $A$ and $D$).
\end{itemize}
\begin{proof}[Proof of part (ii) of the Panyushev conjectures]
In types $A$ and $C$, (iia) and (iib) follow from the symmetry property of noncrossing handshake configurations (see Lemma~\ref{lem:Binvolution}). In type $D$, (iia) and (iib) follow from the fact that rotating a type $D_n/\delta$ noncrossing handshake configuration by $2(n-1)$ steps yields the same configuration, together with the observation that, in order to obtain the same $D_n$ noncrossing handshake configuration, one must also ensure that the number of rotations applied yields a half-turn of the $4$ inner vertices.
Type $E_6$ was checked with a computer. The statements for the remaining exceptional types can be verified using the orbit lengths found in Section~\ref{sec:exceptionaltypes}.
\end{proof}
\begin{proof}[Proof of part (iii) of the Panyushev conjectures]
First we consider type $A_{n-1}$. Pick
a noncrossing handshake configuration $X$, and consider $X, {\sf Krew}(X),\dots,{\sf Krew}^{2n-1}(X)$. Each edge $e$ in
$X$ appears (rotated) in each of these noncrossing handshake configurations, and we see that some
endpoint of $e$ is labelled with $(0)$ and marked in $n-1$ of these noncrossing handshake configurations.
In a given noncrossing handshake configuration, the number of vertices labelled with $(0)$ and marked
is exactly the number of positive roots in the corresponding antichain, so we see that
the total number of positive roots in the antichains corresponding to these $2n$ noncrossing handshake
configurations is $n-1$ times the number of edges, which is $n$. It follows that the
average number of positive roots in the corresponding ${\sf Pan}$ orbit is $(n-1)/2$.
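Explicitly,
$$\frac{(n-1)\cdot n}{2n}=\frac{n-1}{2},$$
the numerator being the total number of positive roots counted over the $2n$ configurations in the orbit.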
The easiest way to prove the result for type $C_n$ is the following:
it is straightforward to check that every second antichain in a Panyushev orbit contains a positive root of the form $(i,\overline{i})$. As type $A_{2n-1}$ folds to type $C_n$, the average number of positive roots over the antichains in an orbit in type $C_n$ is given by
$$\frac{\frac{4n}{2}\frac{2n-1}{2} + \frac{2n}{2}}{4n} = \frac{n}{2}.$$
Here, the numerator contains $4n\frac{2n-1}{2}$, which is the orbit size (without symmetry) times the average number of roots per antichain in the orbit in type $A_{2n-1}$; the division by $2$ comes from the folding, and the correction term $\frac{2n}{2}$ accounts for the central root $(i,\overline{i})$, which appears in every second antichain of the orbit and is not halved by the folding. The $4n$ in the denominator is again the size of the orbit. (If we have a $k$-fold symmetry, all three pieces obtain a factor of $1/k$.) This completes the proof in type $C$.
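(As a check, for $n=2$, where type $A_3$ folds to type $C_2$, the displayed count gives $\frac{\frac{8}{2}\cdot\frac{3}{2}+\frac{4}{2}}{8}=\frac{6+2}{8}=1=\frac{n}{2}$.)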
In type $D$, the situation is again a little more involved. We will work in terms of $D_n/\delta$ configurations.
There are two different cases, based on whether or not there are four
isolated vertices on the outside.
Suppose first that there are not. Each such $D_n$ antichain
corresponds to a $C_{n-1}$ antichain, and the Panyushev map respects this
folding action. Thus, a Panyushev orbit of such $D_n$ antichains
corresponds to a Panyushev orbit of $C_{n-1}$ antichains; the
average number of roots present in these $C_{n-1}$ antichains is
$(n-1)/2$. The $D_n$ antichain $I$ corresponding to a $C_{n-1}$ antichain
$I'$ is just the inverse image of $I'$ under the folding
map from $\Phi_{D_n}$ to $\Phi_{C_{n-1}}$. The number of elements in
$I$ equals the number of elements in $I'$, plus the number of elements
in $I'$ whose inverse image consists of two roots; there will be either
one or zero such roots in $I'$. We observe that there is such a root
in $I'$ iff $n^{(0)}$ is marked. As we rotate
$\psi_{C_{n-1}}(I')$ through a full rotation,
each edge of the configuration meets the vertex $n^{(0)}$ twice, once
with each of its endpoints, and it is easy to see that on one of these occasions
$n^{(0)}$ is marked, while on the other it is unmarked. Thus, the
average effect of passing from $I'$ to $I$ is to add $\frac12$ to the
size of the antichains, resulting in an average size of $n/2$ as desired.
Now suppose that there are four isolated vertices in
$\psi_{D_n/\delta}(I)$. We consider first the average size of
$\hat I$ (which, we recall, is an antichain of type $A_{2n-3}$).
Recall that, as we consider $\psi_{A_{2n-3}}(\hat I),
\psi_{A_{2n-3}}(\widehat {{\sf Pan}(I)}),\dots$, the effect is to rotate the
noncrossing handshake configuration except that there is one pair of edges which,
at a certain point, gets switched, and then eventually switches back; in a full rotation
($4n-4$ steps) this happens twice.
Consider first an edge which is not involved in the switching. It
contributes a marked vertex $2n-3$ times (out of the $4n-4$ rotations).
Now consider the pair of edges that are involved in the switching.
One verifies directly that they contribute, together, $4n-8$ marked vertices.
The average size of the antichains $\widehat I, \widehat{{\sf Pan}(I)},$ etc.,
is $[(2n-4)(2n-3)+(4n-8)]/(4n-4)= (4n^2-10n+4)/(4n-4)$.
We next consider the average size of the sets $\overline I$,
$\overline{{\sf Pan}(I)}$, etc. Each of these contains one more root than the
corresponding antichain $\widehat I, \widehat{{\sf Pan}(I)}$, etc., so the
average size of these sets is $(4n^2 - 6n)/(4n-4)$.
Next we consider the relationship between the size of $\overline I$ and the
size of $I$. The size of $I$ is $|\overline I|/2$, plus a correction of
$\frac12$ if $\overline I$ has an element on the central diagonal.
Over $4n-4$ rotations, the correction will appear $2n$ times (i.e. two more
than half the time).
The reason for this is that, if $I$ is such that $\widehat I$ and
$\widehat {{\sf Pan}(I)}$ differ by a switch of the edges, then neither of
them will have an element on the central diagonal.
We see this because
of the fact that the switching edges are the most internal among those
connecting $H$ to $H^c$ in
$\psi_{A_{2n-3}}(\widehat I)$.
Now $\widehat I$ has
no element on the central diagonal iff $\overline I$ does have an element
on the central diagonal.
It follows that the number of elements in an antichain, averaged over a
${\sf Pan}$-orbit,
is $(4n^2-4n)/(8n-8)=n/2$.
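In terms of the quantities computed above, this last average is
$$\frac{1}{2}\cdot\frac{4n^2-6n}{4n-4}+\frac{1}{2}\cdot\frac{2n}{4n-4}=\frac{4n^2-4n}{8n-8}=\frac{n}{2},$$
the first summand being half the average size of the sets $\overline I$ and the second the average correction coming from the central diagonal.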
\end{proof}
\end{document}
\begin{document}
\begin{center}
{\Large\bf
The matrix Stieltjes moment problem: a description of all solutions.}
\end{center}
\begin{center}
{\bf S.M. Zagorodnyuk}
\end{center}
\section{Introduction.}
The matrix Stieltjes moment problem consists of
finding a left-continuous non-decreasing matrix function $M(x) = ( m_{k,l}(x) )_{k,l=0}^{N-1}$
on $\mathbb{R}_+ = [0,+\infty)$, $M(0)=0$, such that
\begin{equation}
\label{f1_1}
\int_{\mathbb{R}_+} x^n dM(x) = S_n,\qquad n\in\mathbb{Z}_+,
\end{equation}
where $\{ S_n \}_{n=0}^\infty$ is a given sequence of Hermitian $(N\times N)$ complex matrices, $N\in\mathbb{N}$.
This problem is said to be determinate if there exists a unique solution, and indeterminate in the opposite case.
In the scalar ($N=1$) indeterminate case the Stieltjes moment problem was solved by M.G.~Krein
(see~\cite{c_1000_Kr_st},\cite{c_2000_KrN}), while in the scalar degenerate case the problem was solved
by F.R.~Gantmacher in~\cite[Chapter XVI]{Cit_3000_Gantmacher}.
The operator (and, in particular, the matrix) Stieltjes moment problem was introduced by M.G.~Krein and
M.A.~Krasnoselskiy in~\cite{c_4000_Kr_Kras}. They obtained the necessary and sufficient conditions of
solvability for this problem.
\noindent
Let us introduce the following matrices
\begin{equation}
\label{f1_2}
\Gamma_n = (S_{i+j})_{i,j=0}^n = \left(
\begin{array}{cccc} S_0 & S_1 & \ldots & S_n\\
S_1 & S_2 & \ldots & S_{n+1}\\
\vdots & \vdots & \ddots & \vdots\\
S_n & S_{n+1} & \ldots & S_{2n}\end{array}
\right),
\end{equation}
\begin{equation}
\label{f1_3}
\widetilde\Gamma_n = (S_{i+j+1})_{i,j=0}^n = \left(
\begin{array}{cccc} S_1 & S_2 & \ldots & S_{n+1}\\
S_2 & S_3 & \ldots & S_{n+2}\\
\vdots & \vdots & \ddots & \vdots\\
S_{n+1} & S_{n+2} & \ldots & S_{2n+1}\end{array}
\right),\qquad n\in\mathbb{Z}_+.
\end{equation}
The moment problem~(\ref{f1_1}) has a solution if and only if
\begin{equation}
\label{f1_4}
\Gamma_n \geq 0,\quad \widetilde\Gamma_n \geq 0,\qquad n\in\mathbb{Z}_+.
\end{equation}
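For instance, in the scalar case $N=1$ with the moments $S_n = n!$ of the measure $dM(x)=e^{-x}\,dx$ on $\mathbb{R}_+$, one has
$$\Gamma_1 = \left(
\begin{array}{cc} 1 & 1\\
1 & 2\end{array}
\right) \geq 0,\qquad
\widetilde\Gamma_1 = \left(
\begin{array}{cc} 1 & 2\\
2 & 6\end{array}
\right) \geq 0,$$
and similarly $\Gamma_n\geq 0$, $\widetilde\Gamma_n\geq 0$ for all $n\in\mathbb{Z}_+$, as it must be for a solvable problem.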
In~2004, Yu.M.~Dyukarev performed a deep investigation of the moment problem~(\ref{f1_1}) in
the case when
\begin{equation}
\label{f1_5}
\Gamma_n > 0,\quad \widetilde\Gamma_n > 0,\qquad n\in\mathbb{Z}_+,
\end{equation}
and some limit matrix intervals (which he called the limit Weyl intervals) are non-degenerate, see~\cite{c_5000_D}.
He obtained a parameterization of all solutions of the moment problem in this case.
\noindent
Our aim here is to obtain a description of all solutions of the moment problem~(\ref{f1_1}) in the
general case. No conditions besides the solvability (i.e. conditions~(\ref{f1_4})) will be assumed.
We shall apply an operator approach which was used in~\cite{c_6000_Z} and Krein's formula for
the generalized $\Pi$-resolvents of non-negative Hermitian operators~\cite{c_7000_Kr},\cite{c_8000_Kr_Ovch}. We shall
use Krein's formula in the form which was proposed by V.A.~Derkach and
M.M.~Malamud in~\cite{c_9000_D_M}. We should also notice that these authors
presented a detailed proof of Krein's formula.
\noindent
{\bf Notations.} As usual, we denote by $\mathbb{R}, \mathbb{C}, \mathbb{N}, \mathbb{Z}, \mathbb{Z}_+$
the sets of real numbers, complex numbers, positive integers, integers and non-negative integers,
respectively; $\mathbb{R}_+ = [0,+\infty)$, $\mathbb{C}_+ = \{ z\in \mathbb{C}:\ \mathop{\rm Im}\nolimits z>0 \}$.
The space of $n$-dimensional complex vectors
$a = (a_0,a_1,\ldots,a_{n-1})$ will be
denoted by $\mathbb{C}^n$, $n\in \mathbb{N}$.
If $a\in \mathbb{C}^n$ then $a^*$ means the complex conjugate vector.
By $\mathbb{P}$ we denote the set of all complex polynomials.
\noindent
Let $M(x)$ be a left-continuous non-decreasing matrix function $M(x) = ( m_{k,l}(x) )_{k,l=0}^{N-1}$
on $\mathbb{R}_+$, $M(0)=0$, and $\tau_M (x) := \sum_{k=0}^{N-1} m_{k,k} (x)$;
$\Psi(x) = ( dm_{k,l}/ d\tau_M )_{k,l=0}^{N-1}$ (the Radon-Nikodym derivative).
We denote by $L^2(M)$ a set (of classes of equivalence)
of vector functions $f: \mathbb{R}\rightarrow \mathbb{C}^N$, $f = (f_0,f_1,\ldots,f_{N-1})$,
such that (see, e.g.,~\cite{c_10000_M_M})
$$ \| f \|^2_{L^2(M)} := \int_\mathbb{R} f(x) \Psi(x) f^*(x) d\tau_M (x) < \infty. $$
The space $L^2(M)$ is a Hilbert space with the scalar product
$$ ( f,g )_{L^2(M)} := \int_\mathbb{R} f(x) \Psi(x) g^*(x) d\tau_M (x),\qquad f,g\in L^2(M). $$
For a separable Hilbert space $H$ we denote by $(\cdot,\cdot)_H$ and $\| \cdot \|_H$ the scalar
product and the norm in $H$, respectively. The indices may be omitted in obvious cases.
By $E_H$ we denote the identity operator in $H$, i.e. $E_H x = x$, $x\in H$.
\noindent
For a linear operator $A$ in $H$ we denote by $D(A)$ its domain, by $R(A)$ its range, and by
$\mathop{\rm ker}\nolimits A$ its kernel. By $A^*$ we denote its adjoint if it exists.
By $\rho(A)$ we denote the resolvent set of $A$; $N_z = \mathop{\rm ker}\nolimits(A^* - zE_H)$.
If $A$ is bounded, then $\| A \|$ stands for its operator norm.
For a set of elements $\{ x_n \}_{n\in T}$ in $H$, we
denote by $\mathop{\rm Lin}\nolimits\{ x_n \}_{n\in T}$ and $\mathop{\rm span}\nolimits\{ x_n \}_{n\in T}$ the linear
span and the closed
linear span (in the norm of $H$), respectively. Here $T$ is an arbitrary set of indices.
For a set $M\subseteq H$ we denote by $\overline{M}$ the closure of $M$ with respect to the norm of $H$.
\noindent
If $H_1$ is a subspace of $H$, by $P_{H_1} = P_{H_1}^{H}$ we denote the operator of the orthogonal projection on $H_1$
in $H$. If $\mathcal{H}$ is another Hilbert space, by $[H,\mathcal{H}]$ we denote the space of all
bounded operators from $H$ into $\mathcal{H}$; $[H]:= [H,H]$.
$\mathfrak{C}(H)$ is the set of closed linear operators $A$ such that $\overline{D(A)}=H$.
\section{The matrix Stieltjes moment problem: the solvability.}
Consider the matrix Stieltjes moment problem~(\ref{f1_1}).
Let us check that conditions~(\ref{f1_4}) are necessary for the solvability
of the problem~(\ref{f1_1}). In fact, suppose that the moment problem has
a solution $M(x)$. Choose an arbitrary function
$a(x) = (a_0(x),a_1(x),...,a_{N-1}(x))$, where
$$ a_j(x) = \sum_{k=0}^n \alpha_{j,k} x^k,\quad \alpha_{j,k}\in \mathbb{C},\ n\in \mathbb{Z}_+. $$
This function belongs to $L^2(M)$ and
$$ 0 \leq \int_{\mathbb{R}_+} a(x) dM(x) a^*(x) =
\sum_{k,l=0}^n \int_{\mathbb{R}_+}(\alpha_{0,k},\alpha_{1,k},...,\alpha_{N-1,k}) x^{k+l} dM(x) $$
$$* (\alpha_{0,l},\alpha_{1,l},...,\alpha_{N-1,l})^* =
\sum_{k,l=0}^n (\alpha_{0,k},\alpha_{1,k},...,\alpha_{N-1,k}) S_{k+l} $$
$$* (\alpha_{0,l},\alpha_{1,l},...,\alpha_{N-1,l})^* =
A\Gamma_n A^*, $$
where $A = (\alpha_{0,0},\alpha_{1,0},...,\alpha_{N-1,0},\alpha_{0,1},\alpha_{1,1},...,
\alpha_{N-1,1},...,\alpha_{0,n},\alpha_{1,n},...,\alpha_{N-1,n})$,
and we have used the rules for the multiplication of block matrices.
In a similar manner we get
$$ 0 \leq \int_{\mathbb{R}_+} a(x) x dM(x) a^*(x) = A\widetilde\Gamma_n A^*, $$
and therefore conditions~(\ref{f1_4}) hold.
\noindent
On the other hand, let the moment problem~(\ref{f1_1}) be given and suppose that
conditions~(\ref{f1_4}) are true.
For the prescribed moments
$$ S_j = (s_{j;k,l})_{k,l=0}^{N-1},\quad s_{j;k,l}\in \mathbb{C},\qquad j\in \mathbb{Z}_+, $$
we consider the following block matrices
\begin{equation}
\label{f2_1}
\Gamma = (S_{i+j})_{i,j=0}^\infty =
\left(
\begin{array}{cccc}
S_{0} & S_{1} & S_2 & \ldots \\
S_{1} & S_{2} & S_3 & \ldots \\
S_{2} & S_{3} & S_4 & \ldots \\
\vdots & \vdots & \vdots & \ddots \end{array}\right),
\end{equation}
\begin{equation}
\label{f2_1_1}
\widetilde\Gamma = (S_{i+j+1})_{i,j=0}^\infty =
\left(
\begin{array}{cccc}
S_{1} & S_{2} & S_3 & \ldots \\
S_{2} & S_{3} & S_4 & \ldots \\
S_{3} & S_{4} & S_5 & \ldots \\
\vdots & \vdots & \vdots & \ddots \end{array}\right).
\end{equation}
The matrix $\Gamma$ can be viewed as a scalar semi-infinite matrix
\begin{equation}
\label{f2_2}
\Gamma = (\gamma_{n,m})_{n,m=0}^\infty,\qquad \gamma_{n,m}\in \mathbb{C}.
\end{equation}
Notice that
\begin{equation}
\label{f2_3}
\gamma_{rN+j,tN+n} = s_{r+t;j,n},\qquad r,t\in \mathbb{Z}_+,\ 0\leq j,n\leq N-1.
\end{equation}
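For instance, with $N=2$, relation~(\ref{f2_3}) gives
$$\gamma_{3,2}=\gamma_{1\cdot 2+1,\, 1\cdot 2+0}=s_{2;1,0},$$
i.e. the $(3,2)$ scalar entry of $\Gamma$ is the $(1,0)$ entry of the block $S_2$.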
The matrix $\widetilde\Gamma$ can be also viewed as a scalar semi-infinite matrix
\begin{equation}
\label{f2_3_1}
\widetilde\Gamma = (\widetilde\gamma_{n,m})_{n,m=0}^\infty =
(\gamma_{n+N,m})_{n,m=0}^\infty.
\end{equation}
The conditions in~(\ref{f1_4}) imply that
\begin{equation}
\label{f2_4}
(\gamma_{k,l})_{k,l=0}^r \geq 0,\qquad r\in \mathbb{Z}_+;
\end{equation}
\begin{equation}
\label{f2_4_1}
(\gamma_{k+N,l})_{k,l=0}^r \geq 0,\qquad r\in \mathbb{Z}_+.
\end{equation}
We shall use the following important fact (e.g.,~\cite[Supplement 1]{c_11000_AG}):
\begin{thm}
\label{t2_1}
Let $\Gamma = (\gamma_{n,m})_{n,m=0}^\infty$, $\gamma_{n,m}\in \mathbb{C}$, be a semi-infinite
complex matrix such that condition~(\ref{f2_4}) holds.
Then there exist a separable Hilbert space $H$ with a scalar product $(\cdot,\cdot)_H$ and
a sequence $\{ x_n \}_{n=0}^\infty$ in $H$, such that
\begin{equation}
\label{f2_5}
\gamma_{n,m} = (x_n,x_m)_H,\qquad n,m\in \mathbb{Z}_+,
\end{equation}
and $\mathop{\rm span}\nolimits\{ x_n \}_{n=0}^\infty = H$.
\end{thm}
{\bf Proof. }
Consider an arbitrary infinite-dimensional linear vector space $V$. For example, we can choose the linear space
of all complex sequences $(u_n)_{n\in \mathbb{Z}_+}$, $u_n\in \mathbb{C}$.
Let $X = \{ x_n \}_{n=0}^\infty$ be an arbitrary infinite sequence of linearly independent elements
in $V$. Let $L = \mathop{\rm Lin}\nolimits\{ x_n \}_{n\in\mathbb{Z}_+}$ be the linear span of elements of $X$. Introduce the following functional:
\begin{equation}
\label{f2_6}
[x,y] = \sum_{n,m=0}^\infty \gamma_{n,m} a_n\overline{b_m},
\end{equation}
for $x,y\in L$,
$$ x=\sum_{n=0}^\infty a_n x_n,\quad y=\sum_{m=0}^\infty b_m x_m,\quad a_n,b_m\in\mathbb{C}. $$
Here and in what follows we assume that for elements of linear spans all but a finite number of coefficients
are zero.
The space $L$ with $[\cdot,\cdot]$ will be a quasi-Hilbert space. Factorizing and making the completion
we obtain the required space $H$ (see~\cite{c_12000_Ber}).
$\Box$
From~(\ref{f2_3}) it follows that
\begin{equation}
\label{f2_7}
\gamma_{a+N,b} = \gamma_{a,b+N},\qquad a,b\in\mathbb{Z}_+.
\end{equation}
In fact, if $a=rN+j$, $b=tN+n$, $0\leq j,n \leq N-1$, $r,t\in\mathbb{Z}_+$, we can write
$$ \gamma_{a+N,b} = \gamma_{(r+1)N+j,tN+n} = s_{r+t+1;j,n} = \gamma_{rN+j,(t+1)N+n} = \gamma_{a,b+N}. $$
By Theorem~\ref{t2_1} there exist a Hilbert space $H$ and a sequence $\{ x_n \}_{n=0}^\infty$ in $H$,
such that $\mathop{\rm span}\nolimits\{ x_n \}_{n=0}^\infty = H$, and
\begin{equation}
\label{f2_8}
(x_n,x_m)_H = \gamma_{n,m},\qquad n,m\in\mathbb{Z}_+.
\end{equation}
Set $L := \mathop{\rm Lin}\nolimits\{ x_n \}_{n=0}^\infty$.
Notice that elements $\{ x_n \}$ are {\it not necessarily linearly independent}. Thus, for an
arbitrary $x\in L$ there can exist different representations:
\begin{equation}
\label{f2_9}
x = \sum_{k=0}^\infty \alpha_k x_k,\quad \alpha_k\in \mathbb{C},
\end{equation}
\begin{equation}
\label{f2_10}
x = \sum_{k=0}^\infty \beta_k x_k,\quad \beta_k\in \mathbb{C}.
\end{equation}
(Here all but a finite number of coefficients $\alpha_k$, $\beta_k$ are zero).
Using~(\ref{f2_7}),(\ref{f2_8}) we can write
$$ \left( \sum_{k=0}^\infty \alpha_k x_{k+N}, x_l \right) = \sum_{k=0}^\infty \alpha_k ( x_{k+N}, x_l ) =
\sum_{k=0}^\infty \alpha_k \gamma_{k+N,l} = \sum_{k=0}^\infty \alpha_k \gamma_{k,l+N} $$
$$ = \sum_{k=0}^\infty \alpha_k ( x_{k}, x_{l+N} ) =
\left( \sum_{k=0}^\infty \alpha_k x_{k}, x_{l+N} \right) = (x,x_{l+N}),\qquad l\in\mathbb{Z}_+. $$
In a similar manner we obtain that
$$ \left( \sum_{k=0}^\infty \beta_k x_{k+N}, x_l \right) = (x,x_{l+N}),\qquad l\in\mathbb{Z}_+, $$
and therefore
$$ \left( \sum_{k=0}^\infty \alpha_k x_{k+N}, x_l \right) =
\left( \sum_{k=0}^\infty \beta_k x_{k+N}, x_l \right),\qquad l\in\mathbb{Z}_+. $$
Since $\overline{L} = H$, we obtain that
\begin{equation}
\label{f2_11}
\sum_{k=0}^\infty \alpha_k x_{k+N} = \sum_{k=0}^\infty \beta_k x_{k+N}.
\end{equation}
Let us introduce the following operator:
\begin{equation}
\label{f2_12}
A x = \sum_{k=0}^\infty \alpha_k x_{k+N},\qquad x\in L,\ x = \sum_{k=0}^\infty \alpha_k x_{k}.
\end{equation}
Relations~(\ref{f2_9}),(\ref{f2_10}) and~(\ref{f2_11}) show that this definition does not
depend on the choice of a representation for $x\in L$. Thus, this definition is correct.
In particular, we have
\begin{equation}
\label{f2_13}
A x_k = x_{k+N},\qquad k\in\mathbb{Z}_+.
\end{equation}
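(For orientation: in the scalar case $N=1$, if $M(x)$ is a solution of the moment problem, then by~(\ref{f2_8}) we may identify $x_k$ with the class of the polynomial $\lambda^k$ in $L^2(M)$, since
$$(x_n,x_m)_H=\gamma_{n,m}=s_{n+m}=\int_{\mathbb{R}_+}\lambda^{n+m}\,dM(\lambda),\qquad n,m\in\mathbb{Z}_+,$$
and then $A$ acts as multiplication by the independent variable: $A:\ \lambda^k\mapsto\lambda^{k+1}$.)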
Choose arbitrary $x,y\in L$, $x = \sum_{k=0}^\infty \alpha_k x_{k}$, $y = \sum_{n=0}^\infty \gamma_n x_{n}$,
and write
$$ (Ax,y) = \left( \sum_{k=0}^\infty \alpha_k x_{k+N},\sum_{n=0}^\infty \gamma_n x_{n} \right) =
\sum_{k,n=0}^\infty \alpha_k \overline{\gamma_n} (x_{k+N},x_n) $$
$$ = \sum_{k,n=0}^\infty \alpha_k \overline{\gamma_n} (x_{k},x_{n+N}) =
\left( \sum_{k=0}^\infty \alpha_k x_{k},\sum_{n=0}^\infty \gamma_n x_{n+N} \right) =
(x,Ay). $$
By relation~(\ref{f2_4_1}) we get
$$ (Ax,x) = \left( \sum_{k=0}^\infty \alpha_k x_{k+N},\sum_{n=0}^\infty \alpha_n x_{n} \right) =
\sum_{k,n=0}^\infty \alpha_k \overline{\alpha_n} (x_{k+N},x_n) $$
$$ = \sum_{k,n=0}^\infty \alpha_k \overline{\alpha_n} \gamma_{k+N,n} \geq 0. $$
Thus, the operator $A$ is a linear non-negative Hermitian operator in $H$ with the domain $D(A)=L$.
Such an operator has a non-negative self-adjoint extension~\cite[Theorem 7, p.450]{c_13000_Kr}.
Let $\widetilde A\supseteq A$ be an arbitrary non-negative self-adjoint extension of $A$ in a Hilbert space
$\widetilde H\supseteq H$, and $\{ \widetilde E_\lambda \}_{\lambda\in \mathbb{R}_+}$ be its left-continuous orthogonal
resolution of unity.
Choose an arbitrary $a\in \mathbb{Z}_+$, $a=rN + j$, $r\in \mathbb{Z}_+$, $0\leq j\leq N-1$. Notice that
$$ x_a = x_{rN+j} = A x_{(r-1)N+j} = ... = A^r x_j. $$
Using~(\ref{f2_3}),(\ref{f2_8}) we can write
$$ s_{r+t;j,n} = \gamma_{rN+j,tN+n} = ( x_{rN+j},x_{tN+n} )_H = (A^r x_j, A^t x_n)_H $$
$$ = ( \widetilde A^r x_j, \widetilde A^t x_n)_{\widetilde H} =
\left( \int_{\mathbb{R}_+} \lambda^r d\widetilde E_\lambda x_j, \int_{\mathbb{R}_+} \lambda^t d\widetilde E_\lambda x_n
\right)_{\widetilde H} $$
$$ = \int_{\mathbb{R}_+} \lambda^{r+t} d (\widetilde E_\lambda x_j, x_n)_{\widetilde H} =
\int_{\mathbb{R}_+} \lambda^{r+t} d \left( P^{\widetilde H}_H \widetilde E_\lambda x_j, x_n \right)_{H}. $$
Let us write the last relation in a matrix form:
\begin{equation}
\label{f2_14}
S_{r+t} = \int_{\mathbb{R}_+} \lambda^{r+t} d \widetilde M(\lambda),\qquad r,t\in\mathbb{Z}_+,
\end{equation}
where
\begin{equation}
\label{f2_15}
\widetilde M(\lambda) := \left( \left( P^{\widetilde H}_H \widetilde E_\lambda x_j,
x_n \right)_{H} \right)_{j,n=0}^{N-1}.
\end{equation}
If we set $t=0$ in relation~(\ref{f2_14}), we obtain that the matrix function $\widetilde M(\lambda)$ is
a solution of the matrix Stieltjes moment problem~(\ref{f1_1}). In fact, from the properties of the
orthogonal resolution of unity it easily follows that $\widetilde M (\lambda)$ is left-continuous non-decreasing and
$\widetilde M(0) = 0$.
Thus, we obtained another proof of the solvability criterion for the matrix Stieltjes moment problem~(\ref{f1_1}):
\begin{thm}
\label{t2_2}
Let a matrix Stieltjes moment problem~(\ref{f1_1}) be given. This problem has a solution if and only if
conditions~(\ref{f1_4}) hold true.
\end{thm}
\section{A description of solutions.}
Let $B$ be an arbitrary non-negative Hermitian operator in a Hilbert space $\mathcal{H}$.
Choose an arbitrary non-negative self-adjoint extension $\widehat B$ of $B$ in a Hilbert space
$\widehat{\mathcal{H}} \supseteq \mathcal{H}$.
Let $R_z(\widehat B)$ be the resolvent of $\widehat B$ and $\{ \widehat E_\lambda\}_{\lambda\in \mathbb{R}_+}$
be the orthogonal left-continuous resolution of unity of $\widehat B$. Recall that the operator-valued function
$\mathbf R_z = P_{ \mathcal{H} }^{ \widehat{\mathcal{H}} } R_z(\widehat B)$ is called {\bf a generalized
$\Pi$-resolvent of $B$}, $z\in\mathbb{C}\backslash\mathbb{R}$~\cite{c_8000_Kr_Ovch}.
If $\widehat{\mathcal{H}} = \mathcal{H}$ then $R_z(\widehat B)$ is called {\bf a canonical $\Pi$-resolvent}.
The function
$\mathbf E_\lambda = P_{\mathcal{H}}^{\widehat{\mathcal{H}}} \widehat E_\lambda$, $\lambda\in\mathbb{R}$, is called
a {\bf $\Pi$-spectral
function} of the non-negative Hermitian operator $B$.
There exists a one-to-one correspondence between generalized $\Pi$-resolvents and $\Pi$-spectral functions
established by the following relation (\cite{c_11000_AG}):
\begin{equation}
\label{f3_1}
(\mathbf R_z f,g)_{\mathcal{H}} = \int_{\mathbb{R}_+} \frac{1}{\lambda - z}
d( \mathbf E_\lambda f,g)_{\mathcal{H}},\qquad f,g\in \mathcal{H},\
z\in \mathbb{C}\backslash \mathbb{R}.
\end{equation}
Denote the set of all generalized $\Pi$-resolvents of $B$ by $\Omega^0(-\infty,0)=\Omega^0(-\infty,0)(B)$.
Let a moment problem~(\ref{f1_1}) be given and conditions~(\ref{f1_4}) hold.
Consider the operator $A$ defined as in~(\ref{f2_12}).
Formula~(\ref{f2_15}) shows that $\Pi$-spectral functions of the operator $A$ produce
solutions of the matrix Stieltjes moment problem~(\ref{f1_1}).
Let us show that an arbitrary solution of~(\ref{f1_1}) can be produced in this way.
\noindent
Choose an arbitrary solution $\widehat M(x) = ( \widehat m_{k,l}(x) )_{k,l=0}^{N-1}$ of
the matrix Stieltjes moment problem~(\ref{f1_1}). Consider the space $L^2(\widehat M)$ and
let $Q$ be the operator of multiplication by an independent variable in $L^2(\widehat M)$.
The operator $Q$ is self-adjoint and its resolution of unity is given by (see~\cite{c_10000_M_M})
\begin{equation}
\label{f3_2}
E_b - E_a = E([a,b)): h(x) \rightarrow \chi_{[a,b)}(x) h(x),
\end{equation}
where $\chi_{[a,b)}(x)$ is the characteristic function of an interval $[a,b)$, $0 \leq a<b\leq +\infty$.
Set
$$ \vec e_k = (e_{k,0},e_{k,1},\ldots,e_{k,N-1}),\quad e_{k,j}=\delta_{k,j},\qquad 0\leq j\leq N-1, $$
where $k=0,1,\ldots,N-1$.
The set of (equivalence classes of) functions $f\in L^2(\widehat M)$ whose class contains a representative
$f=(f_0,f_1,\ldots, f_{N-1})$ with $f\in \mathbb{P}$ we denote
by $\mathbb{P}^2(\widehat M)$. It is called the set of vector polynomials in $L^2(\widehat M)$.
Set $L^2_0(\widehat M) := \overline{ \mathbb{P}^2(\widehat M) }$.
For an arbitrary (representative) $f\in \mathbb{P}^2(\widehat M)$ there
exists a unique representation of the following form:
\begin{equation}
\label{f3_3}
f(x) = \sum_{k=0}^{N-1}
\sum_{j=0}^\infty \alpha_{k,j} x^j \vec e_k,\quad \alpha_{k,j}\in \mathbb{C}.
\end{equation}
Here the sum is assumed to be finite.
Let $g\in \mathbb{P}^2(\widehat M)$ have a representation
\begin{equation}
\label{f3_4}
g(x) = \sum_{l=0}^{N-1} \sum_{r=0}^\infty \beta_{l,r} x^r \vec e_l,\quad \beta_{l,r}\in \mathbb{C}.
\end{equation}
Then we can write
$$ (f,g)_{L^2(\widehat M)} = \sum_{k,l=0}^{N-1} \sum_{j,r=0}^\infty \alpha_{k,j}\overline{\beta_{l,r}}
\int_\mathbb{R} x^{j+r} \vec e_k d\widehat M(x) \vec e_l^* $$
\begin{equation}
\label{f3_5}
= \sum_{k,l=0}^{N-1}
\sum_{j,r=0}^\infty \alpha_{k,j}\overline{\beta_{l,r}}
\int_\mathbb{R} x^{j+r} d\widehat m_{k,l}(x)
= \sum_{k,l=0}^{N-1} \sum_{j,r=0}^\infty \alpha_{k,j}\overline{\beta_{l,r}}
s_{j+r;k,l}.
\end{equation}
On the other hand, we can write
$$ \left( \sum_{j=0}^\infty \sum_{k=0}^{N-1} \alpha_{k,j} x_{jN+k},
\sum_{r=0}^\infty \sum_{l=0}^{N-1} \beta_{l,r} x_{rN+l} \right)_H =
\sum_{k,l=0}^{N-1} \sum_{j,r=0}^\infty \alpha_{k,j}\overline{\beta_{l,r}}
(x_{jN+k}, x_{rN+l})_H $$
\begin{equation}
\label{f3_6}
= \sum_{k,l=0}^{N-1} \sum_{j,r=0}^\infty \alpha_{k,j}\overline{\beta_{l,r}}
\gamma_{jN+k,rN+l}
= \sum_{k,l=0}^{N-1} \sum_{j,r=0}^\infty \alpha_{k,j}\overline{\beta_{l,r}}
s_{j+r;k,l}.
\end{equation}
From relations~(\ref{f3_5}),(\ref{f3_6}) it follows that
\begin{equation}
\label{f3_7}
(f,g)_{L^2(\widehat M)} = \left( \sum_{j=0}^\infty \sum_{k=0}^{N-1} \alpha_{k,j} x_{jN+k},
\sum_{r=0}^\infty \sum_{l=0}^{N-1} \beta_{l,r} x_{rN+l} \right)_H.
\end{equation}
Let us introduce the following operator:
\begin{equation}
\label{f3_8}
Vf = \sum_{j=0}^\infty \sum_{k=0}^{N-1} \alpha_{k,j} x_{jN+k},
\end{equation}
for $f(x)\in \mathbb{P}^2(\widehat M)$, $f(x) = \sum_{k=0}^{N-1} \sum_{j=0}^\infty \alpha_{k,j} x^j \vec e_k$,
$\alpha_{k,j}\in \mathbb{C}$.
Let us show that this definition is correct. In fact,
if vector polynomials $f$, $g$ have representations~(\ref{f3_3}),(\ref{f3_4}), and
$\| f-g \|_{L^2(\widehat M)} = 0$, then
from~(\ref{f3_7}) it follows that $V(f-g)=0$.
Thus, $V$ is a correctly defined operator from $\mathbb{P}^2(\widehat M)$ into $H$.
Relation~(\ref{f3_7}) shows that $V$ is an isometric transformation from $\mathbb{P}^2(\widehat M)$ onto $L$.
By continuity we extend it to an isometric transformation from $L^2_0(\widehat M)$ onto $H$.
In particular, we note that
\begin{equation}
\label{f3_9}
V x^j \vec e_k = x_{jN+k},\qquad j\in \mathbb{Z}_+;\quad 0\leq k\leq N-1.
\end{equation}
Set $L^2_1 (\widehat M) := L^2(\widehat M)\ominus L^2_0 (\widehat M)$, and
$U := V\oplus E_{L^2_1 (\widehat M)}$. The operator $U$ is
an isometric transformation from $L^2(\widehat M)$ onto $H\oplus L^2_1 (\widehat M)=:\widehat H$.
Set
$$ \widehat A := UQU^{-1}. $$
The operator $\widehat A$ is a non-negative self-adjoint operator in $\widehat H$.
Let $\{ \widehat E_\lambda \}_{\lambda\in\mathbb{R}_+}$
be its left-continuous orthogonal resolution of unity.
Notice that
$$ UQU^{-1} x_{jN+k} = VQV^{-1} x_{jN+k} = VQ x^j \vec e_k = V x^{j+1} \vec e_k =
x_{(j+1)N+k} $$
$$ = x_{jN+k+N} = Ax_{jN+k},\qquad j\in\mathbb{Z}_+;\quad 0\leq k\leq N-1. $$
By linearity we get
$$ UQU^{-1} x = Ax,\qquad x\in L = D(A), $$
and therefore $\widehat A\supseteq A$.
Choose an arbitrary $z\in\mathbb{C}\backslash\mathbb{R}$ and write
$$ \int_{\mathbb{R}_+} \frac{1}{\lambda - z} d( \widehat E_\lambda x_k, x_j)_{\widehat H} =
\left( \int_{\mathbb{R}_+} \frac{1}{\lambda - z} d\widehat E_\lambda x_k, x_j \right)_{\widehat H} $$
$$ = \left( U^{-1} \int_{\mathbb{R}_+} \frac{1}{\lambda - z} d\widehat E_\lambda x_k, U^{-1} x_j \right)_{L^2(\widehat M)} $$
$$ = \left( \int_{\mathbb{R}_+} \frac{1}{\lambda - z} d U^{-1} \widehat E_\lambda U \vec e_k, \vec e_j \right)_{L^2(\widehat M)} =
\left( \int_{\mathbb{R}_+} \frac{1}{\lambda - z} d E_{\lambda} \vec e_k, \vec e_j \right)_{L^2(\widehat M)} $$
\begin{equation}
\label{f3_10}
= \int_{\mathbb{R}_+} \frac{1}{\lambda - z} d(E_{\lambda} \vec e_k, \vec e_j)_{L^2(\widehat M)},\qquad 0\leq k,j\leq N-1.
\end{equation}
Using~(\ref{f3_2}) we can write
$$ (E_{\lambda} \vec e_k, \vec e_j)_{L^2(\widehat M)} = \widehat m_{k,j}(\lambda), $$
and therefore
\begin{equation}
\label{f3_11}
\int_{\mathbb{R}_+} \frac{1}{\lambda - z} d( P^{\widehat H}_H \widehat E_\lambda x_k, x_j)_H =
\int_{\mathbb{R}_+} \frac{1}{\lambda - z} d\widehat m_{k,j}(\lambda),\qquad 0\leq k,j\leq N-1.
\end{equation}
By the Stieltjes-Perron inversion formula (see, e.g.,~\cite{c_14000_Akh}) we conclude that
\begin{equation}
\label{f3_12}
\widehat m_{k,j} (\lambda) = ( P^{\widehat H}_H \widehat E_\lambda x_k, x_j)_H.
\end{equation}
\begin{prop}
\label{p3_1}
Let the matrix Stieltjes moment problem~(\ref{f1_1}) be given and conditions~(\ref{f1_4}) hold.
Let $A$ be a non-negative Hermitian operator which is defined by~(\ref{f2_12}).
The deficiency index of $A$ is equal to $(n,n)$, $0\leq n\leq N$.
\end{prop}
{\bf Proof. }
Choose an arbitrary $u\in L$, $u = \sum_{k=0}^\infty c_k x_k$, $c_k\in \mathbb{C}$. Suppose that
$c_k = 0$, $k\geq N+R+1$, for some $R\in \mathbb{Z}_+$. Consider the following system of linear equations:
\begin{equation}
\label{f3_13}
-z d_k = c_k,\qquad k=0,1,...,N-1;
\end{equation}
\begin{equation}
\label{f3_14}
d_{k-N} - z d_k = c_k,\qquad k=N,N+1,N+2,...;
\end{equation}
where $\{ d_k \}_{k\in \mathbb{Z}_+}$ are unknown complex numbers, $z\in \mathbb{C}\backslash \mathbb{R}$ is a
fixed parameter.
Set
$$ d_k = 0,\qquad k\geq R+1; $$
\begin{equation}
\label{f3_15}
d_{j} = c_{N+j} + z d_{N+j},\qquad j=R,R-1,R-2,...,0.
\end{equation}
For the numbers $\{ d_k \}_{k\in\mathbb{Z}_+}$ defined in this way, all equations in~(\ref{f3_14}) are satisfied.
However, equations~(\ref{f3_13}) are not necessarily satisfied.
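For instance, if $R=0$, i.e. $c_k=0$ for $k\geq N+1$, then $d_k=0$ for $k\geq 1$ and $d_0=c_N$; all equations~(\ref{f3_14}) hold, while the equation of~(\ref{f3_13}) with $k=0$ reads $-zc_N = c_0$ and fails in general.
Set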
$$ v = \sum_{k=0}^\infty d_k x_k,\ v\in L. $$
Notice that
$$ (A-zE_H) v = \sum_{k=0}^\infty (d_{k-N} - z d_k) x_k, $$
where $d_{-1}=d_{-2}=...=d_{-N}=0$.
By the construction of $d_k$ we have
$$ (A-zE_H) v - u = \sum_{k=0}^\infty (d_{k-N} - z d_k - c_k) x_k =
\sum_{k=0}^{N-1} (-zd_k - c_k) x_k; $$
\begin{equation}
\label{f3_16}
u = (A-zE_H) v + \sum_{k=0}^{N-1} (zd_k + c_k) x_k,\qquad u\in L.
\end{equation}
Set
$$ H_z := \overline{(A-zE_H) L} = (\overline{A} - zE_H) D(\overline{A}), $$
and
\begin{equation}
\label{f3_17}
y_k := x_k - P^H_{H_z} x_k,\qquad k=0,1,...,N-1.
\end{equation}
Set
$$ H_0 := \mathop{\rm span}\nolimits\{ y_k \}_{k=0}^{N-1}. $$
Notice that the dimension of $H_0$ is less than or equal to $N$, and $H_0\perp H_z$.
From~(\ref{f3_16}) it follows that $u\in L$ can be represented in the following form:
\begin{equation}
\label{f3_18}
u = u_1 + u_2,\qquad u_1\in H_z,\quad u_2\in H_0.
\end{equation}
Therefore we get $L\subseteq H_z\oplus H_0$; $H\subseteq H_z\oplus H_0$, and finally
$H=H_z\oplus H_0$. Thus, $H_0$ is the corresponding defect subspace.
So, the defect numbers of $A$ are less than or equal to $N$. Since the operator $A$ is non-negative,
its defect numbers are equal to each other.
$\Box$
\begin{thm}
\label{t3_1}
Let a matrix Stieltjes moment problem~(\ref{f1_1}) be given and
conditions~(\ref{f1_4}) hold. Let an operator $A$ be constructed for the
moment problem as in~(\ref{f2_12}).
All solutions of the moment problem have the following form
\begin{equation}
\label{f3_19}
M(\lambda) = (m_{k,j} (\lambda))_{k,j=0}^{N-1},\quad
m_{k,j} (\lambda) = ( \mathbf E_\lambda x_k, x_j)_H,
\end{equation}
where $\mathbf E_\lambda$ is a $\Pi$-spectral function of the operator $A$.
Moreover, the correspondence between all $\Pi$-spectral functions of $A$ and all solutions
of the moment problem is one-to-one.
\end{thm}
{\bf Proof. }
It remains to prove that different $\Pi$-spectral functions of the operator $A$ produce different
solutions of the moment problem~(\ref{f1_1}).
Suppose to the contrary that two different $\Pi$-spectral functions produce the same solution of
the moment problem. That means that
there exist two non-negative self-adjoint extensions
$A_j\supseteq A$, in Hilbert spaces $H_j\supseteq H$, such that
\begin{equation}
\label{f3_20}
P_{H}^{H_1} E_{1,\lambda} \not= P_{H}^{H_2} E_{2,\lambda},
\end{equation}
\begin{equation}
\label{f3_21}
(P_{H}^{H_1} E_{1,\lambda} x_k,x_j)_H = (P_{H}^{H_2} E_{2,\lambda} x_k,x_j)_H,\qquad 0\leq k,j\leq N-1,\quad
\lambda\in\mathbb{R}_+,
\end{equation}
where $\{ E_{n,\lambda} \}_{\lambda\in\mathbb{R}_+}$ are orthogonal left-continuous resolutions of unity of
operators $A_n$, $n=1,2$.
Set $L_N := \mathop{\rm Lin}\nolimits\{ x_k \}_{k=0}^{N-1}$. By linearity we get
\begin{equation}
\label{f3_22}
(P_{H}^{H_1} E_{1,\lambda} x,y)_H = (P_{H}^{H_2} E_{2,\lambda} x,y)_H,\qquad x,y\in L_N,\quad \lambda\in\mathbb{R}_+.
\end{equation}
Denote by $R_{n,\lambda}$ the resolvent of $A_n$, and set $\mathbf R_{n,\lambda} := P_{H}^{H_n} R_{n,\lambda}$, $n=1,2$.
From~(\ref{f3_22}),(\ref{f3_1}) it follows that
\begin{equation}
\label{f3_23}
(\mathbf R_{1,z} x,y)_H = (\mathbf R_{2,z} x,y)_H,\qquad x,y\in L_N,\quad z\in \mathbb{C}\backslash \mathbb{R}.
\end{equation}
Choose an arbitrary $z\in\mathbb{C}\backslash\mathbb{R}$ and consider the space $H_z$ defined as above.
Since
$$ R_{j,z} (A-zE_H) x = (A_j - z E_{H_j} )^{-1} (A_j - z E_{H_j}) x = x,\qquad x\in L=D(A),$$
we get
\begin{equation}
\label{f3_24}
R_{1,z} u = R_{2,z} u \in H,\qquad u\in H_z;
\end{equation}
\begin{equation}
\label{f3_25}
\mathbf R_{1,z} u = \mathbf R_{2,z} u,\qquad u\in H_z,\ z\in\mathbb{C}\backslash\mathbb{R}.
\end{equation}
We can write
$$ (\mathbf R_{n,z} x, u)_H = (R_{n,z} x, u)_{H_n} = ( x, R_{n,\overline{z}}u)_{H_n} =
( x, \mathbf R_{n,\overline{z}} u)_H, $$
\begin{equation}
\label{f3_26}
x\in L_N,\ u\in H_{\overline z},\ n=1,2,
\end{equation}
and therefore we get
\begin{equation}
\label{f3_27}
(\mathbf R_{1,z} x,u)_H = (\mathbf R_{2,z} x,u)_H,\qquad x\in L_N,\ u\in H_{\overline z}.
\end{equation}
By~(\ref{f3_16}) an arbitrary element $y\in L$ can be represented as $y=y_{ \overline{z} } + y'$,
$y_{ \overline{z} }\in H_{ \overline{z} }$, $y'\in L_N$.
Using~(\ref{f3_23}) and~(\ref{f3_25}) we get
$$ (\mathbf R_{1,z} x,y)_H = (\mathbf R_{1,z} x, y_{ \overline{z} } + y')_H $$
$$ = (\mathbf R_{2,z} x, y_{ \overline{z} } + y')_H = (\mathbf R_{2,z} x,y)_H,\qquad x\in L_N,\ y\in L. $$
Since $\overline{L}=H$, we obtain
\begin{equation}
\label{f3_28}
\mathbf R_{1,z} x = \mathbf R_{2,z} x,\qquad x\in L_N,\ z\in\mathbb{C}\backslash\mathbb{R}.
\end{equation}
For an arbitrary $x\in L$, $x=x_z + x'$, $x_z\in H_z$, $x'\in L_N$, using
relations~(\ref{f3_25}),(\ref{f3_28}) we obtain
\begin{equation}
\label{f3_29}
\mathbf R_{1,z} x = \mathbf R_{1,z} (x_z + x') =
\mathbf R_{2,z} (x_z + x') = \mathbf R_{2,z} x,\qquad x\in L,\ z\in\mathbb{C}\backslash\mathbb{R},
\end{equation}
and
\begin{equation}
\label{f3_30}
\mathbf R_{1,z} x = \mathbf R_{2,z} x,\qquad x\in H,\ z\in\mathbb{C}\backslash\mathbb{R}.
\end{equation}
By~(\ref{f3_1}) that means that the $\Pi$-spectral functions coincide and we obtain a
contradiction.
$\Box$
We shall recall some basic definitions and facts from~\cite{c_9000_D_M}.
Let $A$ be a closed Hermitian operator in a Hilbert space $H$, $\overline{D(A)} = H$.
\begin{dfn}
\label{d3_1}
A collection $\{ \mathcal{H}, \Gamma_1, \Gamma_2 \}$ in which $\mathcal{H}$ is a Hilbert space,
$\Gamma_1, \Gamma_2 \in [D(A^*),\mathcal{H}]$, is called {\bf a space of boundary values (SBV)} for
$A^*$, if
\noindent
(1) $(A^* f,g)_H - (f,A^* g)_H = (\Gamma_1 f,\Gamma_2 g)_{\mathcal{H}} - (\Gamma_2 f, \Gamma_1 g)_{\mathcal{H}}$,
$\forall f,g\in D(A^*)$;
\noindent
(2) the mapping $\Gamma: f\rightarrow \{ \Gamma_1 f,\Gamma_2 f \}$ from $D(A^*)$ to $\mathcal{H}\oplus
\mathcal{H}$ is surjective.
\end{dfn}
Naturally associated with each SBV are self-adjoint operators $\widetilde A_1,\widetilde A_2\ (\subset A^*)$ with
$$ D(\widetilde A_1) = \mathop{\rm ker}\nolimits \Gamma_1,\ D(\widetilde A_2) = \mathop{\rm ker}\nolimits \Gamma_2. $$
The operator $\Gamma_2$ restricted to the defect subspace $N_z = \mathop{\rm ker}\nolimits(A^* - zE_H)$,
$z\in \rho(\widetilde A_2)$, is boundedly invertible. For
all $z\in \rho(\widetilde A_2)$ set
\begin{equation}
\label{f3_31}
\gamma(z) = \left( \Gamma_2|_{N_z} \right)^{-1} \in [\mathcal{H},N_z].
\end{equation}
\begin{dfn}
\label{d3_2}
The operator-valued function $M(z)$ defined for $z\in\rho(\widetilde A_2)$ by
\begin{equation}
\label{f3_32}
M(z) \Gamma_2 f_z = \Gamma_1 f_z,\qquad f_z\in N_z,
\end{equation}
is called {\bf a Weyl function} of the operator $A$, corresponding to SBV $\{ \mathcal{H}, \Gamma_1, \Gamma_2 \}$.
\end{dfn}
The Weyl function can also be obtained from the equality:
\begin{equation}
\label{f3_33}
M(z) = \Gamma_1 \gamma(z),\qquad z\in \rho(\widetilde A_2).
\end{equation}
For an arbitrary operator $\widetilde A = \widetilde A^* \subset A^*$ there exists an SBV with (\cite{c_15000_D_M})
\begin{equation}
\label{f3_34}
D(\widetilde A_2) = \mathop{\rm ker}\nolimits\Gamma_2 = D(\widetilde A).
\end{equation}
(There even exists a family of such SBVs.)
An extension $\widehat A$ of $A$ is called {\bf proper} if $A\subset\widehat A\subset A^*$ and
$(\widehat A^*)^* = \widehat A$. Two proper extensions $\widehat A_1$ and $\widehat A_2$ are
{\bf disjoint} if $D(\widehat A_1)\cap D(\widehat A_2) = D(A)$ and
{\bf transversal} if they are disjoint and $D(\widehat A_1) + D(\widehat A_2) = D(A^*)$.
Suppose that the operator $A$ is non-negative, $A\geq 0$. In this case there exist
two non-negative self-adjoint extensions of $A$ in $H$, the Friedrichs extension $A_\mu$ and the Krein extension $A_M$, such that
for an arbitrary non-negative self-adjoint extension $\widehat A$ of $A$ in $H$ it holds:
\begin{equation}
\label{f3_35}
(A_\mu + xE_H)^{-1} \leq (\widehat A + xE_H)^{-1} \leq (A_M + xE_H)^{-1},\qquad x\in \mathbb{R}_+.
\end{equation}
Recall some definitions and facts from~\cite{c_8000_Kr_Ovch},\cite{c_13000_Kr}.
For the non-negative operator $A$ we put into correspondence the following operator:
\begin{equation}
\label{f3_36}
T = (E_H - A)(E_H + A)^{-1} = -E_H + 2(E_H + A)^{-1},\qquad D(T) = (A+E_H)D(A).
\end{equation}
The operator $T$ is a Hermitian contraction (i.e., $\| T \| \leq 1$). Its domain is not dense in $H$
if $A$ is not self-adjoint. The defect subspace is $H\ominus D(T) = N_{-1}$, and its dimension is equal
to the defect number $n(A)$ of $A$.
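For instance, if $A$ is the operator of multiplication by a number $a\geq 0$ in $H=\mathbb{C}$, then $T = \frac{1-a}{1+a}\in (-1,1]$, and conversely $a = \frac{1-t}{1+t}\geq 0$ for every $t\in (-1,1]$.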
The inverse transformation to~(\ref{f3_36}) is given by
\begin{equation}
\label{f3_37}
A = (E_H - T)(E_H + T)^{-1} = -E_H + 2(E_H + T)^{-1},\qquad D(A) = (T+E_H)D(T).
\end{equation}
Relations~(\ref{f3_36}),(\ref{f3_37}) (with $\widehat T,\widehat A$ instead of $T,A$) also
establish a bijective correspondence between
self-adjoint contractive extensions $\widehat T\supseteq T$ in $H$ and self-adjoint non-negative
extensions $\widehat A\supseteq A$ in $H$ (\cite[p.451]{c_13000_Kr}).
\noindent
Consider an arbitrary Hilbert space $\widehat H \supseteq H$. It is not hard to see that
relations~(\ref{f3_36}),(\ref{f3_37}) (with $\widehat T,\widehat A$ instead of $T,A$) establish
a bijective correspondence between
self-adjoint contractive extensions $\widehat T\supseteq T$ in $\widehat H$ and self-adjoint non-negative
extensions $\widehat A\supseteq A$ in $\widehat H$, as well.
There exist extremal self-adjoint contractive extensions $T_\mu$ and $T_M$ of $T$ in $H$ such that for
an arbitrary self-adjoint contractive extension $\widetilde T\supseteq T$
in $H$ it holds that
\begin{equation}
\label{f3_38}
T_\mu \leq \widetilde T \leq T_M.
\end{equation}
Notice that
\begin{equation}
\label{f3_39}
A_\mu = -E_H + 2(E_H + T_\mu)^{-1},\quad A_M = -E_H + 2(E_H + T_M)^{-1}.
\end{equation}
Set
\begin{equation}
\label{f3_40}
C = T_M - T_\mu.
\end{equation}
Consider the following subspace:
\begin{equation}
\label{f3_41}
\Upsilon = \mathop{\rm ker}\nolimits \left( C|_{ N_{-1} } \right).
\end{equation}
\begin{dfn}
\label{d3_3}
Let a closed non-negative Hermitian operator $A$ be given. We say that $A$ is in
{\bf the completely indeterminate case} if $\Upsilon = \{ 0 \}$.
\end{dfn}
By Theorem~1.4 in~\cite{c_16000_KO}, on the set $\{ x\in H:\ T_\mu x = T_M x \} = \mathop{\rm ker}\nolimits C$,
all self-adjoint contractive extensions of $T$ in a Hilbert space $\widetilde H\supseteq H$ coincide.
Thus, all such extensions are extensions of the operator $T_{ext}$:
\begin{equation}
\label{f3_42}
T_{ext} x = \left\{\begin{array}{cc} Tx, & x\in D(T)\\
T_\mu x = T_M x, & x\in \mathop{\rm ker}\nolimits C \end{array}\right..
\end{equation}
Introduce the following operator:
\begin{equation}
\label{f3_43}
A_{ext} = -E_H + 2(E_H + T_{ext})^{-1} \supseteq A.
\end{equation}
Thus, {\it the set of all non-negative self-adjoint extensions of $A$ coincides with the set of
all non-negative self-adjoint extensions of $A_{ext}$.}
Since $T_{ext,\mu} = T_\mu$ and $T_{ext,M} = T_M$, the operator $A_{ext}$ is in
the completely indeterminate case.
\begin{prop}
\label{p3_2}
Let $A$ be a closed non-negative Hermitian operator with finite defect numbers
which is in the completely indeterminate case.
Then extensions $A_\mu$ and $A_M$ given by~(\ref{f3_39}) are transversal.
\end{prop}
{\bf Proof. }
Notice that
\begin{equation}
\label{f3_44}
D(A_M) \cap D(A_\mu) = D(A).
\end{equation}
In fact, suppose that there exists $y\in D(A_M) \cap D(A_\mu)$, $y\notin D(A)$.
Since $A_M\subset A^*$ and $A_\mu\subset A^*$ we have $A_M y = A_\mu y$.
Set
$$ g := (A_M + E_H)y = (A_\mu + E_H)y . $$
Then
$$ T_M g = -g + 2(E_H + A_M)^{-1}g = -g + 2y, $$
$$ T_\mu g = -g + 2(E_H + A_\mu)^{-1}g = -g + 2y, $$
and therefore $Cg = (T_M - T_\mu)g = 0$. Since $y\notin D(A)$, we get $g\in N_{-1}$ and $g\not= 0$.
This contradicts the assumption that $A$ is in the completely indeterminate case.
Introduce the following sets:
\begin{equation}
\label{f3_45}
D_M := (A_M + E_H)^{-1} N_{-1},\quad D_\mu := (A_\mu + E_H)^{-1} N_{-1}.
\end{equation}
Since $D(A_M) = (A_M + E_H)^{-1} D(T_M)$, $D(A_\mu) = (A_\mu + E_H)^{-1} D(T_\mu)$, we have
\begin{equation}
\label{f3_46}
D_M \subset D(A_M),\ D_\mu \subset D(A_\mu),
\end{equation}
and
\begin{equation}
\label{f3_47}
D_M \cap D(A) = \{ 0 \},\quad D_\mu \cap D(A) = \{ 0 \},
\end{equation}
By~(\ref{f3_44}),(\ref{f3_46}) and~(\ref{f3_47}) we obtain that
\begin{equation}
\label{f3_48}
D_M \cap D_\mu = \{ 0 \}.
\end{equation}
Set
\begin{equation}
\label{f3_49}
D := D_M \dotplus D_\mu.
\end{equation}
By~(\ref{f3_45}) we obtain that the sets $D_M$ and $D_\mu$ have the linear dimension $n(A)$.
Elementary arguments show that $D$ has the linear dimension $2 n(A)$.
Since $D(A_\mu)\subset D(A^*)$, $D(A_M)\subset D(A^*)$, we can write
\begin{equation}
\label{f3_50}
D(A) \dotplus D_M \dotplus D_\mu \subseteq D(A^*) = D(A) \dotplus N_z \dotplus N_{\overline{z}},
\end{equation}
where $z\in \mathbb{C}\backslash \mathbb{R}$.
Let
$$ g_1,g_2,...,g_{2n(A)}, $$
be $2n(A)$ linearly independent elements from $D$. Let
\begin{equation}
\label{f3_52}
g_j = g_{A,j} + g_{z,j} + g_{\overline{z},j},\qquad 1\leq j\leq 2n(A),
\end{equation}
where $g_{A,j}\in D(A)$, $g_{z,j}\in N_z$, $g_{\overline{z},j}\in N_{\overline{z}}$.
Set
\begin{equation}
\label{f3_53}
\widehat g_j := g_j - g_{A,j},\qquad 1\leq j\leq 2n(A).
\end{equation}
If for some $\alpha_j\in \mathbb{C}$, $1\leq j\leq 2n(A)$, we have
$$ 0 = \sum_{j=1}^{ 2n(A) } \alpha_j \widehat g_j = \sum_{j=1}^{ 2n(A) } \alpha_j g_j -
\sum_{j=1}^{ 2n(A) } \alpha_j g_{A,j}, $$
then
$$ \sum_{j=1}^{ 2n(A) } \alpha_j g_j = 0, $$
and $\alpha_j = 0$, $1\leq j\leq 2n(A)$.
Therefore elements $\widehat g_j$, $1\leq j\leq 2n(A)$ are linearly independent.
Thus, they form a linear basis of the finite-dimensional subspace $N_z \dotplus N_{\overline{z}}$, which has dimension $2n(A)$.
Then
\begin{equation}
\label{f3_54}
N_z \dotplus N_{\overline{z}} \subseteq D,
\end{equation}
\begin{equation}
\label{f3_55}
D(A^*) = D(A) \dotplus N_z \dotplus N_{\overline{z}} \subseteq D(A) \dotplus D = D_L.
\end{equation}
So, we get the equality
\begin{equation}
\label{f3_56}
D(A) \dotplus D_M \dotplus D_\mu = D(A^*).
\end{equation}
Since $D(A) + D_M \subseteq D(A_M)$, $D_\mu \subseteq D(A_\mu)$,
we get
$$ D(A^*) = D(A)+D_M+D_\mu \subseteq D(A_M) + D(A_\mu). $$
Since $D(A_M) + D(A_\mu)\subseteq D(A^*)$, we get
\begin{equation}
\label{f3_57}
D(A^*) = D(A_M) + D(A_\mu).
\end{equation}
From~(\ref{f3_44}) and~(\ref{f3_57}) the statement of the Proposition follows.
$\Box$
We shall use the following classes of functions~\cite{c_9000_D_M}.
Let $\mathcal{H}$ be a Hilbert space.
Denote by $R_{\mathcal{H}}$ the class of operator-valued functions $F(z)=F^*(\overline{z})$
holomorphic in $\mathbb{C}\backslash \mathbb{R}$ with values (for $z\in \mathbb{C}_+$)
in the set of maximal dissipative operators in $\mathfrak{C}(\mathcal{H})$.
Completing the class $R_{\mathcal{H}}$ by ideal elements we get the class $\widetilde R_{\mathcal{H}}$.
Thus, $\widetilde R_{\mathcal{H}}$ is a collection of functions holomorphic in
$\mathbb{C}\backslash \mathbb{R}$ with values (for $z\in \mathbb{C}_+$)
in the set of maximal dissipative linear relations $\theta(z)=\theta^*(\overline{z})$ in $\mathcal{H}$.
The indeterminate part of the relation $\theta(z)$ does not depend on $z$ and the relation $\theta(z)$
admits the representation
\begin{equation}
\label{f3_58}
\theta(z) = \{ < h_1, F_1(z) h_1 + h_2 >:\ h_1\in D(F_1(z)),\ h_2\in \mathcal{H}_2 \},
\end{equation}
where $\mathcal{H} = \mathcal{H}_1 \oplus \mathcal{H}_2$, $F_1(z) \in R_{\mathcal{H}_1}$.
\begin{dfn} \cite{c_9000_D_M}
\label{d3_4}
An operator-valued function $F(z)\in R_{\mathcal{H}}$ belongs to the class
$S_{\mathcal{H}}^{-0} (-\infty,0)$ if for all $n\in \mathbb{N}$, $z_j\in \mathbb{C}_+$,
$h_j\in D(F(z_j))$, $\xi_j\in \mathbb{C}$, the following inequality holds:
\begin{equation}
\label{f3_59}
\sum_{i,j=1}^n \frac{ (z_i^{-1} F(z_i) h_i, h_j) - (h_i, z_j^{-1} F(z_j) h_j) }
{ z_i - \overline{z_j} } \xi_i \overline{\xi_j} \geq 0.
\end{equation}
Completing the class $S_{\mathcal{H}}^{-0} (-\infty,0)$ with ideal elements~(\ref{f3_58})
we obtain the class $\widetilde S_{\mathcal{H}}^{-0} (-\infty,0)$.
\end{dfn}
From Theorem~9 in~\cite[p.46]{c_9000_D_M} taking into account Proposition~\ref{p3_2} we have the
following conclusion (see also Remark~17 in~\cite[p.49]{c_9000_D_M}):
\begin{thm}
\label{t3_2}
Let $A$ be a closed non-negative Hermitian operator in a Hilbert space $H$ which is in
the completely indeterminate case.
Let $\{ \mathcal{H}, \Gamma_1, \Gamma_2 \}$ be an arbitrary SBV for $A$ such that
$\widetilde A_2 = A_\mu$ and $M(z)$ be the corresponding Weyl function. Then the formula
\begin{equation}
\label{f3_60}
\mathbf{R}_z = (A_\mu - zE_H)^{-1} - \gamma(z) (\tau(z)+M(z)-M(0))^{-1} \gamma^*(\overline{z}),\quad
z\in \mathbb{C}\backslash \mathbb{R},
\end{equation}
establishes a bijective correspondence between $\mathbf{R}_z\in \Omega^0(-\infty,0)(A)$ and
$\tau\in \widetilde S_{\mathcal{H}}^{-0} (-\infty,0)$.
The constant functions $\tau(z)\equiv \tau = \tau^*$ in~(\ref{f3_60}) correspond to the canonical $\Pi$-resolvents
and only to them.
\end{thm}
Now we can state our main result.
\begin{thm}
\label{t3_3}
Let a matrix Stieltjes moment problem~(\ref{f1_1}) be given and
conditions~(\ref{f1_4}) hold. Let an operator $A$ be the closure of the operator constructed for the
moment problem in~(\ref{f2_12}).
Then the following statements are true:
1) The moment problem~(\ref{f1_1}) is determinate if and only if
the Friedrichs extension $A_\mu$ and the Krein extension $A_M$ coincide: $A_\mu = A_M$.
In this case the unique solution of the moment problem is generated by the orthogonal spectral
function $\mathbf E_\lambda$ of $A_\mu$ by formula~(\ref{f3_19});
2) If $A_\mu\not= A_M$, define the extended operator $A_{ext}$ for $A$ as in~(\ref{f3_43}).
Let $\{ \mathcal{H}, \Gamma_1, \Gamma_2 \}$ be an arbitrary SBV for $A_{ext}$ such that
$\widetilde A_2 = (A_{ext})_\mu$ and $M(z)$ be the corresponding Weyl function.
All solutions of the moment problem~(\ref{f1_1}) have the following form:
\begin{equation}
\label{f3_61}
M(\lambda) = (m_{k,j} (\lambda))_{k,j=0}^{N-1},
\end{equation}
where
$$ \int_{\mathbb{R}_+} \frac{dm_{k,j} (\lambda)}{\lambda - z} =
\left( (A_\mu - zE_H)^{-1} x_k,x_j \right)_H $$
\begin{equation}
\label{f3_62}
- \left( \gamma(z) (\tau(z)+M(z)-M(0))^{-1} \gamma^*(\overline{z}) x_k,
x_j \right)_H,\quad z\in \mathbb{C}\backslash \mathbb{R},
\end{equation}
where $\tau\in \widetilde S_{\mathcal{H}}^{-0} (-\infty,0)$.
Moreover, the correspondence between all $\tau\in \widetilde S_{\mathcal{H}}^{-0} (-\infty,0)$ and all solutions
of the moment problem~(\ref{f1_1}) is one-to-one.
\end{thm}
{\bf Proof. } The statements of the Theorem follow directly from Theorems~\ref{t3_1} and~\ref{t3_2}.
$\Box$
\begin{center}
\large\bf
The matrix Stieltjes moment problem: a description of all solutions.
\end{center}
\begin{center}
\bf S.M. Zagorodnyuk
\end{center}
We describe all solutions of the matrix Stieltjes moment problem in the general case (no conditions
besides solvability are assumed). We use Krein's formula for the generalized $\Pi$-resolvents
of positive Hermitian operators in the form of V.A.~Derkach and M.M.~Malamud.
MSC: 44A60; Secondary 30E05
Key words: moment problem, positive definite kernel, spectral function.
\end{document}
\begin{document}
\begin{titlepage}
\title{\MakeUppercase{Top-Down Skiplists}}
\begin{abstract}
We describe todolists (top-down skiplists), a variant of skiplists
(Pugh 1990) that can execute searches using at most $\log_{2-\varepsilon} n +
O(1)$ binary comparisons per search and that have amortized update time
$O(\varepsilon^{-1}\log n)$. A variant of todolists, called working-todolists,
can execute a search for any element $x$ using $\log_{2-\varepsilon} w(x)
+ o(\log w(x))$ binary comparisons and has amortized search time
$O(\varepsilon^{-1}\log w(x))$. Here, $w(x)$ is the ``working-set number'' of
$x$. No previous data structure is known to achieve a bound better
than $4\log_2 w(x)$ comparisons. We show through experiments that,
if implemented carefully, todolists are comparable to other common
dictionary implementations in terms of insertion times and outperform
them in terms of search times.
\end{abstract}
\end{titlepage}
\section{Introduction}
Comparison-based dictionaries supporting the three \emph{basic
operations} insert, delete and search represent \emph{the} classic
data-structuring problem in computer science. Data structures that
support each of these operations in $O(\log n)$ time have been known
since the introduction of AVL trees more than half a century ago
\cite{adelson-velskii.landis:algorithm}. Since then, many competing
implementations of dictionaries have been proposed, including
red-black trees \cite{guibas.sedgewick:dichromatic}, splay trees
\cite{sleator.tarjan:self-adjusting}, general-balanced/scapegoat
trees \cite{andersson:general,galperin.rivest:scapegoat},
randomized binary search trees \cite{martinez:randomized},
energy-balanced trees \cite{goodrich:competitive}, Cartesian trees/treaps
\cite{aragon.seidel:randomized,vuillemin:unifying}, skip lists
\cite{pugh:skip}, jump lists \cite{bronnimann.cazals.ea:randomized},
and others. Most major programming environments include one or more
$O(\log n)$ time dictionary data structures in their standard library,
including Java's \texttt{TreeMap} and \texttt{TreeSet}, the C++ STL's
\texttt{set}, and Python's \texttt{OrderedDict}.
In short, comparison-based dictionaries are so important that any
new ideas or insights about them are worth exploring. In this paper,
we introduce the todolist (\boldx{to}p-\boldx{do}wn skip\boldx{list}),
a dictionary that is parameterized by a parameter $\varepsilon\in(0,1)$,
that can execute searches using at most $\log_{2-\varepsilon} n + O(1)$ binary
comparisons per search, and that has amortized update time $O(\varepsilon^{-1}\log
n)$. (Note that $\log_{2-\varepsilon} n \le (1+\varepsilon)\log n$ for $\varepsilon < 1/4$.)
As a theoretical result todolists are nothing to get excited about; there
already exist comparison-based dictionaries with $O(\log n)$ time for all
operations that perform at most $\lceil\log n\rceil+1$ comparisons per
operation \cite{andersson.lai:fast,fagerberg:binary}. (Here, and throughout,
$\log n=\log_2 n$ denotes the binary logarithm of $n$.) However, todolists
are based on a new idea---top-down partial rebuilding of skiplists---and
our experimental results show that a careful implementation of todolists
can execute searches faster than existing popular data structures.
In particular, todolists outperform (again, in terms of searches) Guibas
and Sedgewick's red-black trees \cite{guibas.sedgewick:dichromatic} which are
easily the most common implementation of comparison-based dictionaries
found in programming libraries. This is no small feat since, in the
setting we studied, the average depth of a node in a red-black tree
seems to be $\log n - O(1)$ \cite{sedgewick:left-leaning}.
As a more substantial theoretical contribution, we show that a variant of todolists, called working-todolists, is able to search for
an element $x$ using $\log_{2-\varepsilon} w(x)+o(\log w(x))$ comparisons
in $O(\varepsilon^{-1}\log w(x))$ amortized time. Here, $w(x)$---the
working set number of $x$---is loosely defined as the number of
distinct items accessed since the last time $x$ was accessed (see
\secref{working-todolist} for a precise definition.) Previous data
structures with some variant of this \emph{working-set property}
include splay trees \cite{sleator.tarjan:self-adjusting}, Iacono's
working-set structure \cite{iacono:alternatives,badoiu.cole.ea:unified},
deterministic self-adjusting skiplists \cite{bose.douieb.ea:dynamic},
layered working-set trees \cite{bose.douieb.ea:layered}, and skip-splay
trees \cite{derryberry.sleator:skip-splay}. However, even the most
efficient of these can only be shown to perform at most $4\log w(x)$
comparisons during a search for $x$.
\section{TodoLists}
\seclabel{todolist}
A \emph{todolist} for the values $x_1<x_2<\cdots<x_n$ consists of a
nested sequence of $h+1$ sorted singly-linked lists, $L_0,\ldots,L_h$,
having the following properties:\footnote{Here and throughout, we use set
notations like $|\cdot|$, and $\subseteq$ on the lists $L_0,\ldots,L_h$,
with the obvious interpretations.}
\begin{enumerate}
\item $|L_0| \le 1$.
\item $L_i\subseteq L_{i+1}$ for each $i\in\{0,\ldots,h-1\}$.
\item For each $i\in\{1,\ldots,h\}$ and each pair $x,y$ of consecutive
elements in $L_i$, at least one of $x$ or $y$ is in $L_{i-1}$.
\item $L_h$ contains $x_1,\ldots,x_n$.
\end{enumerate}
The value of $h$ is at least $\lceil \log_{2-\varepsilon} n\rceil$ and
at most $\lceil \log_{2-\varepsilon} n\rceil+1$. The \emph{height} of
a value, $x$, in a todolist is the number of lists in which $x$ appears.
We will assume that the head of each list $L_i$ is a \emph{sentinel} node that does not contain any data. (See \figref{todolist}.) We will also
assume that, given a pointer to the node containing $x_j$ in $L_i$, it is
possible to find, in constant time, the occurrence of $x_j$ in $L_{i+1}$.
This can be achieved by maintaining an extra pointer or by maintaining
all occurrences of $x_j$ in an array. (See \secref{implementation} for
a detailed description.)
\begin{figure}
\caption{An example of a todolist containing $1,3,4,7,8,9,11$.}
\end{figure}
\subsection{Searching}
Searching for a value, $x$, in a todolist is simple. In particular, we
can find the node, $u$, in $L_h$ that contains the largest value that
is less than $x$. If $L_h$ has no value less than $x$ then the search
finds the sentinel in $L_h$. We call the node $u$ the \emph{predecessor}
of $x$ in $L_h$.
Starting at the sentinel in $L_0$, one comparison (with the at most one
element of $L_0$) is sufficient to determine the predecessor, $u_0$ of $x$
in $L_0$. (This follows from Property~1.) Moving down to the occurrence
of $u_0$ in $L_1$, one additional comparison is sufficient to determine
the predecessor, $u_1$ of $x$ in $L_1$. (This follows from Property~3.)
In general, once we know the predecessor of $x$ in $L_i$ we can determine
the predecessor of $x$ in $L_{i+1}$ using one additional comparison. Thus,
the total number of comparisons needed to find the predecessor of $x$
in $L_h$ is only $h+1$.
\noindent{$\textsc{FindPredecessor}(x)$}
\begin{algorithmic}
\STATE{$u_0\gets \mathtt{sentinel}_0$}
\FOR{$i=0,\ldots,h$}
\IF{$\mathrm{next}(u_i)\neq \mathbf{nil}$ and $\mathrm{key}(\mathrm{next}(u_i)) < x$}
\STATE{$u_i\gets\mathrm{next}(u_i)$}
\ENDIF
\IF{$i<h$}
\STATE{$u_{i+1}\gets\mathrm{down}(u_i)$}
\ENDIF
\ENDFOR
\RETURN{$u_h$}
\end{algorithmic}
\subsection{Adding}
\seclabel{insertion}\seclabel{adding}
Adding a new element, $x$, to a todolist is done by searching for it
using the algorithm outlined above and then splicing $x$ into each of
the lists $L_0,\ldots,L_h$. This splicing is easily done in constant
time per list, since the new nodes containing $x$ appear after the nodes
$u_0,\ldots,u_h$. At this point, all of the Properties~2--4 are satisfied,
but Property~1 may be violated since there may be two values in $L_0$.
If there are two values in $L_0$, then we restore Property~1 with the
following \emph{partial rebuilding} operation: We find the smallest index
$i$ such that $|L_i|\le (2-\varepsilon)^i$; such an index always exists since
$n=|L_h|\le(2-\varepsilon)^h$. We then rebuild the lists $L_{0},\ldots,L_{i-1}$
in a bottom up fashion; $L_{i-1}$ gets every second element from $L_i$
(starting with the second), $L_{i-2}$ gets every second element from
$L_{i-1}$, and so on down to $L_0$.
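For concreteness, the following is a minimal sketch of this rebuilding step for the
naive representation in which every occurrence of a value is a separate node with
\texttt{next} and \texttt{down} pointers; the type and function names here are ours and
are not taken from the accompanying source code.
\begin{verbatim}
// Hypothetical node type: one node per (value, level) pair.
struct Node {
  int key;
  Node *next;   // successor in the same list L_i
  Node *down;   // occurrence of the same key in L_{i+1}
};

// Rebuild L_{i-1},...,L_0 from L_i: L_{j-1} receives every second
// element of L_j, starting with the second.  sentinel[j] is the
// sentinel node of L_j.  (The old nodes of the rebuilt lists are
// simply dropped here; a real implementation reuses or frees them.)
void rebuild(Node *sentinel[], int i) {
  for (int j = i; j >= 1; j--) {
    Node *tail = sentinel[j - 1];
    tail->next = nullptr;                 // empty L_{j-1}
    int pos = 0;
    for (Node *u = sentinel[j]->next; u != nullptr; u = u->next) {
      if (++pos % 2 == 0) {               // take the 2nd, 4th, ... element
        tail->next = new Node{u->key, nullptr, u};
        tail = tail->next;
      }
    }
  }
}
\end{verbatim}
(The more space- and cache-efficient layout actually used in our experiments is
described in \secref{implementation}.)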
Since we take every other element from $L_i$ starting with the second element,
after rebuilding we obtain:
\[
|L_{i-1}| = \lfloor |L_i|/2 \rfloor \le |L_i|/2
\]
and, repeating this reasoning for $L_{i-2}, L_{i-3},\ldots, L_0$, we see that, after rebuilding,
\[
|L_{0}| \le |L_i|/2^i \le (2-\varepsilon)^i/2^i < 1 \enspace .
\]
Thus, after this rebuilding, $|L_0|=0$, Property~1 is restored and the
rebuilding, by construction, produces lists satisfying Properties~2--4.
To study the amortized cost of adding an element, we
can use the potential method with the potential function
\[
\Phi(L_0,\ldots,L_h)=C\sum_{i=0}^h|L_i| \enspace .
\]
Adding $x$ to each of $L_0,\ldots,L_h$ increases this potential by
$C(h+1)=O(C\log n)$. Rebuilding, if it occurs, takes $O(|L_i|)=O((2-\varepsilon)^i)$
time, but causes a change in potential of at least
\begin{align*}
\Delta\Phi & = C\sum_{j=0}^i\left(|L_j| - (2-\varepsilon)^j\right) \\
& = C\sum_{j=0}^i\left(|L_i|/2^{i-j} - (2-\varepsilon)^j\right) \\
& \le C\sum_{j=0}^{i-1}\left((2-\varepsilon)^i/2^{i-j} - (2-\varepsilon)^j\right) \\
& \le C\left((2-\varepsilon)^i - \sum_{j=0}^{i-1}(2-\varepsilon)^j\right) \\
& = C\left((2-\varepsilon)^i - \frac{(2-\varepsilon)^i-(2-\varepsilon)}{1-\varepsilon}\right) \\
& < C\left((2-\varepsilon)^i - (1+\varepsilon)\left((2-\varepsilon)^i-(2-\varepsilon)\right)\right)
& \text{(since $1/(1-\varepsilon)>1+\varepsilon$)} \\
& = -C\varepsilon(2-\varepsilon)^i + O(C) \\
\end{align*}
Therefore, by setting $C=c/\varepsilon$ for a sufficiently large constant,
$c$, the decrease in potential is greater than the cost of rebuilding.
We conclude that the amortized cost of adding an element $x$ is $O(C\log
n)=O(\varepsilon^{-1}\log n)$.
\subsection{Deleting}
Since we already have an efficient method of partial rebuilding, we
can use it for deletion as well. To delete an element $x$, we delete
it in the obvious way, by searching for it and then splicing it out
of the lists $L_i,\ldots,L_h$ in which it appears. At this point,
Properties~1, 2, and 4 hold, but Property~3 may be violated in any
subset of the lists $L_i,\ldots,L_h$. Luckily, all of these violations
can be fixed by taking $x$'s successor in $L_h$ and splicing it into
each of $L_0,\ldots,L_{h-1}$.\footnote{If $x$ has no successor in
$L_h$---because it is the largest value in the todolist---then deleting
$x$ will not introduce any violations of Property~3.} Thus, the second
part of the deletion operation is like the second part of the insertion
operation. Like the insertion operation, this may violate Property~1
and trigger a partial rebuilding operation. The same analysis used to
study insertion shows that deletion has the same amortized running time
of $O(\varepsilon^{-1}\log n)$.
\subsection{Tidying Up}
Aside from the partial rebuilding caused by insertions and deletions,
there are also some \emph{global rebuilding} operations that are sometimes
triggered:
\begin{enumerate}
\item If an insertion causes $n$ to exceed $\lceil(2-\varepsilon)^h\rceil$, then
we increment the value of $h$ to $h'=h+1$ and rebuild $L_0,\ldots,L_{h'}$
from scratch, starting by moving $L_h$ into $L_{h'}$ and then performing
a partial rebuilding operation on $L_{0},\ldots,L_{h'-1}$.
\item If an insertion or deletion causes $\sum_{i=0}^{h} |L_i|$ to exceed $cn$ for some threshold constant $c>2$, then we perform a partial rebuilding operation to rebuild $L_{0},\ldots,L_{h-1}$.
\item If a deletion causes $n$ to be less than $\lceil(2-\varepsilon)^{h-2}\rceil$ then we decrement the value of $h$ to be $h'=h-1$, move $L_h$ to $L_{h'}$ and then perform a partial rebuilding operation on $L_{0},\ldots,L_{h'-1}$.
\end{enumerate}
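For example, with $\varepsilon=0.2$ the first kind of global rebuilding is triggered
when an insertion causes $n$ to exceed $\lceil 1.8^{h}\rceil$; for $h=10$ this
threshold is $\lceil 1.8^{10}\rceil = 358$.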
A standard amortization argument shows that the first and third type
of global rebuilding contribute only $O(1)$ to the amortized cost of
each insertion and deletion, respectively. The same potential function
argument used to study insertion and deletion works to show that the
second type of global rebuilding contributes only $O(\log n)$ to the
amortized cost of each insertion or deletion (note that this second
type of global rebuilding is only required to ensure that the size of
the data structure remains in $O(n)$).
This completes the proof of our first theorem:
\begin{thm}\thmlabel{todolist}
For any $\varepsilon >0$, a todolist supports the operations of inserting,
deleting, and searching using at most $\log_{2-\varepsilon} n + O(1)$ comparisons
per operation. Starting with an empty todolist and performing any
sequence of $N$ add and remove operations takes $O(\varepsilon^{-1}N\log
N)$ time.
\end{thm}
\section{Working-TodoLists}
\seclabel{working-todolist}
Next, we present a new theoretical result that is achieved using a
variant of the todolist that we call a working-todolist. First, though, we need
some definitions. Let $a_1,\ldots,a_m$ be a sequence whose elements come
from the set $\{1,\ldots,n\}$. We call such a sequence an \emph{access
sequence}. For any $x\in\{1,\ldots,n\}$, the \emph{last-occurrence},
$\ell_t(x)$, of $x$ at time $t$ is defined as
\[
\ell_t(x)=\max\{j\in\{1,\ldots,t-1\}: a_{j} = x\} \enspace .
\]
Note that $\ell_t(x)$ is undefined if $x$ does not appear in
$a_1,\ldots,a_{t-1}$. The \emph{working-set number}, $w_t(x)$, of $x$
at time $t$ is
\[
w_t(x) = \begin{cases}
n & \text{if $\ell_t(x)$ is undefined} \\
|\{a_{\ell_t(x)},\ldots,a_{t-1}\}| & \text{otherwise.}
\end{cases}
\]
In words, if we think of $t$ as the current time, then $w_t(x)$ is the
number of distinct values in the access sequence since the most recent
access to $x$.
In this section, we describe the working-todolist data structure, which
stores $\{1,\ldots,n\}$ and, for any access sequence $a_1,\ldots,a_m$,
can execute searches for $a_1,\ldots,a_m$ so that the search for $a_t$
performs at most $(1+o(1))\log_{2-\varepsilon} w_t(a_t)$ comparisons and takes
$O(\varepsilon^{-1}\log w_t(a_t))$ amortized time.
From this point onward we will drop the time subscript, $t$, on $w_t$
and assume that $w(x)$ refers to the working set number of $x$ at the
current point in time (given the sequence of searches performed so far).
The working-todolist is a special kind of todolist that weakens Property~1
and adds an additional Property~5:
\begin{enumerate}
\item $|L_0|\le \varepsilon^{-1}+1$.
\setcounter{enumi}{4}
\item For each $i\in\{0,\ldots,h\}$, $L_i$ contains all values $x$ such that $w(x)\le (2-\varepsilon)^i$.
\end{enumerate}
For keeping track of working set numbers, a working-todolist also stores a
doubly-linked list, $Q$, that contains the values $\{1,\ldots,n\}$
ordered by their current working set numbers. The node that contains $x$
in this list is cross-linked (or associated in some other way) with the
appearances of $x$ in $L_0,\ldots,L_h$.
\subsection{Searching}
\seclabel{todolist-search}
Searching in a working-todolist is similar to searching in a todolist.
The main
difference is that Property~5 guarantees that the working-todolist will reach
a list, $L_i$, that contains $x$ for some $i\le\log_{2-\varepsilon} w(x)$.
If ternary comparisons are available, then this is detected at the
first such index $i$. If only binary comparisons are available, then
the search algorithm is modified slightly so that, at each list $L_i$
where $i$ is a perfect square, an extra comparison is done to test if
the successor of $x$ in $L_i$ contains the value $x$. This modification
ensures that, if $x$ appears first in $L_i$, then it is found by the
time we reach the list $L_{i'}$ for
\[
i'=i+\left\lceil 2\sqrt{i}\right\rceil + 1 = \log_{2-\varepsilon} w(x) + O(\sqrt{\log w(x)}) \enspace .
\]
Once we find $x$ in some list $L_{i'}$, we move it to the front of $Q$;
this takes only constant time since the node containing $x$ in $L_{i'}$
is cross-linked with $x$'s occurrence in $Q$. Next, we insert $x$ into
$L_0,\ldots,L_{i'-1}$. As with insertion in a todolist, this takes only
constant time for each list $L_j$, since we have already seen the predecessor
of $x$ in $L_j$ while searching for $x$.
At this point, Properties 2--5 are ensured and the ordering of $Q$ is
correct.
All that remains is to restore Property~1, which is now violated
since $L_0$ contains $x$, for which $w(x)=1$, and the value $y$ such
that $w(y)=2$. Again, this is corrected using partial rebuilding,
but the rebuilding is somewhat more delicate. We find the first index
$i$ such that $|L_i|\le (2-\varepsilon/2)^i$. Next, we traverse the first
$(2-\varepsilon)^{i-1}$ nodes of $Q$ and label them with their position in $Q$.
Since $Q$ is ordered by working-set number, this means that the label
at a node of $Q$ that contains the value $z$ is at most $w(z)$.
At this point, we are ready to rebuild the lists $L_0,\ldots,L_{i-1}$. To
build $L_{j-1}$ we walk through $L_j$ and take any value whose label
(in $Q$) is defined and is at most $(2-\varepsilon)^j$ as well as every
``second value'' as needed to ensure that Property~3 holds. Finally,
once all the lists $L_0,\ldots,L_j$ are rebuilt, we walk through the first
$(2-\varepsilon)^{i-1}$ nodes of $Q$ and remove their labels so that these labels
are not incorrectly used during subsequent partial rebuilding operations.
\subsection{Analysis}
We have already argued that we find a node containing $x$ in some list
$L_i$ with $i\in \log_{2-\varepsilon} w(x) + O(\sqrt{\log w(x)})$ and that this
takes $O(\log w(x))$ time. The number of comparisons needed to reach
this stage is
\[
\log_{2-\varepsilon} w(x) + O\left(\varepsilon^{-1} + \sqrt{\log w(x)}\right) \enspace .
\]
The $O(\varepsilon^{-1})$ term is the cost of searching in $L_0$ and the
$O(\sqrt{\log w(x)})$ term accounts for one comparison at each of the lists
$L_{\lceil\log_{2-\varepsilon} w(x)\rceil},\ldots,L_i$ as well as the extra
comparison performed in each of the lists $L_j$ where $j\in\{0,\ldots,i\}$
is a perfect square.
After finding $x$ in $L_i$, the algorithm then updates
$L_0,\ldots,L_{i-1}$ in such a way that Properties~2--5 are maintained.
All that remains is to show that Property~1 is restored by the partial
rebuilding operation and to study the amortized cost of this partial
rebuilding. We accomplish both these goals by studying the sizes of the lists
$L_0,\ldots,L_i$ after rebuilding.
Let $n_i=|L_i|$ and recall that $n_i\le (2-\varepsilon/2)^i$. Then,
the number of elements that make it from $L_i$ into $L_{i-1}$ is
\[ |L_{i-1}| \le (2-\varepsilon)^{i-1} + n_i/2 \enspace , \]
and the number of elements that make it into $L_{i-2}$ is
\begin{align*}
|L_{i-2}| & \le (2-\varepsilon)^{i-2} + |L_{i-1}|/2 \\
& \le (2-\varepsilon)^{i-2} + (2-\varepsilon)^{i-1}/2 + n_i/4 \enspace .
\end{align*}
More generally, the number of elements that make it into $L_j$ for any $j\in \{0,\ldots,i-1\}$ is at most
\begin{align*}
|L_j| & \le (2-\varepsilon)^{j} \cdot \sum_{k=0}^{i-j-1}\left(\frac{2-\varepsilon}{2}\right)^k + n_i/2^{i-j} \\
& \le (2-\varepsilon)^j/\varepsilon + n_i/2^{i-j} \enspace .
\end{align*}
In particular
\[
|L_0| \le \varepsilon^{-1} + n_i/2^i \le \varepsilon^{-1} + 1\enspace .
\]
Therefore, Property~1 is satisfied.
To study the amortized cost of searching for $x$, we use the same
potential function argument as in \secref{todolist}. The change in the sizes of the lists is then
\begin{align*}
\Delta\Phi/C & \le \sum_{j=0}^{i-1}\left((2-\varepsilon)^j/\varepsilon + n_i/2^{i-j} - (2-\varepsilon/2)^j\right) \\
& \le O((2-\varepsilon)^i/\varepsilon) + n_i - \sum_{j=0}^{i-1}(2-\varepsilon/2)^j \\
& = O((2-\varepsilon)^i/\varepsilon) + n_i - \frac{(2-\varepsilon/2)^i}{1-\varepsilon/2} + O(1) \\
& \le O((2-\varepsilon)^i/\varepsilon) + n_i - (1+\varepsilon/2)((2-\varepsilon/2)^i) \\
& \le O((2-\varepsilon)^i/\varepsilon) + n_i - (1+\varepsilon/2)n_i \\
& \le O((2-\varepsilon)^i/\varepsilon) - (\varepsilon/2)n_i \\
& = - \Omega(\varepsilon n_i) & \text{(since $n_i \ge n_{i-1} \ge (2-\varepsilon/2)^{i-1}$)}
\end{align*}
Since the cost of rebuilding $L_0,\ldots,L_{i-1}$ is $O(n_i)$, this implies that the amortized cost of accessing $x$ is $O(\varepsilon^{-1}\log w(x))$.
\section{Implementation Issues}
\seclabel{implementation}
As a first attempt, one might try to implement a todolist exactly
as described in \secref{todolist}, with each list $L_i$ being a
separate singly linked list in which each node has a down pointer to
the corresponding node in $L_{i+1}$. However, past experience with
skiplists suggests (and preliminary experiments confirm) that this is
neither space-efficient nor fast (see the class \texttt{LinkedTodoList}
in the source code). Instead, we use an implementation idea that appears in
Pugh's original paper on skiplists~\cite{pugh:skip}.
\subsection{Nodes as Arrays}
A better implementation uses one structure for each data item, $x$,
and this structure includes an array of pointers. If $x$ appears in
$L_{i},\ldots,L_h$, then this array has length $h-i+1$ so that it can
store the \texttt{next} pointers for the occurrence of $x$ in each of
these lists.
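A minimal sketch of this layout follows; the field names are ours and not necessarily
those used in the accompanying source code.
\begin{verbatim}
// One structure per stored value.  If the value appears in
// L_i,...,L_h then height = h-i+1 and, for 0 <= k < height,
// next[k] points to the value's successor in list L_{i+k}.
struct Node {
  int key;
  int height;     // number of lists containing this value
  int capacity;   // allocated length of next (a power of two)
  Node **next;
};
\end{verbatim}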
One complication occurs in implementing arrays of next pointers in a
todolist that is not present in a skiplist. During a partial rebuilding
operation, the heights of elements in a todolist change, which means
that their arrays need to be reallocated. The cost of reallocating and
initializing an array is proportional to the length of the array. However,
the amortization argument used in \secref{insertion} requires that the
cost of increasing the height of an element when rebuilding level $i$
is proportional to the increase in height; promoting an element from
level $i$ to level $i-c$ should take $O(c)$ time, not $O(h - i + c)$ time.
The work-around for this problem is to use a standard
doubling trick used to implement dynamic arrays (c.f.,
Morin~\cite[Section~2.1.2]{morin:open}). When a new array for a node
is allocated to hold $r$ values, its size is set to $r'=2^{\lceil\log
r\rceil}$. Later, if the height of the node increases to $r+c$ during
a partial rebuilding operation, the array needs to be reallocated only
if $r+c > r'$. Using this trick, the amortized
cost of increasing the height of the node is $O(c)$. This trick does not
increase the number of pointers in the structure by more than a factor of 2.
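Continuing the hypothetical \texttt{Node} layout sketched above, the reallocation step
might look as follows.
\begin{verbatim}
#include <algorithm>  // std::copy

// Raise the height of node u by c levels.  The next array is
// reallocated only when the new height exceeds its capacity, and the
// capacity is kept at a power of two, so the amortized cost of a
// height increase is O(c).
void grow(Node *u, int c) {
  int newHeight = u->height + c;
  if (newHeight > u->capacity) {
    int newCap = (u->capacity > 0) ? u->capacity : 1;
    while (newCap < newHeight) newCap *= 2;   // next power of two
    Node **a = new Node*[newCap]();           // zero-initialized
    std::copy(u->next, u->next + u->height, a);
    delete[] u->next;
    u->next = a;
    u->capacity = newCap;
  }
  u->height = newHeight;
}
\end{verbatim}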
Our initial implementation did exactly this, and performed well enough
to execute searches faster than standard skiplists but was still bested
by most forms of binary search trees. This was despite the fact that
the code for searching was dead-simple, and by decreasing $\varepsilon$ we
could reduce the height, $h$, (and hence the number of comparisons)
to less than was being performed by these search trees.
\subsection{The Problem With Skiplists}
After some reflection, the reason for the disappointing performance of
searches in todolists (and to a greater extent, in skiplists) became
apparent. It is due to the fact that accessing a node by following a
pointer that causes a CPU cache miss is more expensive than performing
a comparison.
The search path in a todolist has length equal to the number of
comparisons performed. However, the set of nodes in the todolist
that are dereferenced during a search includes nodes not on the
search path. Indeed, when the outcome of a comparison of the form
$\mathrm{key}(\mathrm{next}(u)) < x$ is false, the search path proceeds
to $\mathrm{down}(u)$ and the node $\mathrm{next}(u)$ does not appear on
the search path.
\subsection{The Solution}
Luckily, there is a fairly easy remedy, though it does use more space.
We implement the todolist so that each node $u$ in a list $L_i$ stores an
additional key, $\mathrm{keynext}(u)$, that is the key associated with
the node $\mathrm{next}(u)$. This means that determining the next node
to visit after $u$ can be done using the key, $\mathrm{keynext}(u)$, stored
at node $u$ rather than having to dereference $\mathrm{next}(u)$. The
resulting structure is illustrated in \figref{packed-in}.
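The following simplified sketch illustrates the idea using the naive one-node-per-level
layout rather than the packed arrays of \figref{packed-in}; the field names are again ours.
\begin{verbatim}
// Each node caches the key of its successor, so the decision to
// advance is made without dereferencing next(u).
struct Node {
  int key;
  int keynext;   // key(next(u)); only meaningful when next != nullptr
  Node *next;
  Node *down;
};

// Predecessor search: next(u) is dereferenced only when the search
// path actually moves to it.
Node *findPredecessor(Node *sentinel0, int x, int h) {
  Node *u = sentinel0;
  for (int i = 0; i <= h; i++) {
    if (u->next != nullptr && u->keynext < x)
      u = u->next;          // at most one step per level
    if (i < h)
      u = u->down;
  }
  return u;
}
\end{verbatim}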
\begin{figure}
\caption{The memory layout of an efficient todolist implementation.}
\end{figure}
With this modification, todolists achieve faster search times---even with
fairly large values of $\varepsilon$---than binary search tree structures. Indeed,
retrofitting this idea into the skiplist implementation improves its
performance considerably so that it outperforms some (but not all)
of the tree-based structures.
\subsection{Experiments}
\seclabel{experiments}
To test the performance of todolists, we implemented them and tested them
against other comparison-based dictionaries that are popular, either in
practice (red-black trees) and/or in textbooks (scapegoat trees, treaps,
and skiplists). The implementation of all data structures was done in
C++ and all the code was written by the second author.\footnote{The
implementations of all but todolists were adapted from the second
author's textbook \cite{morin:open}.} To ensure that this code is
comparable to so-called industrial strength C++ code, the tests also
include the C++ Standard Template Library \texttt{set} class that comes
as part of \texttt{libstdc++}. This \texttt{set} class is implemented
as a red-black tree and performed indistinguishably from our red-black
tree implementation.
The code used to generate all the test data in this section is available
for download on GitHub.\footnote{The source code is available at
\url{https://github.com/patmorin/todolist}. The final version of this
paper will provide a digital object identifier (DOI) that provides a
link to the permanent fixed version of the source code that was used to
generate all data in the final paper.}
The experimental data in this section was obtained from the program
\texttt{main.cpp} that can be found in the accompanying source code.
This program was compiled using the command line: \texttt{g++ -std=c++11 -Wall -O4 -o main main.cpp}. The compiler was the \texttt{gcc} compiler, version
4.8.2 that ships with the Ubuntu 14.04 Linux distribution. Tests were
run on a desktop computer having 16GB of DDR3 1600MHz memory and an Intel
Core i5-4670K processor with 6MB L3 cache and running at 3.4GHz.
\subsubsection{Varying $\varepsilon$}
\Figref{epsilon} shows the results of varying the value of $\varepsilon$ from
$0.02$ to $0.68$ in increments of $0.01$. In this figure, $n=10^6$
random integers in the set $\{0, 5,\ldots,5(n-1)\}$ were chosen
(with replacement) and inserted into a todolist. Since dictionaries
discard duplicates, the resulting todolist contained $906,086$ values.
This todolist was then searched $m=5n$ times with random integers chosen,
with replacement, from $\{-2,\ldots,5n+3\}$. This figure illustrates
that todolists do behave roughly as \thmref{todolist} predicts.
Insertion time increases roughly proportionally to $1/\varepsilon$ and search
times seem to be of the form $c(d+\varepsilon)$ for some constant $c$ and $d$
(though there is certainly lots of noise in the search times).
In terms of implementation guidance, this figure suggests that values
of $\varepsilon$ below $0.1$ are hard to justify. The improvement in search
time does not offset the increase in insertion time. At the same
time, values of $\varepsilon$ greater than $0.35$ do not seem to be of much
use either since they increase the search time and don't decrease the
insertion time significantly. At some point beyond this---at around
$\varepsilon=0.45$---increasing $\varepsilon$ increases both the insertion time and
the search time (since every insertion starts with a search).
\begin{figure}
\caption{The trade-off between search time and insertion time as a function of $\varepsilon$.}
\end{figure}
\subsubsection{The Race}
Next, we tested the performance of todolists against a number of other
common dictionary data structures, including skiplists, red-black trees,
scapegoat trees, and treaps. As a baseline, we also measured the search
performance of two static data structures: sorted arrays, and perfectly
balanced binary search trees.
In these tests, the value of $n$ varied from $25,000$ to $2\times 10^6$
in increments of $25,000$. Each individual test followed the same
pattern as in the previous section and consisted of $n$ insertions
followed by $5n$ searches.
\paragraph{Searches: Todolists win.}
The timing results for the searches are shown in \figref{bigsearch}. In
terms of search times, todolists---with $\varepsilon = 0.2$ and $\varepsilon=0.35$---are
the winners among all the dynamic data structures, and even match the
performance of statically-built perfectly-balanced binary search trees.
The next fastest dynamic data structures are red-black trees which,
for larger $n$ have a search time of roughly 1.4 times that of
perfectly-balanced binary search trees.
Surprisingly, todolists beat red-black trees because of their memory
layout, not because they reduce the number of comparisons. In
these experiments, the average number of comparisons done by
red-black trees during a search was measured to be $\alpha\log n$
for $\alpha\in[1.02,1.03]$.\footnote{This incredibly good performance
of red-black trees created by inserting random data has been observed
already by Sedgewick \cite{sedgewick:left-leaning}, who conjectures
that the average depth of a node in such a red-black tree is $\log
n-1/2$.} This is substantially less than the number of comparisons done
by todolists, which is about $1.2\log n$ (for $\varepsilon=0.2$) and $1.35\log
n$ (for $\varepsilon=0.35$). The optimal binary search trees also have a
similarly efficient memory layout because the algorithm that constructs
them allocates nodes in the order they are encountered in a pre-order
traversal. A consequence of this is that the left child of any node,
$u$, is typically placed in a memory location adjacent to $u$. Thus,
during a random search, roughly half the steps proceed to an adjacent
memory location.
\begin{figure}
\caption{Search time comparison between different data structures. The top graph shows absolute times, in seconds. The bottom graph shows relative times, normalized by the time taken in the optimal search tree.}
\end{figure}
\paragraph{Insertions: Todolists lose.}
The timing results for the insertions are shown in
\figref{biginsertion}. This is where the other shoe drops. Even with
$\varepsilon=0.35$, insertions take about three to four times as long in
a todolist as in a red-black tree. Profiling the code shows that
approximately 65\% of this time is spent doing partial rebuilding and
another 6\% is due to global rebuilding.
One perhaps surprising result is that scapegoat trees, which are also
based on partial rebuilding, outperform todolists in terms of insertions.
This is because scapegoat trees are opportunistic, and only perform
partial rebuilding when needed to maintain a small height. Randomly built
binary search trees have logarithmic depth, so scapegoat trees do very
little rebuilding in our tests. In a similar test that inserts elements
in increasing order, scapegoat tree insertions took approximately 50\%
longer than todolist insertions.
\begin{figure}
\caption{Insertion time comparison between different data structures. The top graph shows absolute times, in seconds. The bottom graph shows relative times, normalized by the running time of the red-black tree.}
\end{figure}
\section{Conclusion}
If searches are much more frequent than updates, then todolists may
be the right data structure to use. When implemented properly, their
search times are difficult to beat. They perform $\log_{2-\varepsilon}
n$ comparisons, and roughly half of these lead to an adjacent array
location. Thus, a search in a todolist should incur only about
$\frac{1}{2}\log_{2-\varepsilon} n$ cache misses on average. $B$-trees
\cite{bayer.mccreight:organization} and their cache-oblivious counterparts
\cite{bender.demaine.ea:cache-oblivious,bender.duan.ea:locality-preserving}
can reduce this to $O(\log_B n)$, where $B$ is the size of a cache line,
but they have considerably higher implementation complexity and running-time
constants.
On the other hand, todolists leave a lot to be desired in terms of
insertion and deletion time. Like other structures that use partial
rebuilding, the restructuring done during an insertion takes time
$\Omega(\log n)$, so is non-negligible. The implementation of the
insertion algorithm used in our experiments is fairly naïve and could
probably be improved, but it seems unlikely that its performance will
ever match that of, for example, red-black trees.
\end{document}
\begin{document}
\title{Surrogate models for oscillatory systems using
sparse polynomial chaos expansions and stochastic time warping}
\author[1]{C. V. Mai} \author[1]{B. Sudret}
\affil[1]{Chair of Risk, Safety and Uncertainty Quantification,
ETH Zurich, Stefano-Franscini-Platz 5, 8093 Zurich, Switzerland}
\date{}
\maketitle
\abstract{ Polynomial chaos expansions (PCE) have proven efficient in a number
of fields for propagating parametric uncertainties through
computational models of complex systems, such as structural and fluid
mechanics, chemical reactions and electromagnetism. For problems
involving oscillatory, time-dependent output quantities of interest,
it is well-known that reasonable accuracy of PCE-based approaches is
difficult to reach in the long term. In this paper, we propose a fully
non-intrusive approach based on stochastic time warping to address
this issue: each realization (trajectory) of the model response is
first rescaled to its own time scale so as to put all sampled
trajectories in phase in a common virtual time line. Principal
component analysis is introduced to compress the information contained
in these transformed trajectories and sparse PCE representations using
least angle regression are finally used to approximate the components.
The approach shows remarkably small prediction error for particular
trajectories as well as for second-order statistics of the latter. It
is illustrated on different benchmark problems well known in the
literature on time-dependent PCE problems, ranging from rigid body
dynamics and chemical reactions to forced oscillations of a nonlinear
system.
\\[1em]
{\bf Keywords}: surrogate models -- sparse polynomial chaos expansions
-- stochastic ordinary differential equations -- stochastic time
warping -- dynamical systems }
\section{Introduction}
In modern engineering, it is of utmost importance to investigate the
significant effects of uncertainties when considering the behavior of
complex systems. These uncertainties may arise from environmental
factors ({e.g. } excitations, boundary conditions) or inherent sources ({e.g. }
natural variability of the materials) and are usually represented by
random variables. In this context, the framework of uncertainty
quantification was introduced, of which a major component is the
propagation of uncertainty from the input parameters through the system
to the output quantities of interest. The outcomes of uncertainty
propagation ({e.g. } statistical, reliability and sensitivity measures)
allow a better understanding of the system and are critical in decision
making.
To date, Monte Carlo simulation (MCS) has been the most widely used method for solving
uncertainty propagation problems. The idea behind MCS is to perform the
simulation a sufficiently large number of times by varying input
parameters such that the average of the response quantity of interest
converges to the expected value according to the law of large numbers.
However, the use of MCS is hindered by the fact that a large number of
simulations is not affordable in many practical problems ({e.g. } when each
evaluation of the computational model is time- and memory-consuming).
To overcome this issue, spectral methods have been used in the last two
decades as an alternative approach to traditional MCS. The spectral
approach consists in representing the response quantity of interest in a
space spanned by well-defined basis functions. Among a wide variety of
basis functions that have been investigated, polynomial functions have
shown particular effectiveness \cite{Ghanembook2003, LemaitreBook,
Soize2004}. The spectral approach that uses polynomial chaos functions
as a basis is simply named polynomial chaos expansions (PCEs).
In practice, PCEs are widely used as an approximate model to substitute
a computationally expensive model for uncertainty propagation. They can
be used in an either intrusive or non-intrusive setup. The former
requires knowledge of the mathematical equations describing the
considered system. One has to interfere with the original set of
equations, reformulate it and then solve the reformulated system to
compute the PCE coefficients. In contrast, the latter does not
necessitate any prior knowledge of the governing equations. It considers
the deterministic computational model as a black box and only requires
the definition of an experimental design, {i.e. } a set of input and corresponding
output values. In several studies, PCEs have shown great efficiency
compared to the traditional uncertainty propagation approach with MCS,
see {e.g. } \cite{Dossantos2008,Rajabi2014}.
PCEs, however, face challenges when used for dynamical systems that are
encountered in the fields of structural and fluid dynamics or in
chemical engineering \cite{Beran:2006,Ghosh2007, LeMaitre2009, Wan2006}.
In these cases, the governing equations are a system of ordinary
differential equations with random parameters. First, the response as a
function of time is no longer a scalar quantity but may be cast as a
vector after proper time discretization. Applying PCEs at each time
instant might require large computational resources. To reduce the
computational cost, Blatman and Sudret \cite{BlatmanIcossar2013} used principal component
analysis to capture the main stochastic features of the vector-valued
response quantities by means of a small number of variables which can be
represented by PCEs. The greatest challenge, however, is the decrease in time of
the accuracy of the PCE model, as reported in numerous publications
\cite{Beran:2006,Gerritsma2010,Ghosh2007,LemaitreBook,LeMaitre2009,Wan2006}.
The features of the accuracy degeneration, {i.e. } its onset (the
instant at which PCEs start being insufficiently accurate) or its rate
(how fast the accuracy decreases), depend on the considered problem.
The cause of the decaying accuracy of PCEs in dynamics can be classified
into an approach-related cause and an inherent cause. The
approach-related cause refers uniquely to intrusive techniques. In fact,
such techniques solve a system of reformulated ordinary differential
equations which are derived from the original system of equations by
substituting PCE for the quantity of interest. At any given instant, the
PCE is truncated after $P$ terms, thus introducing a truncation error.
The latter is accumulated in time, therefore the results deteriorate
\cite{Ghosh2007}.
By means of the non-intrusive approach, one can avoid this source of
error since the responses at different instants can be examined
``independently'', which prevents the accumulation of error at later
instants provided the deterministic solver is equally accurate whatever
the realization of the input parameters. The inherent cause refers to
the fact that the problem itself demonstrates increasing complexity as
time evolves, as shown through examples in \cite{Ghosh2007, Pettit2006}.
The growing complexity makes it increasingly hard for PCEs to capture the
behavior of the system.
The growth in time of the inherent complexity of the problem is
characterized by the increasingly complicated relationship between the
output quantity and the input parameters, exhibiting important
non-linearity, abrupt changes and possibly discontinuities (see {e.g. }
\cite{Desai2013, Witteveen2013}). It may be related to the difference in
terms of frequency and phase content of the various response time-series
obtained with distinct values of the uncertain input parameters
\cite{LeMaitre2009,Witteveen2008}. These discrepancies tend to be more
and more severe when time passes. In other words, trajectories tend to
be similar at early instants and less and less in phase in the long term
\cite{Wan2006,Wan2005}.
To alleviate this issue, Blatman and Sudret \cite{Blatman2010b} introduced adaptive sparse
PCEs that allow one to take advantage of the sparsity in the structure
of the model (if this sparse structure exists), thus extending the time
range where the computation of PCEs is tractable and the result is
sufficiently accurate. In other words, adaptive sparse PCEs may delay
the onset of the accuracy degeneration.
{Le Ma\^{i}tre et al. \cite{LeMaitre2004} developed adaptive methods for multi-resolution
analysis, which relies on a multi-wavelet basis of compact piecewise-smooth polynomial functions.}
Lucor and Karniadakis \cite{Lucor2004} used adaptive
generalized PCEs, which consists in detecting the first-order terms with
the most important effects on the fluctuation of the response and then
building the higher-order terms that only include the selected
first-order terms. From the same perspective, Mai and Sudret \cite{MaiUncecomp2015}
developed the hierarchical PCEs which aims at updating the set of
candidate polynomials adaptively by adding selected interaction terms
while selecting only the regressors with the most importance.
In most papers, the proposed high-order PCE approaches consist in using
assumptions to reduce the size of the high-order PCE basis or using
advanced computational techniques for computing them.
Wan and Karniadakis \cite{Wan2006, Wan2006a, Wan2005} proposed multi-element PCEs, in which
the random space is divided into multiple subspaces in such a way that
the complexity of the model in each subspace is reduced, thus requiring
only low-order PCEs. Jakeman et al. \cite{Jakeman2013} also used multi-element PCEs
with a discontinuity detector in order to minimize the number of
subspaces. Nouy \cite{Nouy2010} and Soize \cite{Soize2015} approximated a
multimodal random variable ({i.e. } the output quantity of interest) by a
mixture of unimodal random variables, each modeled by PCEs. This
approach might help to improve the effectiveness of PCEs in the context
of dynamical systems, when the responses at late instants usually
exhibit multi-modal distributions as will be shown in the current paper
through numerical applications. In the above approaches, the input
space is divided into subspaces according to the detected
discontinuities or dissimilarities. One then builds a local PCE in each
subspace and combines those PCE models to obtain a global metamodel.
Therefore these approaches can be classified as local PCEs. The use of
polynomial functions in local domains, however, requires an accurate
decomposition of the input space and will not be straightforward in
high-dimensional problems.
From a different perspective, Gerritsma et al. \cite{Gerritsma2010} proposed to compute
time-dependent PCEs by updating the polynomial chaos basis on-the-fly.
If the approximation error is excessive at a considered time instant,
the authors add to the existing set of random variables a new variable,
which is the response quantity at the previous instant. This is based on
the idea that a fixed set of random variables at the beginning of the
process is not sufficient to model the system in the long term and thus,
the set of random variables the PCEs depend on needs to be updated.
This approach can be viewed as a nested PCE model, {i.e. } a PCE model of
another PCE model. Luchtenburg et al. \cite{Luchtenburg2014} used flow map composition,
which is in principle similar to time-dependent PCEs. The time-history
response is composed of short-term flow maps, each modeled by PCE. The
idea of constructing the basis on-the-fly was also applied by
Cheng et al. \cite{Cheng2013} and Choi et al. \cite{Choi2014}, who derived intrusively a system
of equations governing the evolution of the time-dependent spatial and
stochastic basis. In the context of structural dynamics,
Spiridonakos and Chatzi \cite{Spiridonakos2015, Spiridonakos2015a} proposed the combination of
PCEs and autoregressive models which consists in representing the
response as a function of its past values. This approach is currently
investigated with the use of sparse adaptive PCEs by
Mai et al. \cite{Mai2016IJ4UQ2}. Recently, Ozen and Bal \cite{Ozen2016} introduced the
dynamical PCEs, which is also based on the idea that the future
evolution of the response depends on the present solution.
As explained earlier, the accuracy of PCEs may degenerate in time due to
the time-increasing dissimilarity between the response trajectories when
considering distinct values of the uncertain input parameters. To
alleviate the accuracy decay, one may naturally think of increasing the
similarity between the response trajectories. An attractive approach for
this purpose is to pre-process the response trajectories accordingly.
To this end, Witteveen et al. \cite{Desai2013, Witteveen2008} represented
the dynamic response trajectories as functions of the phase $\phi$
instead of time $t$ in order to obtain in-phase vibrations. The phases
are extracted from the observations, based on the local extrema of the
time series. The response trajectories are then transformed from
time-histories to phase-histories. PCEs are eventually applied in the
phase space.
Le Ma\^{i}tre et al. \cite{LeMaitre2009} represented the responses in a rescaled
time $\tau$ such that the dynamic responses vary in a small
neighborhood of a reference trajectory. The time scale $\tau$ is
intrusively adjusted at each time step so that the distance between the
dynamic response and the reference solution is minimized, thus in-phase
vibrations are achieved.
{From the same perspective, Alexanderian et al. \cite{Alexanderian2012, Alexanderian2014} introduced a
multiscale stretching of the responses which allows an efficient PC representations
of the stochastic dynamics with non-intrusive spectral projections.}
As a summary, PCEs fail to represent long-term time-dependent system
responses because of their inherent increasing complexity. To the
authors' knowledge there is no versatile tool that helps overcome the
problem in a \emph{non-intrusive} setup. This paper aims at filling this
gap by introducing a fully non-intrusive approach that allows efficient
use of PCEs for time-dependent problems showing oscillatory behaviors.
The proposed approach relies on a \emph{stochastic time warping} and the
subsequent rescaling of the response trajectories.
The paper is organized as follows: in Section 2, the fundamentals of
PCEs for time-independent problems are recalled. We introduce so-called
\emph{time-frozen} PCEs that will be used for comparison. In Section 3,
we propose an original non-intrusive PCE approach for uncertain
dynamical systems based on \emph{stochastic time-warping}. Five
applications are finally considered to show the efficiency of the
proposed approach.
\section{Polynomial chaos expansions}
\subsection{Spectral representation}
Let us consider the model $Y={\mathcal M}(\ve{X})$ where $\ve{X}=(X_1 , \, \dots \,, X_M)$ is an $M$-dimensional input vector of random variables with given joint probability density function $f_{\ve{X}}$ defined over an underlying probability space $(\varOmega, \mathcal{F}, \mathbb{P})$ and ${\mathcal M}:\, \ve{x} \in {\mathcal D}_{\ve{X}} \subset {\mathbb R}^M \mapsto {\mathbb R}$ is the computational model of interest, where ${\mathcal D}_{\ve{X}}$ is the support of the distribution of $\ve{X}$. Herein, we assume that the input random variables are independent, {i.e. } the joint probability density function (PDF) is the product of the marginal PDFs:
\begin{equation}
f_{\ve{X}}(\ve{x})= f_{X_1}(x_1) \ldots f_{X_M}(x_M).
\label{eq2.1}
\end{equation}
Assuming that the scalar output $Y$ is a second-order random variable, {i.e. } $\Esp{Y^2} < +\infty$, is equivalent to requiring that the computational model ${\mathcal M}$ belongs to the Hilbert space ${\mathcal H}$ of square-integrable functions with respect to the inner product:
\begin{equation}
\innprod{u}{v}{{\mathcal H}} = \int\limits_{{\mathcal D}_{\ve{X}}} u(\ve{x}) v(\ve{x}) f_{\ve{X}}(\ve{x}) \di \ve{x} .
\label{eq2.2}
\end{equation}
Denote by ${\mathcal H}_i$ the Hilbert space of square-integrable functions with respect to the marginal probability measure $\mathbb{P}_{X_i}(\di x_i)= f_{X_i}(x_i) \di x_i$. Let us equip ${\mathcal H}_i$ with an inner product:
\begin{equation}
\innprod{u}{v}{{\mathcal H}_i} = \int\limits_{{\mathcal D}_{X_i}} u(x_i) v(x_i) f_{X_i}(x_i) \di x_i ,
\label{eq2.3}
\end{equation}
where ${\mathcal D}_{X_i}$ is the support of the distribution of $X_i$
and denote by $\{ \phi_k^i, k \in {\mathbb N} \}$ an orthonormal basis of ${\mathcal H}_i$ which satisfies:
\begin{equation}
\innprod{\phi_k^i}{\phi_l^i}{{\mathcal H}_i} = \delta_{kl} ,
\label{eq2.4}
\end{equation}
in which $\delta_{kl}$ is the Kronecker symbol, which is equal to 1 if $k = l$ and equal to 0 otherwise.
As shown by Soize and Ghanem \cite{Soize2004}, the Hilbert space ${\mathcal H}$ is isomorphic to
the tensor product $\operatorname*{ \otimes}_{i=1}^M {\mathcal H}_i $. Thus a
basis of ${\mathcal H}$ may be obtained by the tensor product of the univariate
bases $\acc{\phi_k^i, k \in {\mathbb N}}, \, i = 1 , \, \dots \,, M$. As a consequence,
the random variable $Y = {\mathcal M}(\ve{X})$ that results of the propagation of
the uncertainties modeled by $\ve{X}$ through the computational model
${\mathcal M}$ may be cast as:
\begin{equation}
Y = \sum\limits_{\alpha_1 \in {\mathbb N}} \ldots \sum\limits_{\alpha_M \in {\mathbb N}} y_{\alpha_1 \ldots \alpha_M} \phi_{\alpha_1}^1(X_1) \ldots \phi_{\alpha_M}^M(X_M) .
\label{eq2.8}
\end{equation}
For the sake of simplicity, introducing multi-indices $\ve{\alpha} = \acc{\alpha_1 , \, \dots \,, \alpha_M}$, $Y$ may be rewritten as:
\begin{equation}
Y= \sum\limits_{\ve{\alpha} \in {\mathbb N}^{M}} y_{\ve{\alpha}} \ve{\phi}_{\ve{\alpha}}(\ve{X}) .
\label{eq2.9}
\end{equation}
where $\ve{\phi}_{\ve{\alpha}}(\ve{X})= \prod\limits_{i=1}^{M} \phi_{\alpha_i}^i(X_i) $ are the multivariate basis functions and $y_{\ve{\alpha}}$ are the associated deterministic coefficients.
\subsection{Polynomial chaos expansions}
The univariate basis functions $\phi_k^i, k \in {\mathbb N}, \, i = 1 , \, \dots \,, M$ may
be constructed using orthonormal polynomials \cite{Abramowitz} leading
to the so-called generalized polynomial chaos expansion \cite{Xiu2002,
Soize2004}. For instance when $X_i$ is a uniform (resp. standard
normal) random variable, the corresponding polynomial basis comprises
orthonormal Legendre (resp. Hermite) polynomials. Then Eq.~\eqref{eq2.9}
becomes:
\begin{equation}
Y= \sum\limits_{\ve{\alpha} \in {\mathbb N}^{M}} y_{\ve{\alpha}} \ve{\psi}_{\ve{\alpha}}(\ve{X}),
\label{eq2.2.1}
\end{equation}
in which $\ve{\alpha} = (\alpha_1 , \, \dots \,, \alpha_M)$ are the multi-indices with $\alpha_i, i=1 , \, \dots \,, M$ denoting the degree of the univariate polynomial in $X_i$ and $\ve{\psi}_{\ve{\alpha}}(\ve{X}) = \prod\limits_{i=1}^{M} \psi_{\alpha_i}^i(X_i)$ are multivariate \emph{orthonormal} polynomials obtained by the tensor product of univariate polynomials.
In practice, the use of infinite-dimensional PCEs is not tractable. One always truncates the expansion to obtain an approximate representation:
\begin{equation}
Y = \sum\limits_{\ve{\alpha} \in \mathcal{A}} y_{\ve{\alpha}} \ve{\psi}_{\ve{\alpha}}(\ve{X}) + \epsilon ,
\label{eq2.2.2}
\end{equation}
in which $\mathcal{A}$ is a truncation set and $\epsilon$ is the truncation-induced error. A classical truncation scheme consists in selecting all polynomials of total degree less than or equal to $p$, in which case the truncation set reads:
\begin{equation}
\mathcal{A}^{M,p} =\{ \ve{\alpha} \in {\mathbb N}^M: \quad \norme{\ve{\alpha}}{1} \eqdef \alpha_1 + \ldots + \alpha_M \leqslant p \} .
\end{equation}
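Note that the cardinality of $\mathcal{A}^{M,p}$ equals $\binom{M+p}{p}$,
which grows rapidly with $M$ and $p$ and motivates the sparse truncation
schemes used below. The following short Python snippet, given only as an
illustrative sketch, enumerates the truncation set and checks this count.
\begin{verbatim}
# Sketch: enumerate the total-degree truncation set A^{M,p} and check
# that its cardinality equals binomial(M+p, p). Illustration only.
from itertools import product
from math import comb

def truncation_set(M, p):
    # all multi-indices alpha in N^M with alpha_1 + ... + alpha_M <= p
    return [a for a in product(range(p + 1), repeat=M) if sum(a) <= p]

M, p = 3, 4
A = truncation_set(M, p)
assert len(A) == comb(M + p, p)   # 35 basis functions for M = 3, p = 4
\end{verbatim}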
\subsection{Computation of PC coefficients and error estimation}
\label{section2.3}
The computation of the coefficients $\acc{y_{\ve{\alpha}}, \, \ve{\alpha} \in
\mathcal{A}}$ in Eq.~\eqref{eq2.2.2} can be conducted using intrusive
({i.e. } Galerkin scheme) or non-intrusive approaches ({e.g. } projection,
regression and quadrature methods). In the following, we will compute
the coefficients of the expansions using the adaptive sparse PCE
technique proposed by Blatman and Sudret \cite{Blatman2011b} which is a non-intrusive
least-square minimization technique based on the least angle regression
algorithm \cite{Efron2004}. The reader is referred to
\cite{Blatman2011b} for more details on this approach.
The accuracy of the representation is estimated by means of the
leave-one-out (LOO) cross-validation, which allows a fair error
estimation at an affordable computational cost
\cite{Blatman2010b,BlatmanThesis}. The principle of cross validation is
to use different sets of points to build PCEs, then compute the errors
with the actual model.
Assume that one is given a sample set ${\mathcal X} = \acc{\ve{x}^{(i)}, \, i = 1 , \, \dots \,, n}$. The computational model ${\mathcal M}$ is run for each point in ${\mathcal X}$, resulting in the vector of output quantity values ${\mathcal Y} = \acc{y^{(i)}, \, i = 1 , \, \dots \,, n}$.
Setting one point $\ve{x}^{(i)}$ apart from ${\mathcal X}$, one can build a PCE model ${\mathcal M}^{\text{PC}\backslash i}(\cdot)$ from the remaining points ${\mathcal X} \backslash \ve{x}^{(i)} = \acc{\ve{x}^{(1)} , \, \dots \,, \ve{x}^{(i-1)}, \ve{x}^{(i+1)} , \, \dots \,, \ve{x}^{(n)} }$.
The predicted residual error at point $\ve{x}^{(i)}$ reads:
\begin{equation}
\Delta^{(i)} \eqdef {\mathcal M}(\ve{x}^{(i)}) - {\mathcal M}^{\text{PC}\backslash i}(\ve{x}^{(i)}) .
\end{equation}
The LOO error is defined as follows:
\begin{equation}
\widehat{\text{Err}}_{LOO} = \dfrac{1}{n} \sum\limits_{i=1}^{n} \prt{\Delta^{(i)}}^2 .
\end{equation}
At first glance, one could think that evaluating the LOO error is
computationally demanding since it requires $n$~different predicted
residuals, each of them obtained from a different PCE. However, by means
of algebraic derivations, one can compute $\widehat{\text{Err}}_{LOO}$
from a \emph{single} PCE ${\mathcal M}^{\text{PC}}(\cdot)$ built with the full
experimental design as follows \cite{BlatmanThesis}:
\begin{equation}
\widehat{\text{Err}}_{LOO} = \dfrac{1}{n} \sum\limits_{i=1}^{n} \prt{ \dfrac{ {\mathcal M}(\ve{x}^{(i)}) - {\mathcal M}^{\text{PC}}(\ve{x}^{(i)}) }{ 1 - h_i} }^2 ,
\end{equation}
where $h_i$ is the $i^{\text{th}}$ diagonal term of the projection matrix $\mat{A} \, \prt{ \mat{A}^{\textsf T} \mat{A} }^{-1} \mat{A}^{\textsf T} $ and the information matrix $\mat{A}$ is defined by
$\acc{A_{ij} = \ve{\psi}_j(\ve{x}^{(i)}), \, i = 1 , \, \dots \,, n, \, j = 1 , \, \dots \,, \mathrm{card} \, \mathcal{A}}$, {i.e. } the $i^{\text{th}}$ row of $\mat{A}$ is the evaluation of the polynomial basis functions at the point $\ve{x}^{(i)}$ of the experimental design.
Note that in practice, a normalized version of the LOO error is used:
\begin{equation}
\hat{\epsilon}_{LOO} = \dfrac{ \widehat{\text{Err}}_{LOO} }{ \Var{{\mathcal Y}}} ,
\end{equation}
where $\Var{{\mathcal Y}}$ is the empirical variance of the sample of outputs.
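For illustration, the single-fit evaluation of the normalized LOO error can
be sketched as follows, assuming the information matrix $\mat{A}$ defined
above and the vector of model evaluations are available as arrays; this is a
minimal sketch, not the adaptive algorithm of \cite{Blatman2011b}.
\begin{verbatim}
# Minimal sketch of the single-fit LOO estimate; A is the n x card(A)
# information matrix (assumed to have full column rank) and y the vector
# of n model evaluations.
import numpy as np

def normalized_loo_error(A, y):
    coeffs, *_ = np.linalg.lstsq(A, y, rcond=None)   # least-squares fit
    residuals = y - A @ coeffs
    # diagonal of the projection matrix A (A^T A)^{-1} A^T
    h = np.einsum("ij,ij->i", A @ np.linalg.inv(A.T @ A), A)
    err_loo = np.mean((residuals / (1.0 - h)) ** 2)
    return err_loo / np.var(y)
\end{verbatim}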
\subsection{Time-frozen polynomial chaos expansions}
In the context of time-dependent problems, {i.e. } $Y(t) = {\mathcal M}(\ve{X},t)$, the polynomial chaos representation of the response quantity reads:
\begin{equation}
Y(t) = \sum\limits_{\ve{\alpha} \in \mathcal{A}} y_{\ve{\alpha}}(t) \ve{\psi}_{\ve{\alpha}}(\ve{X}) + \epsilon(t)
\label{eq2.3.1}
\end{equation}
in which the notation $y_{\ve{\alpha}}(t)$ indicates the time-dependent
coefficients of PCEs. The representation of a time-dependent quantity by
means of PCEs as in Eq.~\eqref{eq2.3.1} is widely used in the
literature, see {e.g. } \cite{Pettit2006,LeMaitre2009,Gerritsma2010}. At a
given time instant~$t$, the coefficients $\acc{y_{\ve{\alpha}}(t),\ve{\alpha} \in
\mathcal{A}}$ and the accuracy of the PCEs are estimated by means of the
above mentioned techniques (see Section~\ref{section2.3}). The
metamodel of the response is computed independently at each time
instant, hence the name time-frozen PCEs.
We now introduce the use of time-frozen PCEs for computing the time-dependent statistics of the response. The multivariate polynomial chaos functions are orthonormal, {i.e. }:
\begin{equation}
\Esp{ \ve{\psi}_{\ve{\alpha}}(\ve{X}) \, \ve{\psi}_{\ve{\beta}}(\ve{X}) } \eqdef \int\limits_{{\mathcal D}_{X}} \ve{\psi}_{\ve{\alpha}}(\ve{x}) \, \ve{\psi}_{\ve{\beta}}(\ve{x}) \, f_{\ve{X}}(\ve{x}) \, \di \ve{x} = \delta_{\ve{\alpha} \ve{\beta}} \; \forall \ve{\alpha}, \, \ve{\beta} \in {\mathbb N}^M ,
\end{equation}
in which $\delta_{\ve{\alpha} \ve{\beta}}$ is the Kronecker symbol that is equal to 1 if $\ve{\alpha} = \ve{\beta}$ and equal to 0 otherwise. Indeed, each multivariate polynomial is orthogonal to $\ve{\psi}_{\ve{0}}(\ve{X}) = 1$, which means
$\Esp{\ve{\psi}_{\ve{\alpha}}(\ve{X})} = 0 \, \forall \ve{\alpha} \neq \ve{0}$ and $\Var{\ve{\psi}_{\ve{\alpha}}(\ve{X})} = \Esp{ \prt{\ve{\psi}_{\ve{\alpha}}(\ve{X})}^2 } = 1 \; \forall \ve{\alpha} \neq \ve{0}$.
Thus, the time-dependent mean and standard deviation of the response can be estimated by means of a mere post-processing of the truncated PC coefficients (in Eq.~\eqref{eq2.3.1}) with no additional cost as follows:
\begin{equation}
\Esp{Y(t)} \approx \Esp{ \sum\limits_{\ve{\alpha} \in \mathcal{A}} y_{\ve{\alpha}}(t) \ve{\psi}_{\ve{\alpha}}(\ve{X})} = y_{0}(t) ,
\end{equation}
\begin{equation}
\sigma_{Y(t)}^2 = \Var{ Y(t) } \approx \Var{ \sum\limits_{\ve{\alpha} \in \mathcal{A}} y_{\ve{\alpha}}(t) \ve{\psi}_{\ve{\alpha}}(\ve{X}) } = \sum\limits_{\substack{{\ve{\alpha} \in \mathcal{A}} \\ \ve{\alpha} \neq \ve{0} }} y_{\ve{\alpha}}^2(t).
\end{equation}
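In a computational setting, this post-processing reduces to reading off the
coefficient of the constant polynomial and summing the squares of the
remaining ones. A minimal sketch, assuming the coefficients are stored as a
mapping from multi-indices to their time histories, reads:
\begin{verbatim}
# Sketch: time-dependent mean and standard deviation from time-frozen
# PC coefficients; coeffs maps a multi-index tuple to the array y_alpha(t).
import numpy as np

def pce_moments(coeffs):
    M = len(next(iter(coeffs)))             # number of input variables
    zero = (0,) * M                         # the zero multi-index
    mean = np.asarray(coeffs[zero])         # E[Y(t)] = y_0(t)
    var = sum(np.asarray(c) ** 2
              for a, c in coeffs.items() if a != zero)
    return mean, np.sqrt(var)
\end{verbatim}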
\section[Stochastic time-warping PCEs for random oscillations]{Stochastic time-warping polynomial chaos expansions for random oscillations}
\label{sec:tw_theory}
\subsection{Introduction}
An interesting problem emerges in nonlinear oscillating systems possessing a limit cycle\footnote{A limit cycle is a closed, isolated trajectory in the phase space of a self-sustained oscillator. Nearby trajectories either spiral in toward or away from the limit cycle.} which may depend on the uncertain parameters.
Limit cycle oscillations (LCO) represent a class of time-dependent
problems that plays an important role in several fields, see {e.g. }
aerospace engineering \cite{Bunton2000} and mechanical engineering
\cite{Sarrouy2013} among others. The use of PCEs to represent LCO systems
has attracted considerable attention; indeed, almost all novel ideas in
PCEs are applied first to LCO systems or to systems involving periodicity.
For instance, Wan and Karniadakis \cite{Wan2006} used multi-element PCEs whereas
Beran et al. \cite{Beran2006} proposed different methods, namely the use of Haar wavelets
as local bases or of B-spline functions. These approaches aim at
resolving the highly nonlinear behavior of LCO responses in the
stochastic domain. There are also techniques that are designed
specifically for LCO.
Le Ma\^{i}tre et al. \cite{LeMaitre2009} proposed an intrusive time transform of the
trajectories which aims at representing the transformed time-histories
in a small neighborhood of a reference trajectory, {i.e. } to reduce their
variability by making them in-phase. A transformed time line $\tau$ is
introduced, of which the varying clock speed $\dot \tau = \dfrac{\di
\tau}{\di t}$ is adjusted in an intrusive setup at each time step.
This is achieved by minimizing the Euclidean distance between the
distinct trajectories and the reference counterpart.
{The use of a stochastically stretched time variable in a non-intrusive setting
has been investigated by Alexanderian et al. \cite{Alexanderian2012, Alexanderian2014}.
The random responses were preconditioned by random scalings of the time horizon and their amplitudes. Consequently, the scaled responses exhibit similar dynamical features, more precisely they become in-phase.}
From a similar perspective, Witteveen and Bijl \cite{Witteveen2008} interpolated the oscillatory
responses on the phase space to obtain in-phase oscillations. Inspired
by the mentioned approaches, a non-intrusive time transform, which
consists in finding a suitable \emph{stochastic warping} of the time
line to increase the \emph{similarity} between different trajectories in
the transformed (warped) time scale, is introduced in this section. The
proposed approach focuses on increasing the frequency and phase
similarity of the considered trajectories in problems involving
periodicity.
It is worth noting that in the engineering literature, the time-warping
technique has been of interest for decades. In the context of voice
recognition, Sakoe and Chiba \cite{Sakoe1978} first proposed the time-warping to
eliminate the timing differences and obtain maximum coincidences between
two speech patterns. Wang and Gasser \cite{Wang1997} introduced a novel cost function
to determine the time-warping function. Later, Ramsay and Li \cite{Ramsay1998} used
the technique under the name ``curve registration'' for biological data.
The essential idea consists in the registration (or alignment) of
salient curve features by means of a suitable \emph{smooth monotone
transformation} of the temporal variable $t$. The actual analyses are
then carried out on the aligned curves. Note that the same idea can
also be conducted in the spatial domain. For instance,
Bookstein \cite{Bookstein1997} showed particular applications of registering the
outcomes over surfaces or volumes in medical imaging.
Herein, we are adding one dimension to the time-warping technique by
incorporating the effects of uncertainties in the transformation
function. This results in a stochastic time-transform framework. Indeed,
due to the inherent randomness of the stochastic problem, a time
transformation function with deterministic parameters is not suitable.
Therefore, stochastic transform parameters must be used and will be cast
as functions of the original random parameters. The theoretical
foundation of this work was originally presented by Mai and Sudret \cite{MaiIcasp2015}.
\subsection{Stochastic time-warping polynomial chaos expansions}
Consider a dynamical system ({e.g. } a structural dynamic or chemical system) whose behavior is modeled by a system of ordinary differential equations (ODEs):
\begin{equation}
\frac{\di \ve{y}}{\di t} = \ve{f}(\ve{y},\ve{\xi},t) ,
\end{equation}
where the initial condition is $\ve{y} (t=0) = \ve{y}_0$ and the random
vector $\ve{\xi}$ comprises independent second-order random variables
defined over a probability space $(\varOmega, \mathcal{F}, {\mathbb P})$. $\ve{\xi}$
may include the parameters governing the system behavior, {e.g. } masses,
stiffness, damping ratio, reaction parameters, frequency and amplitude
of excitation. The initial condition can also be uncertain, in which
case it becomes a random variable belonging to $\ve{\xi}$. The
time-dependent response of the system is denoted by $\ve{y}(t,\ve{\xi})$.
Without loss of generality, we consider {a generic response of the
uncertain dynamical system}, {e.g. } $y(t,\ve{\xi})$ with the initial condition $y(t=0) = y_0$.
At each time instant, $y(t,\ve{\xi})$ is assumed to be a second-order random
variable. As in \cite{LeMaitre2009,Wan2006, Wan2005,Witteveen2008},
herein we focus on the class of problems when $y(t,\ve{\xi})$ is an
oscillatory response with random frequencies and amplitudes.
The time-dependent response $y(t,\ve{\xi})$ is represented by time-frozen PCEs as:
\begin{equation}
y(t, \ve{\xi}) = \sum\limits_{\ve{\alpha} \in \mathcal{A}} y_{\ve{\alpha}}(t) \ve{\psi}_{\ve{\alpha}}(\ve{\xi}) + \epsilon(t).
\label{eq:timefroPCE}
\end{equation}
A virtual time variable $\tau(t,\ve{\xi})$, which is obtained by a stochastic time-warping, is introduced as follows:
\begin{equation}
\tau(t,\ve{\xi}) = \sum\limits_{i=1}^{N_{\tau}} c_i(\ve{\xi}) \, f_i(t) = F(t, \ve{\xi}),
\label{eq3.3}
\end{equation}
where $\acc{f_i(t), i = 1 , \, \dots \,, N_{\tau}}$ are functions of time $t$ and $\acc{c_i(\ve{\xi}), i = 1 , \, \dots \,, N_{\tau}}$ are coefficients which depend on the input random variables $\ve{\xi}$. The coefficients $c_i(\ve{\xi})$ can be represented by PCEs as:
\begin{equation}
c_i(\ve{\xi}) = \sum\limits_{\ve{\alpha} \in {\mathbb N}^M} {c_i}_{\ve{\alpha}} \, \ve{\psi}_{\ve{\alpha}}(\ve{\xi}),
\label{eq3.4}
\end{equation}
where $\ve{\psi}_{\ve{\alpha}}(\ve{\xi})$ and ${c_i}_{\ve{\alpha}}$ are respectively the orthonormal polynomial functions and the coefficients of the expansion.
The only constraint on the time-warping is that $\tau$ is a strictly monotonically increasing function of $t$ {given a random set of parameters $\ve{\xi}$. This constraint ensures that there is no repeated value on the virtual time line}. Then the inverse transform may be cast as:
\begin{equation}
t(\tau,\ve{\xi}) = F^{-1}(\tau, \ve{\xi}).
\end{equation}
Note that, in the sequel, a linear transform of the form:
\begin{equation}
\tau(t,\ve{\xi}) = k(\ve{\xi}) \, t + \phi(\ve{\xi})
\end{equation}
is considered.
For each realization $\ve{\xi}_0$, {i.e. } each trajectory of the system response, we assume a one-to-one mapping between $t$ and $\tau$. The response trajectory may then be represented in the transformed (warped) time scale by:
\begin{equation}
y(\tau,\ve{\xi}) = \sum\limits_{\ve{\beta} \in {\mathcal B}} y_{\ve{\beta}}(\tau) \ve{\psi}_{\ve{\beta}}(\ve{\xi}) + \epsilon(\tau),
\label{eq:timewarpPCE}
\end{equation}
in which ${\mathcal B}$ is the truncation set of the multi-indices $\ve{\beta}$. The inverse time transform allows one to obtain the PCEs of the response in the physical time scale as follows:
\begin{equation}
y(t,\ve{\xi}) = y(F^{-1}(\tau,\ve{\xi}),\ve{\xi}).
\end{equation}
{It is worth remarking that for complex problems involving a complex
time transform, the inversion may be delicate to evaluate, as
discussed by Alexanderian et al. \cite{Alexanderian2012,
Alexanderian2014}. In practice, one shall make sure that all
realizations are sampled over sufficiently long time horizons (in the
original time scale) so that their counterparts in the transformed
time scale are properly defined over the time interval of interest.}
The objective is to find a suitable time-warping defined by
Eq.~\eqref{eq3.3} and \eqref{eq3.4} so that the cardinality of ${\mathcal B}$
remains small ({i.e. } low-degree PCEs can be used) to achieve an acceptable
error $\epsilon(\tau)$ even at late instants. This can be obtained if
the trajectories $y(\tau(t,\ve{\xi}))$ become in-phase, as suggested by Le Ma\^{i}tre et al.
\cite{LeMaitre2009} and Witteveen and Bijl \cite{Witteveen2008}.
First, a deterministic reference trajectory $y_r(t)$ is introduced. The
stochastic time-warping (Eq.~\eqref{eq3.3}) is determined by maximizing
the similarity between $y(\tau(t,\ve{\xi}))$ and the reference counterpart
$y_r(t)$ for all values of $\ve{\xi}$, which makes the responses become
in-phase. This allows the effective computation of
Eq.~\eqref{eq:timewarpPCE}. Having at hand the time-warping
(Eq.~\eqref{eq3.3}) and the PCEs of the response in the virtual time
line $\tau$ (Eq.~\eqref{eq:timewarpPCE}), one can finally obtain the
PCEs in the physical time line $t$ by conducting the inverse
time-warping.
The proposed non-intrusive time-warping approach is explained in detail in the following. For the sake of clarity, it is graphically summarized in \figref{fig3.1.0}.
\begin{figure}\label{fig3.1.0}
\end{figure}
\begin{itemize}
\item One first chooses a reference trajectory $y_r(t)$ which is for instance obtained by considering the mean values of the input vector $\ve{\xi}$, {i.e. } $y_r(t)=y(t,\Esp{\ve{\xi}})$. In general, $y_r(t)$ may be any realization of the response quantity $y(t)$ obtained with a specific sample $\ve{\xi}_0$. For the numerical case studies considered in the current chapter, the choice of $y_r(t)$ did not affect the accuracy of the final results.
\item Let us now start the time-warping, which consists in transforming the time line with the purpose of increasing the similarity between different realizations of the output $y(t,\ve{\xi})$. Assume that one is given a set of trajectories $y_i(t) \equiv y(t,\ve{\xi}_i), \, i=1 , \, \dots \,, n$ for $n$ realizations of $\ve{\xi}$ corresponding to an experimental design in the input space ${\mathcal D}_{\ve{\varXi}}$.
Then for the realization $\#i$, $i = 1 , \, \dots \,, n$, the following steps are performed:
\begin{itemize}
\item Define a linear time-warping $\tau = k_i \, t + \phi_i$. In
general, the functions $f_i(t)$ in Eq.~\eqref{eq3.3} might be
polynomials of $t$. However, when investigating the problem of
vibration with random frequencies, a linear transform usually
suffices. This is due to the periodicity of the considered response
trajectories. In the intrusive time transform approach
\cite{LeMaitre2009}, although a linear warping function is not
specified for the considered examples, the resulting transformed time
$\tau$ eventually represents a linear relationship when plotted
against $t$. {Alexanderian et al. \cite{Alexanderian2012, Alexanderian2014}
investigated linear-based stretching of the time variable.}
Wang and Gasser \cite{Wang1997} also used a linear
warping function. In particular, given the complexity of the problems
under investigation, use of a linear function facilitates the inverse
transform in the next phase, which is highly convenient. This linear
warping represents two actions, namely scaling and shifting,
respectively driven by the parameters $k_i$ and $\phi_i$. The time
line is stretched (resp. compressed) when $k_i >1$ (resp. $0<k_i < 1$)
and is shifted to the left (resp. to the right) when $\phi_i<0$ (resp.
$\phi_i>0$). In fact, the scaling factor $k_i$ (resp. shifting factor
$\phi_i$) allows to maximize the similarity in frequency (resp. phase)
between the considered trajectories.
\item Determine the parameters $(k_i, \, \phi_i)$ governing the time-warping as the solution of an optimization problem which aims at maximizing the similarity between the response trajectory $y_i(k_i \, t + \phi_i)$ and the reference counterpart $y_r(t)$. The details of the optimization problem, in which a measure of similarity is introduced, will be described in Section~\ref{sec:determine_k_phi}.
\item Represent $y_i(t)$ on the transformed time line $\tau$. For this purpose, one chooses a grid line of $\tau$ with the desired time interval. In fact, the finer the grid is, the smaller is the error introduced by the \emph{interpolation}. The trajectory $y_i(t)$ is projected onto $\tau_i = k_i \, t + \phi_i$ to obtain $y_i(\tau_i)$.
In order to ensure that all transformed time lines $\tau_i$ start at $0$, one uses the transform $\tau_i = \dfrac{k_i \, t_0 + \phi_i}{t_0} \, t$ when $t \leq t_0$. The small value $t_0$ is chosen so that $k_i \, t_0 + \phi_i > 0 \quad \forall i = 1 , \, \dots \,, n $. For instance, $t_0 = 0.2$~s is used for the numerical applications that follow.
Finally the projected trajectory is linearly \emph{interpolated} on the selected time line $\tau$ yielding $y_i(\tau)$.
\end{itemize}
\item One builds PCEs of $k(\ve{\xi})$, $\phi(\ve{\xi})$ and $y(\tau,\ve{\xi})$ using the realizations $\{k_i, \phi_i, y_i(\tau)\}$, $i=1 , \, \dots \,, n$ as the experimental design (or training set):
\begin{equation}
k(\ve{\xi}) = \sum\limits_{\ve{\gamma} \in {\mathcal G}} k_{\ve{\gamma}} \, \ve{\psi}_{\ve{\gamma}}(\ve{\xi}) + \epsilon_k ,
\label{eq:k_vs_xi}
\end{equation}
\begin{equation}
\phi(\ve{\xi}) = \sum\limits_{\ve{\theta} \in {\mathcal T}} \phi_{\ve{\theta}} \, \ve{\psi}_{\ve{\theta}}(\ve{\xi}) + \epsilon_{\phi},
\label{eq3.8}
\end{equation}
\begin{equation}
y(\tau,\ve{\xi}) = \sum\limits_{\ve{\beta} \in {\mathcal B}} y_{\ve{\beta}}(\tau) \ve{\psi}_{\ve{\beta}}(\ve{\xi}) + \epsilon_{y}(\tau).
\label{eq3.9}
\end{equation}
In the above equations, $\ve{\gamma}$, $\ve{\theta}$ and $\ve{\beta}$ are
multi-indices belonging to the truncation sets ${\mathcal G}$, ${\mathcal T}$ and ${\mathcal B}$ of
the expansions. $k_{\ve{\gamma}}$, $\phi_{\ve{\theta}}$ and $y_{\ve{\beta}}(\tau)$
are coefficients computed by means of sparse adaptive PCEs
\cite{Blatman2011b}. $k(\ve{\xi})$ and $\phi(\ve{\xi})$ are scalar
quantities, therefore the computation of their PCE models is
straightforward. However, for the vector-valued response
$y(\tau,\ve{\xi})$, it might be computationally expensive when the number
of discretization points of the $\tau$-line is large. This computational
cost can be reduced significantly by coupling PCEs with the principal
component analysis \cite{BlatmanIcossar2013}. The combination of PCA and
PCEs will be described in detail in Section \ref{sec:pcapce}.
\end{itemize}
\subsection{Determination of time-warping parameters}
\label{sec:determine_k_phi}
This section describes the optimization problem used for determining the parameters $k$ and $\phi$ of the time-warping process.
We first propose a function to measure the similarity between two trajectories $y_1(t)$ and $y_2(t)$:
\begin{equation}
g(y_1(t),y_2(t))= \dfrac{\abs{ \int\limits_{0}^T y_1(t) y_2(t) \di t}}{\| y_1(t) \| \| y_2(t) \| },
\label{eq29}
\end{equation}
in which $\int\limits_{0}^T y_1(t) y_2(t) \di t$ is the inner product of
the two considered time histories and $\| \cdot \|$ is the associated
$L^2$-norm. In practice, the trajectories are discretized and thus, the
inner product (resp. the $L^2$-norm) becomes the classical dot product
between two vectors (resp. the Euclidean norm). By the Cauchy-Schwarz
inequality, this similarity measure always takes values in the interval
$[0,1]$. {For responses of limit cycle oscillation systems which
feature a dominant frequency, the proposed similarity measure} attains
its maximum when the considered trajectories have the same frequency and
phase content. {In the following, constraints on the parameters will
be imposed so that the solution of the optimization problem is
unique.}
The parameters $(k_i, \, \phi_i), i = 1 , \, \dots \,, n$ are determined as the maximizers of the similarity measure between $y_i(\tau)$ and $y_r(t)$. The objective function reads:
\begin{equation}
g(k_i,\phi_i) = \dfrac{\abs{ \int\limits_{0}^T y_i(k_i\,t+\phi_i) y_r(t) \di t}}{\| y_i(k_i\,t+\phi_i) \| \| y_r(t) \| } .
\label{eq:tw_objfunc}
\end{equation}
Note that the optimal warping parameters $(k_i, \, \phi_i)$ are different for each trajectory. This results in varying total durations of the trajectories after the warping process.
This also occurred in the intrusive time transform approach \cite[Figure 4]{LeMaitre2009}.
The objective function is therefore computed on the overlapped duration between the warped trajectory and the reference one.
Let us now examine the solution $(k_i,\phi_i)$ of the proposed optimization problem.
The constraint that $\tau$ is a strictly monotonically increasing function of $t$ requires that $k_i >0$.
In case $y_r(t)$ and $y_i(t,\ve{\xi}_i)$ are both monochromatic signals, the value of $k_i$ that maximizes their similarity in frequency is unique.
However, there are multiple values for the shifting factor $\phi$ that make the considered trajectories in phase. This will be investigated in the next paragraph.
\figref{fig3.2} depicts the objective function $g(k,\phi)$ as a similarity measure between the reference trajectory $y_r(t)=\sin(\pi \, t)$ and a response $y(t)=\sin(2 \, \pi \,t)$. The two trajectories are chosen in such a way that $(k, \phi)=(2,0)$ is the maximizer of $g(k,\phi)$. However, there are three global maxima in the depicted interval $[-1.5,\,1.5]$ of $\phi$. This is due to the fact that in the virtual time line $\tau$, if the transformed trajectory $y(\tau)$ is shifted (whether to the left or to the right) a distance equal to one half of the period $T_r = 2~s$ of the reference counterpart, the similarity measure reaches another global maximum.
In fact, if $T_r/4 \leq \phi \leq T_r/2$ (resp. $-T_r/2 \leq \phi \leq -T_r/4$) maximizes the similarity measure, then $\phi-T_r/2$ (resp. $\phi+T_r/2$) in the interval $[-T_r/4,\,T_r/4]$ is also a maximizer.
In addition, for the sake of simplicity, it is preferable that $\phi$ is as close to $0$ as possible, {i.e. } the time line of the scaled trajectory is shifted as least as possible.
Therefore, the selected value of $\phi$ needs to satisfy the condition that the shifted distance (in time) is not larger than $1/4$ of the period $T_r$ of the reference trajectory $y_r(t)$, {i.e. } $\abs{\phi}\leq T_r/4$. This constraint ensures that the solution is unique. By adopting the constraint on $\phi$, one finds the solution $(k, \, \phi) = (2,\,0)$ for the considered example.
\begin{figure}\label{fig3.2}
\end{figure}
Finally, one can set up the global optimization problem for determining the time-warping parameters as follows:
\begin{equation}
(k_i,\phi_i) = \arg \max\limits_{\substack{k_i \in {\mathbb R}^+ \\ \abs{\phi_i}\leqslant T_r/4}} g(k_i,\phi_i) .
\end{equation}
This problem can be solved by means of global optimization methods.
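As an illustration, the optimization may be sketched as follows. SciPy's
differential evolution is used here merely as an example of a global
optimizer, and the handling of the overlapped duration is simplified (the
warped trajectory is clipped at the boundaries of the time grid).
\begin{verbatim}
# Sketch of the warping-parameter estimation: maximize g(k, phi) between
# y_i(k t + phi) and the reference y_r(t) under k > 0, |phi| <= T_r / 4.
import numpy as np
from scipy.optimize import differential_evolution

def fit_warping(t, y_i, y_r, T_r, k_bounds=(0.1, 10.0)):
    y_i_at = lambda s: np.interp(s, t, y_i)   # linear interpolation of y_i

    def neg_similarity(params):
        k, phi = params
        yw = y_i_at(k * t + phi)              # y_i on the warped grid
        den = np.linalg.norm(yw) * np.linalg.norm(y_r)
        return 0.0 if den == 0 else -abs(np.dot(yw, y_r)) / den

    bounds = [k_bounds, (-T_r / 4.0, T_r / 4.0)]
    res = differential_evolution(neg_similarity, bounds)
    return res.x                              # (k_i, phi_i)
\end{verbatim}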
{At this point, it is worth noting that the similarity metric
(Eq.~\eqref{eq29}) is computed over the time horizon of the simulation
under consideration. Therefore, the parameters of the time transform
are optimized with respect to this metric, {i.e. } in a time-average
sense. It might be of interest to optimize the parameters on shorter
time horizons. This has been recently investigated by Yaghoubi et al.
\cite{Yaghoubi2016} in which a {\em multi-linear} transform is
conducted in the frequency domain. The problem consists in determining
the break-points between the various intervals. In other cases, one
might also consider a bilinear time transform, namely for the
transient (resp. the stationary phase) of the response trajectories.}
\subsection{Principal component analysis and time-warping polynomial chaos expansions}
\label{sec:pcapce}
The instant-wise application of PCEs to model the response in the
transformed time line (Eq.~\eqref{eq3.9}) might lead to an important
computational burden when the discretized vector $\tau$ is of large
length. To overcome this issue, Blatman and Sudret \cite{BlatmanIcossar2013} proposed a
two-step approach which combines principal component analysis (PCA) and
PCEs. The first step consists in conducting PCA to capture the
stochastic features of the random vector-valued response with a small
number of deterministic principal components and the associated
non-physical random variables. The second step relies on representing
the resulting random variables with adaptive sparse PCEs.
Consider a sample set of the response trajectories ${\mathcal Y} = \acc{
y^{(1)}(\tau) , \, \dots \,, y^{(n)}(\tau) }$ represented at the discretized
points $\acc{\tau_1 , \, \dots \,, \tau_{K}}$ in the transformed time line.
By stacking up the discretized responses, one obtains a matrix of
trajectories of size $n \times K$ denoted by $\mat{Y}$. The response can
be represented by PCA as follows:
\begin{equation}
{\ve{y}}(\tau,\ve{\xi}) = \bar{\ve{y}}(\tau) + \sum\limits_{i=1}^{K} A_i(\ve{\xi}) \, \tilde{\ve{v}}_i(\tau),
\end{equation}
where $\bar{\ve{y}}(\tau)$ is the empirical mean vector, $\tilde{\ve{v}}_i(\tau)$ is an empirical eigenvector determined with $\mat{Y}$ and $A_i(\ve{\xi})$ is a finite variance random variable.
Only a few eigenvectors are retained in the decomposition, which leads to:
\begin{equation}
{\ve{y}}(\tau,\ve{\xi}) = \bar{\ve{y}}(\tau) + \sum\limits_{i=1}^{K'} A_i(\ve{\xi}) \, \tilde{\ve{v}}_i(\tau) + \epsilon_1(\tau).
\end{equation}
The number of principal components is selected so that the relative error $1 - \dfrac{ \sum\limits_{i= 1}^{K'} \lambda_i }{ \sum\limits_{i= 1}^{K} \lambda_i}$ is smaller than a prescribed threshold, {e.g. } $\epsilon = 0.01$.
The samples of the random coefficient $A_i(\ve{\xi})$ can be obtained using $\ve{a}_i = (\mat{Y} - \bar{\mat{Y}}) \, \tilde{\ve{v}}_i$ with $\bar{\mat{Y}} = \acc{\bar{\ve{y}}(\tau) , \, \dots \,, \bar{\ve{y}}(\tau) } $ being an $n \times K$ matrix obtained by replicating $n$ times the empirical mean $\bar{\ve{y}}(\tau)$. The computed samples of $A_i(\ve{\xi})$ are then used as the experimental design to compute the PCE of this random coefficient:
\begin{equation}
A_i(\ve{\xi}) = \sum\limits_{\ve{\alpha} \in {\mathcal A}} c_{i, \ve{\alpha}} \, \ve{\psi}_{\ve{\alpha}}(\ve{\xi}) + \epsilon_{2,i}.
\end{equation}
Finally, the response in the transformed time scale is represented by coupling PCA and PCEs as follows:
\begin{equation}
{\ve{y}}(\tau,\ve{\xi}) = \bar{\ve{y}}(\tau) + \sum\limits_{i=1}^{K'} \sum\limits_{\ve{\alpha} \in {\mathcal A}} c_{i, \ve{\alpha}} \, \ve{\psi}_{\ve{\alpha}}(\ve{\xi}) \, \tilde{\ve{v}}_i(\tau) + \epsilon(\tau).
\label{eq:PCAPCEtrares}
\end{equation}
Note that Blatman and Sudret \cite{BlatmanIcossar2013} introduced a
measure of the \emph{upper bound} of the total error induced by the
truncation of the principal component analysis and the approximation of
the random coefficients $A_i(\ve{\xi})$ by PCEs. The reader is referred to
the mentioned publication for more details. Herein this error measure
can be used as an indicator of the accuracy of the computed surrogate
models.
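A minimal sketch of the PCA compression step is given below, assuming the
$n \times K$ matrix $\mat{Y}$ of warped trajectories is available as an
array; the PCE of each retained coefficient $A_i(\ve{\xi})$ is then computed
exactly as for any scalar quantity.
\begin{verbatim}
# Sketch of the PCA step: empirical mean, retained eigenvectors and the
# samples a_i of the random coefficients A_i(xi); Y is the n x K matrix
# of discretized trajectories in the warped time scale.
import numpy as np

def pca_compress(Y, threshold=0.01):
    y_bar = Y.mean(axis=0)                    # empirical mean trajectory
    eigval, eigvec = np.linalg.eigh(np.cov(Y, rowvar=False))
    order = np.argsort(eigval)[::-1]          # eigenvalues in decreasing order
    eigval, eigvec = eigval[order], eigvec[:, order]
    ratio = np.cumsum(eigval) / np.sum(eigval)
    K_prime = int(np.searchsorted(ratio, 1.0 - threshold)) + 1
    V = eigvec[:, :K_prime]                   # retained eigenvectors v_i
    A = (Y - y_bar) @ V                       # samples of A_i(xi), n x K'
    return y_bar, V, A
\end{verbatim}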
\subsection{Predicting random oscillations with time-warping polynomial chaos expansions}
Let us now demonstrate the use of time-warping PCEs to predict responses of the model given a new set of input parameters $\ve{\xi}'$. For the sake of clarity, the procedure is depicted in \figref{fig:tw_predict} and explained in two steps as follows:
\begin{itemize}
\item First, one predicts $k(\ve{\xi}')$, $\phi(\ve{\xi}')$ and $y(\tau, \ve{\xi}')$ using the computed PCEs in equations \eqref{eq:k_vs_xi}, \eqref{eq3.8} and \eqref{eq:PCAPCEtrares}.
\item Second, one maps $y(\tau, \ve{\xi}')$ into $y(t, \ve{\xi}')$ using the inverse time-warping $t=\dfrac{\tau-\phi(\ve{\xi}')}{k(\ve{\xi}')}$. To this end, the discretized trajectory in the warped time $\acc{y(\tau_1, \ve{\xi}') , \, \dots \,, y(\tau_K, \ve{\xi}') }$ is attached to the real time instants $t_1 = \dfrac{\tau_1-\phi(\ve{\xi}')}{k(\ve{\xi}')} , \, \dots \,, t_K = \dfrac{\tau_K-\phi(\ve{\xi}')}{k(\ve{\xi}')}$.
\end{itemize}
\begin{figure}\label{fig:tw_predict}
\end{figure}
\section{Numerical applications}
\label{sec:tw_applications}
Time-warping-based polynomial chaos expansions (PCEs) developed in Section~\ref{sec:tw_theory}
are now applied to five engineering problems, namely a model of rigid body dynamics, the Kraichnan-Orszag three-mode model, the so-called Oregonator model describing the chemical reaction between three species, and a Bouc-Wen oscillator subject to a stochastic sinusoidal excitation. {The fifth problem, the forced vibration of a nonlinear Duffing oscillator, is investigated in the Supplement.}
In each case, time-frozen sparse adaptive PCEs\footnote{The term ``time-frozen sparse adaptive PCEs'' refers to the computation of sparse adaptive PCEs independently at each time instant.} are applied first to show the degradation of the prediction accuracy after a certain time. Time-warping PCEs with simple linear time transforms are then investigated.
The PCE surrogate models are computed using a small number of numerical simulations of the original model as experimental design, then validated on a large independent validation set of size $N_{val} = 10,000$. The accuracy of the time-frozen and time-warping PCE models is judged on the basis of predicting the responses to specific values of input parameters and estimating the time histories of first- and second-order statistics of the responses.
The accuracy of the prediction $\# i$ is indicated by the relative error, {{i.e. } the mean squared error normalized by the variance of the response time series,} which reads:
\begin{equation}
\epsilon_{val, i} = \dfrac{ \sum\limits_{t=1}^{K} (y(t, \ve{\xi}_i) - \hat{y}(t, \ve{\xi}_i))^2 }{\sum\limits_{t=1}^{K} (y(t, \ve{\xi}_i) - \bar{y}(t, \ve{\xi}_i) )^2} ,
\end{equation}
where $\hat{y}(t, \ve{\xi}_i)$ is the output trajectory predicted by PCEs
and {$\bar{y}(t, \ve{\xi}_i) = \dfrac{1}{K} \sum\limits_{t=1}^{K} y(t,
\ve{\xi}_i) $} is the mean value of the actual response time series
$y(t,\ve{\xi}_i)$ which is obtained from the original numerical solver.
The above formula is also used to assess the accuracy of the predicted
time-dependent statistics ({i.e. } mean and standard deviation).
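For completeness, this error measure can be evaluated as in the following short sketch (illustrative Python code, not part of the original implementation):
\begin{verbatim}
import numpy as np

def relative_validation_error(y_true, y_pred):
    """Relative error of one predicted trajectory: mean squared error
    normalized by the variance of the reference time series."""
    num = np.sum((y_true - y_pred) ** 2)
    den = np.sum((y_true - np.mean(y_true)) ** 2)
    return num / den
\end{verbatim}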
These problems are solved in the UQLab framework \cite{Marelli2014},
more specifically using the least angle regression algorithm implemented
in the polynomial chaos expansion module (Marelli and Sudret \cite{UQdoc_09_104}).
\subsection{Rigid body dynamics}
\label{ex1}
We first consider the rotation of a rigid body described by Euler's
equations \cite{Peraire2009}. The conservation of angular momentum
reads:
\begin{equation}
\left\{
\begin{array}{l}
M_x = I_{xx} \, \dot{x} - (I_{yy}-I_{zz}) \, y \, z , \\
M_y = I_{yy} \, \dot{y} - (I_{zz}-I_{xx}) \, z \, x , \\
M_z = I_{zz} \, \dot{z} - (I_{xx}-I_{yy}) \, x \, y ,
\end{array}
\right.
\label{eq4.1.1}
\end{equation}
in which $M_x$, $M_y$, $M_z$ are the external moments, $I_{xx}$, $I_{yy}$, $I_{zz}$ are the moments of inertia and $x$, $y$, $z$ are the angular velocities about the principal axes.
In the case when the rigid body rotates freely under no external excitation, {i.e. } $M_x=M_y=M_z=0$
and $I_{xx}=\dfrac{1-\xi}{2} I_{yy}$, $I_{zz}=\dfrac{1+\xi}{2} I_{yy}$, one obtains the following set of reduced equations:
\begin{equation}
\left\{
\begin{array}{l }
\dot{x}(t) = y(t) \, z(t) ,\\
\dot{y}(t) = \xi \, x(t) \, z(t) ,\\
\dot{z}(t) = -x(t) \, y(t) .
\end{array}
\right.
\label{eq:rigbody}
\end{equation}
The initial conditions are set equal to $x(0)=0$, $y(0)=1$, $z(0)=1$.
Assume that $\xi$ is modeled by a random variable with uniform distribution: $\xi \sim {\mathcal U}(-1,1)$. Suppose a solver of the coupled ODEs is available. For any realization of $\xi$, this solver provides discretized trajectories $\acc{ \acc{x(t_i), y(t_i), z(t_i)}, t_i = 0 , \Delta_t , \, \dots \,, K \, \Delta_t \equiv T }$. In this example, the equations are solved using the Matlab ordinary differential equation solver \texttt{ode45} (Runge-Kutta method, total duration $T= 50~s$, time step $\Delta_t = 0.01$). We aim at building PCEs of the angular velocity $x(t)$ as a function of the random variable $\xi$. Note that the corresponding polynomial functions are from the family of orthonormal Legendre polynomials since $\xi$ is uniformly distributed.
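A possible Python/SciPy counterpart of the Matlab \texttt{ode45} runs described above is sketched below; the function names are illustrative and the solver tolerances are our own choice.
\begin{verbatim}
import numpy as np
from scipy.integrate import solve_ivp

def rigid_body_rhs(t, u, xi):
    """Reduced Euler equations of the freely rotating rigid body."""
    x, y, z = u
    return [y * z, xi * x * z, -x * y]

def sample_trajectory(xi, T=50.0, dt=0.01):
    """One discretized trajectory of the angular velocities for a given xi."""
    t_eval = np.linspace(0.0, T, int(T / dt) + 1)
    sol = solve_ivp(rigid_body_rhs, (0.0, T), [0.0, 1.0, 1.0],
                    t_eval=t_eval, args=(xi,), rtol=1e-8, atol=1e-10)
    return sol.t, sol.y          # sol.y has shape (3, K): x(t), y(t), z(t)

# experimental design: 50 samples of xi ~ U(-1, 1), keeping x(t) only
rng = np.random.default_rng(0)
xi_samples = rng.uniform(-1.0, 1.0, size=50)
X = np.vstack([sample_trajectory(xi)[1][0] for xi in xi_samples])
\end{verbatim}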
\figref{fig4.1.2} depicts a set of 50 trajectories of $x(t)$ obtained
for different realizations of the random variable $\xi$. This set is
used as the experimental design for fitting the time-frozen PCEs. The
trajectories $x(t)$ are oscillatory and fluctuate around zero at different
frequencies. This is a typical example of the problem of stochastic
oscillation with uncertain frequencies \cite{Wan2005, Wan2006}. At the
early instants ($t<10~s$), one can distinguish the individual
trajectories, whereas this is hardly the case at later instants, since
the patterns are mixed up completely. Due to the growing difference in
frequency and phase, $x(t,\xi)$ is more and more non-linear as a
function of $\xi$ for increasing $t$ (\figref{fig4.1.3a}). Consequently,
the probability density function of $x(t)$ becomes bi-modal at late
instants (\figref{fig4.1.3b}). This explains why increasing-degree
time-frozen PCEs are required in order to represent $x(t)$ properly. As
analyzed previously, this is not a sustainable approach since the
required degree of PCEs will certainly become too high at some point.
\begin{figure}\label{fig4.1.2}
\end{figure}
\begin{figure}\label{fig4.1.3a}
\label{fig4.1.3b}
\label{fig4.1.3}
\end{figure}
Time-frozen sparse PCEs are now utilized to model the variability of the response trajectories and to exemplify the deficiency of such an approach. At each instant $t$, an adaptive PCE scheme with candidate polynomials up to total degree 20 is used (Eq.~\eqref{eq:timefroPCE}), based on the 50 data points available at that instant from the experimental design made of the 50 trajectories. The PCE model which results in the smallest leave-one-out (LOO) error is retained. \figref{fig4.1.4} depicts the LOO error of these time-frozen PCEs, which increases with time, showing that the accuracy of the PCE model degrades.
\begin{figure}\label{fig4.1.4}
\end{figure}
For validation purposes, a set of $10,000$ trajectories is computed using the \texttt{ode45} Matlab solver.
\figref{fig4.1.7} depicts two particular response trajectories predicted by time-frozen PCEs versus the actual responses obtained by numerically solving the system of ordinary differential equations \eqref{eq:rigbody}. After $15~s$ (when the LOO error is approximately $10^{-2}$) the PCE prediction deviates significantly from the actual trajectory. In particular, there are signs of instability in the PCE model, {e.g. } the PCE-based predictions at consecutive instants differ noticeably in terms of accuracy.
\begin{figure}\label{fig4.1.7}
\end{figure}
We now consider the time-dependent mean and standard deviation of the response $x(t)$ which are depicted in \figref{fig4.1.8}. In the early time instants ($t<15~s$), time-frozen PCEs represent the statistics with relatively small error compared to Monte Carlo simulation (MCS). However, after $15~s$, the accuracy declines quickly. In particular, PCEs cannot mimic the oscillatory behavior of the standard deviation. Another interpretation is that even degree-20 time-frozen PCEs cannot capture the complex distribution of the response at late time instants.
\begin{figure}\label{fig4.1.8}
\end{figure}
Let us now apply the time-warping approach to pre-process the
trajectories $x(t)$. Provided that the initial condition is equal to 0,
it suffices to use a linear time-warping $\tau = k \, t$. For each
computed realization of the angular velocity $x(t,\xi_i), i = 1 , \, \dots \,,
50$, the parameter $k_i$ is estimated as the maximizer of the
similarity measure described in Eq.~\eqref{eq:tw_objfunc}. Note that the
same 50 trajectories are used as the experimental design for this
approach and the reference trajectory is obtained with the mean value of
the input parameter.
The optimization problem is solved using the global optimization toolbox in Matlab. The function \texttt{fmincon} based upon an interior-point algorithm is used while allowing for a maximum of $2,000$ function evaluations.
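A simplified stand-in for this estimation step is sketched below. The similarity measure of Eq.~\eqref{eq:tw_objfunc} is not reproduced here; instead, the sketch minimizes a plain $L^2$ mismatch between the warped trajectory and the reference trajectory and uses a bounded scalar optimizer in place of \texttt{fmincon}, with arbitrarily chosen bounds on $k$.
\begin{verbatim}
import numpy as np
from scipy.optimize import minimize_scalar

def warp_mismatch(k, t, y, y_ref):
    """Stand-in objective: L2 mismatch between the trajectory expressed in
    the warped time tau = k*t and the reference trajectory on the grid t."""
    y_warped = np.interp(t, k * t, y)   # y viewed as a function of tau = k*t
    return np.sum((y_warped - y_ref) ** 2)

def estimate_k(t, y, y_ref, k_bounds=(0.2, 5.0)):
    """Estimate the scale k_i of the linear warping tau = k*t for one trajectory."""
    res = minimize_scalar(warp_mismatch, bounds=k_bounds, args=(t, y, y_ref),
                          method='bounded')
    return res.x
\end{verbatim}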
Adaptive sparse PCEs for candidate bases up to total degree 20 are used to represent the parameter $k$. The relative LOO error is $3.82 \times 10^{-4}$, which indicates a high accuracy of the PCE model.
The time-warping is carried out using the estimated parameters and the responses are interpolated into the transformed time line $\tau$, leading to in-phase trajectories $x(\tau)$ (see \figref{fig4.1.5a}). As expected, $x(\tau)$ are smooth functions of $\xi$ at all instants, which allows the effective use of PCEs (\figref{fig4.1.5b}).
\begin{figure}\label{fig4.1.5a}
\label{fig4.1.5b}
\label{fig4.1.5}
\end{figure}
Principal component analysis (PCA) is then conducted on the obtained
transformed trajectories. The first $18$ principal components are
retained in order to achieve a PCA truncation error $\epsilon_1 = {
\sum\limits_{i= K' + 1}^{K} \lambda_i } / { \sum\limits_{i= 1}^{K}
\lambda_i}$ smaller than $1 \times 10^{-3}$. The first eight
principal components are plotted in \figref{fig:rigpcacmps} in the Supplement.
\figref{fig:rigpcaerror} depicts the PCA truncation error $\epsilon_1$
as a function of the number of retained principal components, the LOO
error $\epsilon_2$ of the PCE for the coefficient of each principal
component and the upper bound of the total error of the PCA-PCE model.
It shows that the PCA truncation error $\epsilon_1$ decreases
exponentially with the number of retained principal components. Using
PCE to represent the first PCA coefficient, the obtained relative LOO
error is $7.7 \times 10^{-3}$. It is also clear that it is harder to
represent the higher mode PCA coefficients by PCEs, as was observed in
\cite{BlatmanIcossar2013}. However, it is worth noting that most of the
stochastic features of the response are captured by the first few
components.
\begin{figure}\label{fig:rigpcaerror}
\end{figure}
\figref{fig4.1.7} depicts two specific realizations of the angular velocity $x(t)$ predicted by time-warping PCEs, which are plotted together with the predictions by time-frozen sparse PCEs and the actual responses obtained by the numerical solver.
As mentioned previously, one observes that starting from $15~s$, the direct approach encounters instability, which results in inaccurate predictions. The time-warping approach allows one to improve notably the quality of the surrogate model. The predictions by time-warping PCEs are in excellent agreement with the actual responses. A relative error exceeding $0.1$ is recorded in only $79$ simulations among $10,000$ validations.
In \figref{fig4.1.8}, the time-dependent mean and standard deviation of the response are plotted. Time-frozen PCEs allow one to represent the mean trajectory with relatively small discrepancy compared to the trajectory obtained with the MCS. They can faithfully predict the standard deviation at the early instants ($t<15~s$); however, they become suddenly unstable afterwards.
In contrast, time-warping PCEs provide estimates of the statistics that are almost indistinguishable from the MCS estimates. The relative errors between the reference and predicted mean and standard deviation are $7.31 \times 10^{-4}$ and $7.19 \times 10^{-4}$, respectively.
\subsection{Kraichnan-Orszag model}
Let us investigate dynamical systems with random initial conditions, {e.g. }
the so-called Kraichnan-Orszag three-mode problem. It was introduced by Kraichnan
\cite{Kraichnan1960} to model a system of several interacting shear
waves and later was studied by Orszag \cite{Orszag1967} in the case of
Gaussian initial conditions. This model is described by the following
system of ODEs:
\begin{equation}
\left\{
\begin{array}{l}
\dot{x}(t) = y(t) \, z(t) , \\
\dot{y}(t) = z(t) \, x(t), \\
\dot{z}(t) = -2 \, x(t) \, y(t).
\end{array}
\right.
\end{equation}
The initial condition of $x(t)$ is considered stochastic, {i.e. } $x(t=0) =
\alpha + 0.01 \, \xi$ with $\xi \sim {\mathcal U}[-1,1]$ whereas $y(t=0) = 1.0$,
$z(t=0)= 1.0$. Herein, we consider $\alpha = 0.99$ as investigated by Gerritsma et al.
\cite{Gerritsma2010} with the time-dependent PCEs. Note that when
$\alpha$ is in the range $[0,0.9]$, the responses are insensitive to the
initial conditions. For $\alpha \in [0.9, 1]$, there is a strong
dependence of the responses on the initial state. \figref{fig:KO1Dxvst}
depicts the large discrepancies between time-histories of $x(t)$ due to
a minor variability of the initial condition $x(t=0)$.
\begin{figure}\label{fig:KO1Dxvst}
\label{fig:KO1Dxvstau}
\end{figure}
The surrogate model of the response $x(t)$ is computed with time-frozen and time-warping PCEs using an experimental design of size $N = 50$ (\figref{fig:KO1Dxvst}).
On the one hand, adaptive sparse PCEs with candidate bases up to total degree 20 are used for the time-frozen approach. On the other hand, a time-transform scheme $\tau = k \, t$ with one governing parameter is used for the time-warping scheme. The trajectories resulting from the time-warping process are depicted in \figref{fig:KO1Dxvstau}. The adaptive sparse PCE representing $k$ has the relative LOO error $2.2 \times 10^{-6}$. The first $13$ principal components are retained so that $99.9\%$ of the response's variance is explained.
The relative LOO errors of PCEs for the first two components are $9.4 \times 10^{-5}$ and $7 \times 10^{-3}$, respectively.
The time-warping PCE model is then validated by assessing the accuracy of its predictions. \figref{fig:KO1D2predict} plots two specific predictions of the surrogate model which are graphically indistinguishable from the actual time-histories obtained with the original Matlab solver.
Only $1.27\%$ of the $10,000$ predictions exhibit a relative error larger than $0.1$.
Regarding the mean and standard deviation trajectories (\figref{fig:KO1Dmeanstd}), the time-warping approach leads to respective relative errors $2.1 \times 10^{-4}$ and $5.3 \times 10^{-4}$, which shows an excellent agreement between the predictions and the references.
These figures also show that the time-frozen sparse PCEs computed with the same experimental design of size $50$ lead to predictions which are not sufficiently accurate.
\begin{figure}\label{fig:KO1D2predict}
\end{figure}
\begin{figure}\label{fig:KO1Dmeanstd}
\end{figure}
This numerical application illustrates the applicability of the proposed time-warping approach to systems subject to uncertain initial conditions. The excellent performance of the approach is even more impressive given that the responses are strongly sensitive to a minor variability of the initial condition.
\subsection{Oregonator model}
We consider now the Oregonator model which describes the dynamics of a
well-stirred, homogeneous chemical system governed by a three-species
coupled mechanism. Note that this benchmark problem was used by Le Ma\^{i}tre et al.
\cite{LeMaitre2009} to illustrate the intrusive
time-transform approach. This chemical system undergoes an oscillation
governed by the following system of ODEs:
\begin{equation}
\left\{
\begin{array}{l}
\dot{x}(t)= k_1 \, y(t) - k_2 \, x(t) \, y(t) + k_3 \, x(t) - k_4 \, x(t)^2, \\
\dot{y}(t)= -k_1 \, y(t) - k_2 \, x(t) \, y(t) + k_5 \, z(t), \\
\dot{z}(t)= k_3 \, x(t) - k_5 \, z(t),
\end{array}
\right.
\label{eq4.2.1}
\end{equation}
in which $(x,y,z)$ denotes the concentrations of the three species and the
coefficients $k_i, \, i=1 , \, \dots \,, 5$ are the reaction parameters.
Hereafter, all the reaction parameters are considered independent random
variables with uniform and normal distributions (see Table \ref{tab:1}).
It is worth noting that Le Ma\^{i}tre et al. \cite{LeMaitre2009}
considered only $k_4$ and $k_5$ as uniform random variables while fixing
the remaining parameters ({i.e. } $k_1 = 2,\, k_2 = 0.1, \, k_3 = 104$).
The initial condition is $(x_0, y_0, z_0) =(6,000; 6,000; 6,000)$, which
corresponds to a deterministic mixture. We aim at building PCEs of the
concentration $x(t)$ as a function of the random parameters $\ve{\xi} =
(k_1, \, k_2, \, k_3, \, k_4, \, k_5)$.
\begin{table}[!ht]
\caption{Reaction parameters of the Oregonator model}
\centering
\begin{tabular}{|c|c|c|c|c|}
\hline
Parameters & Distribution & Mean & Standard deviation & Coefficient of variation \\
\hline
$k_1$ & Uniform & $2$ & $0.2/\sqrt{3}$ & $0.0577$ \\\hline
$k_2$ & Uniform & $0.1$ & $0.005/\sqrt{3}$ & $0.0289$ \\\hline
$k_3$ & Gaussian & $104$ & $1.04$ & $0.01$ \\\hline
$k_4$ & Uniform & $0.008$ & $4 \times 10^{-4} /\sqrt{3}$ & $0.0289$ \\\hline
$k_5$ & Uniform & $26$ & $2.6/\sqrt{3}$ & $0.0577$ \\
\hline
\end{tabular}
\label{tab:1}
\end{table}
\figref{fig4.3.1a} depicts 50 trajectories among $500$ realizations of $x(t)$, which are used as the experimental design for fitting time-frozen PCEs. One notices that after 5 seconds, the different trajectories are completely out-of-phase. Time-frozen sparse PCEs with candidate polynomials up to total degree 20 are used.
The PCE model actually starts degenerating at $t=3~s$. In particular, \figref{fig4.3.3} shows that when used for predicting the responses, time-frozen PCEs provide negative values of the concentration at some instants, which is non-physical for the considered problem.
\begin{figure}\label{fig4.3.1a}
\label{fig4.3.1b}
\label{fig4.3.1}
\end{figure}
We now apply the proposed non-intrusive time-warping approach to this problem. Note that only \emph{50 trajectories} of $x(t)$ are used as an experimental design for this approach. A linear time-transform $\tau = k\, t + \phi$ is again utilized. The parameters $k$ and $\phi$ are determined and sparse PCEs of $k$ and $\phi$ are then computed.
The relative LOO errors of the PCE models for $k$ and $\phi$ are respectively $4.42 \times 10^{-5}$ and $4.8 \times 10^{-2}$, which indicate a high accuracy. The response trajectories are interpolated into the transformed time line $\tau$ (\figref{fig4.3.1b}) and adaptive sparse PCEs with candidate polynomials up to total degree 20 combined with PCA are then used.
The first $18$ components are retained in PCA to obtain a truncation error $\epsilon_1$ smaller than $1 \times 10^{-2}$.
The PCEs for the first two coefficients have relative errors $7.57 \times 10^{-4}$ and $1.5 \times 10^{-3}$, respectively.
A validation set of $10,000$ trajectories is used to get reference trajectories of the concentration $x(t)$.
\figref{fig4.3.3} depicts two particular realizations computed by the numerical solver (Matlab ordinary differential equation solver \texttt{ode45}, using a time step $\Delta_t = 0.01$ for the total duration $T=40$~s) and predictions by PCEs with and without time-warping. It is shown that without time-warping, PCEs fail to capture the oscillatory behavior of the response.
In contrast, the use of time-warping allows PCEs to predict the response with great accuracy. Only $1.24\%$ of the predictions (among $10,000$ samples) has a relative error larger than $0.1$.
\begin{figure}\label{fig4.3.3}
\end{figure}
\figref{fig4.3.4} depicts the statistics of $x(t)$ predicted by time-frozen and time-warping PCEs in comparison with MCS-based trajectories. Without time-warping, the estimates by PCEs differ significantly from the reference trajectories already from $3$~s. The discrepancies then quickly increase in time. For instance, PCEs without time-warping estimate a decreasing trend in time for the standard deviation, whereas the latter actually oscillates around a constant value (around 1400) with high frequency. By introducing the time-warping pre-processing, one can use sparse PCEs to capture the complex behavior of the time-dependent statistics of the response all along the trajectories.
The relative error for the mean and standard deviation trajectories are $3.11 \times 10^{-4}$ and $3.6 \times 10^{-3}$, respectively.
\begin{figure}\label{fig4.3.4}
\end{figure}
Finally, the time-warping PCE scheme is applied to surrogate the responses $y(t)$ and $z(t)$ of the system using the same experimental design of size $50$ and the same procedure. \figref{fig4.3.5} shows close agreement between the time-warping PCE predictions and the reference functions for two specific trajectories as well as for the mean and standard deviation of $(x,y,z)$ in the state space.
\begin{figure}\label{fig4.3.5}
\end{figure}
\subsection{Forced vibration of a Bouc-Wen oscillator}
In the previous case studies, self-oscillating systems were considered.
In this example, we show that the proposed approach is also applicable
to forced-vibration systems. Let us now consider the single-degree-of-freedom (SDOF) Bouc-Wen
oscillator \cite{Kafali2007} subject to a stochastic excitation. The
equation of motion of the oscillator reads:
\begin{equation}
\left\{
\begin{array}{l}
\ddot{y}(t) + 2 \, \zeta \, \omega \, \dot{y}(t) + \omega^2 (\rho \, y(t) + (1-\rho) \, z(t) ) = - x(t) \, , \\
\dot{z}(t) = \gamma \dot{y}(t) - \alpha \, \abs{\dot{y}(t)} \, \abs{z(t)}^{n-1} z(t) - \beta \, \dot{y}(t) \, \abs{z(t)}^n \, .
\end{array}
\right.
\end{equation}
in which $\zeta$ is the damping ratio, $\omega$ is the fundamental frequency, $\rho$ is the post- to pre-yield stiffness ratio, $\gamma$, $\alpha$, $\beta$, $n$ are parameters governing the hysteretic loops and the excitation $x(t)$ is a sinusoidal function given by $x(t) = A \, \sin (\omega_x \, t)$.
Deterministic values are used for the following parameters of the Bouc-Wen model: $\rho =0$, $\gamma=1$, $n=1$, $\beta=0$. The remaining parameters $\ve{\xi} = \prt{\zeta,\,\omega,\, \alpha,\, A,\, \omega_x}$ are considered independent random variables with associated distributions given in Table~\ref{tab:boucparam}.
\begin{table}[!ht]
\caption{Uncertain parameters of the Bouc-Wen model}
\centering
\begin{tabular}{|c|c|c|c|c|}
\hline
Parameters & Distribution & Mean & Standard deviation & Coefficient of variation \\
\hline
$\zeta$ & Uniform & 0.02 & 0.002 & $0.1$ \\\hline
$\omega$ & Uniform & $2 \, \pi$ & $0.2 \, \pi$ & $0.1$\\\hline
$\alpha$ & Uniform & $50$ & $5$ & $0.1$ \\\hline
$A$ & Uniform & $1$ & $0.1$ & $0.1$ \\\hline
$\omega_x$ & Uniform & $\pi$ & $0.1 \, \pi$ & $0.1$ \\
\hline
\end{tabular}
\label{tab:boucparam}
\end{table}
One aims at representing the oscillator displacement $y(t)$ as a function of the uncertain input parameters using time-frozen and time-warping PCEs. To this end, $100$ simulations of the oscillator are carried out using the Matlab solver \texttt{ode45} with time increment $\Delta_t = 0.005$~s for the total duration $T = 30$~s and initial condition $y(t=0) = 0$, $\dot{y}(t=0) = 0$. The displacement trajectories are depicted in \figref{fig:boucyta}.
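The first-order state-space form used to integrate the oscillator may be sketched as follows (a Python/SciPy stand-in for the Matlab runs; the initial value $z(0)=0$ of the hysteretic variable is our assumption, as it is not stated explicitly above).
\begin{verbatim}
import numpy as np
from scipy.integrate import solve_ivp

def bouc_wen_rhs(t, u, zeta, omega, alpha, A, omega_x,
                 rho=0.0, gamma=1.0, n=1.0, beta=0.0):
    """First-order form of the Bouc-Wen oscillator, state u = (y, y', z)."""
    y, yd, z = u
    x = A * np.sin(omega_x * t)                            # excitation
    ydd = -x - 2.0 * zeta * omega * yd \
          - omega**2 * (rho * y + (1.0 - rho) * z)
    zd = gamma * yd - alpha * np.abs(yd) * np.abs(z)**(n - 1.0) * z \
         - beta * yd * np.abs(z)**n
    return [yd, ydd, zd]

def simulate_bouc_wen(zeta, omega, alpha, A, omega_x, T=30.0, dt=0.005):
    """One displacement trajectory y(t); z(0) = 0 is assumed here."""
    t_eval = np.linspace(0.0, T, int(T / dt) + 1)
    sol = solve_ivp(bouc_wen_rhs, (0.0, T), [0.0, 0.0, 0.0], t_eval=t_eval,
                    args=(zeta, omega, alpha, A, omega_x))
    return sol.t, sol.y[0]
\end{verbatim}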
\begin{figure}\label{fig:boucyta}
\label{fig:boucytb}
\label{fig:boucyt}
\end{figure}
First, the time-frozen sparse PCEs are computed with candidate polynomials up to total degree $20$.
For this case study, a time-warping scheme $\tau = k \, t$ with only one parameter is used. After the time-warping process, the trajectories become in-phase as depicted in \figref{fig:boucytb}. The adaptive sparse PCE representing $k$ has a relative LOO error of $5 \times 10^{-5}$. In order to achieve a truncation error $\epsilon_1$ smaller than $1 \times 10^{-3}$, the first $13$ principal components are retained in PCA.
The relative LOO errors of PCEs for the first two components are $6 \times 10^{-3}$ and $6.21 \times 10^{-2}$, respectively.
Let us validate the accuracy of the time-warping PCE model. In \figref{fig:bouc2predict}, two specific predictions of the PCE model are plotted against the actual responses obtained with the original Matlab solver. A remarkable agreement can be observed.
Among $10,000$ validations, only $4.87\%$ have a relative error larger than $0.1$.
Regarding the time-dependent mean and standard deviation of the oscillator, the time-warping PCE-based estimates match the reference trajectories remarkably well (\figref{fig:boucmeanstd}). Only a minor discrepancy can be observed at the end of the considered time duration $T=30$~s, which is due to the modest number of simulations used as the experimental design. The corresponding relative errors are both $2.4 \times 10^{-3}$. On the contrary, time-frozen PCEs exhibit a low level of accuracy after $5$ seconds.
\begin{figure}\label{fig:bouc2predict}
\end{figure}
\begin{figure}\label{fig:boucmeanstd}
\end{figure}
It is worth noting that in the current case study, we considered uncertainties in both the mechanical properties and the excitation. In particular, complicated hysteretic behavior was investigated. To the best of the authors' knowledge, this is the first time that such a system is considered in the literature of uncertainty quantification for the purpose of deriving time-dependent surrogate models.
\section{Discussion}
The various numerical applications in chemical and mechanical engineering have demonstrated the effectiveness of the time-warping PCE approach, which may be briefly explained as follows.
It was observed that when represented in the space of the temporal variable $t$, the system's responses are increasingly non-linear functions of the uncertain parameters.
When projecting the responses onto a \emph{suitable} space, in this case the transformed time line $\tau$, the resulting trajectories become smooth functions of the uncertain input parameters, whose complexity hardly increases with time. Therefore, PCEs can be applied effectively to the projected responses and represent well the solutions at late instants.
In this paper, a measure of similarity was proposed to define a suitable space for projecting the responses, which exploits the periodicity of the trajectories.
Further investigations are required to clearly determine such a suitable space in a more general case.
In the proposed approach, the virtual time $\tau$ is a function of the uncertain parameters $\ve{\xi}$.
In other words, the basis $\tau$ onto which the responses are projected is not deterministic. {This is a feature shared by the approach based on multiscale stochastic preconditioners \cite{Alexanderian2012, Alexanderian2014}.}
This differs significantly from approaches commonly used in the literature, in which the response trajectories are first projected onto a \emph{deterministic} reduced basis determined a priori using a set of numerical simulations of the system.
This is usually done with a simple \emph{linear} transform, for instance data compression techniques such as principal component analysis or wavelet decomposition.
A closer analysis reveals a particular feature which constitutes a major difference between the classical time-frozen PCE approach and the proposed time-warping method.
The PC coefficients $y_{\ve{\beta}}(\tau)$ in the time-warping representation (\eqrefe{eq:timewarpPCE}) are functions of $\tau$ and therefore depend on $\ve{\xi}$. This contrasts with the representation of time-frozen PCEs (\eqrefe{eq:timefroPCE}), in which $t$ and $\ve{\xi}$ enter the solution in a separated manner.
From a more general perspective, the effectiveness of the approach can be explained by analyzing the functionalities of the time-warping process and PCEs. The most important feature of an oscillatory trajectory consists in its spectral content, which is characterized by the vibration periodicity. The other feature is the temporal content characterized by the vibration amplitude.
The pre-processing step partially handles the dynamics of the system by dealing with the frequency content. Using the time-warping process, the resulting trajectories have similar frequencies and phases. In other words, in terms of frequencies, the transformed trajectories exhibit a similar dynamical behavior, which is close to that of the reference trajectory. The other aspect of the dynamics, {i.e. } the random temporal amplitude of the trajectories, is handled with sparse PCEs. In summary, the dynamics is captured by the time-warping process, whereas the uncertainties are represented by PCEs.
As explained, sparse PCEs alone are not capable of dealing with the
dynamics. The proposed approach illustrates a novel way to solve
stochastic dynamical problems, in which a specialized technique might be
used to capture the dynamical aspect whereas sparse PCEs are used to
propagate uncertainties. From this perspective, Yaghoubi et al. \cite{Yaghoubi2016}
have recently applied the warping-based approach in the frequency domain
to surrogate the frequency response function of mechanical systems.
This principle is further developed by Mai et al. \cite{Mai2016IJ4UQ2} to tackle
more complex problems in which non-linear uncertain structures subject
to stochastic motions are of interest and where the response
trajectories are non-stationary, {i.e. } they do not show pseudo-periodic
oscillations. The projection of the responses onto a special basis made
of auto-regressive functions will allow us to represent the non-linear
dynamical behavior of the systems.
In addition, it is worthwhile mentioning that the proposed methodology is fully non-intrusive, {i.e. } the surrogate models of the systems' response trajectories are obtained by using a pre-computed set of trajectories related to an experimental design. In this respect, the methodology is readily applicable to any other problem featuring randomized limit cycle oscillations.
{Finally, it is noteworthy that the current approach exhibits some limitations. First of all, a linear time transform was used for all the considered numerical applications. More general transforms involving a non-linear dependence of the transformed time on the physical temporal variable, see {e.g. } \cite{Alexanderian2012,Alexanderian2014}, might be considered in future research. A multi-linear stochastic time transform similar to the approach introduced in \cite{Yaghoubi2016} in the frequency domain should be investigated to handle
the responses of uncertain dynamical systems in the transient and stationary phases or
to address complex random polychromatic responses.}
\section{Conclusions and perspectives}
Polynomial chaos expansions (PCEs) represent an effective metamodeling technique which has been efficiently used in several practical problems in a wide variety of domains. It is, however, well known that PCEs fail to model the stochastic responses of dynamical systems at late instants. In this paper, we pointed out the cause of the failure, which is mainly associated with the large dissimilarities between distinct responses introduced by the variability of the uncertain parameters.
To address the above issue, we suggested an approach which consists in representing the responses in a virtual time line where the similarities between different response trajectories are maximized. The virtual time line is obtained by warping, {i.e. } scaling and shifting, the original time grid.
The parameters governing the trajectory-dependent time warping are determined by means of a global optimization problem using an objective function herein introduced to quantify the similarity between distinct trajectories.
The proposed approach allows one to effectively solve complex benchmark problems from mechanics and chemistry using only low-order PCEs.
This approach also suggests that when representing the original response quantities in a suitable transformed space, the complexity of the responses may reduce significantly, thus allowing more effective application of PCEs. In general, pre-processing the experimental design before applying PCEs is a promising approach that needs further investigation.
\appendix
\section*{APPENDIX}
\section{Rigid body dynamics}
This section presents supplementary results of the investigation on the
rigid body system. Figure~\ref{fig:rigpcacmps} presents the first eight
components obtained from the principal component analysis of the
trajectories in the time-warped scale.
\begin{figure}\label{fig:rigpcacmps}
\end{figure}
\section{Oregonator model}
This section presents supplementary results of the investigation on the
Oregonator model. Figure~\ref{fig4.3.1s} presents 50 trajectories
plotted in the original time scale (Fig. (A)) and after time warping
(Fig.~(B)). It is visually obvious that the time-warping pre-processing
aligns well these trajectories with each other.
\begin{figure}\label{fig4.3.1s}
\end{figure}
Figure~\ref{fig4.3.3s} shows two particular trajectories obtained from
the original Oregonator model, as well as their predictions using
time-frozen and time-warping PCEs. Time-frozen PCEs essentially generate
numerical noise after a few seconds, whereas the prediction by
time-warping PCEs is accurate up to the latest time instants.
\begin{figure}\label{fig4.3.3s}
\end{figure}
\section{Duffing oscillator}
Let us consider a non-linear damped single-degree-of-freedom (SDOF)
Duffing oscillator under free vibration, which is described by the
following equation of motion:
\begin{equation}
\ddot{y}(t) + 2 \, \omega \, \zeta \, \dot{y}(t) + {\omega}^2 \,(y(t) + \epsilon \, y^3(t) ) = 0.
\label{eq4.3.1}
\end{equation}
The oscillator is driven by uncertain parameters $\ve{\xi} = \prt{\zeta,\,
\omega,\, \epsilon}$ described in Table~\ref{tab:2}. The initial
conditions are considered deterministic with $y(t=0)=1$ and
$\dot{y}(t=0)=0$. Note that a simplified form of this equation which
represents an undamped linear oscillator was used in other publications
for illustrating the time-dependent generalized polynomial chaos
\cite{Gerritsma2010}, the intrusive time-transform approach
\cite{LeMaitre2009} and the flow map composition PCEs
\cite{Luchtenburg2014}.
\begin{table}[!ht]
\caption{Duffing oscillator -- Probabilistic model of the uncertain parameters}
\centering
\begin{tabular}{|c|c|c|c|c|}
\hline
Parameters & Distribution & Mean & Standard deviation & Coefficient of variation \\
\hline
$\zeta$ & Uniform & $0.03$ & $0.015/\sqrt{3}$ & $0.2887$ \\\hline
$\omega$ & Uniform & $ 2\, \pi$ & $\pi/\sqrt{3}$ & $0.2887$ \\\hline
$\epsilon$ & Uniform & $-0.5$ & $0.25/\sqrt{3}$ & $0.2887$\\
\hline
\end{tabular}
\label{tab:2}
\end{table}
Hereafter, we aim at building PCEs of the displacement $y(t)$ as a function of the random variables $(\zeta, \, \omega, \, \epsilon)$.
First, we use $200$ trajectories of $y(t)$ as experimental design to compute time-frozen sparse PCEs of adaptive degree up to 20.
Next, we use the time-warping approach, which requires only 50 trajectories $y(t)$ as experimental design.
The $50$ trajectories in the original time scale are plotted in \figref{fig:dufyt}. The same trajectories after time-warping are plotted in \figref{fig:dufyvstau}.
A linear time-warping with two parameters, {i.e. } $\tau = k \, t + \phi$, is used for each trajectory.
Using sparse PCEs of degree up to 20, the metamodels of $k$ and $\phi$ are obtained with relative LOO errors $1.87\times 10^{-5}$ and $2.08 \times 10^{-4}$ respectively, which indicates a high level of accuracy.
PCA is then applied, retaining eight principal components, which results in a PCA truncation error smaller than $1 \times 10^{-3}$.
The relative LOO errors of PCE models for the first two components are $8 \times 10^{-4}$ and $4 \times 10^{-3}$, respectively.
\begin{figure}\label{fig:dufyt}
\label{fig:dufyvstau}
\label{fig:dufytau}
\end{figure}
An independent validation set of $10,000$ runs is used to judge the accuracy of the PCE models. \figref{fig4.2.3} presents two specific realizations of the displacement $y(t)$ obtained with two distinct sets of parameters $(\zeta,\omega,\epsilon)$.
Without time-warping, PCEs are capable of predicting the response at the early time instants ($t<3~s$); their accuracy then degrades with time, resulting in incorrect predictions. By introducing the time-warping of the trajectories, PCEs can faithfully capture the damped oscillatory behaviour.
Only $0.18\%$ of $10,000$ predictions exhibits a relative error exceeding $0.1$.
Note that an experimental design of size 200 is used for time-frozen PCEs, whereas only 50 trajectories are used for computing time-warping PCEs.
This emphasizes the fact that the time-warping pre-processing of the response allows one to build accurate PCEs at an extremely small computational cost.
\begin{figure}\label{fig4.2.3}
\end{figure}
In terms of time-dependent statistics (\figref{fig4.2.4}), time-frozen PCEs can predict the mean trajectory rather well, but fail to represent the standard deviation after the early instants ($t>3~s$). In contrast, the time-warping approach provides excellent accuracy on the mean and standard deviation time histories. The relative discrepancies between the mean and standard deviation time histories predicted by time-warping PCEs and the reference trajectories are $3.27 \times 10^{-5}$ and $3.47 \times 10^{-4}$, respectively.
\begin{figure}\label{fig4.2.4}
\end{figure}
\end{document}
\begin{document}
\large
\renewcommand{\baselinestretch}{1.2}
\normalsize
\begin{abstract}We prove that Yang-Mills connections on a surface are characterized as those with the property that the holonomy around homotopic closed paths only depends on the oriented area between the paths. Using this we give an alternative proof of a theorem of Atiyah and Bott that the Yang-Mills connections on a compact orientable surface can be characterized by homomorphisms to the structure group from an
extension of the fundamental group of the surface. In addition, for $M = S^2$ we obtain the results that the Yang-Mills connections on $S^2$ are isolated and correspond to the
conjugacy classes of closed geodesics through the identity in the structure group.
\end{abstract}
\maketitle
In 1954 Kobayashi [K] showed that connections on principal $G$-bundles over a manifold $M$ can be defined in terms of their parallel transport as homomorphisms from the group of closed paths of $M$ to the structure group $G$. More recently Atiyah and Bott [AB] showed that the Yang-Mills connections on a compact orientable surface can be characterized by homomorphisms to $G$ from an extension of the fundamental group of $M$. The purpose of this paper is to present a new proof of the result of Atiyah and Bott, using the path group formulation for connections. We show that Yang-Mills connections are characterized as those with the property that the holonomy around homotopic closed paths only depends on the oriented area between the paths. In addition for $M = S^2$ we easily obtain the result that the Yang-Mills connections on $S^2$ are isolated, a result obtained in [FH] by other means. We can see, too, that the equivalence classes of Yang-Mills connections on $S^2$ are in one-to-one correspondence with the conjugacy classes of closed geodesics of $G$ through the identity. This was described in the introduction of [AB] and worked out in detail in [FH].
\pagebreak
\section{The path group description of connections}
Fix a base point $x_0$ in $M$. The path group is defined with reference to the base point but up to isomorphism it is independent of the base point. The path group consists of equivalence classes of closed, piecewise smooth paths of $M$ where a closed path is the oriented image of a piecewise smooth, base point preserving map from $S^1$ to $M$. It may be thought of as the loop space with the operations of concatenation and reversal to make it into a group. A path may be reparameterized by choosing a different map with the same image as long as the orientation does not change. The group operation is concatenation so that $\lambda_1\lambda_2$ means ``traverse $\lambda_2$ and then traverse $\lambda_1$.'' The inverse of a path $\lambda$ is obtained by reversing the orientation.
This means that $\lambda\lambda^{-1}$ must be identified with the constant path at
$x_0$ . It also follows that if $\lambda^*:[0,1] \rightarrow M$ is a parameterization of $\lambda$ with the property that there exist $0 \le t_1 \le t_2 \le t_3 \le 1$ such that $\lambda^*$ restricted to $[t_1,t_2]$ has exactly the same image as $\lambda^*$ restricted to $[t_2,t_3]$ but with opposite orientations, then $
\lambda$ is equal in the path group to the path parameterized by
\[
[0,1] \rightarrow M:t \mapsto
\begin{cases}\lambda^*(t),\quad &0 \le t \le t_1, \\
\lambda^*(t_1), \quad &t_1 \le t \le t_3, \\
\lambda^*(t), \quad & t_3 \le t \le 1.
\end{cases}
\]
The retraced piece of the path has been clipped.
Let $\Phi(M)$ denote the path group. It is a topological group with the compact-open topology. The fundamental group $\pi_1(M)$ is the group of path components of $\Phi(M)$. The main result of [K] is the following.
\begin{theorem} The equivalence classes of connections on principal $G$-bundles over $M$ are in one-to-one correspondence with the conjugacy classes of continuous homomorphisms from $\Phi(M)$ to $G$.
\end{theorem}
\begin{proof} This is an outline of the main steps. From a principal $G$-bundle with a connection the parallel transport around closed paths based at $x_0$ defines a homomorphism
$\rho:\Phi(M) \rightarrow G$ called the holonomy. However, equivalent connections define holonomy maps which are conjugate ($\rho_1$ and $\rho_2$ are conjugate if there exists $g \in G$ so that
$\rho_1(\lambda) = g\rho_2(\lambda)g^{-1}$ for all $\lambda \in \Phi(M)$). In the other direction, given $\rho:\Phi(M) \rightarrow G$ we construct a principal $G$-bundle $P$ and a connection as follows: let $S(M)$ be the space of open paths based at $x_0$, i.e., oriented images of piecewise smooth maps $\sigma :[0,1] \rightarrow M$ with
$\sigma(0) = x_0$. Now $\Phi(M)$ acts on the right on $S(M)$ since $\sigma\lambda$ is an open path based at $x_0$. (First traverse the closed path $\lambda$ and then $\sigma$.) Define an action of $\Phi(M)$ on $S(M) \times G$ by $(\sigma,g)\cdot \lambda = (\sigma\lambda,\rho(\lambda)^{-1}g)$. Define $P := (S(M) \times G) / \Phi(M)$ to be the orbit space of this action. In fact, $S(M)$ is a principal $\Phi(M)$-bundle over $M$, where the projection
$S(M) \rightarrow M$ maps a path to its endpoint, and $P$ is just the principal $G$-bundle arising from the homomorphism of structure groups $\rho:\Phi(M) \rightarrow G$. To describe the connection on $P$
we describe the horizontal lift of any curve $c:[0,1]\rightarrow M$ with an initial point $p_0 \in P_{c(0)}$.
Let $c(0) = y_0$ , $c(1) = y_1$, and suppose $p_0$ is the equivalence class $[(\sigma_0,g)]$ where $\sigma_0$ is a path from $x_0$ to $y_0$ and $g \in G$. For each $t \in [0,1]$ define the path $\sigma_t$ to be $\sigma_0$ followed by the segment of $c$ between $c(0)$ and $c(t)$. Then the horizontal lift of the curve $c$ is $\tilde{c} : [0,1] \rightarrow P : t \mapsto [(\sigma_t , g)]$. It is straightforward to check that $\tilde{c}$ is well-defined and that the holonomy around a closed path $\lambda$ is given by $\rho(\lambda)$.
\end{proof}
\section{Yang-Mills connections}
Put a Riemannian metric on $M$. Let $P \rightarrow M$ be a principal $U(n)$-bundle. (More generally we could consider $G$-bundles for any compact Lie group $G$; the proofs that follow will work in that generality, but we will explicitly use $G = U(n)$ in this paper.) Let
$\langle X,Y \rangle := \mathrm{tr\,} XY^*$ be the invariant inner product on the Lie algebra $\mathfrak{u}(n)$ of skew-Hermitian matrices.
These are the ingredients necessary to define the Yang-Mills functional on the space of unitary connections of $P \rightarrow M$, namely the integral over $M$ of the norm squared of the curvature. The Yang-Mills connections are, by definition, the critical points of the Yang-Mills functional. Flat connections are zeros of the functional and are obviously the absolute minima on a bundle admitting flat connections.
The Euler-Lagrange equation asserting that the derivative of the Yang-Mills functional vanishes at a critical connection is the first of the Yang-Mills equations:
\[ \begin{cases} d_A \,*\!F ( A ) = 0 \\ d_A \; F(A)=0 \end{cases} \]
The second is the Bianchi identity that holds for all connections. The notation here is that of [AB]: $A$ is a connection, $d_A$ its covariant derivative, $F(A)$ the curvature of $A$, which is a section in $\Omega^2(M,\mathrm{ad} P)$, and $*$ is the Hodge star operator
\[ *:\Omega^k(M,\mathrm{ad\,} P) \rightarrow \Omega^{n-k}(M,\mathrm{ad\,} P) \]
extended from scalar forms to $\mathrm{ad\,} P$-valued forms using the invariant inner product of $\mathfrak{u}(n)$.
Define the subgroup $\Phi_\omega(M) \subset \Phi(M)$ to consist of the contractible paths enclosing area zero. More precisely, $\sigma$ is in $\Phi_\omega(M)$ when $\int_S \omega = 0$ where $S$ is the interior of $\sigma$. The quotient group $\Phi(M) / \Phi_\omega(M)$ is the group of equivalence classes of closed paths; two
paths are equivalent if they are homotopic and the area between them is zero. The main result of this paper is the following characterization of Yang-Mills connections by their holonomy.
\begin{theorem}The gauge equivalence classes of Yang-Mills connections on all principal $U(n)$-bundles over $M$ are in one-to-one correspondence with the conjugacy classes of
homomorphisms from $\Phi(M) / \Phi_\omega(M)$ to $U(n)$.
\end{theorem}
\begin{proof} First we prove that for an irreducible connection the following two statements are equivalent:
\begin{enumerate}
\item$A$ is a Yang-Mills connection on a $U(n)$-bundle.
\item $F(A) = i\lambda I_n \otimes \omega$ for some $\lambda \in \mathbf{R}$.
\end{enumerate}
(1) $\Rightarrow$ (2)\; Consider an irreducible connection $A$ satisfying $d_A\, *F(A) = 0$, with holonomy representation $\rho: \Phi(M) \rightarrow U(n)$, also irreducible. Then $*F(A) \in \Omega^0(M, \mathrm{ad\,} P)$ is an infinitesimal automorphism of the connection $A$; that means it is in the Lie algebra of the automorphism group of $A$, but that is just the subgroup of $U(n)$ that stabilizes the representation $\rho$ under conjugation. (More precisely the two groups are identified because a covariant constant section is determined by its value at a single point of $M$.) Since $\rho$ is irreducible its stabilizer is the subgroup isomorphic to $U(1)$ consisting of the scalar multiples of the identity, the center of $U(n)$. This means $*F(A)$ has the form $i\lambda I_n$ for some $\lambda \in \mathbf{R}$. Assume that the area element $\omega$ has been normalized to have total area 1. Then $F(A) = i\lambda I_n \otimes \omega \in \Omega^2(M,\mathrm{ad\,}P)$.
(2) $\Rightarrow$ (1)\; $d_A\,*\!F(A)=d_A*\!(i\lambda I_n \otimes \omega) = d_A(i\lambda I_n)=0$.
A connection on a $U(n)$-bundle can be split into a direct sum of irreducible connections. The holonomy representation, the curvature and the Hodge star operator also split in the same way, so that a connection is Yang-Mills if and only if each of its irreducible components is Yang-Mills. Next we show the equivalence of the following three statements for a connection $A$ on a $U(n)$-bundle.
\begin{enumerate}
\setcounter{enumi}{2}
\item The holonomy of $A$ factors through $\Phi(M) / \Phi_\omega(M)$.
\item $F(A) = \Lambda \otimes \omega$ for some $\Lambda \in \mathfrak{u}(n)$.
\item $A$ is a Yang-Mills connection.
\end{enumerate}
(3) $\Rightarrow$ (4)\; Let $\Phi_0(M) \subset \Phi(M)$ be the subgroup of contractible paths. This subgroup is the connected component of the identity and contains the subgroup $\Phi_\omega(M)$. When the genus of $M$ is positive, the quotient group $\Phi_0(M) /\Phi_\omega(M)$ is isomorphic to $\mathbf{R}$, since the enclosed area characterizes each coset. When $M=S^2$ the quotient is isomorphic to $U(1)$. (See Theorem 2.3.) In either case the restricted holonomy is a homomorphism
$\overline{\rho}:\Phi_0(M) /\Phi_\omega(M) \rightarrow U(n)$ and hence describes a one-parameter subgroup of $U(n)$. Let
$\Lambda \in \mathfrak{u}(n)$ be the infinitesimal generator of the one-parameter subgroup $\overline{\rho}$, so that
$\overline{\rho}(t) = \exp(t\Lambda)$. For a matrix group $\Lambda = \lim_{t \rightarrow 0}(\rho(t)-I)/t$. We will show that $F(A) = \Lambda \otimes \omega$.
At $x_0 \in M$ let $u, v$ be tangent vectors. Recall that $F(A)(u,v)$ is given by infinitesimal parallel translation around the rectangle spanned by $u$ and $v$. Choose coordinates $(x_1, x_2 )$ so that the area form $\omega = dx_1 \wedge dx_2$ in a neighborhood of $x_0$ (Darboux's Theorem). For each $t$ in some interval containing $0$, define $\sigma_t$ to be the closed parallelogram spanned by $tu$ and $tv$. Then $F(A)(u,v) = \lim_{t \rightarrow 0} (\rho(\sigma_t ) - I)/t^2$ . Now $\rho(\sigma_t ) = \overline{\rho}(\omega(tu,tv)) = \overline{\rho}(t^2 \omega(u,v))$, since the area of the parallelogram spanned by $u$ and $v$ is $\omega(u,v)$. Making the substitution $s = t^2 \omega(u,v)$, the limit becomes
\[ \lim_{s \rightarrow 0} \frac{\overline{\rho}(s) - I}{s}\omega(u,v) = \Lambda \omega(u,v).\] Therefore $F(A) = \Lambda \otimes \omega$.
(4) $\Rightarrow$ (5) \; $d_A\,*\!F(A)=d_A\,*\!(\Lambda \otimes \omega) =d_A(\Lambda)=0$.
(5) $\Rightarrow$ (3)\; By splitting into irreducible components we may assume that $A$ is an irreducible Yang-Mills connection and hence that $F(A) = i\lambda I_n \otimes \omega$ for some $\lambda \in \mathbf{R}$ by the
equivalence of (1) and (2) above. The formula shows that the span of the curvature $\{F(A)(u,v) \,|\, u,v \in T_{x_0} M\}$ is the multiples of the identity, and hence by the Holonomy
Theorem [KN], the Lie algebra of the restricted holonomy group is isomorphic to $\mathbf{R}$.
The holonomy around a contractible path $\sigma : [0,1] \rightarrow M$ is $\exp( \int_0^1 \sigma^* A) = \exp(\int_\sigma A) = \exp(\int_S dA)$ by Stokes’ Theorem, where $\sigma$ is the boundary of $S$. Since the restricted holonomy algebra is abelian, $dA$ is the curvature of $A$, so the holonomy around $\sigma$ is $\exp(\int_S F(A)) = \exp(i \lambda \,\mathrm{area}(S))\cdot I_n$. If $\sigma \in \Phi_\omega(M)$, then $\mathrm{area}(S)=0$ and so $\sigma \in \mathrm{Ker}\, \rho$.
\end{proof}
Atiyah and Bott have shown that the Yang-Mills connections on a Riemann surface $M$ can be characterized as coming from representations of a central extension of $\pi_1(M)$. A brief summary of what is contained in \S 6 of [AB] is the following.
Let $M$ be a compact orientable surface of genus $g \ge 1$ endowed with a Riemannian metric (the complex structure of the Riemann surface is not used). The fundamental group
$\pi_1(M)$ has the standard presentation using $2g$ generators $\alpha_1,\dots,\alpha_g,\beta_1,\dots,\beta_g$ and one relation $\prod_{i=1}^g[\alpha_i,\beta_i]=1$. Define the central extension of $\pi_1(M)$ by $\mathbf{Z}$ using an additional generator $J$ that commutes with the generators $\alpha_i$, $\beta_i$ and satisfies the relation
$\prod_{i=1}^g[\alpha_i,\beta_i]=J$.
The subgroup generated by $J$ is isomorphic to $\mathbf{Z}$ and is the normal
subgroup of this extension, which is denoted by $\Gamma$. Thus we have the exact sequence
\[ 0 \rightarrow \mathbf{Z} \rightarrow \Gamma \rightarrow \pi_1(M) \rightarrow 0. \]
Now extend the center of $\Gamma$ from $\mathbf{Z}$ to $\mathbf{R}$ to obtain the group $\Gamma_{\mathbf{R}}$ that fits into the exact sequence
\[ 0\rightarrow \mathbf{R}\rightarrow \Gamma_{\mathbf{R}} \rightarrow \pi_1(M) \rightarrow 0.\]
\begin{theorem}$\,$ If $M$ is a surface of genus $g \ge 1$, then the groups $\;\Gamma_{\mathbf{R}}\,$ and $\;\Phi(M)/\Phi_\omega(M)\;$ are isomorphic.
\end{theorem}
\begin{proof} The natural projection $\Phi(M)/\Phi_\omega(M) \rightarrow \pi_1(M)$ has kernel $\Phi_0(M)/\Phi_\omega(M)$ isomorphic to $\mathbf{R}$. Viewing $M$ as being constructed from the identification of the edges of a $4g$-sided polygon, with the edges labeled by the generators according to the relation, we see that the contractible path $\prod_{i=1}^g[\alpha_i,\beta_i]$ encloses the total area of $M$, normalized to be 1, and
so its class in $\Phi_0(M)/\Phi_\omega(M)$ is identified with $1 \in \mathbf{R}$. Thus $\Phi(M)/\Phi_\omega(M)$ is an
extension of $\pi_1(M)$ by $\mathbf{R}$ in exactly the same way that $\Gamma_{\mathbf{R}}$ is constructed by Atiyah and Bott.
\end{proof}
\begin{theorem} (Genus zero) The group $\Phi(S^2 )/\Phi_\omega(S^2)$ is isomorphic to $U(1)$.
\end{theorem}
\begin{proof} On the sphere all paths are contractible so that $\Phi_0(S^2 ) = \Phi(S^2)$. The interior of the path $\sigma$ is the exterior of $\sigma ^{-1}$ and so the area enclosed by $\sigma\sigma ^{-1}$, which is the total area of 1, must also be considered as area 0. Therefore the quotient group $\Phi(S^2)/\Phi_\omega(S^2)$ is not isomorphic to $\mathbf{R}$ as it is for surfaces of positive genus, but instead is isomorphic to
$\mathbf{R}/\mathbf{Z}$ or $U(1)$.
\end{proof}
From this we recover the results of [FH] that the gauge equivalence classes of Yang-Mills connections on $S^2$ are in one-to-one correspondence with the conjugacy classes of
homomorphisms from $U(1)$ to $U(n)$, which are the closed geodesics through the identity of $U(n)$, and that they are isolated. These results also hold for any compact Lie group $G$ in
place of $U(n)$.
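To make this correspondence explicit, recall the standard fact that every continuous homomorphism $\rho:U(1) \rightarrow U(n)$ is conjugate to a diagonal one with integer weights,
\[ \rho\bigl(e^{2\pi i t}\bigr) = g\,\mathrm{diag}\bigl(e^{2\pi i n_1 t},\dots,e^{2\pi i n_n t}\bigr)g^{-1}, \qquad n_1 \ge \dots \ge n_n \in \mathbf{Z},\ g \in U(n), \]
so each equivalence class of Yang-Mills connections on $S^2$ is labelled by the integer tuple $(n_1,\dots,n_n)$, and the curve $t \mapsto \exp(t\Lambda)$ with $\Lambda = 2\pi i\,\mathrm{diag}(n_1,\dots,n_n)$ is the corresponding closed geodesic through the identity.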
\begin{proposition} Yang-Mills connections on $S^2$ are isolated. More precisely, the space $\mathrm{Hom}(U(1),G) / G $ of equivalence classes of Yang-Mills connections is discrete.
\end{proposition}
\begin{proof} If $H$ and $G$ are Lie groups, then the Zariski tangent space at the equivalence class
$[\rho] \in \mathrm{Hom}(H,G) / G$ can be identified with $H^1(H,\mathfrak{g})$ where $\mathfrak{g}$ is an $H$-module via $\mathrm{Ad} \circ \rho$ and $\mathrm{Ad}$ is the adjoint action of $G$ on $\mathfrak{g}$. Now when $H$ is compact its cohomology groups vanish, and $\mathrm{Hom}(H,G) / G$ has zero-dimensional tangent spaces. Notice that it is the compactness of $U(1)$ that matters and not the compactness of $G$.
\end{proof}
\end{document}
\begin{document}
\FirstPageHeading
\ShortArticleName{A Spectral Triple for a Solenoid Based on the Sierpinski Gasket}
\ArticleName{A Spectral Triple for a Solenoid Based\\ on the Sierpinski Gasket\footnote{This paper is a~contribution to the Special Issue on Noncommutative Manifolds and their Symmetries in honour of~Giovanni Landi. The full collection is available at \href{https://www.emis.de/journals/SIGMA/Landi.html}{https://www.emis.de/journals/SIGMA/Landi.html}}}
\Author{Valeriano AIELLO~$^{\rm a}$, Daniele GUIDO~$^{\rm b}$ and Tommaso ISOLA~$^{\rm b}$}
\AuthorNameForHeading{V.~Aiello, D.~Guido and T.~Isola}
\Address{$^{\rm a)}$~Mathematisches Institut, Universit\"at Bern, Alpeneggstrasse 22, 3012 Bern, Switzerland}
\EmailD{\href{mailto:[email protected]}{[email protected]}}
\Address{$^{\rm b)}$~Dipartimento di Matematica, Universit\`a di Roma ``Tor Vergata'', I--00133 Roma, Italy}
\EmailD{\href{mailto:[email protected]}{[email protected]}, \href{mailto:[email protected]}{[email protected]}}
\ArticleDates{Received June 23, 2020, in final form February 10, 2021; Published online March 02, 2021}
\Abstract{The Sierpinski gasket admits a locally isometric ramified self-covering. A semifinite spectral triple is constructed on the resulting solenoidal space, and its main geometrical features are discussed.}
\Keywords{self-similar fractals; noncommutative geometry; ramified coverings}
\Classification{58B34; 28A80; 47D07; 46L05}
\renewcommand{\thefootnote}{\arabic{footnote}}
\setcounter{footnote}{0}
\section{Introduction}
In this note, we introduce a semifinite spectral triple on the $C^*$-algebra of continuous functions on the solenoid associated with a self-covering of the Sierpinski gasket. Such a triple is finitely summable, its metric dimension coincides with the Hausdorff dimension of the gasket, and the associated non-commutative integral coincides up to a constant with a Bohr--F{\o}lner mean on the solenoid, hence reproduces the suitably normalized Hausdorff measure on periodic functions. The open infinite Sierpinski fractafold with a unique boundary point considered by Teplyaev~\cite{Tep} embeds continuously as a dense subspace of the solenoid, and the Connes distance restricted to such subspace reproduces the geodesic distance on such fractafold.
On the one hand, this shows that our spectral triple describes aspects of both local and coarse geometry~\cite{RoeLN}.
On the other hand, this implies that the topology induced by the Connes distance, being non compact, does not coincide with the weak$^*$-topology on the states of the solenoid algebra, as we call the $C^*$-algebra of continuous functions on the solenoid. This means that the solenoid, endowed with our spectral triple, is not a quantum metric space in the sense of Rieffel~\cite{Rieffel}.
Related research concerning projective limits of (possibly quantum) spaces and the associated solenoids appeared recently in the literature. In the framework of noncommutative geometry, we~mention:
\cite{LaPa}, where projective families of compact quantum spaces have been studied, showing their convergence to the solenoid w.r.t.\ the Gromov--Hausdorff propinquity distance;
\cite{AGI01}, where, in the same spirit as in this note, a semifinite spectral triple has been associated with the projective limit generated by endomorphisms of $C^*$-algebras associated with commutative and noncommutative spaces;
\cite{DGMW}, where a spectral triple on the stable Ruelle algebra for Wieler solenoids has been considered and its unbounded KK-theory has been studied, based on the Morita equivalence between the stable Ruelle algebra and a Cuntz--Pimsner algebra. In the same paper these techniques are used for the study of limit sets of regular self-similar groups (cf.~\cite{Nekra}).
When fractals are concerned, we mention the projective family of finite coverings of the octahedron gasket considered in~\chiite{Stri2009}, where, as in our present situation, an intermediate infinite fractafold between the tower of coverings and the projective limit is considered. Periodic and almost periodic functions on the infinite fractafold are considered, and a Fourier series description for the periodic functions is given, based on periodic eigenfunctions of the Laplacian (cf.\ also~\chiite{RuStri} for higher-dimensional examples). Let us remark that such coverings, as the ones considered in~this paper, are not associated with groups of deck transformations.
The starting point for the construction of this paper is the existence of a locally isometric ramified three-fold self-covering of the Sierpinski gasket with trivial group of deck transformations. Such self-covering gives rise to a projective family of coverings, whose projective limit is by definition a solenoid. Dually, the algebras of continuous functions on the coverings form an~injective family, whose direct limit (in the category of $C^*$-algebras) is the solenoid algebra.
In~\chiite{AGI01} we already considered various examples of self-coverings or, dually, of endomorphisms of some $C^*$-algebras, most of which were regular finite self-coverings. There we constructed a~spectral triple on the solenoid algebra as a suitable limit of~spectral triples on the algebras of continuous functions on the coverings. Given a spectral triple on the base space, attaching a~spectral triple to a finite covering is not a difficult task, and in our present case consists simply in ``dilating'' the triple on the base gasket so that the projections are locally isometric. However, there is no commonly accepted procedure to define a limit of spectral triples. Since the method used in~\chiite{AGI01} cannot be used here (see below), we follow another route, in a sense spatializing the construction, namely showing that there exists an open fractafold which is intermediate between the projective family of coverings and the solenoid.
More precisely, such fractafold space turns out to be an~infinite covering of each of the finite coverings of the family, and embeds in~a~continuous way in the solenoid. In this way all the algebras (and their direct limit) will act on a suitable $L^2$-space of the open fractafold, as do the Dirac operators of the associated spectral triples. In~this way the limiting Dirac operator is well defined, but the compact resolvent property will be~lost.
Let us notice here that we are not constructing a spectral triple on the open fractafold, where a weaker compact resolvent property (cf.~\chiite[Chapter~IV, Remark~12]{ConnesBook}) is retained, namely $f(D^2+I)^{-1/2}$ is a compact operator, where $D$ is the Dirac operator and $f$ is any function with compact support on the fractafold.
Since we are constructing a spectral triple on the solenoid, which is a compact space, the weaker form does not help.
In order to recover the needed compactness of the resolvent, we use a procedure first proposed by J. Roe for open manifolds with an amenable exhaustion in~\chiite{Roe,Roe-2}, where, based on the observation that the von~Neumann trace used by Atiyah~\chiite{Atiyah} for his index theorem for covering manifolds can be reformulated in the case of amenable groups via the F{\o}lner condition, he considered amenable exhaustions on open manifolds and constructed a trace for finite-propagation operators acting on sections of a fiber bundle on the manifold via a renormalization procedure.
Unfortunately such trace is not canonical, since it depends on a generalized limit procedure. However, in the case of infinite self-similar CW-complexes, it was observed in~\chiite{CGIs01} that such trace becomes canonical when restricted to the $C^*$-algebra of geometric operators.
We adapt these results to our present context, namely we replace the usual trace with a~renor\-ma\-li\-zed trace associated with an exhaustion of the infinite fractafold. Such trace comes together with a noncommutative $C^*$-algebra, the algebra of geometric operators, which is similar in spirit to the Roe $C^*$-algebras of coarse geometry~\chiite{HiRo,Roe,Roe-2,Roe96,WiYu}. This algebra contains the solenoid algebra, and the limiting Dirac operator is affiliated to it in a suitable sense. Such Dirac operator turns out to be $\tauau$-compact w.r.t.~the renormalized trace. We refer to~\chiite{CGIs01, GuIs7} for an analogous construction of the $C^*$-algebra and of a canonical trace based on the self-similarity structure.
As discussed above, the starting point for the construction of a spectral triple on the solenoid algebra is the association of a spectral triple to the fractal known as the Sierpinski gasket~\cite{Sierpinski}.
The study of fractal spaces from a spectral, or noncommutative, point of view has now a~long history, starting from the early papers of Kigami and Lapidus~\cite{KiLa, La94,La97}.
As for the spectral triples, various constructions have been considered in the literature, mainly based on~``small'' triples attached to specific subsets of the fractal, following a general procedure first introduced by Connes, then considered in~\cite{GuIs9,GuIs10}, and subsequently abstracted in~\cite{ChIv07}.
More precisely, the spectral triple on the Cantor set described by Connes~\cite{ConnesBook} inspired two kinds of~spectral triples for various families of fractals in~\cite{GuIs9,GuIs10}. These triples were further analysed in~\cite{GuIs16} for the class of nested fractals. Such spectral triples are obtained as direct sums of triples on two points (boundary points of an edge in some cases), and we call them discrete spectral triples. We~then mention some spectral triples obtained as direct sums of spectral triples on 1-dimensional subsets, such as those considered in~\cite{Arau,CIL,CIS,CGIS02,LaSa}, where the 1-dimensional subsets are segments, circles or quasi-circles. Discrete spectral triples give a good description of metric aspects of the fractal, such as the Hausdorff dimension and measure and the geodesic distance, and, as shown in~\cite{GuIs1}, may also reconstruct the energy functional (Dirichlet form) on the fractal, but they are not suited for the study of K-theoretical properties, since the pairing with K-theory is trivial. Conversely, spectral triples based on segments or circles describe both metric and K-theoretic properties of~the fractal but cannot be used for describing the Dirichlet form. Finally, the spectral triple based on quasi-circles considered in~\cite{CGIS02} describes metric and K-theoretic aspects together with the energy form, but requires a rather technical approach.
In the present paper, we make use of the simple discrete spectral triple on the gasket as described in~\cite{GuIs16}, thus obtaining a semifinite spectral triple on the solenoid algebra which recovers the metric dimension and the Bohr--F{\o}lner mean of the solenoid, and the geodesic distance on the infinite fractafold.
Further analysis on the solenoid is possible, e.g., the construction of a Dirichlet form via noncommutative geometry or the study of K-theoretic properties. As explained above, the latter step will require a different choice of the spectral triple on the base gasket, such as the triples considered in~\cite{CIL,CIS,CGIS02}, which admit a non-trivial pairing with the K-theory of the gasket.
As already mentioned, our aim here is to show that the family of spectral triples on the finite coverings produces a spectral triple on the solenoidal space. In the examples considered in~\cite{AGI01}, the family of spectral triples had a simple tensor product structure, namely the Hilbert spaces were tensor products of the Hilbert space ${\mathcal H}$ for the base space and a finite dimensional Hilbert space, and the Dirac operators could be described as (a finite sum of) tensor product operators. Then the ambient $C^*$-algebra turned out to be a product of ${\mathcal B}({\mathcal H})$ and a UHF algebra, allowing a GNS representation w.r.t.~a~semifinite trace.
In the example treated here we choose a different approach, since two problems forbid such a simple description. The first is a local problem, due to the ramification points. It implies that the algebra of a covering is not a free module over the algebra of the base space; in particular,
functions on a covering space form a proper sub-algebra of the direct sum of finitely many copies of the algebra of the base space.
The second is a non-local problem which concerns the Hilbert spaces, which are $\ell^2$-spaces on edges, and the associated operator algebras. Indeed, the Hilbert spaces of the coverings cannot be described as finite sums of copies of the Hilbert space of the base space, due to the appearance of longer and longer edges on larger and larger coverings.
We conclude this introduction by mentioning two further developments of the present analysis.
First, the construction of the spectral triple on the solenoid algebra allows the possibility of~lifting a spectral triple from a $C^*$-algebra to the crossed product of the $C^*$-algebra with a~single endomorphism~\cite{AGI02}, thus generalising the results on crossed products
with an automorphism group considered in~\cite{BMR,Skalski,Paterson}.
Second, we observe that the construction given in the present paper goes in the direction of~possibly defining a $C^*$-spectral triple, in which the semifinite von Neumann algebra is replaced by a $C^*$-algebra with a trace to which both the Dirac operator and the ``functions'' on the non-commutative space are affiliated, and where the compactness of the resolvent of the Dirac operator is measured by the trace on the $C^*$-algebra, cf.~also~\cite{GuIs4}.
This paper is divided into six sections. After this introduction, Section~\ref{sec2} contains some preliminary notions on fractals and spectral triples, and Section~\ref{sec3} describes the geometry of the ramified covering and the corresponding inductive structure, together with its functional counterpart given by a family of compatible spectral triples. Section~\ref{sec4} concerns the self-similarity structure of the Sierpinski solenoid, whence the description of the inductive family of $C^*$-algebras as algebras of bounded functions on the fractafold. Section~\ref{sec5} describes the algebra of geometric operators and the construction of a semicontinuous semifinite trace on it. Finally, the semifinite spectral triple, together with its main features, is presented in Section~\ref{sec6}.
\section{Preliminaries}\label{sec2}
In this section we shall briefly recall various notions that will be used in the paper.
Though these notions are well known among the experts, our note brings together different themes, namely spectral triples in noncommutative geometry and nested fractals (the Sierpinski gasket in particular). We have therefore collected here the main notions and results that will be useful in the following, with the aim of helping readers with different backgrounds to follow the various arguments.
\subsection{Spectral triples}
The notion of spectral triple plays a key role in Alain Connes' noncommutative geometry~\cite{ConnesBook,GBVF}. Basically, it consists of a triple $({\mathcal L},{\mathcal H},D)$, where ${\mathcal L}$ is a $*$-algebra acting faithfully on the Hilbert space ${\mathcal H}$, and $D$ is an unbounded self-adjoint operator on ${\mathcal H}$ satisfying the properties
\begin{itemize}\itemsep=0pt
\item[$(1)$] $\big(1+D^2\big)^{-1/2}$ is a compact operator,
\item[$(2)$] $\pi(a)\mathcal{D}(D) \subset \mathcal{D}(D)$, and $[D,\pi(a)]$ is bounded for all $a\in {\mathcal L}$.
\end{itemize}
We shall also say that $({\mathcal L},{\mathcal H},D)$ is a spectral triple on the $C^*$-algebra ${\mathcal A}$ generated by~${\mathcal L}$.
Such a triple is meant as a generalization of a compact smooth manifold, the algebra ${\mathcal L}$ replacing the algebra of smooth functions, the Hilbert space describing a vector bundle (a spin bundle indeed) on which the algebra of functions acts, and the operator~$D$ generalizing the notion of Dirac operator.
Further structure may be added to the properties above, allowing deeper analysis of the geometric features of the noncommutative manifold, but this is not needed in~this paper.
Property $(2)$ above allows the definition of a (possibly infinite) distance (Connes distance) on the state space of the $C^*$-algebra ${\mathcal A}$ generated by ${\mathcal L}$, defined as
\begin{gather*}
d(\varphi,\psi)=\sup\{|\varphi(a)-\psi(a)|\colon \|[D,a]\|\leq1,\, a\in {\mathcal L}\}.
\end{gather*}
When the Connes distance induces the weak$^*$-topology on the state space, the seminorm $\|[D,a]\|$ on~${\mathcal A}$ is called a Lip-norm (cf.~\cite{Rieffel}) and the algebra ${\mathcal A}$, endowed with the Connes distance, is a~quantum metric space.
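As a classical illustration of these notions (recalled here only for orientation, and not specific to the triples considered below), consider the triple $\big(C^\infty(S^1),L^2(S^1),-{\rm i}\frac{{\rm d}}{{\rm d}t}\big)$ on the circle $S^1={\mathbb R}/{\mathbb Z}$: then $\|[D,f]\|=\|f'\|_\infty$, so that for the evaluation states ${\rm ev}_x$, ${\rm ev}_y$ one gets
\begin{gather*}
d({\rm ev}_x,{\rm ev}_y)=\sup\{|f(x)-f(y)|\colon \|f'\|_\infty\leq1\}=d_{\rm geo}(x,y),
\end{gather*}
the geodesic distance on the circle, and the corresponding seminorm is a Lip-norm.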
A spectral triple is called finitely summable if $\big(1+D^2\big)^{-s/2}$ has finite trace for some $s>0$; in this case the abscissa of convergence~$d$ of the function $\tr\big(1+D^2\big)^{-s/2}$ is called the metric dimension of~the triple. Then the logarithmic singular trace introduced by Dixmier~\cite{Dix} may be used to define a noncommutative integral on~${\mathcal A}$.
Let us denote by $\{\mu_n(T)\}$ the sequence (with multiplicity) of~singular values of the compact operator~$T$, arranged in decreasing order.
Then, on the positive compact operators for which the sequence $\sum_{k=1}^n\mu_k(T)$ is at most logarithmically divergent, we may consider the positive functional
\begin{gather*}
\tr_\omega(T)=\Lim_\omega\frac{\sum_{k=1}^n\mu_k(T)}{\log n},
\end{gather*}
where $\Lim_\omega$ is a suitable generalized limit. Such a functional extends to a positive trace on ${\mathcal B}({\mathcal H})$ which vanishes on trace class operators, and is called the Dixmier (logarithmic) trace.
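As a simple worked example (standard, and independent of the rest of the paper), if $T\geq0$ has singular values $\mu_n(T)=1/n$, $n\geq1$, then $\sum_{k=1}^n\mu_k(T)=\log n+O(1)$, so that
\begin{gather*}
\tr_\omega(T)=\Lim_\omega\frac{\log n+O(1)}{\log n}=1
\end{gather*}
for every choice of the generalized limit, even though $T$ is not trace class.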
If $\big(1+D^2\big)^{-d/2}$ is in the domain of the Dixmier trace, one defines the following noncommutative integral:
\begin{gather*}
\oint a=\tr_\omega\big(a\big(I+D^2\big)^{-d/2}\big),\qquad a\in{\mathcal A}.
\end{gather*}
When the function $\tr\big(1+D^2\big)^{-s/2}$ has a finite residue at $s=d$, such residue turns out to coincide, up to a constant, with the Dixmier trace, which therefore does not depend on the generalized limit procedure (cf.~\cite{ConnesBook}, and~\cite[Theorem~3.8]{CPS}):
\begin{gather*}
d\cdot\tr_\omega \big(a\big(I+D^2\big)^{-d/2}\big)={\Res}_{s=d} \tr\big(a|D|^{-s}\big).
\end{gather*}
We note in passing that spectral triples may also describe non-compact smooth manifolds, with the algebra~${\mathcal L}$ describing smooth functions with compact support and property~(1) replaced by: $a\big(1+D^2\big)^{-1/2}$ is a compact operator for any $a\in{\mathcal L}$.
\subsection{Semifinite spectral triples}\label{SemST}
The notion of spectral triple has been generalized to the semifinite case, by replacing the ambient algebra ${\mathcal B}({\mathcal H})$ with a semifinite von~Neumann algebra ${\mathcal M}$ endowed with a normal semifinite faithful trace~$\tau$.
We recall that an operator $T$ affiliated with $({\mathcal M},\tau)$ is called $\tau$-compact if its generalized s-number function $\mu_t(T)$ is infinitesimal or, equivalently, if $\tau(e_{(t,\infty)}(T))<\infty$ for~any $t>0$ (cf.~\cite[Section~1.8, p.~34]{Fack}, \cite[Proposition~3.2]{FK}).
\begin{dfn}[\cite{CaPhi1}]\label{def:SFtriple}
An odd semifinite spectral triple $({\mathcal L},{\mathcal M},D)$ on a unital $C^*$-algebra ${\mathcal A}$ is given by a unital, norm-dense, $^*$-subalgebra ${\mathcal L}\subset{\mathcal A}$, a semifinite von Neumann algebra $({\mathcal M},\tau)$, acting on a (separable) Hilbert space ${\mathcal H}$, a faithful representation $\pi\colon{\mathcal A}\to{\mathcal B}({\mathcal H})$ such that $\pi({\mathcal A})\subset{\mathcal M}$, and an unbounded self-adjoint operator $D \widehat{\in} {\mathcal M}$ such that
\begin{itemize}\itemsep=0pt
\item[$(1)$] $\big(1+D^2\big)^{-1/2}$ is a $\tau$-compact operator,
\item[$(2)$] $\pi(a)\mathcal{D}(D) \subset \mathcal{D}(D)$, and $[D,\pi(a)] \in{\mathcal M}$, for all $a\in{\mathcal L}$.
\end{itemize}
\end{dfn}
As in the type $I$ case, such a triple is called finitely summable if $\big(1+D^2\big)^{-s/2}$ has finite trace for some $s>0$; the abscissa of convergence $d$ of the function $\tau\big(1+D^2\big)^{-s/2}$ is then called the metric dimension of the triple.
The logarithmic Dixmier trace associated with the normal trace $\tau$ may be defined in this case too (cf.~\cite{CPS, GuIs1}) and, when the function $\tau\big(1+D^2\big)^{-s/2}$ has a~finite residue at $s=d$, the equality $d\cdot\tr_\omega\big(a|D|^{-d}\big)={\Res}_{s=d} \tr\big(a|D|^{-s}\big)$ still holds \cite[Theorem~3.8]{CPS}.
\subsection{Self-similar fractals}
Let $\Omega:= \{w_i \colon i=1,\dots,k \}$ be a family of contracting similarities of ${\mathbb R}^{N}$, with scaling parameters~$\{\lambda_i\}$. The unique non-empty compact subset $K$ of ${\mathbb R}^{N}$ such that $K = \bigcup_{i=1}^{k} w_i(K)$ is called the {\it self-similar fractal} defined by $\{w_i \}_{i=1,\dots,k}$. For any $i\in\{1,\dots,k\}$, let $p_i\in{\mathbb R}^N$ be the unique fixed-point of $w_i$, and say that $p_i$ is an essential fixed-point of $\Omega$ if there are $i',j,j'\in\{1,\dots,k\}$ such that $i'\neq i$ and $w_j(p_i)=w_{j'}(p_{i'})$. Denote by $V_0(K)$ the set of essential fixed-points of $\Omega$, and let $E_0(K):=\{ (p,q)\colon p,q\in V_0,\ p\neq q\}$. Observe that $(V_0,E_0)$ is a directed finite graph whose edges are in $1:1$ correspondence with ordered pairs of distinct vertices.
\begin{dfn}
We call an element of the family $\{w_{i_1}\cdots w_{i_k}(K)\colon k\geq0\}$ a {\it cell}, and call its diameter the size of the cell.
We call an element of the family $E(K)=\{w_{i_1}\cdots w_{i_k}(e)\colon k\geq0,\ e\in E_{0}(K)\}$ an {\it $($oriented$)$ edge} of $K$. We denote by $e^-$, resp.~$e^+$, the source, resp.~the target, of~the oriented edge $e$.
\end{dfn}
As an example, the Sierpinski gasket is the self-similar fractal determined by 3 similarities with scaling parameter $1/2$ centered in the vertices of an equilateral triangle (see Fig.~\ref{fig:Gasket}).
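For this example the essential fixed-point condition is immediately checked; we record the outcome since the resulting count of $E_0(K)$ will be used repeatedly below. If $p_0$, $p_1$, $p_2$ denote the vertices of the triangle, then
\begin{gather*}
w_j(p_i)=\tfrac12(p_i+p_j)=w_i(p_j),\qquad i\neq j,
\end{gather*}
so every vertex is an essential fixed-point. Hence $V_0(K)=\{p_0,p_1,p_2\}$, and $E_0(K)$ consists of the $3\cdot2=6$ oriented edges joining distinct vertices.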
\begin{figure}[t]\centering
\includegraphics[scale=1]{Gasket1a.pdf}
\qquad\qquad
\includegraphics[scale=1]{Gasket1b.pdf}
\\[2ex]
\includegraphics[scale=1]{Gasket2a.pdf}
\qquad\qquad
\includegraphics[scale=1]{Gasket2b.pdf}
\caption{The first four steps of the construction of the gasket.}\label{fig:Gasket}
\end{figure}
Under suitable conditions, the Hausdorff dimension $d_H$ of a self-similar fractal coincides with its scaling dimension, namely with the only positive number $d$ such that $\sum_{i=1}^k\lambda_i^d=1$; therefore, when all scaling parameters coincide with $\lambda$, we have $d_H=\frac{\log k}{\log(1/\lambda)}$. In particular, the Hausdorff dimension of the Sierpinski gasket is $\frac{\log 3}{\log2}$.
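For the gasket, for instance, $k=3$ and $\lambda=1/2$, so the scaling dimension is determined by
\begin{gather*}
3\cdot\big(\tfrac12\big)^{d}=1 \qquad\Longleftrightarrow\qquad d=\frac{\log3}{\log2}\approx1.585,
\end{gather*}
in agreement with the value of $d_H$ just quoted.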
We note in passing that one of the most important aspects of the Sierpinski gasket, and of more general classes of fractals, is the existence of a self-similar diffusion, associated with a Dirichlet form, see, e.g.,~\cite{Kiga2}.
Even though Dirichlet forms on~fractals can be recovered in the noncommutative geometry framework~\cite{GuIs16}, and in particular by~means of the spectral triples which we use in this paper, we do not analyse this aspect in the present note.
In~\cite{GuIs10} discrete spectral triples have been introduced on some classes of fractals, generalizing an example of Connes in~\cite[Chapter~4.3, Example~23]{ConnesBook}. Such triples have been further studied in~\cite{GuIs16} for nested fractals.
On a self-similar fractal $K$, the triple $({\mathcal L},{\mathcal H},D)$ on the $C^*$-algebra ${\mathcal A}={\mathcal C}(K)$ is defined as follows:
\begin{dfn}\label{STnested}\qquad
\begin{itemize}\itemsep=0pt
\item[$(a)$] ${\mathcal H}=\ell^2(E(K))$,
\item[$(b)$] ${\mathcal A}$ acts on the Hilbert space as $\rho(f)e=f(e^+)e$, $f\in{\mathcal A}$, $e\in E(K)$,
\item[$(c)$] $F$ is the orientation-reversing map on edges,
\item[$(d)$] $D$ maps an edge $e\in E(K)$ to $\length(e)^{-1}Fe$,
\item[$(e)$] ${\mathcal L}$ is given by the elements $f\in {\mathcal A}$ such that $\|[D,\rho(f)]\|<\infty$.
\end{itemize}
\end{dfn}
It turns out that ${\mathcal L}$ coincides with the algebra of Lipschitz functions on $K$, hence is dense in~${\mathcal A}$, and the seminorm $L(f):=\|[D,f]\|$ is a Lip-norm. By Theorem 3.3 in~\cite{GuIs16}, see also Remark~2.11 in~\cite{GuIs10}, the triple $({\mathcal L},{\mathcal H},D)$ is a finitely summable spectral triple on ${\mathcal A}$, its metric dimension coincides with the Hausdorff dimension, and the noncommutative integral recovers the Hausdorff measure up to a constant:
\begin{gather}\label{fractalNCint}
\oint f=\tr_\omega\big(f|D|^{-d}\big) = \frac{1}{ \log k} \sum_{e\in E_0(K)} \ell(e)^d \int_K f\, {\rm d}H_d, \qquad f\in C(K),
\end{gather}
where $H_d$ denotes the normalized Hausdorff measure on the fractal $K$.
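For the gasket (with unit side, say) these statements can be verified directly; we sketch the computation as a consistency check. The edges of length $2^{-m}$ are the sets $w_{i_1}\cdots w_{i_m}(e)$, $e\in E_0(K)$, and, since distinct cells of the same size intersect at most in a point, they are pairwise distinct, so there are exactly $6\cdot3^m$ of them. Since $|D|e=\length(e)^{-1}e$, we get
\begin{gather*}
\tr\big(|D|^{-s}\big)=\sum_{m\geq0}6\cdot3^m2^{-ms}=\frac{6}{1-3\cdot2^{-s}},\qquad s>\frac{\log3}{\log2},
\end{gather*}
so the abscissa of convergence is indeed $d=\frac{\log3}{\log2}$, with ${\Res}_{s=d}\tr\big(|D|^{-s}\big)=\frac{6}{\log2}$. The residue formula recalled above then gives $\oint1=\tr_\omega\big(|D|^{-d}\big)=\frac{6}{d\log2}=\frac{6}{\log3}$, which is precisely the constant $\frac{1}{\log k}\sum_{e\in E_0(K)}\ell(e)^d$ appearing in~\eqref{fractalNCint}.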
Moreover, in some cases, and in particular for the Sierpinski gasket, the Connes distance induced by the Lip-norm $L(f):=\|[D,f]\|$ coincides with the geodesic distance on the points of the gasket $K$, see~\cite[Corollary~5.14]{GuIs16}.
\subsection{Covering fractafolds and solenoids}
Generally speaking, a solenoid is the inverse limit of a projective family of coverings of a given space~\cite{McCord}. Dually, the solenoid algebra is the direct limit of the family of algebras of continuous functions on the spaces of the projective family. In this sense the notion of solenoid makes sense for injective families of $C^*$-algebras, cf., e.g.,~\cite{AGI01} for sequences generated by a single endomorphism and~\cite{LaPa} for sequences of compact quantum spaces. Other examples of the treatment of solenoids in the recent literature have been mentioned in the introduction.
The notion of fractafold, as a connected Hausdorff topological space such that every point has a neighborhood homeomorphic to a neighborhood in a given fractal, has been introduced in~\cite{Stri2003}, even though examples of such notion were already considered before, e.g., in~\cite{BaPe,Stri1996,Tep}.
In some cases projective families of covering fractafold spaces related to the Sierpinski gasket have been considered.
Since the gasket does not admit a simply connected covering, one may consider coverings where more and more cycles are unfolded, in particular the regular infinite abelian covering $S_n$ where all the cycles of size at least $2^{-n}$ are unfolded. Each of these is a closed fractafold (with boundary), and they form a projective family.
The associated solenoid~$S_\infty$, i.e., the projective limit, which turns out to be an abelian counterpart of the Uniform Universal Cover introduced by Berestovskii and Plaut~\cite{BerPla}, has been considered in~\cite{CGIS13}, where it is shown that any locally exact 1-form on the gasket possesses a potential on~$S_\infty$.
Another projective family of covering fractafolds has been considered in~\cite{Stri2009}, each element of~the family being a compact finite covering of the octahedral fractafold modeled on the gasket. Any element of the family is covered by the infinite Sierpinski gasket with a unique boundary point, which we call $K_\infty$ here (see Fig.~\ref{fig:infiniteBlowup}), considered in~\cite[Lemma~5.11]{Tep}. The solenoid associated with the projective family is also mentioned explicitly in~\cite{Stri2009}, together with the dense embedding of $K_\infty$ in it, and also a Bohr--F{\o}lner mean on the solenoid is considered (p.~1199).
\begin{figure}[t]\centering
\includegraphics[scale=1.03]{infiniteGasket.pdf}
\caption{The gasket and its infinite blowup.}\label{fig:infiniteBlowup}
\end{figure}
In the present paper a self-covering of the gasket gives rise to a projective family of finite ramified coverings, the fractafold $K_\infty$ projects onto each element of the family and embeds densely in the solenoid, and we recover the Bohr--F{\o}lner mean on the solenoid via a noncommutative integral.
\section{A ramified covering of the Sierpinski gasket}\label{sec3}
Let us choose an equilateral triangle of side 1 in the Euclidean plane with vertices $v_0$, $v_1$, $v_2$ (numbered in a counterclockwise order) and consider the associated Sierpinski gasket as in the previous section, namely the set $K$ such that
\begin{gather*}
K=\bigcup_{j=0,1,2}w_j(K),
\end{gather*}
where $w_j$ is the dilation around $v_j$ with contraction parameter $1/2$.
Clearly, for the cell $C=w_{i_1}\cdots w_{i_k}(K)$, $\size(C)=2^{-k}$ and, if $e_0\in E_{0}(K)$ and $e=w_{i_1}\cdots w_{i_k}(e_0)$, $\length(e)=2^{-k}$.
\looseness=1
In the following we shall set $K_0:=K$, $E_0=E(K)$, $K_n=w_0^{-n}K_0$.
Let us now consider the middle point $x_{i,i+1}$ of the segment $\big(w_0^{-1}v_i,w_0^{-1}v_{i+1}\big)$, $i=0,1,2$, the map $R_{i+1,i}\colon w_0^{-1}w_{i}K\to w_0^{-1}w_{i+1}K$ consisting of the rotation of $\frac43\pi$ around the point $x_{i,i+1}$, $i=0,1,2$, and observe that
\begin{gather}\label{IdOnCells}
R_{i,i+2}\circ R_{i+2,i+1}\circ R_{i+1,i} = {\rm id}_{w_0^{-1}w_i K},\qquad i=0,1,2.
\end{gather}
Setting $R_{i,i+1} = R_{i+1,i}^{-1}$, the previous identities may also be written as
\begin{gather*}
R_{i+2,i+1}\circ R_{i+1,i} = R_{i+2,i},\qquad i=0,1,2.
\end{gather*}
We then construct the map $p\colon K_1\to K$ given by
\begin{gather*}
p(x)=
\begin{cases}
x,&x\in K,
\\
R_{0,1}(x),&x\in w_0^{-1}w_1 K,
\\
R_{0,2}(x),&x\in w_0^{-1}w_2 K,
\end{cases}
\end{gather*}
and observe that this map, which appears to be doubly defined in the points $x_{i,i+1}$, $i=0,1,2$, is indeed well defined (see Fig.~\ref{fig:covering}).
\begin{figure}[t]\centering
\scalebox{3.8}{\def\trianglewidth{2cm}
\pgfdeclarelindenmayersystem{Sierpinski triangle}{
    \symbol{X}{\pgflsystemdrawforward}
    \symbol{Y}{\pgflsystemdrawforward}
    \rule{X -> X-Y+X+Y-X}
    \rule{Y -> YY}
}
\foreach \level in {7}{
    \tikzset{
        l-system={step=\trianglewidth/(2^\level), order=\level, angle=-120}
    }
\begin{tikzpicture}
\fill [black] (0,0) -- ++(0:\trianglewidth) -- ++(120:\trianglewidth) -- cycle;
\draw [draw=none] (0,0) l-system [l-system={Sierpinski triangle, axiom=X},fill=white];
\draw[line width=0.05mm, <-] (.75,-.1) to[out=-90,in=-90] (1.25,-.1);
\draw[line width=0.05mm, <-] (0.25,.6) to[out=135,in=120] (.5,1);
\node (bbb) at (-.05,-0.075) {$\scalebox{.25}{$v_0$}$};
\node (bbb) at (.395,.87) {$\scalebox{.25}{$x_{2,0}$}$};
\node (bbb) at (1.6,.87) {$\scalebox{.25}{$x_{1,2}$}$};
\node (bbb) at (1,1.8) {$\scalebox{.25}{$w_0^{-1}v_2$}$};
\node (bbb) at (1,-.075) {$\scalebox{.25}{$x_{0,1}=v_1$}$};
\node (bbb) at (2.05,-.075) {$\scalebox{.25}{$w_0^{-1}v_1$}$};
\node (bbb) at (0.1,.87) {$\scalebox{.25}{$R_{0,2}$}$};
\node (bbb) at (1,-.35) {$\scalebox{.25}{$R_{0,1}$}$};
\end{tikzpicture}
}
}
\caption{The covering map $p\colon K_1\to K$.}\label{fig:covering}
\end{figure}
The following result is easily verified.
\begin{prop}
The map $p$ is a well defined continuous map which is a ramified covering, with ramification points given by $\{x_{i,i+1},\, i=0,1,2\}$. Moreover, the covering map is isometric on suitable neighbourhoods of the non-ramification points.
\end{prop}
Since $K_1$ and $K$ are homeomorphic, this map may be seen as a self-covering of the gasket.
The map $p$ gives rise to an embedding $\alpha_{1,0}\colon {\mathcal C}(K)\to{\mathcal C}(K_1)$, hence, following~\cite{Cuntz}, to an inductive family of $C^*$-algebras ${\mathcal A}_n={\mathcal C}(K_n)$, whose inductive limit ${\mathcal A}_\infty$ consists of continuous functions on the solenoidal space based on the gasket.
As in Definition~\ref{STnested}, we consider the triple $({\mathcal L}_n,{\mathcal H}_n,D_n)$ on the $C^*$-algebra ${\mathcal A}_n$, $n\geq0$, where
${\mathcal H}_n=\ell^2(E_n)$, $E_n=\{w_0^{-n}e,\, e\in E_0\}$ (the set of oriented edges in $K_n$).
Let us also note that, since the covering projections are locally isometric and any Lip-norm $L_m(f)=\|[D_m,f]\|$ associated with the triple $({\mathcal A}_m,{\mathcal H}_m,D_m)$ produces the geodesic distance on~$K_m$, we get $L_{m+q}(\alpha_{m+q,m}(f))=L_m(f)$, namely we obtain a seminorm on the algebraic inductive limit of the ${\mathcal A}_n$'s.
\section[A groupoid of local isometries on the infinite Sierpinski fractafold]
{A groupoid of local isometries on the infinite\\ Sierpinski fractafold}\label{sec4}
Let us consider the infinite fractafold $K_\infty=\bigcup_{n\geq0}K_n$~\cite{Tep} endowed with the Hausdorff measure $\vol$ of dimension $d=\frac{\log3}{\log2}$ normalized to be 1 on $K=K_0$, with the exhaustion $\{K_n\}_{n\geq0}$, and with the family of local isometries $R=\big\{R^n_{i+1,i},R^n_{i,i+1}\colon i=0,1,2,\, n\geq 0\big\}$, where $R^n_{i,j} = w_0^{-n}R_{i,j}w_0^{n}\colon C^n_j \to C^n_i$, and $C^n_i := w_0^{-n-1}w_iK$, $n\geq 0$, $i,j\in\{0,1,2\}$. We also denote by $s(\gamma)$ and $r(\gamma)$ the domain and range of the local isometry~$\gamma$. Such local isometries act on points and on oriented edges of $K_\infty$.
We say that the product of the two local isometries $\gamma_1$, $\gamma_2\in R$ is defined if $\gamma_2^{-1}(s(\gamma_1))\cap s(\gamma_2) \neq \varnothing$. In this case we consider the product
\begin{gather*}
\gamma_1\cdot\gamma_2\colon\ \gamma_2^{-1}(s(\gamma_1))\cap s(\gamma_2)\to r\big(\gamma_1|_{s(\gamma_1)\cap r(\gamma_2)}\big).
\end{gather*}
We then consider the family ${\mathcal G}$ consisting of all (the well-defined) finite products of isometries in $R$. Clearly, any $\gamma$ in ${\mathcal G}$ is a local isometry, and its domain and range are cells of the same size. We set ${\mathcal G}_n=\{\gamma\in{\mathcal G}\colon s(\gamma)\,\&\,r(\gamma)$ are cells of size $2^n\}$, $n\geq0$.
\begin{prop}
For any $n\geq0$ and $C_1$, $C_2$ cells of size $2^n$, $\exists!\, \gamma\in{\mathcal G}_n$ such that $s(\gamma)=C_1$, $r(\gamma)=C_2$. In particular, if $C$ has size $2^n$, the identity map of $C$ belongs to ${\mathcal G}_n$, $n\geq0$.
\end{prop}
\begin{proof}
It is enough to show that for any cell $C$ of size $2^n$ there exists a unique $\gamma\in{\mathcal G}_n$ such that $\gamma\colon C\to K_n$.
For any cell $C$, let $m=\level(C)$ be the minimum number such that $C\subset K_m$.
We~prove the existence: if $C$ has size $2^n$ and $\level(C)=m>n$, then $C\subset C^{m-1}_i$, for some $i=1,2$, hence $R^{m-1}_{0,i}(C) \subset K_{m-1}$. Iterating, the result follows.
The second statement follows directly by equation~\eqref{IdOnCells}.
As for the uniqueness, $\forall\, n\geq0$, we call $R^n_{i,0}$ ascending, $i=1,2$, $R^n_{0,i}$ descending, $i=1,2$, and $R^n_{i,j}$~constant-level, $i,j\in\{1,2\}$.
Indeed, if $C \subset s(R^n_{i,0})$, then $\level(C)\leq n$ and $\level(R^n_{i,0}(C)) = n+1$; if $C \subset s(R^n_{0,i})$, then $\level(C)=n+1$, $\level(R^n_{0,i}(C))\leq n$ and $\level(R^n_{j,i}(C))= n+1$, $i,j\in\{1,2\}$, $n\geq0$.
The following facts hold:
\begin{itemize}\itemsep=0pt
\item The product $R^n_{l,k} \cdot R^m_{j,i}$ of two constant-level elements $R^n_{l,k}$, $R^m_{j,i}$ is defined iff $n=m$ and~$k=j$, therefore any product of constant-level elements in $R$ is either the identity map on~the domain or coincides with a single constant-level element.
\item Any product of constant-level elements in $R$ followed by a descending element coincides with a single descending element: indeed, if the product of constant-level elements is the identity, the statement is trivially true; if it coincides with a single element, say $R^n_{i,j}$ with~$i,j\in\{1,2\}$, then, by compatibility, the descending element should be $R^n_{0,i}$, so that the product is $R^n_{0,j}$, by equation~\eqref{IdOnCells}.
\item Given a cell $C$ with $\size(C)=2^n$ and $\level(C)>n$, there exists a unique descending element $\gamma \in R$ such that $C \subset s(\gamma)$: indeed, if $m=\level(C)$, then $C\subset C^{m-1}_i$, for some $i\in\{1,2\}$. The only descending element is then $\gamma=R^{m-1}_{0,i}$.
\item Any product of an ascending element followed by a descending one is the identity on the domain: indeed, if the ascending element is $R^n_{i,0}$, then, by compatibility, the descending element should be $R^n_{0,i}$.
\end{itemize}
Now let $\size(C)=2^n$ and $\gamma \in{\mathcal G}_n$ be such that $\gamma\colon C\to K_n$, $\gamma=\gamma_p\cdot\gamma_{p-1}\cdots\gamma_2\cdot\gamma_1$, where $\gamma_j\in R$, $1\leq j\leq p$. Since $\level(C) \geq \level(K_n) = n$, for any possible ascending element $\gamma_i$ there should be a $j>i$ such that $\gamma_j$ is descending. If $i+q$ is the minimum among such $j$'s, all terms $\gamma_j$, $i<j<i+q$, are constant-level, hence the product $\gamma_{i+q}\cdot\gamma_{i+q-1}\cdots \gamma_{i}={\rm id}_{s(\gamma_i)}$. Then, we note that $\gamma_p$ can only be descending. As a consequence, $\gamma$ can be reduced to a product of descending elements, and, by the uniqueness of the descending element acting on a given cell, we get the result.
\end{proof}
Let us observe that each ${\mathcal G}_n$, and so also ${\mathcal G}$, is a groupoid under the usual composition rule, namely two local isometries are composable if the domain of the first coincides with the range of the second.
We now consider the action on points of the local isometries in ${\muathcal G}$.
\begin{prop}
Let us define $\widetilde{\mathcal A}_n$ as the algebra
\begin{gather*}
\widetilde{\mathcal A}_n=\{f\in{\mathcal C}_b(K_\infty)\colon f( \gamma(x))= f(x),\, x\in s(\gamma),\, \gamma\in{\mathcal G}_n\}.
\end{gather*}
Then, for any $n\geq0$, the following diagram commutes,
\begin{gather*}
\begin{matrix}
\widetilde{\mathcal A}_n & \subset & \widetilde{\mathcal A}_{n+1} \\
\Big\downarrow\iota_n & & \Big\downarrow\iota_{n+1} \\
{\mathcal A}_n & \mathop{\longrightarrow}\limits^{\alpha_{n+1,n}} & {\mathcal A}_{n+1},
\end{matrix}
\end{gather*}
where $\iota_n\colon f\in\widetilde{\mathcal A}_n \to f|_{K_n} \in {\mathcal A}_n$ are isomorphisms.
Hence the inductive limit ${\mathcal A}_\infty$ is isomorphic to a $C^*$-subalgebra of ${\mathcal C}_b(K_\infty)$.
\end{prop}
\begin{proof}
The requirement in the definition of $\widetilde{\mathcal A}_n$ means that the value of $f$ at any point of $K_\infty$ is~determined by its values on $K_n$, while it imposes no restriction on the values of $f$ on~$K_n$. The other assertions easily follow.
\end{proof}
As shown above, we may identify the algebra ${\mathcal A}_n$, $0\leq n\leq \infty$, with its isomorphic copy~$\widetilde{\mathcal A}_n$ in~${\mathcal C}_b(K_\infty)$, so that the embeddings $\alpha_{k,j}$ become inclusions.
Moreover, we may consider
the operator $\widetilde D_n$ on $\ell^2(E_\infty)$, with $E_\infty=\bigcup_{n\geq0}E_n$, given by $\widetilde D_n e=\length(e)^{-1}Fe$, if $\length(e)\leq2^n$, and $\widetilde D_n e=0$, if $\length(e) >2^n$, where $F$ is defined as in Definition~\ref{STnested}$(c)$.
Then the spectral triples $({\mathcal A}_n,{\mathcal H}_n,D_n)$ are isomorphic to the spectral triples $(\widetilde{\mathcal A}_n,{\mathcal H}_n,\widetilde D_n)$, where ${\mathcal C}_b(K_\infty)$ acts on~the space $\ell^2(E_\infty)$ through the representation $\rho$ given by $\rho(f)e=f(e^+)e$.
\begin{rem}
Because of the isomorphism above, from now on we shall remove the tildes and denote by ${\mathcal A}_n$ the subalgebras of ${\mathcal C}_b(K_\infty)$ and by $D_n$ the operators acting on $\ell^2(E_\infty)$.
\end{rem}
\section[The C*-algebra of geometric operators and a tracial weight on it]
{The $\boldsymbol{C^*}$-algebra of geometric operators\\ and a tracial weight on it}\label{sec5}
We now come to the action of local isometries on edges. We shall use the notation of the table below: for each subset of edges listed on the left, the right column gives the projection onto the closed subspace spanned by that subset:
\begin{table}[h!]\centering
\caption{Edges and projections.}
\label{tab:table1}
{\renewcommand{\arraystretch}{1.4}
\begin{tabular}{l|l}
\hline
\multicolumn{1}{c|}{Subsets of $E_\infty$} & \multicolumn{1}{c}{Projections} \\[.5ex]
\hline
$E_n=\{e\subset K_n\}$, $n\geq0$ &\qquad $P_n$
\\
$E^{k,p}_n=\big\{e\in E_n\colon 2^k\leq\length(e)\leq2^p\big\}$, for $k\leq p\leq n$ &\qquad $P_n^{k,p}$
\\
$E^k_n=E^{k,k}_n=\big\{e\in E_n\colon \length(e)=2^k\big\}$, for $k\leq n$ &\qquad $P^k_n$
\\
$E^{k,p}=\bigcup_n E^{k,p}_n=\big\{e\in E_\infty\colon 2^k\leq\length(e)\leq2^p\big\}$ &\qquad $P^{k,p}$
\\
$E^k=E^{k,k}=\big\{e\in E_\infty\colon \length(e)=2^k\big\}$ &\qquad $P^{k}$
\\
$E_C = \{e\in E_\infty\colon e\subset C\}$, $C$ being a cell &\qquad $P_C$
\end{tabular}}
\end{table}
Let us note that any local isometry $\gamma\in{\mathcal G}$, $\gamma\colon s(\gamma) \to r(\gamma)$, gives rise to a partial isometry~$V_\gamma$ defined as
\begin{gather*}
V_\gamma e=\begin{cases}
\gamma(e), & e\subset s(\gamma),\\
0, & \text{elsewhere}.
\end{cases}
\end{gather*}
In particular, if $C$ is a cell and $\gamma={\rm id}_C$, then $V_\gamma=P_C$.
We then consider the subalgebras ${\mathcal B}_n$ of~$B(\ell^2(E_\infty))$,
\begin{gather*}
{\mathcal B}_n=\{V_\gamma\colon\gamma\in{\mathcal G}_m,\,m\geq n\}',\qquad {\mathcal B}_{\text{fin}} = \bigcup_n{\mathcal B}_n,\qquad {\mathcal B}_\infty=\overline{{\mathcal B}_{\text{fin}}},
\end{gather*}
{\sloppy and note that the elements of ${\mathcal B}_n$ commute with the projections $P_C$, for all cells $C$ s.t.\ \mbox{$\size(C)\geq 2^n$}.
By definition, the sequence ${\mathcal B}_n$ is increasing, therefore, since the ${\mathcal B}_n$'s are von Neumann algebras, ${\mathcal B}_\infty$ is a $C^*$-algebra. Let us observe that, $\forall\, n\geq0$, $\rho({\mathcal A}_n)\subset{\mathcal B}_n$.
}
\begin{dfn}
The elements of the $C^*$-algebra ${\mathcal B}_\infty$ are called geometric operators.
\end{dfn}
Now consider the hereditary positive cone
\begin{gather*}
\mathcal{I}_0^+ = \big\{T\in {\mathcal B}_{\text{fin}}^+\colon \exists\, c_T\in{\mathbb R} \text{ such that } \tr (P_{m}T) \leq c_T \vol(K_m), \ \forall\, m \geq 0 \big\}.
\end{gather*}
\begin{lem}
For any $T\in\mathcal{I}_0^+$, the sequence $\frac{\tr (P_{m} T)}{\vol(K_m)}$ is eventually increasing, hence convergent. In particular,
\begin{gather}\label{evConst}
\tr\big(P_{p}^{p} T\big)=0\qquad \forall\, p>m\ \Rightarrow\ \tau_0(T)=\frac{\tr (P_{m} T) }{\vol(K_{m})}.
\end{gather}
\end{lem}
\begin{proof}
Let $T\in{\mathcal B}_n^+$. Then we have, for $m\geq n$,
\begin{gather*}
\tr (P_{m+1} T) = \!\!\sum_{e\subset K_{m+1}} (e,Te) = \!\!\sum_{i=0,1,2}\, \sum_{e\subset C^m_i }(e,Te) + \!\sum_{e\in E_{m+1}^{m+1}}\! (e,Te)
= 3\tr (P_{m} T) + \tr\big(P_{m+1}^{m+1} T\big),
\end{gather*}
hence
\begin{gather*}
\frac{\tr (P_{m+1} T) }{\vol(K_{m+1})}=\frac{\tr (P_{m} T) }{\vol(K_m)}+\frac{\tr\big(P_{m+1}^{m+1} T\big)}{\vol(K_{m+1})},
\end{gather*}
from which the thesis follows.
\end{proof}
We then define the weight $\tau_0$ on ${\mathcal B}_\infty^+$ as follows:
\begin{gather*}
\tau_0(T) =
\begin{cases}
\displaystyle{\lim_{m\to\infty}\frac{\tr (P_{m} T)}{\vol(K_m)}},& T\in\mathcal{I}_0^+,\\
0,& \text{elsewhere}.
\end{cases}
\end{gather*}
The next step is to regularize the weight $\tau_0$ in order to obtain a semicontinuous semifinite tracial weight $\tau$ on ${\mathcal B}_\infty$.
\begin{lem} \label{hereditary}
For any $T\in\mathcal{I}_0^+$ and $A\in{\mathcal B}_{\text{fin}}$, it holds that $ATA^* \in \mathcal{I}_0^+$, and $\tau_0(ATA^*) \leq \|A\|^2 \tau_0(T)$.
\end{lem}
\begin{proof}
Let $A\in{\mathcal B}_n$. Then, for any $m>n$, we have
\begin{gather*}
\tr(P_mATA^*) = \tr(A^*AP_mT) \leq \| A^*A \| \tr(P_mT) \leq \|A\|^2 c_T \vol(K_m),
\end{gather*}
and the thesis follows.
\end{proof}
\begin{prop} \label{def.Qfi}
For all $p\in\mathbb N$, recall that $P^{-p,\infty}$ is the orthogonal projection onto the closed vector space generated by $\big\{ e\in\ell^2(E_\infty)\colon \length(e) \geq 2^{-p} \big\}$, and let $\varphi_p(T) := \tau_0(P^{-p,\infty}TP^{-p,\infty})$, $\forall\, T\in{\mathcal B}_\infty^+$. Then $P^{-p,\infty}\in{\mathcal B}_0$, $\varphi_p$ is a positive linear functional, and $\varphi_p(T) \leq \varphi_{p+1}(T) \leq \tau_0(T)$, $\forall\, T\in{\mathcal B}_\infty^+$.
\end{prop}
\begin{proof}
We first observe that
\begin{gather}\label{Pjn}
\tr\big(P^j_n\big)=\#\big\{e\subset K_n\colon \length(e)=2^j\big\}=6 \cdot 3^{n-j},\qquad j\leq n.
\end{gather}
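The count in \eqref{Pjn} is immediate, but we spell it out since it enters the computations below: $K_n$ is the union of $3^{n-j}$ cells of size $2^j$, two distinct such cells share no edge, and each of them contains exactly $6$ edges of length $2^j$ (the image of $E_0(K)$ under the corresponding similarity), whence
\begin{gather*}
\#\big\{e\subset K_n\colon \length(e)=2^j\big\}=3^{n-j}\cdot6.
\end{gather*}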
Then it is easy to verify that $P^{-p,\infty}\in{\mathcal B}_0$. Since
\begin{gather}\label{P-pinfty}
\varphi_p(I)
= \tau_0\big(P^{-p,\infty}\big)
= \lim_{n\to\infty} \frac{\tr P^{-p,n}_n}{\mu_d(K_n)}
= \lim_{n\to\infty} 3^{-n} \sum_{j=-p}^n \tr\big(P^j_n\big)
=\sum_{j=-p}^\infty6\cdot 3^{-j}
= 3^{p+2},
\end{gather}
$\varphi_p$ extends by linearity to a positive functional on ${\mathcal B}_\infty$. Moreover, by Lemma~\ref{hereditary}, $\varphi_p(T) \leq \tau_0(T)$, $\forall\, T\in{\mathcal B}_\infty^+$. Finally, since $P^{-p,\infty}P_n=P_nP^{-p,\infty}=P^{-p,n}_n$, $\forall\, n\in\mathbb N$, we get, for all $T\in{\mathcal B}_\infty^+$,
\begin{align*}
\varphi_{p+1}(T) - \varphi_p(T)
& = \tau_0\big(P^{-(p+1),\infty}TP^{-(p+1),\infty}\big) - \tau_0\big(P^{-p,\infty}TP^{-p,\infty}\big) \\
&= \lim_{n\to\infty} \frac{\tr\big(\big(P^{-(p+1),n}_n -P^{-p,n}_n\big)T\big)}{\mu_d(K_n)}
=\lim_{n\to\infty} \frac{\tr\big(P^{-(p+1)}_n T\big)}{\mu_d(K_n)}
\geq 0.
\tag*{\qed}
\end{align*}
\renewcommand{\qed}{}
\end{proof}
\begin{prop} \label{def.tau}
Let $\tau(T) := \lim\limits_{p\to\infty} \varphi_p(T)$, $\forall\, T\in{\mathcal B}_\infty^+$. Then
\begin{itemize}\itemsep=0pt
\item[$(i)$] $\tau$ is a lower semicontinuous weight on ${\mathcal B}_\infty$,
\item[$(ii)$] $\tau(T) = \tau_0(T)$, $\forall\, T\in\mathcal{I}_0^+$.
\end{itemize}
\end{prop}
\begin{proof}
$(i)$ Let $T\in{\mathcal B}_\infty^+$. Since $\{ \varphi_p(T) \}_{p\in\mathbb N}$ is an increasing sequence, there exists $\lim\limits_{p\to\infty} \varphi_p(T) = \sup\limits_{p\in\mathbb N} \varphi_p(T)$. Then $\tau$ is a weight on ${\mathcal B}_\infty^+$. Since each $\varphi_p$ is continuous, $\tau$ is lower semicontinuous.
$(ii)$ Let us prove that, $\forall\, T\in{\mathcal B}_n^+$,
\begin{gather} \label{uguaglianza}
\frac{\tr\big(P^j_mT\big)}{\mu_d(K_m)} = \frac{\tr\big(P^j_nT\big)}{\mu_d(K_n)}, \qquad j\leq n \leq m.
\end{gather}
Indeed,
\begin{align*}
\tr\big(P^j_{m+1}T\big) & = \sum_{\substack{e \subset K_{m+1} \\ \length(e)=2^j}} ( e, Te ) = \sum_{i=0}^2 \sum_{\substack{e \subset C^m_i \\ \length(e)=2^j}} ( e, Te ) = \sum_{i=0}^2 \sum_{\substack{e \subset K_m \\ \length(e)=2^j}} \big( V_{R^m_i}e, TV_{R^m_i}e \big) \\
& = \sum_{i=0}^2 \sum_{\substack{e \subset K_m \\ \length(e)=2^j}} ( e, Te ) = 3 \tr\big(P^j_mT\big),
\end{align*}
from which~\eqref{uguaglianza} follows. Let us now prove that
\begin{gather} \label{approx}
\tau(T) = \sup_{p\in\mathbb N} \varphi_p(T) = \tau_0(T), \qquad T\in\mathcal{I}_0^+ .
\end{gather}
Let $T\in{\mathcal B}_n^+ \cap \mathcal{I}_0^+$, and $\varepsilon>0$. From the definition of $\tau_0(T)$, there exists $r\in\mathbb N$, $r>n$, such that $\frac{\tr(P_rT)}{\mu_d(K_r)} > \tau_0(T)-\varepsilon$. Since $\frac{\tr(P_rT)}{\mu_d(K_r)} = \sum_{j=-\infty}^r \frac{\tr(P^j_rT)}{\mu_d(K_r)}$, there exists $p\in\mathbb N$ such that $\sum_{j=-p}^r \frac{\tr(P^j_rT)}{\mu_d(K_r)} > \frac{\tr(P_rT)}{\mu_d(K_r)} -\varepsilon > \tau_0(T)-2\varepsilon$. Then, for any $s\in\mathbb N$, $s>r$, we have
\begin{align*}
\frac{\tr(P_sP^{-p,\infty}TP^{-p,\infty}P_s)}{\mu_d(K_s)} & = \sum_{j=-p}^s \frac{\tr\big(P^j_sT\big)}{\mu_d(K_s)} = \sum_{j=-p}^r \frac{\tr\big(P^j_sT\big)}{\mu_d(K_s)} + \sum_{j=r+1}^s \frac{\tr\big(P^j_sT\big)}{\mu_d(K_s)} \\
& \stackrel{\eqref{uguaglianza}}{=} \sum_{j=-p}^r \frac{\tr\big(P^j_rT\big)}{\mu_d(K_r)} + \sum_{j=r+1}^s \frac{\tr\big(P^j_sT\big)}{\mu_d(K_s)} > \tau_0(T)-2\varepsilon,
\end{align*}
and, passing to the limit for $s\to\infty$, we get
\begin{gather*}
\varphi_p(T) = \tau_0\big(P^{-p,\infty}TP^{-p,\infty}\big) = \lim_{s\to\infty} \frac{\tr(P_sP^{-p,\infty}TP^{-p,\infty}P_s)}{\mu_d(K_s)} \geq \tau_0(T)-2\varepsilon,
\end{gather*}
and equation~\eqref{approx} follows.
\end{proof}
We want to prove that $\tau$ is a tracial weight.
\begin{dfn}
An operator $U\in B\big(\ell^2(E_\infty)\big)$ is called $\delta$-unitary, $\delta>0$, if $\|U^*U-1\|<\delta$ and $\|UU^*-1\|<\delta$.
\end{dfn}
Let us denote by ${\mathcal U}_\delta$ the set of $\delta$-unitaries in ${\mathcal B}_{\text{fin}}$ and observe that, if $\delta<1$, ${\mathcal U}_\delta$ consists of invertible operators, and $U\in{\mathcal U}_\delta$ implies $U^{-1}\in{\mathcal U}_{\delta/(1-\delta)}$.
\begin{prop}
The weight $\tau_0$ is $\varepsilon$-invariant for $\delta$-unitaries in ${\mathcal B}_{\text{fin}}$, namely, for any $\varepsilon\in(0,1)$, there is $\delta>0$ such that, for any $U\in{\mathcal U}_\delta$ and $T\in {\mathcal B}^+_\infty$,
\begin{gather*}
(1-\varepsilon)\tau_0(T) \leq \tau_0(UTU^*) \leq (1+\varepsilon)\tau_0(T) .
\end{gather*}
\end{prop}
\begin{proof}
We first observe that, if $\delta\in(0,1)$ and $U\in{\mathcal U}_\delta$, then $T\in\mathcal{I}_0^+\Leftrightarrow UTU^*\in\mathcal{I}_0^+$. Indeed, choose $n$ such that $U,T\in{\mathcal B}_n$. Then
$\operatorname{Tr} (P_{n} UTU^*) = \operatorname{Tr} (U^* UP_{n}TP_{n}) \leq \|U^*U\| \operatorname{Tr} (P_{n}T) \leq (1+\delta) c_T\operatorname{vol}(K_n)$, $\forall\, n\in\mathbb N$, so that $UTU^* \in \mathcal{I}_0^+$. Moreover,
\begin{gather*}
\tau_0(UTU^*) = \lim_{n\to\infty} \frac{\operatorname{Tr}(P_nUTU^*)}{\operatorname{vol}(K_n)} \leq \| U^*U \| \lim_{n\to\infty} \frac{\operatorname{Tr}(P_nT)}{\operatorname{vol}(K_n)} = \| U^*U \| \tau_0(T) < (1+\delta) \tau_0(T).
\end{gather*}
Conversely, $UTU^*\in\mathcal{I}_0^+$ and $U^{-1}\in {\mathcal U}_{\delta/(1-\delta)}$ imply $T \in\mathcal{I}_0^+$. Moreover,
\begin{gather*}
\tau_0(T) \leq \big\| \big(U^{-1}\big)^*U^{-1} \big\| \tau_0(UTU^*) < \frac1{1-\delta} \tau_0(UTU^*).
\end{gather*}
The result follows by the choice $\delta=\varepsilon$.
\end{proof}
\begin{thm}
The lower semicontinuous weight $\tau$ in Proposition~$\ref{def.tau}$ is a trace on ${\mathcal B}_\infty$, that is, setting ${\mathcal J}^+:= \{A\in{\mathcal B}_\infty^+\colon \tau(A)<\infty\}$, and extending $\tau$ to the vector space ${\mathcal J}$ generated by ${\mathcal J}^+$, we get
\begin{itemize}\itemsep=0pt
\item[$(i)$] ${\mathcal J}$ is an ideal in ${\mathcal B}_\infty$,
\item[$(ii)$] $\tau(AB)=\tau(BA)$, for any $A\in{\mathcal J}$, $B\in{\mathcal B}_\infty$.
\end{itemize}
\end{thm}
\begin{proof}
$(i)$ Let us prove that ${\mathcal J}^+$ is a unitarily-invariant face in ${\mathcal B}_\infty^+$; it suffices to prove that $A\in{\mathcal J}^+$ implies $UAU^*\in{\mathcal J}^+$, for any $U\in{\mathcal U}({\mathcal B}_\infty)$, the set of unitaries in ${\mathcal B}_\infty$. To~reach a~contradiction, assume that there exists $U\in{\mathcal U}({\mathcal B}_\infty)$ such that $\tau(UAU^*)=\infty$. Then there is $p\in\mathbb N$ such that $\varphi_p(UAU^*) > 2\tau(A)+2$. Let $\delta<3$ be such that $V\in{\mathcal U}_\delta$ implies $\tau(VAV^*)\leq 2\tau(A)$, and let $U_0\in{\mathcal B}_{\text{fin}}$ be such that $\|U-U_0\|< \min\big\{ \frac{\delta}{3}, \frac1{3\|A\|\,\|\varphi_p\|} \big\}$. The inequalities
\begin{gather*}
\|U_0U_0^*-1\| = \|U^*U_0U_0^*-U^*\| \leq \|U^*U_0-1\|\,\|U_0^*\|+\|U_0^*-U^*\| < \delta
\end{gather*}
and $\|U_0^*U_0-1\|<\delta$ prove that $U_0\in{\mathcal U}_\delta$. Since
\[ |\varphi_p(U_0AU_0^*)-\varphi_p(UAU^*)|\leq 3\| \varphi_p \|\, \|A\|\, \|U-U_0\| <1,\] we get
\begin{gather*}
2\tau(A)\geq \tau(U_0AU_0^*) \geq \varphi_p(U_0AU_0^*) \geq \varphi_p(UAU^*) - 1 \geq 2\tau(A)+1,
\end{gather*}
which is absurd.
$(ii)$ We only need to prove that $\tau$ is unitarily-invariant. Let $A\in{\mathcal J}^+$, $U\in{\mathcal U}({\mathcal B}_\infty)$. For any $\varepsilon>0$, there is $p\in\mathbb N$ such that $\varphi_p(UAU^*)>\tau(UAU^*)-\varepsilon$, since, by $(i)$, $\tau(UAU^*)$ is finite. Then, arguing as in the proof of $(i)$, we can find $U_0\in{\mathcal B}_{\text{fin}}$ so close to $U$ that
\begin{gather*}
|\varphi_p(U_0AU_0^*)-\varphi_p(UAU^*)|<\varepsilon, \\
(1-\varepsilon)\tau(A)\leq \tau(U_0AU_0^*) \leq (1+\varepsilon)\tau(A).
\end{gather*}
Then
\begin{align*}
\tau(A) & \geq \frac1{1+\varepsilon}\, \tau(U_0AU_0^*) \geq \frac1{1+\varepsilon}\, \varphi_p(U_0AU_0^*)
\geq \frac1{1+\varepsilon}\, (\varphi_p(UAU^*) -\varepsilon)
\\
& \geq \frac1{1+\varepsilon}\, (\tau(UAU^*) -2\varepsilon).
\end{align*}
By the arbitrariness of $\varepsilon>0$, we get $\tau(A)\geq \tau(UAU^*)$.
Exchanging $A$ with $UAU^*$, we get the thesis.
\end{proof}
\begin{prop}
The lower semicontinuous tracial weight $\tau$ defined in Proposition~$\ref{def.tau}$ is semifinite and faithful.
\end{prop}
\begin{proof}
Let us recall that, for any $p\in\mathbb N$, $P^{-p,\infty} \in\mathcal{I}_0^+$ by Proposition~\ref{def.Qfi}. From Proposition~\ref{def.tau} it follows that $\tau(P^{-p,\infty})=\tau_0(P^{-p,\infty})<\infty$, hence $P^{-p,\infty}\in{\mathcal J}^+$. Then, for any $T\in{\mathcal B}_\infty^+$, $S_p := T^{1/2}P^{-p,\infty}T^{1/2} \in{\mathcal J}^+$, and $0 \leq S_p \leq T$. Moreover,
\begin{align*}
\tau(S_p) &= \tau\big(T^{1/2}P^{-p,\infty}T^{1/2}\big) = \tau(P^{-p,\infty}TP^{-p,\infty}) = \sup_{q\in\mathbb N} \tau_0(Q_qP^{-p,\infty}TP^{-p,\infty}Q_q)\\
& = \tau_0(P^{-p,\infty}TP^{-p,\infty}) = \varphi_p(T),
\end{align*}
so that $\sup_{p\in\mathbb N} \tau(S_p) = \tau(T)$, and $\tau$ is semifinite. Finally, if $T\in{\mathcal B}_\infty^+$ is such that $\tau(T)=0$, then $\sup_{p\in\mathbb N} \varphi_p(T)=0$. Since $\{\varphi_p(T) \}_{p\in\mathbb N}$ is an increasing sequence, $\varphi_p(T)=0$, $\forall\, p\in\mathbb N$. Then, for a~fixed $p\in\mathbb N$, we get $0 = \tau_0(P^{-p,\infty}TP^{-p,\infty}) = \lim_{n\to\infty} \frac{ \operatorname{Tr}(P_nP^{-p,\infty}TP^{-p,\infty}P_n) }{\mu_d(K_n)}$. Since the sequence $\big\{ \frac{ \operatorname{Tr}(P_nP^{-p,\infty}TP^{-p,\infty}P_n) }{\mu_d(K_n)} \big\}_{n\in\mathbb N}$ is eventually increasing, we get $\operatorname{Tr}(P_nP^{-p,\infty}TP^{-p,\infty}P_n) = 0$ eventually, that is $TP^{-p,\infty}P_n = 0$ eventually, so that $TP^{-p,\infty}=0$. By the arbitrariness of $p\in\mathbb N$, we~get~\mbox{$T=0$}.
\end{proof}
\section[A semifinite spectral triple on the inductive limit A infty]{A semifinite spectral triple on the inductive limit $\boldsymbol{{\mathcal A}_\infty}$}\label{sec6}
Since the covering we are studying is ramified, the family $\{{\mathcal A}_n,{\mathcal H}_n,D_n\}$ does not have a simple tensor product structure, contrary to what happened in~\cite{AGI01}. We therefore use a different approach to construct a semifinite spectral triple on ${\mathcal A}_\infty$: our construction is indeed based on the pair $({\mathcal B}_\infty,\tau)$ of the $C^*$-algebra of geometric operators and the semicontinuous semifinite weight on~it.
The Dirac operator will be defined below (Definition~\ref{Dinfty}) through its phase and the functional calculus of its modulus with continuous functions vanishing at $\infty$.
More precisely, we shall use the following
\begin{dfn}
Let $(\mathfrak{C},\tau)$ be a $C^*$-algebra with unit endowed with a semicontinuous semifinite faithful trace. A selfadjoint operator $T$ affiliated to $(\mathfrak{C},\tau)$ is defined as a pair given by a closed subset $\sigma(T)$ of ${\mathbb R}$ and a $*$-homomorphism $\phi\colon{\mathcal C}_0(\sigma(T))\to \mathfrak{C}$, $f(T)\mathop{=}\limits^{\mathrm{def}}\phi(f)$, provided that the support of such homomorphism is the identity in the GNS representation $\pi_\tau$ induced by the trace $\tau$.
\end{dfn}
The previous definition was inspired by that in~\cite{DFR}, Appendix~A, and should not be confused with that of Woronowicz for $C^*$-algebras without identity.
\begin{rem}\label{varieOps}
The $*$-homomorphism $\phi_\tau=\pi_\tau\circ\phi$ extends to bounded Borel functions on ${\mathbb R}$ and $e_{(-\infty,t]}\mathop{=}\limits^{\mathrm{def}}\phi_\tau(\chi_{(-\infty,t]})$ tends strongly to the identity when $t\to\infty$, hence it is a spectral family. We shall denote by $\pi_\tau(T)$ the selfadjoint operator affiliated to $\pi_\tau(\mathfrak{C})''$ given by
\begin{gather*}
\pi_\tau(T)\mathop{=}\limits^{\mathrm{def}}\int_{\mathbb R} t\, {\rm d} e_{(-\infty,t]}.
\end{gather*}
\end{rem}
\begin{prop}\label{aff-prop}
Let $T$ be a selfadjoint operator affiliated to $(\mathfrak{C},\tau)$ as above.
\begin{itemize}\itemsep=0pt
\item[$(a)$] Assume that for any $n\in\mathbb N$ there is $\varphi_n\in{\mathcal C}({\mathbb R})$ with $0\leq\varphi_n\leq 1$, $\varphi_n=1$ for $|t|\leq a_n$, $\varphi_n(t)=0$ for $|t|\geq b_n$, where $0<a_n<b_n$ and $\{a_n\}$, $\{b_n\}$ increase to $\infty$. Then, for any $A\in\mathfrak{C}$, if~$\sup\limits_n\|[T\cdot \varphi_n(T),A]\|=C<\infty$, then $[\pi_\tau(T),\pi_\tau(A)]$ is bounded and $\|[\pi_\tau(T),\pi_\tau(A)]\|=C$.
\item[$(b)$] If $\tau(f(T))<\infty$ for any positive function $f$ with compact support on the spectrum of $T$, then $\pi_\tau(T)$ has $\tau$-compact resolvent.
\end{itemize}
\end{prop}
\begin{proof} $(a)$ Let ${\mathcal D}$ be the domain of $\pi_\tau(T)$, ${\mathcal D}_0$ the space of vectors in ${\mathcal D}$ with bounded support w.r.t.~$\pi_\tau(T)$, and consider the sesquilinear form
$F(y,x)=(\pi_\tau(T)y,\pi_\tau(A)x)-(y,\pi_\tau(A)\pi_\tau(T)x)$ defined on ${\mathcal D}$. By hypothesis, for any $x,y\in{\mathcal D}_0$ there exists $n$ such that $\pi_\tau(\varphi_n(T))x=x$ and $\pi_\tau(\varphi_n(T))y=y$, hence $|F(y,x)|=|(y,\pi_\tau([T\cdot \varphi_n(T),A])x)|\leq C\|x\|\,\|y\|$.
By the density of ${\mathcal D}_0$ in ${\mathcal D}$ w.r.t.~the graph norm of $\pi_\tau(T)$, the same bound holds on ${\mathcal D}$.
Then, for $y,x\in{\mathcal D}$, $|(\pi_\tau(T)y,\pi_\tau(A)x)|\leq |(y,\pi_\tau(A)\pi_\tau(T)x)|+|F(y,x)|\leq (\|\pi_\tau(A)\pi_\tau(T)x\|+C\|x\|)\|y\|$, which implies that $\pi_\tau(A)x$ belongs to the domain of $\pi_\tau(T)^*=\pi_\tau(T)$. Therefore $\pi_\tau(T)\pi_\tau(A)-\pi_\tau(A)\pi_\tau(T)$ is defined on ${\mathcal D}$ and its norm is bounded by $C$. Since $C$ is the optimal bound for the sesquilinear form $F$, it is indeed the norm of the commutator.
$(b)$ Let $\lambda$ be in the resolvent set of~$|T|$. We then note that, for any $f$ positive and vanishing on a~neighbourhood of the origin, there is a $g$ positive and with compact support such that $f\big((|T|-\lambda I)^{-1}\big)=g(|T|)$.
Therefore $\tau\big(f\big((|T|-\lambda I)^{-1}\big)\big)<\infty$, hence
$\tau\big(e_{(t,+\infty)}\big(\pi_\tau\big((|T|-\lambda I)^{-1}\big)\big)\big)<\infty$ for any $t>0$, i.e., $\pi_\tau\big((|T|-\lambda I)^{-1}\big)$ is $\tau$-compact (cf.~Section~\ref{SemST}).
\end{proof}
\begin{dfn}\label{Dinfty}
We consider the Dirac operator $D=F|D|$ on $\ell^2(E_\infty)$, where $F$ is the orientation reversing operator on edges and
\begin{gather*}
|D|=\sum_{n\in{\mathbb Z}}2^{-n}P^n,\qquad \sigma(|D|)=\{2^{-n},\ n\in{\mathbb Z}\}\cup\{0\}.
\end{gather*}
\end{dfn}
\begin{prop}\label{OnDinfty}
The following hold:
\begin{itemize}\itemsep=0pt
\item[$(a)$] The elements $D$ and $|D|$ are affiliated to $({\mathcal B}_\infty,\tau)$.
\item[$(b)$] The following formulas hold: $\tau(P^n)=6\cdot 3^{-n}$, $\tau(P^{-p,\infty})=3^{p+2}$; as a consequence, the operator $D$ has $\tau$-compact resolvent.
\item[$(c)$] The trace $\tau\big((I+ D^{2})^{-s/2}\big)$ is finite if and only if $s>d=\frac{\log3}{\log2}$, and
\begin{gather*}
\operatorname{Res}_{s=d}\tau\big((I+ D^{2})^{-s/2}\big)=\frac6{\log2}.
\end{gather*}
\end{itemize}
\end{prop}
\begin{proof}
($a$) We first observe that the $*$-homomorphisms for $D$ and $|D|$ have the same support projection; then note that, since $F$ and $P_n$ belong to ${\mathcal B}_0$ (which is a von Neumann algebra) for any $n\in\mathbb N$, $f(D)$ and $f(|D|)$ belong to ${\mathcal B}_0$ for any $f\in{\mathcal C}_0({\mathbb R})$; therefore it is enough to show that the support of $f\mapsto f( |D|)$ is the identity in the representation $\pi_\tau$.
In order to prove this, it is enough to show that $\pi_\tau\big(e_{|D|}[0,2^p]\big)$ tends to the identity strongly when $p\to\infty$, that is to say that $\pi_\tau\big(e_{|D|}(2^p,\infty)\big)$ tends to 0 strongly when $p\to\infty$.
We consider then the projection $P^{-\infty,0}$, which projects on the space generated by the edges with $\operatorname{length}(e)\leq 1$. Clearly, such projection belongs to ${\mathcal B}_0$; we now show that it is indeed central there. In fact, if $c$ is a cell with $\operatorname{size}(c)=1$, $P_c$ commutes with ${\mathcal B}_0$. Since $P^{-\infty,0}=\sum_{\operatorname{size}(c)=1}P_c$, then $P^{-\infty,0}$ commutes with ${\mathcal B}_0$.
On the one hand, the von Neumann algebra $P^{-\infty,0}{\mathcal B}_0$ is isomorphic to ${\mathcal B}\big(\ell^2(K)\big)$ and the restriction of $\tau$ to $P^{-\infty,0}{\mathcal B}_0$ coincides with the usual trace on ${\mathcal B}\big(\ell^2(K)\big)$, therefore the representation $\pi_\tau$ is normal when restricted to $P^{-\infty,0}{\mathcal B}_0$. On the other hand, since $e_{|D|}(2^p,\infty)=P^{-\infty,-p-1}$ is, for $-p\leq 1$, a sub-projection of $P^{-\infty,0}$, and $P^{-\infty,-p-1}$ tends to 0 strongly in the given representation, the same holds in the representation $\pi_\tau$.
($b$) We prove the first equation. Indeed,
\begin{gather*}
\tau(P^n)
=\lim_m \frac{\operatorname{Tr} P_{m}^n}{\operatorname{vol}(K_m)}
=\operatorname{Tr} P_{0}^n+\lim_m\sum_{j=1}^m\frac{\operatorname{Tr} P_j^j P^n}{\operatorname{vol}(K_j)}.
\end{gather*}
The first summand is non-zero iff $n\leq0$, while the second vanishes exactly for such $n$. Since
\begin{gather*}
\lim_m\sum_{j=1}^m\frac{\operatorname{Tr} P_j^j P^n}{\operatorname{vol}(K_j)}
=\frac{\operatorname{Tr} P_n^n }{\operatorname{vol}(K_n)},
\end{gather*}
the result in~\eqref{Pjn} shows that in both cases we obtain $6\cdot 3^{-n}$. We already proved in~\eqref{P-pinfty} that $\tau_0(P^{-p,\infty})=3^{p+2}$. Since $P^{-p,\infty}\in{\mathcal B}_0$, the same holds for $\tau$ by Proposition~\ref{def.tau}$(ii)$.
Then the thesis follows by condition $(b)$ in Proposition~\ref{aff-prop}.
($c$) We have $\tau\big((I+ D^{2})^{-s/2}\big)=\tau\big(P^{-\infty,0}\big(I+ D^{2}\big)^{-s/2}\big)
+\tau\big(P^{1,+\infty}\big(I+ D^{2}\big)^{-s/2}\big)$.
A straightforward computation and~\eqref{evConst} give
\begin{gather*}
\tau\big(P^{-\infty,0}\big(I+ D^{2}\big)^{-s/2}\big)=\operatorname{Tr}\big(P_0\big(I+ D^{2}\big)^{-s/2}\big)=
6\sum_{n\geq0}\big(1+2^{2n}\big)^{-s/2}3^{n},
\end{gather*}
which converges iff $s>d$. As for the second summand, we have
\begin{align*}
\tau\big(P^{1,+\infty}\big(I+ D^{2}\big)^{-s/2}\big)
&=\tau_0\big(P^{1,+\infty}\big(I+ D^{2}\big)^{-s/2}\big)
=\lim_m\frac{\operatorname{Tr}\big(P^{1,m}_m\big(I+ D^{2}\big)^{-s/2}\big)}{\mu_d(K_m)}
\\
&=\lim_m\sum_{j=1}^m 3^{-m}\operatorname{Tr}\big(P^{j}_m\big(I+ D^{2}\big)^{-s/2}\big)
=6\sum_{j=1}^\infty 3^{-j}\big(1+2^{-2j}\big)^{-s/2},
\end{align*}
which converges for any $s$, hence does not contribute to the residue. Finally,
\begin{align*}
\operatorname{Res}_{s=d}\tau\big((I+ D^{2})^{-s/2}\big)
&=\lim_{s\to d^+}(s-d)\,\tau\big((I+ D^{2})^{-s/2}\big)\\
&=\lim_{s\to d^+}\bigg(s-\frac{\log3}{\log2}\bigg)6\sum_{n\geq0}\big(1+2^{-2n}\big)^{-s/2}
e^{n(\log3-s\log2)}\\
&=\frac6{\log2}\lim_{s\to d^+}\frac{s\log2-\log3}{1-e^{-(s\log2-\log3)}}
=\frac6{\log2}. \tag*{\qed}
\end{align*}
\renewcommand{\qed}{}
\end{proof}
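The residue in item $(c)$ can also be checked numerically. The following short Python sketch (ours, purely illustrative and not part of the proof; the truncation level and the values of $s$ are arbitrary choices) sums the two series above, rewritten with exponentials to avoid large integers, and multiplies by $s-d$:
\begin{verbatim}
import math

d = math.log(3) / math.log(2)              # metric dimension, ~1.585

def zeta_D(s, N=4000):
    # Approximate tau((I+D^2)^{-s/2}) from tau(P^n) = 6*3^{-n}.
    # The first series (edges of length <= 1) diverges as s -> d^+,
    # the second (edges of length >= 2) converges for every s.
    small = sum(6 * (1 + 4.0 ** (-n)) ** (-s / 2)
                * math.exp(n * (math.log(3) - s * math.log(2))) for n in range(N))
    large = sum(6 * 3.0 ** (-j) * (1 + 4.0 ** (-j)) ** (-s / 2) for j in range(1, N))
    return small + large

for eps in (0.1, 0.03, 0.01):
    print(f"(s-d)*tau at s = d + {eps:g}: {eps * zeta_D(d + eps):.3f}")
print("expected residue 6/log 2 =", round(6 / math.log(2), 3))    # 8.656
\end{verbatim}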
\begin{prop}\label{commutator}
For any $f\in{\mathcal A}_n$, $\sup\limits_{t>0}\big\|\big[ e_{[-t,t]}(D)\, D,\rho(f)\big]\big\|
=\|[ D_n,\rho(f|_{K_n})]\|$.
\end{prop}
\begin{proof}
We observe that
$| D|$ is a multiplication operator on $\ell^2(E_\infty)$, therefore it commutes with~$\rho(f)$. Hence,
\begin{gather*}
\big\|\big[ D\,e_{[-2^p,2^p]}(D),\rho(f)\big]\big\|
=\big\||D|\,e_{[0,2^p]}(|D|)\, (\rho(f)-F\rho(f)F)\big\|
=\sup_{\operatorname{length}(e)\geq 2^{-p}}\frac{|f(e^+)-f(e^-)|}{\operatorname{length}(e)}.
\end{gather*}
As a consequence,
\begin{gather*}
\sup_{p\in{\mathbb Z}}\big\|\big[ D\,e_{[-2^p,2^p]}(D),\rho(f)\big]\big\|
=\sup_{e\in E_\infty}\frac{|f(e^+)-f(e^-)|}{\operatorname{length}(e)}.
\end{gather*}
Recall now that any edge $e$ of length $2^{n+1}$ is the union of two adjacent edges $e_1$ and $e_2$ of~length~$2^n$ such that $e_1^+=e_2^-$, therefore
\begin{gather*}
\frac{|f(e^+)-f(e^-)|}{2^{n+1}}
\leq\frac12\bigg(\frac{|f(e_1^+)-f(e_1^-)|}{2^{n}}+\frac{|f(e_2^+)-f(e_2^-)|}{2^{n}}\bigg)
\leq \sup_{\operatorname{length}(e)=2^n}\frac{|f(e^+)-f(e^-)|}{\operatorname{length}(e)}.
\end{gather*}
Iterating, we get
\begin{gather*}
\sup_{e\in E_\infty}\frac{|f(e^+)-f(e^-)|}{\operatorname{length}(e)}
=\sup_{\operatorname{length}(e)\leq 2^{n}}\frac{|f(e^+)-f(e^-)|}{\operatorname{length}(e)}.
\end{gather*}
Since $f\in{\mathcal A}_n$,
\begin{gather*}
\sup_{\operatorname{length}(e)\leq 2^{n}}\frac{|f(e^+)-f(e^-)|}{\operatorname{length}(e)}
=\sup_{e\in K_n}\frac{|f(e^+)-f(e^-)|}{\operatorname{length}(e)}
=\|[ D_n,\rho(f|_{K_n})]\|.
\tag*{\qed}
\end{gather*}
\renewcommand{\qed}{}
\end{proof}
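The key combinatorial step above, namely that difference quotients over long edges are dominated by those over the shortest ones, can be illustrated by the following toy one-dimensional Python check (ours; it uses dyadic intervals in place of gasket edges, so it is only an analogy):
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
levels = 6
f = rng.normal(size=2 ** levels + 1)       # values on the dyadic points k / 2^levels

def sup_quotient(k):
    # sup |f(e+) - f(e-)| / length(e) over dyadic edges of length 2^(k-levels)
    step = 2 ** k
    diffs = np.abs(f[step:] - f[:-step])[::step]
    return diffs.max() / (step * 2.0 ** (-levels))

quotients = [sup_quotient(k) for k in range(levels + 1)]
print(np.round(quotients, 3))
assert max(quotients) == quotients[0]      # the sup is attained on the shortest edges
\end{verbatim}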
In the following theorem we identify ${\mathcal B}_\infty$ with $\pi_\tau({\mathcal B}_\infty)$, the trace $\tau$ on $\pi_\tau({\mathcal B}_\infty)$ with its extension to $\pi_\tau({\mathcal B}_\infty)''$, and $D_n$ and $D$, as unbounded operators affiliated with $({\mathcal B}_\infty,\tau)$, with~$\pi_\tau(D_n)$ and $\pi_\tau(D)$, as unbounded operators affiliated with $(\pi_\tau({\mathcal B}_\infty)'',\tau)$.
\begin{thm}\label{SFtriple}
The triple $({\mathcal L},\pi_\tau({\mathcal B}_\infty)'',D)$ on the unital $C^*$-algebra ${\mathcal A}_\infty$ is an odd semifinite spectral triple, where ${\mathcal L}=\bigcup_n\{f\in{\mathcal A}_n\colon f\text{ Lipschitz}\}$.
The spectral triple has metric dimension $d=\frac{\log3}{\log2}$, the functional
\begin{gather}\label{int-trace}
\oint f=\tau_\omega \big(\rho(f)\big(I+D^2\big)^{-d/2}\big),
\end{gather}
is a finite trace on ${\mathcal A}_\infty$, where $\tau_\omega$ is the logarithmic Dixmier trace associated with $\tau$, and
\begin{gather}\label{integral}
\oint f=\frac6{\log3}\frac{\int_{K_n} f\,{\rm d}\operatorname{vol}}{\operatorname{vol}(K_n)},\qquad f\in{\mathcal A}_n,
\end{gather}
where $\operatorname{vol}$ is the Hausdorff measure of dimension $d$ normalized as above. As a consequence, $\oint f$ is a Bohr--F{\o}lner mean on the solenoid:
\begin{gather*}
\oint f=\frac6{\log3}\,\lim_{n\to\infty}\frac{\int_{K_n} f\,{\rm d}\operatorname{vol}}{\operatorname{vol}(K_n)},\qquad
f\in{\mathcal A}_\infty.
\end{gather*}
The Connes distance
\begin{gather*}
d(\varphi,\psi)=\sup\{|\varphi(f)-\psi(f)|\colon f\in{\mathcal L},\, \| [ D,\rho(f) ] \| = 1 \},\qquad
\varphi,\psi\in{\mathcal S}({\mathcal A}_\infty),
\end{gather*}
between states on ${\mathcal A}_\infty$ verifies
\begin{gather}\label{distance}
d(\delta_x,\delta_y)=d_{\rm geo}(x,y),\qquad x,y\in K_\infty,
\end{gather}
where $d_{\rm geo}$ is the geodesic distance on $K_\infty$.
\end{thm}
\begin{proof}
The properties of a semifinite spectral triple follow from the properties proved above; in particular, property $(1)$ of Definition~\ref{def:SFtriple} follows from Propositions~\ref{aff-prop}$(a)$ and~\ref{commutator}, while property $(2)$ follows from Proposition~\ref{OnDinfty}$(b)$.
The functional in equation~\eqref{int-trace} is a finite trace by Proposition~\ref{OnDinfty}$(c)$. It only remains to prove equations~\eqref{integral} and~\eqref{distance}. We observe that $(I+D^2)^{-d/2}-|D_n|^{-d}$ has finite trace. Indeed,
\begin{gather*}
\big((I+D^2)^{-d/2}-|D_n|^{-d}\big)e=
\begin{cases}
\big(1+4^{-k}\big)^{-d/2}e, & \operatorname{length}(e)=2^k,\quad k>n,
\\
\big(\big(1+4^{-k}\big)^{-d/2}-2^{dk}\big)e, & \operatorname{length}(e)=2^k,\quad k\leq n,
\end{cases}
\end{gather*}
hence, making use of a formula in Proposition~\ref{OnDinfty}$(b)$, we get
\begin{align*}
\big|\tau\big(\big(I+D^2\big)^{-d/2}-|D_n|^{-d}\big)\big|
&\leq\sum_{k>n}\big(1+4^{-k}\big)^{-d/2}\tau\big(P^k\big)+\sum_{k\leq n}\big|\big(1+4^{-k}\big)^{-d/2}-3^k\big|\tau\big(P^k\big)\\
&\leq6\big(1+4^{-(n+1)}\big)^{-d/2}\sum_{k>n}3^{-k}
+6\sum_{k\leq n}\big|\big(1+4^{k}\big)^{-d/2}-1\big|,
\end{align*}
and both series are convergent. Since the Dixmier trace vanishes on trace class operators, this implies that
\begin{gather*}
\tau_\omega \big(\rho(f)\big(I+D^2\big)^{-d/2}\big)=\tau_\omega \big(\rho(f)|D_n|^{-d}\big)
=\frac1d \operatorname{Res}_{s=d}\tau\big(\rho(f)|D_n|^{-s}\big),
\end{gather*}
therefore, if $f\in{\mathcal A}_n$,
\begin{gather*}
\oint f
=\frac1d \operatorname{Res}_{s=d}\tau\big(\rho(f)|D_n|^{-s}\big)
=\frac1d \operatorname{Res}_{s=d}\frac{\operatorname{Tr}\big(\rho(f|_{K_n})|D_n|^{-s}\big)}{\operatorname{vol}(K_n)}
=\frac{\operatorname{Tr}_\omega \big(\rho(f)|D_n|^{-d}\big)}{\operatorname{vol}(K_n)}.
\end{gather*}
Now, by formula~\eqref{fractalNCint} applied to $K_n$,
$\operatorname{Tr}_\omega\big(\rho(f)|D_n|^{-d}\big) = \frac{6\cdot \ell(e)^d}{ \log 3} \int_{K_n} f\, {\rm d}H_d$, where $H_d$ is the Hausdorff measure normalized on $K_n$, hence $H_d=(\mu_d(K_n))^{-1}\mu_d=3^{-n}\mu_d$, and $e\in E_0(K_n)$, hence $\ell(e)^d=3^n$. Therefore
$\operatorname{Tr}_\omega\big(\rho(f)|D_n|^{-d}\big) = \frac6{\log3} \int_{K_n} f\, {\rm d}\mu_d$, and formula~\eqref{integral} follows.
As~for equation~\eqref{distance}, given $x,y\in K_\infty$, let $n$ be such that $x,y\in K_n$, and let $m\geq n$. Then, combining Propositions~\ref{aff-prop}$(a)$ and~\ref{commutator}, we have, for $f\in{\mathcal A}_m$,
\begin{gather*}
\|[ D, \rho(f)]\|=\|[ D_m,\rho(f|_{K_m})]\|,
\end{gather*}
and, by Theorem 5.2 and Corollary 5.14 in~\cite{GuIs16},
\begin{gather*}
\sup\{|f(x)-f(y)|\colon f\in{\mathcal A}_m,\,\|[D_m,\rho(f|_{K_m})]\|=1\}=d_{\rm geo}(x,y),\qquad m\geq n.
\end{gather*}
Therefore
\begin{align*}
d(\delta_x,\delta_y)
&=\sup\{|f(x)-f(y)|\colon f\in{\mathcal L},\, \|[ D,\rho(f)]\|=1\}
\\
&=\lim_m\ \sup\{|f(x)-f(y)|\colon f\in{\mathcal A}_m,\, \|[ D,\rho(f)]\|=1\}
\\
&=\lim_m\ \sup\{|f(x)-f(y)|\colon f\in{\mathcal A}_m,\, \|[D_m,\rho(f|_{K_m})]\|=1\}=d_{\rm geo}(x,y).
\tag*{\qed}
\end{align*}
\renewcommand{\qed}{}
\end{proof}
\begin{rem}
The last statement in Theorem~\ref{SFtriple} shows that the spectral triple constructed there recovers two incompatible aspects of the space ${\mathcal A}_\infty$: on the one hand, the compact space given by the spectrum of the unital algebra ${\mathcal A}_\infty$, with the corresponding finite integral, and, on the other hand, the open fractafold $K_\infty$ with its geodesic distance. In particular, the functional on ${\mathcal L}$ given by~$L(f)=\|[D,\rho(f)]\|$ is not a Lip-norm in the sense of Rieffel~\cite{Rieffel}, because it does not give rise to~the weak$^*$ topology on ${\mathcal S}({\mathcal A}_\infty)$. In fact, such a seminorm produces a distance which is unbounded on points, therefore the induced topology cannot be compact.
\end{rem}
\subsection*{Acknowledgements}
We thank the referees of this paper for many interesting observations and suggestions.
V.A.~is supported by the Swiss National Science Foundation.
D.G.~and T.I.~are supported in part by~GNAMPA-INdAM and the ERC Advanced Grant 669240 QUEST ``Quantum Algebraic Structures and Models'', and acknowledge the MIUR Excellence Department Project awarded to the Department of Mathematics, University of Rome Tor Vergata, CUP E83C18000100006.
\pdfbookmark[1]{References}{ref}
\begin{thebibliography}{99}
\footnotesize\itemsep=0pt
\bibitem{AGI01}
Aiello V., Guido D., Isola T., Spectral triples for noncommutative solenoidal spaces from self-coverings, \href{https://doi.org/10.1016/j.jmaa.2016.11.066}{\textit{J.~Math. Anal. Appl.}} \textbf{448} (2017), 1378--1412, \href{https://arxiv.org/abs/1604.08619}{arXiv:1604.08619}.
\bibitem{AGI02}
Aiello V., Guido D., Isola T., Spectral triples on irreversible $C^*$-dynamical systems, \href{https://arxiv.org/abs/2102.05392}{arXiv:2102.05392}.
\bibitem{Arau}
Arauza~Rivera A., Spectral triples for the variants of the {S}ierpi\'nski gasket, \href{https://doi.org/10.4171/JFG/75}{\textit{J.~Fractal Geom.}} \textbf{6} (2019), 205--246, \href{https://arxiv.org/abs/1709.00755}{arXiv:1709.00755}.
\bibitem{Atiyah}
Atiyah M.F., Elliptic operators, discrete groups and von {N}eumann algebras, \textit{Ast\'erisque} \textbf{32--33} (1976), 43--72.
\bibitem{BaPe}
Barlow M.T., Perkins E.A., Brownian motion on the {S}ierpi\'nski gasket, \href{https://doi.org/10.1007/BF00318785}{\textit{Probab. Theory Related Fields}} \textbf{79} (1988), 543--623.
\bibitem{BMR}
Bellissard J.V., Marcolli M., Reihani K., Dynamical systems on spectral metric spaces, \href{https://arxiv.org/abs/1008.4617}{arXiv:1008.4617}.
\bibitem{BerPla}
Berestovskii V., Plaut C., Uniform universal covers of uniform spaces, \href{https://doi.org/10.1016/j.topol.2006.12.012}{\textit{Topology Appl.}} \textbf{154} (2007), 1748--1777, \href{https://arxiv.org/abs/math.AG/0607353}{arXiv:math.AG/0607353}.
\bibitem{CaPhi1}
Carey A., Phillips J., Unbounded {F}redholm modules and spectral flow, \href{https://doi.org/10.4153/CJM-1998-038-x}{\textit{Canad.~J. Math.}} \textbf{50} (1998), 673--718.
\bibitem{CPS}
Carey A., Phillips J., Sukochev F., Spectral flow and {D}ixmier traces, \href{https://doi.org/10.1016/S0001-8708(02)00015-4}{\textit{Adv. Math.}} \textbf{173} (2003), 68--113, \href{https://arxiv.org/abs/math.OA/0205076}{arXiv:math.OA/0205076}.
\bibitem{ChIv07}
Christensen E., Ivan C., Sums of two-dimensional spectral triples, \href{https://doi.org/10.7146/math.scand.a-15015}{\textit{Math. Scand.}} \textbf{100} (2007), 35--60, \href{https://arxiv.org/abs/math.OA/0601024}{arXiv:math.OA/0601024}.
\bibitem{CIL}
Christensen E., Ivan C., Lapidus M.L., Dirac operators and spectral triples for some fractal sets built on curves, \href{https://doi.org/10.1016/j.aim.2007.06.009}{\textit{Adv. Math.}} \textbf{217} (2008), 42--78, \href{https://arxiv.org/abs/math.MG/0610222}{arXiv:math.MG/0610222}.
\bibitem{CIS}
Christensen E., Ivan C., Schrohe E., Spectral triples and the geometry of fractals, \href{https://doi.org/10.4171/JNCG/91}{\textit{J.~Noncommut. Geom.}} \textbf{6} (2012), 249--274, \href{https://arxiv.org/abs/1002.3081}{arXiv:1002.3081}.
\bibitem{CGIs01}
Cipriani F., Guido D., Isola T., A {$C^*$}-algebra of geometric operators on self-similar {CW}-complexes. {N}ovikov--{S}hubin and {$L^2$}-{B}etti numbers, \href{https://doi.org/10.1016/j.jfa.2008.10.013}{\textit{J.~Funct. Anal.}} \textbf{256} (2009), 603--634, \href{https://arxiv.org/abs/math.OA/0607603}{arXiv:math.OA/0607603}.
\bibitem{CGIS13}
Cipriani F., Guido D., Isola T., Sauvageot J.-L., Integrals and potentials of differential 1-forms on the {S}ierpinski gasket, \href{https://doi.org/10.1016/j.aim.2013.02.014}{\textit{Adv. Math.}} \textbf{239} (2013), 128--163, \href{https://arxiv.org/abs/1105.1995}{arXiv:1105.1995}.
\bibitem{CGIS02}
Cipriani F., Guido D., Isola T., Sauvageot J.-L., Spectral triples for the {S}ierpinski gasket, \href{https://doi.org/10.1016/j.jfa.2014.02.013}{\textit{J.~Funct. Anal.}} \textbf{266} (2014), 4809--4869, \href{https://arxiv.org/abs/1112.6401}{arXiv:1112.6401}.
\bibitem{ConnesBook}
Connes A., Noncommutative geometry, Academic Press, Inc., San Diego, CA, 1994.
\bibitem{Cuntz}
Cuntz J., The internal structure of simple {$C^{\ast}$}-algebras, in Operator Algebras and Applications, {P}art~{I} ({K}ingston, {O}nt., 1980), \textit{Proc. Sympos. Pure Math.}, Vol.~38, Amer. Math. Soc., Providence, R.I., 1982, 85--115.
\bibitem{DGMW}
Deeley R.J., Goffeng M., Mesland B., Whittaker M.F., Wieler solenoids, {C}untz--{P}imsner algebras and {$K$}-theory, \href{https://doi.org/10.1017/etds.2017.10}{\textit{Ergodic Theory Dynam. Systems}} \textbf{38} (2018), 2942--2988, \href{https://arxiv.org/abs/1606.05449}{arXiv:1606.05449}.
\bibitem{Dix}
Dixmier J., Existence de traces non normales, \textit{C.~R.~Acad. Sci. Paris S\'er.~A-B} \textbf{262} (1966), A1107--A1108.
\bibitem{DFR}
Doplicher S., Fredenhagen K., Roberts J.E., The quantum structure of spacetime at the {P}lanck scale and quantum fields, \href{https://doi.org/10.1007/BF02104515}{\textit{Comm. Math. Phys.}} \textbf{172} (1995), 187--220, \href{https://arxiv.org/abs/hep-th/0303037}{arXiv:hep-th/0303037}.
\bibitem{Fack}
Fack T., Sur la notion de valeur caract\'eristique, \textit{J.~Operator Theory} \textbf{7} (1982), 307--333.
\bibitem{FK}
Fack T., Kosaki H., Generalized {$s$}-numbers of {$\tau$}-measurable operators, \href{https://doi.org/10.2140/pjm.1986.123.269}{\textit{Pacific~J. Math.}} \textbf{123} (1986), 269--300.
\bibitem{GBVF}
Gracia-Bond\'{\i}a J.M., V\'arilly J.C., Figueroa H., Elements of noncommutative geometry, \textit{Birkh\"auser Advanced Texts: Basler Lehrb\"ucher}, \href{https://doi.org/10.1007/978-1-4612-0005-5}{Birkh\"auser Boston, Inc.}, Boston, MA, 2001.
\bibitem{GuIs1}
Guido D., Isola T., Singular traces on semifinite von {N}eumann algebras, \href{https://doi.org/10.1006/jfan.1995.1153}{\textit{J.~Funct. Anal.}} \textbf{134} (1995), 451--485.
\bibitem{GuIs4}
Guido D., Isola T., Noncommutative {R}iemann integration and {N}ovikov--{S}hubin invariants for open manifolds, \href{https://doi.org/10.1006/jfan.2000.3609}{\textit{J.~Funct. Anal.}} \textbf{176} (2000), 115--152, \href{https://arxiv.org/abs/math.OA/9802015}{arXiv:math.OA/9802015}.
\bibitem{GuIs7}
Guido D., Isola T., A semicontinuous trace for almost local operators on an open manifold, \href{https://doi.org/10.1142/S0129167X01001106}{\textit{Internat.~J. Math.}} \textbf{12} (2001), 1087--1102, \href{https://arxiv.org/abs/math.DG/0110294}{arXiv:math.DG/0110294}.
\bibitem{GuIs9}
Guido D., Isola T., Dimensions and singular traces for spectral triples, with applications to fractals, \href{https://doi.org/10.1016/S0022-1236(03)00230-1}{\textit{J.~Funct. Anal.}} \textbf{203} (2003), 362--400, \href{https://arxiv.org/abs/math.OA/0202108}{arXiv:math.OA/0202108}.
\bibitem{GuIs10}
Guido D., Isola T., Dimensions and spectral triples for fractals in {${\mathbb R}^N$}, in Advances in Operator Algebras and Mathematical Physics, \textit{Theta Ser. Adv. Math.}, Vol.~5, Theta, Bucharest, 2005, 89--108, \href{https://arxiv.org/abs/math.OA/0404295}{arXiv:math.OA/0404295}.
\bibitem{GuIs16}
Guido D., Isola T., Spectral triples for nested fractals, \href{https://doi.org/10.4171/JNCG/11-4-7}{\textit{J.~Noncommut. Geom.}} \textbf{11} (2017), 1413--1436, \href{https://arxiv.org/abs/1601.08208}{arXiv:1601.08208}.
\bibitem{Skalski}
Hawkins A., Skalski A., White S., Zacharias J., On spectral triples on crossed products arising from equicontinuous actions, \href{https://doi.org/10.7146/math.scand.a-15572}{\textit{Math. Scand.}} \textbf{113} (2013), 262--291, \href{https://arxiv.org/abs/1103.6199}{arXiv:1103.6199}.
\bibitem{HiRo}
Higson N., Roe J., Analytic {$K$}-homology, \textit{Oxford Mathematical Monographs}, Oxford University Press, Oxford, 2000.
\bibitem{Kiga2}
Kigami J., Analysis on fractals, \textit{Cambridge Tracts in Mathematics}, Vol.~143, \href{https://doi.org/10.1017/CBO9780511470943}{Cambridge University Press}, Cambridge, 2001.
\bibitem{KiLa}
Kigami J., Lapidus M.L., Self-similarity of volume measures for {L}aplacians on p.c.f.\ self-similar fractals, \href{https://doi.org/10.1007/s002200000326}{\textit{Comm. Math. Phys.}} \textbf{217} (2001), 165--180.
\bibitem{LaSa}
Lapidus M., Sarhad J., Dirac operators and geodesic metric on the harmonic {S}ierpinski gasket and other fractal sets, \href{https://doi.org/10.4171/JNCG/174}{\textit{J.~Noncommut. Geom.}} \textbf{8} (2014), 947--985, \href{https://arxiv.org/abs/1212.0878}{arXiv:1212.0878}.
\bibitem{La94}
Lapidus M.L., Analysis on fractals, {L}aplacians on self-similar sets, noncommutative geometry and spectral dimensions, \href{https://doi.org/10.12775/TMNA.1994.025}{\textit{Topol. Methods Nonlinear Anal.}} \textbf{4} (1994), 137--195.
\bibitem{La97}
Lapidus M.L., Towards a noncommutative fractal geometry? {L}aplacians and volume measures on fractals, in Harmonic Analysis and Nonlinear Differential Equations ({R}iverside, {CA}, 1995), \textit{Contemp. Math.}, Vol.~208, \href{https://doi.org/10.1090/conm/208/02742}{Amer. Math. Soc.}, Providence, RI, 1997, 211--252.
\bibitem{LaPa}
Latr\'emoli\`ere F., Packer J., Noncommutative solenoids and the {G}romov--{H}ausdorff propinquity, \href{https://doi.org/10.1090/proc/13229}{\textit{Proc. Amer. Math. Soc.}} \textbf{145} (2017), 2043--2057, \href{https://arxiv.org/abs/1601.02707}{arXiv:1601.02707}.
\bibitem{McCord}
McCord M.C., Inverse limit sequences with covering maps, \href{https://doi.org/10.2307/1993997}{\textit{Trans. Amer. Math. Soc.}} \textbf{114} (1965), 197--209.
\bibitem{Nekra}
Nekrashevych V., Self-similar groups, \textit{Mathematical Surveys and Monographs}, Vol.~117, \href{https://doi.org/10.1090/surv/117}{Amer. Math. Soc.}, Providence, RI, 2005.
\bibitem{Paterson}
Paterson A.L.T., Contractive spectral triples for crossed products, \href{https://doi.org/10.7146/math.scand.a-17112}{\textit{Math. Scand.}} \textbf{114} (2014), 275--298, \href{https://arxiv.org/abs/1204.4404}{arXiv:1204.4404}.
\bibitem{Rieffel}
Rieffel M.A., Metrics on states from actions of compact groups, \textit{Doc. Math.} \textbf{3} (1998), 215--229, \href{https://arxiv.org/abs/math.OA/9807084}{arXiv:math.OA/9807084}.
\bibitem{Roe}
Roe J., An index theorem on open manifolds.~{I}, \href{https://doi.org/10.4310/jdg/1214441652}{\textit{J.~Differential Geom.}} \textbf{27} (1988), 87--113.
\bibitem{Roe-2}
Roe J., An index theorem on open manifolds.~{II}, \href{https://doi.org/10.4310/jdg/1214441653}{\textit{J.~Differential Geom.}} \textbf{27} (1988), 115--136.
\bibitem{Roe96}
Roe J., Index theory, coarse geometry, and topology of manifolds, \textit{CBMS Regional Conference Series in Mathematics}, Vol.~90, \href{https://doi.org/10.1090/cbms/090}{Amer. Math. Soc.}, Providence, RI, 1996.
\bibitem{RoeLN}
Roe J., Lectures on coarse geometry, \textit{University Lecture Series}, Vol.~31, \href{https://doi.org/10.1090/ulect/031}{Amer. Math. Soc.}, Providence, RI, 2003.
\bibitem{RuStri}
Ruan H.-J., Strichartz R.S., Covering maps and periodic functions on higher dimensional {S}ierpinski gaskets, \href{https://doi.org/10.4153/CJM-2009-054-5}{\textit{Canad.~J. Math.}} \textbf{61} (2009), 1151--1181.
\bibitem{Sierpinski}
Sierpi\'nski W., Sur une courbe dont tout point est un point de ramification, \textit{C.~R.~Acad. Sci. Paris} \textbf{160} (1915), 302--305.
\bibitem{Stri1996}
Strichartz R.S., Fractals in the large, \href{https://doi.org/10.4153/CJM-1998-036-5}{\textit{Canad.~J. Math.}} \textbf{50} (1998), 638--657.
\bibitem{Stri2003}
Strichartz R.S., Fractafolds based on the {S}ierpi\'nski gasket and their spectra, \href{https://doi.org/10.1090/S0002-9947-03-03171-4}{\textit{Trans. Amer. Math. Soc.}} \textbf{355} (2003), 4019--4043.
\bibitem{Stri2009}
Strichartz R.S., Periodic and almost periodic functions on infinite {S}ierpinski gaskets, \href{https://doi.org/10.4153/CJM-2009-055-9}{\textit{Canad.~J. Math.}} \textbf{61} (2009), 1182--1200.
\bibitem{Tep}
Teplyaev A., Spectral analysis on infinite {S}ierpi\'nski gaskets, \href{https://doi.org/10.1006/jfan.1998.3297}{\textit{J.~Funct. Anal.}} \textbf{159} (1998), 537--567.
\bibitem{WiYu}
Willett R., Yu G., Higher index theory, \textit{Cambridge Studies in Advanced Mathematics}, Vol.~189, \href{https://doi.org/10.1017/9781108867351}{Cambridge University Press}, Cambridge, 2020.
\end{thebibliography}\LastPageEnding
\end{document}
\begin{document}
\title{Quantum teleportation from light beams to vibrational states \\
of a macroscopic diamond}
\author{P.-Y. Hou$^{1}$, Y.-Y. Huang$^{1}$, X.-X. Yuan$^{1}$, X.-Y. Chang$
^{1}$, C. Zu$^{1}$, L. He$^{1}$, L.-M. Duan}
\affiliation{Center for Quantum Information, IIIS, Tsinghua University, Beijing 100084,
PR China}
\affiliation{Department of Physics, University of Michigan, Ann Arbor, Michigan 48109, USA}
\begin{abstract}
With the recent development of optomechanics, the vibration in solids,
involving collective motion of trillions of atoms, gradually enters into the
realm of quantum control. Built on the recent remarkable progress in optical
control of motional states of diamonds, here we report an experimental
demonstration of quantum teleportation from light beams to vibrational
states of a macroscopic diamond under ambient conditions. Through quantum
process tomography, we demonstrate average teleportation fidelity $(90.6\pm
1.0)\%$, clearly exceeding the classical limit of $2/3$. The experiment
pushes the target of quantum teleportation to the biggest object so far,
with interesting implications for optomechanical quantum control and quantum
information science.
\end{abstract}
\maketitle
\section{Introduction}
Quantum teleportation has found important applications for
realization of various quantum technologies \cite{1,2,3,4}. Teleportation of
quantum states has been demonstrated between light beams \cite{6,7,8,8b},
trapped atoms \cite{9,10,11,12}, superconducting qubits \cite{13}, defect
spins in solids \cite{14}, and from light beams to atoms \cite{15,16} or
solid state spin qubits \cite{17,18}. It is of both fundamental interest and
practical importance to push quantum teleportation towards more macroscopic
objects. Observing quantum phenomena in macroscopic objects is a
big challenge, as their strong coupling to the environment causes fast
decoherence, which quickly pushes them into the classical world. For example,
quantum coherence hardly survives in the mechanical vibrations of macroscopic
solids, which involve the collective motion of a large number of strongly
interacting atoms. Despite this challenge, achieving quantum control of
optomechanical systems has recently become a focus of interest, with remarkable
progress \cite{19,20,21,22,22a1,22a2,23,24,24a,25,26,27}.
This is driven in part by the fundamental interest and in part by the
potential applications of these systems for quantum signal transduction
\cite{23,24,24a}, sensing \cite{19}, and
quantum information processing \cite{19,20,21}. There are
typically two routes to achieve quantum control for the optomechanical
systems: one needs to either identify some isolated degrees of freedom in
mechanical vibrations and cool them to very low temperature to minimize
their environmental coupling \cite{19,25,26,27}, or use
the ultrafast laser technology to fast process and detect quantum coherence
in such systems \cite{20,21,22,22a1,22a2}. A remarkable
example for the latter approach is provided by the optomechanical control in
macroscopic diamond samples \cite{20,21}, where the
motions of two separated diamonds have been cast into a quantum entangled
state \cite{20}.
In this paper, we report an experimental demonstration of quantum
teleportation from light beams to the vibrational states of a macroscopic
diamond sample of $3\times 3\times 0.3$ mm$^{3}$ in size under ambient
conditions. The vibration states are carried by two optical phonon modes,
representing collective oscillation of over $10^{16}$ carbon atoms. To
facilitate convenient qubit operations, we use the dual-rail representation
of qubits instead of the single-rail encoding used in previous experiments
\cite{20,21,22,22a1} and generate entanglement between the
paths of a photon and different oscillation patterns of the diamond
represented by two phononic modes. Using quantum state tomography, we
demonstrate entanglement fidelity of $(81.0\pm 1.8)\%$ with the raw data and
of $(89.7\pm 1.2)\%$ after the background noise subtraction. Using this
entanglement, we prepare arbitrary polarization states for the photon and
teleport these polarization states to the phonon modes with the Bell
measurements on the polarization and the path qubits carried by the same
photon. The teleportation is verified by quantum process tomography, and we
achieve a high average teleportation fidelity, about $(90.6\pm 1.0)\%$ (or $
(82.9\pm 0.8)\%$) after (or before) subtraction of the background noise. To
verify the phonon's state before its fast decay, our implementation of
teleportation adopted the technique of reversed time ordering introduced in
Ref.~\cite{20}, where the phonon's state is read out before
the teleportation is completed. Similar to the pioneering teleportation
experiment of photons \cite{6}, our implementation of teleportation is
conditional as the Bell measurements are not deterministic and require
postselection of successful measurement outcomes.
\section{Results}
\subsection{Photon-to-phonon teleportation scheme}
We illustrate our entanglement generation and quantum teleportation scheme
in Fig. 1, using a type IIa single-crystal synthetic diamond sample cut
along the $100$ face from the Element Six company. Due to the strong
interaction of atoms in the diamond, the optical phonon mode, which
represents relative oscillation of the two sublattices in the stiff diamond
lattice (see Fig.~1a), has a very high excitation frequency of about $40$~THz
near the momentum zero point in the Brillouin zone. The corresponding energy
scale for this excitation is significantly higher than the room temperature
thermal energy (about $6$~THz), and thus the optical phonon mode naturally
stays at the vacuum state under ambient conditions, which simplifies its
quantum control \cite{20,21}. The coherence lifetime of
the optical phonon mode is about $7$ ps at room temperature, which is short
but accessible with the ultrafast laser technology for which the operational
speed can be up to about $10$~THz \cite{20,21}.
We excite the optical phonon modes through ultrafast laser pulses of
duration around $150$ fs from the Ti-Sapphire laser, with the carrier
wavelength at $706.5$~nm. The diamond has a large bandgap of $5.5$~eV, so
the laser pulses are far detuned from the conduction band with a large gap
about $900$ THz. Each laser pulse generates, with a small probability $p_{
\text{s}}$, an excitation in the optical phonon mode and a Stokes photon of
wavelength $780$ nm in the forward direction (see Fig. 1a). The relevant
output state has the form
\begin{equation}
|\Psi \rangle =\left[ 1+\sqrt{p_{\text{s}}}\,b_{\text{n}}^{\dag }a_{\text{t}}^{\dag }+O\left( p_{\text{s}}\right) \right] |\text{vac}\rangle ,
\end{equation}
where $b_{\text{n}}^{\dag }$ and $a_{\text{t}}^{\dag }$ represent,
respectively, the creation operators for an optical phonon and a Stokes
photon, and $|$vac$\rangle $ denotes the common vacuum state for both the
photon and the phonon modes.
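As a toy illustration of Eq.~(1) (our own numerical sketch, not part of the data analysis), one can truncate the phonon and Stokes-photon Fock spaces, build the state to first order in $\sqrt{p_{\text{s}}}$, and verify that detecting one Stokes photon heralds exactly one optical phonon:
\begin{verbatim}
import numpy as np

dim = 3                                            # Fock states |0>, |1>, |2>
a_dag = np.diag(np.sqrt(np.arange(1, dim)), -1)    # creation operator
p_s = 0.01                                         # small Stokes-scattering probability

vac = np.zeros(dim * dim); vac[0] = 1.0            # |0>_phonon |0>_photon
psi = vac + np.sqrt(p_s) * np.kron(a_dag, a_dag) @ vac   # Eq. (1) to first order
psi /= np.linalg.norm(psi)

proj_1 = np.zeros((dim, dim)); proj_1[1, 1] = 1.0  # detect exactly one Stokes photon
heralded = (np.kron(np.eye(dim), proj_1) @ psi).reshape(dim, dim)[:, 1]
heralded /= np.linalg.norm(heralded)
print("phonon populations after heralding:", np.round(np.abs(heralded) ** 2, 6))
# -> [0. 1. 0.]: the Stokes click prepares a single optical phonon
\end{verbatim}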
To generate entanglement, we split the laser pulse into two coherent paths
as shown in Fig. 1b, and the pulse in each path generates the corresponding
phonon-photon correlated state described by Eq. (1). When there is an output
photon in one of the two paths, it is in the following maximally entangled
state with the phonon excitation
\begin{equation}
|\Psi _{\text{nt}}\rangle =\left( \left\vert U\right\rangle _{\text{n}
}\left\vert U\right\rangle _{\text{t}}+\left\vert L\right\rangle _{\text{n}
}\left\vert L\right\rangle _{\text{t}}\right) /\sqrt{2}.
\end{equation}
Here, $\left\vert U\right\rangle $ or $\left\vert L\right\rangle $
represents an excitation in the upper or lower path, and its subscript
denotes the nature of the excitation, ``n'' for a phonon and ``t'' for a photon.
We drop the vacuum term in Eq. (1) as it is eliminated if we detect a photon
emerging from one of the two paths. After entanglement generation, the
photon state can be directly measured through single-photon detectors. To
read out the phonon state, we apply another ultrafast laser pulse after a
controllable delay within the coherence time of the optical phonon mode and
convert the phononic state to the same photonic state in the forward
anti-Stokes mode at the wavelength of $645$ nm (see Fig. 1c). The state of
the anti-Stokes photon is then measured through single-photon detectors
together with linear optics devices. Note that the retrieval laser pulse
could have a carrier frequency $\omega _{\text{r}}$ different from that of
the pump laser. For instance, with $\omega _{\text{r}}$ near the telecom
band, our teleportation protocol would naturally realize a quantum frequency
transducer that transfers the photon's frequency to a desired band without
changing its quantum state. A quantum frequency transducer is widely
recognized as an important component for realization of long-distance
quantum networks \cite{23,24,24a}.
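The dual-rail state of Eq.~(2) can be checked to be maximally entangled with a few lines of Python (again a toy illustration of ours, not the experimental reconstruction):
\begin{verbatim}
import numpy as np

U, L = np.array([1.0, 0.0]), np.array([0.0, 1.0])        # upper / lower path
psi_nt = (np.kron(U, U) + np.kron(L, L)) / np.sqrt(2)    # (|U>_n|U>_t + |L>_n|L>_t)/sqrt(2)

M = psi_nt.reshape(2, 2)                                 # rows: phonon, columns: photon
print("Schmidt coefficients:", np.linalg.svd(M, compute_uv=False))  # [0.707 0.707]
print("reduced phonon state:\n", M @ M.conj().T)         # identity/2: maximally mixed
\end{verbatim}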
To realize teleportation, we need to prepare another qubit, whose state will
be teleported to the phonon modes in the diamond. Similar to the
teleportation experiments in Refs.~\cite{7,16}, we use the
polarization state of the photon to represent the input qubit, which can be
independently prepared into an arbitrary state $c_{0}\left\vert
H\right\rangle _{\text{t}}+c_{1}\left\vert V\right\rangle _{\text{t}}$,
where $\left\vert H\right\rangle _{\text{t}}$ and $\left\vert V\right\rangle
_{\text{t}}$ denote the horizontal and the vertical polarization states and $
c_{0},c_{1}$ are arbitrary coefficients. The Bell measurements on the
polarization and the path qubits carried by the same photon can be
implemented through linear optics devices together with single-photon
detection (see Fig. 1d), and the teleported state to the phononic modes is
retrieved and detected through its conversion to the anti-Stokes photon.
As in Ref.~\cite{20}, the short lifetime of the
diamond's vibration modes requires us to retrieve and detect the phonon's
state before applying detection on the Stokes photon, thus the phonon's
state is measured before the teleportation protocol is completed. The
reversed time ordering in this demonstration of quantum teleportation makes
it unsuitable for application in quantum repeaters, which require a much
longer memory time; however, it does not affect application of our
teleportation experiment for realization of a quantum frequency transducer
or a new source of entangled photons as discussed above.
\subsection{Experimental realization of teleportation}
Our experimental setup is shown in Fig. 2. First, we verify entanglement
generated between the Stokes photon and the optical phonon modes in the
diamond. For this step, we remove the optical elements in the state
preparation box shown in Fig.~2 and set the angle of HWP3 to $0^{\circ}$.
Different from the scheme illustrated in Fig. 1, we insert semicircle HWPs
set at $0^{\circ}$ and $45^{\circ}$, respectively, at the upper and the lower paths
of the pump beam, so that both the Stokes photon and the anti-Stokes photon
after the retrieval pulse have orthogonal polarizations along the two output
paths, which can be combined together through the calcites C2 and C3. This
facilitates the entanglement measurement through detection in complementary
local bases by rotating the polarizers P2 and P3 and the wave plates HWP5
and QWP2. Due to the different incident directions of the pump pulses at the
upper and the lower paths, the corresponding phonon modes excited in the
diamond have different momenta, so they represent independent modes even if
they have partial spatial overlap. The phonon is converted to the
anti-Stokes photon by the retrieval pulse, so we measure the photon-phonon
state by detecting the coincidence counts between Stokes and anti-Stokes
photons in different bases. In Fig. 3a, we show the registered coincidence
counts as we rotate the angle of the polarizer P2. The oscillation of the
coincidence counts with a visibility of $(74.6\pm 3.6)\%$ is an indicator of
coherence of the underlying state. To verify entanglement of the
photon-phonon state, we use quantum state tomography to reconstruct the full
density matrix from the measured coincidence counts \cite{28}, with the resulting matrix elements shown in Fig. 3b. From the
reconstructed density matrix $\rho _{\text{e}}$, we find its entanglement
fidelity, defined as the maximum overlap of $\rho _{\text{e}}$ with a
maximally entangled state, $F_{\text{e}}=(81.0\pm 1.8)\%$, significantly
higher than the criterion of $F_{\text{e}}=0.5$ for verification of
entanglement \cite{29}. The error bars are determined by
assuming a Poissonian distribution for the photon counts and propagated from
the raw data to the calculated quantities through exact numerical
simulation. The dominant noise in this system comes from the accidental
coincidence between the detected Stokes and the anti-Stokes photons
\cite{20,21}. To measure the contribution of this accidental
coincidence, we introduce an extra time delay of $13$~ns, the repetition
period of our pump pulses, to one of the detectors when we record the
coincidence. When we subtract the background noise due to this accidental
coincidence, the resulting matrix elements reconstructed from the quantum
state tomography are shown in Fig. 3c. We find the entanglement fidelity is
improved to $F_{\text{e}}=(89.7\pm 1.2)\%$ after subtraction of the
accidental coincidence.
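The $F_{\text{e}}>0.5$ criterion can be checked directly from any reconstructed density matrix. The following minimal Python sketch is purely illustrative and is not part of our analysis code: the noise model, the basis ordering and the function names are our own assumptions, and the overlaps with the four Bell states only give a lower bound on the entanglement fidelity (which maximizes the overlap over all maximally entangled states).
\begin{verbatim}
# Check the F_e > 1/2 entanglement criterion for a two-qubit density matrix
# rho (4x4, trace 1) written in a product basis {|00>, |01>, |10>, |11>}.
# The overlaps with the four Bell states bound the entanglement fidelity
# from below; the true F_e maximizes over all maximally entangled states.
import numpy as np

def bell_state_overlaps(rho):
    s = 1 / np.sqrt(2)
    bell = {
        "phi+": s * np.array([1, 0, 0, 1]),
        "phi-": s * np.array([1, 0, 0, -1]),
        "psi+": s * np.array([0, 1, 1, 0]),
        "psi-": s * np.array([0, 1, -1, 0]),
    }
    return {k: float(np.real(v.conj() @ rho @ v)) for k, v in bell.items()}

# Illustrative state: a Bell state mixed with white noise.
p = 0.8
phi = np.array([1, 0, 0, 1]) / np.sqrt(2)
rho = p * np.outer(phi, phi) + (1 - p) * np.eye(4) / 4
F_lower = max(bell_state_overlaps(rho).values())
print(F_lower, "entangled by the F_e > 0.5 criterion:", F_lower > 0.5)
\end{verbatim}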
To perform quantum teleportation using the photon-phonon entanglement, we
first transform the effective photon-phonon entangled state to the standard
form of Eq. (2) by the semicircle HWPs in the state preparation box of Fig.
2. The polarizer P1 and the waveplates HWP2 and QWP1 then prepare the
to-be-teleported photon polarization in an arbitrary superposition state $
|\Phi _{\text{in}}\rangle =c_{0}\left\vert H\right\rangle _{\text{t}
}+c_{1}\left\vert V\right\rangle _{\text{t}}$. We perform Bell measurement
through the calcite C2, the HWP3, the polarizer P2, and the detector APD2.
For instance, with the HWP3 set at $0^{\circ}$ and the polarizer P2 set along
the direction $\left\vert H\right\rangle +\left\vert V\right\rangle $, a
photon count in the detector APD2 corresponds to a projection to the Bell
state $\left( \left\vert H\right\rangle _{\text{t}}\left\vert U\right\rangle
_{\text{t}}+\left\vert V\right\rangle _{\text{t}}\left\vert L\right\rangle _{
\text{t}}\right) /\sqrt{2}$ for the polarization and the path qubits of the
photon before the measurement box. By rotating the angles of HWP3 and P2, we
can also perform projection to any other Bell states.
The experimental result for teleportation is shown in Fig. 4. The
teleportation fidelity is defined as $F=\left\langle \Phi _{\text{in}
}\right\vert \rho _{\text{out}}|\Phi _{\text{in}}\rangle $, where $|\Phi _{
\text{in}}\rangle $ is the input state at Alice's side and $\rho _{\text{out}
}$ denotes the output density matrix at Bob's side reconstructed through
quantum state tomography measurements. In Fig. 4a, we show the teleportation
fidelity for six complementary basis states with $|\Phi _{\text{in}}\rangle =\left\vert H\right\rangle _{\text{t}},\left\vert V\right\rangle _{\text{t}},\left\vert \pm \right\rangle _{\text{t}}=\left( \left\vert H\right\rangle _{\text{t}}\pm \left\vert V\right\rangle _{\text{t}}\right) /\sqrt{2},$ $\left\vert L\right\rangle _{\text{t}}=\left( \left\vert H\right\rangle _{\text{t}}+i\left\vert V\right\rangle _{\text{t}}\right) /\sqrt{2},\left\vert R\right\rangle _{\text{t}}=\left( \left\vert H\right\rangle _{\text{t}}-i\left\vert V\right\rangle _{\text{t}}\right) /\sqrt{2}$ in cases with and without subtraction of the background noise. The
average fidelity over these six basis states is $(93.9\pm 0.8)\%$ (or $
(83.0\pm 0.8)\%$) with (or without) background noise subtraction. This
average fidelity is significantly higher than $2/3$, the boundary value for
the fidelity that separates quantum teleportation from classical operations.
For more complete characterization, we also perform quantum process
tomography (QPT) for the teleportation operation. In the ideal case,
teleportation should be characterized by an identity transformation, meaning
that Alice's input state is teleported perfectly to Bob's side. The
experimentally reconstructed process matrix elements are shown in Fig. 4b
(see Methods for explanation of QPT). The process fidelity is given by $F_{
\text{p}}=(85.9\pm 1.6)\%$, which corresponds to a teleportation fidelity $
\overline{F}=(90.6\pm 1.0)\%$ averaged over all possible input states with
equal weight in the qubit space.
\section{Discussion}
Teleportation of quantum states from a photon to the vibration modes of a
millimeter-sized diamond under ambient conditions generates a quantum link
between a microscopic particle and the macroscopic world around us, which is
usually governed by the laws of classical physics. In our experiment, ultrafast laser
technology provides the key tool for fast processing and detection of
quantum states, within their short lifetime, in macroscopic objects consisting
of many strongly interacting atoms that are coupled to the environment.
Combined with the tunability of the wavelength of the retrieval laser pulse
\cite{22a1}, the technique introduced in our experiment
would be useful for realizing a new source of entangled photons based
on the diamond optomechanical coupling with the dual-rail encoding. Such a
source could generate entangled photons at wavelengths that are difficult to
produce by other methods. For instance, one may generate entanglement between
ultraviolet and infrared photons, with the infrared photon well suited for quantum
communication and the ultraviolet photon convenient for interfacing with
other qubits, such as ion matter qubits. Such a photon source is hard to
realize with the conventional spontaneous parametric down-conversion method.
In the future, these ultrafast pump--probe tools could be combined
with laser cooling or low-temperature technology to provide
more efficient ways of quantum control of optomechanical systems, with
important applications in the transduction of quantum signals
\cite{23,24}, the processing of quantum information or
single-photon signals \cite{19,20,22a1}, and the sensing of
small mechanical vibrations \cite{19}.
\section{Methods}
\subsection{Quantum process tomography}
Quantum process tomography (QPT) \cite{28} characterizes a completely
positive map $\varepsilon$, $\rho _{\text{f}}=\varepsilon (\rho _{\text{i}})$,
that transfers an arbitrary input state $\rho _{\text{i}}$ to the
output state $\rho _{\text{f}}$. The map can be described by a unique process
matrix $\chi _{mn}$ through $\rho _{\text{f}}=\sum_{mn}\chi _{mn}E_{m}\rho _{\text{i}}E_{n}^{\dagger }$ for a fixed set of basis
operators ${E}_{m}$. In our experiment, we set the basis operators ${E}_{m}$
to be the identity operator $I$ and the three Pauli matrices $X=\sigma _{x}$,
$Y=-i\sigma _{y}$, $Z=\sigma _{z}$. This corresponds to a choice of six
complementary input states $|H\rangle $, $|V\rangle $, $|+\rangle $, $
|-\rangle $, $|L\rangle $, $|R\rangle $ for the teleportation. We
reconstruct the output states from teleportation by quantum state tomography
and use them to calculate the process matrix $\chi $ through maximum
likelihood estimation \cite{28}. The process fidelity is determined by $F_{
\text{p}}=\mathrm{Tr}(\chi \chi _{\text{id}})$, where $\chi _{\text{id}}$ is the
identity process matrix corresponding to the perfect case. The process
fidelity $F_{\text{p}}$ determines the average teleportation fidelity $
\overline{F}$ by the formula $\overline{F}=(2F_{\text{p}}+1)/3$ \cite{28},
where $\overline{F}$ is defined as the fidelity averaged over all possible
states of the input qubit with equal weight.
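The following minimal Python sketch illustrates these formulas; the process matrix used in the example is an arbitrary, hypothetical stand-in for the experimentally reconstructed $\chi$, and the function names are ours.
\begin{verbatim}
# Apply a single-qubit process chi in the operator basis {I, X, -i*sigma_y, Z}
# and evaluate the process fidelity F_p = Tr(chi chi_id) together with the
# average fidelity (2 F_p + 1)/3.
import numpy as np

I = np.eye(2, dtype=complex)
X = np.array([[0, 1], [1, 0]], dtype=complex)
Y = np.array([[0, 1], [-1, 0]], dtype=complex)   # Y = -i * sigma_y
Z = np.array([[1, 0], [0, -1]], dtype=complex)
E = [I, X, Y, Z]

def apply_process(chi, rho_in):
    return sum(chi[m, n] * E[m] @ rho_in @ E[n].conj().T
               for m in range(4) for n in range(4))

def fidelities(chi):
    chi_id = np.zeros((4, 4), dtype=complex)
    chi_id[0, 0] = 1.0            # the identity process: ideal teleportation
    F_p = float(np.real(np.trace(chi @ chi_id)))
    return F_p, (2 * F_p + 1) / 3

# Illustrative chi: a slightly depolarized identity channel in this basis.
lam = 0.9
chi = np.diag([(1 + 3 * lam) / 4] + 3 * [(1 - lam) / 4]).astype(complex)
print(fidelities(chi))
\end{verbatim}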
\textbf{Acknowledgements} This work was supported by the Ministry of
Education of China through its grant to Tsinghua University. LMD
acknowledges in addition support from the IARPA program, the ARL, and the
AFOSR MURI program.
\textbf{Author Contributions} L.M.D. designed the experiment and supervised
the project. P.Y.H., Y.Y.H., X.X.Y., X.Y.C., C.Z., L.H. carried out the
experiment. L.M.D. and P.Y.H. wrote the manuscript.
\textbf{Author Information} The authors declare no competing financial
interests. Correspondence and requests for materials should be addressed to
L.M.D. ([email protected]).
\end{document}
\begin{document}
\begin{abstract}
For $n\ge 5$, we prove that every $n\times n$ matrix $\mathcal M=(a_{i,j})$ with entries in $\{-1,1\}$ and absolute discrepancy $\abs{\disc(\mathcal M)}=\abs{\sum a_{i,j}}\le n$ contains a zero-sum square except for the split matrix (up to symmetries). Here, a square is a $2\times 2$ sub-matrix of $\mathcal M$ with entries $a_{i,j}, a_{i+s,j}, a_{i,j+s}, a_{i+s,j+s}$ for some $s\ge 1$, and a split matrix is a matrix with all entries above the diagonal equal to $-1$ and all remaining entries equal to $1$.
In particular, we show that for $n\ge 5$ every zero-sum $n\times n$ matrix with entries in $\{-1,1\}$ contains a zero-sum square.
\end{abstract}
\title{Zero-sum squares in bounded discrepancy $\mathbf{\{-1,1\}}$-matrices}
\section{Introduction}
An \emph{Erickson matrix} is a square binary matrix that contains no \emph{squares} (defined below) with constant entries.
In \cite{Eri1996}, Erickson asked for the maximum value of $n$ for which there exists an $n\times n$ Erickson matrix.
In \cite{AM2008} Axenovich and Manske gave an upper bound of around $2^{2^{40}}$. This gargantuan bound was later improved by Bacher and Eliahou in \cite{BE2010} using computational means to the optimal value of $15$.
This paper is devoted to studying a zero-sum analogue of Erickson matrices by considering matrices with entries in $\{-1,1\}$. For this purpose, of course, we need to take into account the discrepancy or deviation of the matrix.
Discrepancy theory is an important branch in combinatorics with deep connections to many other areas in mathematics (see \cite{Cha2001} for a good general reference on this topic). In particular, one important result is Tao's recent proof of the Erdős discrepancy conjecture, \cite{Tao2016}, which states that any sequence of the form $f:\mathbb N\to \{-1,1\}$ satisfies that $\sup_{n,d}\abs{\sum_{j=1}^n f(jd)}=\infty$.
In recent years, we have witnessed the study of zero-sum structures becoming increasingly popular. Some examples related to our work are the following. Caro et al. proved in \cite{CHM2019} that for any finite sequence $f:[1,n]\to \{-1,1\}$ satisfying that $\abs{\sum_{i=1}^nf(i)}$ is small, there is a set of consecutive numbers $B\subset [1,n]$ for which $\abs{\sum_{i\in B}f(i)}$ is also small (in particular, small can mean zero-sum). Another interesting work is \cite{BE11}, where Buttkewitz and Elsholtz proved the existence of zero-sum arithmetic progressions with four terms in certain sequences $f:\mathbb N\to \{-1,1\}$.
Balister et al. studied matrices where, for some fixed integer $p$, the sum of each row and each column is a multiple of $p$ \cite{BCRY02}; they showed that these matrices appear in any large enough integer square matrix.
Throughout this paper a matrix with entries in $\{-1,1\}$ will be called a \emph{binary matrix}. Given an $n\times m$ binary matrix $\mathcal M=(a_{i,j})$, the \emph{discrepancy} of $\mathcal M$ is the sum of all its entries, that is
\begin{equation}\label{eq:disc}
\disc(\mathcal M)=\sum_{\substack{1\le i\le n \\ 1\le j\le m}} a_{i,j}.
\end{equation}
Note that if $a^+$ is the number of entries in $\mathcal M$ equal to $1$ and $a^-$ is the number of entries in $\mathcal M$ equal to $-1$ then
\begin{equation}\label{eq:pm}
\disc(\mathcal M) = a^+ - a^- = 2 a^+-nm = nm-2 a^-.
\end{equation}
We define a \emph{zero-sum matrix} $\mathcal M$ as a binary matrix with $\disc(\mathcal M)=0$.
A \emph{square} $S$ in $\mathcal M=(a_{i,j})$ is a $2\times 2$ sub-matrix of $\mathcal M$ of the form
\[
S =
\begin{pmatrix}
a_{i,j} & a_{i,j+s} \\
a_{i+s,j} & a_{i+s,j+s}
\end{pmatrix}
\]
for some positive integer $s$. A \emph{zero-sum square} is a square $S$ with $\disc(S)=0$. Note that a square in $\mathcal M$ is not zero-sum if and only if it has at least $3$ equal entries.
We are interested in studying matrices $\mathcal M$ which do not contain zero-sum squares; we call these matrices \emph{zero-sum-square-free}.
Note that this may also be seen as a $2$-coloring of an $n\times m$ rectangular grid. In this case, zero-sum is the same as balanced.
An $n\times m$ binary matrix $\mathcal M=(a_{i,j})$ is called \emph{$t$-split}, for some $0\le t < n+m$, if
\[
a_{i,j}=\begin{cases}
-1 & \text{if } i+j\le t+1, \\
1 & \text{otherwise. } \\
\end{cases}\]
If either $\mathcal M$, its negative, or its horizontal or vertical reflections are $t$-split for some $t$, we say that $\mathcal M$ is \emph{split}. This is relevant since split matrices are always zero-sum-square-free. We are also interested in the possible discrepancies they can have.
\begin{obs}\label{obs:discdiag}
For an $n\times m$ $t$-split matrix $\mathcal M$ with $n\le m$,
\[\disc(\mathcal M)=\begin{cases}
n m - t (t+1) & \text{if } t\le n, \\
n m+n(n-1)-2 n t & \text{if } n< t\le m, \\
(n+m-t-1) (n+m-t)-n m & \text{if } m< t.
\end{cases}\]
\end{obs}
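These formulas can also be confirmed mechanically; the following Python sketch (the helper names are ours) checks them against the definition for all small cases.
\begin{verbatim}
# Check the discrepancy formula for t-split matrices against the definition
# a_{i,j} = -1 if i + j <= t + 1 and a_{i,j} = 1 otherwise (n <= m).
def disc_t_split(n, m, t):
    return sum(-1 if i + j <= t + 1 else 1
               for i in range(1, n + 1) for j in range(1, m + 1))

def disc_formula(n, m, t):
    if t <= n:
        return n * m - t * (t + 1)
    if t <= m:
        return n * m + n * (n - 1) - 2 * n * t
    return (n + m - t - 1) * (n + m - t) - n * m

assert all(disc_t_split(n, m, t) == disc_formula(n, m, t)
           for n in range(1, 13) for m in range(n, 13) for t in range(n + m))
print("discrepancy formula verified for all n <= m <= 12")
\end{verbatim}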
From this we may conclude the following.
\begin{coro}\label{coro:split}
Let $\mathcal M$ be a $t$-split binary matrix such that $\abs{\disc(\mathcal M)} \le n$. If $\mathcal M$ is of size $n\times n$, then $t \in \{n-1,n\}$ and $\abs{\disc(\mathcal M)} = n$. If $\mathcal M$ is of size $n\times (n+1)$, then $t = n$ and $\disc(\mathcal M) = 0$.
\end{coro}
In particular, the discrepancy of a square split matrix never vanishes.
Now we are ready to state our main theorem.
\begin{teo}\label{thm:main}
Let $n\ge 5$. Every $n\times n$ non-split binary matrix $\mathcal M$ with $\abs{\disc(\mathcal M)}\le n$ contains a zero-sum square. In particular, every $n\times n$ zero-sum matrix $\mathcal M$ contains a zero-sum square.
\end{teo}
Theorem \ref{thm:main} and Corollary \ref{coro:split} immediately yield the following.
\begin{coro}\label{coro:almostsquare}
Let $\mathcal M$ be an $n \times n$ binary matrix. If $n \ge 5$ and $\abs{\disc(\mathcal M)} \le n-1$, then $\mathcal M$ contains a zero-sum square.
\end{coro}
Our proof method suggests that a stronger result may hold.
\begin{conj}\label{conj}
For every $C>0$, there is an integer $N$ with the following property: For all $n\geq N$, every $n\times n$ non-split binary matrix $\mathcal M$ with $\abs{\disc(\mathcal M)}\le Cn$ contains a zero-sum square.
\end{conj}
There is a more general question.
Let $f:\mathbb N \to \mathbb N$ be the function associating to each $n \in \mathbb N$ the largest possible integer $f(n)$ such that every $n \times n$ non-split binary matrix $\mathcal M$ satisfying $\abs{\disc(\mathcal M)}\le f(n)$ contains a zero-sum square. Obviously $f(n) < n^2$. In fact, $f(n) \le \frac {n^2}{2} + o(n^2)$ as is shown by the $n\times n$ matrix $\mathcal M=(a_{i,j})$ defined by
\[
a_{i,j}=\begin{cases}
-1 & \text{if } i,j\text{ are both even,} \\
1 & \text{otherwise.}
\end{cases}
\]
This is a zero-sum-square-free matrix and its discrepancy is about $\frac {n^2}{2}$.
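For small even $n$ this can be confirmed directly, as in the following Python sketch (illustrative only; the helper names are ours).
\begin{verbatim}
# The matrix with a_{i,j} = -1 exactly when i and j are both even:
# check that it is zero-sum-square-free and that its discrepancy is n^2/2.
def entry(i, j):                      # 1-based indices
    return -1 if i % 2 == 0 and j % 2 == 0 else 1

def has_zero_sum_square(n):
    return any(entry(i, j) + entry(i + s, j) + entry(i, j + s) + entry(i + s, j + s) == 0
               for i in range(1, n + 1) for j in range(1, n + 1)
               for s in range(1, n + 1 - max(i, j)))

for n in (6, 10, 14):
    disc = sum(entry(i, j) for i in range(1, n + 1) for j in range(1, n + 1))
    print(n, has_zero_sum_square(n), disc == n * n // 2)
\end{verbatim}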
Theorem \ref{thm:main} implies $f(n) \ge n$ if $n \ge 5$. It would be very interesting to determine whether $f(n)$ is linear or quadratic in $n$.
This paper is organized as follows. Section \ref{sec:small} is devoted to particular cases, which were analyzed by a computer. In Section \ref{sec:proof} we give a stronger version of Theorem \ref{thm:main} and its proof. Finally, Section \ref{sec:concl} contains our conclusions and some open questions.
\section{Small cases}\label{sec:small}
Since the proof of Theorem \ref{thm:main} uses induction, we must analyze some of the smaller cases to obtain our induction basis. It is possible to do this by hand but the amount of work is quite large, so we aid ourselves with a computer program.
Our program takes three positive parameters as input: $n$, $m$ and $d$, which should satisfy $n\le m$ and $d\le nm$. The output is a list of all $n\times m$ binary matrices which are zero-sum-square-free and satisfy $\disc(\mathcal M)=d$. To do this we use a standard backtracking algorithm that explores all binary matrices with the desired properties.
The code is written in C++ and is available at
\begin{center}
\url{https://github.com/edyrol/ZeroSumSquares}.
\end{center}
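For illustration, the following Python sketch implements the same backtracking idea (it is a simplified stand-in for the C++ program above, and the function names are ours): entries are filled in row-major order, and a branch is abandoned as soon as it completes a zero-sum square or the target discrepancy becomes unreachable.
\begin{verbatim}
# Enumerate n x m binary matrices with discrepancy d and no zero-sum square.
def zero_sum_square_free(n, m, d):
    a = [[0] * m for _ in range(n)]
    found = []

    def completes_zero_sum_square(i, j):
        # every square is checked exactly once, when its bottom-right entry is set
        return any(a[i - s][j - s] + a[i - s][j] + a[i][j - s] + a[i][j] == 0
                   for s in range(1, min(i, j) + 1))

    def fill(pos, partial_sum):
        if pos == n * m:
            if partial_sum == d:
                found.append([row[:] for row in a])
            return
        if abs(d - partial_sum) > n * m - pos:   # discrepancy no longer reachable
            return
        i, j = divmod(pos, m)
        for v in (-1, 1):
            a[i][j] = v
            if not completes_zero_sum_square(i, j):
                fill(pos + 1, partial_sum + v)
        a[i][j] = 0

    fill(0, 0)
    return found

print(len(zero_sum_square_free(4, 5, 0)))   # zero-sum 4 x 5 examples
\end{verbatim}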
We are mainly interested in two types of zero-sum-square-free matrices: square matrices (with $m=n$) and almost-square matrices (with $m=n+1$). These are the sizes of matrices we need to understand in order to prove Theorem \ref{thm:main}.
Recall that split matrices are zero-sum-square-free, so we always find these examples.
\begin{lema}\label{lem:compu}
Let $\mathcal M$ be a zero-sum-square-free binary matrix with $\abs{\disc(\mathcal M)}\le 2n$. If $\mathcal M$ is of size $n\times (n+1)$ and $4\le n\le 11$, then $\mathcal M$ is either a split matrix or it is one of $28$ exceptional $4\times 5$ matrices. If $\mathcal M$ is of size $n\times n$ and $5\le n\le 11$, then $\mathcal M$ is either a split matrix or it is one of $32$ exceptional $5\times 5$ matrices.
\end{lema}
Although Lemma \ref{lem:compu} mentions $60$ exceptional matrices, there are essentially only $11$. The rest can be obtained by taking the symmetries of these $11$ (generated by reflections and rotations, and their negatives). These $11$ matrices are shown in Figure \ref{fig:small}.
\begin{figure}
\caption{Non-split zero-sum-square-free binary matrices.}
\label{fig:small}
\end{figure}
The computer program does not take too long to run. Using a home computer with an i7-3770 3.40GHz processor and compiling the program with GCC 8.1.0, it takes less than a second to analyze a $9\times 9$ matrix with fixed discrepancy. For the larger matrices, it can take a couple of minutes. For example, depending on the discrepancy, it takes between $30$ and $50$ seconds to analyze an $11\times 11$ binary matrix and between $1.5$ and $3$ minutes for an $11\times 12$ binary matrix.
\section{Proof}\label{sec:proof}
Our proof of Theorem \ref{thm:main} uses an induction argument. The main idea in the induction step is to split a large zero-sum-square-free matrix $\mathcal M$ into four square (with equal side-lengths) or almost-square (with side-lengths differing by $1$) sub-matrices. Since it is not always possible to only use squares, we are forced to understand the behavior of both square and almost-square zero-sum-square-free matrices. For the induction to work, we prove the following stronger statement.
\begin{teo}\label{thm:disc_m}
Let $n\ge 5$ and $m\in \{n,n+1\}$. Every $n\times m$ non-split binary matrix $\mathcal M$ with $\abs{\disc(\mathcal M)}\le n$ contains a zero-sum square.
\end{teo}
The basis of the induction is given by the computer analysis described in Section~\ref{sec:small}. It is not indispensable to use a computer to prove Lemma \ref{lem:compu}, although doing it by hand would require either substantial case analysis or a clever argument that has eluded us.
For the rest of the proof we proceed as follows: assuming that the discrepancy of $\mathcal M$ is not too large, we find a relatively large sub-matrix $\mathcal N$ of $\mathcal M$ with small discrepancy. By the induction hypothesis, if we assume that $\mathcal M$ is a zero-sum-square-free matrix, we conclude that $\mathcal N$ must be split. It turns out that having a relatively large split sub-matrix $\mathcal N$ determines the value of many other entries of $\mathcal M$. From those values we find that, either $\mathcal M$ is itself split as desired, or we can estimate $\disc(\mathcal M)$ and find that it is larger than $n$ which contradicts the hypothesis of Theorem \ref{thm:disc_m}.
For integers $h,j,k,l$ satisfying $1\leq h< k\leq n$ and $1\leq j< l\leq m$, we define a \emph{block of $\mathcal M$} as the $(k-h+1)\times(l-j+1)$ sub-matrix
\[
\mathcal M[h,k;j,l]=\begin{pmatrix}
a_{h,j} & a_{h,j+1} & \dots & a_{h,l-1} & a_{h,l} \\
a_{h+1,j} & a_{h+1,j+1} & \dots & a_{h+1,l-1} & a_{h+1,l} \\
\vdots & \vdots & \ddots & \vdots & \vdots \\
a_{k-1,j} & a_{k-1,j+1} & \dots & a_{k-1,l-1} & a_{k-1,l} \\
a_{k,j} & a_{k,j+1} & \dots & a_{k,l-1} & a_{k,l}
\end{pmatrix}.
\]
The next lemma shows that a block $\mathcal M'$ in a zero-sum-square-free matrix $\mathcal M$ is split if a certain sub-block of $\mathcal M'$ is also split. It is divided into four instances. Parts (a) and (b) refer to blocks obtained by removing the first row and the first column of $\mathcal M'$, respectively. Parts (c) and (d) refer to blocks obtained by removing the last row and the last column of $\mathcal M'$, respectively.
\begin{lema}\label{lem:tool}
Let $\mathcal M$ be a zero-sum-square-free $n\times m$ matrix with $n\ge 5$. Let $1\leq h< k\leq n$ and $1\leq j< l\leq m$ be integers such that $k-h=l-j=b\geq 2$. Consider the block $\mathcal M'=\mathcal M[h,k;j,l]$ of size $(b+1)\times(b+1)$,
\begin{enumerate}
\item[(a)] if $\mathcal M[h+1,k;j,l]$ is $b$-split then $\mathcal M'$ is $(b+1)$-split.
\item[(b)] if $\mathcal M[h,k;j+1,l]$ is $b$-split then $\mathcal M'$ is $(b+1)$-split.
\item[(c)] if $\mathcal M[h,k-1;j,l]$ is $b$-split then $\mathcal M'$ is $b$-split.
\item[(d)] if $\mathcal M[h,k;j,l-1]$ is $b$-split then $\mathcal M'$ is $b$-split.
\end{enumerate}
\end{lema}
\begin{proof}
If $\mathcal M[h+1,k;j,l]$ (respectively $\mathcal M[h,k;j+1,l]$) is $b$-split, we need to prove that all entries in the top row (respectively in the leftmost column) of $\mathcal M'$ are equal to $-1$. If $\mathcal M[h,k;j,l-1]$ (respectively $\mathcal M[h,k-1;j,l]$) is $b$-split, we need to prove that all entries in the rightmost column (respectively bottom row) of $\mathcal M'$ are equal to $1$. Since the arguments are analogous for each case, we only show the first one. Assume that $\mathcal M[h+1,k;j,l]$ is $b$-split; then, for every $1\leq i \leq b$,
\begin{equation}\label{eq:values}
a_{h+i,l-i}=-1 \mbox{ and } a_{h+i,l}=1.
\end{equation}
Consider now the square
\[
S=\begin{pmatrix}
a_{h,l-i} & a_{h,l} \\
a_{h+i,l-i} & a_{h+i,l}
\end{pmatrix}
\]
and recall that, since $\mathcal M$ is a zero-sum-square-free matrix, any square $S$ in $\mathcal M$ has at least $3$ equal entries. Thus, (\ref{eq:values}) implies that $a_{h,l-i}=a_{h,l}$ for every $1\leq i \leq b$. Therefore, the elements in the first row of $\mathcal M'$, $a_{h,j},\dots,a_{h,l}$, are all equal. Finally, since $a_{h,j}=a_{h,j+1}$ and $a_{h+1,j}=a_{h+1,j+1}=-1$, the same argument for the square
\[S=\begin{pmatrix}
a_{h,j} & a_{h,j+1} \\
a_{h+1,j} & a_{h+1,j+1}
\end{pmatrix}\]
implies that $a_{h,j}=a_{h,j+1}=-1$, so all entries in the top row of $\mathcal M'$ are equal to $-1$. This shows that $\mathcal M'$ is indeed $(b+1)$-split.
\end{proof}
Once we have a $t$-split block $\mathcal M'$, we can also deduce the values of other entries which are not necessarily adjacent to $\mathcal M'$.
\begin{lema}\label{lem:tool2}
Let $\mathcal M$ be a zero-sum-square-free $n\times m$ matrix, where $\mathcal M' = \mathcal M\left[1,k;1,l\right]$ is $t$-split with $t < k <n$ and $t < l <m$.
If $l < r \le \min(t+l-1,n)$, then the entries $a_{r,i}$ have the same value for
\[i\in [1,\floor{(t+l-r+1)/2}]\cup [r-t+1,l].\]
Analogously, if $k<c\le \min(t+k-1,m)$, then the entries $a_{i,c}$ have the same value for
\[i\in [1,\floor{(t+k-c+1)/2}]\cup [c-t+1,k].\]
\end{lema}
\begin{proof}
Assume $1\le i\le (t+l-r+1)/2$ and consider the square
\[
S = \begin{pmatrix}
a_{r-l+i,i} & a_{r-l+i,l} \\
a_{r,i} & a_{r,l}
\end{pmatrix}.
\]
Note that, since $\mathcal M'$ is $t$-split and $r-l+2i\le t+1$, $a_{r-l+i,i}=-1$ and $a_{r-l+i,l}=1$.
So two entries of $S$ have opposite values and therefore $a_{r,i}=a_{r,l}$.
If $r-t+1 \le i \le l$, consider the square
\[
S = \begin{pmatrix}
a_{r+1-i,1} & a_{r+1-i,i} \\
a_{r,1} & a_{r,i}
\end{pmatrix}.
\]
Since $\mathcal M'$ is $t$-split and $r+1-i\le t$, $a_{r+1-i,1}=-1$. Furthermore, since $i\le l$, $a_{r+1-i,i}=1$. So two entries of $S$ have opposite values and therefore $a_{r,1}=a_{r,i}$.
In conclusion, $a_{r,i}=a_{r,l}$ for any $i\in [1,(t+l-r+1)/2]$, in particular, $a_{r,1}=a_{r,l}$. If $i\in [r-t+1,l]$ then $a_{r,i}=a_{r,1}$. Therefore, all of these values are equal. The proof for columns is analogous.
\end{proof}
\begin{proof}[Proof of Theorem \ref{thm:disc_m}]
By Lemma \ref{lem:compu} we know the theorem holds for any $n\le 11$ and $m\in\{n,n+1\}$.
Let $\mathcal M=(a_{i,j})$ be an $n\times m$ binary matrix with $n\ge 12$, $m\in\{n,n+1\}$ and $\abs{\disc(\mathcal M)}\le n$. We need to prove that either $\mathcal M$ is split or it contains a zero-sum square, so we assume henceforth that $\mathcal M$ is zero-sum-square-free.
As stated before, we use induction on $n$, so we may assume that the theorem holds true for all square and almost-square binary matrices with smaller dimensions than those of $\mathcal M$.
We consider the four blocks of $\mathcal M$ formed by splitting $\mathcal M$ vertically and horizontally as evenly as possible. To be precise, let
\begin{alignat*}{2}
\mathcal M_1 & =\mathcal M\Big[1,\floor{\frac n2}; & & \quad 1,\floor{\frac m2}\Big], \\
\mathcal M_2 & =\mathcal M\Big[\floor{\frac n2}+1,n; & & \quad1,\floor{\frac m2}\Big], \\
\mathcal M_3 & =\mathcal M\Big[1,\floor{\frac n2}; & & \quad\floor{\frac m2}+1,m\Big] \text{ and} \\
\mathcal M_4 & =\mathcal M\Big[\floor{\frac n2}+1,n; & & \quad\floor{\frac m2}+1,m\Big].
\end{alignat*}
Note that, for $1\leq i\leq 4$, each block $\mathcal M_i$ is either a square or an almost-square matrix. Also, the smallest side of any $\mathcal M_i$ is $\floor{\frac n2}$ and the largest is $m-\floor{\frac m2}=\ceil{\frac m2}\le\ceil{\frac{n+1}2}\le \floor{\frac n2}+1$. Therefore, the side-lengths of each $\mathcal M_i$ are in the set $\{\floor{\frac n2},\floor{\frac n2}+1\}$.
\begin{claim}\label{claim:4split}
Either one of the four matrices $\mathcal M_i$ satisfies $\abs{\disc{(\mathcal M_i)}}<\floor{\frac n2}$ or two of these four matrices have discrepancies with opposite signs.
\end{claim}
\begin{proof}
Suppose that no $\mathcal M_i$ satisfies $\abs{\disc(\mathcal M_i)}<\floor{\frac n2}$. If the four matrices satisfy $\disc{(\mathcal M_i)}\ge\floor{\frac n2}$, then $n\ge \disc(\mathcal M)=\sum\disc(\mathcal M_i)\ge 4\floor{\frac n2}$, which is a contradiction. If the four matrices satisfy $\disc{(\mathcal M_i)}\leq-\floor{\frac n2}$, we obtain a contradiction in the same way. Therefore two of the $\mathcal M_i$ have discrepancies with opposite signs.
\end{proof}
What we actually wish to find is a relatively large block of $\mathcal M$ with small discrepancy. By an interpolation argument this is easily achievable.
\begin{claim}\label{claim:matrixN}
By exchanging $1$ and $-1$ if necessary, we may assume that there is an almost-square $\floor{\frac n2}$-split block $\mathcal N$ with side-lengths in the set $\{\floor{\frac n2},\floor{\frac n2}+1\}$ such that $\abs{\disc(\mathcal N)}<\floor{\frac n2}$.
\end{claim}
\begin{proof}
Claim \ref{claim:4split} either provides the block we want or it gives us two blocks $\mathcal N_+$ and $\mathcal N_-$ from the set $\{\mathcal{M}_1,\mathcal{M}_2,\mathcal{M}_3,\mathcal{M}_4\}$ with $\disc(\mathcal N_+)>0$ and $\disc(\mathcal N_-)<0$.
We can construct a sequence $\mathcal N_-=\mathcal N_1,\mathcal N_2,\dots,\mathcal N_k=\mathcal N_+$ of blocks of $\mathcal M$ with the following properties:
\begin{itemize}
\item The side-lengths of every $\mathcal N_i$ are in $\{\floor{\frac n2},\floor{\frac n2}+1\}$.
\item For each $1\le i<k$, one of $\mathcal N_i$ and $\mathcal N_{i+1}$ can be obtained from the other by removing one row or one column.
\end{itemize}
In other words, we start with $\mathcal N_1=\mathcal N_-$ and move towards $\mathcal N_+$. In each step we add or remove a row or column of $\mathcal N_i$, taking care to always leave $\mathcal N_{i+1}$ with side-lengths in the set $\{\floor{\frac n2},\floor{\frac n2}+1\}$. Note that in each step we switch from square to almost-square and vice-versa.
At some point the discrepancy changes from negative to positive, so assume that $\disc(\mathcal N_i)<0$ and $\disc(\mathcal N_{i+1})>0$ for some $1\le i<k$. Since, at each step the discrepancy changes by at most $\floor{\frac n2}+1$, we conclude that either $\mathcal N_i$ or $\mathcal N_{i+1}$ must have absolute discrepancy at most $(\floor{\frac n2}+1)/2<\floor{\frac n2}$. Let $\mathcal N$ be this block.
Now we can use our induction hypothesis on $\mathcal N$. Since $\abs{\disc(\mathcal N)}<\floor{\frac n2}$, either $\mathcal N$ contains a zero-sum square, or it must necessarily be split. Furthermore, by Corollary \ref{coro:split}, if $\mathcal N$ is zero-sum-square-free, then it must be an almost-square block and have discrepancy exactly $0$.
\end{proof}
In the following Claim we prove that several entries of $\mathcal M$ are forced. Note that, if $\mathcal N=\mathcal M[p,r;q,s]$, the $\floor{\frac n2}$-th diagonal of $\mathcal N$ is contained in the $(p+q+\floor{\frac n2}-2)$-th diagonal of $\mathcal M$. So, to simplify things, we define
\begin{equation}\label{eq:lower_t}
t=p+q+\floor{\frac n2}-2\ge\floor{\frac n2}.
\end{equation}
\begin{claim}\label{claim:tsplit}
By relabeling the entries of $\mathcal M$ and exchanging $1$ and $-1$ if necessary, we may assume that the block $\mathcal M_0 = \mathcal M\left[1,t+1;1,t+1\right]$ is $t$-split.
\end{claim}
\begin{proof}
We start with the block $\mathcal N=\mathcal M[p,r;q,s]$ described in Claim \ref{claim:matrixN}. We repeatedly apply Lemma~\ref{lem:tool} to obtain a sequence of split matrices $\mathcal N=\mathcal N_1,\dots,\mathcal N_k$ in the following way.
Assume that $\mathcal N_i=\mathcal M[h,k;j,l]$ is a $b$-split block. The block $\mathcal N_i$ is either square or almost-square and $b$ differs from the side-lengths of $\mathcal N_i$ by at most $1$. There are four possibilities.
\begin{itemize}
\item If $b=k-h+1=l-j$, then it follows from parts (a) and (c) of Lemma \ref{lem:tool} that $\mathcal M[h-1,k;j,l]$ is $(b+1)$-split (if $h>1$) and $\mathcal M[h,k+1;j,l]$ is $b$-split (if $k<n$).
\item If $b=k-h=l-j+1$, then parts (b) and (d) of Lemma \ref{lem:tool} imply that $\mathcal M[h,k;j-1,l]$ is $(b+1)$-split (if $j>1$) and $\mathcal M[h,k;j,l+1]$ is $b$-split (if $l<m$).
\item If $b=k-h=l-j$ then we can remove the last row or column from $\mathcal N_i$ and apply parts (a) and (b) of Lemma \ref{lem:tool} to show that $\mathcal M[h-1,k;j,l]$ (if $h>1$) and $\mathcal M[h,k;j-1,l]$ (if $j>1$) are $(b+1)$-split.
\item If $b=k-h+1=l-j+1$ then we can remove the first row or column from $\mathcal N_i$ and apply parts (c) and (d) of Lemma \ref{lem:tool} to show that $\mathcal M[h,k+1;j,l]$ (if $k<n$) and $\mathcal M[h,k;j,l+1]$ (if $l<m$) are $b$-split.
\end{itemize}
In any case, let $\mathcal N_{i+1}$ be any of the larger split blocks described above, whenever possible.
The process can only stop at $\mathcal N_k=\mathcal M[h,k;j,l]$ if either $\mathcal N_k=\mathcal M$ or $\mathcal N_k$ is square with $(h,j)=(1,1)$ or $(k,l)=(n,m)$.
If $(h,j)=(1,1)$, we are done. If $(k,l)=(n,m)$ then we may relabel the entries of $\mathcal M$, exchanging $(1,1)$ and $(n,m)$ and exchange $1$ and $-1$ to obtain the desired result.
\end{proof}
\begin{claim}\label{claim:tsplit2}
The block
\[
\mathcal M_1 = \mathcal M\left[1,\min\left(n,\floor{\frac {3t}2}\right);1,\min\left(m,\floor{\frac {3t}2}\right)\right]
\]
is $t$-split.
\end{claim}
\begin{proof}
We start with the block $\mathcal M_0 = \mathcal M\left[1,t+1;1,t+1\right]$ from Claim \ref{claim:tsplit} and repeatedly apply Lemma \ref{lem:tool2} in the following way.
If $\mathcal M'=\mathcal M[1,k;1,k]$ is $t$-split with $t+1\le k<m$, apply Lemma \ref{lem:tool2} for columns with $l=k$ and $c=k+1$. The values $a_{i,c}$ are all equal for $i\in [1,k]$ whenever
\[(c-t+1)-1 \le (t+k-c+1)/2,\]
which is equivalent to $k+1=c\le 3t/2$.
If this is the case, consider the square
\[\begin{pmatrix}
a_{k-1,k}& a_{k-1,k+1}\\
a_{k,k}& a_{k,k+1}
\end{pmatrix}.\]
Since $a_{k-1,k}=a_{k,k}=1$ and $a_{k-1,k+1}=a_{k,k+1}$, we get $a_{k,k+1}=1$, and therefore every $a_{i,k+1}$ with $i\in[1,k]$ equals $1$.
Thus, $\mathcal M[1,k;1,k+1]$ is $t$-split as long as $k+1\le\floor{3t/2}$.
Now, starting with $\mathcal M[1,k;1,k+1]$, apply Lemma \ref{lem:tool2} for rows with $l=k+1$ and $r=k+1$. The values $a_{r,i}$ are all equal for $i\in [1,l]$ whenever
\[(r-t+1)-1 \le (t+l-r+1)/2,\]
which is equivalent to $k+1=r\le 3t/2+1/2$.
In the same way as before, we may conclude that $a_{k+1,i}=1$ if $i\in[1,k+1]$.
Thus, $\mathcal M[1,k+1;1,k+1]$ is $t$-split whenever $k+1\le\floor{3t/2}$.
This process stops when either $r$ or $c$ exceeds $\floor{3t/2}$ or the corresponding dimension of $\mathcal M$.
\end{proof}
In view of the previous Claim we may assume that $m\ge \floor{\frac{3t}{2}}+1$, otherwise $\mathcal M=\mathcal M_1$ is a split matrix. Since $m\le n+1$, this implies that
\begin{equation}\label{eq:upper_t}
t\le\frac{2n+1}{3},
\end{equation}
which will be relevant later.
In the case in which $\mathcal M_1$ does not cover $\mathcal M$, we may infer the values of additional entries of $\mathcal M$. This is done in a similar way to Claim \ref{claim:tsplit2}, although we are no longer able to obtain a $t$-split matrix. Instead, we obtain five regions outside of $\mathcal M_1$ for which $a_{i,j}=1$. These are illustrated in Figure \ref{fig:ext1}.
\begin{figure}
\caption{The regions described in Claims \ref{claim:tsplit2} and \ref{claim:ones}.}
\label{fig:ext1}
\end{figure}
The first region has a triangular shape bounded by the first column outside of $\mathcal M_1$, the first row of $\mathcal M$ and a certain line of slope $\frac 12$.
The second region is bounded by the first column outside of $\mathcal M_1$, the last row of $\mathcal M_1$ and a line of slope $-1$.
Of course, there are corresponding regions to these below $\mathcal M_1$.
Lastly, the entries of the diagonal of $\mathcal M$ which are outside of $\mathcal M_1$ must also have value $1$.
This is formalized in the following claim.
\begin{claim}\label{claim:ones}
Let $T=\floor{\frac {3t}2}$. Then $a_{i,j}=1$ and $a_{j,i}=1$ whenever $T < j$ and any of the following hold:
\begin{enumerate}
\item[(a)] $i \le \floor{\frac{T+t+1-j}{2}}$,
\item[(b)] $j-t< i\le T$, or
\item[(c)] $i=j$.
\end{enumerate}
\end{claim}
\begin{proof}
We start with the $t$-split block $\mathcal M_1 = \mathcal M\left[1,T;1,T\right]$ from Claim \ref{claim:tsplit2}.
We inductively deduce the values in column $j$ starting with $j=T+1$ and increasing $j$ one by one. Take $k=l=T$ and $c=j$ in Lemma \ref{lem:tool2} for columns.
Note that \eqref{eq:lower_t} implies that $j \le T+t-2$, so two things happen:
the values $a_{i,j}$ are all equal for
\[i\in [1,\floor{(t+T-j+1)/2}]\cup [j-t+1,T]\]
and the interval $[j-t+1,T]$ contains at least two elements.
By considering the square
\[\begin{pmatrix}
a_{T-1,j-1}& a_{T-1,j}\\
a_{T,j-1}& a_{T,j}
\end{pmatrix}\]
and using the fact that the elements $a_{T-1,j-1}$ and $a_{T,j-1}$ from the previous column have value $1$, we conclude that all the $a_{i,j}$ described above are equal to $1$.
Analogously, using Lemma \ref{lem:tool2} for rows, we can say the same for $a_{j,i}$.
Condition $i\in [1,\floor{(t+T-j+1)/2}]$ is equivalent to $i \le \floor{(T+t+1-j)/2}$ which proves part (a) of the claim, while $i\in [j-t+1,T]$ is equivalent to $j-t+1\le i \le T$ which proves part (b).
To prove part (c), for $T<i=j\le T+t-2$, consider the square
\[\begin{pmatrix}
a_{1,1}& a_{1,i}\\
a_{i,1}& a_{i,i}
\end{pmatrix}.\]
Since $a_{1,1}=-1$ and $a_{i,1}=a_{1,i}=1$, we must have that $a_{i,i}=1$.
\end{proof}
Now we can bound the discrepancy of $\mathcal M$. Recall from \eqref{eq:pm} that it is enough to know the number of positive entries $a^+$ of $\mathcal M$ in order to compute $\disc(\mathcal M)$.
Since $2a^+-nm=\disc(\mathcal M)\le n$, we have that
\begin{equation}\label{eq:onesle}
a^+\le \frac{n+nm}{2}.
\end{equation}
If this inequality is violated we reach a contradiction, so $\mathcal M$ can be no larger than $\mathcal M_1$. All that remains, then, is to bound from below the number of positive entries $a^+$ of $\mathcal M$.
Let $R=n-T$ and define
\begin{align*}
a_0 &= \frac{t(t-1)}{2}+\floor{\frac t2}^2+2 t\floor{\frac t2},\\
a_1 &= 2\sum_{j=T+1}^n \floor{\frac {T+t+1-j}{2}}\\
&=2\sum_{k=1}^R \floor{\frac {t+1-k}{2}},\\
a_2 &= 2\sum_{j=T+1}^n (T-j+t)\\
&= 2 \sum_{k=1}^R (t-k) \text{ and}\\
a_3 &= R.
\end{align*}
A simple calculation gives the following claim.
\begin{claim}\label{claim:entries1}
\[a^+\ge a_0+a_1+a_2+a_3\]
\end{claim}
\begin{proof}
The number of positive entries in $\mathcal M_1$, described in Claim \ref{claim:tsplit2}, is $a_0$.
If $m=n$, then $a_1$, $a_2$ and $a_3$ equal the number of positive entries described in parts (a), (b) and (c) of Claim \ref{claim:ones}, respectively.
If $m=n+1$, by ignoring the positive entries in the last column of $\mathcal M$, we obtain that $a_1$, $a_2$ and $a_3$ are lower bounds for the number of positive entries described in parts (a), (b) and (c) of Claim \ref{claim:ones}, respectively.
\end{proof}
Recall that we are currently dealing with $n\ge 12$ and, from \eqref{eq:lower_t} and \eqref{eq:upper_t},
\begin{equation}\label{eq:tinterval}
\floor{\frac{n}{2}} \le t \le \floor{\frac{2n+1}{3}}.
\end{equation}
Before simplifying this lower bound, we can check that \eqref{eq:onesle} cannot be satisfied for small values of $n$. The following claim can be easily verified with aid from a computer.
\begin{claim}\label{claim:small}
For $12\le n\le 15$ and $\floor{\frac n2}\le t\le \floor{\frac {2n+1}{3}}$, we have that
\[a_0+a_1+a_2+a_3 > \frac {(n^2+2n)}{2}\ge \frac {(n+nm)}{2}.\]
\end{claim}
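The following Python sketch carries out this verification (illustrative only; the function name is ours).
\begin{verbatim}
# Verify that a_0 + a_1 + a_2 + a_3 > (n^2 + 2n)/2 for 12 <= n <= 15 and
# floor(n/2) <= t <= floor((2n+1)/3).
def lower_bound(n, t):
    T = (3 * t) // 2
    R = n - T
    a0 = t * (t - 1) // 2 + (t // 2) ** 2 + 2 * t * (t // 2)
    a1 = 2 * sum((t + 1 - k) // 2 for k in range(1, R + 1))
    a2 = 2 * sum(t - k for k in range(1, R + 1))
    a3 = R
    return a0 + a1 + a2 + a3

for n in range(12, 16):
    for t in range(n // 2, (2 * n + 1) // 3 + 1):
        assert 2 * lower_bound(n, t) > n * n + 2 * n, (n, t)
print("verified for 12 <= n <= 15")
\end{verbatim}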
Therefore, we may assume that $n\ge 16$. What follows is a series of algebraic manipulations to obtain a simpler lower bound for $a^+$ which can be analyzed analytically.
\begin{claim}
For $n\ge 16$ and $\floor{\frac n2}\le t\le \floor{\frac {2n+1}{3}}$, we have that
\[a_0+a_1+a_2+a_3 \ge \frac{23 n^2-70 n-77}{32}.\]
\end{claim}
\begin{proof}
We can remove the integer parts in $a_0+a_1+a_2+a_3$ by using that, for any integer $x$, $\frac{x-1}2\le \floor{\frac x2}\le \frac x2$. It is convenient to do this in two parts: first we apply these inequalities but leave the variable $R$ as it is. This gives
\begin{align*}
a_0+a_1+a_2+a_3 &= \frac{t(t-1)}{2}+\floor{\frac t2}^2+2 t\floor{\frac t2} + 2\sum_{k=1}^R \left(\floor{\frac {t+1-k}{2}}\right)\\
&\quad + 2 \sum_{k=1}^R (t-k) + R\\
&\ge \frac{t(t-1)}{2}+\left(\frac{t-1}2\right)^2+t(t-1) + 2\sum_{k=1}^R\left(\frac{t-k}{2}\right)\\
&\quad + 2\sum_{k=1}^R\left(t-k\right) +R\\
&= \frac{7 t^2}{4} - 2 t + \frac{1}{4}+3 R t-\frac{3}{2} R^2-\frac{1}{2}R.
\end{align*}
Since $R=n-\floor{\frac {3t}2}$ we have that $n-\frac{3t}2\le R\le n-\frac{3t-1}2$, using this on the last expression we obtain
\begin{align*}
a_0+a_1+a_2+a_3 &\ge \frac{7 t^2}{4} - 2 t + \frac{1}{4}+3 \left(n-\frac{3t}2\right)t\\
&\quad -\frac{3}{2} \left(n-\frac{3t-1}2\right)^2-\frac{1}{2}\left(n-\frac{3t-1}2\right)\\
&= -\frac{49 t^2}{8}+\frac{15 n t}{2}+t-\frac{3 n^2}{2}-2 n-\frac{3}{8}.
\end{align*}
To minimize this last expression think of $n$ as fixed and consider it as a function of $t$. Then this is an upside-down parabola and, from \eqref{eq:tinterval}, the relevant values for $t$ are contained in the interval $\left[\frac{n-1}{2},\frac{2n+1}{3}\right]$. Therefore the parabola is bounded from below by the minimum between the values at $t=\frac{n-1}{2}$ and $t=\frac{2n+1}{3}$. These are, respectively,
\[
\frac{23 n^2-70 n-77}{32}\qquad \text{and} \qquad\frac{14 n^2-28 n -13}{18}.
\]
The former gives the smallest value.
\end{proof}
To conclude the proof, notice that the parabolas $\frac{1}{32}(23 n^2-70 n-77)$ and $\frac 12 (n^2+2n)$ intersect twice, once in the interval $(-1,0)$ and a second time in the interval $(15,16)$. Since $n\ge 16$, we have
\[
a^+ \ge a_0+a_1+a_2+a_3 \ge \frac{23 n^2-70 n-77}{32} > \frac{n^2+2n}{2}.
\]
This contradicts \eqref{eq:onesle}, so $\mathcal M=\mathcal M_1$ and therefore $\mathcal M$ is a split matrix.
\end{proof}
\section{Conclusions and further work}\label{sec:concl}
We were able to give an elementary proof of Theorem \ref{thm:main}, but we are sure that there is a deeper result in the direction of Conjecture \ref{conj}. It is also likely that something can be said for non-square matrices.
The fact that the final bound given for $a^+$ is significantly smaller than $n^2$ suggests that a much stronger theorem should hold.
It is possible to strengthen our proof to obtain a stronger version of Theorem \ref{thm:disc_m} with something like $\abs{\disc(\mathcal M)}\le 2n$ instead of $\abs{\disc(\mathcal M)}\le n$; however, significantly more work is required to establish this and it is probably not worth the effort.
In \cite{RBM2010} Erickson matrices were generalized to \emph{$3$-squares}. A $k$-square in a matrix $\mathcal M$ is a $k\times k$ sub-matrix of $\mathcal M$ contained in $k$ rows of $\mathcal M$ of the form $i,i+s,\dots,i+(k-1)s$ and $k$ columns of $\mathcal M$ of the form $j,j+s,\dots,j+(k-1)s$.
We could ask about zero-sum-$k$-square-free binary matrices but this does not make sense when $k$ is odd. However, the case when $k$ is even seems interesting. For odd $k$ we can ask about binary matrices which don't have $k$-squares of sum $\pm 1$.
Lastly, we should point out that with the aid of Claims \ref{claim:tsplit} and \ref{claim:ones}, or with stronger versions of these claims, zero-sum-square-free matrices of much larger sizes may be analyzed by a computer. This might be useful for generalizing our results. However, a different type of computer search would likely be much more useful.
SAT-solvers have been used for finding lower bounds in Ramsey-like problems (see e.g. \cite{HHLM2007}) but it is not obvious how to include the discrepancy condition here. Perhaps linear integer programming could work.
Since we didn't need to analyze anything larger than an $11\times 12$ matrix, we didn't work much on making our program efficient.
\section*{Acknowledgments}
The authors would like to thank the anonymous referee for his comments which improved the paper greatly. They are also thankful for the facilities provided by the Banff International Research Station ``Casa Matemática Oaxaca'' during the ``Zero-Sum Ramsey Theory: Graphs, Sequences and More'' workshop (19w5132).
This research was supported by CONACyT project 282280 and PAPIIT project IN116519.
\end{document}
\begin{document}
\title[Twisted conjugacy in lamplighter-type groups]
{Twisted conjugacy in some lamplighter-type groups}
\author{Evgenij Troitsky}
\thanks{The work is supported by the Russian Science Foundation under grant 21-11-00080.}
\address{Moscow Center for Fundamental and Applied Mathematics, MSU Department,\newline
Dept. of Mech. and Math., Lomonosov Moscow State University, 119991 Moscow, Russia}
\email{[email protected]}
\keywords{Reidemeister number,
twisted conjugacy class,
lamplighter group,
wreath product}
\subjclass[2000]{
20E45;
37C25;
}
\begin{abstract}
For a restricted wreath product $G\wr \mathbb{Z}^k$, where $G$ is a finite abelian group, we determine (in almost all cases) whether this product has the $R_\infty$ property (i.e., whether each of its automorphisms has infinite Reidemeister number).
\end{abstract}
\maketitle
The \emph{Reidemeister number} $R(\varphi)$ of an automorphism $\varphi:\Gamma\to\Gamma$ is defined as the number of its
Reidemeister (or twisted conjugacy) classes (i.e., classes with respect to the equivalence relation
$x\sim gx\varphi(g^{-1})$). We study the problem of determining the class of groups with the property
$R_\infty$ (i.e., each of their automorphisms has infinite Reidemeister number) among restricted wreath products $G\wr \mathbb{Z}^k$, where $G$ is a finite abelian group.
We will prove the following two theorems, which give the answer in almost all cases. Some of the cases in the first theorem were proved in \cite{FraimanTroitsky}.
\begin{teo}\label{teo:cases}
Suppose the prime-power decomposition of $G$ is $\oplus_i ({\mathbb Z}_{(p_i)^{r_i}})^{d_i}$. Then under each of the following
conditions the corresponding wreath product $G\wr {\mathbb Z}^k$ admits an automorphism $\varphi$ with $R(\varphi)<\infty$, i.e.\ it does not have the property $R_\infty$:
\begin{description}
\item[Case 1) (see \cite{FraimanTroitsky})] for all $p_i=2$ and $p_i=3$, we have $d_i\ge 2$ (and $d_i$ is arbitrary for primes $p_i>3$);
\item[Case 2) (see \cite{FraimanTroitsky})] there is no $p_i=2$ and $k$ is even;
\item[Case 3) (see \cite{FraimanTroitsky})] for all $p_i=2$, we have $d_i\ge 2$ and $k=4s$ for some $s$;
\item[Case 4)] for all $p_i=2$, we have $d_i \ge 3$ and $k$ is even;
\item[Case 5)] for all $p_i=2$, we have $d_i \ge 2$, $d_i \ne 3$ and $k$ is even, $k \ge 4$.
\end{description}
\end{teo}
\begin{teo}\label{teo:inverse}
In the above notation, suppose that there is a summand ${\mathbb Z}_{p^i}$ of multiplicity one, where either $p=2$ and $k$ is arbitrary, or $p=3$ and $k$ is odd. Then $G\wr {\mathbb Z}^k$ has the $R_\infty$ property.
\end{teo}
As a by-product we obtain a correction of an inaccuracy in \cite{gowon1} (see Remark \ref{rmk:gowon}).
We refer to \cite{gowon1,SteinTabackWong2015,TroLamp,Fraiman,FraimanTroitsky}
for the previous advances in the problem and to \cite{FelshtynTroitskyFaces2015JGT}
for a general overview and a description of the importance and applications of the problem under consideration.
Recall some facts to be used in the proofs.
We have by definition, $G\wr \mathbb{Z}^k = \Sigma \rtimes_\alpha \mathbb{Z}^k$, where
$\Sigma$ denotes $\oplus_{x\in \mathbb{Z}^k} G_x$, and $\alpha(x)(g_y) =g_{x+y}$. Here $g_x$ is $g \in G \cong G_x$.
Denote by $C(\varphi):=\{g\in \Gamma \colon \varphi(g)=g\}$ the subgroup of $\Gamma$ formed by $\varphi$-fixed elements.
For an inner automorphism, as well as for its restriction to a normal subgroup, we use the notation $\tau_g(x)=gxg^{-1}$.
\begin{lem}[Prop. 3.4 in \cite{FelLuchTro}]\label{lem:Jab_fin}
Suppose $\Gamma$ is a finitely generated residually finite group, $\varphi:\Gamma\to\Gamma$ is an automorphism, and
$R(\varphi)<\infty$. Then $|C({\varphi})|< \infty$.
\end{lem}
\begin{lem}[\cite{FelHill,go:nil1}, see also \cite{polyc,GoWon09Crelle}]\label{lem:extensions}
Suppose $\varphi:\Gamma\to \Gamma$ is an automorphism of a discrete group $\Gamma$, and $H$ is a normal $\varphi$-invariant subgroup of $\Gamma$, so that $\varphi$ induces automorphisms $\varphi':H\to H$ and $\widetilde{\varphi}:\Gamma/H \to \Gamma/H$. Then
\begin{itemize}
\item the projection $\Gamma\to \Gamma/H$ maps Reidemeister classes of $\varphi$ onto Reidemeister classes of $\widetilde{\varphi}$, in particular $R(\widetilde{\varphi})\le R(\varphi)$;
\item if $|C(\widetilde{\varphi})|=n$, then $R(\varphi')\le R(\varphi)\cdot n$;
\item if $C(\widetilde{\varphi})=\{e\}$, then each Reidemeister class of $\varphi'$ is an intersection of the appropriate Reidemeister class of $\varphi$ and $H$;
\item if $C(\widetilde{\varphi})=\{e\}$, then
$R(\varphi)=\sum_{j=1}^R R(\tau_{g_j} \circ \varphi')$, where $g_1,\dots g_R$ are some elements of $\Gamma$ such that
$p(g_1),\dots,p(g_R)$ are representatives of all Reidemeister classes of $\widetilde{\varphi}$,
$p:\Gamma\to \Gamma/H$ is the natural projection and $R=R(\widetilde{\varphi})$.
\end{itemize}
\end{lem}
For a semidirect product $\Sigma \rtimes_\alpha {\mathbb Z}^k $, one has by \cite{Curran2008} that automorphisms $\varphi':\Sigma\to\Sigma$
and $\overline{\varphi}: \Sigma \rtimes_\alpha {\mathbb Z}^k /\Sigma \cong {\mathbb Z}^k \to {\mathbb Z}^k \cong \Sigma \rtimes_\alpha {\mathbb Z}^k /\Sigma$ define an automorphism
$\varphi$ of $\Sigma \rtimes_\alpha {\mathbb Z}^k$ (generally not unique) if and only if
\begin{equation}\label{eq:maineq}
\varphi'(\alpha(m)(h))=\alpha(\overline{\varphi}(m))(\varphi'(h)),\qquad h\in\Sigma,\quad m\in {\mathbb Z}^k.
\end{equation}
Since $\Sigma$ is abelian, by \cite[p.~207]{Curran2008} the mapping $\varphi_1$ defined as $\varphi'$ on $\Sigma$ and by $\overline{\varphi}$ on ${\mathbb Z}^k\subset \Sigma \rtimes {\mathbb Z}^k $ is still an automorphism.
That is why, as it is proved in \cite{FraimanTroitsky},
one can assume that
\begin{equation}\label{eq:restric_on_sub}
{\mathbb Z}^k \subset A\wr {\mathbb Z}^k \mbox{ is $\varphi$-invariant and } \varphi|_{{\mathbb Z}^k}=\overline{\varphi}.
\end{equation}
\begin{lem}[Lemma 2.4 in \cite{FraimanTroitsky}]\label{lem:R_nee}
An automorphism $\varphi: G\wr {\mathbb Z}^k \to G \wr {\mathbb Z}^k$ has $R(\varphi)<\infty$ if and only if
$R(\overline{\varphi})< \infty$ and $R(\tau_m \circ \varphi')<\infty$ for any $m \in {\mathbb Z}^k$ (in fact, it is sufficient to verify this for representatives
of Reidemeister classes of $\overline{\varphi}$).
\end{lem}
\begin{lem}[Lemma 2.5 in \cite{FraimanTroitsky}]\label{lem:how_to_de}
Suppose $\overline{\varphi}:{\mathbb Z}^k \to {\mathbb Z}^k$ and $F:G\to G$ are automorphisms.
Define $\varphi'$ by
\begin{equation}\label{eq:how_to_def}
\varphi'(a_0)=(Fa)_0,\qquad \varphi'(a_x)=(Fa)_{\overline{\varphi}(x)}.
\end{equation}
Then (\ref{eq:maineq}) holds and $\varphi'$, together with $\overline{\varphi}$, defines an automorphism of $G\wr {\mathbb Z}^k$.
Evidently the subgroups $\oplus G_x$, where $x$ runs over an orbit of $\overline{\varphi}$, are $\varphi'$-invariant direct summands of $\Sigma$.
\end{lem}
Also, one can verify (see \cite{FelshtynHill1993CM}) that,
for $\overline{\varphi}:{\mathbb Z}^k\to {\mathbb Z}^k$ defined by a matrix $M$, one obtains
\begin{equation}\label{eq:FHZk}
R(\overline{\varphi})=\# \mathrm{Coker} (\mathrm{Id} -\overline{\varphi})=|\det(E-M)|,
\end{equation}
if $R(\overline{\varphi})<\infty$, and $|\det(E-M)|=0$ otherwise.
It is well known that, for an automorphism $\varphi: A\to A$ of a finite abelian group $A$, we have
\begin{equation}\label{eq:finabel}
R(\varphi)=|C(\varphi)|.
\end{equation}
\begin{proof}[Proof of Theorem \ref{teo:cases}]
In each of the cases we will consider a specific automorphism $\overline{\varphi}:{\mathbb Z}^k \to {\mathbb Z}^k$ with $R(\overline{\varphi})<\infty$
and define $\varphi':\Sigma\to \Sigma$ with properties as in Lemmas \ref{lem:R_nee} and \ref{lem:how_to_de}.
More precisely, we will have $R(\varphi')=1$.
\textbf{Case 4):} when $d_i >2$ for $p_i=2$ and $k=2 t$.
In this case the construction starts as in \cite{TroLamp}: we take $\overline{\varphi}:{\mathbb Z}^{2t} \to {\mathbb Z}^{2t}$ to be the direct sum of $t$ copies of
$$
{\mathbb Z}^2 \to {\mathbb Z}^2,\quad \begin{pmatrix}
u\\ v
\end{pmatrix} \mapsto M \begin{pmatrix}
u\\ v
\end{pmatrix}, \qquad M=\begin{pmatrix}
0 & 1\\
-1 & -1
\end{pmatrix}.
$$
Then $M$ generates a subgroup of $GL(2,{\mathbb Z})$, which is isomorphic to ${\mathbb Z}_3$ (see \cite[p. 179]{Newman1972book}).
All orbits of $M$ have length $3$ (except for the trivial one) and the corresponding Reidemeister number equals $\det (E-M)=3$ (by (\ref{eq:FHZk})).
Similarly for $\overline{\varphi}$: the length of any orbit is $1$ or $3$ and $R(\overline{\varphi})=3^t$.
Now define $\varphi'$ as a direct sum of actions for ${\mathbb Z}_q$, $q=(p_i)^{r_i}$, $p_i \ge 3$, and for $({\mathbb Z}_{2^{r_i}})^3$, $({\mathbb Z}_{2^{r_i}})^4$ and $({\mathbb Z}_{2^{r_i}})^5$.
For $p_i \ge 3$ choose $m=m_i$ such that
\begin{equation}\label{eq:condit_on_m_3}
m^3 \mbox{ and } 1-m^3 \mbox{ are invertible in }{\mathbb Z}_q.
\end{equation}
This can be done for $p_i\ge 3$: one can take $m=3$ for $p_i=7$ and $m=2$ in the remaining cases (and impossible for $p_i=2$).
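These choices can be checked mechanically, as in the following Python sketch (the list of primes is just a sample; invertibility in ${\mathbb Z}_{p^r}$ depends only on divisibility by $p$).
\begin{verbatim}
# Check that m^3 and 1 - m^3 are invertible in Z_{p^r} for the chosen m,
# i.e. that neither is divisible by p.
def condition_holds(m, p):
    return m ** 3 % p != 0 and (1 - m ** 3) % p != 0

for p in [3, 5, 7, 11, 13, 17, 19, 23, 29, 31]:
    m = 3 if p == 7 else 2
    assert condition_holds(m, p), p
print("condition holds for the chosen m at all listed primes")
\end{verbatim}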
Define $\varphi'(\delta_0)=m \delta_0$, where $\delta_0$ is $1\in ({\mathbb Z}_q)_0$. Then, to keep (\ref{eq:maineq}) we need to define
$\varphi'(\delta_g)=m \delta_{\overline{\varphi}(g)}$, where $\delta_g$ is $1\in ({\mathbb Z}_q)_g$, as in Lemma \ref{lem:how_to_de}. So, the corresponding subgroup $\oplus_{g \in {\mathbb Z}^k}({\mathbb Z}_q)_g \subset \Sigma$ is $\varphi'$-invariant and decomposed into infinitely many invariant summands
$({\mathbb Z}_q)_g \oplus ({\mathbb Z}_q)_{\overline{\varphi}(g)}\oplus ({\mathbb Z}_q)_{\overline{\varphi}^2(g)}$
isomorphic to $({\mathbb Z}_q)^3$ (over generic orbits of $\overline{\varphi}$) and one summand $({\mathbb Z}_q)_0$ (over the trivial orbit).
Then the corresponding restrictions of $\varphi'$ and $1-\varphi'$ can be written as multiplication by
$$
\begin{pmatrix}
0 & 0 & m \\
m & 0 & 0\\
0 & m & 0
\end{pmatrix}, \quad
\begin{pmatrix}
1 & 0 & -m \\
-m & 1 & 0\\
0 & -m & 1
\end{pmatrix}, \quad \mbox{and }
m, \quad 1-m,
$$
respectively. The three-dimensional mappings are isomorphisms by (\ref{eq:condit_on_m_3}). Since an element $\ell$ is not invertible in ${\mathbb Z}_{(p_i)^{r_i}}$ if and only if $\ell = u\cdot p_i$, the invertibility of one-dimensional mappings follows from
(\ref{eq:condit_on_m_3}) and the factorization $1-m^3=(1-m)(1+m+m^2)$. (This construction gives a more explicit presentation of a part of the proof
of \cite[Theorem 4.1]{TroLamp}.)
Now pass to the $2$-subgroup.
Define $F_j:({\mathbb Z}_q)^j \to ({\mathbb Z}_q)^j$ ($j=2,\dots,5$) as
\begin{equation}\label{eq:F2F3}
F_2 = \begin{pmatrix}
0 & 1\\
1 & 1
\end{pmatrix} , F_3 = \begin{pmatrix}
0&0 & 1\\
0&1 & 1\\
1& 1& 1
\end{pmatrix},
F_4 = \begin{pmatrix}
0&0 & 0& 1\\
1& 0&0 & 1\\
0& 1& 0 & 1\\
0& 0& 1& 1
\end{pmatrix},
F_5 = \begin{pmatrix}
0& 0& 0 & 0 & 1 \\
0& 0& 0 & 1 & 1 \\
0& 0& 1 & 1 & 1 \\
0& 1& 1 & 1 & 1\\
1 & 1 & 1 & 1 & 1
\end{pmatrix}.
\end{equation}
For components $({\mathbb Z}_{2^{r_i}})^3$, $({\mathbb Z}_{2^{r_i}})^4$ and $({\mathbb Z}_{2^{r_i}})^5$,
(in accordance with Lemma \ref{lem:how_to_de})
define $\varphi'$ by
\begin{equation}\label{eq:def_on_comp}
a_0 \mapsto F_3 a_0,\qquad a_0 \mapsto F_4 a_0,\qquad a_0 \mapsto F_5 a_0,
\end{equation}
respectively.
Then, for any orbit of $\overline{\varphi}$ of length $\gamma$, we wish to verify that $(F_5)^\gamma$ as a homomorphism $({\mathbb Z}_{2^i})^5\to ({\mathbb Z}_{2^i})^5$ has no non-trivial fixed elements
(to apply (\ref{eq:finabel})).
One can verify (it is easiest to write a small program) that the order of $F_5$ $\mod 2$ is $31$ and that the smaller powers have no non-trivial fixed elements (here $\gamma=1$ or $3$, and we are interested in the powers $1$ and $3$ only). An easier calculation shows the same for $F_3$ and $F_4$ (their orders $\mod 2$ are $7$ and $5$, respectively).
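A possible form of such a small program is the following Python sketch (illustrative; the function names are ours): it computes the multiplicative orders of $F_3$, $F_4$, $F_5$ modulo $2$ and checks that the powers $1$ and $3$ have no non-trivial fixed vectors over ${\mathbb Z}_2$, hence none over ${\mathbb Z}_{2^{r}}$.
\begin{verbatim}
import numpy as np
from itertools import product

F3 = np.array([[0,0,1],[0,1,1],[1,1,1]])
F4 = np.array([[0,0,0,1],[1,0,0,1],[0,1,0,1],[0,0,1,1]])
F5 = np.array([[0,0,0,0,1],[0,0,0,1,1],[0,0,1,1,1],[0,1,1,1,1],[1,1,1,1,1]])

def order_mod2(F):
    d, P = F.shape[0], F % 2
    for k in range(1, 2 ** (d * d)):
        if np.array_equal(P, np.eye(d, dtype=int)):
            return k
        P = (P @ F) % 2
    return None

def fixes_some_nonzero_vector(F, gamma):
    d = F.shape[0]
    P = np.linalg.matrix_power(F, gamma) % 2
    return any(np.array_equal((P @ np.array(v)) % 2, np.array(v))
               for v in product((0, 1), repeat=d) if any(v))

for name, F in (("F_3", F3), ("F_4", F4), ("F_5", F5)):
    print(name, "order mod 2:", order_mod2(F),
          "fixed vectors for powers 1, 3:",
          [fixes_some_nonzero_vector(F, g) for g in (1, 3)])
\end{verbatim}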
To complete the construction, it remains to observe that any number $\ge 3$ can be presented as a combination of $3$, $4$ and $5$ with non-negative integer coefficients.
Concerning $\tau_z\circ \varphi'$, we should observe the following general property of automorphisms constructed in this way. One has $\overline{\varphi}^3(x)=x$, and one can easily verify that $\overline{\varphi}^2z+\overline{\varphi}z+z=0$. Thus
$$
(\tau_z \circ \varphi') (g_x) = (m g)_{\overline{\varphi}(x)+z}, \quad
(\tau_z \circ \varphi') (g_{\overline{\varphi}(x)+z})=(mg)_{\overline{\varphi}^2(x)+\overline{\varphi}z+z},
$$
$$
(\tau_z \circ \varphi')g_{\overline{\varphi}^2(x)+\overline{\varphi}z+z}=(mg)_{\overline{\varphi}^3(x)+\overline{\varphi}^2z+\overline{\varphi}z+z}=(mg)_x.
$$
Hence $\tau_z \circ \varphi'$ is defined by the same matrices as $\varphi'$, but on new invariant summands
$({\mathbb Z}_q)_x \oplus ({\mathbb Z}_q)_{\overline{\varphi}(x)+z}\oplus ({\mathbb Z}_q)_{\overline{\varphi}^2(x)+\overline{\varphi}z+z}$.
Similarly for the trivial orbit and for the $2$-subgroup.
The properties of $m$ and $F_j$ imply the absence of non-trivial fixed elements. Hence, $R(\varphi')=1$ by
(\ref{eq:finabel}).
Case 4) is proved.
\textbf{Case 5):} when $d_i >1$, $d_i \ne 3$ for $p_i=2$ and $k$ is even, $k \ge 4$ (i.e., we exclude the case $k=2$ from consideration).
Using the $5$th cyclotomic polynomial $1+x+x^2+x^3+x^4$ we can define
an element of order $5$ in $GL(4,{\mathbb Z})$
$$
M_4=
\begin{pmatrix}
0 & 0 & 0 & -1 \\
1 & 0 & 0 & -1 \\
0 & 1 & 0 & -1\\
0& 0& 1 & -1
\end{pmatrix}
$$
and, similarly to $M_4$, an element $M_6$ of order $7$ in $GL(6,{\mathbb Z})$.
For any even $k\ge 4$, let $M\in GL(k,{\mathbb Z})$ be the direct sum of an appropriate number $s$ of $M_4$'s and maybe one $M_6$.
Let $\overline{\varphi}:{\mathbb Z}^k \to {\mathbb Z}^k$ be defined by $M$. One can calculate
$$
\det(M_4-E)=5, \qquad \det(M_6-E)=7, \qquad \det(M-E)=5^s \mbox{ or } 5^s 7.
$$
Hence, by (\ref{eq:FHZk}), $R(\overline{\varphi})=5^s \mbox{ or } 5^s 7<\infty$.
The length of any orbit is some divisor of $5$ or $35$, hence an \emph{odd} number.
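These determinants and orders can be confirmed numerically; in the following Python sketch $M_6$ is taken, by analogy with $M_4$, to be the companion matrix of the $7$th cyclotomic polynomial (an assumption consistent with the construction above).
\begin{verbatim}
import numpy as np

def companion_of_cyclotomic(d):
    # companion matrix of 1 + x + ... + x^d, an element of order d + 1 in GL(d, Z)
    M = np.zeros((d, d), dtype=int)
    M[1:, :-1] = np.eye(d - 1, dtype=int)
    M[:, -1] = -1
    return M

def order(M):
    P = M.copy()
    for k in range(1, 100):
        if np.array_equal(P, np.eye(M.shape[0], dtype=int)):
            return k
        P = P @ M
    return None

for name, M in (("M_4", companion_of_cyclotomic(4)), ("M_6", companion_of_cyclotomic(6))):
    print(name, "det(M - E) =", round(np.linalg.det(M - np.eye(M.shape[0]))),
          "order =", order(M))
\end{verbatim}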
For $p$-power components ${\mathbb Z}_{p^i}$ with $p>2$, we define
$\varphi'$ by $a_0 \mapsto (p-1) a_0$. Then, for an orbit $u, \overline{\varphi} u, \dots, \overline{\varphi}^{\gamma-1} u$ of length $\gamma$, we need to verify (for finiteness of $R(\varphi')$) that $(p-1)^\gamma$ as a homomorphism ${\mathbb Z}_{p^i} \to {\mathbb Z}_{p^i}$ has no non-trivial fixed elements, i.e. $(p-1)^\gamma \not\equiv 1 \mod p$. This is fulfilled because, for an odd $\gamma$, $(p-1)^\gamma -1 \equiv -2 \not\equiv 0 \mod p$.
For $2$-power components ${\mathbb Z}_{2^i}\oplus {\mathbb Z}_{2^i}$, we define
$\varphi'$ by $a_0 \mapsto F_2 a_0$.
For $2$-power components $({\mathbb Z}_{2^i})^5$, we define
$\varphi'$ by $a_0 \mapsto F_5 a_0$. Then, for any orbit of $\overline{\varphi}$ of length $\gamma$, we need to verify that $(F_5)^\gamma$ as a homomorphism $({\mathbb Z}_{2^i})^5\to ({\mathbb Z}_{2^i})^5$ has no non-trivial fixed elements.
The end of the proof for Case 5) is completely analogous to Case 4).
\end{proof}
\begin{proof}[Proof of Theorem \ref{teo:inverse}]
Denote by $A_p$ the $p$-subgroup of $G$.
Suppose that $R(\varphi)<\infty$; we will arrive at a contradiction. For this purpose consider the characteristic subgroup $\Sigma' =\oplus A' \subset \Sigma$, where $A' \subset A$ is formed by elements of order $p^{i-1}$ from $A_p$ and by all elements of the other $A_q$, $q\ne p$.
Then $(A\wr {\mathbb Z}^k) /\Sigma' \cong B\wr {\mathbb Z}^k$, where $B=B'\oplus {\mathbb Z}_p$ and $B'=\oplus_j ({\mathbb Z}_{p^j})^{d_j}$, $j\ge 2$, $d_j \ge 1$.
We keep the notation $\varphi$, $\overline{\varphi}$, etc., for the induced automorphisms.
By Lemma \ref{lem:extensions}, $R(\varphi_B)\le R(\varphi)<\infty$, where $\varphi_B:B\wr {\mathbb Z}^k \to
B\wr {\mathbb Z}^k$ is the induced automorphism.
Consider the subgroup $D=B''\oplus {\mathbb Z}_p \subseteq B$ formed by all elements of order $p$ in $B$, where $B''$ is formed by all elements of order $p$ in $B'$. Then $D\subseteq B$ and $\Sigma_D=\oplus D \subseteq \Sigma'$ are characteristic subgroups
and $D\wr {\mathbb Z}^k$ is a subgroup of $B\wr {\mathbb Z}^k$ (generally not normal).
By (\ref{eq:restric_on_sub}) $D\wr {\mathbb Z}^k$ is $\varphi$-invariant and we
wish to prove that $R(\varphi_D)<\infty$, where $\varphi_D: D\wr {\mathbb Z}^k \to D\wr {\mathbb Z}^k$ is the restriction of $\varphi_B$.
For this purpose consider $\Sigma_{B/D}=\oplus B/D$ and the automorphism $\varphi'_*: \Sigma_{B/D}\to \Sigma_{B/D}$ induced in the evident way.
We claim that $|C({\varphi'_*})| < \infty$. To prove this, consider the following commutative diagram
\begin{equation}\label{eq:diagr_for_D_1}
\xymatrix{0 \ar[r]& \Sigma_D \ar[r] \ar[d]_{\varphi'_D}& \Sigma_{B} \rtimes {\mathbb Z}^k=B\wr {\mathbb Z}^k \ar[r] \ar[d]_{\varphi_B} & \Sigma_{B/D} \rtimes {\mathbb Z}^k \ar[r] \ar[d]^{\varphi_*}& 0\\
0 \ar[r]& \Sigma_D \ar[r] & \Sigma_{B} \rtimes {\mathbb Z}^k=B \wr {\mathbb Z}^k \ar[r] & \Sigma_{B/D} \rtimes {\mathbb Z}^k \ar[r] & 0.}
\end{equation}
Then $\Sigma_{B/D} \rtimes {\mathbb Z}^k$ is a finitely generated residually finite group and $R(\varphi_*)<\infty$.
Hence by Lemma \ref{lem:Jab_fin}, we have $|C({\varphi'_*})|< \infty$.
Then by Lemma \ref{lem:extensions}, we obtain $R(\varphi'_D)\le R(\varphi_B) \cdot |C({\varphi'_*})|< \infty$.
The same is true for $\tau_g \circ \varphi$. Thus, since $C(\overline{\varphi})=\{0\}$,
applying Lemma \ref{lem:extensions}
to the commutative diagram
\begin{equation}\label{eq:diagr_for_D}
\xymatrix{0 \ar[r]& \Sigma_D \ar[r] \ar[d]_{\varphi'_D}& \Sigma_{D} \rtimes {\mathbb Z}^k=D\wr {\mathbb Z}^k \ar[r] \ar[d]_{\varphi_D} & {\mathbb Z}^k \ar[r] \ar[d]^{\overline{\varphi}}& 0\\
0 \ar[r]& \Sigma_D \ar[r] & \Sigma_{D} \rtimes {\mathbb Z}^k=D \wr {\mathbb Z}^k \ar[r] & {\mathbb Z}^k \ar[r] & 0.}
\end{equation}
(and to the analogous diagrams for $\tau_g \circ \varphi$) we obtain $R(\varphi_D)<\infty$, as desired.
Now observe that $B''$ is invariant in $D$ under the automorphisms coming from $B$ (as restrictions), because $(b,0)$ is $p$-divisible in $B$ while $(b',k)$ with $k\ne 0$ is not, where $b, b'\in B''$ and $k \in {\mathbb Z}_p$. The same is true for $\Sigma_{B''}=\oplus B''$. In particular, it is invariant under $\tau_g$, i.e., normal in $ \Sigma_{D} \rtimes {\mathbb Z}^k$. Hence we can consider the diagram
\begin{equation}\label{eq:diagr_for_B''}
\xymatrix{0 \ar[r]& \Sigma_{B''} \ar[r] \ar[d]_{\varphi'_{B''}}& \Sigma_{D} \rtimes {\mathbb Z}^k=D \wr {\mathbb Z}^k \ar[r] \ar[d]_{\varphi_D} & \Sigma_{D/B''} \rtimes {\mathbb Z}^k
= {\mathbb Z}_p\wr {\mathbb Z}^k \ar[r] \ar[d]^{\varphi_p}& 0\\
0 \ar[r]& \Sigma_{B''} \ar[r] & \Sigma_{D} \rtimes {\mathbb Z}^k=D \wr {\mathbb Z}^k \ar[r] & \Sigma_{D/B''} \rtimes {\mathbb Z}^k= {\mathbb Z}_p\wr {\mathbb Z}^k \ar[r] & 0.}
\end{equation}
Then $R(\varphi_D)<\infty$ implies $R(\varphi_p)<\infty$ (Lemma \ref{lem:extensions}). But in the cases under consideration $R(\varphi_p)=\infty$ by
\cite{TroLamp}. A contradiction.
\end{proof}
\begin{rmk}\label{rmk:gowon}\rm
Note that in the proof of Theorem 3.6 of \cite{gowon1} there is an inaccuracy in the argument after (3.3), because $(G\wr {\mathbb Z})/(L\wr {\mathbb Z})$ is not $G/L$
but $\oplus_i G/L$, which is not finite and not even finitely generated. The proof there can be corrected along the lines of our theorem above.
\end{rmk}
\end{document}
|
\begin{document}
\title{Generating quantum correlated twin beams by four-wave mixing in hot cesium vapor}
\author{Rong Ma$^{1}$}
\author{Wei Liu$^{1}$}
\author{Zhongzhong Qin$^{1,2}$}
\email{[email protected]}
\author{Xiaojun Jia$^{1,2}$}
\author{Jiangrui Gao$^{1,2}$}
\affiliation{$^1$State Key Laboratory of Quantum Optics and Quantum Optics Devices,
Institute of Opto-Electronics, Shanxi University, Taiyuan 030006, People's
Republic of China\\
$^2$Collaborative Innovation Center of Extreme Optics, Shanxi University,
Taiyuan, Shanxi 030006, People's Republic of China\\
}
\begin{abstract}
Using a nondegenerate four-wave mixing process based on a double-$\Lambda$ scheme in hot cesium vapor, we generate quantum correlated twin beams with a maximum
intensity-difference squeezing of 6.5 dB. The substantially improved squeezing can be mainly attributed to very good frequency and phase-difference stability between the pump and probe beams in our experiment.
Intensity-difference squeezing can be observed within a wide experimental parameter range, which guarantees its robust generation. Since this scheme produces multi-spatial-mode twin beams at the
Cs $D_{1}$ line, it is of interest for experiments involving quantum imaging and coherent interfaces between atomic and solid-state systems.
\end{abstract}
\maketitle
\section{Introduction}
Quantum correlation and entanglement are significant for both fundamental tests of quantum physics \cite{EPR,Bell} and applications in
future quantum technologies, such as quantum metrology \cite{GravitationalWave}, quantum imaging \cite{QuantumImaging}, and quantum information processing \cite{BraunsteinRMP}.
The standard technique to generate quantum correlated beams and continuous-variable entangled states is by parametric down-conversion in a nonlinear crystal,
with an optical parametric oscillator (OPO) or optical parametric amplifier. While very large amounts of quantum noise reduction have been achieved in this way \cite{LauratOL,JiaOE},
the central frequency and linewidth of the generated nonclassical states usually do not naturally match the transitions of material systems, such as atoms and solid-state emitters,
which limits their applications in light-matter interactions.
On the other hand, the first experimental demonstration of a squeezed state of light was realized using a four-wave mixing (FWM)
process in sodium vapor \cite{SlusherPRL}. Since then, many groups have demonstrated squeezing in atomic vapor under a variety of different configurations
\cite{AtomsSqueezing1,AtomsSqueezing2,AtomsSqueezing3,AtomsSqueezing4,AtomsSqueezing5,AtomsSqueezing6,AtomsSqueezing7}.
However, the decoherence effect caused by spontaneous emission and absorption in atomic vapor limits the level of quantum noise reduction to no more than 2.2 dB \cite{AtomsSqueezing7}.
The nondegenerate FWM process in a double-$\Lambda$ scheme was recognized as a possible work-around for these limitations,
in which ground-state coherence based on coherent population trapping and electromagnetically induced transparency can reduce or eliminate
spontaneous emission noise \cite{DoubleLambda1,DoubleLambda2}. Recently, it was shown by Lett's group \cite{PooserOE,LettPRA} and several other groups \cite{QinOL,GlorieuxArxiv,JasperseOE} that the FWM process based on a double-$\Lambda$ scheme
in hot Rb vapor is an efficient way to generate quantum correlated twin beams. The highest degrees of intensity-difference squeezing obtained by several different groups are in the range of 7.0 to 9.2 dB \cite{PooserOE,QinOL,GlorieuxArxiv},
which approaches the best value reported for OPO \cite{LauratOL}. This system has proven to be very successful for a variety of applications \cite{EntangledImages,TunableDelay,SU11NC,QinPRL,QinAPL,MacRaPRL,QinLight,TravisOL}.
\begin{figure}
\caption{Experimental layout for generating quantum correlated twin beams. (a) Double-$\Lambda$ scheme in the $D_{1}$ line of $^{133}$Cs, formed by the excited level ($6P_{1/2}, F'=4$) and the two ground levels ($6S_{1/2}, F=3$ and $F=4$). (b) A weak probe beam crosses a strong pump beam at a small angle inside the hot Cs vapor cell; the amplified probe and the generated conjugate beam emerge on opposite sides of the pump.}
\end{figure}
\begin{figure*}
\caption{Experimental setup for generating and detecting quantum correlated twin beams with three different methods to generate the probe beam. (a) A Ti:sapphire laser and a diode laser are used as pump and probe beams, respectively. GL: Glan-laser polarizer;
GT: Glan-Thompson polarizer; BPD: balanced photodetector; SA: spectrum analyzer. (b) The same lasers are used, and the PLL works in the meanwhile. BS: beam splitter; PD: photodetector; PS: power splitter; PLL: phase-locked loop.
(c) An electro-optic modulator (EOM) is used to generate the probe beam. (d), (e), and (f) Corresponding beat spectrum of the pump and probe beams for the three methods shown in (a), (b), and (c), respectively. Insets of (e) and (f) show the beat spectrum in a 40-Hz span.}
\end{figure*}
Twin beams have also been experimentally generated based on the FWM process in other hot alkali vapor, e.g., cesium \cite{CsTwinBeams} and potassium \cite{KTwinBeams}, as well as
in metastable helium at room temperature \cite{HeTwinBeams}. Cesium offers certain advantages, e.g., the quantum correlation at the $^{133}$Cs $D_{1}$ line lies well within the
wavelength regime of the exciton emission from InAs quantum dots \cite{QuantumDots}, which provides a potential resource for coherent interfaces between atomic and solid-state systems.
The highest degree of intensity-difference squeezing of twin beams based on the FWM process in hot cesium vapor is 2.5 dB \cite{CsTwinBeams}. In Ref. \cite{CsTwinBeams}, the authors pointed out the experimental difficulty in getting high quantum correlation in cesium vapor
because of larger hyperfine splitting of the ground states compared with Rb, as well as experimental limitations such as laser frequency stability, the vapor cell's transmission efficiency, and mechanical vibration of the system.
In this paper, we substantially improve the squeezing to 6.5 dB, which can be mainly attributed to very good frequency and phase-difference stability between the pump and probe beams in our experiment.
Our work shows it is possible to produce narrowband quantum correlated twin beams with similar double-$\Lambda$ schemes at a particular wavelength \cite{KTwinBeams,HeTwinBeams}.
\section{Experimental setup}
As shown in Fig. 1(a), the $^{133}$Cs $D_{1}$ line is used to form the double-$\Lambda$ level structure with an excited level
($6P_{1/2}, F'=4$) and two ground levels ($6S_{1/2}, F=3$ and $F=4$). A weak probe beam with an intensity of $I_{0}$ crosses
with a strong pump beam inside a hot Cs vapor cell at a small angle [Fig. 1(b)]. After the FWM process, the intensity of the probe beam
is amplified to $GI_{0}$. In the meanwhile, another conjugate beam is generated on the other side of the pump beam with an intensity of $(G-1)I_{0}$.
The FWM process relies on the double-$\Lambda$ scheme in which two pump photons are simultaneously converted to one probe photon and one conjugate photon.
As a result, the relative intensity difference of the probe and conjugate beams is squeezed
compared with the corresponding shot-noise limit (SNL) by an amount of $1/(2G-1)$.
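For reference, this ideal scaling is easy to evaluate; the short sketch below (assuming the lossless $1/(2G-1)$ relation, i.e., no excess noise or detection loss) converts it to decibels of squeezing.
\begin{verbatim}
import math

def squeezing_db(G):
    """Ideal intensity-difference squeezing (dB below the SNL) for FWM gain G."""
    return -10 * math.log10(1.0 / (2 * G - 1))

for G in (2, 3, 5, 10):
    print(G, round(squeezing_db(G), 1))
# In this ideal picture the 6.5 dB reported in this work would correspond
# to a gain G of about 2.7.
\end{verbatim}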
To produce quantum correlated twin beams efficiently, the pump-probe frequency difference is required to be approximately the ground-state hyperfine splitting (9.2 GHz for $^{133}$Cs).
There are several methods to achieve two laser beams with several gigahertz of frequency separation: (i) using two independent free-running lasers, (ii) using a phase-locked loop (PLL) to lock the frequency
and phase difference of two lasers \cite{LvovskyPLL}, (iii) using the diffraction effect of an acousto-optic modulator (AOM) to generate a laser beam with a certain frequency difference \cite{LettPRA,QinOL,GlorieuxArxiv,JasperseOE}, and (iv) generating a sideband with a certain frequency difference by the use of an electro-optic modulator (EOM) \cite{EOMSideband}.
We generate quantum correlated twin beams based on a FWM process in cesium vapor with
the 9.2-GHz frequency-difference probe beam achieved using methods (i), (ii), and (iv). Method (iii) is not chosen because an AOM which has
a 9.2-GHz frequency-shift ability is not commercially available. Furthermore, the diffraction efficiency of an AOM with a 3-GHz frequency shift is on the order
of 1\% \cite{LettPRA,QinOL,GlorieuxArxiv,JasperseOE}, so achieving a 9.2-GHz frequency shift with multiple cascaded AOMs is impractical.
\begin{figure}
\caption{Intensity-difference noise spectra of the quantum correlated twin beams with the probe beam generated using another laser (trace $A$), PLL (trace $B$), and the EOM sideband (trace $C$). Trace $D$ gives corresponding SNL.
The electronic noise floor and background noise are both 10 dB below the corresponding SNL at 1 MHz and have been subtracted from all of the traces. The sharp peaks on traces $A$, $B$, and $C$ can be attributed to classical noise from our lasers.}
\end{figure}
\subsection{Using two independent lasers}
First, we use a Ti:sapphire laser and a diode laser as the pump beam and probe beam, respectively [Fig. 2(a)]. The Ti:sapphire laser is tuned about 1.6 GHz
to the blue of $^{133}$Cs ($6S_{1/2}, F=3\rightarrow6P_{1/2}, F'=4$) with a total power of 1 W. Beams from the Ti:sapphire laser and the diode laser are mixed at a beam splitter,
and a total optical power of 5 mW is focused onto a high-bandwidth photodetector [shown in Fig. 2(b)]. As shown in Fig. 2(d), the beat signal of these two lasers, with a full width at half maximum (FWHM) of around 5 MHz,
is monitored on a spectrum analyzer (SA) to ensure its central frequency is equal to the ground-state hyperfine splitting (two-photon detuning $\delta=0$ MHz).
As shown in Fig. 2(a), by choosing vertical polarization for the pump and horizontal polarization for the probe, they can be combined in a Glan-laser (GL) polarizer.
The beams then cross each other at an angle of 6 mrad in the center of the cesium vapor cell. The vapor cell is 25 mm long and temperature stabilized at $112 ^{\circ}$C.
The windows of the vapor cell are antireflection coated at 895 nm on both faces, resulting in a transmissivity for the far-detuned probe beam of 98\% per window.
The pump beam and the probe beam are focused with waists of 560 and 300 $\mu$m (1/$e^{2}$ radius), respectively, at the crossing point to ensure that they overlap over almost the full length of the cell.
After the vapor cell, a Glan-Thompson (GT) polarizer with an extinction ratio of $10^{5}$:1 is used to filter out the pump beam. The amplified probe and the generated conjugate beams are directly sent into the
two ports of a balanced photodetector (BPD) with a gain of $10^{5}$ V/A and a quantum efficiency of 98\%. The output of the BPD is sent to a SA
with a resolution bandwidth (RBW) of 30 kHz and a video bandwidth (VBW) of 300 Hz. To measure the SNL, a coherent laser beam, whose power is equivalent to the total power of the probe and conjugate beams,
is split into two beams using a 50:50 beam splitter and sent to the BPD.
\begin{figure}
\caption{Intensity-difference noise power versus total optical power for twin beams using two independent lasers (curve $A$), PLL (curve $B$), and EOM (curve $C$), and for a coherent beam (curve $D$). The electronic noise and background noise have been subtracted from all the data points.
Error bars for experimental data represent $\pm1$ standard deviation and are obtained based on the statistics of the measured data.}
\end{figure}
\subsection{Using a PLL}
The basic schematic for our PLL is shown in Fig. 2(b). A power splitter splits the beat signal between a SA and the PLL. Two reference frequencies, one 9.18 GHz and the other 20 MHz,
are used to produce the error signal for the diode laser, closing the feedback loop. A commercial fast analog linewidth controller allows fast current injection modulation of the diode laser up to tens of megahertz.
Shown in Fig. 2(e) is the beat spectrum with a span of 20 MHz when the PLL works. The inset shows the beat spectrum in a 40-Hz span. It clearly shows the FWHM of the beat signal is around 1 Hz, which indicates very good frequency and phase-difference stability between the
pump and probe beams.
\subsection{Using an EOM}
As shown in Fig. 2(c), we use an EOM to produce optical sidebands at $\pm9.2$ GHz from the carrier frequency of the Ti:sapphire laser. The EOM is driven by a signal generator and a power amplifier with a radio-frequency
power of 34 dBm. The tuning range of the EOM is $\pm50$ MHz centered at 9.2 GHz. The relative optical power in the first-order sidebands and carrier is monitored by a scanning cavity and amounts to 10\%, 10\%, and 80\%, respectively.
To select the probe frequency component (-1$^{st}$ order sideband) from the carrier and two sidebands, three successive temperature-stabilized etalons with a finesse of around 60 are used, which give a total transmissivity
of around 80\%. The 9.2-GHz frequency-shift efficiency of 8\% is high enough that no further amplification of the probe beam is needed.
Shown in Fig. 2(f) is the beat spectrum of the -1$^{st}$ order sideband and the carrier frequency with a span of 20 MHz. The inset of Fig. 2(f) shows the beat spectrum in a 40-Hz span. The FWHM of the beat signal is also around 1 Hz.
\begin{figure*}
\caption{Dependence of the normalized intensity-difference squeezing and the FWM gain on (a) pump power, (b) one-photon detuning, (c) two-photon detuning,
and (d) temperature of the vapor cell. The traces for squeezing are plotted in red, and the traces for the FWM gain are in blue. The gray lines at 0 dB show the corresponding SNL.}
\end{figure*}
\section{Results and discussions}
We measure the intensity-difference noise power spectra of the probe and conjugate beams for three different methods to generate the probe beam (indicated as traces $A$, $B$, and $C$ in Fig. 3).
All of these three traces are normalized to the corresponding SNL (trace $D$ in Fig. 3). When two independent lasers are used, the maximum degree of intensity-difference squeezing is 3.7 dB at 0.23 MHz with a squeezing bandwidth of 0.72 MHz (trace $A$).
When the PLL is used, the maximum degree of intensity-difference squeezing is improved to 5.9 dB at 0.23 MHz (trace $B$). However, trace $B$ is even more noisy than trace $A$
in the frequency range from 0.72 to 4 MHz. This may be due to the excess intensity noise induced by the PLL since it modulates the current of the diode laser \cite{LvovskyPLL}.
Trace $C$ shows the intensity-difference noise spectrum when the probe is generated by the EOM sideband. The maximum degree of intensity-difference squeezing is 6.5 dB at around 1 MHz with
a much larger squeezing bandwidth of over 4 MHz. It shows that keeping the frequency and phase difference between the pump and probe beams stable is critical in our experiment and the EOM sideband is an optimal option.
To better compare the noise reduction of the FWM process using two independent lasers, PLL, and EOM, we vary the seed probe power and record the intensity-difference noise power versus the total optical power of the twin beams (curves $A$, $B$, and $C$, respectively, in Fig. 4).
It should be pointed out that the optimal analysis frequency is chosen for each configuration (0.23 MHz for curves $A$ and $B$ and 1 MHz for curve $C$).
Similarly, we also record the noise power of a coherent beam at 1 MHz at different optical powers using the SNL measurement method described above (curve $D$ in Fig. 4). After fitting these four curves to straight lines, we find that the ratios of slopes between
curves $A$, $B$, and $C$ and curve $D$ are equal to 0.421, 0.262, and 0.222, respectively, which shows that the degrees of intensity-difference squeezing of the twin beams generated in these three configurations are 3.7, 5.8, and 6.5 dB, respectively.
From Figs. 3 and 4 it can be clearly seen that, among the three configurations, the highest degree and bandwidth of intensity-difference squeezing are achieved by using the EOM sideband.
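The quoted squeezing values follow from the fitted slope ratios via $-10\log_{10}$ of the ratio; a quick check in Python:
\begin{verbatim}
import math

ratios = {"two lasers": 0.421, "PLL": 0.262, "EOM": 0.222}
for method, r in ratios.items():
    print(method, round(-10 * math.log10(r), 1))
# -> about 3.8, 5.8 and 6.5 dB, consistent with the quoted 3.7, 5.8 and 6.5 dB.
\end{verbatim}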
We then study the dependence of the FWM gain and the degree of intensity-difference squeezing on the experimental parameters, such as pump power, one-photon detuning, two-photon detuning, and the temperature of the vapor cell with the probe beam generated by the EOM sideband.
First, the effect of pump power is investigated [shown in Fig. 5(a)]. To do this, we measure the intensity-difference noise power for the twin beams and the corresponding SNL at 1 MHz as a function of pump power and then
calculate the difference between the mean values of the data points on these two curves. For simplicity, we fix $\Delta = 1.6$ GHz, $\delta = 0$ MHz, and $T$ = 112 $^{\circ}$C. It can be seen that both the gain and the degree
of intensity-difference squeezing increase with the pump power, in agreement with the expected enhancement of the optical nonlinearity. However, other nonlinear effects (such as self-focusing \cite{SelfFocusing})
set an experimental limit when the pump power is increased further.
Next, we set the pump power at 600 mW, $T$ = 112 $^{\circ}$C, and $\delta = 0$ MHz and scan the one-photon detuning from 0.8 to 3.7 GHz to study the impact of one-photon detuning [shown in Fig. 5(b)].
One can clearly see that quantum correlation, represented by the degree of intensity-difference squeezing, persists over most of this region except where the gain $G$ is close to 1, which shows our system operates very close to the quantum limit \cite{LettPRA,PooserOE}.
The dependence of the degree of quantum correlation on one-photon detuning can be understood in the following way. Far from resonance, the nonlinearity decreases, which reduces the gain and quantum correlation.
Close to resonance, absorption becomes dominant and thus also degrades quantum correlation.
We then turn our attention to the effect of two-photon detuning when setting the pump power at 600 mW, $T$ = 112 $^{\circ}$C, and $\Delta = 1.6$ GHz. As shown in Fig. 5(c), while the FWM gain decreases
monotonically as the two-photon detuning varies from $-$44 MHz to $+$48 MHz, the degree of quantum correlation displays an optimum value around 0 MHz. This can be attributed to the resonant enhancement effect.
At higher two-photon detuning, the gain decreases, and thus the degree of quantum correlation decreases. However, at lower two-photon detuning, other higher-order nonlinear effects become dominant,
which also degrades quantum correlation even though the gain increases.
Last, we study the dependence of intensity-difference squeezing on the temperature of the vapor cell [shown in Fig. 5(d)]. We fix the pump power at 600 mW, $\Delta = 1.6$ GHz, and $\delta = 0$ MHz.
Across the presented temperature range (98 $^{\circ}$C to 117 $^{\circ}$C), the gain increases dramatically due to the rapidly increasing atomic density. In contrast, the degree of intensity-difference squeezing displays a maximum value at 112 $^{\circ}$C.
At lower temperatures, the FWM gain is low; thus the quantum correlation between the twin beams is weak. As the temperature increases beyond 112 $^{\circ}$C, the degree of quantum correlation degrades because higher-order nonlinear effects
occur and absorption loss in the hot cesium vapor becomes more and more dominant.
\section{Conclusion}
In summary, we have measured 6.5-dB intensity-difference squeezing based on the FWM process in hot cesium vapor. Three different methods are used to generate the 9.2-GHz frequency-difference probe beam.
Our result shows that the EOM sideband is an optimal method to achieve a high degree and bandwidth of intensity-difference squeezing. Intensity-difference squeezing can be observed within a wide experimental parameter range, which guarantees its robust generation.
It is expected that a higher degree of intensity-difference squeezing can be achieved by using a shorter vapor cell and higher pump power. This multi-spatial-mode narrowband nonclassical light source near the Cs $D_{1}$ line paves the way to applications involving
quantum imaging and coherent interfaces between light and matter, such as atomic ensembles and solid-state systems.
This research was supported by the Key Project of the Ministry of Science and Technology of China (Grant No. 2016YFA0301402), the National Natural Science Foundation of China (Grants No. 61601270, No. 11474190, and No. 11654002),
the Applied Basic Research Program of Shanxi Province (Grant No. 201601D202006), and the Fund for Shanxi ``1331" Project Key Subjects Construction.
\end{document}
|
\begin{document}
\bibliographystyle{plain}
\theoremstyle{definition}
\newtheorem{defin}[equation]{Definition}
\newtheorem{lem}[equation]{Lemma}
\newtheorem{prop}[equation]{Proposition}
\newtheorem{thm}[equation]{Theorem}
\newtheorem{claim}[equation]{Claim}
\newtheorem{ques}[equation]{Question}
\newtheorem{remark}[equation]{Remark}
\newtheorem{fact}[equation]{Fact}
\newtheorem{axiom}[equation]{Technical Axiom}
\newtheorem{newaxiom}[equation]{New Technical Axiom}
\newtheorem{cor}[equation]{Corollary}
\newtheorem{exam}[equation]{Example}
\maketitle
\begin{abstract}
If $V$ is an analytic set in a pseudoconvex domain $\Omega$, we show there is always a pseudoconvex domain $G \subseteq \Omega$ that contains $V$ and has the property that every
bounded holomorphic function on $V$ extends to a bounded holomorphic function on $G$ with the same norm. We find such a $G$ for some particular analytic sets.
When $\Omega$ is an operhedron
we show there is a norm on holomorphic functions on $V$ that can always be preserved by extensions to $\Omega$.
\end{abstract}
\section{Motivation}
Let $\Omega\subset \mathbb{C}^n$ be a domain of holomorphy and $V$ be a subset. We study when
a bounded holomorphic function on $V$ extends to a bounded holomorphic function with the same norm defined on the whole domain $\Omega$. If $V$ is a discrete set, this is an interpolation problem.
In this note we are interested in the case when $V$ is a fatter set. It is then natural to assume it has an analytic structure.
The first significant result in this direction was obtained in \cite{agmc_vn}, where the simplest non-trivial case $\Omega = \mathbb{D}^2$ was studied. It was shown using operator theory that if $V\subset \mathbb{D}^2$ is algebraic, then it must be a retract to have the isometric extension property. The function theory argument of P. Thomas \cite{tho03} inspired further research: it turns out that a similar result holds for the $n$-dimensional Euclidean ball and more generally for strictly convex domains and strongly linearly convex domains in $\mathbb{C}^2$ \cite{kmc19}. In all these cases $V$ was shown to be a retract. Some of the methods developed there remain valid for strongly convex and linearly convex domains in $\mathbb{C}^n$ with arbitrary $n>1$. In particular, it was shown that $V$ was always totally geodesic with respect to the Kobayashi metric on $G$ --- consequently, it had no singularities.
The aim of this paper is to show a result in the opposite direction. Using a totally different approach, we prove that if $V$ is an analytic set in a pseudoconvex domain $\Omega$, there is always a pseudoconvex domain $G \subseteq \Omega$ that contains $V$ and has the property that every bounded holomorphic function on $V$ extends to a bounded holomorphic function on $G$ with the same norm. We find such a $G$ for some analytic sets. These particular examples show that the situation when $G$ is a convex domain while $V$ has singularities (that are either cusps or obtained as intersections of two curves) can occur. This is intriguing as the crucial role in the strategy of proving extension results has so far been played by the Lempert theory of invariant functions on convex domains.
\section{Introduction}
If $\Omega$ is a domain of holomorphy in $\mathbb{C}^n$, then an \emph{analytic set in $\Omega$} is a relatively closed set $V$ in $\Omega$ such that for each point $\lambda\in V$ there exist a neighborhood $U$ of $\lambda$ ($U$ is open in $\mathbb{C}^n$) and holomorphic functions $f_1, f_2,\ldots, f_m$ on $U$ such that
\[
V\cap U =\set{\mu\in U}{f_i(\mu)=0 \text{ for } i=1,2,\ldots,m}.
\]
We say that a function $f:V\to \mathbb{C}$ is \emph{holomorphic on $V$} if for each $\lambda \in V$ there exist an open set $U \subseteq \mathbb{C}^n$ containing $\lambda$ and a holomorphic function $F$ defined on $U$ such that $F(\mu) =f(\mu)$ for all $\mu \in V \cap U$.
The following fact is one of Henri Cartan's numerous profound contributions to several complex variables.
\begin{thm}
\label{thm11}
(Cartan Extension Theorem)
If $\Omega$ is a domain of holomorphy, $V$ is an analytic set in $\Omega$,
and $f$ is a holomorphic function defined on $V,$ then there exists a holomorphic function $F$ defined on $\Omega$ such that
\[
f = F\, |\, V.
\]
\end{thm}
This theorem, a consequence of Cartan's deep work in the theory of analytic sheaves, was originally presented in \cite{car51}. Accessible proofs can be found in the classic texts \cite{gur} and \cite{hor} (Theorem 7.4.8) and also in the more modern text \cite{tay} (Corollary 11.5.2).
This prompts the following definition.
\begin{defin}\label{car10}
We say that a pair $(\Omega,V)$ is a \emph{Cartan pair} if $\Omega$ is a domain of holomorphy in $\mathbb{C}^n$ for some $n$ and $V$ is an analytic set in $\Omega$.
\end{defin}
In this paper we shall be interested in cases where one can obtain bounds in Cartan's theorem.
If $(\Omega,V)$ is a Cartan pair, say that $(\Omega,V)$ has the \emph{bounded extension property} if every bounded holomorphic function on $V$ has a bounded extension to $\Omega$. In the case when $\Omega=\mathbb{D}^n$, the polydisc, geometric conditions which imply that $(\Omega,V)$ has the bounded extension property have been investigated by Herbert Alexander \cite{ale69}, Walter Rudin \cite{rud69} (Theorem 7.5.1), Edgar Lee Stout \cite{sto75}, Guennadi Henkin and Pierre Polyakov \cite{henpol84}, and Greg Knese \cite{kn08ua}. One lesson to be gleaned from these papers is that the bounded extension property is quite sensitive to the interaction of the boundary of $V$ and $\mathbb{T}^n$, the distinguished boundary of $\mathbb{D}^n$. For example, in \cite{ale69} it is shown that a certain class of varieties in $\mathbb{D}^n$ introduced by Rudin have the bounded extension property if and only if $\partial V \cap \mathbb{T}^d = \varnothing$ and in \cite{kn08ua} it is shown that if $V$ is an algebraic distinguished variety in $\mathbb{D}^2$ (i.e., $V$ is an algebraic set in $\mathbb{C}^2$ and $\partial (V \cap \mathbb{D}^2) \subseteq \mathbb{T}^2$) with no singularities on $\mathbb{T}^2$, then $(\mathbb{D}^2,V \cap \mathbb{D}^2)$ has the bounded extension property. Other work on the bounded extension property,
when $\Omega$ is assumed to be strictly pseudoconvex (but its boundary is not assumed to be smooth) and $V$ is assumed to have the form $V=D\cap \Omega$, where $D$ is a relatively closed complex submanifold of a neighborhood of $\Omega^-$, can be found in \cite{henlei84} and \cite{aac99}.
A stronger notion was studied in \cite{agmc_vn}, namely when an extension can always be found with the same norm. This will be the focus of the current paper.
\begin{defin}\label{car20}
If $(\Omega,V)$ is a Cartan pair, then we say that $(\Omega,V)$ has the \emph{norm preserving extension property} (or simply, $(\Omega,V)$ is an \emph{np pair}) if for every bounded holomorphic function $f$ on $V$ there exists a holomorphic function $F$ on $\Omega$ such that $F(\mu) =f(\mu)$ for all $\mu \in V$ and
\[
\sup_{\mu \in \Omega} |F(\mu)| = \sup_{\mu \in V} |f(\mu)|.
\]
\end{defin}
We organize our results in this paper around the following three fundamental problems associated with the study of np pairs. \\ \\
{\bf Problem A.} Given a domain of holomorphy $\Omega$, find all analytic sets $V$ in $\Omega$ such that $(\Omega,V)$ is an np pair.\\ \\
{\bf Problem B.} Given a Cartan pair $(\Omega,V)$, find all domains of holomorphy $G$ such that $(G,V)$ is an np pair.\\ \\
{\bf Problem C.} Given a Cartan pair $(\Omega,V)$, how can one concretely construct a domain of holomorphy $G$ such that $(G,V)$ is an np pair?
\section{Results}
\subsection{Problem A}
It is obvious that if $V$ is a holomorphic retract of $\Omega$ (which means there is a holomorphic
function $r: \Omega \to V$ which equals the identity on $V$) then $f \mapsto f \circ r$ is a norm preserving homomorphism that gives extensions.
On the bidisk this turns out to be the only way that a reasonably nice set can have the norm-preserving extension property.
We say that $V$ is relatively polynomially convex in a domain $\Omega$ if $\overline{V}$ is polynomially convex and $\overline{V} \cap \Omega = V$.
\begin{thm}
(Theorem 1.20 from \cite{agmc_vn}) Let $V$ be a nonempty relatively polynomially convex subset of $\mathbb{D}^2$. Every polynomial has a norm preserving extension to $\mathbb{D}^2$ if and only if $V$ is a holomorphic retract of $\mathbb{D}^2$.
\end{thm}
Non-trivial retracts of $\mathbb{D}^2$ are of the form $\{(z, \phi(z)) : z \in \mathbb{D}\}$
or $\{( \phi(z),z) : z \in \mathbb{D}\}$ for some holomorphic $\phi: \mathbb{D} \to \mathbb{D}$ \cite{hs81},
so in particular if $V$ is a relatively polynomially convex subset of $\mathbb{D}^2$
and $(\mathbb{D}^2, V)$ is norm preserving, then $V$ is a manifold.
Both these results hold more generally. In \cite{kmc19}, it was shown that if $(\Omega,V)$ is an np pair and $V$ is relatively polynomially convex, then $V$ must be a retract if any of the following hold:
\begin{itemize}
\item
$\Omega$ is a ball in any dimension;
\item
$\Omega$ is strictly convex in $\mathbb{C}^2$;
\item
$\Omega$ is strongly linearly convex in $\mathbb{C}^2$ with $C^3$ boundary.
\end{itemize}
It was also proved that $V$ must at least be a complex submanifold if either
$\Omega$ is the tridisk \cite{kmc20} or is strongly linearly convex with $C^3$ boundary in any dimension.
It came as a surprise, then, when the first author, together with Z. Lykova and
N. Young, solved Problem A for the case that $\Omega$ is the symmetrized bidisk and $V$
an algebraic subset \cite{aly19}; see also \cite{bhsa18}. It was shown that some special $V$'s arise that are not
only not retracts, but are not even submanifolds. This result spurred the current investigation.
\subsection{Problem B}
In Section \ref{sec3} we show that the geometry of $V$ has very little to do with whether
there is an np pair $(G,V)$.
{\bf Theorem \ref{exist.thm.20}.}
If $(\Omega,V)$ is a Cartan pair, then there exists $G \subseteq \Omega$ such that $(G,V)$ is a norm preserving Cartan pair if and only if $V$ is connected.
\vskip 5pt
In Section \ref{secd} we study the situation for the set $T = \mathbb{D} \times \{0 \} \cup \{ 0 \} \times \mathbb{D}$
in $\mathbb{C}^2$, consisting of two crossed disks.
We prove:
{\bf Theorem \ref{thm44}.}
Let $G$ be a balanced domain of holomorphy in $\mathbb{C}^2$ with $T\subseteq G$. $(G,T)$ is norm preserving if and only if $G \subseteq \Delta = \set{\lambda}{|\lambda_1|+|\lambda_2|<1}$.
\vskip 5pt
In Section \ref{seclinear}, we show that though $(\Delta, T)$ is np, there is no linear isometric extension.
Without the hypothesis of balanced, the description of all np pairs $(G,T)$ is more complicated.
{\bf Theorem \ref{thm45}.}
Let $G$ be a domain in $\mathbb{C}^2$. Then
$(G,T)$ is an np pair if and only if $T$ is a relatively closed subset of $G$ and there exist a pseudoconvex set $U$ in $\mathbb{C}^2$ and a function $\tau \mapsto C_\tau$ from $\mathbb{T}^2$ into $\mathrm{Hol}(U)$ such that
\begin{equation}
\label{eq46}
G= \bigcap_{\tau \in \mathbb{T}^2} \set{\lambda \in U}{|\tau \cdot \lambda +\lambda_1\lambda_2 C_\tau(\lambda)|<1}.
\end{equation}
\subsection{Problem C}
In Sections \ref{sec7} and \ref{sec8}, we study the set
\begin{equation}
\label{eq23}
\mathcal{V} \ =\ \set{z \in \mathbb{D}^3}{z_3^2 =z_1z_2} .
\end{equation}
We show how one is naturally led to consider the domain
\begin{equation}
\label{eq24}
\mathcal{G} \ = \
\set{z \in \mathbb{D}^3}{|z_1 z_2 - z_3^2| < (1-|z_3|^2) + \sqrt{1-|z_1|^2}\sqrt{1-|z_2|^2}} .
\end{equation}
{\bf Theorems \ref{thm814} and \ref{thm828}.}
Let $\mathcal{V}$ be given by \eqref{eq23}, and $\mathcal{G}$ be defined by \eqref{eq24}. Then
$\mathcal{G}$ is convex, and
$(\mathcal{G},\mathcal{V})$ is an np pair. Moreover, if $G$ is any balanced set in $\mathbb{C}^3$, then
$(G, \mathcal{V})$ is an np pair if and only if $G \subseteq \mathcal{G}$.
\vskip 5pt
Our construction depends on writing down a $2$-to-$1$ branched cover of $\mathcal{V}$ by $\mathbb{D}^2$,
and developing a model for even bounded holomorphic functions on $\mathbb{D}^2$ in Section \ref{sec6}.
\subsection{Noncommutative np extensions}
In Section \ref{sec9} we study np extensions in the free setting, and
prove in Theorems~\ref{free.thm.40} and \ref{free.thm.50} that in some generality,
norm preserving extensions always exist, but the norm that is preserved is not
the supremum of $|f|$ evaluated on points in $\mathbb{C}^d$, but rather the supremum of $\| f \|$ evaluated
on $d$-tuples of matrices.
This allows us to prove a version of Cartan's Theorem \ref{thm11}.
We restrict $\Omega$ to be an operhedron (a slight generalization of a polynomial polyhedron in $\mathbb{C}^n$) and
$V$ to be an algebraic subset, but we gain norm estimates: we show we can always extend a function
$f$ on $V$ to a function $F$ on $\Omega$ with the {\em same norm}, where again the norms are
given by taking the supremum of $\| f(x) \|$ (resp. $\| F (y) \|$) where $x$ ranges over
certain $d$-tuples of matrices associated with $V$, and $y$ ranges over $n$-tuples associated with $\Omega$.
See Theorem~\ref{free.thm.60} for an exact statement.
Although the proof of Theorem~\ref{free.thm.60} uses non-commutative function theory, the statement
is just a theorem about extending holomorphic functions from analytic sets to domains in $\mathbb{C}^n$.
\section{Existence of Norm Preserving Pairs}
\label{sec3}
Rudin \cite{rud69} was the first to exploit the following theorem, essentially a consequence of the Michael Selection Theorem \cite{mic56}, to study extensions of holomorphic functions defined on subvarieties of polydiscs.
\begin{thm}\label{exist.thm.10}
If $X$ and $Y$ are Fr\'echet spaces and $L:X \to Y$ is a continuous linear surjection, then there exists a continuous function $S:Y \to X$ satisfying $L(S(y))=y$ for all $y\in Y$.
\end{thm}
\begin{proof}
The theorem is a special case of the far more general Michael Selection Theorem \cite{mic56}. Alternatively, for a brief, direct proof consult the proof of Theorem 7.2.3 in \cite{rud69}.
\end{proof}
If $\Omega$ is a domain we let $\mathrm{Hol}(\Omega)$ denote the collection of holomorphic functions on $\Omega$, let $H^\infty(\Omega)$ denote the collection of bounded holomorphic functions on $\Omega$, and let $\mathscr{S}(\Omega)$ denote the Schur class of $\Omega$, i.e., the subset of $H^\infty(\Omega)$ defined by
\[
\mathscr{S}(\Omega)=\set{\phi \in \mathrm{Hol}(\Omega)}{\sup_{\lambda \in \Omega} |\phi(\lambda)|\le 1}.
\]
We let $\mathrm{Herg}(\Omega)$ denote the Herglotz class of $\Omega$, namely
\[
\mathrm{Herg}(\Omega)=\set{\phi \in \mathrm{Hol}(\Omega)}{ \forall \lambda \in \Omega, \ \Re \phi (\lambda) \geq 0 }.
\]
In similar fashion, if $(\Omega,V)$ is a Cartan pair, then we let $\mathrm{Hol}(V)$ denote the collection of holomorphic functions on $V$, let $H^\infty(V)$ denote the collection of bounded holomorphic functions on $V$, and let $\mathscr{S}(V)$ and $\mathrm{Herg}(V)$ denote the Schur and Herglotz classes of $V$, i.e., those subsets of $\mathrm{Hol}(V)$ defined by
\begin{eqnarray*}
\mathscr{S}(V) &\ =\ & \set{\phi \in \mathrm{Hol}(V)}{\sup_{\lambda \in V} |\phi(\lambda)|\le 1}\\
\mathrm{Herg}(V) &= & \set{\phi \in \mathrm{Hol}(V)}{ \forall \lambda \in V, \ \Re \phi (\lambda) \geq 0 }.
\end{eqnarray*}
It is well known and elementary that $\mathrm{Hol}(\Omega)$ is a Fr\'echet space when equipped with the topology of uniform convergence on compact subsets of $\Omega$. That $\mathrm{Hol}(V)$ is a Fr\'echet space as well, when so equipped, is a very deep result implicit in the original seminars of Cartan \cite{car51} (see Theorem~7.4.9 \cite{hor}). As a consequence, we are able to apply Theorem \ref{exist.thm.10} to prove the following highly useful technical result.
\begin{lem}\label{exist.lem.10}
If $(\Omega,V)$ is a Cartan pair, then there exists a continuous function $S:\mathrm{Hol}(V) \to \mathrm{Hol}(\Omega)$ such that $S(f)|V =f$ for all $f \in \mathrm{Hol}(V)$.
\end{lem}
\begin{proof}
By the Cartan Extension Theorem (Theorem 7.4.8 in \cite{hor}), the map $L$ defined by $L(F)=F|V$ is a continuous linear surjection from $\mathrm{Hol}(\Omega)$ onto $\mathrm{Hol}(V)$. Therefore, as $\mathrm{Hol}(\Omega)$ and $\mathrm{Hol}(V)$ are Fr\'echet spaces, the desired continuous function $S$ exists by Theorem \ref{exist.thm.10}.
\end{proof}
Another useful lemma is the following. Recall that a TVS is said to be a Montel space if it is barrelled and the Heine--Borel Theorem holds in it, i.e., a set is compact if and only if it is closed and bounded.
\begin{lem}\label{exist.lem.20}
If $(\Omega,V)$ is a Cartan pair, then $\mathrm{Hol}(V)$ is a Montel space.
\end{lem}
\begin{proof}
By \cite[Thm. 2.4.2]{tay}, $\mathrm{Hol}(\Omega)$ is a Montel space. By Cartan's theorem, $\mathrm{Hol}(V)$ is the quotient of $\mathrm{Hol}(\Omega)$ by the
closed subspace of functions vanishing on $V$, and so is a separated quotient.
By \cite[Prop. 11.1.1]{tay} a
separated quotient of a Montel space is Montel.
\end{proof}
Finally, we shall need the following result.
\begin{lem}\label{exist.lem.30}
Let $(\Omega,V)$ be a Cartan pair, and assume $V$ is connected. Fix a point $\lambda_0 \in V$, and let
\[
\mathscr{S}_{\lambda_0} (V) =\set{f \in \mathscr{S}(V)}{f(\lambda_0)=0}.
\]
Then $(\Omega,V)$ is norm preserving if and only if each $f \in \mathscr{S}_{\lambda_0}(V)$ has an extension to an element $F \in \mathscr{S}(\Omega)$.
\end{lem}
\begin{proof}
Let $f \in \mathscr{S}(V)$. If $|f(\lambda_0)|=1$, then by the Maximum Principle (\cite{gur} Theorem 16, Chapter III) $f$ is constant on $V$ and trivially has a norm preserving extension to $\Omega$. Otherwise, $|f(\lambda_0)|<1$ and there exists a M\"obius transformation $m$ of $\mathbb{D}$ such that $(m \circ f) (\lambda_0)=0$. As $h=m \circ f \in \mathscr{S}_{\lambda_0}(V)$, the assumption of the lemma implies that there exists $H \in \mathscr{S}(\Omega)$ such that $H|V=h$. But then if we define $F=m^{-1} \circ H$, $F \in \mathscr{S}(\Omega)$ and $F|V =f$.
\end{proof}
\begin{thm}\label{exist.thm.20}
If $(\Omega,V)$ is a Cartan pair, then there exists $G \subseteq \Omega$ such that $(G,V)$ is a norm preserving Cartan pair if and only if $V$ is connected.
\end{thm}
\begin{proof}
If $V$ is not connected, there is a non-trivial characteristic function in $H^\infty(V)$.
By the maximum principle, any extension of this to a pseudoconvex domain $G$ must have a larger
norm.
Conversely, assume $V$ is connected.
Fix $\lambda_0 \in V$ and let $S$ be as in Lemma \ref{exist.lem.10}. For each $f\in \mathscr{S}_{\lambda_0}(V)$ define $\Omega_f$ in $\Omega$ by
\[
\Omega_f=\set{\lambda \in \Omega}{|S(f)(\lambda)|<1}
\]
and let $G$ be the connected component of
\[
\Big(\bigcap_{f\in \mathscr{S}_{\lambda_0}(V)}\Omega_f\Big)^\circ
\]
containing $V$.
We first observe that, as every connected component of each $\Omega_f$ is a domain of holomorphy
\cite[Prop. 4.1.7]{jj20}, it follows that $G$ is a domain of holomorphy \cite[Cor. II.3.19]{ran}.
Hence, $(G,V)$ will be a Cartan pair if
$V \subseteq G$. Assume to the contrary that $\mu \in V \setminus G$. Then for each positive integer $n$, there exist $\mu_n \in B(\mu,1/n)\cap \Omega$ and $f_n \in \mathscr{S}_{\lambda_0}(V)$ with $|S(f_n)(\mu_n)| \ge 1$. As Lemma \ref{exist.lem.20} implies that $\mathscr{S}_{\lambda_0}(V)$ is a compact subset of $\mathrm{Hol}(V)$, there exists a subsequence $\{f_{n_k}\}$ and $f\in \mathscr{S}_{\lambda_0}(V)$ such that $f_{n_k} \to f$ in $\mathrm{Hol}(V)$ as $k \to \infty$. It follows by the continuity of $S$ that $|f(\mu)|\ge 1$. But as $f \in \mathscr{S}_{\lambda_0}(V)$, this contradicts the Maximum Principle.
Now observe that by construction,
\[
f\in \mathscr{S}_{\lambda_0}(V) \implies \sup_{\lambda \in G}|S(f)(\lambda)|\le 1.
\]
Hence, if $f\in \mathscr{S}_{\lambda_0}(V)$ and
\[
\sup_{\lambda \in V}|f(\lambda)|=\rho \not=0,
\]
then, as $\rho^{-1}f \in \mathscr{S}_{\lambda_0}(V)$,
\[
\sup_{\lambda \in G}|S(\rho^{-1}f)(\lambda)|\le 1,
\]
which implies that $\rho S(\rho^{-1}f)$ is a norm preserving extension of $f$ to $G$. Consequently, Lemma \ref{exist.lem.30} implies that $(G,V)$ is norm preserving.
\end{proof}
\begin{remark} Some authors, such as H\"ormander \cite{hor}, do not require that a domain of holomorphy be connected; in this case the connectivity of $V$ can then also be dropped.
Indeed, we just need to find disjoint pseudoconvex subdomains of $\Omega$ each of which contains at most one
component of $V$. To do this, let $\{ V_j : j = 1, \dots \}$ be an enumeration of the at most countably many components of $V$. Let $f$ be the function that is $0$ on $V_1$ and $1$ on
each other $V_j$.
Since $(\Omega,V)$ is a Cartan pair, there is a holomorphic $F$ on $\Omega$ extending $f$.
Let $\Omega_1 = \{ \lambda \in \Omega : |F(\lambda) | < 1/2 \}$. Let $D = \{ \lambda \in \Omega : |F(\lambda) - 1 | < 1/2 \}$.
Apply Theorem~\ref{exist.thm.20} to $(\Omega_1, V_1)$ to get a norm preserving pair $(G_1, V_1)$
with $G_1 \subseteq \Omega_1$. Now replace $(\Omega,V)$ with $(D, \cup_{j \geq 2} V_j)$ and continue by induction.
\end{remark}
\section{Problems B and C for Two Crossed Discs}
\label{secd}
In this section we let $D_1$ and $D_2$ be the two sets in $\mathbb{C}^2$ defined by
\[
D_1=\set{(z,0)}{z \in \mathbb{D}}\qquad \text{ and }\qquad D_2 =\set{(0,z)}{z\in\mathbb{D}}
\]
and define an analytic set $T$ in $\mathbb{C}^2$ by
\[
T = D_1 \cup D_2.
\]
It is easy to construct domains of holomorphy $\Omega$ in $\mathbb{C}^2$ such that $(\Omega,T)$ is a Cartan pair. For example, as
\[
T=\set{\lambda \in \mathbb{D}^2}{\lambda_1\lambda_2=0},
\]
$(\mathbb{D}^2,T)$ is a Cartan pair. In this section we shall identify conditions which will guarantee that $(\Omega,T)$ is a norm preserving Cartan pair.
\begin{lem}
\label{lem41}
A function $f$ is holomorphic on $T$ if and only if there exist holomorphic functions $f_1$ and $f_2$ defined on $\mathbb{D}$ such that $f_1(0)=f_2(0)$ and $f$ is given by the formula
\begin{equation}
\label{eq41}
f(\lambda) =
\left\{
\begin{array}{ll}
f_1(z) & \mbox{if } \lambda=(z,0)\in D_1 \\ \\
f_2(z) & \mbox{if } \lambda=(0,z)\in D_2.
\end{array}
\right.
\end{equation}
\end{lem}
The proof of Lemma~\ref{lem41} is
straightforward
and follows from the fact that a function given by formula \eqref{eq41} can be extended to a neighborhood of $T$ (a possible extension to $\mathbb{D}^2$ is given by the formula $F(z,w) = f(z,0) + f(0,w) - f(0,0)$).
Let $\mathrm{Herg}_{\lambda_0}(V)$ denote the holomorphic functions of positive real part that map
$\lambda_0$ to $1$.
For each $\tau \in \mathbb{T}^2$, we may define $b_\tau \in \mathscr{S}_{(0,0)}(T)$ and $h_\tau \in \mathrm{Herg}_{(0,0)} (T)$ by the formulas
\[
b_\tau(\lambda) =
\left\{
\begin{array}{ll}
\tau_1 z & \mbox{if } \lambda=(z,0)\in D_1 \\ \\
\tau_2 z & \mbox{if } \lambda=(0,z)\in D_2
\end{array}
\right.
\]
and
\[
h_\tau(\lambda) =
\left\{
\begin{array}{ll}
\frac{1+\tau_1 z}{1-\tau_1 z} & \mbox{if } \lambda=(z,0)\in D_1 \\ \\
\frac{1+\tau_2 z}{1-\tau_2 z} & \mbox{if } \lambda=(0,z)\in D_2.
\end{array}
\right.
\]
Since the map
\[
C: z \mapsto \frac{1 + z}{1-z}
\]
is a conformal map from the unit disk onto the right half-plane, it is immediate
that one can extend the function $f$ from $\mathscr{S}(T)$ to $\mathscr{S}(G)$ if and only if
one can extend $C \circ f$ from $\mathrm{Herg}(T)$ to $\mathrm{Herg}(G)$.
\begin{lem}\label{lem42}
If $(G,T)$ is a Cartan pair, then $(G,T)$ is norm preserving if and only if for each $\tau \in \mathbb{T}^2$, there exists $B_\tau \in \mathscr{S}(G)$ such that $B_\tau|T=b_\tau$.
\end{lem}
\begin{proof}
First note that necessity is obvious. To show sufficiency assume that for each $\tau \in \mathbb{T}^2$, there exists $B_\tau \in \mathscr{S}(G)$ such that $B_\tau|T=b_\tau$, or equivalently
that
\[
\forall_{\tau\in \mathbb{T}^2}\ \ \exists_{H_\tau \in \mathrm{Herg}(G)}\ \
H_\tau |T = h_\tau.
\]
To prove that $(G,T)$ is a norm preserving pair, by Lemma \ref{exist.lem.30} it suffices to show that if $h \in \mathrm{Herg}_{(0,0)}(T)$ then there exists $H\in \mathrm{Herg}(G)$ such that $H|T=h$.
Assume that $h \in \mathrm{Herg}_{(0,0)}(T)$. By Lemma \ref{lem41}, there exist $h_1,h_2 \in \mathrm{Herg}_0(\mathbb{D})$ such that
\[
h(\lambda) =
\left\{
\begin{array}{ll}
h_1(z) & \mbox{if } \lambda=(z,0)\in D_1 \\ \\
h_2(z) & \mbox{if } \lambda=(0,z)\in D_2.
\end{array}
\right.
\]
By the Herglotz Representation Theorem, there exist probability measures $\mu_1,\mu_2$ on $\mathbb{T}$ such that
\[
h_1(z) = \int \frac{1+\tau z}{1-\tau z}\ d\mu_1(\tau)\qquad \text{ and }\qquad
h_2(z) = \int \frac{1+\tau z}{1-\tau z}\ d\mu_2(\tau).
\]
Let $\mu=\mu_1\times\mu_2$ (so that $\mu$ is a probability measure on $\mathbb{T}^2$).
First, suppose that the map $\tau \mapsto H_\tau$ is measurable.
Then we define $H$ by the formula
\[
H(\lambda)=\int_{\tau \in \mathbb{T}^2} H_\tau(\lambda)\ d\mu(\tau).
\]
Clearly, as $\mu$ is a probability measure and $H_\tau\in \mathrm{Herg}(G)$ for all $\tau \in \mathbb{T}^2$, $H\in \mathrm{Herg}(G)$.
If $\lambda =(z,0) \in D_1$,
\begin{align*}
H(\lambda) &= \int_{\tau \in \mathbb{T}^2} H_\tau(\lambda)\ d\mu(\tau)\\
&= \int_{\tau \in \mathbb{T}^2} h_\tau(\lambda)\ d\mu(\tau)\\
&=\int_{\tau_2\in \mathbb{T}}\Big(\int_{\tau_1\in \mathbb{T}} \frac{1+\tau_1 z}{1-\tau_1 z}\ d\mu_1(\tau_1)\Big)\ d\mu_2(\tau_2)\\
&=h_1(z)\\
&=h(\lambda).
\end{align*}
Likewise, if $\lambda =(0,z) \in D_2$, $H(\lambda)= h(\lambda)$.
Consequently, $H\in \mathrm{Herg}(G)$ and $H|T=h$, as was to be proved.
Now, let us drop the measurability hypothesis; we shall use an approximation argument to get around
the difficulty.
Let $K_n$ be a compact exhaustion of $G$. It thus follows
(use e.g. a Montel type argument) that there is a constant $\delta_{n} > 0$ so that
\[ \| B_\tau \|_{K_n} \ \leq \ 1 - \frac{1}{\delta_{n}} \quad \forall \ \tau \in \mathbb{T}^2 .
\]
Let
\[
M_{n} \ := \
\sup \{ \| \lambda \|_{\infty} : \lambda \in K_n \} \ < \infty .
\]
Choose a finite set of points $F_{n}$ in $\mathbb{T}^2$ so that every point in
$\mathbb{T}^2$ is less than $\delta_{n}/M_{n}$ away from some point of $F_{n}$ in the $\ell^1$ norm. Let
$\alpha: \mathbb{T}^2 \to F_{n}$ be a Borel map such that $\| \alpha(\tau) - \tau \|_{\ell^1} < \delta_{n}/M_{n}$ for all $\tau$.
Write
\[
B_\tau(\lambda) \ = \ \lambda_1 \tau_1 + \lambda_2 \tau_2 +
\lambda_1 \lambda_2 \Gamma_\tau(\lambda).
\]
Define
\[
B^{(n)}_\tau(\lambda) \ = \ \lambda_1 \tau_1 + \lambda_2 \tau_2
+
\lambda_1 \lambda_2 \Gamma_{\alpha(\tau)} (\lambda).
\]
Then the map $\tau \mapsto B^{(n)}_\tau$ is Borel, and as
\[
| B^{(n)}_\tau (\lambda) - B_{\alpha(\tau)}(\lambda) |
\ \leq \ M_{n} \| \tau - \alpha(\tau) \|_{\ell^1} \| \lambda \|_{\ell^\infty},
\]
we have that $ |B^{(n)}_\tau | < 1$ on $K_n$.
So if we define
$H^{(n)}_\tau = C \circ B^{(n)}_\tau$,
then $H^{(n)}_\tau$ is a holomorphic map from $K_n$ to the right half-plane.
Let
\[
H^{(n)}(\lambda)=\int_{\tau \in \mathbb{T}^2} H^{(n)}_\tau(\lambda)\ d\mu(\tau).
\]
The sequence $H^{(n)}$ will have a subsequence that converges uniformly on
compact subsets of $G$ to some function $H$ in $\mathrm{Herg}(G)$; this will be our
desired extension of $h$.
\end{proof}
For $v$ a unit vector in $\mathbb{C}^2$ and $G$ a neighborhood of the origin in $\mathbb{C}^2$ let
\[
R_v = \sup\ \set{r}{\forall_{z\in \mathbb{C}}\ \ |z|<r \implies zv \in G}.
\]
\begin{lem}\label{lem43}
Assume that $G$ is a neighborhood of the origin in $\mathbb{C}^2$ and $v$ is a unit vector in $\mathbb{C}^2$. If $(G,T)$ is np, then $R_v \le \frac{1}{|v_1|+|v_2|}$.
\end{lem}
\begin{proof}
By Lemma \ref{lem42}, for each $\tau\in \mathbb{T}^2$, there exists $B_\tau \in \mathscr{S}(G)$ such that $B_\tau|T=b_\tau$. Evidently, we have that
\[
\frac{\partial B_\tau}{\partial \lambda_1}(0,0)=\tau_1\qquad \text{ and }\qquad
\frac{\partial B_\tau}{\partial \lambda_2}(0,0)=\tau_2.
\]
Hence, by the chain rule, if we define an analytic function $f$ on $\mathbb{D}$ by the formula
\[
f(z)=B_\tau(zR_v v),
\]
then
\[
f'(0)=(\tau_1v_1 +\tau_2v_2)R_v.
\]
Since $B_\tau\in\mathscr{S}(G)$ and $f(0)=b_\tau(0)=0$, Schwarz's Lemma gives
\[
|\tau_1v_1 +\tau_2v_2|\, R_v \le 1.
\]
As $\tau \in \mathbb{T}^2$ is arbitrary, the result follows.
\end{proof}
\begin{thm}\label{thm44}
Let $G$ be a balanced domain of holomorphy in $\mathbb{C}^2$ with $T\subseteq G$. Then $(G,T)$ is norm preserving if and only if $G \subseteq \set{\lambda}{\ |\lambda_1|+|\lambda_2|<1}$.
\end{thm}
\begin{proof}
Lemma \ref{lem43} implies necessity and Lemma \ref{lem42} implies sufficiency.
\end{proof}
In the unbalanced case things are not so clean. However, the following theorem obtains.
\begin{thm} \label{thm45}
Let $G$ be a domain in $\mathbb{C}^2$. Then
$(G,T)$ is an np pair if and only if $T$ is a relatively closed subset of $G$ and there exist a pseudoconvex set $U$ in $\mathbb{C}^2$ and a function $\tau \mapsto C_\tau$ from $\mathbb{T}^2$ into $\mathrm{Hol}(U)$ such that
\begin{equation}
\label{eq46}
G= \bigcap_{\tau \in \mathbb{T}^2} \set{\lambda \in U\ }{\ \ |\tau \cdot \lambda +\lambda_1\lambda_2 C_\tau(\lambda)|<1}.
\end{equation}
\end{thm}
\begin{proof}
Suppose $G$ has the form \eqref{eq46}. Since $U$ is pseudoconvex, and the pre-image
of a pseudoconvex set under a holomorphic map is pseudoconvex \cite[Prop. 4.1.7]{jj20}, each of the sets indexed by $\tau$ on the right-hand side of \eqref{eq46} is
pseudoconvex. Therefore the interior of their intersection is pseudoconvex
\cite[Cor. II.3.19]{ran}, and since $G$ is assumed open, this means that
$G$ is a domain of holomorphy, and hence $(G,T)$ is a Cartan pair.
Every $b_\tau$ has an extension to the function
\begin{equation}
\label{eq47}
B_\tau(\lambda) \ = \
\tau \cdot \lambda +\lambda_1\lambda_2 C_\tau(\lambda)
\end{equation}
in $\mathscr{S}(G)$, so by Lemma \ref{lem42} we conclude that $(G,T)$ is np.
Conversely, suppose $(G,T)$ is np. By Lemma \ref{lem42}, there is a map
$\tau \mapsto B_\tau$ where each $B_\tau$ is a norm preserving extension of $b_\tau$, and therefore of the form \eqref{eq47}. Choosing $U = G$ we get \eqref{eq46}.
\end{proof}
Let $\Delta = \set{\lambda}{\ |\lambda_1|+|\lambda_2|<1}$.
Using Theorem \ref{thm45} one can construct simple $G$'s that neither contain nor are contained in $\Delta$ and such that $(G,T)$ is norm preserving---see e.g. Example \ref{exam49}.
However, one cannot construct $G$'s that properly contain $\Delta$ such that $(G,T)$ is norm preserving.
\begin{thm}
If $G$ properly contains $\Delta$, then $(G,T)$ is not norm preserving.
\end{thm}
\begin{proof}
There is some point $P$ on $\partial \Delta$ that is not in $\partial G$.
Therefore the intersection of the complex line through $0$ and $P$ with $G$ is a set $U$ that properly
contains the unit disk of that line.
Define
\[
f(\lambda) =
\left\{
\begin{array}{ll}
\frac{\overline{P_1}}{|P_1|} z & \mbox{if } \lambda=(z,0)\in D_1 \\[4pt]
\frac{\overline{P_2}}{|P_2|} z & \mbox{if } \lambda=(0,z)\in D_2,
\end{array}
\right.
\]
where we interpret $0/0$ as $0$. If $F$ were a norm-preserving extension of $f$ to $G$, then $g(\zeta)=F(\zeta P)$ would be a function on $U$ with $|g|\le 1$, $g(0)=0$ and $g'(0)=|P_1|+|P_2|=1$; by Schwarz's lemma $g(\zeta)=\zeta$, which is impossible because $U$ properly contains the unit disk. Hence $f$ has no norm-preserving extension to $G$.
\end{proof}
\mathcal{B}egin{equation}gin{exam}
\lambdaabel{exam49}
Define
\[
C_\mathbb{T}au(\lambdaambda) \ = \ (\mathbb{T}au \mathcal{M}athbb{C}^dot \lambdaambda) ,
\]
and define $G$ by \eqref{eq46}, where $U = \mathcal{M}athbb{D}^2$.
Then
\[
G \ = \ \{ \lambda \in \mathcal{M}athbb{D}^2 | \ ( | \lambda_1 | + | \lambda_2 |) | 1 + \lambda_1 \lambda_2 | < 1 \} ,
\]
so $G$ is open and connected, and hence $(G,T)$ is an np pair.
However $G$ is not contained in $\Delta$, since it contains
the points $(r,-r)$ for $0 \lambdaeq r < 1$.
\end{exam}
\section{Linear isometric extension operators}
\label{seclinear}
In \cite[Sec. 7.5]{rud69} Rudin asks: if $(\Omega,V)$ is a Cartan pair with the
bounded extension property, is there a bounded linear extension operator? In this section we show that the answer is no in the case of norm preserving extensions.
We shall say that a Cartan pair $(\Omega,V)$ has the {\em linear norm preserving extension property}
if there is a linear isometry $E: H^\infty(V) \to H^\infty(\Omega)$ such that $(E f)(\lambda) = f(\lambda)$
for every $\lambda$ in $V$.
We show in Theorem \ref{thm51} that $(\Delta,T)$ does not have the linear norm preserving extension property.
There are Cartan pairs $(G,T)$ that do have the linear norm preserving
extension property.
Let
\[
m_a(z) \ =\ \frac{a - z}{1 - \bar a z}
\]
be the M\"obius map swapping $a$ and $0$.
Whenever $f_1, f_2$ are in $\mathrm{Hol}(\mathbb{D})$ and
$f_1(0) = f_2(0)$, let
$\Psi_{f_1,f_2}$ be the function in $\mathrm{Hol}(T)$ given by the right-hand side of \eqref{eq41}.
\begin{thm}
\label{thm51}
The pair $(\Delta, T)$ does not have the linear
norm preserving extension property.
\end{thm}
\begin{proof}
Suppose there were an isometric linear extension operator $E$.
Let the extensions of $\Psi_{z,0}$ and $\Psi_{0,z}$ be given by
\begin{eqnarray*}
E(\Psi_{z,0}) (\lambda) &\ = \ & \lambda_1 + \lambda_1 \lambda_2 f_1(\lambda) \\
E(\Psi_{0,z}) (\lambda) &\ = \ & \lambda_2 + \lambda_1 \lambda_2 g_1(\lambda),
\end{eqnarray*}
where $f_1,g_1$ are holomorphic functions on $\Delta$.
For unimodular $\tau_1$ and $\tau_2$ we get, by linearity of $E$, that
$$
E( \Psi_{\tau_1 z, \tau_2 z}) : \lambda \mapsto \tau_1 \lambda_1 + \tau_2 \lambda_2 + \lambda_1 \lambda_2 (\tau_1 f_1(\lambda) + \tau_2 g_1(\lambda))$$
maps $\Delta$ into the closed unit disc. For $t\in [0,1]$,
$$\psi_t:\zeta \mapsto E(\Psi_{\tau_1 z, \tau_2 z})(\bar\tau_1 t \zeta, \bar \tau_2 (1-t) \zeta)$$ is thus a well defined holomorphic
self-map of the closed unit disc such that $\psi_t(0)=0$ and $\psi'_t(0)=1$. Therefore, $\psi_t(\zeta) = \zeta$ for every $\zeta \in \mathbb{D}$ and $t\in [0,1]$.
So $\tau_1f_1 + \tau_2 g_1 \equiv 0$ on the
three real dimensional set $\{(\bar\tau_1 t \zeta, \bar \tau_2 (1-t) \zeta):\ \zeta\in \mathbb{D},\ t\in [0,1]\}.$ Consequently $\tau_1 f_1 + \tau_2 g_1 = 0$, and as this is true for all choices of
$\tau$, we get $f_1=g_1=0$.
By the maximum principle, $E(1) = 1$, so $E$ is the identity on all affine polynomials.
Now let us consider $\Psi_{m_a(z), m_a(\omega z)}$, where $\omega \in \mathbb{T}$ and $a \in \mathbb{D} \setminus \{ 0 \}$.
Since every extension of $\Psi_{m_a(z), m_a(\omega z)}$ to $\Delta$ agrees with $m_a(\lambda_1)+m_a(\omega\lambda_2)-a$ on $D_1\cup D_2$, we may write
$$
E(\Psi_{m_a(z), m_a(\omega z)})(\lambda) = m_a(\lambda_1) + m_a(\omega \lambda_2) - a + \lambda_1 \lambda_2\, h(\lambda)$$
for some $h \in \mathrm{Hol}(\Delta)$; in particular the value at $0$ is $a$ and the gradient at $0$ is $-(1-|a|^2)\,(1,\omega)$.
It follows from the Schwarz lemma that the function
\[
\zeta \mapsto E(\Psi_{m_a(z), m_a(\omega z)})(\zeta/2, \bar \omega \zeta/2)
\]
is equal to the M\"obius function $m_a(\zeta)$.
It follows that the mappings $E(\Psi_{m_a(z), m_a(\omega z)})(\lambda)$ and $m_a(\lambda_1 + \omega \lambda_2)$ coincide on $\lambda_1=0$, $\lambda_2=0$ and $\lambda_2 = \bar \omega \lambda_1$; in particular
\begin{equation}\label{eq: ma1}
E(\Psi_{m_a(z), m_a(\omega z)})(\lambda) = m_a(\lambda_1+ \omega \lambda_2) + \lambda_1 \lambda_2 (\bar \omega \lambda_1 - \lambda_2) F_{\omega}(\lambda_1,\lambda_2)
\end{equation}
for some $F_\omega$ holomorphic on $\Delta$.
To get a contradiction we shall compute $E(\Psi_{m_a(z), m_a(\omega z)})$ using a Taylor expansion of $m_a$. Let $f_n,g_n$ be functions in $\mathrm{Hol}(\Delta)$ such that
\mathcal{B}egin{equation}gin{eqnarray*}
E(\Psi_{z^n,0}) (\lambda) & \ = & \lambda_1^n + \lambda_1 \lambda_2 f_n(z) \\
E(\Psi_{0,z^n}) (\lambda) &= & \lambda_2^n + \lambda_1 \lambda_2g_n(\lambda) .
\end{eqnarray*}
Write $m_a(z) = \sum_n a_n z^n$, where $a_0 = a$ and $a_n = - (1-|a|^2) \mathcal{B}ar a^{n-1}$ for $n \geq 1$.
Then
\mathcal{B}egin{equation}gin{eqnarray*}
E(\Psi_{m_a(z), m_a(\mathcal{M}athcal{O}mega z)})(\lambda) &\ =\ & E(\Psi_{m_a, 0}) (\lambda) + E(\Psi_{0,m_a( \mathcal{M}athcal{O}mega z)})(\lambda) - a \\
&=&
a - (1-|a|^2) (\lambda_1 + \mathcal{M}athcal{O}mega \lambda_2) + \sum_{n\geq 2} a_n ( \lambda_1^n + \mathcal{M}athcal{O}mega^n \lambda_2^n) + \\
&&\quad \lambda_1 \lambda_2
\sum_{n\geq 2} a_n ( f_n(\lambda) + \mathcal{M}athcal{O}mega^n g_n(\lambda))\\
&=&
m_a(\lambda_1 + \mathcal{M}athcal{O}mega \lambda_2) + 2 \mathcal{M}athcal{O}mega \mathcal{B}ar a (1-|a|^2) \lambda_1 \lambda_2 +\\
&&\quad \lambda_1 \lambda_2\sum_{n\geq 2} a_n f_n(\lambda) + \lambda_1 \lambda_2\sum_{n\geq 2} \mathcal{M}athcal{O}mega^n a_n g_n (\lambda) + O(||\lambda||^3).
\end{eqnarray*}
Comparing the last equation with \eqref{eq: ma1}, we see that the coefficients of $\lambda_1 \lambda_2$
must agree, which yields
\mathcal{B}egin{equation}gin{equation}
0 \ = \ 2 \mathcal{M}athcal{O}mega \mathcal{B}ar a (1-|a|^2) + \sum_{n\geq 2} a_n (f_n(0) + \mathcal{M}athcal{O}mega^n g_n(0)) .
\lambdaabel{eq53}
\end{equation}
As \eqref{eq53} holds for all $\mathcal{M}athcal{O}mega\in \mathbb{T}$, we get $a=0$, a contradiction.
\end{proof}
By Theorem \ref{thm44}, $(\Delta,T)$ does have the norm preserving extension
property. One can also write down a simple formula to see this.
If $f$ is given by \eqref{eq41}, has norm one and is not constant, let $a = f(0)$.
Then the function
\[
F(\lambda_1, \lambda_2) \ =\ m_a \Bigl( m_a\bigl(f(\lambda_1,0)\bigr) + m_a\bigl(f(0,\lambda_2)\bigr) \Bigr)
\]
is a norm-preserving extension of $f$.
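The verification that $F$ restricts to $f$ and is bounded by one is a short Schwarz lemma computation; it can also be checked numerically. The Python sketch below is purely illustrative: the particular inner functions \texttt{f1}, \texttt{f2} (with equal value at $0$) and the random sampling are our own ad hoc choices, not data from the text.
\begin{verbatim}
# Numerical sanity check of the explicit extension
#   F = m_a( m_a(f(.,0)) + m_a(f(0,.)) )
# for a sample pair f1, f2 with f1(0) = f2(0) = a.
import numpy as np

a = 0.3
def m(a, z):                      # Mobius map swapping a and 0
    return (a - z) / (1 - np.conj(a) * z)

def f1(z): return (z + a) / (1 + a * z)        # inner, f1(0) = a
def f2(z): return (z**2 + a) / (1 + a * z**2)  # |f2| <= 1 on D, f2(0) = a

def f(l1, l2):                    # f = Psi_{f1,f2} on T = D1 u D2
    return f1(l1) if l2 == 0 else f2(l2)

def F(l1, l2):                    # candidate extension to Delta
    return m(a, m(a, f1(l1)) + m(a, f2(l2)))

rng = np.random.default_rng(0)
# F restricts to f on both discs through the origin
for z in rng.uniform(-0.99, 0.99, 20):
    assert abs(F(z, 0) - f(z, 0)) < 1e-12
    assert abs(F(0, z) - f(0, z)) < 1e-12
# |F| <= 1 on Delta = { |l1| + |l2| < 1 }
for _ in range(2000):
    r1, r2 = rng.uniform(0, 1, 2)
    t1, t2 = rng.uniform(0, 2*np.pi, 2)
    if r1 + r2 < 1:
        l1, l2 = r1*np.exp(1j*t1), r2*np.exp(1j*t2)
        assert abs(F(l1, l2)) <= 1 + 1e-12
\end{verbatim}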
The singularity in $T$ does not prevent the existence of np pairs $(G,T)$ with the linear
norm preserving extension property.
\begin{thm}
There is a domain of holomorphy $G$ containing $T$ so that $(G,T)$ has the linear
norm preserving extension property.
\end{thm}
\begin{proof}
Define a linear extension operator $E$ by
\[
E(\Psi_{f_1, f_2}) (\lambda) \ = \ f_1(\lambda_1) + f_2(\lambda_2) - f_1(0) .
\]
Define $G$ by
\begin{equation}
\label{eq54}
G \ = \ \left[\, \bigcap_{f \in H^\infty(T), \ f \ \text{non-constant} } \{ \lambda \in \mathbb{C}^2 :\ | E \circ f (\lambda) | < \| f \|_T \}
\right]^\circ .
\end{equation}
By \cite[Cor. II.3.19]{ran}, $G$ is a domain of holomorphy, provided it is non-empty. So we need to prove that
$T \subset G$.
Define $H \subset \mathbb{D}^2$ by
\[
H \ = \ \Bigl\{\lambda\in\mathbb{D}^2 :\ 
\frac{|\lambda_2|}{1-|\lambda_2|} < \frac{1}{2}\,\frac{1 - |\lambda_1|}{1+|\lambda_1|} \Bigr\}
\cup \Bigl\{ \lambda\in\mathbb{D}^2 :\ \frac{|\lambda_1|}{1-|\lambda_1|} < \frac{1}{2}\,\frac{1 - |\lambda_2|}{1+|\lambda_2|} \Bigr\} .
\]
We will show that $H \subseteq G$, and as $T \subset H$, we will be done.
Choose $\lambda \in H$, and assume without loss of generality that
it satisfies the first inequality. Let $f = \Psi_{f_1,f_2}$ be a non-constant function
in $H^\infty(T)$, and assume $\| f \|_T = 1$. Let $c = |f(0)|$. Then
\mathcal{B}egin{equation}gin{eqnarray*}
|E \mathbb{C}irc f (\lambda) | & \ \lambdaeq \ & | f_1(\lambda_1) | + |f_2(\lambda_2) - f_2(0) |\\
&\lambdaeq &
\mathcal{F}rac{c + |\lambda_1|}{1 + |\lambda_1| c}
+
\mathcal{F}rac{|\lambda_2|}{1-|\lambda_2|}(1 - c^2) \\
&\lambdaeq &
\mathcal{F}rac{c + |\lambda_1|}{1 + |\lambda_1| c}
+
\mathcal{F}rac{1}{2}\mathcal{F}rac{1 - |\lambda_1|}{1+|\lambda_1|}
(1 - c^2) \\
& < &
\mathcal{F}rac{c + |\lambda_1|}{1 + |\lambda_1| c}
+
\mathcal{F}rac{1 - |\lambda_1|}{1+|\lambda_1|}
(1 - c) \\
& < &
\mathcal{F}rac{c + |\lambda_1|}{1 + |\lambda_1| c}
+
\mathcal{F}rac{1 - |\lambda_1|}{1+|\lambda_1|c}
(1 - c) \\
&=& 1.
\end{eqnarray*}
In the second line we used Lemma \mathbb{R}ef{lem55}, and in the third the defining inequality for $H$.
We have shown that every point $\lambda$ in $H$ satisfies $|E \mathbb{C}irc f (\lambda) | < \| f \|_T$ for every non-constant
function in $\mathcal{M}athcal{H}inf(T)$, and since $H$ is open we conclude that $H \subseteq G$, as desired.
\end{proof}
The following lemma is a straightforward consequence of the Schwarz-Pick lemma.
\begin{lem} \label{lem55}
Suppose $g: \mathbb{D} \to \mathbb{D}$ is holomorphic. Then for all $z \in \mathbb{D}$
\begin{eqnarray}
| g(z) | & \ \leq \ & \frac{|g(0)| + |z|}{1 + |z| |g(0)|} \label{eq56}\\
|g(z) - g(0)| & \leq & \frac{|z|}{1-|z|}\bigl(1 - |g(0)|^2\bigr) . \label{eq57}
\end{eqnarray}
\end{lem}
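The two bounds of Lemma \ref{lem55} are elementary to test numerically. The following Python sketch does so on random sample points; the self-map $g$ used here is an arbitrary illustrative choice, not taken from the text.
\begin{verbatim}
# Spot-check of the two Schwarz-Pick bounds of Lemma 5.5
# for a sample holomorphic self-map g of the disc with g(0) = 0.4.
import numpy as np

def g(z):
    return (z**3 + 0.4) / (1 + 0.4 * z**3)

rng = np.random.default_rng(1)
c = abs(g(0))
for _ in range(5000):
    r, t = rng.uniform(0, 1), rng.uniform(0, 2*np.pi)
    z = r * np.exp(1j * t)
    assert abs(g(z)) <= (c + abs(z)) / (1 + abs(z) * c) + 1e-12
    assert abs(g(z) - g(0)) <= abs(z) / (1 - abs(z)) * (1 - c**2) + 1e-12
\end{verbatim}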
\begin{ques}
Is there an analogue of Theorem \ref{exist.thm.20} for the linear
norm preserving extension property?
\end{ques}
\section{The Representation of Even Schur Functions on the Bidisc}
\lambdaabel{sec6}
For use in Section \mathbb{R}ef{sec7}, we derive a model and a representation
for even Schur functions on the bidisc.
A {\em decomposed Hilbert space}
is a Hilbert space $\mathcal{M}$ together with two orthogonal subspaces
$\mathcal{M}_1$ and $\mathcal{M}_2$ satisfying $\mathcal{M}=\mathcal{M}_1 \mathcal{M}athcal{O}plus \mathcal{M}_2$.
By \mathbb{C}ite{ag90}, a function $\phi: \mathcal{M}athbb{D}^2 \mathbb{T}o \mathbb{C}$ is in $\mathcal{M}athscr{S}(\mathcal{M}athbb{D}^2)$ if
and only if there exist a separable decomposed Hilbert space $\mathcal{M}=\mathcal{M}_1 \mathcal{M}athcal{O}plus \mathcal{M}_2$,
and a
function $v:\mathcal{M}athbb{D}^2 \mathbb{T}o \mathcal{M}$ such that
\mathcal{B}egin{equation}gin{equation}
\lambdaabel{eq61}
1-\mathcal{M}athcal{O}verline{\phi(\mathcal{M}u)}\phi(\lambdaambda) = \ip{(1- \mathcal{M}u^* \lambdaambda) v_\lambdaambda}{v_\mathcal{M}u}.
\end{equation}
In \eqref{eq61} we write $v_\lambdaambda$ for $v(\lambda)$, and the operator $\lambda : \mathcal{M} \mathbb{T}o \mathcal{M}$
is defined by
\mathcal{B}egin{equation}
\lambdaabel{eq611}
\lambda \ = \ \lambda_1 I_{\mathcal{M}_1} \mathcal{M}athcal{O}plus \lambda_2 I_{\mathcal{M}_2} .
\end{equation}
We call \eqref{eq61} a {\em model} for $\phi$.
We say $\phi \in \mathcal{M}athcal{H}ol(\mathcal{M}athbb{D}^2)$ is \emph{even} if
\[
\mathcal{F}orall_{\lambdaambda \in \mathcal{M}athbb{D}^2}\ \ \phi(-\lambdaambda)=\phi(\lambdaambda).
\]
We let $\mathcal{M}athcal{H}ol_{\, \mathbb{R}m even}\ (\mathcal{M}athbb{D}^2)$ denote the even holomorphic functions on $\mathcal{M}athbb{D}^2$,
and $\mathcal{M}athscr{S}_e(\mathcal{M}athbb{D}^2)$ the ones in Schur class.
\begin{prop}
\label{prop62}
Let $\phi:\mathbb{D}^2 \to \mathbb{C}$. Then $\phi$ is in $\mathscr{S}_e(\mathbb{D}^2)$ if and only if there exist a separable decomposed Hilbert space $\mathcal{M}=\mathcal{M}_1 \oplus \mathcal{M}_2$, a unitary operator $U$ acting on $\mathcal{M}$, and
an even
function $u:\mathbb{D}^2 \to \mathcal{M}$, $\lambda \mapsto u_\lambda,$ such that
\begin{equation}
\label{eq63}
1-\overline{\phi(\mu)}\phi(\lambda) = \ip{(1-(\mu U \mu)^* (\lambda U\lambda))u_\lambda}{u_\mu}.
\end{equation}
\end{prop}
\mathcal{B}egin{equation}gin{proof}
Note that
if \eqref{eq63} holds, then it
defines $\phi$
(up to a unimodular constant), and it is clear that $\phi$ is even and bounded by $1$ in modulus.
So to see it is in $\mathcal{M}athscr{S}(\mathcal{M}athbb{D}^2)$, we must show it is holomorphic.
This follows from the argument in the proof of \mathbb{C}ite[Proposition 4.26]{amy20}, which shows
that $\lambdaambda \mathcal{M}apsto u_\lambdaambda$ is holomorphic.
Conversely, suppose that $\phi$ is an even function in $\mathcal{M}athscr{S}(\mathcal{M}athbb{D}^2)$. Then there is some
model so that \eqref{eq61} holds. Define
\mathcal{B}egin{equation}gin{eqnarray*}
v_e(\lambda) & \ = \ & \mathcal{F}rac{1}{2} ( v(\lambda) + v(-\lambda) ) \\
v_o(\lambda) &=& \mathcal{F}rac{1}{2} ( v(\lambda) - v(-\lambda) )
\end{eqnarray*}
A lurking isometry argument (see \mathbb{C}ite[Sec. 2.4]{amy20}) shows that there is a partial isometry $W : \mathbb{C} \mathcal{M}athcal{O}plus \mathcal{M} \mathcal{M}athcal{O}plus \mathcal{M} \mathbb{T}o \mathbb{C} \mathcal{M}athcal{O}plus \mathcal{M} \mathcal{M}athcal{O}plus \mathcal{M}$
satisfying
\mathcal{B}egin{equation}
\lambdaabel{eq64}
W :
\mathcal{B}egin{equation}gin{pmatrix}
1 \\
\lambda v_o(\lambda) \\
\lambda v_e(\lambda)
\end{pmatrix} \
\mathcal{M}apsto \
\mathcal{B}egin{equation}gin{pmatrix}
\phi(\lambda) \\
v_e(\lambda) \\
v_o(\lambda)
\end{pmatrix}
\end{equation}
Expanding both sides of \eqref{eq64} in powers of $\lambda$, we see that $W$ has to map the odd part to the odd part, so
\mathcal{B}egin{equation}
\lambdaabel{eq65}
W \mathcal{B}egin{equation}gin{pmatrix}0 \\ 0 \\ \lambda v_e(\lambda)) \end{pmatrix} =\mathcal{B}egin{equation}gin{pmatrix} 0 \\ 0 \\ v_o(\lambda) \end{pmatrix}.
\end{equation}
Thus we see that $U(\lambda v_e(\lambda)) = v_o (\lambda), $ for some partial isometry $U:\mathcal{M}\mathbb{T}o \mathcal{M}$.
By adding a separable infinite dimensional Hilbert space to $\mathcal{M}$ if necessary, one can additionally assume that the
partial isometry $U$ is a unitary.
Define $u_\lambda = v_e (\lambda)$. Substituting this into \eqref{eq64} and using \eqref{eq65}, we get
\mathcal{B}egin{equation}
\lambdaabel{eq66}
W :
\mathcal{B}egin{equation}gin{pmatrix}
1 \\
\lambda U \lambda u_\lambda \\
\lambda u_\lambda
\end{pmatrix} \
\mathcal{M}apsto \
\mathcal{B}egin{equation}gin{pmatrix}
\phi(\lambda) \\
u_\lambda \\
U \lambda u_\lambda
\end{pmatrix}
\end{equation}
Again we expand \eqref{eq66} into odd and even parts, and get
\mathcal{B}egin{equation}
\lambdaabel{eq67}
W :
\mathcal{B}egin{equation}gin{pmatrix}
1 \\
\lambda U \lambda u_\lambda \\0
\end{pmatrix} \
\mathcal{M}apsto \
\mathcal{B}egin{equation}gin{pmatrix}
\phi(\lambda) \\
u_\lambda \\ 0
\end{pmatrix}
\end{equation}
Since $W$ is a partial isometry, we get that for any $\lambda,\mathcal{M}u$ in $\mathcal{M}athbb{D}^2$ we have
\mathcal{B}egin{equation}
\mathcal{N}otag
\Big\lambdaangle
\mathcal{B}egin{equation}gin{pmatrix}
1 \\
\lambda U \lambda u_\lambda \\ 0
\end{pmatrix} ,
\mathcal{B}egin{equation}gin{pmatrix}
1 \\
\mathcal{M}u U \mathcal{M}u u_\mathcal{M}u \\0
\end{pmatrix} \Big\mathbb{R}angle_{\mathbb{C}\mathcal{M}athcal{O}plus \mathcal{M} \mathcal{M}athcal{O}plus \mathcal{M}}
\ = \ \Big\lambdaangle
\mathcal{B}egin{equation}gin{pmatrix}
\phi(\lambda) \\
u_\lambda \\0
\end{pmatrix} ,
\mathcal{B}egin{equation}gin{pmatrix}
\phi(\mathcal{M}u) \\
u_\mathcal{M}u \\0
\end{pmatrix} \Big\mathbb{R}angle_{\mathbb{C}\mathcal{M}athcal{O}plus \mathcal{M} \mathcal{M}athcal{O}plus \mathcal{M}},
\end{equation}
which rearranges into \eqref{eq63}.
\end{proof}
Let $\mathcal{M}$ be a Hilbert space. We define $\mathcal{B}all[ \mathcal{B} (\mathcal{M})]$ by
\[
\mathcal{B}all[ \mathcal{B}(\mathcal{M})] = \set{X\in \mathcal{B}(\mathcal{M})}{\mathcal{N}orm{X}<1}.
\]
We let $\mathcal{M}athscr{R}(\mathcal{M})$ denote the set of 4-tuples $\xi=(a,\mathcal{B}egin{equation}ta, \gamma,D)$ such that $a\in \mathbb{C}$, $\mathcal{B}egin{equation}ta \in \mathcal{M}$, $\gamma \in \mathcal{M}$, $D\in \mathcal{B}(\mathcal{M})$, and the block operator $L_\xi \in \mathcal{B}(\mathbb{C} \mathcal{M}athcal{O}plus \mathcal{M})$ defined by
\mathcal{B}egin{equation}
\lambdaabel{eq68}
L_\xi = \mathcal{B}egin{equation}gin{bmatrix}
a & 1\mathcal{M}athcal{O}times \mathcal{B}egin{equation}ta\\
\gamma \mathcal{M}athcal{O}times 1 & D
\end{bmatrix}
\end{equation}
is unitary.
If $\xi\in \mathcal{M}athscr{R}(\mathcal{M})$, then we may define a function $F_\xi$ on $\mathcal{B}all[\mathcal{B}(\mathcal{M})]$ by the formula
\mathcal{B}egin{equation}
\lambdaabel{eq681}
F_\xi(X) = a+\ip{X(1-DX)^{-1}\gamma}{\mathcal{B}egin{equation}ta}_\mathcal{M},\qquad X\in \mathcal{B}all[\mathcal{B}(\mathcal{M})]
\end{equation}
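The contractivity built into formula \eqref{eq681} can be illustrated numerically. The Python sketch below constructs a random unitary colligation playing the role of $L_\xi$, reads off $a$, $\beta$, $\gamma$, $D$ from its blocks, and checks that $|F_\xi(X)|\le 1$ on randomly sampled strict contractions $X$; the dimension of $\mathcal{M}$, the random seed and the sample sizes are arbitrary choices made only for the illustration.
\begin{verbatim}
# Illustrative check that the transfer function
#   F_xi(X) = a + <X (1 - D X)^{-1} gamma, beta>
# of a unitary colligation maps the open unit ball of B(M)
# into the closed unit disc.
import numpy as np

rng = np.random.default_rng(2)
m = 4                                           # dim M (arbitrary)
Q, _ = np.linalg.qr(rng.normal(size=(m+1, m+1))
                    + 1j*rng.normal(size=(m+1, m+1)))
a, beta_star, gamma, D = Q[0, 0], Q[0, 1:], Q[1:, 0], Q[1:, 1:]

def F_xi(X):
    return a + beta_star @ X @ np.linalg.solve(np.eye(m) - D @ X, gamma)

for _ in range(500):
    X = rng.normal(size=(m, m)) + 1j*rng.normal(size=(m, m))
    X *= 0.99 / np.linalg.norm(X, 2)            # force ||X|| < 1
    assert abs(F_xi(X)) <= 1 + 1e-10
\end{verbatim}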
\mathcal{B}egin{equation}gin{prop}
\lambdaabel{prop69}
$\phi \in \mathscr{S}_e(\mathcal{M}athbb{D}^2)$ if and only if there exists a separable decomposed Hilbert space
$\mathcal{M}=\mathcal{M}_1 \mathcal{M}athcal{O}plus \mathcal{M}_2$, a unitary $U\in \mathcal{B}(\mathcal{M})$, and $\xi \in \mathscr{R}(\mathcal{M})$ such that
\[
\mathcal{F}orall_{\lambdaambda \in \mathcal{M}athbb{D}^2}\ \ \phi(\lambdaambda)=F_\xi(\lambdaambda U \lambdaambda).
\]
\end{prop}
\mathcal{B}egin{equation}gin{proof}
This follows from Proposition \mathbb{R}ef{prop62},
where we write $U$ as $L_\xi$ in \eqref{eq68}, and then use \eqref{eq67} to solve for
$\phi$.
\end{proof}
Propositions~\mathbb{R}ef{prop62} and \mathbb{R}ef{prop69} easily generalize to Schur functions $\phi$ that
satisfy $\phi(\mathcal{M}athcal{O}mega \lambda) = \phi(\lambda)$, where $\mathcal{M}athcal{O}mega$ is a primitive $n^{\mathbb{R}m th}$ root of unity.
One just replaces $\lambda U \lambda$ by $\lambda (U \lambda)^{n-1}$.
\section{The Representation of Schur Functions on $\mathcal{V}$}
\label{sec7}
We define a variety in $\mathbb{D}^3$ by
\begin{equation}
\label{eq71}
\mathcal{V}=\set{z \in \mathbb{D}^3}{z_3^2 =z_1z_2}
\end{equation}
and let $\pi:\mathbb{D}^2 \to \mathcal{V}$ denote the surjective map defined by
\[
\pi(\lambda)=(\lambda_1^2,\lambda_2^2,\lambda_1\lambda_2).
\]
The map $\pi$ exhibits $\mathbb{D}^2$ as a two-to-one branched cover of $\mathcal{V}$.
The key observation is that
\begin{equation}\label{eq72}
\Phi\in \mathscr{S}(\mathcal{V})\iff \Phi \circ \pi\in \mathscr{S}_e(\mathbb{D}^2).
\end{equation}
For a separable decomposed Hilbert space $\mathcal{M}=\mathcal{M}_1\oplus \mathcal{M}_2$ and a unitary operator $U\in\mathcal{B}(\mathcal{M})$ we may decompose $U$,
\[
U=\begin{bmatrix}
A & B \\
C & D
\end{bmatrix},
\]
where $A:\mathcal{M}_1 \to \mathcal{M}_1$, $B:\mathcal{M}_2 \to \mathcal{M}_1$, $C:\mathcal{M}_1 \to \mathcal{M}_2$, and $D:\mathcal{M}_2 \to \mathcal{M}_2$. Furthermore, for each $z\in \mathbb{C}^3$, we may define an operator $z_U$ by the formula
\[
z_U =\begin{bmatrix}
Az_1 & Bz_3 \\
Cz_3 & Dz_2
\end{bmatrix},\qquad z\in \mathbb{C}^3.
\]
We then have that if we view $\lambda$ as an operator on $\mathcal{M}$ (as in \eqref{eq611}) and $z=\pi(\lambda)$, then
\[
\lambda U \lambda = \begin{bmatrix}
A\lambda_1^2 & B\lambda_1\lambda_2 \\
C\lambda_1\lambda_2 & D\lambda_2^2
\end{bmatrix}
=\begin{bmatrix}
Az_1 & Bz_3 \\
Cz_3 & Dz_2
\end{bmatrix}
=z_U.
\]
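Since this identity is the hinge between Sections \ref{sec6} and \ref{sec7}, it may be reassuring to verify it numerically. The following Python sketch does so for a random unitary $U$; the block sizes and the sample point $\lambda$ are arbitrary illustrative choices.
\begin{verbatim}
# Verify  lambda U lambda = z_U  when z = pi(lambda).
import numpy as np

rng = np.random.default_rng(3)
m1, m2 = 2, 3
n = m1 + m2
Q, _ = np.linalg.qr(rng.normal(size=(n, n)) + 1j*rng.normal(size=(n, n)))
A, B = Q[:m1, :m1], Q[:m1, m1:]       # U = [[A, B], [C, D]] unitary
C, D = Q[m1:, :m1], Q[m1:, m1:]

l1, l2 = 0.3 + 0.2j, -0.1 + 0.5j      # a point of D^2
lam = np.diag([l1]*m1 + [l2]*m2)      # lambda as an operator on M = M1 + M2
z = (l1**2, l2**2, l1*l2)             # z = pi(lambda)

zU = np.block([[A*z[0], B*z[2]], [C*z[2], D*z[1]]])
assert np.allclose(lam @ Q @ lam, zU)
\end{verbatim}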
Consequently, using Propositions \ref{prop62} and \ref{prop69} we obtain the following description of the Schur class of $\mathcal{V}$.
\begin{prop}
\label{prop72}
$\Phi \in \mathscr{S}(\mathcal{V})$ if and only if there exist a decomposed Hilbert space $\mathcal{M}$, a unitary $U\in \mathcal{B}(\mathcal{M})$, and $\xi \in \mathscr{R}(\mathcal{M})$ such that
\[
\forall_{z \in \mathcal{V}}\ \ \Phi(z)=F_\xi(z_U).
\]
\end{prop}
Let $\mathscr{U}$ denote the collection of ordered pairs $(\mathcal{M},U)$ such that $\mathcal{M}$ is a separable decomposed Hilbert space and $U$ is a unitary operator acting on $\mathcal{M}$, and define $\mathcal{G} \subseteq \mathbb{C}^3$ by ordaining that $z \in \mathcal{G}$ precisely when
\[
\| z_U \|<1
\]
for all separable decomposed Hilbert spaces $\mathcal{M}$ and all unitary $U$ acting on $\mathcal{M}$. Thus,
\begin{equation}\label{rep.10}
z \in \mathcal{G} \iff \forall_{(\mathcal{M},U) \in \mathscr{U}}\ \ \| z_U \| <1.
\end{equation}
It is clear from the definition that $\mathcal{G}$ is convex, and it
follows from
Lemma~\ref{lem40}
that $\mathcal{G}$ is open.
\begin{prop}
\label{prop73}
If $\Phi \in \mathscr{S}(\mathcal{V})$, then there exists $\Phi^\sim \in \mathscr{S}(\mathcal{G})$ such that $\Phi=\Phi^\sim|\mathcal{V}$.
\end{prop}
\begin{proof}
Since $\Phi \in \mathscr{S}(\mathcal{V})$, it has a realization as in Proposition \ref{prop72}.
Define $\Phi^\sim$ by $\Phi^\sim (z) = F_\xi(z_U).$
It is immediate from \eqref{eq681} that this defines an analytic function;
to show it is in the Schur class requires a calculation; see, e.g., \cite[Sec. 3.9]{amy20}.
\end{proof}
\section{The Envelope}
\lambdaabel{sec8}
In this section we compute $\mathbb{C}alg$ defined by \eqref{rep.10}. For $z\in \mathbb{C}^3$, define $\mathcal{N}orm{z}_\mathscr{U}$ by
\[
\mathcal{N}orm{z}_\mathscr{U} = \sup_{(\mathcal{M},U) \in \mathscr{U}}\mathcal{N}orm{z_U}.
\]
\mathcal{B}egin{equation}gin{lem}\lambdaabel{env.lem.10}
If $z\in \mathbb{C}^3$, then
\[
z \in \mathbb{C}alg \iff \mathcal{N}orm{z}_\mathscr{U} <1.
\]
\end{lem}
\mathcal{B}egin{equation}gin{proof}
The lemma will follow if it can be shown that when $z\in \mathbb{C}^3$, there exists $(\mathcal{M}^\sim,U^\sim)\in \mathscr{U}$ such that
\mathcal{B}egin{equation}\lambdaabel{env.10}
\mathcal{N}orm{z_{U^\sim}}=\sup_{(\mathcal{M},U) \in \mathscr{U}}\mathcal{N}orm{z_U}.
\end{equation}
Choose a sequence $\{(\mathcal{M}^{(n)},U^{(n)})\}$ in $\mathscr{U}$ such that
\[
\mathcal{N}orm{z_{U^{(n)}}} \mathbb{T}o \sup_{(\mathcal{M},U) \in \mathscr{U}}\mathcal{N}orm{z_U}\ \ \mathbb{T}ext{ as }\ \ n\mathbb{T}o \infty.
\]
Let
\[
\mathcal{M}^\sim=\mathcal{B}igoplus_n \mathcal{M}^{(n)}\qquad \mathbb{T}ext{ and }\qquad U^\sim =
\mathcal{B}igoplus_n U^{(n)}.
\]
If we decompose $\mathcal{M}^\sim$ as
\[
\mathcal{M}^\sim = \Big(\mathcal{B}igoplus_n (\mathcal{M}_1^{(n)} \mathcal{M}athcal{O}plus 0)\Big)\ \mathcal{M}athcal{O}plus\
\Big(\mathcal{B}igoplus_n (0 \mathcal{M}athcal{O}plus \mathcal{M}_2^{(n)})\Big),
\]
then $(\mathcal{M}^\sim,U^\sim) \in \mathscr{U}$, and as
\[
z_{U^\sim}\ \mathbb{C}ong\ \ \mathcal{B}igoplus_n z_{U^{(n)}},
\]
\eqref{env.10} holds.
\end{proof}
We let $\mathcal{M}athscr{C}$ denote the collection of ordered pairs $(\mathcal{M},T)$ where $\mathcal{M}=\mathcal{M}_1\mathcal{M}athcal{O}plus \mathcal{M}_2$ is a decomposed Hilbert space and $T$ is a contraction acting on $\mathcal{M}$. If $(\mathcal{M},T) \in \mathcal{M}athscr{C}$, we may decompose $T$,
\mathcal{B}egin{equation}\lambdaabel{env.20}
T=\mathcal{B}egin{equation}gin{bmatrix}
A & B \\
C & D
\end{bmatrix}
\end{equation}
and then for $z\in \mathbb{C}^3$ define $z_T \in \mathcal{B}(\mathcal{M})$ by
\[
z_T=\mathcal{B}egin{equation}gin{bmatrix}
Az_1 & Bz_3 \\
Cz_3 & Dz_2
\end{bmatrix}
\]
We let $\mathscr{C}_2$ denote the collection of ordered pairs $(\mathcal{M},T) \in \mathscr{C}$ satisfying $\mathcal{M}athbb{D}im \mathcal{M}_1 =\mathcal{M}athbb{D}im \mathcal{M}_2 = 2$.
\mathcal{B}egin{equation}gin{lem}\lambdaabel{env.lem.20}
If $z \in \mathbb{C}^3$, then
\[
\mathcal{N}orm{z}_\mathscr{U} \ge \sup_{(\mathcal{M},T) \in \mathscr{C}_2}\mathcal{N}orm{z_T}.
\]
\end{lem}
\begin{proof}
Fix $(\mathcal{M},T) \in \mathscr{C}_2$ and assume that $T$ is represented as in \eqref{env.20} with respect to the decomposition $\mathcal{M}=\mathcal{M}_1\oplus \mathcal{M}_2$. By the Sz.-Nagy Dilation Theorem \cite{szn53} there exist a Hilbert space
\[
\mathcal{H} =\mathcal{H}_0 \oplus \mathcal{M}_1 \oplus \mathcal{M}_2 \oplus \mathcal{H}_1
\]
and operators $X_{ij}$ such that the block operator $U$ defined on $\mathcal{H}$ by
\[
U=\begin{bmatrix}
X_{11} & X_{12} & X_{13} & X_{14} \\
0 & A & B & X_{24} \\
0 & C & D & X_{34} \\
0 & 0 & 0 & X_{44}
\end{bmatrix}
\]
is unitary.
If we decompose $\mathcal{H}$ as
\[
\mathcal{H} =(\mathcal{H}_0 \oplus \mathcal{M}_1) \oplus (\mathcal{M}_2 \oplus \mathcal{H}_1),
\]
then
\[
z_U=\begin{bmatrix}
X_{11}z_1 & X_{12}z_1 & X_{13}z_3 & X_{14}z_3 \\
0 & Az_1 & Bz_3 & X_{24}z_3 \\
0 & Cz_3 & Dz_2 & X_{34}z_2 \\
0 & 0 & 0 & X_{44}z_2
\end{bmatrix}
\]
and we see that
\[
\| z_T \|=\left\|\begin{bmatrix}
Az_1 & Bz_3 \\
Cz_3 & Dz_2
\end{bmatrix}\right\|
\le \| z_U \| \le \| z \|_\mathscr{U}.
\]
As $(\mathcal{M},T)$ is an arbitrary element of $\mathscr{C}_2$, this proves the lemma.
\end{proof}
\mathcal{B}egin{equation}gin{lem}\lambdaabel{env.lem.30}
If $z \in \mathbb{C}^3$, then
\[
\mathcal{N}orm{z}_\mathscr{U} \lambdae \sup_{(\mathcal{M},T) \in \mathscr{C}_2}\mathcal{N}orm{z_T}.
\]
\end{lem}
\mathcal{B}egin{equation}gin{proof}
Fix $z \in \mathbb{C}^3$ and $\epsilon >0$. Choose $(\mathcal{M},U) \in \mathscr{U}$ such that
\mathcal{B}egin{equation}\lambdaabel{env.30}
\mathcal{N}orm{z}_\mathscr{U} -\epsilon/2 < \mathcal{N}orm{z_U}
\end{equation}
and choose $\gamma \in \mathcal{M}$ such that $\mathcal{N}orm{\gamma}=1$ and
\mathcal{B}egin{equation}\lambdaabel{env.40}
\mathcal{N}orm{z_U} -\epsilon/2 <\mathcal{N}orm{z_U \gamma}.
\end{equation}
If we let $P_1$ and $P_2$ denote the orthogonal projections of $\mathcal{M}$ onto $\mathcal{M}_1$ and $\mathcal{M}_2$, respectively, and define
\[
\mathcal{N}_1 =\spn \{P_1\gamma, P_1 z_U\gamma\}\ \ \mathbb{T}ext{ and }\ \ \mathcal{N}_2 =\spn \{P_2\gamma, P_2 z_U\gamma\},
\]
then $\mathcal{N}_1 \perp \mathcal{N}_2$, i.e., $\mathcal{N}=\mathcal{N}_1 +\mathcal{N}_2$ is a decomposed Hilbert space. Furthermore, if we define $T\in \mathcal{B}(\mathcal{N})$ by letting $T=P_\mathcal{N} U|\mathcal{N}$, then $(\mathcal{N},T) \in \mathscr{C}$, $\gamma \in \mathcal{N}$, and
\[
z_T \gamma =z_U \gamma.
\]
Hence, using \eqref{env.30} and \eqref{env.40} it follows that
\mathcal{B}egin{equation}\lambdaabel{env.50}
\mathcal{N}orm{z}_\mathscr{U} -\epsilon < \mathcal{N}orm{z_T \gamma} \lambdae \mathcal{N}orm{z_T}.
\end{equation}
Now, it might be the case that either $\dim \mathcal{N}_1$ or $\dim \mathcal{N}_2$ is strictly less than 2, i.e., $(\mathcal{N},T)\not\in \mathscr{C}_2$. However, we may choose Hilbert spaces $\mathcal{L}_1$ and $\mathcal{L}_2$ such that $\dim (\mathcal{L}_1 \oplus \mathcal{N}_1) = 2$ and $\dim (\mathcal{N}_2 \oplus \mathcal{L}_2) = 2$ and then define $(\mathcal{N}^\sim,T^\sim) \in \mathscr{C}_2$ in the following way. Let
\[
\mathcal{N}^\sim = (\mathcal{L}_1 \oplus \mathcal{N}_1) \oplus (\mathcal{N}_2 \oplus \mathcal{L}_2),
\]
and define $T^\sim$ by choosing contractions $L_1\in \mathcal{B}(\mathcal{L}_1)$ and $L_2 \in \mathcal{B}(\mathcal{L}_2)$ and letting
\[
T^\sim \bigl((x \oplus u) \oplus (v\oplus y)\bigr)= L_1 x \oplus (T (u \oplus v)) \oplus L_2 y,
\qquad x \in \mathcal{L}_1,\ y\in \mathcal{L}_2,\ u \in \mathcal{N}_1,\ v \in \mathcal{N}_2.
\]
Here, we have identified the two spaces
\[
(\mathcal{L}_1 \oplus \mathcal{N}_1) \oplus (\mathcal{N}_2 \oplus \mathcal{L}_2)\ \ \text{ and }\ \
\mathcal{L}_1 \oplus (\mathcal{N}_1 \oplus \mathcal{N}_2) \oplus \mathcal{L}_2.
\]
With this definition it follows that
\[
z_{T^\sim} \cong (z_1L_1) \oplus z_T \oplus (z_2 L_2).
\]
Therefore,
\[
\mathcal{N}orm{z_T} \lambdae \mathcal{N}orm{z_{T^\sim}},
\]
which implies via \eqref{env.50} that
\mathcal{B}egin{equation}\lambdaabel{env.60}
\mathcal{N}orm{z}_\mathscr{U} -\epsilon < \mathcal{N}orm{z_{T^\sim}}.
\end{equation}
Summarizing, we have shown that if $\epsilon>0$, then there exists $(\mathcal{N}^\sim,T^\sim) \in \mathscr{C}_2$ such that \eqref{env.60} holds. This proves the lemma.
\end{proof}
Finally we show that to check whether $z \in \mathcal{G}$, one need only check that $\| z_U \| < 1$ for every unitary on
a decomposed Hilbert space of dimension $2+2$.
We let $\mathscr{U}_2$ denote the collection of ordered pairs $(\mathcal{M},U) \in \mathscr{U}$ satisfying $\dim \mathcal{M}_1 =\dim \mathcal{M}_2 = 2$.
\begin{prop}
\label{prop810}
If $z \in \mathbb{C}^3$, then
\[
\| z \|_\mathscr{U} = \sup_{(\mathcal{M},U) \in \mathscr{U}_2}\| z_U \|.
\]
\end{prop}
\begin{proof}
Fix a $2$-dimensional Hilbert space $\mathcal{N}$, let $\mathcal{M}=\mathcal{N}\oplus \mathcal{N}$, and let $\mathrm{ball}\,\mathcal{B}(\mathcal{M})$ denote the closed unit ball of $\mathcal{B}(\mathcal{M})$. By Lemmas \ref{env.lem.20} and \ref{env.lem.30},
\[
\| z \|_\mathscr{U} = \sup_{T\in \mathrm{ball}\,\mathcal{B}(\mathcal{M})}\| z_T \|.
\]
But the map $T \mapsto \| z_T \|$ is convex and continuous, and $\mathrm{ball}\,\mathcal{B}(\mathcal{M})$ is a compact convex set, so the supremum is attained at an extreme point. As the extreme points of $\mathrm{ball}\,\mathcal{B}(\mathcal{M})$ are the unitaries, it follows that there exists $(\mathcal{M},U) \in \mathscr{U}_2$ such that $\| z_U \| = \| z \|_\mathscr{U}$.
\end{proof}
Now we shall derive an inequality that defines $\mathbb{C}alg$.
\mathcal{B}egin{equation}gin{lem}
\lambdaabel{lem.10}
$
\mathcal{B}egin{equation}gin{bmatrix}
A&B\\mathscr{C}&D
\end{bmatrix}$
is a block unitary acting on $ \mathbb{C}^2 \mathcal{M}athcal{O}plus \mathbb{C}^2$ if and only if
there exist 3 unitary $2\mathbb{T}imes 2$ matrices $u,v,w$ such that
\mathcal{B}egin{equation}\lambdaabel{35}
\mathcal{B}egin{equation}gin{bmatrix}
A&B\\mathscr{C}&D
\end{bmatrix}=
\mathcal{B}egin{equation}gin{bmatrix}
u&0\\0&v
\end{bmatrix}
\mathcal{B}egin{equation}gin{bmatrix}
a&b\\mathbb{C}&d
\end{bmatrix}
\mathcal{B}egin{equation}gin{bmatrix}
1&0\\0&w
\end{bmatrix}
\end{equation}
where
\mathcal{B}egin{equation}\lambdaabel{40}
\mathcal{B}egin{equation}gin{bmatrix}
a&b\\mathbb{C}&d
\end{bmatrix}
\mathbb{T}ext{ is a block unitary acting on } \mathbb{C}^2 \mathcal{M}athcal{O}plus \mathbb{C}^2
\end{equation}
and
\mathcal{B}egin{equation}\lambdaabel{50}
a,b,c \ge 0.
\end{equation}
\end{lem}
\mathcal{B}egin{equation}gin{proof}
Using polar decomposition there exist unitary $u,v$ and $a,c\ge 0$ such that
\[
A=ua\ \mathbb{T}ext { and }\ C=vc.
\]
Again using polar decomposition, there exists a unitary $w$ and $b\ge 0$ such that
\[
B=ubw.
\]
If we set $d=v^*Dw^*$, then \eqref{35} holds by direct computation, and, as the product of unitaries is unitary, \eqref{40} holds as well.
The converse is immediate.
\end{proof}
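The proof just given is constructive, and the construction is easy to carry out numerically. The following Python sketch (the random $4\times 4$ unitary is an arbitrary test case of our own) computes $u$, $v$, $w$ and $a$, $b$, $c$, $d$ via the polar decompositions used above and verifies \eqref{35} and \eqref{40}.
\begin{verbatim}
# Realize the factorization of Lemma lem.10 for a random 4x4 unitary.
import numpy as np

def polar_right(X):                    # X = u a,  u unitary, a >= 0
    W, S, Vh = np.linalg.svd(X)
    return W @ Vh, Vh.conj().T @ np.diag(S) @ Vh

def polar_left(X):                     # X = b w,  b >= 0, w unitary
    W, S, Vh = np.linalg.svd(X)
    return W @ np.diag(S) @ W.conj().T, W @ Vh

rng = np.random.default_rng(4)
Q, _ = np.linalg.qr(rng.normal(size=(4, 4)) + 1j*rng.normal(size=(4, 4)))
A, B, C, D = Q[:2, :2], Q[:2, 2:], Q[2:, :2], Q[2:, 2:]

u, a = polar_right(A)
v, c = polar_right(C)
b, w = polar_left(u.conj().T @ B)
d = v.conj().T @ D @ w.conj().T

inner = np.block([[a, b], [c, d]])     # unitary, with a, b, c >= 0
Z2 = np.zeros((2, 2))
left  = np.block([[u, Z2], [Z2, v]])
right = np.block([[np.eye(2), Z2], [Z2, w]])
assert np.allclose(left @ inner @ right, Q)          # this is (35)
assert np.allclose(inner.conj().T @ inner, np.eye(4))  # this is (40)
\end{verbatim}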
\mathcal{B}egin{equation}gin{lem}\lambdaabel{lem.20}
\eqref{40} and \eqref{50} hold if and only if there exists a unitary $2\mathbb{T}imes 2$ matrix $u$ and scalars $r\lambdae s$ in $[0,1]$ such that
\mathcal{B}egin{equation}\lambdaabel{60}
\mathcal{B}egin{equation}gin{bmatrix}
u^*&0\\0&u^*
\end{bmatrix}
\mathcal{B}egin{equation}gin{bmatrix}
a&b\\mathbb{C}&d
\end{bmatrix}
\mathcal{B}egin{equation}gin{bmatrix}
u&0\\0&u
\end{bmatrix}
=
\mathcal{B}egin{equation}gin{bmatrix}
r&0&\sqrt{1-r^2}&0\\
0&s&0&\sqrt{1-s^2}\\
\sqrt{1-r^2}&0&x_{11}&x_{12}\\
0&\sqrt{1-s^2}&x_{21}&x_{22}
\end{bmatrix}
\end{equation}
where
\mathcal{B}egin{equation}\lambdaabel{70}
\mathcal{B}egin{equation}gin{bmatrix}
x_{11}&x_{12}\\x_{21}&x_{22}
\end{bmatrix}
=
\mathbb{T}hreepartdef{
\mathcal{B}egin{equation}gin{bmatrix}
-r&0\\0&-s
\end{bmatrix}}{s<1,}
{\mathcal{B}egin{equation}gin{bmatrix}
-r&0\\0&\mathbb{T}au
\end{bmatrix},\mathbb{T}ext{ where } |\mathbb{T}au|=1,} {r<s=1,}
{V,\mathbb{T}ext{ where $V$ is unitary, }}{r=s=1.}
\end{equation}
\end{lem}
\mathcal{B}egin{equation}gin{proof}
By the spectral theorem there exists a unitary $2\mathbb{T}imes 2$ matrix $u$ and scalars $r\lambdae s$ in $[0,1]$ such that
\[
u^*au =
\mathcal{B}egin{equation}gin{bmatrix}
r&0\\0&s
\end{bmatrix}
\]
As \eqref{40} holds we have that
\mathcal{B}egin{equation}\lambdaabel{75}
\mathcal{B}egin{equation}gin{bmatrix}
a&c\\mathcal{B}&d^*
\end{bmatrix}\mathcal{B}egin{equation}gin{bmatrix}
a&b\\mathbb{C}&d
\end{bmatrix}=
\mathcal{B}egin{equation}gin{bmatrix}
1&0\\0&1
\end{bmatrix}=
\mathcal{B}egin{equation}gin{bmatrix}
a&b\\mathbb{C}&d
\end{bmatrix}
\mathcal{B}egin{equation}gin{bmatrix}
a&c\\mathcal{B}&d^*
\end{bmatrix}
\end{equation}
In particular, $a^2+c^2=1$. Hence, as $c\ge 0$, $c=\sqrt{1-a^2}$. But then
\[
u^*cu=u^*(\sqrt{1-a^2})u=\sqrt{1-(u^*au)^2}=\mathcal{B}egin{equation}gin{bmatrix}
\sqrt{1-r^2}&0\\0&\sqrt{1-s^2}.
\end{bmatrix}
\]
Likewise, as $a^2 +b^2 =1$ and $b\ge0$,
\[
u^*bu=\mathcal{B}egin{equation}gin{bmatrix}
\sqrt{1-r^2}&0\\0&\sqrt{1-s^2}.
\end{bmatrix}
\]
There remains to show that $d$ has the form described in the statement of the lemma. This follows by simple calculations using the relations
\[
b^2+d^*d= 1= c^2 +dd^*,\ \ ab+cd =0,\ \ \mathbb{T}ext{ and }\ \ ca +db=0.
\]
\end{proof}
Combining Proposition \mathbb{R}ef{prop810} with Lemmas \mathbb{R}ef{lem.10} and \mathbb{R}ef{lem.20} yields the following lemma.
\mathcal{B}egin{equation}gin{lem}\lambdaabel{lem.30}
The point $z \in \mathcal{M}athbb{D}^3$ is in $\mathbb{C}alg$
if and only if
\mathcal{B}egin{equation}\lambdaabel{80}
\sup_{r\in[0,1]}\mathcal{B}ig\| z_r\mathcal{B}ig\| < 1
\end{equation}
where
\[
z_r=\mathcal{B}egin{equation}gin{bmatrix}
rz_1&\sqrt{1-r^2} z_3\\ \sqrt{1-r^2} z_3&-rz_2
\end{bmatrix}.
\]
\end{lem}
\mathcal{B}egin{equation}gin{proof}
For $U =
\mathcal{B}egin{equation}gin{bmatrix}
A&B\\mathscr{C}&D
\end{bmatrix}$, we have by Lemma \mathbb{R}ef{lem.20} that
$z_U$ is unitarily equivalent to $z_r \mathcal{M}athcal{O}plus z_s$ when $ s < 1$,
so \eqref{80} is necessary. It is easy to check the cases when $s=1$ to see that
the condition is also sufficient.
\end{proof}
\begin{lem}
\label{lem40} The point
$z \in \mathbb{D}^3$ is in $\mathcal{G}$ if and only if
\begin{equation}
\label{eq:38}
|z_1 z_2 - z_3^2| < (1-|z_3|^2) + \sqrt{1-|z_1|^2}\sqrt{1-|z_2|^2}.
\end{equation}
\end{lem}
\begin{proof}
We need to find those $z$ for which the spectral radius of $z_r z_r^*$ is less than one for every $r\in [0,1]$.
Letting $t = r^2$, we get that the trace $\tau$ of the matrix $z_r z_r^*$ is
$$\tau = t |z_1|^2 + t |z_2|^2 + 2 (1-t) |z_3|^2,$$
which is less than $2$ since $z\in\mathbb{D}^3$,
and the determinant $d$ is $$d= |t z_1 z_2 + (1-t) z_3^2|^2.$$
For the spectral radius to be less than one, we need to have $\tau + \sqrt{\tau^2 -4 d } < 2$. Since $\tau < 2$, this
is equivalent to $\tau < 1+ d$. So we want
$$t |z_1|^2 + t |z_2|^2 + 2(1-t)|z_3|^2 < 1 + t^2 |z_1 z_2|^2 + (1-t)^2 |z_3|^4 + 2 t (1-t)\, \Re\bigl( z_1 z_2 \bar z_3^{\,2}\bigr).
$$
Simple calculations show that this inequality is equivalent to
\begin{equation}\label{eq:1}
t^2|z_1 z_2 - z_3^2|^2 - t \left(|z_1 z_2 - z_3^2|^2 + (1-|z_3|^2)^2 - (1-|z_1|^2) (1- |z_2|^2)\right) + (1- |z_3|^2)^2>0.
\end{equation}
We want \eqref{eq:1} to be satisfied for every $t\in (0,1)$. Let
$A=|z_1 z_2 - z_3^2|$, $B= 1-|z_3|^2$ and $C=\sqrt{1-|z_1|^2}\sqrt{1-|z_2|^2}$,
and set $a:=A^2$, $b:= B^2$, $c:=C^2$, so that $a\geq 0$ and $b,c>0$.
Note that
\begin{equation}
\label{eq:39}
a t^2 - (a+b-c) t + b >0\ \quad \forall \ t\in [0,1]
\end{equation}
if either $a+b -c \leq 0$, or $(a+b-c)/(2a) \geq 1$, or $(a+b-c)^2 < 4 a b$.
(The inequality is automatically satisfied when $t=0$ or $1$.)
The first inequality means that $a\leq c-b$, and the second that $a\leq b-c$ (so the two together cover the case $a\leq |b-c|$). The third can be transformed to $$(a-b-c)^2 < 4bc,$$ that is, $|A^2 - B^2 - C^2|< 2 BC$, which means that $|B-C|<A< B+C$.
Summing up, either $A^2\leq |B^2 - C^2|$ or $|B-C|<A<B+C$. Since $|B-C|^2\leq |B^2 - C^2|$, we get that
\eqref{eq:39} holds if and only if $A<B+C$, that is,
\eqref{eq:38}.
\end{proof}
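As a sanity check on this computation, one can compare the inequality \eqref{eq:38} with the condition $\sup_{r\in[0,1]}\|z_r\|<1$ of Lemma \ref{lem.30} on random points of $\mathbb{D}^3$. The Python sketch below does this on a grid in $r$; the grid size, the samples and the seed are arbitrary choices, and points extremely close to the boundary of $\mathcal{G}$ may be misclassified by the discretisation.
\begin{verbatim}
# Monte Carlo comparison of Lemma lem.30 with the inequality of Lemma lem40.
import numpy as np

rng = np.random.default_rng(5)

def sup_norm_zr(z, grid=201):
    best = 0.0
    for r in np.linspace(0.0, 1.0, grid):
        s = np.sqrt(1 - r**2)
        zr = np.array([[r*z[0], s*z[2]], [s*z[2], -r*z[1]]])
        best = max(best, np.linalg.norm(zr, 2))
    return best

def in_calg(z):
    rhs = (1 - abs(z[2])**2) \
          + np.sqrt(1 - abs(z[0])**2) * np.sqrt(1 - abs(z[1])**2)
    return abs(z[0]*z[1] - z[2]**2) < rhs

agree = 0
for _ in range(300):
    z = rng.uniform(0, 0.999, 3) * np.exp(1j * rng.uniform(0, 2*np.pi, 3))
    agree += (sup_norm_zr(z) < 1) == in_calg(z)
print(agree, "of 300 sampled points agree")
\end{verbatim}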
Putting all these results together, we get the following theorem.
\mathcal{B}egin{equation}gin{thm}
\lambdaabel{thm814}
Let $\mathbb{C}alv$ and $\mathbb{C}alg$ be defined by
\mathcal{B}egin{equation}gin{eqnarray*}
\mathbb{C}alv &\ =\ & \set{z \in \mathcal{M}athbb{D}^3}{z_3^2 =z_1z_2}\\
\mathbb{C}alg &\ =\ & \set{z \in \mathcal{M}athbb{D}^3}{|z_1 z_2 - z_3^2| < (1-|z_3|^2) + \sqrt{1-|z_1|^2}\sqrt{1-|z_2|^2}} .
\end{eqnarray*}
Then $\mathbb{C}alg$ is convex, and
$(\mathbb{C}alg,\mathbb{C}alv)$ is an np pair.
\end{thm}
Now we shall show that $\mathcal{G}$ is the largest balanced set $G$ for which $(G,\mathcal{V})$ is np.
We exploit the fact that even though $\mathcal{V}$ is two-dimensional, it effectively has a three-dimensional
tangent space at $0$.
\begin{lem}
\label{lem826}
Assume that $(G, \mathcal{V})$ is a Cartan pair.
Let $\psi \in \mathrm{Hol}(G)$, let $f = \psi |_{\mathcal{V}}$, and let
$F \in \mathrm{Hol}(G)$ be any holomorphic extension of $f$ to $G$.
Then $D F (0) = D\psi(0)$.
\end{lem}
\begin{proof}
As $F - \psi$ vanishes on $\mathcal{V}$, we have
\begin{equation}
\label{eq827}
F(z) - \psi(z) \ = \
(z_3^2 - z_1z_2) h(z)
\end{equation}
for some $h \in \mathrm{Hol}(G)$ \cite[p. 27]{chi90}.
As the derivative of the right-hand side of \eqref{eq827}
vanishes at $0$, we get the result.
\end{proof}
\mathcal{B}egin{equation}gin{thm}
\lambdaabel{thm828}
Let $G$ be a balanced domain in $\mathbb{C}^3$, and assume that $(G, \mathbb{C}alv)$ is an np pair.
Then $G \subseteq \mathbb{C}alg$.
\end{thm}
\begin{proof}
Suppose that $G$ is not a subset of $\mathcal{G}$. Since $G$ is open and $\mathcal{G}$ is convex, this means
we can find a point $\lambda \in G$ that is not in $\overline{\mathcal{G}}$.
By Lemma \ref{env.lem.10} and Proposition \ref{prop810}, this means that there is a pair
$(\mathcal{M},U) \in \mathscr{U}_2$ so that $\| \lambda_U \| > 1 $.
This means there are unit vectors $\xi, \eta \in \mathbb{C}^4$ so that the linear function
\[
f(z) \ = \ \langle z_U \xi, \eta \rangle
\]
has modulus greater than $1$ at $z = \lambda$.
But $|f| < 1$ on $\mathcal{G}$, and hence on $\mathcal{V}$, which is contained in $\mathcal{G}$ by Theorem \ref{thm814}.
Let $F$ be a norm-preserving extension of $f$ to $\mathscr{S}(G)$.
Define a function of one variable $g(\zeta) = F(\zeta \lambda)$.
Since $G$ is balanced, $g$ is defined on $\mathbb{D}$, and it is in the Schur class since $F$ is.
Moreover $g(0) = 0$ and, by Lemma \ref{lem826},
\[
g'(0) = D F (0) \begin{bmatrix} \lambda_1\\ \lambda_2 \\ \lambda_3 \end{bmatrix}
= D f (0) \begin{bmatrix} \lambda_1\\ \lambda_2 \\ \lambda_3 \end{bmatrix}
= f(\lambda) .
\]
This contradicts Schwarz's lemma.
\end{proof}
\section{The Noncommutative Analysis Setting}
\lambdaabel{sec9}
\subsection{Overview}
Joe Taylor introduced noncommutative analysis in his seminal work \mathbb{C}ite{tay73} on functional calculus for noncommuting elements of a Banach algebra. Landmarks in the development of the theory are Dan Voiculescu's works \mathbb{C}ite{voi04,voi10} in the context of developing the theory of free probability, Bill Helton's result \mathbb{C}ite{helt02} proving that positive free polynomials are sums of squares, the advance of Helton and McCullough \mathbb{C}ite{hm12} as a step in Helton's fruitful program to develop a descriptive theory of the domains on which LMI and semi-definite programming apply, and the recent monograph by Kaliuzhnyi-Verbovetzkii and Vinnikov
\mathbb{C}ite{kv14} that gives a panoramic view of the field to date, and establishes
a beautiful ``Taylor-Taylor'' formula for nc-functions.
There is much other notable work as well. As a sampling, there are articles of Popescu \cite{po06, po08, po10, po11} which extend various results from classical function theory to functions of $d$-tuples of bounded operators; the magnum opus \cite{bgm} of Ball, Groenewald and Malakorn, which extends realization formulas for functions of commuting operators to functions of non-commuting operators; Alpay and Kalyuzhnyi-Verbovetzkii \cite{alpkal}, which studies realization formulas for noncommutative rational functions that are $J$-inner; and \cite{hkm11a,hkm11b}, where Helton, Klep and
McCullough study mappings of noncommutative domains.
In this section, which is largely expository in nature, we shall present some of the techniques developed
in the papers \mathbb{C}ite{agmc15a}, \mathbb{C}ite{agmc15b}, and \mathbb{C}ite{agmc13b} to prove the existence of free holomorphic extensions of both holomorphic functions defined on varieties in $\mathbb{C}^d$ and free holomorphic functions defined on free varieties in $\mathcal{M}^d$, the ``$d$ dimensional noncommutative universe''. As an application one obtains a classical Cartan theorem with sharp bounds,
with norms that are defined using matrices.
\subsection{Free Holomorphic Functions}
There are many types of analysis that can be carried out on functions in noncommuting variables. Here, we shall focus on \emph{free analysis}, by which we mean the study of \emph{free holomorphic functions}.
Let $\mathbb{P}_d$ denote the free algebra on $d$ generators and let $\mathcal{M}n$ denote the space of $n\mathbb{T}imes n$ matrices with complex entries. Let $\mathcal{M}n^d$ denote the space of $d$-tuples of $n \mathbb{T}imes n$ matrices and define $\mathcal{M}d$, the \emph{$d$-dimensional nc universe}, by
\[
\mathcal{M}d = \mathcal{B}igcup_{n=1}^\infty \mathcal{M}n^d.
\]
We may equip $\mathcal{M}d$ with the \emph{coproduct topology} wherein one ordains that a set $D$ in $\mathcal{M}d$ is open if and only if $D \mathbb{C}ap \mathcal{M}n^d$ is open in $\mathcal{M}n^d$ for each integer $n\ge 1$.
If $\mathcal{M}athbb{D}elta \in \mathbb{P}_d$ and $x\in \mathcal{M}d$ then we may form $\mathcal{M}athbb{D}elta(x)$ in the natural way. More generally, if $\mathcal{M}athbb{D}elta=[\mathcal{M}athbb{D}elta_{ij}]\in \mathbb{P}_dij$, the collection of $I\mathbb{T}imes J$ matrices with entries in $\mathbb{P}_d$ and $x \in \mathcal{M}d$, we may form
\[
\mathcal{M}athbb{D}elta(x) = [\mathcal{M}athbb{D}elta_{ij}(x)].
\]
We say that a subset of $\mathcal{M}d$ is \emph{basic} if it has the form
\[
B_\mathcal{M}athbb{D}elta=\set{x\in \mathcal{M}d}{\mathcal{N}orm{\mathcal{M}athbb{D}elta(x)}<1}
\]
for some matrix $\mathcal{M}athbb{D}elta$ with entries in $\mathbb{P}_d$. Noting that
\[
B_{\mathcal{M}athbb{D}elta_1} \mathbb{C}ap B_{\mathcal{M}athbb{D}elta_2} = B_{\mathcal{M}athbb{D}elta_1 \mathcal{M}athcal{O}plus \mathcal{M}athbb{D}elta_2},
\]
it follows that the collection of basic sets forms a basis for a topology on $\mathcal{M}d$, which we refer to as the \emph{free topology}. We say that a subset of $\mathcal{M}d$ is a \emph{domain} if it is open in the free topology.
If $D \subseteq \mathcal{M}d$ is a free domain, and $f:D \mathbb{T}o \mathcal{M}one$ is a function, we say that $f$ is a \emph{free holomorphic function} if $f$ can be locally uniformly approximated by free polynomials, i.e., for each $\lambdaambda \in D$ there exists a basic set $B_\mathcal{M}athbb{D}elta$ such that
\[
\lambdaambda \in B_\mathcal{M}athbb{D}elta \subseteq D
\]
and
\[
\mathcal{F}orall_{\epsilon>0}\ \exists_{\pi \in \mathbb{P}_d}\ \mathcal{F}orall_{x\in B_\mathcal{M}athbb{D}elta}\ \ \mathcal{N}orm{f(x)-\pi(x)} < \epsilon.
\]
If $D$ is a free domain and $E\subseteq D$, we shall refer to $(D,E)$ as a \emph{free pair}. If $(D,E)$ is a free pair, then a function $f:E \mathbb{T}o \mathcal{M}one$ is said to be \emph{free holomorphic on $E$} if for each point $\lambdaambda\in E$ there exist a basic set $B_\mathcal{M}athbb{D}elta$ and a free holomorphic function $F$ on $B_\mathcal{M}athbb{D}elta$ such that
\[
\lambdaambda \in B_\mathcal{M}athbb{D}elta \subseteq D\qquad \mathbb{T}ext{ and }\qquad
F|E =f.
\]
Finally, we say that a free pair $(D,E)$ is \emph{norm preserving} if for each bounded free holomorphic function $f$ on $E$, there exists a free holomorphic function $F$ on $D$ that extends $f$ and such that
\[
\sup_{x \in D} \mathcal{N}orm{F(x)} = \sup_{x \in E} \mathcal{N}orm{f(x)}.
\]
\subsection{Pick Pairs}
If $S\subseteq \mathbb{P}_d$, we define $V(S)$ in $\mathcal{M}d$ by
\[
V(S)=\set{x\in \mathcal{M}d}{\mathcal{F}orall_{\mathcal{M}athbb{D}elta \in S }\ \ \mathcal{M}athbb{D}elta(x)=0}
\]
and if $A \subseteq \mathcal{M}d$ we say that $A$ is a \emph{free variety in $\mathcal{M}d$} if $A=V(S)$ for some $S\subseteq \mathbb{P}_d$. If $D$ is a free domain and $A$ is a variety in $\mathcal{M}d$, we say that $V=A \mathbb{C}ap D$ is a \emph{variety in $D$}.
For a point $x\in \mathcal{M}d$ we define an ideal $I_x$ in $\mathbb{P}_d$ by
\[
I_x = \set{\mathcal{M}athbb{D}elta \in \mathbb{P}_d}{\mathcal{M}athbb{D}elta(x)=0}
\]
and define a variety in $\mathcal{M}d$ by
\[
V_x = V(I_x).
\]
Notice that if $A$ is a variety in $\mathcal{M}d$ and $x\in A$, then $V_x \subseteq A$.
\mathcal{B}egin{equation}gin{defin}\lambdaabel{free.def.10}
A \emph{Pick pair} is an ordered pair $(D,E)$ such that $D$ is a basic set in $\mathcal{M}d$ and $E$ is a subset of $D$ satisfying the following two properties.
\mathcal{B}egin{equation}\lambdaabel{free.60}
E \mathbb{T}ext{ is closed with respect to direct sums.}
\end{equation}
\mathcal{B}egin{equation}\lambdaabel{free.70}
x\in E \implies V_x\mathbb{C}ap D \subseteq E.
\end{equation}
\end{defin}
\mathcal{B}egin{equation}gin{exam}\lambdaabel{free.exam.10}
If $D$ is a basic set in $\mathcal{M}d$ and $V$ is a variety in $D$, then $(D,V)$ is a Pick pair.
\end{exam}
\mathcal{B}egin{equation}gin{proof}
Let $V=A \mathbb{C}ap D$ where $A$ is a variety in $\mathcal{M}d$. As $D$ and $A$ are closed with respect to direct sums, $V$ is closed with respect to direct sums, i.e., \eqref{free.60} holds. Furthermore, since $V_x \subseteq A$ whenever $x\in A$,
$V_x \mathbb{C}ap D \subseteq A \mathbb{C}ap D =V$ whenever $x\in A$, i.e., \eqref{free.70} holds.
\end{proof}
\mathcal{B}egin{equation}gin{defin}\lambdaabel{free.def.20}
If $(D,E)$ is a Pick pair, then \emph{Pick data on $E$} is a function $f:E \mathbb{T}o \mathcal{M}one$ satisfying the following properties:
\mathcal{B}egin{equation}\lambdaabel{free.80}
\mathcal{F}orall_{x\in E}\ \ f(x) \in \mathbb{P}_d(x).
\mathcal{F}ootnote{Here, $\mathbb{P}_d(x) =\set{\pi(x)}{\pi \in \mathbb{P}_d}$},
\end{equation}
and $f$ preserves direct sums, i.e.,
\mathcal{B}egin{equation}\lambdaabel{free.90}
\mathcal{F}orall_m\ \ \ x_1,\lambdadots,x_m \in E \implies f(\mathcal{M}athcal{O}plus_{i=1}^m x_i) = \mathcal{M}athcal{O}plus_{i=1}^m f(x_i).
\end{equation}
\end{defin}
\mathcal{B}egin{equation}gin{defin}\lambdaabel{free.def.30}
If $(D,E)$ is a Pick pair, we say that $(D,E)$ is \emph{norm preserving} if whenever $f$ is bounded Pick data on $E$, there exists a free holomorphic function $F$ on $D$ that extends $f$ and such that
\mathcal{B}egin{equation}\lambdaabel{free.100}
\sup_{x\in D} \mathcal{N}orm{F(x)} = \sup_{x\in E} \mathcal{N}orm{f(x)}.
\end{equation}
\end{defin}
It turns out that Pick pairs are always norm preserving.
\mathcal{B}egin{equation}gin{thm}\lambdaabel{free.thm.20}
(\mathbb{C}ite{agmc13b}, Theorem 1.5) If $(D,E)$ is a Pick pair, then $(D,E)$ is norm preserving.
\end{thm}
\mathcal{B}egin{equation}gin{proof}
Assume that $(D,E)$ is a Pick pair and $f$ is bounded Pick data on $E$. We wish to show that there exists a bounded free holomorphic function $F$ on $D$ that extends $f$ and such that \eqref{free.100} holds. Fix a sequence $\{\lambdaambda_j\}_{j=1}^\infty$ in $E$ that is dense in $E$ in the coproduct topology and for each $n\ge 1$ let
\[
\Lambda_n = \mathcal{M}athcal{O}plus_{j=1}^n \lambdaambda_j
\]
By $\eqref{free.60}$ $\Lambda_n \in E$. Hence, \eqref{free.80} implies that there exists $\pi_n \in \mathbb{P}_d$ such that
\mathcal{B}egin{equation}{}\lambdaabel{free.110}
f(\Lambda_n)=\pi_n(\Lambda_n).
\end{equation}
Now, fix $x\in V_{\Lambda_n}$. As $\Lambda_n \in E$, \eqref{free.70} implies that $x\in E$. Hence, by \eqref{free.60}, $\Lambda_n \mathcal{M}athcal{O}plus x \in E$. But then, \eqref{free.80} implies that there exists $\mathbb{R}ho\in \mathbb{P}_d$ such that
\[
f(\Lambda_n \mathcal{M}athcal{O}plus x) = \mathbb{R}ho(\Lambda_n \mathcal{M}athcal{O}plus x),
\]
or equivalently, via \eqref{free.90},
\[
f(\Lambda_n)=\mathbb{R}ho(\Lambda_n)\qquad \mathbb{T}ext{ and }\qquad f(x)=\mathbb{R}ho(x).
\]
The first equation above together with \eqref{free.110} imply that $\mathbb{R}ho(\Lambda_n)=\pi_n(\Lambda_n)$. Therefore, as $x \in V_{\Lambda_n}$, $\mathbb{R}ho(x)=\pi_n(x)$. But then the second equation implies that $f(x)=\pi_n(x)$.
Summarizing, in the previous paragraph we showed that if $x\in V_{\Lambda_n}$, then $f(x)=\pi_n(x)$. Hence,
\[
\sup_{x \in V_{\Lambda_n}} \mathcal{N}orm{\pi_n(x)} = \sup_{x \in V_{\Lambda_n}} \mathcal{N}orm{f(x)} \lambdae \sup_{x \in E} \mathcal{N}orm{f(x)}.
\]
Consequently, by Theorem 1.3 in \mathbb{C}ite{agmc13b}, there exists a bounded free holomorphic function $F_n$ on $D$ that satisfies
\[
F_n(\Lambda_n) = f(\Lambda_n)\qquad \mathbb{T}ext{ and }\qquad \sup_{x\in D} \mathcal{N}orm{F_n(x)} \lambdae \sup_{x \in E} \mathcal{N}orm{f(x)}.
\]
The desired function $F$ can now be obtained by invoking a Montel Theorem as in the proof of Theorem 1.5 in \mathbb{C}ite{agmc13b}.
\end{proof}
\subsection{$p$ Norms}
In order to relate the free setting to the classical theory of functions of several complex variables, a particularly useful object is the algebraic set $\mathcal{M}dcom$ defined in $\mathcal{M}d$ by
\[
\mathcal{M}dcom=V(S)
\]
where
\[
S=\set{x_ix_j-x_jx_i}{i,j=1,\lambdadots,d}.
\]
We let $\mathbb{P}_dcom$ denote the algebra of polynomials in $d$ variables.
Naturally, $\mathbb{P}_dcom$ can be identified with the quotient $\mathbb{P}_d / I$ where $I$ is the ideal generated by the set $S$ above. Accordingly, each $p \in \mathbb{P}_dcom$ corresponds to a coset $\mathcal{M}athbb{D}elta +I$ and conversely to each coset $\mathcal{M}athbb{D}elta +I$ corresponds an element of $\mathbb{P}_dcom$. When $p$ and $\mathcal{M}athbb{D}elta$ are so related we write $p=[\mathcal{M}athbb{D}elta]$.
For $p$ an $I \times J$ matrix with entries in $\mathbb{P}_d^{\mathrm{com}}$ we define a set $G_p$ in $\mathbb{C}^d$ by the formula
\[
G_p = \{\lambda \in \mathbb{C}^d \,:\, \|p(\lambda)\|<1\}.
\]
For example, if
\[
p(\lambda)=\begin{bmatrix}\lambda_1&&&\\
&\lambda_2&&\\
&&\ddots&\\
&&&\lambda_d
\end{bmatrix},
\]
then $G_p$ equals the polydisc $\mathbb{D}^d$; if
\[
p(\lambda)=\begin{bmatrix}\lambda_1\\
\lambda_2\\
\vdots\\
\lambda_d
\end{bmatrix},
\]
then $G_p$ is the unit ball in $\mathbb{C}^d$ centered at the origin; and if
\[
p(\lambda)=\begin{bmatrix}p_1(\lambda)&&&\\
&p_2(\lambda)&&\\
&&\ddots&\\
&&&p_n(\lambda)
\end{bmatrix},
\]
where $p_1,\ldots,p_n$ are polynomials in $d$ variables,
then $G_p$ is a general polynomial polyhedron in $\mathbb{C}^d$. We say a domain $D$ in $\mathbb{C}^d$ is an \emph{operhedron} if $D=G_p$ for some matrix $p$ with entries in $\mathbb{P}_d^{\mathrm{com}}$. (So an operhedron
is a polynomial polyhedron if and only if it can be written as $G_p$ where $p$ is a diagonal matrix.)
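A minimal non-diagonal illustration (ours): for $d=2$ one may take
\[
p(\lambda)=\begin{bmatrix}\lambda_1 & \lambda_2\\ 0 & \lambda_1\end{bmatrix},
\qquad
G_p=\Bigl\{\lambda\in\mathbb{C}^2 \,:\, \Bigl\|\begin{bmatrix}\lambda_1 & \lambda_2\\ 0 & \lambda_1\end{bmatrix}\Bigr\|<1\Bigr\},
\]
where the norm is the largest singular value of the displayed matrix; here the defining inequality couples the two coordinates rather than imposing separate scalar conditions $|p_i(\lambda)|<1$.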
Basic sets in $\mathcal{M}^d$ and operhedrons in $\mathbb{C}^d$ are related in a natural way, described in the following lemma.
\begin{lem}
If $\Delta$ is an $I\times J$ matrix of free polynomials and $p=[\Delta]$, then
\[
([\lambda_1],\ldots,[\lambda_d]) \in B_\Delta \cap \mathcal{M}^d_1\qquad \iff\qquad (\lambda_1,\ldots,\lambda_d) \in G_p.
\]
\end{lem}
\begin{proof}
Notice first that
\[
([\lambda_1],\ldots,[\lambda_d]) \in \mathcal{M}^d_1\qquad \iff\qquad (\lambda_1,\ldots,\lambda_d) \in \mathbb{C}^d.
\]
Also, as $p=[\Delta]$,
\[
\forall_{\lambda \in \mathbb{C}^d}\ \ \Delta([\lambda_1],\ldots,[\lambda_d])=p(\lambda_1,\ldots,\lambda_d).
\]
Therefore,
\begin{align*}
([\lambda_1],\ldots,[\lambda_d]) \in B_\Delta \cap \mathcal{M}^d_1 &\iff \|\Delta([\lambda_1],\ldots,[\lambda_d])\| <1\\
&\iff \|p(\lambda_1,\ldots,\lambda_d)\| <1\\
&\iff (\lambda_1,\ldots,\lambda_d) \in G_p.
\end{align*}
\end{proof}
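As a quick sanity check (our remark): for $d=1$, $\Delta(x)=x$ and $p=[\Delta]$ is the coordinate function $\lambda$, and the lemma reduces to
\[
[\lambda]\in B_\Delta\cap\mathcal{M}^1_1 \iff |\lambda|<1 \iff \lambda\in G_p=\mathbb{D}.
\]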
For $G_p$ an operhedron, we define
\[
\mathcal{F}_p =\{x \in \mathcal{M}^d_{\mathrm{com}} \,:\, \|p(x)\| <1\},
\]
and for $f\in \mathrm{Hol}(G_p)$, define $\|f\|_p$ by
\begin{equation}
\label{eq913}
\|f\|_p = \sup_{x\in \mathcal{F}_p} \|f(x)\|.
\end{equation}
By $f(x)$ on the right-hand side of \eqref{eq913} we mean that we use the functional calculus to
evaluate the holomorphic function $f$ on the $d$-tuple $x$, whose spectrum lies in the domain of $f$.
We let $H^\infty_p$ denote the Banach algebra
\[
H^\infty_p =\{f\in \mathrm{Hol}(G_p) \,:\, \|f\|_p<\infty\}.
\]
The idea of studying function theory on polydiscs using $p$-norms dates back to \cite{ag90}. The idea was first considered for general operhedrons $G_p$ by Ambrozie and Timotin in the beautiful paper \cite{amti03}. In \cite{babo04} Ball and Bolotnikov considerably extended the work in \cite{amti03}, introducing many powerful new tools.
A fundamental connection between $p$-norms and free analysis is revealed by the following result of two of the authors.
\begin{thm}\label{free.thm.40}(\cite{agmc15b}, Theorem 8.5)
Assume that $\Delta$ is an $I\times J$ matrix of free polynomials and let $p=[\Delta]$. If $f \in H^\infty_p$, then there exists a free holomorphic function $F$ on $B_\Delta$ such that
\[
\forall_{\lambda \in G_p}\ \ F([\lambda_1],\ldots,[\lambda_d]) = [f(\lambda_1,\ldots,\lambda_d)]\ \qquad \text{ and }\qquad \sup_{x\in B_\Delta} \|F(x)\|=\|f\|_p.
\]
Conversely, if $F$ is a bounded free holomorphic function on $B_\Delta$ and $f \in \mathrm{Hol}(G_p)$ is defined by
\[
[f(\lambda_1,\ldots,\lambda_d)]=F([\lambda_1],\ldots,[\lambda_d]),\qquad \lambda \in G_p,
\]
then $f\in H^\infty_p$ and
\[
\|f\|_p \le \sup_{x\in B_\Delta} \|F(x)\|.
\]
\end{thm}
The proof of Theorem \ref{free.thm.40} in \cite{agmc15b} required a great deal of heavy lifting. Here, we present a simple proof using the fact that Pick pairs are norm preserving.
\begin{proof}
Let $(D,E) = (B_\Delta,B_\Delta \cap \mathcal{M}^d_{\mathrm{com}})$. Then $D$ is a basic set, $E$ is a subset of $D$, and as both $B_\Delta$ and $\mathcal{M}^d_{\mathrm{com}}$ are closed with respect to direct sums, so also is $E$, i.e., \eqref{free.60} holds. Also, since $\mathcal{M}^d_{\mathrm{com}}$ is an algebraic set in $\mathcal{M}^d$, it is clear that $V_x \subseteq \mathcal{M}^d_{\mathrm{com}}$ whenever $x \in \mathcal{M}^d_{\mathrm{com}}$, so that \eqref{free.70} holds. Therefore, $(D,E)$ is a Pick pair.
Now fix $f\in H^\infty_p$. As $f$ is analytic on $G_p$ and $\sigma(x) \subseteq G_p$ whenever $x\in E$
by Lemma~\ref{lem914},
we may define a function $g:E \to \mathcal{M}^1$ by the formula
\begin{equation}
\label{eq914}
g(x)=f(x),\qquad x \in E.
\end{equation}
As $g$ is Pick data on $E$, and
\[
\sup_{x\in E}\|g(x)\| = \sup_{x\in \mathcal{F}_p} \|f(x)\| =\|f\|_p,
\]
it follows from Theorem \ref{free.thm.20} that there exists $F$ with the desired properties.
To prove the converse, note that since $F$ is locally approximable by a sequence in $\mathbb{P}_d$, we get that
$f$ is locally approximable by a sequence in $\mathbb{P}_d^{\mathrm{com}}$, and hence is holomorphic.
The inequality $\|f\|_p \le \sup_{x\in B_\Delta} \|F(x)\|$ is then automatic: for $x\in \mathcal{F}_p$ the same approximating sequences give $f(x)=F(x)$, and $\mathcal{F}_p\subseteq B_\Delta$.
\end{proof}
\begin{lem}
\label{lem914}
Let $x \in B_\Delta \cap \mathcal{M}^d_{\mathrm{com}}$, and let $p=[\Delta]$.
Then $\sigma(x) \subseteq G_p$.
\end{lem}
\begin{proof}
Let $x \in \mathcal{M}_n^d$.
Suppose $\lambda \in \sigma(x)$. Let $\xi$ be a unit vector in $\mathbb{C}^J$ such that
$\| p (\lambda) \xi \| = \| p( \lambda ) \|$.
Choose an orthonormal basis in $\mathbb{C}^n$ with respect to which $x$ is upper triangular.
Then for some $1 \leq j \leq n$, the tuple $\lambda = (\lambda_1, \dots, \lambda_d)$ forms the $j^{\mathrm{th}}$ diagonal
entry of $x = (x_1, \dots, x_d)$ (see \cite{cur88} or \cite{coh14}). Write $v$ for this particular $e_j$.
So for any polynomial $q$ we have $q(x) v = q(\lambda) v + w$, where $w \perp v$.
So \[
p(x) (\xi \otimes v) = p(\lambda) (\xi ) \otimes v + \eta,
\] where $\eta \perp \mathbb{C}^I \otimes v$.
Therefore $\| p(x) \| \geq \| p(\lambda) \|$; since $x\in B_\Delta\cap\mathcal{M}^d_{\mathrm{com}}$ gives $\|p(x)\|=\|\Delta(x)\|<1$, we conclude that $\|p(\lambda)\|<1$, i.e. $\lambda \in G_p$.
\end{proof}
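To make the key step concrete (a small illustration of ours): for a single upper triangular $2\times 2$ matrix and $q(t)=t^2$,
\[
x_1=\begin{bmatrix}\lambda & 1\\ 0 & \mu\end{bmatrix},\qquad
x_1^2=\begin{bmatrix}\lambda^2 & \lambda+\mu\\ 0 & \mu^2\end{bmatrix},\qquad
x_1^2\, e_2=\mu^2 e_2+(\lambda+\mu)e_1,
\]
so $q(x_1)e_2=q(\mu)e_2+w$ with $w\perp e_2$, exactly as used in the proof.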
\subsection{$p$ $V$ Norms}
\label{ssec95}
It is easy to adapt the results of the previous subsection to the problem of extending holomorphic functions defined on varieties $V$ in $G_p$. The key is to replace the set $\mathcal{F}_p$ with the set $\mathcal{F}_{p,V}$, consisting of all $x\in \mathcal{F}_p$ with spectrum in $V$ and such that $F(x)$ depends only on the values of $F$ on $V$ whenever $F$ is holomorphic on a neighborhood of $V$.
The set $\mathcal{F}_{p,V}$ can be defined concretely as follows. For $\lambda \in G_p$ define $X_\lambda$ by
\[
X_\lambda = \{x\in \mathcal{M}^d_{\mathrm{com}} \,:\, \sigma(x)=\{\lambda\}\}.
\]
For $V\subseteq G_p$ define $X_V$ to consist of the set of all finite direct sums
\[
x=\bigoplus_{i=1}^n x_{i},
\]
where $x_i \in X_{\lambda_i}$ for each $i$, and $\lambda_1,\ldots,\lambda_n$ are points in $V$. Finally, let $Y_V$ be defined by
\[
Y_V = \{y\in \mathcal{M}^d_{\mathrm{com}} \,:\, \exists_{x\in X_V}\ \ y \text{ is similar to } x\}.
\]
Evidently, $Y_V$ is the collection of all pairwise commuting $d$-tuples of matrices with spectrum in $V$.
(This can be seen by decomposing the space on which the $d$-tuple acts into generalized eigenspaces.)
We define a functional calculus for elements of $Y_V$ as follows. If $f$ is holomorphic on a neighborhood of $\lambda$ and $x\in X_\lambda$, then $f(x)$ can be defined by plugging $x$ into the power series expansion of $f$ at $\lambda$ (which results in a finite sum). More generally, if $f$ is holomorphic on $V$, and
\[
y = S^{-1}\ \bigoplus_{i=1}^n x_{i}\ S\ \in Y_V,
\]
where $x_i \in X_{\lambda_i}$ for each $i$,
we define $f(y)$ by
\begin{equation}
\label{eq916}
f(y)=S^{-1}\ \bigoplus_{i=1}^n f(x_{i})\ S.
\end{equation}
We shall say that $y \in Y_V$ is {\em subordinate} to $V$ if, whenever
$f$ is holomorphic on a neighborhood $U$ of $\sigma(y)$ in $\mathbb{C}^d$ and
vanishes
on $U \cap V$,
then $f(y) = 0$.
The promised set $\mathcal{F}_{p,V}$ is defined by
\[
\mathcal{F}_{p,V}=\{y\in Y_V \,:\, y \text{ is subordinate to } V\}
\cap \mathcal{F}_p .
\]
As before, for $f\in \mathrm{Hol}(V)$, define $\|f\|_{p,V}$ by
\[
\|f\|_{p,V} = \sup_{y\in \mathcal{F}_{p,V}} \|f(y)\|,
\]
and let $H^\infty_{p,V}$ denote the Banach algebra
\[
H^\infty_{p,V} =\{f\in \mathrm{Hol}(V) \,:\, \|f\|_{p,V}<\infty\}.
\]
\begin{exam}
Let $d =2$, let
\[
p(\lambda) \ = \
\begin{bmatrix}
\lambda_1 & 0 \\
0 & \lambda_2
\end{bmatrix},
\]
so $G_p = \mathbb{D}^2$, and let
\[
V \ = \ \{ \lambda \in \mathbb{D}^2 : \lambda_1^2 = \lambda_2^2 \} .
\]
Then $Y_V$ is the set of pairs $y = (y_1,y_2)$
of commuting matrices whose spectrum lies in $V$,
and $\mathcal{F}_{p,V}$ is the subset for which both $y_1$ and $y_2$ have norm less than $1$ and which in addition satisfy $y_1^2 = y_2^2$.
This is slightly larger than just the diagonalizable pairs of strict contractions; for example it contains the pair
\[
\begin{bmatrix}
c & d & 0 & 0 \\
0 & c & 0 & 0 \\
0 & 0 & c & d \\
0 & 0 & 0 & c
\end{bmatrix}
\text{ and }
\begin{bmatrix}
-c & -d & 0 & 0\\
0 & -c & 0 & 0 \\
0 & 0 & c & d \\
0 & 0 & 0 & c
\end{bmatrix}
\]
whenever $c$ and $d$ are small enough to give contractions.
But $\mathcal{F}_{p,V}$ does not contain the pair
\[
\begin{bmatrix}
c & d \\
0 & c
\end{bmatrix}
\text{ and }
\begin{bmatrix}
-c & d \\
0 & -c
\end{bmatrix}
\]
whenever $c$ and $d$ are non-zero, even though this pair is in $Y_V$.
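Indeed, a direct computation (spelling out the last claim) gives
\[
y_1^2-y_2^2=
\begin{bmatrix}
c^2 & 2cd \\ 0 & c^2
\end{bmatrix}
-
\begin{bmatrix}
c^2 & -2cd \\ 0 & c^2
\end{bmatrix}
=
\begin{bmatrix}
0 & 4cd \\ 0 & 0
\end{bmatrix}
\neq 0
\]
when $cd\neq 0$, while $\lambda_1^2-\lambda_2^2$ vanishes identically on $V$; so this pair is not subordinate to $V$.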
\end{exam}
A very similar proof to the one for Theorem \ref{free.thm.40} yields the following.
\begin{thm}\label{free.thm.50}
Assume that $\Delta$ is an $I\times J$ matrix of free polynomials and let $p=[\Delta]$. Let $A$ be an algebraic set\footnote{i.e. $A$ is the common zero set of a collection of polynomials in $\mathbb{P}_d^{\mathrm{com}}$.}
in $\mathbb{C}^d$ and let $V=A \cap G_p$.
If $f \in H^\infty_{p,V}$, then there exists a free holomorphic function $F$ on $B_\Delta$ such that
\[
\forall_{\lambda \in V}\ \ F([\lambda_1],\ldots,[\lambda_d]) = [f(\lambda_1,\ldots,\lambda_d)]\ \qquad \text{ and }\qquad \sup_{x\in B_\Delta} \|F(x)\|=\|f\|_{p,V}.
\]
Conversely, if $F$ is a bounded free holomorphic function on $B_\Delta$ and $f \in \mathrm{Hol}(V)$ is defined by
\[
[f(\lambda_1,\ldots,\lambda_d)]=F([\lambda_1],\ldots,[\lambda_d]),\qquad \lambda \in V,
\]
then $f\in H^\infty_{p,V}$ and
\[
\|f\|_{p,V} \le \sup_{x\in B_\Delta} \|F(x)\|.
\]
\end{thm}
\begin{proof}
Let $(D,E) = (B_\Delta, \mathcal{F}_{p,V})$. Then \eqref{free.60} holds, and since
$A$ is an algebraic set, so does \eqref{free.70}. Therefore, $(D,E)$ is a Pick pair.
Now fix $f\in H^\infty_{p,V}$ and $y \in E$.
We may define a function $g:E \to \mathcal{M}^1$ by formula \eqref{eq916}.
Notice that this is well-defined. If at some $x_i$ with spectrum $\{ \lambda \}$ we choose two
different holomorphic functions $\varphi,\psi$ which agree with $f$ on a neighborhood of $\lambda$ in
$V$, then $\varphi(x_i) = \psi(x_i)$, since $x_i$ is subordinate to $V$.
As $g$ is Pick data on $E$, and
\[
\sup_{y\in E}\|g(y)\| = \sup_{y\in \mathcal{F}_{p,V}} \|f(y)\| =\|f\|_{p,V},
\]
it follows from Theorem \ref{free.thm.20} that there exists $F$ with the desired properties.
\end{proof}
\subsection{A Sharp Commutative Cartan Extension Theorem}
We can now use the non-commutative result Theorem \ref{free.thm.50} to
prove a commutative extension theorem with sharp bounds for algebraic sets in operhedrons.
\begin{thm}\label{free.thm.60}
Let $p$ be an $I\times J$ matrix with entries in $\mathbb{P}_d^{\mathrm{com}}$ and assume that $V$ is an
algebraic set
in $G_p$.
If $f\in H^\infty_{p,V}$, then there exists an extension $F\in H^\infty_p$ such that
\begin{equation}
\label{eq920}
F|V =f\qquad \text{and}\qquad \|F\|_p = \|f\|_{p,V}.
\end{equation}
Moreover, the norm estimate in \eqref{eq920} cannot be improved.
If $F\in H^\infty_p$, then $F|V\in H^\infty_{p,V}$ and $\|F|V\|_{p,V}\le \|F\|_p$.
\end{thm}
\begin{proof}
Let $f\in H^\infty_{p,V}$. Choose an $I\times J$ matrix $\Delta$ of free polynomials so that
$p = [ \Delta]$. By Theorem~\ref{free.thm.50} there exists a free holomorphic function
$\Phi$ on $B_\Delta$ with norm equal to $\|f\|_{p,V}$ that extends $f$.
Define $F = \Phi |_{G_p}$. Then
\begin{equation}
\| F \|_p \ \leq \ \sup_{x\in B_\Delta}\| \Phi(x) \| \ = \ \|f\|_{p,V} .
\label{eq921}
\end{equation}
But since $F|V = f$, we must have
\[
\| F \|_p \ \geq \ \|f\|_{p,V} ,
\]
since we are taking the supremum over a larger set.
Therefore we must have equality in \eqref{eq921}.
\end{proof}
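As a simple illustration of Theorem \ref{free.thm.60} (our example): take $V=\{\lambda^0\}$, a single point of $G_p$. Any $y\in \mathcal{F}_{p,V}$ is subordinate to $V$, so applying the coordinate functions $\lambda_j-\lambda^0_j$, which vanish on $V$, gives $y_j=\lambda^0_j I$ for each $j$. Hence
\[
\|f\|_{p,V}=|f(\lambda^0)|,
\]
and the constant extension $F\equiv f(\lambda^0)$ achieves $\|F\|_p=|f(\lambda^0)|=\|f\|_{p,V}$, matching the sharp bound in \eqref{eq920}.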
\bibliography{references}
\end{document}
\begin{document}
\title{Lagrangian Circle actions.}
\author{Cl\'ement Hyvrier}
\address{
C\'egep Saint-Laurent\\
D\'epartement de Math\'ematiques\\
625 avenue Sainte-Croix\\
Montreal, QC, H4L 3X7\\
Canada}
\email{
[email protected]
}
\maketitle
\begin{abstract} We consider paths of Hamiltonian diffeomorphisms preserving a given compact monotone Lagrangian in a symplectic manifold that extend to an $S^1$--Hamiltonian action. We compute the leading term of the associated Lagrangian Seidel element. We show that such paths minimize the Lagrangian Hofer length. Finally, we apply these computations to Lagrangian uniruledness and to give a nice presentation of the quantum cohomology of real Lagrangians in Fano symplectic toric manifolds.
\end{abstract}
\section{Introduction} Let $(M^{2n},\om)$ denote a symplectic manifold and let $L$ be a compact connected Lagrangian in $M$. Here, we will consider exact Lagrangian loops of $L$.
Consider the set of Hamiltonian isotopies starting at the identity and with ending point a Hamiltonian diffeomorphism preserving $L$:
$$\P_L\mathrm{Ham}(M,\om):=\{\gamma:[0,1]\stackrel{C^{\infty}}{\lra} \mathrm{Ham}(M,\om)\,|\,\gamma_0=id,\quad \gamma_1(L)=L\}.$$
These are the paths generating exact Lagrangian loops of $L$ (see M.Akveld and D.Salamon \cite{AkveldSalamon}).
Similarly to loops of Hamiltonian diffeomorphisms, such paths define automorphisms of the Lagrangian quantum homology of $L$, when defined (see Hu--Lalonde--Leclercq \cite{HuLalondeLeclercq}). Any such automorphism can be seen as multiplication by an invertible element of the Lagrangian quantum homology called the \emph{Lagrangian Seidel element}.
For weakly exact Lagrangians it has been shown in \cite{HuLalondeLeclercq} that the Lagrangian Seidel morphism is always trivial, hence the Seidel element is simply given by the fundamental class of $L$, when defined.
In this paper we are interested in computing Lagrangian Seidel elements for those paths admitting extensions to a loop of Hamiltonian diffeomorphisms coming from an $S^1$--action on $(M,\om)$. In other words, for the elements in $\P_L\mathrm{Ham}(M,\om)$ that are homotopic to paths which, when squared, yield an $S^1$--Hamiltonian action on the symplectic manifold.
To ensure that all the automorphisms we want to compute are well-defined we will assume that $(M,L)$ is \emph{monotone}. If $\mu_L:\pi_2(M,L)\sra \mathbb{Z}$ denotes the Maslov index and if $I_{\om}:\pi_2(M,L)\sra \R$ is the $\om$--valuation, then monotonicity means that
\[\begin{cases} I_{\om}=\lambda\, \mu_L &\text{for some $\lambda>0$}; \\
N_L:=\inf_{A\in\pi_2(M,L),\,A\neq 0}\mu_L(A)\geq 2. &\end{cases} \]
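A standard example to keep in mind (our aside; compare the real Lagrangians of Section 5): the real Lagrangian $\R P^n\subset \C P^n$ with the Fubini--Study form is monotone, with minimal Maslov number
$$N_{\R P^n}=n+1\geq 2,\qquad n\geq 1.$$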
In this framework we will compute the leading term of the Seidel element. We will also show that in some cases all the other terms vanish. For instance, this is the case for the (monotone) totally real Lagrangians in toric manifolds. These computations can be seen as the relative counterpart of the computation done by McDuff and Tolman for the Seidel elements of an $S^1$--Hamiltonian action on $M$ \cite{McDuffTolman}.
These calculations imply that such Hamiltonian paths cannot define null-homotopic exact Lagrangian loops. We will further show that for paths giving $S^1$--Hamiltonian actions when squared, the Lagrangian Hofer length is minimized; hence they define relative geodesics in their homotopy class with fixed endpoints. This is not that surprising, considering that the obtained Hamiltonian loops define geodesics in their homotopy class, as shown by D.McDuff and J.Slimowitz \cite{McDuffSlimowitz}. We point out that such results can be useful to study Lagrangian uniruledness as defined by P.Biran and O.Cornea \cite{BiranCorneaUniruling}. The main class of examples to which we concretely apply the calculations mentioned above are the \emph{real Lagrangians} in Fano symplectic toric manifolds, that is, symplectic manifolds $(M^{2n},\om)$ with a Hamiltonian action of $\T^n$ satisfying some positivity assumption. These Lagrangians are the fixed point sets of the unique anti-symplectic involution preserving the moment map of the torus action. Under a monotonicity assumption, L.Haug \cite{Haug} showed that these Lagrangian submanifolds are wide with respect to the ring of $\mathbb{Z}_2$--Laurent polynomials. This means that the corresponding Lagrangian quantum homology splits as a product of the $\mathbb{Z}_2$--Morse homology of $L$ with the coefficient ring. We will show that the multiplicative quantum relations of $L$ are generated by Lagrangian Seidel elements. This can be seen as a relative version of the observation made by D.McDuff and S.Tolman in \cite{McDuffTolman}. Using L.Haug's result we then provide a description of the Lagrangian quantum homology as a quotient of a polynomial ring, exactly analogous to that given in the absolute case.
\subsubsection{Formulation of the main result} We need to introduce Lagrangian quantum homology. Roughly speaking, this is the homology theory obtained by deforming the Morse differential on $L$, taking into account pseudo-holomorphic disks in $M$ with boundary in $L$. More precisely, this is the homology of the \emph{pearl complex}
$$C(L; f,g_L;J;\Lambda_L):=(R\la Crit f\ra\otimes \Lambda_L,d_Q),$$
where $(f,g_L)$ is a Morse-Smale pair for $L$, $\Lambda_L:=R[q^{-1},q]$ is the ring of $R$--Laurent polynomials, graded by requiring that $|q|=1$, and where the differential $d_Q$ can be written as a sum of $\Lambda_L$-linear maps
$$d_Q=d_0+d_1 \otimes q^{-N_L}+d_2\otimes q^{-2N_L}+...$$
where $d_0$ stands for the Morse differential of $f$ and
\begin{equation*} d_k: R\la Crit_r(f)\ra \sra R\la Crit_{r+kN_L-1}(f) \ra
\end{equation*}
is obtained by counting \emph{pearl trajectories}, i.e. chains of gradient flow lines of $f$ and $J$-holomorphic disks in $M$ with boundary on $L$, with cumulative Maslov index $kN_L$. In the present text we will only be considering $R=\mathbb{Z}_2$ as the ground coefficient ring.
From P.Biran and O.Cornea \cite{BiranCorneaQuantumHomology} the homology of this complex is generically well defined under the monotonicity assumption. We will denote by $QH(L;\Lambda_L)$ the corresponding Lagrangian quantum homology. For more on this, we refer to \cite{BiranCorneaQuantumHomology} and the references therein.
Lagrangian Seidel elements are invertible elements $S_L(\gamma)\in QH(L;\Lambda_L)$, where $\gamma\in\P_L\mathrm{Ham}(M,\om)$. Their definition involves counting pearl trajectories with pseudo-holomorphic sections in the Hamiltonian fibration, $M \hookrightarrow P_{\gamma}\stackrel{\pi}{\sra} D^2$, associated with $\gamma$, whose boundary lies on the Lagrangian $\pi^{-1}(\partial D^2)$ (see Section 3). Here, we will discuss, to some extent, what happens when $\gamma$ is a path of Hamiltonian diffeomorphisms of $L$ admitting an $S^1$--Hamiltonian action extension:
\begin{defn}\label{definitionextension} We say that $\gamma\in\P_L\mathrm{Ham}(M,\om)$ extends to an $S^1$--Hamiltonian action if it is homotopic relative endpoints to a path $\gamma':[0,1]\sra \mathrm{Ham}(M,\om)$ such that the concatenation of $\gamma'$ with itself, $(\gamma')^2$, defines an $S^1$--Hamiltonian action. We denote by $\P$ the set of such paths.
\end{defn}
In particular, if $H_t:M\sra \R$ denotes the family of (normalized) Hamiltonian functions generating $\gamma'$, the action of $(\gamma')^2$ is generated by a smooth time-independent (normalized) function $K:M\sra\R$. Thus the normalized Hamiltonian generating $\gamma'$ is also time-independent and one has $K=2H$. In the remainder of the article, unless otherwise mentioned, we will assume that $\gamma$ is $\gamma'$, i.e. already extends to an $S^1$--action. Let $F_{max}$ denote the maximal fixed point set component of the $S^1$--action associated to $(\gamma')^2$. Throughout the paper we will restrict our attention to the following case:
\emph{\begin{itemize}\label{hypotheseaction}
\item[(A1):] $F_{max}$ is \emph{semifree}, i.e. the action is semifree in a neighbourhood of $F_{max}$.
\item[(A2):] $L$ intersects $F_{max}$ cleanly, and the intersection $F^L_{max}:=L\cap F_{max}$ is a Lagrangian submanifold of $F_{max}$. \end{itemize}}
\begin{rem}
\begin{itemize}
\item[1)] Note that (A2) implies $\dim(F^L_{max})=1/2\dim(F_{max})$.
\item[2)] If the gradient flow of $K$ is contained in $L$, then (A2) holds. Indeed, let $J$ be an $S^1$--invariant almost complex structure on $M$ compatible with $\om$. At any $x\in F_{max}$ we have the splitting:
$$T_xM=T_xF_{max}\oplus N_x\cong \ker (1-d\gamma(x))\oplus N_x$$
where $N_x$ is the symplectic complement of $T_xF_{max}=\ker (1-d\gamma(x))$. Since $J$ is $S^1$--invariant, $J$ is also split, and so is the induced Hermitian metric $g_J:=\om(\cdot,J\cdot)$. By the assumption, $T_xL$ is compatible with that splitting, meaning that:
$$T_xL=T_x F^L_{max}\oplus_{g_J} \{v\in T_xL\,|\, v\in N_x\}.$$
Since $L$ is Lagrangian, both summands above are maximal isotropic subspaces of $T_xF_{max}$ and $N_x$ respectively, and so the claim follows.
\end{itemize}
\end{rem}
For a fixed point $x$, let $w(x)$ denote the sum of the weights at $x$. We recall that, for an $S^1$--invariant $\om$--compatible almost complex structure on $M$, the action of $S^1$ on $T_xM\cong \C^n$ is conjugate to a product of circle actions $z\mapsto e^{2\pi i k_j t}z$, $t\in S^1$. The sum of the weights $w(x)=\sum_j k_j$ then defines a locally constant function, and as such only depends on the connected component of the fixed point set in which $x$ lies.
We will denote by $w_{max}$ the sum of the weights for points in the fixed point set component $F_{max}$.
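As an elementary illustration of this convention (ours): for the linear action of $S^1$ on $(\C,\om_{std})$ given by
$$\theta\cdot z= e^{2\pi i k\theta}z,\qquad \theta\in S^1=\R/\mathbb{Z},$$
the origin is the only fixed point, the single weight there is $k$, and so $w(0)=k$.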
The main result of this paper is the following:
\begin{theorem}\label{calculelementdeSeidel} Let $L$ be a monotone compact Lagrangian submanifold of $(M,\om)$. Let $\gamma\in\P$ satisfy the assumptions (A1) and (A2). The corresponding Lagrangian Seidel element is given by
\begin{eqnarray*}S_L(\gamma)=[F^L_{max}]\otimes q^{-w_{max}}+\sum_{\{B\in\pi_2(M,L)|\mu_L(B)>0\}}a_B\otimes q^{-w_{max}-\mu_L(B)}
\end{eqnarray*}
where $\deg(a_B)=\dim(F^L_{max})+\mu_L(B)$.
In particular, if $\mathrm{codim}(F_{max})=2$ then all lower order terms $a_B$ vanish. \end{theorem}
As an example, let us mention the case of half of a Hamiltonian loop fixing a given divisor $D$ (a facet of the moment polytope) in a Fano symplectic toric manifold. The endpoints of such a path fix the real Lagrangian in this manifold (e.g.\ consider a meridian $S^1$ in $S^2$ and the action of rotating around the poles), and assumptions (A1) and (A2) are verified. Hence, if the real Lagrangian is monotone, one concludes that the corresponding Lagrangian Seidel element is given by $[D\cap L]\otimes q$. It is worth noticing that lower order terms may appear in situations that are reminiscent of those exposed in \cite[Theorem 1.10]{McDuffTolman} (see Example \ref{Theexample} in Section 5). In fact the more general results obtained in \cite{McDuffTolman} cannot be carried over with the techniques used here, as regularity of symmetric almost complex structures generally fails. \\
Theorem \ref{calculelementdeSeidel} can be applied to deduce some results about uniruledness of Lagrangian submanifolds. As defined by O.Cornea and P.Biran in \cite{BiranCorneaUniruling}, a monotone Lagrangian manifold $L$ in $M$ is said to be \emph{1--uniruled}, or \emph{uniruled}, if there exists a second category Baire subset of families of almost complex structures of $M$ with the property that: for each such almost complex structure there is a non-constant pseudo-holomorphic disk in $M$ with boundary on $L$ passing through any generic point of $L$.
\begin{theorem}\label{Lagrangianuniruledness} Let $L\subset M$ be a closed monotone Lagrangian and suppose there is $\gamma\in \P$ such that the corresponding $S^1$--Hamiltonian action verifies hypotheses (A1) and (A2); then $L$ is uniruled.
\end{theorem}
Theorem \ref{calculelementdeSeidel} also implies that exact Lagrangian loops respecting the assumptions of the theorem cannot be null-homotopic, since the leading term is not $[L]$. We further show the following:
\begin{theorem}\label{lengthminimizing} Let $L\subset M$ be a closed monotone Lagrangian. Suppose there exists $\gamma\in \P_L\mathrm{Ham}(M,\om)$ such that $\gamma^2$ defines a semi-free $S^1$--Hamiltonian action on $M$. Then $\gamma$ minimizes the Hofer length in its homotopy class with fixed endpoints.
\end{theorem}
The paper is organized as follows. In Section 2 we recall the definition of Hamiltonian fibrations associated to loops or paths of Hamiltonian diffeomorphisms. In Section 3 we introduce Lagrangian Seidel elements. Section 4 is devoted to the proof of Theorems \ref{calculelementdeSeidel} and \ref{lengthminimizing}. In Section 5 we apply our results to Lagrangian uniruledness (we give the proof of Theorem \ref{Lagrangianuniruledness}) and we show that the multiplicative relations for the Lagrangian quantum homology of real Lagrangians in Fano symplectic toric manifolds are generated by Lagrangian Seidel elements (more precisely, we prove Proposition \ref{Quantumpresentation}).\\
\noindent\textbf{Acknowledgements.} I would like to thank Fran\c{c}ois Charette, Octav Cornea, Tobias Ekholm and Yasha Savelyev for useful discussions. I would also like to thank Fran\c{c}ois Charette for his suggestions and comments on an earlier version of this note, which helped improve the presentation of the paper; in particular for explaining to me how to simplify the assumptions in Theorem \ref{Lagrangianuniruledness}.
\section{Hamiltonian fibrations} A Hamiltonian fibration $\pi:P\sra B$ with fiber $(M^{2n},\om)$, and compact symplectic base $(B,\om_B)$, is a symplectic fibration whose structure group reduces to $\mathrm{Ham}(M,\om)$. Such fibrations are naturally equipped with a family $\{\om_b\}_{b\in B}$ of symplectic forms on the fibers $\pi^{-1}(b)$ induced by $\om$. It was shown by Guillemin, Lerman, and Sternberg \cite{GLS}, and by McDuff and Salamon in full generality, that Hamiltonian fibrations are symplectically trivial over the 1-skeleton of $B$ and that they admit an Ehresmann connection on $TP$ whose holonomy around any loop is Hamiltonian. This latter condition can be formally expressed as follows: there exists a closed 2--form $\tau\in \Omega^2(P)$ extending $\omega$. The corresponding horizontal distribution, i.e. a direct complement in $TP$ to the \emph{vertical subbundle} $Vert:=\ker d\pi$, is given by
$$Hor_{\tau}(p):=\{w\in T_pP |\tau(w,v)=0\quad\forall v\in Vert_p=\ker d\pi(p)\}.$$
Different connection forms $\tau$ as above may determine the same horizontal distribution. However, a unique choice can be made by requiring that $\pi_*\tau^{n+1}=0$, where $\pi_*$ denotes integration over the fibers. When this latter normalization condition is satisfied we say that $\tau$ is a \emph{coupling form}.
Note that such Hamiltonian fibrations admit symplectic structures:
$$\Omega_{c}:= \tau+c\pi^* \om_B$$
where $c$ is a large enough strictly positive real number.
Recall that an $\om$--tame almost complex structure $J$ on a symplectic manifold $(M,\om)$ is a smooth endomorphism of $TM$ such that $$\forall p\in M,\quad (J(p))^2=-id_{T_pM}\quad \text{and}\quad \omega(\cdot,J \cdot)>0.$$
Let $\mathcal{J}(M,\omega)$ denote the set of $\om$--tame almost complex structures, which is contractible and non-empty \cite[Chapter 2]{MS2}. The symplectic manifold $(P,\Omega_{c})$ admits $\Omega_{c}$--tame almost complex structures $J_P$ that are \emph{compatible with $\pi$ and $\tau$}, or \emph{fibered}, in the following sense:
\begin{itemize}
\item $d\pi \circ J_P=J_B\circ d\pi$, where $J_B\in\mathcal{J}(B,\omega_B)$
\item $J_b:=\left.J_P\right|_{\pi^{-1}(b)}\in\mathcal{J}(\pi^{-1}(b),\omega_{b})$ for all $b\in B$,
\item $J_P$ preserves the horizontal distribution induced by $\tau$.
\end{itemize}
Let $\mathcal{J}(P,\Omega_{c},\tau,\pi)$ denote the set of such almost complex structures. In fact, for fixed $\tau$, any family $J=\{J_b\}_{b\in B}$ of $\omega_{b}$-tame almost complex structures and any given $J_B$ give rise to a unique fibered $J_P\in \mathcal{J}(P,\Omega_{c},\tau,\pi)$ for some $c\in\R$.
Hamiltonian fibrations as above carry two canonical cohomology classes. The first one is the \emph{vertical first Chern class}, induced by any family of almost complex structures $\{J_b\}_{b\in B}$ and defined by
$$c_v:=c_1(Vert)\in H^2(P,\mathbb{Z}).$$
The second one is the de Rham cohomology class of the coupling form, $[\tau]\in H^2(P,\R)$: this is the unique class such that
$$\iota^*[\tau]=[\om]\quad\text{and}\quad [\tau]^{n+1}=0,$$ where $\iota$ denotes the inclusion of a fiber.
In what follows we shall only consider Hamiltonian fibrations over $D^2$ and $S^2$.
\subsection{Hamiltonian fibrations associated to a loop of Hamiltonian diffeomorphisms} Let $\gamma_t\in \L \mathrm{Ham}(M,\om)$. Such a loop defines a Hamiltonian fibration over $S^2$ via the \emph{clutching construction}. Namely, let $D^+$ and $D^-$ denote the unit discs in $\C$, but with opposite orientations. Then set
$$P_{\gamma}:=\left.D^{+}\times M\sqcup D^{-}\times M\right/ (e^{i2\pi t},x)\sim (e^{i2\pi t},\gamma_t(x)),\,\,t\in [0,1].$$
This is obviously a Hamiltonian fibration over $(S^2,\om_{FS})$, where $\om_{FS}$ denotes the Fubini-Study form on $S^2$ with total area two. Its isomorphism class only depends on the homotopy class of the
loop $\{\gamma_t\}$. In fact any Hamiltonian fibration over $S^2$ can be obtained in this way \cite{LMP}. If $\tau_{\gamma}$ denotes the corresponding coupling form, then for a big enough positive constant $c$
$$\Omega_{c}:=\tau_{\gamma}+c\pi^* \om_{FS},$$
is symplectic.
When $\{\gamma_t\}$ is given by an $S^1$--action on $M$, $P_{\gamma}$ can be described in the following way. Let $p:S^3\sra S^2$ denote the Hopf fibration. The product $S^3\times M$ admits the free $S^1$ action:
$$e^{it}.((z_0,z_1),x)\mapsto((e^{-it}z_0,e^{-it}z_1), \gamma_t(x)),$$
and the quotient $S^3\times_{S^1} M$ can be identified with $P_{\gamma}$. In that setting, the coupling form is obtained by considering a connection 1--form on $S^3$. Namely, let $\alpha\in \Omega^1(S^3)$ be the standard contact form, so that $d\alpha=p^*\om_{FS}$, where $\om_{FS}$ is the Fubini-Study form on $S^2$ normalized to have area 1. Then $\om-d(K\alpha)\in\Omega^2(S^3\times M)$ defines a closed basic form, hence defines a closed 2--form on the quotient:
$$\tau_{\gamma}=pr_*(\om-d(K\alpha))\in \Omega^2(S^3\times_{S^1} M)$$
where $pr:S^3\times M\sra S^3\times_{S^1} M$ denotes the projection and $pr_*$ denotes integration over the fibers of $pr$. This form clearly extends $\om$ on the fibers, and since $K$ is normalized the integral $\pi_*\tau_{\gamma}^{n+1}$ vanishes. Thus, $\tau_{\gamma}$ is a coupling form.
In this particular framework, each fixed point of the action yields a section of $P_{\gamma}$. Namely, for $x\in Fix$, the corresponding section is $$\sigma_x:=S^3\times_{S^1}\{x\}.$$
The following will be useful later on:
\begin{lem}\cite[Lemma 2.2]{McDuffTolman}\label{McDuffTolman1} If $x$ is a fixed point of the Hamiltonian circle action of $\gamma$, then
$$c_v(\sigma_x)=w(x)\quad \text{and} \quad \tau_{\gamma}(\sigma_x)=-K(x).$$
Moreover, if $B$ is the class of the sphere formed by the $\gamma$-orbit of an arc between $x$ and $y$, then $B=\sigma_x-\sigma_y$.
\end{lem}
Consider now an $S^1$--invariant $\om$--tame almost complex structure on $M$. Note that the standard complex structure $J_0$ in $\C^2$ is $S^1$--invariant. Its restriction to $S^3$ preserves the contact structure $\ker \alpha$. Let $R$ denote the Reeb vector field associated to $\alpha$, and $X_K$ denote the Hamiltonian vector field of the $S^1$--action on $M$.
Then any vector field $v=[v_1,v_2]\in T(S^3\times_{S^1}M)$ admits a unique representative in $T(S^3\times M)$ lying in $\ker \alpha\oplus TM$ given by
$$(v_1-\alpha(v_1)R, v_2+\alpha(v_1) X_K).$$
It follows that $J_0\times J$ descends to a well-defined almost complex structure $\overline{J}$ on $P_{\gamma}$, which is obviously fibered and $\Omega_{c}$--tame for $c>\max K$. Note that $\sigma_x$ is then $\overline{J}$--holomorphic.
\subsection{Hamiltonian fibrations associated to $\gamma\in \P_L\mathrm{Ham}(M,\om)$}
To any path $\gamma \in \P_L\mathrm{Ham}(M,\om)$ one associates a Hamiltonian fibration over the 2-disc as follows. Let $\mathbb{H}$ denote the upper half plane in $\C$, and set
$$D^+_{+}:=\{z=x+iy\in \mathbb{H}| |z|\leq 1\}$$
and $D^+_{-}$ is the same half disc but with opposite orientation. In particular, the compactified upper-half plane $\overline{\mathbb{H}}=D^+_{+}\cup_{\varphi} D^+_{-}$ coincides with the disc of radius one $D^2$ (here,
$\varphi: D^{+}_+\sra D^+_-$ is given by $z\sra \bar{z}^{-1}$).
The Hamiltonian fibration associated to the path is then given by the \emph{half clutching construction}:
$$P_{\gamma}:=\left.D^+_+\times M\sqcup D^+_-\times M\right/ (e^{i\pi t},x)\sim (e^{i\pi t},\gamma_t(x)).$$
Again, its isomorphism class only depends on the homotopy class with fixed endpoints of $\gamma_t$. This manifold carries symplectic structures such that the subbundle defined by collecting all the copies of $L$ along the boundary of $\overline{\mathbb{H}}$ is a Lagrangian submanifold $N$ fibering over $S^1$. If $\gamma_t$ denotes a Hamiltonian isotopy representing $\gamma\in \mathrm{Ham}_L(M,\om)$, then
$$N\cong \bigcup_{t\in[0,1]}\{e^{2\pi i t}\}\times \gamma_t(L).$$
This Lagrangian actually embeds in $P_{\gamma}\cong D^2\times M$ and is called an exact Lagrangian loop (see \cite{AkveldSalamon}, \cite{HuLalondeLeclercq}). Considering the specific case where $\gamma\in \P$, one easily sees that $N$ is a Lagrangian.
A connection form on $P_{\gamma}$ is given by
\begin{equation}\label{explicitcouplingform}\tau_{\gamma}=\om-dH\wedge dx-dH\wedge dy-\frac{H}{\pi} dx\wedge dy,
\end{equation}
and symplectic structures are explicitly given by
$$\Omega_{c}= \tau_{\gamma}+ \frac{c}{\pi} dx\wedge dy,\quad \text{for $c>0$ big enough} .$$
One verifies that the parallel transport of $\tau_{\gamma}$ preserves the fiber bundle $N$.
Letting $L_s$ be the copy of $L$ lying in the fiber over $s\in \partial D^2$, this is the same as saying that the vector field along $N$
$$(s_0,\left.\frac{d}{ds}\right|_{s=s_0}\gamma_s(p)),\quad p\in L_{s_0}$$
is horizontal with respect to $\tau_{\gamma}$. Hence, $N$ is a Lagrangian submanifold of $P_{\gamma}$ for the symplectic forms $\Omega_{c}$.
Note also that $\tau_{\gamma}$ vanishes on $N$. We will denote by $\mathcal{T}(\gamma)$ the set of connection 2--forms on $P_{\gamma}$ whose parallel transport along the boundary preserves $N$. Equivalently, these are the connection 2--forms that vanish identically on $N$ (see \cite{AkveldSalamon}, Lemma 3.1). The set of relative cohomology classes $[\tau]\in H^2(D^2\times M, N;\mathbb{Z})$ associated to elements $\tau\in \mathcal{T}(\gamma)$ is a 1--dimensional affine space: for any $\tau_0,\tau_1\in \mathcal{T}(\gamma)$
\begin{equation*} [\tau_1]-[\tau_0]=c(\tau_1,\tau_0) [\frac{1}{\pi}dx\wedge dy],\quad c(\tau_1,\tau_0)\in\R.
\end{equation*}
Hence, for $c$ big enough, $\tau=\tau_0+\frac{c}{\pi} dx\wedge dy$ is symplectic, and for $c$ small enough, $-\tau$ is symplectic. Set
$\mathcal{T}^{\pm}(\gamma):=\{\tau\in \mathcal{T}(\gamma) | \pm\tau^{n+1}>0\}$.
Then, any value of $c$ for which $\tau$ is non-symplectic lies between the following two real numbers
\begin{equation*}\epsilon^+(\tau_0, N):=\inf \{c(\tau,\tau_0)|\tau\in\mathcal{T}^{+}(\gamma)\}
\end{equation*}
and
\begin{equation*}\epsilon^-(\tau_0, N):=\inf \{c(\tau,\tau_0)|\tau\in\mathcal{T}^-(\gamma)\}.
\end{equation*}
The width of the corresponding non-symplectic interval $\epsilon (N)$ does not depend on the reference point and is given by\begin{equation*}\epsilon (N)=\epsilon^+(\tau_0, N)-\epsilon^-(\tau_0, N) .
\end{equation*}
\subsection{Hofer length of exact Lagrangian loops} Let $\gamma_t$ denote a Hamiltonian isotopy representing $\gamma\in \P_L\mathrm{Ham}(M,\om)$. As seen previously, this defines an exact Lagrangian loop $N\subset D^2\times M$. Assume $\gamma_t$ is generated by the family $H_t$ of Hamiltonians. In the case where $M$ is not compact these must be compactly supported. When $M$ is closed, we assume that $H_t$ is normalized for all $t$. The Hofer length of $N$ is defined to be
\begin{eqnarray*} \ell(N)=
\int_0^1 (\max_{x\in L_t} H_t(x)-\min_{x\in L_t} H_t(x)) dt,
\end{eqnarray*}
where $L_t=\gamma_t(L)$. Subsequently we will consider minimizing the Hofer length within the isotopy class of $\gamma$ with fixed endpoints. This is the same as minimizing $\ell(N)$ within its isotopy class of exact Lagrangian loops. In other words we will examine the quantity:
\begin{equation*} \nu(N; M,\om ):=\inf_{N'} \ell(N')
\end{equation*}
where the infimum is taken over all exact Lagrangian loops that are Hamiltonian isotopic to $N$. The following theorem is due to M. Akveld and D. Salamon:
\begin{theorem}[M. Akveld and D. Salamon, \cite{AkveldSalamon}, Theorem B] \label{theoremeBAkveldSalamon} For every exact Lagrangian loop $N$
$$\epsilon(N)\leq \nu(N).$$
\end{theorem}
\subsection{The doubling procedure}
Let $\gamma^2$ denote the loop of Hamiltonian diffeomorphisms associated to the path $\gamma\in\P$. First note that we have an obvious embedding
$$\iota_2:P_{\gamma}\hookrightarrow P_{\gamma^2}.$$
Taking the pull-back of $pr_*(\om-d(K\alpha))$ under $\iota_2$ actually yields $\tau_{\gamma}$ as in \eqref{explicitcouplingform}.
In fact, $P_{\gamma^2}$ is made of two copies of $P_{\gamma}$ glued together along their boundary. More precisely,
$$ P_{\gamma^2}=P_{\gamma}\cup_{\varphi}P_{\gamma}$$
where
$$\varphi:\partial P_{\gamma}\sra \partial P_{\gamma},\quad (s, x)\to (-s, \gamma_1(x)).$$
This happens to be useful subsequently to induce information on $P_{\gamma}$ from $P_{\gamma^2}$. In particular, this will be handy when dealing with holomorphic sections. Concerning sections, let us note that not only does any section in $P_{\gamma^2}$ give rise to a section in $P_{\gamma}$ (which may not have boundary on the Lagrangian), but also, any section $$\sigma :(D^2,S^1)\sra (P_{\gamma},N),\quad z\to (z,u(z))$$ can be doubled to give a section in $P_{\gamma^2}$ with the equator constrained to the Lagrangian $\iota_2(N)$. The new section is given by
$$\sigma_{db}:S^2\sra P_{\gamma^2},\quad z\to \begin{cases} (z,u(z)) & \text{if $z\in \overline{\mathbb{H}}$}\\
(z,\gamma_1 (u(e^{-i\pi}z)))& \text{if $z\in e^{i\pi}\cdot\overline{\mathbb{H}}$}.
\end{cases}$$
Note that this is well-defined and continuous.
\section{Lagrangian Seidel elements}
Here we recall the definition of Seidel elements in both the absolute and relative cases.
\subsection{Section classes and vertical Maslov index}
Consider the pair $(P_{\gamma},N)$ as above. We say that $B\in \pi_2(P_{\gamma},N)$ is a \emph{section class} if and only if $\pi_*(B)=[D^2,S^1]$ is the positive generator. We say that it is a \emph{fiber class} if $B$ lies in the image of the inclusion map $$\pi_2(M,L)\sra \pi_2(P_{\gamma},N)$$ thus implying $\pi_*B=0$. As shown in \cite{HuLalondeLeclercq}, the following sequence is exact in the middle:
\begin{equation}\label{exactsequencedisc} \pi_2(M,L)\sra \pi_2(P_{\gamma},N)\sra \pi_2(D^2,S^1)
\end{equation}
\begin{defn}(\cite{HuLalondeLeclercq}) Let $u:D^2\sra P_{\gamma}$ be a smooth map representing $B\in\pi_2(P_{\gamma},N)$. The \emph{vertical Maslov index} of $B$ is the Maslov index of the pair $(u^*(T^vP_{\gamma}), u^*T^vN)$, where $T^vN$ denotes the vertical tangent bundle of the bundle $N$. We will denote this number by $\mu_{\gamma}^v(B)$, or $\mu_v$ for simplicity.
\end{defn}
It is a well-defined $\mathbb{Z}$--valued morphism on $\pi_2(P_{\gamma},N)$ which further verifies that:
$$\mu_N(B)=\mu_{\gamma}^v(B)+2\quad \text{and}\quad \mu_N(B-B')=\mu_L(B-B')$$ for two section classes $B$ and $B'$. We make the following identification on the set of section classes $\H_{rel}\subset H_2(P_{\gamma}, N)$:
$$B\sim B'\Leftrightarrow \tau_{\gamma}(B-B')=0=\mu_{\gamma}^v(B-B').$$
This is obviously an equivalence relation. We set $\widetilde{\H_{rel}}:= \left.\H_{rel}\right/\sim$ and we will denote by $[B]$ the equivalence class of $B\in \H_{rel}$. The following follows directly from the definitions of the vertical Maslov class and of the doubling of a section:
\begin{lem}\label{relationMaslovclassandChernclass} Let $\gamma^2$ denote the loop of Hamiltonian diffeomorphisms associated to the path $\gamma\in\P$. If $\sigma$ represents the class $[\sigma]\in H_2(P_{\gamma}, N)$ then $\sigma_{db}$ represents the class $[\sigma^2]:=[\sigma\#\sigma]\in H_2(P_{\gamma^2})$ and one has
$$c_v([\sigma_{db}])=\mu_{\gamma}^v([\sigma])\quad \text{and}\quad \tau_{\gamma^2}([\sigma_{db}])=2\tau_{\gamma}([\sigma])$$
\end{lem}
\begin{proof} The second one follows by definition. For the first one:
$$2c_v([\sigma^2])=\mu_v([\sigma^2])=\mu_v([\sigma])+\mu_v([\sigma])=2\mu_v([\sigma])$$
hence the claim.
\end{proof}
\subsection{Holomorphic and anti-holomorphic sections} Let $j$ denote the standard complex structure on the disc, that is the anti-clockwise rotation by 90 degrees on the plane. Let $\{J_z\}$, $z\in D^2$, denote a smooth family of $\om$--tamed almost complex structures in $M$. Let
$$\mathbb{H}:=H\otimes dx+H \otimes dy.$$
This is a 1--form over $D^2$ with values in $C_0^{\infty}(M)$. Let $X_{\mathbb{H}}$ be the induced 1--form with values in Hamiltonian vector fields of $M$, and let $X^{0,1}_{\mathbb{H}}$ denote the corresponding $(j,J)$ anti-holomorphic part.
These data provide an almost complex structure on $P_{\gamma}$ as follows:
\[ J_P(\tau,J)(z,x):=\left(\begin{array}{cc} j(z) & 0 \\
X^{0,1}_{\mathbb{H}}(z,x) & J_z(x)\\
\end{array} \right)\]
It is easy to check that this is fibered. Furthermore, if $\tau \in \mathcal{T}^{\pm}(\gamma)$ then $J_P(\tau,\pm J)$ is $\pm\tau$--tamed. In fact $J_P(\tau, J)$ is $\Omega_c$--tamed for $c$ large enough.
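For instance, the first compatibility condition can be read off directly from the block form (a one-line check of ours): applying $J_P$ to a tangent vector $(\zeta,v)\in T_zD^2\oplus T_xM$ gives
$$J_P(\tau,J)(z,x)\,(\zeta,v)=\bigl(j(z)\zeta,\ X^{0,1}_{\mathbb{H}}(z,x)\zeta+J_z(x)v\bigr),
\qquad d\pi\circ J_P(\zeta,v)=j(z)\zeta=j\circ d\pi(\zeta,v).$$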
We consider the following boundary value problem for smooth sections $u:D^2\sra P_{\gamma}$:
\begin{equation}\label{R-Hproblem}
J_P\circ du= du\circ j \quad \text{and} \quad u(\partial D^2)\subset N.
\end{equation}
Fix a section class $A$ and let $\M(P_{\gamma},A;\tau, J)$ denote the moduli space of $J_P(\tau,J)$--holomorphic sections representing $A$:
\[ \M(P_{\gamma},A;\tau, J):=\{u:D^2\sra P_{\gamma}|\eqref{R-Hproblem}\,\, \text{and}\,\, [u]=A\}.\]
For generic $(\tau, J)$ this is a manifold of dimension $n+\mu^v(A)$ [\cite{AkveldSalamon}, \cite{HuLalonde}]. Taking $-J$ instead of $J$, the moduli space $\M(P_{\gamma},A;\tau, -J)$ is similarly defined and is generically a manifold of dimension $n-\mu^v(A)$.
\begin{rem}\label{fixedpoint=holsection} Note that a fixed point $x\in L$ of $\gamma$ defines a section of $P_{\gamma}$: $u:D^2\sra P_{\gamma}$, $z\sra (z,x)$. This section is $J_P(\tau,\pm J)$--holomorphic. Indeed, for $z=s+it$ and for $u=(z,\tilde{u})$, the first part of equation \eqref{R-Hproblem} is equivalent to
\[\frac{\partial \tilde{u}}{\partial s}+J_z(\tilde{u})\frac{\partial\tilde{u}}{\partial t}+X_H(\tilde{u})-J_z(\tilde{u})X_H(\tilde{u})=0\]
If $x\in L$ is fixed under $\gamma$, then $\tilde{u}\equiv x$, so that $\frac{\partial \tilde{u}}{\partial s}=\frac{\partial \tilde{u}}{\partial t}=0$. Furthermore, $X_H(\tilde{u})=0$ since $x$ is a fixed point.
\end{rem}
\subsection{The relative Seidel element} We now define the relative Seidel element associated to a path $\gamma\in \P_L\mathrm{Ham}(M,\om)$. First, we recall the definition of the \emph{Lagrangian Seidel morphism} given in \cite{HuLalondeLeclercq}.
Consider a Morse-Smale pair $(F,G)$ where $F\in C^{\infty}(N)$ is a Morse function and $G$ is a metric on $N$ such that:
\begin{itemize}
\item[1)] $f_{\pm 1}:=\left.F\right|_{\pm 1}$ are Morse functions on the fibers $L_{\pm 1}$ over $\pm 1$ of $N$;
\item[2)] $Crit f_{+}\cup Crit f_-=Crit F$;
\item[3)] $\max f_-+1<\max f_+$.
\item[4)] there exist neighbourhoods $U_{\pm}\cong (-\epsilon,\epsilon)$ of $\pm 1\in S^1$ trivializing the fiber bundle $N$, with respect to which $\left. F\right|_{U_{\pm}}(t,x)=f_{\pm}(x)\mp \varphi(t)$ for any $(t,x)\in U_{\pm}\times L_{\pm 1}$, and where $\varphi$ is quadratic of index 1 at 0.
\item[5)] we also ask that $\left.G\right|_{U_{\pm}}$ is a product metric $dt^2+G_{\pm}$ and that $( f_{\pm}, G_{\pm})$ are Morse-Smale pairs.
\end{itemize}
Such pairs $(F,G)$ can be chosen generically.
Fix $\tau\in\mathcal{T}(\gamma)$ and a family $J=\{J_z\}_{z\in D^2}$ of $\om$--tamed almost complex structures of $M$. Let $J_P(\tau,J)$ be the corresponding fibered almost complex structure of $P_{\gamma}$. For $[\sigma]\in \widetilde{\H_{rel}}$ and for $x_-\in Crit(f_-)$ and $x_+\in Crit(f_+)$ let
$$ \M^{pearl}(x_-,x_+, [\sigma]; \tau, J, F,G)$$
denote the set of pearl trajectories from $x_-$ to $x_+$ representing the equivalence class of section classes $[\sigma]$.
In particular elements of this moduli space have one $J_P(\tau,J)$--holomorphic section component with boundary on $N$, and possibly many $J_{\pm 1}$--holomorphic disk components with boundary on $L_{\pm 1}$.
For simplicity we will omit the auxiliary data $\tau, J,F$ and $G$ in the notations. This set is a manifold of dimension
$$ \dim \M^{pearl}(x_-,x_+, [\sigma])=|x_-|_N-|x_+|_N+\mu_N([\sigma])-1=|x_-|_L-|x_+|_L+\mu_v([\sigma]).$$
The Lagrangian Seidel morphism is defined to be:
\begin{defn}[\cite{HuLalondeLeclercq}] For $\gamma\in \P_L\mathrm{Ham}(M,\om)$, the Lagrangian Seidel morphism associated to $\gamma$ is an endomorphism
\begin{eqnarray*}S_L(\gamma): R\la Crit_{\star}(f_-)\ra &\sra & R\la Crit_{\star}(f_+) \ra \\
x_- &\to & \sum_{\{[\sigma]\in \widetilde{\H_{rel}}\,:\,|x_+|_L=|x_-|_L+\mu_v([\sigma])\}}\#_{\mathbb{Z}_2} \M^{pearl}(x_-,x_+, [\sigma])\,x_+\,q^{-\mu_v([\sigma])}
\end{eqnarray*}
\end{defn}
This only depends on the homotopy class of paths with fixed endpoints of $\gamma$.
Moreover, since $L$ is monotone with $N_L\geq2$, the Lagrangian Seidel morphism is a chain morphism (with respect to the pearl differential) and is generically well-defined with respect to the data of $J_P$, $F$ and $G$ [\cite{BiranCorneaUniruling},\cite{HuLalondeLeclercq}]. Since it is a chain morphism and since $[L_-]$, the maximum of $Crit(f_-)$, defines a pearl cycle, $S_L(\gamma)([L_-])$ is also a cycle.
\begin{defn} For $\gamma\in \P_L\mathrm{Ham}(M,\om)$, the \emph{Lagrangian Seidel element} associated to $\gamma$ is the homology class \begin{eqnarray*}[S_L(\gamma)([L_-])]:=\left[\sum_{\{[\sigma]\in \widetilde{\H_{rel}}\,:\,|x_+|_L=n+\mu_v([\sigma])\}}\#_{\mathbb{Z}_2} \M^{pearl}([L_-],x_+, [\sigma])\,x_+\,q^{-\mu_v([\sigma])}\right]\in QH_{n}(L_+).
\end{eqnarray*}
\end{defn}
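For orientation (a standard normalization, recorded here as an aside): if $\gamma$ is the constant path at the identity, then $P_{\gamma}\cong D^2\times M$ with $N=S^1\times L$, the Seidel morphism is the identity, and
$$[S_L(\gamma)([L_-])]=[L]\in QH_{n}(L).$$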
\section{Proofs of Theorem \ref{calculelementdeSeidel} and Theorem \ref{lengthminimizing}}
\subsection{Proof of Theorem \ref{calculelementdeSeidel}}
For the proof we follow the steps given by D. McDuff and S. Tolman in \cite{McDuffTolman} where they compute the absolute Seidel element of $S^1$--Hamiltonian manifolds. Let $\sigma_x$ denote the relative section class associated to a fixed point $x\in L$. In the special case where $x\in F_{max}$ we write $\sigma_{max}$ instead. The lemma below follows from Lemma \ref{McDuffTolman1} and Lemma \ref{relationMaslovclassandChernclass}.
\begin{lem}\label{weight=maslov} Let $\gamma\in \P$ with Hamiltonian $H$. If $x\in L$ is a fixed point of the Hamiltonian circle action of $\gamma$, then
$$\mu_v(\sigma_x)=w(x)\quad \text{and} \quad \tau_{\gamma}(\sigma_x)=-H(x).$$
\end{lem}
\begin{proof} Let $\sigma^2_x$ denote the section class in $P_{\gamma^2}$ corresponding to $\sigma_x$. Recall that, by definition, $\gamma^2$ has Hamiltonian $2H$. Thus, by Lemma \ref{McDuffTolman1} we have
$$c_v(\sigma^2_x)=w(x)\quad \text{and} \quad \tau_{\gamma^2}(\sigma^2_x)=-2H(x).$$ By Lemma \ref{relationMaslovclassandChernclass}, $$c_v(\sigma^2_x)=\mu_v(\sigma_x) \quad \text{and}\quad \tau_{\gamma^2}(\sigma^2_x)=2\tau_{\gamma}(\sigma_x),$$ and the two equalities follow.
\end{proof}
Let $\gamma\in\P$, $\tau\in \mathcal{T}(\gamma)$ and let $J_P\in\J(P,\om,\tau,\Omega_{c})$ be constructed from an $S^1$--invariant $J\in \J(M,\om)$ via the doubling procedure, that is, $J_P$ is the pull-back of an $S^1$--invariant almost complex structure on $P_{\gamma^2}$ under the embedding $\iota_2:P_{\gamma}\sra P_{\gamma^2}$. Fix $B\in H_2^D(M,L)$, and consider the moduli space of $J_P$--pseudo-holomorphic disks with no marked points
$$ \overline{\M}(P_{\gamma},\sigma_{max}+B, J_P).$$
This moduli space consists of stable maps representing section classes $\sigma$. Concretely, such a stable map consists of exactly one $J_P$--holomorphic section component, which we call the \emph{root}, while the other components are $J_P$--holomorphic disks contained in fibers of $P_{\gamma}$, which we call \emph{bubbles}. If the root represents the section class $\sigma'\in H_2(P_{\gamma},N)$ and the bubbles represent fiber classes $B_i\in H_2(M,L)$, $i\in A$, we further have that $$\sigma_{max}+B=\sigma'+\sum_{i\in A}B_i.$$
\begin{prop}\label{propositionprincipale}
If $B\neq 0$ and $\om(B)\leq0$ the moduli space $\overline{\M}(P_{\gamma},\sigma_{max}+B, J_P)$ is empty. Furthermore, if $B=0$ then $J_P$ is regular, and the moduli space $\overline{\M}(P_{\gamma},\sigma_{max}, J_P)$ is compact and can be identified with $L\cap F_{max}$.
\end{prop}
\begin{proof} We begin by showing the first assertion. It is sufficient to show that for a $J_P$--holomorphic section with boundary on $N$ representing a section class $\sigma=\sigma_{max}+B$ one has:
\begin{equation}\label{eq:minimalarea}\Omega_{c}(\sigma)\geq \Omega_{c}(\sigma_{max}),
\end{equation}
with equality only if $B=0$. Indeed, suppose \eqref{eq:minimalarea} holds and assume there is a $J_P$--holomorphic section representing $\sigma_{max}+B$ with $B\neq 0$ and $\om(B)\leq 0$. Then
$$\Omega_{c}(\sigma)=\Omega_{c}(\sigma_{max}+B)=\Omega_{c}(\sigma_{max})+\om(B)\leq \Omega_{c}(\sigma_{max}).$$
This is impossible by \eqref{eq:minimalarea} unless $\om(B)=0$. This latter condition implies that $B=0$, which contradicts $B\neq 0$. Let us now prove \eqref{eq:minimalarea}.
Fix a point $[z,p]\in P_{\gamma}$ and consider $w\in T_{[z,p]}P_{\gamma}$. Write $w=h+v$ where $h$ and $v$ are respectively the horizontal and vertical parts of $w$. Choose $c>0$ such that $c> H_{max}$. Then,
\begin{eqnarray} \Omega_{c}(w,J_P w) &=& (\om-dH\wedge dx- dH\wedge dy -\frac{H}{\pi} dx\wedge dy+\frac{c}{\pi}dx\wedge dy)(v+h, Jv+J_0h)\nonumber\\
&=& \om_p(v,Jv)+\frac{(c-H(p))}{\pi}dx\wedge dy(h,J_0h)\nonumber \\
&\geq& \frac{c-H_{max}}{\pi}dx\wedge dy(h,J_0h).\label{minarea}
\end{eqnarray}
where the last inequality holds since $J$ is $\om$--compatible. Since $\frac{1}{\pi}dx\wedge dy$ evaluates to one on the disc of radius one,
$$\Omega_{c}(\sigma)\geq (c- H_{max})=\Omega_{c}(\sigma_{max})$$
for a $J_P$--holomorphic section with boundary on $N$ representing a section class $\sigma=\sigma_{x}+B$, with $x$ some fixed point. Note that equality in \eqref{minarea} only occurs when the vertical part of $w$ vanishes. Hence, equality holds only when $B=0$ and $x\in F_{max}\cap L$.
Next, we show that $\M(P_{\gamma},\sigma_{max}, J_P)$ is compact and coincides with $L\cap F_{max}$. Consider a stable map representing $\sigma$. Such a stable map consists of exactly one root $\sigma'$ and possibly many bubbles representing classes $B_i$, $i\in A$, with positive $\om$--area. It follows that the only stable maps representing a class $\sigma$ such that $$\Omega_{c}(\sigma)\leq(c-H_{max})$$ are the constant sections $\sigma_x$, with $x\in F_{max}\cap L$, which proves the claim.
It remains to show that $J_P$ is regular for $\sigma_{max}$. Let $\B$ denote the set of smooth maps $u: (D^2,\partial D^2)\sra (P_{\gamma},N)$ representing the class $\sigma_{max}$. For $u\in \B$, set
$$\E_u:=C^{\infty}(\Lambda^{0,1}_{J_P}(D^2, u^*TP_{\gamma}))\quad \text{and} \quad \E:=\bigsqcup_{u\in \B}\E_u.$$
We have to show that the linearization of $$\overline{\partial}_{J_P}:\B\sra\E,\quad u\mapsto du+J_P\circ du\circ j$$ is surjective at every $u\in \overline{\M}(P_{\gamma},\sigma_{max}, J_P)$ (at least between suitable completions of the source and the target). Up to completion, this linearization is given by
$$L_{\overline{\partial}_{J_P},u}:C^{\infty}(u^*TP_{\gamma}, u^*TN)\sra C^{\infty}( \Lambda^{0,1}(D^2,u^*TP_{\gamma})).$$
Since we only consider sections, one only needs to verify that the vertical part
$$L^v_{\overline{\partial}_{J_P},u}:C^{\infty}(u^*Vert, u^*(TN\cap Vert))\sra C^{\infty}( \Lambda^{0,1}(D^2,u^*Vert))$$
is surjective.
We show that the partial indices of the holomorphic bundle pair $(u^*TP_{\gamma}, u^*TN)$ are all at least $-1$. We then conclude by applying the results of Oh in \cite{OhRiemann-Hilbert}. Let $u$ be a $J_P$--holomorphic section representing $\sigma_x$ with $x\in F_{max}$.
Since $x$ is a fixed point, $u^*Vert$ reduces to $T_xM\cong \C^n$, $u^*(TN\cap Vert)$ reduces to $T_xL\cong \R^n$ and the restriction of $u$ to $S^1$ defines a loop of Lagrangian subspaces in $\C^n$. Moreover, this loop is given by
$$\left.TL\right|_{u(e^{i2\pi t})}\equiv d\gamma(t) T_xL\subset T_xM.$$
Since the action is semi-free, and since $J_P$ comes from an $S^1$--invariant almost complex structure $J$ of $(M,\om)$, $ d\gamma(t):\C^n\sra \C^n$ takes the following diagonal expression after an appropriate change of basis of $\C^n$:
\begin{equation}
\label{eq:1}
d\gamma(t):=
\left( \begin{array}{cccc} e^{i \pi m_{1} t} & 0 & \cdots & 0 \\
0 & \ddots & \ddots &\vdots \\
\vdots & \ddots & \ddots & 0 \\
0 & \cdots & 0 & e^{i \pi m_n t}
\end{array}\right)
\end{equation}
where $m_1,...,m_n$ are the weights of the action at $x$ and are given by $m_1=...=m_l=0$, $l=\dim F_{max}$, and $m_{l+1}=...=m_{n}=-1$. By projecting on each factor of $\C^n$ the initial Riemann-Hilbert problem splits into a direct sum of 1-dimensional Riemann-Hilbert problems of the form: $$\begin{cases}\overline{\partial} \xi_j(z)=0\,\, \text{on $D^2$} &\\
\xi_j(e^{2\pi i t})\in \R\la e^{i \pi m_{j} t}\frac{\partial}{\partial x_j} \ra &
\text{} \\
\end{cases} $$
where $\xi_j$ denotes the projection of $\xi: D^2\sra \C^n$ to the $j^{th}$ factor, and where $x_j$ are the real coordinates in $\C^n$. In this situation the partial indices coincide with the weights (Maslov indices) of the summands. Here the partial indices are all at least $-1$, and Oh \cite{OhRiemann-Hilbert} proved that regularity holds for holomorphic discs whose partial indices are all at least $-1$, which ends the proof.
\end{proof}
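\begin{rem} As an illustration of the last step (a sketch included here only for the reader's convenience), consider a single summand with weight $m_j$. Writing $\xi_j(z)=\sum_{k\geq 0}c_kz^k$, the boundary condition $\xi_j(e^{2\pi i t})\in e^{i\pi m_j t}\,\R$ amounts to $w^{-m_j}\xi_j(w)=\overline{\xi_j(w)}$ for $|w|=1$, that is
$$\sum_{k\geq 0}c_k\,w^{k-m_j}=\sum_{k\geq 0}\overline{c_k}\,w^{-k}.$$
For $m_j=0$ this forces $c_k=0$ for $k\geq 1$ and $c_0\in\R$, so the solution space is $1$--dimensional, while for $m_j=-1$ it forces all $c_k=0$, so the solution space is trivial. In both cases the partial index is at least $-1$, in accordance with the regularity criterion of \cite{OhRiemann-Hilbert} used above.
\end{rem}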
To end the proof of Theorem \ref{calculelementdeSeidel},
we show the vanishing of all the other terms provided $F_{max}$ is of codimension two.
\begin{prop} If $\mathrm{codim}(F_{max})=2$, then $a_B=0$ for all $B\in\pi_2(M,L)$ with $\mu_L(B) >0$.
\end{prop}
\begin{proof} This is done by a simple dimension argument. Note that for $\sigma=\sigma_{max}+B$, the moduli space $\M^{pearl}([L_-],x_+, [\sigma])$ is empty unless
$$|x_+|=n+\mu_v(\sigma_{max})+\mu_L(B).$$
From Lemma \ref{weight=maslov} and equation \eqref{eq:1} we have
$$n=\dim(F^L_{max})-w_{max}=\dim(F^L_{max})-\mu_v(\sigma_{max}).$$
This implies that
$$\mu_L(B)\leq\mathrm{codim} (F^L_{max}).$$
In particular, it follows from monotonicity of $L$ that if $B$ is representable by a $J$--pseudo-holomorphic disk one must have $$2\leq\mathrm{codim} (F^L_{max}).$$
We conclude that when $F_{max}$ is of codimension exactly 2, there are no contributions in the Seidel element coming from $\sigma_{max}+B$ with $\om(B)>0$.
\end{proof}
\subsection{Proof of Theorem \ref{lengthminimizing}}
The idea here is to adapt M. Akveld and D. Salamon's line of proof for length minimizing exact Lagrangian loops in $\C P^n$ (see \cite{AkveldSalamon}). We will make use of the following general result they showed:
\begin{prop}[M. Akveld, D. Salamon, \cite{AkveldSalamon}, Lemmata 5.2 and 5.3]\label{propositionAkveldSalamon} Let $\gamma\in\P_L\mathrm{Ham}(M,\om)$ with $\gamma\in\P$. Let $A\in H_2(P_{\gamma},N)$ be a section class. Suppose that for any $\tau\in \mathcal{T}^{\pm}(\gamma)$ there exists a family $J=\{J_z\}_{z\in D^2}$ of $\om$--tame almost complex structures on $M$ such that the moduli space $\M(P_{\gamma},A;\tau, \pm J)$ is not empty. Then,
\begin{equation*}\epsilon^+(N)\geq -\la[\tau_0],A\ra\quad\text{and}\quad \epsilon^-(N)\leq -\la[\tau_0],A\ra
\end{equation*}
for any connection 2--form $\tau_0\in\mathcal{T}(\gamma)$.
\end{prop}
We begin by observing that in Proposition \ref{propositionprincipale}, the results are independent of the choice of connection 2--form in $\mathcal{T}(\gamma)$. Note that the arguments in this proposition apply to show that $J_P$ is regular for $\sigma_{min}$, assuming the minimum fixed point set $F_{min}$ to be semi-free. Also, $\M(P_{\gamma},\sigma_{min},J_P)$ is non-empty and coincides with $F_{\min}\cap L$.
With this in mind, one argues as follows. Assume $\tau\in \mathcal{T}^+(\gamma)$. Then, there is a regular $J_P$ such that $\M(P_{\gamma},\sigma_{max}, J_P)$ is non-empty. Similarly, assuming $\tau\in \mathcal{T}^-(\gamma)$, there is a regular $J_P$ such that $\M(P_{\gamma},\sigma_{min}, J_P)$ is non-empty. Let $\tau_0=\tau_{\gamma}$. Then, by Proposition \ref{propositionAkveldSalamon} one has
\begin{eqnarray*}\epsilon(N)&=& \epsilon^+(\tau_{\gamma},N)- \epsilon^-(\tau_{\gamma},N)\\
&\geq& -\la[\tau_{\gamma}],\sigma_{max}\ra+\la[\tau_{\gamma}],\sigma_{min}\ra\\
&\geq & \la[\om-H dx\wedge dy-dH\wedge dx-dH\wedge dy],\sigma_{min}-\sigma_{max} \ra\\
&\geq & -(H_{min}-H_{max})\\
&=& \ell(N)
\end{eqnarray*}
It follows from \cite[Theorem B]{AkveldSalamon} that
$$ \ell(N)\leq\epsilon (N)\leq \nu(N),$$
which completes the proof.
\section{Application to Fano toric manifolds}
\subsection{Toric manifolds: the Delzant construction} The following is taken from \cite{CoxKatz}, \cite{AnnaCannasdaSilva} or \cite{GuilleminSternberg}. Let $\la\cdot,\cdot\ra:\R^{n}\times (\R^{n})^*\sra\R$ denote the standard pairing. Symplectic toric manifolds are compact connected symplectic manifolds $(M^{2n},\om)$ together with an effective Hamiltonian action of $\mathbb{T}^n$ and a choice of corresponding moment map $\mu$. It is well-known that the image $\Delta:=\mu(M)\subset (\R^n)^*$ is a convex \emph{polytope}, meaning that it is an intersection of a collection of affine half-spaces in $(\R^{n})^*$. These half-spaces are determined by vectors $\{v_i\}_{i=1,...,d}$ in $\R^n$ and real numbers $\{a_i\}_{i=1,...,d}$. Explicitly, the polytope is given by:
\begin{eqnarray*} \Delta:=\{f\in (\R^{n})^*| \la f,v_i\ra\geq a_i,\ i=1,...,d\}.
\end{eqnarray*}
The $v_i$'s represent inward-pointing normal vectors to the facets of the polytope, and the faces of $\Delta$ are in bijection with the non-empty sets
$$F_I:=\{f\in (\R^n)^*|\la f,v_i\ra=a_i,\ i\in I\},\quad I\subset[1,d].$$
Symplectic toric manifolds are in 1-1 correspondence with \emph{Delzant polytopes}, i.e. polytopes satisfying:
\begin{itemize}
\item[1)] each vertex has $n$ edges;
\item[2)] the edges at any vertex $p$ are rational, in the sense that they are of the form $p+tf_i$ with $t\in [0,1]$ and $f_i\in \mathbb{Z}^n$, $i=1,...,n$;
\item[3)] at each vertex the corresponding vectors $f_1,...,f_n$ can be chosen to be a $\mathbb{Z}$-basis of $\mathbb{Z}^n$.
\end{itemize}
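For instance (a standard example, recalled here only for illustration), the simplex
$$\Delta_n=\{(x_1,...,x_n)\in(\R^n)^*\ |\ x_i\geq 0,\ x_1+...+x_n\leq \pi\}$$
is a Delzant polytope: at the vertex $0$ the edges are directed along the standard basis vectors $e_1,...,e_n$, while at the vertex $\pi e_j$ one may take the vectors $-e_j$ and $e_i-e_j$, $i\neq j$, and each of these collections is a $\mathbb{Z}$--basis of $\mathbb{Z}^n$. It is the moment polytope of $\C P^n$ equipped with the Fubini--Study form, with the normalization used in Example \ref{Theexample} below.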
The symplectic toric manifold $M$ with moment polytope $\Delta$ can be realized as the symplectic reduction of $(\C^{d},\om_{st})$ by a Hamiltonian action of the torus $\mathbb{T}^{d-n}$. The construction is as follows. Let $\{e_i\}_{i=1,...,d}$ denote the standard basis of $\R^d$. It is easy to see that the map $\pi:\R^d\sra\R^n,\quad e_i\mapsto v_i$ descends to a surjective Lie group morphism:
$$\pi:\mathbb{T}^d\sra \mathbb{T}^n.$$
Let $N:=\ker\pi$. If $\iota:N\sra \mathbb{T}^d$ denotes the inclusion, then the composition of $\iota$ with the standard Hamiltonian action of $\mathbb{T}^d$ on $\C^d$
$$(e^{i\theta_1},...,e^{i\theta_d}).(z_1,...,z_d)=(e^{-2\pi i\theta_1}z_1,...,e^{-2\pi i\theta_d}z_d)$$ gives a Hamiltonian action of $\mathbb{T}^{d-n}$ on $\C^{d}$. Let $\{w_1,...,w_{d-n}\}$ be a basis of $Lie(\ker \pi)$, where $w_i=\sum_{j=1}^d w_i^j e_j$.
Then,
$$\exp(w_i).(z_1,...,z_d)=(e^{-2\pi i w_i^1}z_1,...,e^{-2\pi iw_i^d}z_d)$$
Furthermore, considering the following exact sequence of dualized Lie algebras:
$$0\sra (\R^n)^*\stackrel{\pi^*}{\sra}(\R^d)^*\stackrel{\iota^*}{\sra}(Lie(\ker\pi))^* \sra0,$$
and setting for $j=1,...,d$
$$\rho_j:=\iota^* e^*_j,$$
then the action becomes:
$$\exp(w).(z_1,...,z_d)=(e^{-2\pi i \la\rho_1,w\ra}z_1,...,e^{-2\pi i\la\rho_d,w\ra}z_d).$$
The moment map of this action is then given by the composition $\iota^*\circ\mu_{st}$, where $$\mu_{st}(z_1,...,z_d)=(\pi|z_1|^2,..., \pi |z_d|^2)+(a_1,...,a_d).$$
Explicitly one gets:
\begin{eqnarray*}\iota^*\circ\mu_{st}(z_1,...,z_d)&=& \iota^*(\sum_{i=1}^d(\pi|z_i|^2+a_i)e_i^*)\\
&=& \sum_{i=1}^d(\pi|z_i|^2+a_i)\rho_i\\
&=& \sum_{i=1}^d \sum_{m=1}^{d-n}(\pi|z_i|^2+a_i)w_m^i w_m^*
\end{eqnarray*}
Then $0$ is a regular value for $\iota^*\circ\mu_{st}$. Moreover, $\ker\pi$ acts freely on the compact submanifold $Z:=(\iota^*\circ\mu_{st})^{-1}(0)$. It follows that $$M=\left.(\iota^*\circ\mu_{st})^{-1}(0)\right/N$$
is a compact manifold. Let $\iota_Z:Z\sra \C^d$ denote the inclusion map and $p_M:Z\sra M$ denote the quotient map. Then, by the Marsden-Weinstein theorem, $M$ is equipped with a canonical symplectic structure $\om$ such that:
$$p_M^*\om= \iota_Z^*\om_{st}.$$
With respect to $\om$ the action of the $n$--torus $\T^n=\left.\T^d\right/N$, which leaves $Z$ invariant, is Hamiltonian. The corresponding moment map $\mu$ is defined by
$$\mu_{st}\circ\iota_Z=\pi^*\circ (\mu\circ p_M)$$
and has image $\Delta$. \\
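To illustrate the construction (this is only a sketch of the standard example of $\C P^n$), take $\Delta=\{x_i\geq 0,\ x_1+...+x_n\leq\pi\}$, so that $d=n+1$, $v_i=e_i$ and $a_i=0$ for $i=1,...,n$, while $v_{n+1}=-(1,...,1)$ and $a_{n+1}=-\pi$. The kernel of $\pi:\R^{n+1}\sra\R^n$ is spanned by $w_1=(1,...,1)$, so that $N\cong S^1$ is the diagonal circle of $\mathbb{T}^{n+1}$ and $\rho_j=w_1^*$ for every $j$. Hence
$$\iota^*\circ\mu_{st}(z_1,...,z_{n+1})=\Big(\pi\sum_{j=1}^{n+1}|z_j|^2-\pi\Big)w_1^*,$$
so that $Z=S^{2n+1}$ and $M=\left.S^{2n+1}\right/S^1=\C P^n$, with residual $\T^n$--action and moment image $\Delta$.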
\subsection{Alternative construction of the toric manifold} Here we describe an alternative construction of the toric manifold $M$ as a complex manifold.
Extend the map $\pi:\R^d\sra\R^n$ considered above to a map $$\pi_{\C}:\C^d\sra \C^n.$$
Note that $\pi_{\C}$ sends the standard lattice $\mathbb{Z}^d$ to the set of primitive integral generators of the facets of $\Delta$.
Hence it induces a map between complex tori $\pi_{\C}:\T^d_{\C}\sra \T^n_{\C}$. Let $N_{\C}$ denote the kernel of $\pi_{\C}$ so that we have an exact sequence of complex groups:
$$0\sra N_{\C}\sra\T^d_{\C}\sra \T^n_{\C}\sra 0. $$
Consider now the linear diagonal action $\kappa$ of $\T_{\C}^d $ on $\C^d$ given by
$$\kappa(\exp w).(z_1,...,z_d)=(\exp(w_1)z_1,...,\exp(w_d)z_d).$$
For any subset $I=\{i_1,...,i_k\}\subset \{1,...,d\}$ set
$$\C^d_I:= \{z\in\C^d\ |\ z_i=0\ \text{iff}\ i\in I\}.$$
Note that this set is a $\T^d_{\C}$--orbit and every $\T^d_{\C}$--orbit is actually of this type. Now consider
the following subspace of $\C^d$:
$$\C^d_{\Delta}:= \bigcup_{\{I|F_I\,\textrm{is a face of}\, \Delta\}}\C^d_I.$$
This open subset of $\C^d$ is in fact the biggest subset on which $N_{\C}$ has no singular orbit: indeed, $N_{\C}$ acts freely and properly on $\C^d_{\Delta}$ \cite{GuilleminSternberg}.
The corresponding quotient $\left.\C^d_{\Delta}\right/N_{\C}$ is a compact complex manifold. Moreover, the $\T^d_{\C}$--action on $\C^d_{\Delta}$ induces a $\T^n_{\C}$--action on the quotient.
It is this quotient that corresponds to $M$. The relation between the two constructions is expressed in the following theorem:
\begin{theorem} The manifold $Z$ is contained in $\C^d_{\Delta}$ and the intersection of an $N_{\C}$--orbit with $Z$ is an $N$--orbit.
\end{theorem}
\subsection{The anti-symplectic involution} This involution is the one induced by complex conjugation in $\C^d$. In fact, complex conjugation is well-defined on the subset $\C^d_{\Delta}$. Furthermore it commutes with the action of $N_{\C}$ since for every $w\in \T^d_{\C}$ we have
$$\overline{\kappa(w)(z)}=\kappa(\overline{w})\overline{z}.$$
Thus it defines an involution on the quotient space
$$\tau: M\sra M$$
satisfying
\begin{equation}\label{actionVSinvolution} \tau(w\cdot x)= w^{-1}\cdot \tau (x),\quad \forall x\in M,\quad\forall w\in \T^d.
\end{equation}
That $\tau$ is anti-symplectic and that it preserves the moment map of the $\T^n$--action on $M$ then follows from the fact that complex conjugation is anti-symplectic with respect to the standard symplectic structure and that the moment map associated to the diagonal $\T^d$--action on $\C^d$ is invariant under this conjugation.
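The basic example to keep in mind (recalled only for illustration) is $M=\C P^n$, obtained as above as the quotient of $\C^{n+1}_{\Delta}=\C^{n+1}\setminus\{0\}$ by the diagonal action of $N_{\C}=\C^*$: complex conjugation descends to $\tau([z_0:\dots:z_n])=[\overline{z_0}:\dots:\overline{z_n}]$, whose fixed point set is the real Lagrangian $L=\R P^n$, and relation \eqref{actionVSinvolution} follows from $\overline{e^{-2\pi i\theta}z}=e^{2\pi i\theta}\overline{z}$.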
\subsection{The homology of toric manifolds and their real Lagrangians}
In this section we describe the $\mathbb{Z}_2$--homology rings of the toric manifold $M$ and of its real Lagrangian $L=\textrm{Fix}(\tau)$. We will also explain how these rings are isomorphic, the isomorphism being a degree-doubling ring isomorphism.
\subsubsection{On the homology of the toric manifold}
The homology of the toric manifold $M$ is generated by its \emph{toric divisors}, that is, the complex codimension 1 submanifolds corresponding to the facets of $\Delta$. If $D_1,...,D_d$ denote those divisors then they are geometrically realized as follows
$$D_k= Z\cap \C^d_k. $$
These determine codimension 2 cycles in $M$. Let $Y_1,...,Y_d\in H_{2n-2}(M)$ denote the homology classes of these divisors. Then
$$H_*(M;\mathbb{Z}_2)=\frac{\mathbb{Z}_2[Y_1,...,Y_d]}{P(\Delta)+SR(\Delta)}$$
where $P(\Delta)$ and $SR(\Delta)$ denote the following ideals
$$P(\Delta):=\LA\sum_{k}\la \xi,v_k \ra Y_k\ \big|\ \xi\in (\mathbb{Z}^n)^* \RA$$
and
$$SR(\Delta):=\LA \prod_{i\in I}Y_i| \textrm{ $I\subset [1,d]$ is such that $D_I:=D_{i_1}\cap...\cap D_{i_k}= \emptyset$, $I$ is primitive} \RA$$
where $I$ is \emph{primitive} if for all $i_{m}\in I$, $D_{I\bs \{i_m\}}\neq \emptyset$.
In this setting the Chern class $c_1(M)$ of $M$ is given by the Poincar\'e dual of $Y_1+... +Y_d$.
We should also mention that there is a natural isomorphism between $H_2(M,\mathbb{Z}_2)$ and the set of tuples $A=(a_1,...,a_d)\in \mathbb{Z}^d$ such that
$$\sum_{k}a_k v_k=0.$$
Under this isomorphism the pairing of $A$ with $PD(Y_i)$ (the Poincar\'e dual of $Y_i$) simply coincides with the projection to the $i$--factor of $A$:
$$\la A,PD(Y_i)\ra= a_i.$$
The following result due to Batyrev will be useful:
\begin{theorem}[Batyrev, \cite{Batyrev}]\label{Batyrev} For any primitive $I\subset [1,d]$ there is a unique vector $a_I=(a_1,...,a_d)\in H_2(M,\mathbb{Z}_2)$
such that:
\begin{equation*} \textrm{$a_k=1$ for all $k\in I$},\quad \textrm{$a_k\leq 0$ for $k\notin I$.}
\end{equation*}
\end{theorem}
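As an illustration of these notions (included only as an example), consider $\C P^{2}$ with normals $v_1=e_1$, $v_2=e_2$, $v_3=-(1,1)$. The relation $\sum_k a_kv_k=0$ forces $a_1=a_2=a_3$, so the admissible tuples are generated by $A=(1,1,1)$, the class of a line, which satisfies $\la A,PD(Y_i)\ra=1$ for $i=1,2,3$. The only primitive subset is $I=\{1,2,3\}$: any two of the toric divisors meet, while $D_1\cap D_2\cap D_3=\emptyset$. Accordingly $SR(\Delta)=\LA Y_1Y_2Y_3\RA$, and the Batyrev vector of $I$ is $a_I=(1,1,1)$.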
\subsubsection{The cohomology of the real lagrangian}
Let $g$ be a $\tau$--invariant Riemannian metric on $M$. Let $g$ also denote the restriction of $g$ to $L$. Note that for generic $\xi\in Lie(\T^n)$ the function
$$f_{\xi}: M\sra \R,\quad x\mapsto \la \mu(x),\xi\ra$$
is Morse. Moreover, there exists a subset of second Baire category of $\tau$--invariant metrics such that the pair $(f_{\xi},g)$ is Morse-Smale. Then $Crit( f_{\xi})$ corresponds to the vertices of the Delzant polytope, i.e. the critical points of the moment map. Moreover, for any vertex $p$ the Morse index is given by $$|p|_M=2 \times \#\{\textrm{1--dimensional faces $\psi$ at $p$ such that $\la\psi, \xi\ra<0$}\},$$
hence $f_{\xi}$ is perfect. Let $\left.f_{\xi}\right|_L$ be the restriction of $f_{\xi}$ to $L$. It is not hard to see that
$$Crit \left.f_{\xi}\right|_L =Crit f_{\xi}. $$
Furthermore the restricted pair $(\left.f_{\xi}\right|_L,g)$ is also Morse-Smale. In fact, the inclusion of $L$ into $M$ induces an isomorphism of Morse chain complexes:
\begin{theorem}[Duistermaat \cite{Duistermaat}, Haug \cite{Haug}]\label{theoremhomologielag} The map
\begin{equation}incl:Crit_k \left.f_{\xi}\right|_L\sra Crit_{2k} f_{\xi},\quad p\mapsto p\end{equation}
induces a ring isomorphism between Morse homologies with $\mathbb{Z}_2$--coefficients that doubles the degrees:
$$incl:H_*(L,\mathbb{Z}_2)\stackrel{\cong}{\longrightarrow} H_{2*}(M,\mathbb{Z}_2).$$
\end{theorem}
The restriction to $\mathbb{Z}_2$--coefficients is essential; it is needed to show that $ \left.f_{\xi}\right|_L$ is perfect (see \cite{Haug}). Note that it is easy to find examples where the theorem does not hold for other coefficient rings, for example $\R P^n\subset \C P^n$: over $\mathbb{Z}$ one has $H_1(\R P^2;\mathbb{Z})=\mathbb{Z}/2$ while $H_2(\C P^2;\mathbb{Z})=\mathbb{Z}$, so no degree-doubling isomorphism can exist.
\subsubsection{The quantum cohomology of the real lagrangian} Set $$P_L(\Delta):=incl^{-1} P(\Delta)\quad\text{and}\quad SR_L(\Delta):= incl^{-1}(SR(\Delta)).$$
By Theorem \ref{theoremhomologielag} and from the description of $H^*(M,\mathbb{Z}_2)$ we can write
$$H_*(L;\mathbb{Z}_2)=\frac{\mathbb{Z}_2[X_1,...,X_d]}{\la P_L(\Delta)+SR_L(\Delta)\ra}$$
where $X_j$ is a formal variable representing the homology class of $D_j\cap L$.
Let $I=(i_1,...,i_d)$ be a multi-index of non-negative integers. Then set:
$$X^I:= X_1^{i_1}...X_d^{i_d}.$$
The degree $|I|$ of $X^I$ is naturally $\sum_k i_kd_k$, where $d_k$ stands for the degree of $X_k$. Since the homology of $L$ is generated by the classes of degree $n-1$, the Lagrangian is either \emph{wide} or \emph{narrow} according to \cite{BiranCorneaUniruling}. Luis Haug \cite{Haug} showed that the Floer differential vanishes for the standard complex structure, which happens to be generic, thus proving that real Lagrangians are actually wide:
\begin{theorem}\cite[Theorem A]{Haug} The real Lagrangians $L$ are wide as $d^Q$ generically vanishes. Furthermore, the isomorphism
$QH_*(L;\Lambda_L)\cong H_*(L;\mathbb{Z}_2)\otimes \Lambda_L$
is canonical.
\end{theorem}
By the theorem above each of the $X_i$ defines an element in $QH_*(L;\Lambda_L)$ also denoted $X_i$.
By Theorem \ref{Batyrev}, for any primitive $I\subset [1,d]$ there is a unique vector $a_I=(a_1,...,a_d)\in H_2(M;\mathbb{Z}_2)$
such that:
\begin{equation}\label{relationBatyrev} \textrm{$a_k=1$ for all $k\in I$},\quad \textrm{$a_k\leq 0$ for $k\notin I $}
\end{equation}
From Theorem \ref{theoremhomologielag} the same relations exist in $H_*(L;\mathbb{Z}_2)$. Write $I=(i_1,..., i_l)$ and let $J=(j_1,..., j_m)$ denote the complement of $I$ in $[1,d]$. Set
$$P_L^Q(\Delta):=P_L(\Delta)$$
and
$$SR_L^Q(\Delta):=\LA X_{i_1}\cdots X_{i_l} -X^{|a_{j_1}|}_{j_1}\cdots X^{|a_{j_m}|}_{j_m}q^{-l+\sum^m_{r=1}|a_{j_r}|}\ \big|\ \textrm{ $I=(i_1,...,i_l)\subset [1,d]$, $I$ is primitive} \RA.$$ The remainder of this section is dedicated to showing the following:
\begin{prop}\label{Quantumpresentation}
$$QH(L;\Lambda_L)\cong \frac{\mathbb{Z}_2[X_1,...,X_d][q^{-1},q]}{P_L(\Delta)+SR_L^Q(\Delta)}.$$
\end{prop}
First, write
$$H_*(L;\mathbb{Z}_2)=\frac{\mathbb{Z}_2[X_1,...,X_d]}{\la f_1,...,f_r\ra}$$
where $f_1,...,f_r$ denote the polynomial relations in the $X_i$.
For $j=1,...,r$ let $f^Q_j$ be the polynomial in the variables $X_i$ obtained by replacing the standard product with the quantum product.
The following two results are straightforward adaptations of lemmata from \cite{SiebertTian}. We prove them for convenience:
\begin{lem} The elements $X_1,...,X_d$ generate $QH_*(L;\Lambda_L)$.
\end{lem}
\proof Assume for simplicity that $X_1,...,X_d$ are represented by critical points of a perfect Morse function on $L$. The proof is by decreasing induction on the degree of pure elements, starting at degree $deg=n$. For $deg=n$, since $N_L\geq 2$, the quantum differential $\left.d_Q\right|_{R\la Crit_{n}(f) \ra}$ restricts to $d_0$. Consequently, the unique maximum of $f$ defines an element of $QH_{n}(L;\Lambda_L)$, namely $[L]$.
Assume that pure elements of degree at least $deg$ are generated by $X_1,...,X_d$; we show that every monomial $X^I$ of degree $deg-1$ is generated by the $X_k$. Let $X^{I,Q}$ denote the element obtained by taking the Lagrangian quantum product of the $X_k$ according to the multi-index $I$. By definition of the quantum product we have that:
$$X^{I,Q}=X^I+ \sum_{j\geq 1, |R|\geq deg} \lambda_{R,j} X^{R}t^j,\quad \lambda_{R,j}\in \mathbb{Z}_2.$$
It follows from the induction hypothesis that the $X^R$ in the equation above can be written as quantum products of the $X_k$; hence the conclusion.
\qed\\
Arguing as in the preceding lemma, we obtain that:
\begin{eqnarray*}f^Q_j(X_1,...,X_d) & = & f_j(X_1,...,X_d)+ g^Q_j(X_1,..,X_d)\\
&=& g^Q_j(X_1,..,X_d)
\end{eqnarray*}
since $f_j$ is assumed to be a relation in homology. Thus, the polynomials in the abstract variables $q_1,...,q_d$:
\begin{equation*}
f_j^{[\om]}(q_1,...,q_d):= f^Q_j(q_1,...,q_d)- g^Q_j(q_1,...,q_d)
\end{equation*}
define relations in the quantum homology when we evaluate them at $(X_1,...,X_d)$.
We have the following:
\begin{lem} \label{quantumhomologyrelations}
$$QH_*(L,\Lambda_L)=\left.\mathbb{Z}_2[q,q^{-1}] [X_1,...,X_d]\right/ \la f^{[\om]}_1,...,f^{[\om]}_r\ra.$$
\end{lem}
\proof We have seen that the $f_j^{[\om]}$ define relations in quantum homology. Let $\mathcal{I}$ denote the ideal generated by the $f_j^{[\om]}$, $j=1,...,r$. We show that any polynomial defining a relation in the quantum homology actually belongs to $\mathcal{I}$. Let $P^Q\neq 0$ be such a polynomial, of degree $deg$ (in the abstract variables $q_1,...,q_d$). Then, we can write $P^Q= P^Q_{deg}+R$ where $P^Q_{deg}$ is the degree $deg$ term and where $\deg R>deg$. Since $P^Q$ defines a relation, evaluating at $X_1,...,X_d $ gives:
$$P^Q_{deg}(X_1,...,X_d)=-R(X_1,...,X_d).$$
By definition of the quantum product, $P^Q_{deg}(X_1,...,X_d)$ can be written as a sum of a degree $deg$ polynomial $P_{deg}(X_1,...,X_d)$ (where the product is the intersection product) and a polynomial of degree bigger than $deg$. Thus, $P_{deg}(X_1,...,X_d)=0$ which implies that there is a polynomial function $\phi$ such that $P_{deg}=\phi(f_1,...,f_r)$. Again, replacing the standard product by the quantum product gives:
$$\phi(f_1^{[\om]},...,f_r^{[\om]})=P^Q_{deg}+R',\quad \deg R' >deg. $$
This implies that $P^Q=\phi(f_1^{[\om]},...,f_r^{[\om]})+R-R'$, with $\deg (R-R')>deg$. To finish the proof we do a decreasing induction on the degree.
\qed\\
\proof (Proposition \ref{Quantumpresentation})
Now, we show how to use the formula for relative Seidel morphisms in order to compute $QH_*(L;\Lambda_L)$. For a primitive $I=(i_1,...,i_l)\subset [1,d]$, let $J=(j_1,...,j_m)$ denote its complement in $[1,d]$. Consider the unique vector $a_I=(a_1,...,a_d)\in H_1(L;\mathbb{Z}_2)$
such that:
\begin{equation}\label{relationBatyrev2} \textrm{$a_k=1$ for all $k\in I$},\quad \textrm{$a_k\leq 0$ for $k\notin I $},\quad \sum a_kv_k=0.
\end{equation}
Let $\Lambda^{1/2}_j$ denote the half-turn map associated to $\Lambda_j$, the $S^1$--action generated by the normal to the $j$-th facet of the Delzant polytope. Note that from \eqref{actionVSinvolution} we have $\Lambda^{1/2}_j\in \P_L\mathrm{Ham}(M,\om)$. Thus, in terms of Hamiltonian paths preserving $L$, \eqref{relationBatyrev2} means that:
\begin{equation}\label{relationchemin} (\Lambda^{1/2}_1)^{a_1}\cdots (\Lambda^{1/2}_d)^{a_d}=Id_L.
\end{equation}
Since we are dealing with a torus action, the terms on the left-hand side of \eqref{relationchemin} can be reordered in such a way that we finally get:
\begin{equation}\label{relationchemin2} (\Lambda^{1/2}_{i_1})\cdots (\Lambda^{1/2}_{i_l})= (\Lambda^{1/2}_{j_1})^{-a_{j_1}}\cdots (\Lambda^{1/2}_{j_m})^{-a_{j_m}}=(\Lambda^{1/2}_{j_1})^{|a_{j_1}|}\cdots (\Lambda^{1/2}_{j_m})^{|a_{j_m}|}.
\end{equation}
Observe that the maximum fixed point set of each $\Lambda_j$ is given by the divisor $D_j$, hence is of codimension 2, and both assumptions \emph{A1} and \emph{A2} are verified in this context. It follows from Theorem \ref{calculelementdeSeidel} that $S_L(\Lambda^{1/2}_{j})=X_j\otimes q$. Thus, considering the Lagrangian Seidel element associated to both sides in \eqref{relationchemin2} we have:
\begin{equation*}X_{i_1}\star\cdots \star X_{i_l}\otimes q^l=X_{j_1}^{|a_{j_1}|}\star\cdots\star X_{j_m}^{|a_{j_m}|}\otimes q^{\sum_{r=1}^m |a_{j_r}|}
\end{equation*}
where $\star$ stands for the Quantum Lagrangian product (see \cite{BiranCorneaQuantumHomology}). As a consequence of Lemma \ref{quantumhomologyrelations}, these are the only multiplicative relations in $QH(L;\Lambda_L)$. Furthermore, the additive relations are the same as in standard homology. The presentation of $QH(L;\Lambda_L)$ follows.
\qed\\
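As a consistency check of Proposition \ref{Quantumpresentation} (this is only the simplest instance, included for illustration), consider $(\C P^n,\R P^n)$ with normals $v_i=e_i$, $i=1,...,n$, and $v_{n+1}=-(1,...,1)$. The linear relations $P_L(\Delta)$ identify all the variables, $X_1=\dots=X_{n+1}=:X$, the only primitive subset is $I=\{1,...,n+1\}$ with empty complement $J$, and the generator of $SR_L^Q(\Delta)$ becomes $X_1\cdots X_{n+1}-q^{-(n+1)}$. One therefore gets the single quantum relation
$$X^{n+1}=[L]\otimes q^{-(n+1)},\qquad\text{i.e.}\qquad QH(\R P^n;\Lambda_L)\cong \mathbb{Z}_2[X,q^{\pm1}]/\la X^{n+1}- q^{-(n+1)}\ra.$$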
\begin{rem} \begin{itemize}
\item[$\bullet$]
This presentation is the same as given in \cite[Proposition 5.2]{McDuffTolman} for the toric manifold $(M,\om)$. Let
$\Lambda_M:=\mathbb{Z}_2[Q^{-1},Q]$ with $|Q|=2$, and denote by $QH(M;\Lambda_M)$ the corresponding quantum homology. Then, as above,
$$QH(M;\Lambda_M)\cong\frac{\mathbb{Z}_2[Y_1,...,Y_d][Q^{-1},Q]}{P(\Delta)+SR^Q(\Delta)},$$
where $Y_i$ represents the class of the divisor $D_i$, $P(\Delta)$ stands for the set of linear relations between the divisors, and $SR^Q(\Delta)$ stands for the set of quantum multiplicative relations between the divisors.
It follows directly that there is a ring isomorphism:
$$\psi: QH_*(L;\Lambda_L)\sra QH_{2*}(M;\Lambda_M)$$
such that $\psi(X_i)=Y_i$ and $\psi(q)=Q$. This ring isomorphism is actually induced by the inclusion $incl$ of Theorem \ref{theoremhomologielag}, as was shown by L. Haug in \cite{Haug}.
It is worth noticing that, in the notations above, $\psi(S_L(\Lambda^{1/2}_j))=S(\Lambda_j)$, i.e. relative Seidel elements associated to loops dual to facets are sent to the corresponding absolute Seidel elements under $\psi$.
\item[$\bullet$] Using the $QH(M;\Lambda_M)$-module structure of the quantum homology of $L$ one also has \cite{BiranCorneaUniruling, HuLalonde}
$$S_L(\Lambda_j^{1/2})\star S_L(\Lambda_j^{1/2})= S_L((\Lambda_j^{1/2})^2)=S (\Lambda_j)\odot [L],$$
in other words $S_L(\Lambda_j^{1/2})$ is in some sense a square root of $S (\Lambda_j)$. As was pointed out to me by Fran\c{c}ois Charette, these relations completely determine the $QH(M;\Lambda_M)$-module structure of $QH(L;\Lambda_L)$. Moreover, it is possible to recover the $H(M)$--module structure of $H(L)$. For instance, when $N_L\geq 3$ the quantum product of $X_j$ with itself does not admit any quantum correction term. Hence,
\begin{equation*} S_L(\Lambda_j^{1/2})\star S_L(\Lambda_j^{1/2})=(X_j\otimes q)\star (X_j\otimes q) = (X_j\star X_j) q^2= X_j^2 q^2
\end{equation*}
and since $$S (\Lambda_j)\odot [L]=Y_jQ\odot [L]=(Y_j\odot[L])Q=(Y_j\odot[L])q^2$$ we get $X_j^2 =Y_j\odot[L]$.
\end{itemize}
\end{rem}
We end this section with an example illustrating the results established so far.
\begin{ex}\label{Theexample}
Consider the pair $(\C P^3, \R P^3)$, where $\C P^3$ is equipped with the Fubini-Study form $\om_{FS}$. Let $(M,L)=(\wt{\C P^3}, \wt{\R P^3})$ denote the monotone Lagrangian blow-up of $(\C P^3, \R P^3)$ with symplectic form $\wt{\om}$ (see \cite{Rieser} for the definition). Topologically
$$\wt{\C P^3}\cong\C P^3 \#\overline{\C P}^3\qquad \text{and}\qquad \wt{\R P^3}\cong\R P^3 \#\R P^3.$$
One can also view $\wt{\C P^3}$ as the projectivisation of the rank 2 complex bundle $\mathcal{O}(-1)\oplus \C \sra \Sigma$ where $\Sigma\cong \C P^2$ denotes the exceptional divisor. From this point of view, $\wt{\R P^3}$ is a non-trivial $S^1$--bundle over $\R P^2$. The group $H_2(M;\mathbb{Z}_2)$ is generated by the class $F$ of the fiber of this fibration and the class $E$ of the exceptional curve ($E=L-F$, where $L=[\C P^1]$ is the class of a line). A simple computation shows that $H^D_2(M, L;\mathbb{Z}_2)$ is generated by half of $E$ and half of $F$. We will use the same notations to refer to them.
Now, the symplectic form for the blow-up of weight $\lambda$ is given by $[\wt{\om}]=[\phi^*\om_{FS}]-\pi\lambda^2 e$, where $\phi : \wt{\C P^3}\sra \C P^3 $ is the blowing-down map, and where $e\in H^2(\wt{\C P}^3,\mathbb{Z})$ is the Poincar\'e dual of $\Sigma$.
Monotonicity then forces $\lambda$ to be $\sqrt{2}/2$. The torus $\T^3$ acts in a Hamiltonian way on $\C P^3$ as follows
$$(\theta_1,\theta_2,\theta_3)\cdot [z_0:z_1:z_2:z_3]=[z_0:e^{-2\pi i\theta_1}z_1:e^{-2\pi i\theta_2}z_2:e^{-2\pi i\theta_3}z_3]$$ The moment map of this action is
$$\mu([z_0:z_1:z_2:z_3])=\left(\frac{\pi |z_1|^2}{\sum_{i=0}^3|z_i|^2},\frac{\pi |z_2|^2}{\sum_{i=0}^3|z_i|^2},\frac{\pi |z_3|^2}{\sum_{i=0}^3|z_i|^2}\right)$$
so that the moment polytope is given by
$$\Delta=\{(x_1,x_2,x_3)\in \R^3|0\leq x_i, \,\,x_1+x_2+x_3\leq \pi\}.$$
This action lifts to a Hamiltonian action of $\T^3$ on the blow-up with moment map $\wt{\mu}$. The corresponding moment polytope can be identified with
$$\wt{\Delta}=\{(x_1,x_2,x_3)\in \R^3|0\leq x_1,\,\,0\leq x_2,\,\, 0\leq x_3\leq \pi/2, \,\,x_1+x_2+x_3\leq \pi\}.$$
Moreover, the restriction of $\wt{\mu}$ to $\wt{\R P}^3$ also has image $\wt{\Delta}$. Now, the outward normals to the facets are
$$v_1=(-1,0,0),\,\,v_2=(0,-1,0),\,\, v_3=(0,0,-1),\,\, v_4=(0,0,1),\,\, v_5=(1,1,1).$$
We introduce some more notation. Let $\Lambda_i$, $i=1,...,5$, denote the semi-free Hamiltonian circle actions fixing the facets defined by the $v_i$'s. Let $\Lambda_i^{1/2}$ denote the Hamiltonian path corresponding to half of $\Lambda_i$ and let $X_i$ be formal variables representing the intersections with $L$ of the divisors associated to the facets with normals $v_i$. To compute the quantum homology of $L$ first note that
$$P(\wt{\Delta})=\la X_1=X_2=X_5,\,\, X_3=X_4+X_5 \ra,\qquad SR(\wt{\Delta})=\la X_1X_2X_5=0,\,\, X_4(X_4+X_5)=0 \ra. $$
Setting $X=X_1$ and $Y=X_4$ and applying Proposition \ref{Quantumpresentation} yields:
$$QH(L;\Lambda_L)=\mathbb{Z}_2[X,Y, q^{\pm1}]/\la X^3=Y q^{-2},\ Y(X+Y)=[L]\otimes q^{-2} \ra$$
which is indeed isomorphic to $QH(M;\Lambda_M)$.
It is not hard to see that the product $YX$ has no quantum term. Setting $YX=\partial E$, we then have $Y\star Y=\partial E+[L]\otimes q^{-2}$. Thus, $$S_L((\Lambda_4^{1/2})^2)=(S_L(\Lambda_4^{1/2}))^2=(Y\star Y)\otimes q^2=\partial E\otimes q^2+[L]$$
and we see a lower order term appearing. Note however that the action of $(\Lambda_4)^2$ is not semi-free on the maximum subset.
Finally, we wish to show that lower order terms may appear when the maximum fixed point set is of codimension strictly greater than 2 in $M$. Consider the circle action $\Lambda$ associated to the combination $v_1+v_2+v_4$. The maximum fixed point set is semi-free and corresponds to the point mapped to the intersection $D_1\cap D_2\cap D_4$ under $\wt{\mu}$. Then, we have
\begin{equation*}
S_L(\Lambda)=S_L(\Lambda_1^{1/2})\star S_L(\Lambda_2^{1/2})\star S_L(\Lambda_4^{1/2})= (X\star X\star Y)\otimes q^3.
\end{equation*}
It is not hard to check that $X\star X$ coincides with the intersection product $X\cdot X=\partial F$. In order to compute the Lagrangian quantum product $(X\cdot X)\star Y $, observe that
$$(X\cdot X)\star Y =[pt]+\alpha Y\otimes q^{-2}+\beta X\otimes q^{-2},\qquad \alpha,\beta\in \{0,1\}$$
for dimensional reasons and since $F$ and $E$ are the only effective Maslov 2 classes. By a direct computation one has $\alpha=1$, hence the conclusion.
\end{ex}
\subsection{Lagrangian uniruledness}
Recall that $H_n(L)\otimes\Lambda_L$ embeds in $QH(L;\Lambda_L)$ canonically. We set $Q_-$ to be the complement:
$$Q_-=H_*(\oplus_{k<n} Crit_k(f)\otimes \Lambda_L, d^Q).$$
\begin{lem} \label{unirulingandinvertibles} Let $L\subset M$ be a closed monotone Lagrangian with $N_L\geq 2$. Assume that $L$ is not narrow. If $L$ is not Lagrangian uniruled, then all the invertible elements of $QH(L,\Lambda_L)$ can be written as
$$\lambda[L]+ x,\quad\text{where $\lambda\in \Lambda_L\bs\{0\}$ and $x\in Q_-$}.$$
\end{lem}
\proof Assuming $L$ is not uniruled we show that $Q_-$ is an ideal in $QH(L,\Lambda_L)$. Suppose this is not the case; then there would exist $x\in QH(L,\Lambda_L)$ and $y\in Q_-$ such that $x\star y$ has a term of the type $r[L] t^{\mu(B)}$ with
$$r=\#_2\{\M^{pearl}(x,y;[L], B)\}\neq 0.$$ Note that $B\neq 0$, since for $B=0$ one has $r=0$ unless both $x$ and $y$ have index $n$. Since $B\neq 0$ and $r\neq 0$, for generic data there would be a pearl configuration passing through the maximum and containing a non-constant disk, so $L$ would be Lagrangian uniruled, contradicting our assumption. It follows that $Q_-$ is an ideal. Since the unit $[L]$ cannot belong to $Q_-$ unless $QH(L;\Lambda_L)$ vanishes, any invertible must have such a presentation.
\qed\\
Now we are prepared to prove Theorem \ref{Lagrangianuniruledness}.
\begin{proof}[Proof of Theorem \ref{Lagrangianuniruledness}]
Assume $L$ is not narrow, then the claim follows from Lemma \ref{unirulingandinvertibles}. If $L$ is narrow, then the fundamental class $[L]$ is a $d^Q$--boundary. Since $[L]$ is represented by the unique maximum of some generic Morse function on $L$, this implies that there is a pseudo-holomorphic disk through the maximum, which ends the proof.
\end{proof}
Here is another consequence of the lemma above.
\begin{cor} Any real monotone Lagrangian with $N_L\geq 2$ in a toric manifold is Lagrangian uniruled.
\end{cor}
\proof We have seen that the fundamental class $[L]$ does not appear in any Lagrangian Seidel element associated to a circle action fixing one of the codimension one faces of the Delzant polytope. However, Lagrangian Seidel elements are all invertible. The claim then follows from Lemma \ref{unirulingandinvertibles}.
\qed\\
\end{document}
\begin{document}
\flushbottom
\title{Multi-Omic Data Integration and Feature Selection for Survival-based Patient Stratification via Supervised Concrete Autoencoders\thanks{This preprint has not undergone peer review (when applicable) or any post-submission improvements or corrections. The Version of Record of this contribution was accepted for publication at The 8th International Conference on Machine Learning, Optimization and Data Science -- LOD 2022; please refer to that publication for the final version.}}
\begin{abstract}
Cancer is a complex disease with significant social and economic impact. Advancements in high-throughput molecular assays and the reduced cost for performing high-quality multi-omics measurements have fuelled insights through machine learning. Previous studies have shown promise in using multiple omic layers to predict survival and stratify cancer patients. In this paper, we developed a Supervised Autoencoder (SAE) model for survival-based multi-omic integration which improves upon previous work, and report a Concrete Supervised Autoencoder model (CSAE), which uses feature selection to jointly reconstruct the input features as well as predict survival. Our experiments show that our models outperform or are on par with some of the most commonly used baselines, while either providing a better survival separation (SAE) or being more interpretable (CSAE). We also perform a feature selection stability analysis on our models and notice that there is a power-law relationship with features which are commonly associated with survival. The code for this project is available at: \url{https://github.com/phcavelar/coxae}
\end{abstract}
\section{Introduction}
Given the rapid advance of high-throughput molecular assays, the reduction in cost for performing such experiments and the joint efforts by the community in producing high-quality datasets with multi-omics measurements available, the integration of these multiple omics layers has become a major focus for precision medicine \cite{nicora2020integrated,bode2017precision}. Such integration involves analysis of clinical data across multiple omics layers for each patient, providing a holistic view of underlying mechanisms during development of disease or patient response to treatment.
Methods for multi-omic data integration can be classified into sequential, late, and joint integration approaches, depending on the order of implemented tasks and at what point multi-omics data is integrated \cite{uyar2021multi}.
In sequential integration, each omic layer is analysed in sequence, i.e. one after the other, which was the strategy used by many early approaches.
In late integration methods \cite{poirion2021deepprog,tong_deep_2020,wissel_hierarchical_2022,wang_similarity_2014,cabassi_multiple_2020}, each layer is analysed separately and then the results are integrated, which helps capture patterns that are reproducible between different omics, but makes the method blind to cross-modal patterns.
Finally, in joint integration methods \cite{chaudhary2018deep,zhang2018deep,ronen2019evaluation,asada2020uncovering,lee2020incorporating,uyar2021multi,wissel_hierarchical_2022, chalise_integrative_2017,shen_integrative_2009,meng_multivariate_2014,argelaguet_multiomics_2018,oconnell_rjive_2016}
all omics layers are analysed jointly from the start, with these methods often employing a dimensionality reduction method that maps all layers into a joint latent space representing all the layers \cite{cantini2021benchmarking}, making it possible to analyse cross-modal patterns that may help identify how multiple layers function and interact to affect a biological process.
All of the aforementioned approaches can be linked to particular challenges in the machine learning task. First, many of the datasets are sparse, often with some omics features missing between samples or studies, or even entire omics layers being unavailable for some patients. Second, molecular assay data are often highly complex, comprising thousands to tens of thousands of different features for each omics layer. Third, even with the reduced cost of profiling, generating data may still be prohibitively expensive and specialised, limiting the number of publicly available datasets for analysis. Fourth, although experiments might be performed using the same assaying technologies and be collected with the same system in mind, there can be varying experimental conditions between datasets \cite{korsunsky2019fast} and batch effects may be present within the same dataset \cite{koch2018beginner}, which must be taken into account when analysing data, especially when analysis is attempted across multiple datasets. Finally, recent advances have allowed profiling on the level of single cells, which increases dataset size dramatically, posing challenges to available methodologies.
In this context there are many end-tasks for which we might use multi-omic datasets: (Stratification) we might analyse which patients group together and examine their clinical information to see what patterns emerge, including whether a patient is considered high or low risk, whether a patient's expression patterns are distinct from others', etc.; (Classification) we might stratify patients into previously known groups, such as whether a patient belongs to a previously defined subtype, or whether they might respond to a certain drug or treatment, etc.; (Regression) we can also try to infer directly the level to which a patient might react to something, such as their response to a treatment or drug, or how long a patient is expected to survive given their condition; (Biomarker identification) given any of the aforementioned tasks, we might also try to interpret why patients are stratified or classified in a certain way, or why they will react at the predicted level, which generally entails identifying which biomarkers (that is, which features) are associated with these responses.
The benefits of integrated datasets include more accurate patient stratification (e.g. high/low risk), disease classification or prediction of disease progression. All of those may suggest better treatment strategies, resulting in better patient outcomes. Additionally, the combined information may also be used for biomarker identification supporting further research.
In this paper we provide several contributions to the field of survival-based autoencoder (AE) integration methods, which can be summarized in the following points:
\begin{enumerate*}
\item In Subsection~\ref{ssec:coxsae}, we develop a simpler Supervised Autoencoder (SAE) as an alternative to the HierSAE model \cite{wissel_hierarchical_2022} for data integration, a method which provides stable and efficient survival separation, and use it as an upper-bound baseline for performance-testing our Concrete Supervised Autoencoder.
\item In Subsection~\ref{ssec:concretesae}, we propose the Concrete Supervised-Autoencoder (CSAE), building up on Concrete Autoencoders \cite{balin2019concrete}, a method for supervised feature selection, which we showcase with the case study of survival-based feature selection.
\item In Subsection~\ref{ssec:pipeline}, we provide a testing framework more stringent than that used by previous work, with which we compare our results against a standard PCA pipeline as well as the more advanced Maui \cite{ronen2019evaluation} method.
\item With our testing framework, in Section~\ref{sec:results}, we show that the Concrete Supervised Autoencoder has achieved performance on par with that of more complex baselines, while simultaneously being more interpretable, and also provide, to the best of our knowledge, the first feature importance analysis with multiple runs on a model of such a family.
\end{enumerate*}
\section{Related Work}
\subsection{Autoencoders for Multi-Omics}
After an initial publication in 2018 showing the use of Autoencoders (AEs) for dimensionality reduction in multi-omic datasets \cite{chaudhary2018deep}, there has been a wave of re-applications of this technique in cancer risk separation, prognostication, and biomarker identification \cite{chaudhary2018deep,zhang2018deep,asada2020uncovering,lee2020incorporating,poirion2021deepprog}. All of the applications of these methods share the same pipeline, and most \cite{chaudhary2018deep,zhang2018deep,asada2020uncovering,poirion2021deepprog} use the same techniques up to the AE optimisation, having only minor differences in the hyperparameters and loss functions. Only \cite{lee2020incorporating} has a major difference in their model, using Adam instead of SGD as the optimiser. Another publication in this vein is \cite{poirion2021deepprog}, which does not perform early integration, instead opting to project the input for each omics layer separately, concatenating features from different omics layers after Cox-PH selection, and using a boosting approach to train and merge multiple models into a single predictor. One of the main differences between these methods and ours is that their AEs are used to perform risk subgroup separation on the whole dataset, which is then used as ground truth for another classification model, whereas our pipeline is entirely cross-validated.
In \cite{ronen2019evaluation} a method was proposed using a Variational Autoencoder (VAE), dubbed Maui, to learn reduced-dimensionality fingerprints of multiple omics layers for colorectal cancer types, showing that their method not only correctly mapped most samples into the existing subtypes, but also identified more nuanced subtypes, while still keeping a level of interpretability by relating input features to embedding features through correlation. The same group expanded their analysis in a pan-cancer study \cite{uyar2021multi}, changing their interpretability approach to consider the absolute value of the multiplication of the neural path weights for each input-fingerprint feature pair. This interpretability is one of the many methodological differences that set these works apart from the aforementioned AE approaches based on \cite{chaudhary2018deep}. These VAE-based works also use the fingerprints to cluster samples into different risk subgroups and for hazard regression. The main differences with our proposed framework are the type of AE used (VAE instead of AE), that our models are supervised with a Cox loss, and that our Concrete Supervised Autoencoder uses a different type of encoding function.
\subsection{Supervised Autoencoders}
One can also perform Cox regression with neural networks \cite{katzman2018deepsurv,ching2018cox,huang2019salmon}, which implies that one can add a hazard-predicting neural network block on top of an Autoencoder's fingerprints. Independently from our Cox-SAE model, presented in Subsection~\ref{ssec:coxsae}, two other works developed similar techniques. The authors of \cite{tong_deep_2020} follow the same principle of performing Cox-PH regression on the fingerprints generated by the autoencoder. However, the main difference is that the integration is done at the fingerprint level -- that is, they perform dimensionality reduction through the autoencoder as normal, and then either concatenate the fingerprints to perform Cox-PH regression
(Figure~4 of their paper)
, or they do cross-omics decoding, with the Cox-PH loss being calculated on the average of both generated fingerprints
(Figure~5 of their paper)
. They also limit themselves to 2-omics integration. A recent paper also improved on this idea by proposing an autoencoder which tries to reconstruct the concatenation of the fingerprints generated through the other encoders \cite{wissel_hierarchical_2022}
, as seen in Figure~1D of their paper,
while performing a 6-omics integration also using clinical data.
\subsection{Concrete Autoencoders}
Recently, the efficacy of using a concrete selection layer as the encoder of an autoencoder was shown \cite{balin2019concrete}, dubbing this model the Concrete Autoencoder and providing tests with many different feature types, including gene expression as an alternative to the ``943 landmark genes'' \cite{lamb_connectivity_2006}, as well as mice protein expression levels. A concrete selection layer \cite{maddison_concrete_2017} is an end-to-end differentiable feature selection method that uses the reparametrisation trick \cite{kingma_auto-encoding_2014} to provide a continuous approximation of discrete random variables, which Balin et al. used in their autoencoder model with an exponentially decreasing temperature during training to provide a smooth transition from random feature selection to discrete feature selection \cite{balin2019concrete}.
\section{Methods}
\subsection{Datasets}
We wanted to test our models on open, high-quality cancer data with multiple omics layers and survival information. The TCGA datasets fit these criteria, being used as a baseline testing dataset for many developed methods, including the most relevant related work \cite{chaudhary2018deep,ronen2019evaluation,uyar2021multi,poirion2021deepprog,tong_deep_2020,wissel_hierarchical_2022}. We use the datasets provided by \cite{wissel_hierarchical_2022} and described in Table~\ref{tab:coxae-dsets}, following the same preprocessing steps; however, we use our own set of splits for cross-validation, as we perform 10 repeats of 10-fold cross-validation, compared to the 2 repeats of 5-fold cross-validation in the initial work.
\begin{table*}
\centering
\begin{tabular}{lrrrrrrrrrrr}
\toprule
Dataset & Samples & Clinical & GEx & CNV & Methylation & $\mu$RNA & Mutation & RPPA & Total & Used\\
\midrule
BLCA & 325 & 9 & 20225 & 24776 & 22124 & 740 & 16317 & 189 & 84380 & 4938 \\
BRCA & 765 & 9 & 20227 & 24776 & 19371 & 737 & 15358 & 190 & 80668 & 4936 \\
COAD & 284 & 16 & 17507 & 24776 & 21424 & 740 & 17569 & 189 & 82221 & 4945 \\
ESCA & 118 & 17 & 19076 & 24776 & 21941 & 737 & 9012 & 193 & 75752 & 4947 \\
HNSC & 201 & 16 & 20169 & 24776 & 21647 & 735 & 11752 & 191 & 79286 & 4942 \\
KIRC & 309 & 14 & 20230 & 24776 & 19456 & 735 & 9252 & 189 & 74652 & 4938 \\
KIRP & 199 & 5 & 20178 & 24776 & 21921 & 738 & 8486 & 190 & 76294 & 4933 \\
LGG & 395 & 15 & 20209 & 24776 & 21564 & 740 & 10760 & 190 & 78254 & 4945 \\
LIHC & 157 & 3 & 20078 & 24776 & 21739 & 742 & 8719 & 190 & 76247 & 4935 \\
LUAD & 338 & 11 & 20165 & 24776 & 21059 & 739 & 16060 & 189 & 82999 & 4939 \\
LUSC & 280 & 20 & 20232 & 24776 & 20659 & 739 & 15510 & 189 & 82125 & 4948 \\
OV & 161 & 17 & 19064 & 24776 & 19639 & 731 & 8347 & 189 & 72763 & 4937 \\
PAAD & 100 & 26 & 19932 & 24776 & 21586 & 732 & 9412 & 190 & 76654 & 4948 \\
SARC & 190 & 45 & 20206 & 24776 & 21724 & 739 & 8385 & 193 & 76068 & 4977 \\
SKCM & 238 & 3 & 20179 & 24776 & 21635 & 741 & 17731 & 189 & 85254 & 4933 \\
STAD & 304 & 7 & 16765 & 24776 & 21506 & 743 & 16870 & 193 & 80860 & 4943 \\
UCEC & 392 & 24 & 17507 & 24776 & 21692 & 743 & 19199 & 189 & 84130 & 4956 \\
\bottomrule
\end{tabular}
\caption{Number of features in each of the used TCGA datasets. The ``Used'' column indicates how many features we expect the models to use after the second pipeline step, which involved selecting the top 1000 features for each omics layer. All datasets were used as preprocessed and made available by \cite{wissel_hierarchical_2022}.}
\label{tab:coxae-dsets}
\end{table*}
Some points raised in the literature about these datasets are worth reiterating here. The LUSC and PRAD datasets were considered to be among the hardest: ``(...) As the default, we use 10 models with 80\% of original training samples to construct all the cancer models, except for LUSC and PRAD which we use 20 models since they are more difficult to train. (...)'' \cite{poirion2021deepprog}. Also, the combination of clinical factors and gene expression is reported to perform better than using multiple omics layers when simpler models are used \cite{wissel_hierarchical_2022}.
\subsection{Evaluation and Metrics}
\subsubsection{Concordance Index}
The most commonly used quantitative metric for both Survival Regression and Survival Stratification is the Concordance Index (C-Index), which can be seen as a generalisation of the AUC metric to regression and is interpreted similarly, with a C-Index of 0 representing perfect anti-concordance, 1 representing perfect concordance, and 0.5 being the expected result from random predictions. The metric is calculated by counting the number of times the model predictions satisfy $f(x_i) > f(x_j)$ whenever $y_i > y_j$, while also handling censored data, since if a value $y_j$ is censored it is less certain that $y_i$ is in fact greater than $y_j$. That is, given a set of features $X$ to which a function $f$ is applied, and the ground truth consisting of both the set of event occurrences $E$ and the drop-out times $Y$, the metric is defined as:
\begin{equation*}
\operatorname{CI}(f(X),Y,E) =
\frac{
\operatorname{CP}(f(X),Y,E) +
\frac{
\operatorname{TP}(f(X),Y,E)
}{
2
}
}{
\operatorname{AP}(f(X),Y,E)
}
\end{equation*}
Where $\operatorname{CI}(f(X),Y,E)$ is the concordance index, $\operatorname{CP}(f(X),Y,E)$ is the number of correct pairs, $\operatorname{TP}(f(X),Y,E)$ the number of tied pairs, and $\operatorname{AP}(f(X),Y,E)$ the number of admissible pairs. An admissible pair is one in which both events were observed, or in which a single event $e_i$ was observed and $y_i \leq y_j$. The numbers of correct and tied pairs are taken only from the admissible pairs. To the best of our knowledge, none of the related work has the entire pipeline validated as we show here, with most related work normalising the whole dataset before the pipeline \cite{chaudhary2018deep,ronen2019evaluation,tong_deep_2020,wissel_hierarchical_2022}.
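For concreteness, a minimal (quadratic-time) sketch of this computation is given below. In practice an existing implementation, such as the one provided by the lifelines package, would be used; the function below and the convention that larger predictions correspond to longer survival simply follow the definition above and are illustrative rather than taken from any specific library.
\begin{verbatim}
def concordance_index(pred, time, event):
    # C-index following the definition above: larger pred ~ longer survival.
    # A pair with time[i] <= time[j] is admissible when both events were
    # observed, or when only the earlier sample's event was observed.
    correct, tied, admissible = 0, 0, 0
    n = len(pred)
    for i in range(n):
        for j in range(n):
            if i == j or time[i] > time[j] or (time[i] == time[j] and i > j):
                continue  # visit each unordered pair once, i being the earlier time
            if not event[i]:
                continue  # earlier sample censored: pair is not admissible
            admissible += 1
            if pred[i] == pred[j]:
                tied += 1
            elif (pred[i] < pred[j]) == (time[i] < time[j]):
                correct += 1
    return (correct + tied / 2) / admissible
\end{verbatim}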
\subsubsection{Qualitative Analysis}
We can also perform a qualitative analysis of the models by analysing how well the subgroups produced by each method separate in terms of expected survival. One way to qualitatively assess a model for the Stratification task is to fit Kaplan-Meier (KM) curves for each subgroup the model stratifies and then analyse the behaviour of each subgroup. This has been done in many of the related works, which report the KM curves for all the samples, accompanied by the logrank p-values for the subgroup separation \cite{chaudhary2018deep,ronen2019evaluation,uyar2021multi,poirion2021deepprog}.
\subsection{Main Testing Pipeline} \label{ssec:pipeline}
We followed the common and well-established practice of cross-validating the whole pipeline. During our preliminary testing, performing scaling only on training samples versus on the whole dataset accounted for a drastic performance change. Also, analysing logrank p-values and concordance indexes on the whole dataset generally means that the logrank p-values will be significant merely because the training dataset is generally larger than the test dataset, skewing the results towards already-seen data. The testing framework we adopt solves both these issues, and it is one of the biggest methodological differences between our work and some previous work \cite{chaudhary2018deep,zhang2018deep,asada2020uncovering,lee2020incorporating,poirion2021deepprog,ronen2019evaluation,uyar2021multi}, which either provide external validation through other cohorts and/or validate piecewise.
\begin{figure*}
\caption{A diagram showing the pipeline used in our testing framework.}
\label{fig:pipeline}
\end{figure*}
As part of our pipeline we perform 6 steps, shown with letters A to F in Figure~\ref{fig:pipeline}; a code sketch of the full pipeline is given after the list below:
\begin{enumerate*}[label=(\Alph*)]
\item First perform per-omics feature selection by selecting the $k$ most variable features from each omics layer, as was done in previous work \cite{ronen2019evaluation,tong_deep_2020,poirion2021deepprog,uyar2021multi};
\item After that, performing feature scaling by z-scoring the selected features, differing here from most related work in that we fit the feature scaling only on the samples available during training;
\item Then, performing feature compression (i.e. integration, selection, or linear combination) through the model in question;
\item After this, performing Cox-PH univariate feature selection to select those features that are considered to be relevant for survival; and then either
\item Performing 2-clustering (with a clustering algorithm that allows for clustering new inputs) on this integrated dataset, which can be used for survival subgroup separation and differential expression analyses;
\item Or performing hazard prediction, which can be used to rank patients with regards to survival probability, and is also the endpoint from which we can calculate C-indexes for the pipeline.
\end{enumerate*}
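A minimal sketch of one fold of this pipeline, instantiated with the PCA variant of step (C), is given below. It relies on scikit-learn and lifelines; the function name, its exact arguments, and any defaults not stated in this section are illustrative rather than the exact released implementation.
\begin{verbatim}
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from lifelines import CoxPHFitter

def fit_fold(omics, time, event, k=1000, d=128, p_thresh=0.05):
    # omics: dict mapping layer name -> (n_samples, n_features) training array
    # (A) per-omics selection of the k most variable features
    idx = {name: np.argsort(X.var(axis=0))[-k:] for name, X in omics.items()}
    X = np.concatenate([omics[name][:, idx[name]] for name in omics], axis=1)
    # (B) z-scoring fitted on the training samples only
    scaler = StandardScaler().fit(X)
    Z = scaler.transform(X)
    # (C) feature compression / integration (PCA stands in for the AE models)
    pca = PCA(n_components=d).fit(Z)
    F = pca.transform(Z)
    # (D) univariate Cox-PH selection of survival-relevant fingerprints
    keep = []
    for j in range(F.shape[1]):
        df = pd.DataFrame({"f": F[:, j], "t": time, "e": event})
        cph = CoxPHFitter().fit(df, duration_col="t", event_col="e")
        if cph.summary.loc["f", "p"] < p_thresh:
            keep.append(j)
    keep = keep or list(range(F.shape[1]))  # fall back on all fingerprints
    # (E) 2-clustering for risk subgroup separation
    km = KMeans(n_clusters=2, n_init=10).fit(F[:, keep])
    # (F) multivariate Cox-PH for hazard prediction / C-index computation
    df = pd.DataFrame(F[:, keep], columns=[f"f{j}" for j in keep])
    df["t"], df["e"] = time, event
    cox = CoxPHFitter().fit(df, duration_col="t", event_col="e")
    return idx, scaler, pca, keep, km, cox
\end{verbatim}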
We ran all of the experiments on an HPC cluster, creating a single process for each dataset and using the same consistent seeds across algorithm repetitions to ensure that the same 10-fold cross-validation splits were used for all models. For the BRCA and STAD datasets we ran 5 repetitions of 10-fold cross-validation due to limitations in our compute budget, and for all other datasets we used 10 repetitions of 10-fold cross-validation. For the BRCA, STAD and KIRP datasets we ran the experiments with 16000MB of RAM available, and for all other datasets we made 8000MB of RAM available. All experiments were run with 16 cores available to the program.
All models use the following settings, corresponding to the steps in Figure~\ref{fig:pipeline} (a configuration summary is given after this list):
\begin{enumerate*}[label=(\Alph*)]
\item $k\leq1000$ for feature selection on each omics layer, which gives us the ``Used'' column in Table~\ref{tab:coxae-dsets};
\item We then proceed to perform feature scaling using the mean and standard deviation available in the training dataset for each fold;
\item Then, we use as a default 128 target ``fingerprint'' features for all models, and on traditional autoencoder-based models we choose 512 neurons on the hidden layer to give us roughly 10x, and then a further 5x, compression of the input feature size. The Maui model was trained for 400 epochs (taken as a default from their codebase) whereas our models were trained for 256 epochs, with the Adam optimiser using $0.01$ as the learning rate and $0.001$ as the l2 normalisation weight. All of the AE models implemented by us used a $0.3$ dropout rate and Gaussian noise with zero mean and $0.2$ standard deviation added to the input features during training, and we used rectified linear units as the nonlinearity on all intermediate layers. We used the same temperature settings provided in \cite{balin2019concrete} for our concrete selection layers, starting with a temperature of $10$ and ending with a temperature of $0.1$;
\item Cox-PH univariate ``fingerprint'' feature selection is done with a significance threshold of $p<0.05$, falling back on using all of the fingerprints if no fingerprint is identified as significant for survival;
\item 2-clustering was done with KMeans with 10 initialisations, using the best in terms of inertia, with a maximum of 300 iterations and a tolerance of $0.001$;
\item All Cox-Regression was done non-penalised, unless the model failed to converge, in which case we did Cox-Regression with $0.1$ penalisation.
\end{enumerate*} Furthermore, if a model fails to run on any fold, we drop that value. Since we perform 10 repetitions of 10-fold cross-validation, each model has at least some results for each dataset, except the PCA model, which failed to produce any results on two datasets due to convergence problems.
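For reference, these settings can be collected into a single configuration object; the key names below are illustrative and do not necessarily match the identifiers used in the released code.
\begin{verbatim}
CONFIG = {
    "top_k_per_omics": 1000,          # step (A)
    "n_fingerprints": 128,            # step (C)
    "hidden_units": 512,              # AE/SAE pyramidal hidden layer
    "epochs": 256,                    # Maui keeps its default of 400
    "optimizer": "adam",
    "learning_rate": 1e-2,
    "l2_weight": 1e-3,
    "dropout": 0.3,
    "input_noise_std": 0.2,
    "concrete_T_start": 10.0,
    "concrete_T_end": 0.1,
    "coxph_p_threshold": 0.05,        # step (D)
    "kmeans": {"n_clusters": 2, "n_init": 10, "max_iter": 300, "tol": 1e-3},
    "cox_penalizer_fallback": 0.1,    # step (F), used only if the fit fails
}
\end{verbatim}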
\subsection{Cox-Supervised Autoencoder} \label{ssec:coxsae}
Many methods available in the literature have used, or attempted to use, autoencoders to perform dimensionality reduction and then select survival-relevant features from the autoencoder fingerprints through Cox-PH regression \cite{chaudhary2018deep,zhang2018deep,ronen2019evaluation,asada2020uncovering,lee2020incorporating,poirion2021deepprog,uyar2021multi}. We argue that the implicit assumption behind an autoencoder loss function is insufficient to provide features relevant for survival, because feature combinations that are good at predicting other features are not necessarily good for survival prediction; this argument is supported by preliminary tests in which non-trained models performed just as well as trained models. To address this, in this section we propose our independently developed Cox-Supervised Autoencoder (SAE).
To address the lack of an inductive bias towards survival, we introduce a Cox-PH model inside the neural network as an additional supervision loss, so that the model learns not only to create codes which are good at reconstructing the input, but also codes that are indicative of survival. Since a Cox-PH model is end-to-end differentiable, this is easily done by simply adding a Cox-PH model on the generated fingerprints and performing Cox-PH regression with regards to the input survival times and observed events. We can see a schematic overview of such a model in Figure~\ref{fig:coxae-diagram}.
\begin{figure}
\caption{A diagram visualising how a Cox-Supervised Autoencoder (SAE) model works. We use the multi-omics input $x$ to generate both the encoding $z$ and the reconstruction $\hat{x}$.}
\label{fig:coxae-diagram}
\end{figure}
In mathematical terms, then, we would have a model composed of the three neural network blocks shown in Figure~\ref{fig:coxae-diagram}: an encoder $E$, which takes the input $x$ and produces a set of fingerprints $z$ from which a decoder $D$ produces a reconstruction of the input $\hat{x}$, and finally a linear layer $C$ which uses the same fingerprints $z$ to produce a log-hazard estimate $\log(h)$ for each patient. We then perform gradient descent on a loss $L$ which is composed not only of a reconstruction loss $L_{rec}(x,\hat{x})$ and a normalisation loss $L_{norm}(E,D,C)$ on the weights of $E$, $D$, and $C$, but also the Cox-PH regression loss $L_{cox}(\log(h),t,e)$ using the information available about the survival time $t$ and the binary information $e$ of whether the event was observed or censored, giving us the equation below:
\begin{equation}\label{eq:sae}
\begin{aligned}
L(x,t,e,E,D,C) = &L_{rec}(x,D(E(x))) +
\\&
L_{cox}(C(E(x)),t,e) +
\\&
L_{norm}(E,D,C)
\end{aligned}
\end{equation}
Here we use the Cox-PH loss from \cite{katzman2018deepsurv} as implemented by the pycox library (\url{https://github.com/havakv/pycox}), version 0.2.3, which, assuming that $x$, $t$, and $e$ are sorted by decreasing $t$ and that we have $k$ examples, is defined as:
\begin{equation}
\begin{aligned}
L_{cox}(\log(h),t,e) &= \frac{-\sum_{1 \leq i \leq k} e_i\bigl(\log(h_i) - \log(g_i) - \gamma\bigr)}{\sum_{1 \leq i \leq k} e_i}, \\
g_i &= \sum_{1 \leq j \leq i} e^{\log(h_j) - \gamma}, \\
\gamma &= \max(\log(h))
\end{aligned}
\end{equation}
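A PyTorch sketch of this loss, following our understanding of pycox's sorted Cox-PH loss, is shown below; the variable names are ours, and in practice the installed pycox version's own implementation should be preferred.
\begin{verbatim}
import torch

def cox_ph_loss_sorted(log_h, events, eps=1e-7):
    # Assumes samples are sorted by decreasing survival time, so that the
    # cumulative sum up to position i runs over the risk set of sample i.
    log_h = log_h.view(-1)
    events = events.view(-1).float()
    gamma = log_h.max()                                  # log-sum-exp stabilisation
    log_g = log_h.sub(gamma).exp().cumsum(0).add(eps).log()
    return -log_h.sub(log_g + gamma).mul(events).sum().div(events.sum())
\end{verbatim}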
For the reconstruction, we chose the Mean Square Error (MSE) loss, due to its symmetry, as below:
\begin{equation}
L_{rec}(x,\hat{x}) = \frac{\sum_{1 \leq i \leq k}\sum_{1 \leq j \leq d} (x_{i,j}-\hat{x}_{i,j})^2}{k}
\end{equation}
And, finally, we use the L2 norm of the model weights as our normalisation loss, using the Frobenius norm $||\cdot||_F$ and a hyperparameter $\lambda$ to control how much of the norm is applied:
\begin{equation}
L_{norm}(E,D,C) = \lambda \sum_{w \in E,D,C} ||w||^2_F
\end{equation}
Note that in our definitions here, the autoencoder receives as input all of the omics layers at the same time, much like many models in the literature \cite{chaudhary2018deep,ronen2019evaluation,uyar2021multi}, and we argue that this provides integration, since all of the fingerprints may be composed of combinations of features from different omics levels. This is quite different from models where each omics layer is used as input to a separate autoencoder and the fingerprints are then concatenated \cite{poirion2021deepprog,tong_deep_2020}, or otherwise combined through pooling \cite{wissel_hierarchical_2022}, or even through a hierarchical autoencoder \cite{wissel_hierarchical_2022}, which only then de facto integrate the omics layers through the separately-compressed layer fingerprints, via a much more complex procedure.
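To make the construction concrete, a minimal PyTorch sketch of the SAE is given below, with layer sizes and regularisation following the defaults stated earlier. It is an illustrative simplification rather than the exact released implementation, and it assumes pycox exposes the Cox-PH loss discussed above (the sketch from the previous listing can be substituted otherwise).
\begin{verbatim}
import torch
import torch.nn as nn
from pycox.models.loss import cox_ph_loss  # or the sketch given earlier

class CoxSAE(nn.Module):
    def __init__(self, n_in, hidden=512, n_fingerprints=128,
                 dropout=0.3, noise_std=0.2):
        super().__init__()
        self.noise_std = noise_std
        self.encoder = nn.Sequential(
            nn.Dropout(dropout), nn.Linear(n_in, hidden), nn.ReLU(),
            nn.Linear(hidden, n_fingerprints), nn.ReLU())
        self.decoder = nn.Sequential(
            nn.Linear(n_fingerprints, hidden), nn.ReLU(),
            nn.Linear(hidden, n_in))
        self.cox_head = nn.Linear(n_fingerprints, 1, bias=False)  # the block C

    def forward(self, x):
        if self.training:                 # Gaussian input noise, training only
            x = x + self.noise_std * torch.randn_like(x)
        z = self.encoder(x)               # fingerprints
        return z, self.decoder(z), self.cox_head(z)

def sae_loss(model, x, t, e):
    # Reconstruction + Cox-PH terms of the SAE loss; the L2 term is delegated
    # to the optimiser's weight_decay (the l2 normalisation weight above).
    z, x_hat, log_h = model(x)
    rec = ((x - x_hat) ** 2).sum(dim=1).mean()
    return rec + cox_ph_loss(log_h.view(-1), t, e)
\end{verbatim}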
\subsection{Concrete Supervised Autoencoder} \label{ssec:concretesae}
Another possible point of concern for many multi-omics analysis pipelines is that using neural-network-based models can lead to less interpretable results. To address this, we use the Concrete Autoencoder proposed by \cite{balin2019concrete} to build a Concrete Supervised Autoencoder (CSAE), using the same concrete selection layer and reparametrisation tricks as described previously \cite{kingma_auto-encoding_2014,maddison_concrete_2017,balin2019concrete}. We can see a diagrammatic representation of a concrete selection layer and the Gumbel distribution used in its training in Figure~\ref{fig:concrete-selection}.
\begin{figure*}
\caption{A diagrammatic representation of a Concrete Selection Layer's neuron (\subref{subfig:concrete-selection-layer}) and of the Gumbel distribution used in its training.}
\label{fig:concrete-selection}
\end{figure*}
Thus, our encoder follows the same exponential decay rate and model reparametrisation described previously \cite{balin2019concrete}, giving, for each selection neuron, a sample in $d$ dimensions with parameters $\alpha \in \mathcal{R}^d$, $\alpha_i > 0$ for all $i$, regulated by a temperature $T(b)$, and with $d$ values $g \in \mathcal{R}^d$ sampled from a Gumbel distribution, yielding a probability distribution $m \in \mathcal{R}^d$:
\begin{equation}
m_j = \frac{e^{(\log(\alpha_j)+g_j)/T(b)}}{\sum_{k=1}^{d} e^{(\log(\alpha_k)+g_k)/T(b)}}
\end{equation}
During training, each element output by a concrete selection ``neuron'' $i$, which has its own parameters $\alpha^{(i)}$ and Gumbel samples $g^{(i)}$ and hence its own distribution $m^{(i)}$, is a linear combination of the input $x \in \mathcal{R}^d$:
\begin{equation}
E(x)_i = \sum_{j=1}^{d} x_j \, m^{(i)}_j
\end{equation}
But as the temperature $T(b) \rightarrow 0$, each node will select only a single input, at which point we can switch from the linear combination to simple indexing:
\begin{equation}
E(x)_i = x_{j^{*}_{i}}, \qquad j^{*}_{i} = \arg \max_j \alpha^{(i)}_j
\end{equation}
This allows us to smoothly transition from feature combination to feature selection and also to reduce evaluation-time memory requirements, since the indexing takes $O(1)$ space per neuron instead of the $O(d)$ of the linear approximation. To perform this smooth transition, we use the previously defined exponential temperature decay schedule, with $T_0$ being the initial temperature, $T_B$ the minimum temperature, $b$ the current epoch, and $B$ the maximum number of epochs:
\begin{equation}
T(b) = T_0(T_B/T_0)^{b/B}
\end{equation}
Having our encoder thus defined, we simply apply the same optimisation as in Equation~\ref{eq:sae}, replacing the traditional pyramidal MLP used as the encoder of the Autoencoder with our concrete selection layer, and performing both input reconstruction with the reverse-pyramidal decoder $D$ as well as hazard prediction through the hazard prediction network $C$.
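A minimal PyTorch sketch of such a concrete selection encoder is given below; the class name, initialisation and default values are illustrative and follow our reading of \cite{balin2019concrete} rather than any specific released code.
\begin{verbatim}
import torch
import torch.nn as nn
import torch.nn.functional as F

class ConcreteSelect(nn.Module):
    def __init__(self, n_in, n_selected, T0=10.0, TB=0.1, max_epochs=256):
        super().__init__()
        # one row of (log-)selection parameters per output "neuron"
        self.log_alpha = nn.Parameter(1e-2 * torch.randn(n_selected, n_in))
        self.T0, self.TB, self.max_epochs = T0, TB, max_epochs

    def temperature(self, epoch):
        # exponential decay T(b) = T0 * (TB / T0) ** (b / B)
        return self.T0 * (self.TB / self.T0) ** (epoch / self.max_epochs)

    def forward(self, x, epoch=0):
        if self.training:
            T = self.temperature(epoch)
            u = torch.rand_like(self.log_alpha)
            g = -torch.log(-torch.log(u + 1e-20) + 1e-20)    # Gumbel samples
            m = F.softmax((self.log_alpha + g) / T, dim=-1)  # concrete samples
            return x @ m.t()   # linear combination of inputs during training
        # as T -> 0 each neuron concentrates on one feature: plain indexing
        return x[:, self.log_alpha.argmax(dim=-1)]
\end{verbatim}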
\subsection{Baselines}
\subsubsection{Maui} For this baseline \cite{ronen2019evaluation,uyar2021multi} we used the code made available by the original authors on github (\url{https://github.com/BIMSBbioinfo/maui}) and incorporated it into our testing pipeline as an integration method. We also adapted their Cox-PH selection code to save the indexes which are selected as relevant for survival. We used our own Cox-regressor class on the significant factors, since it is equivalent to the one used by Maui. The original Maui paper also used KMeans as a clusterer, but we limit our analyses to 2 subgroups, since this is a harder test for the model. Other small changes include adapting their code to work with a different, more recent version of Keras and TensorFlow. Note that, although we are using the original Maui code or adaptations of it (where the original code does not store a model for later use), our pipeline is drastically different from Maui's and more stringent, which might cause the performance reported here to differ.
\subsubsection{Autoencoder + Cox-PH} Our AE baseline can be seen as a rough equivalent of \cite{lee2020incorporating} and is the base on which our Supervised Autoencoder model was built. The model we first tested, following a similar approach to \cite{chaudhary2018deep}, uses an SGD optimiser instead of Adam and trains for a very small number of epochs, which in our initial testing proved to be equivalent to not training the algorithm at all and might be seen as a form of random projection, as in \cite{bingham_random_2001}. Another approach uses omics-specific autoencoders (thus not performing cross-omics combinations in the fingerprints), makes heavy use of boosting to improve the models' joint performance, and also still uses the SGD optimiser \cite{poirion2021deepprog}. Using the code available on that model's github page (\url{https://github.com/lanagarmire/DeepProg}), we verified that the model provided similar outputs when not trained and when trained with the default number of epochs, most likely due to the use of SGD as an optimiser, which again makes the model interpretable as a form of random projection \cite{bingham_random_2001}. These models were not included in our final comparison due to the abovementioned methodological differences.
\subsubsection{PCA + Cox-PH} Our main baseline is the PCA baseline, which has been thoroughly used as a baseline in other papers, and was also used by the paper which first described the (unsupervised) Concrete Autoencoders \cite{balin2019concrete} as an upper-bound for the reconstruction loss of their CAE model. For the PCA baseline the first $d$ principal components are used as a drop-in equivalent for the $d$ fingerprint nodes in any of the autoencoder-based models.
\section{Results} \label{sec:results}
\subsection{Concordance Index Analysis}
Using our stringent testing pipeline, we evaluated all of the aforementioned models' capability to predict same-cancer, out-of-sample instances through the cross-validation scheme. In particular, normalisation was calibrated using only training data, which differs, for example, from the methodology previously used for Maui \cite{ronen2019evaluation}, where normalisation is done on all samples before validation. The impact of this can be clearly seen in Figure~\ref{fig:c-index-comparison}: the concordance indexes reported in the original paper are higher than the ones we encountered with this difference, something which was also noticed during our preliminary studies.
Also in Figure~\ref{fig:c-index-comparison}, we can see that our models either perform better than a PCA-based model or are not significantly different from the traditional PCA pipeline. In fact, Maui was the model with the worst average rank, at 4.88, whereas the CSAE ranked equal to the PCA model at 2.94, only slightly worse than an AE model without Cox supervision at 2.53, with the Cox-supervised autoencoder achieving the best average rank of 1.82.
With regards to the overall Concordance index, the previous ordering remains largely the same, with the SAE being the best overall with an average test score of 0.632 (all pairwise differences statistically significant, $p<10^{-4}$, by an independent two-sided t-test), the AE model having an average score of 0.610 (statistically significant difference, $p<0.007$, to the Maui and PCA models), the CSAE an average score of 0.603 (statistically significant difference, $p<10^{-46}$, to the Maui model), and the Maui model having an average test C-index of 0.526.
These results tell us two things, the first being that joint supervision on both the Cox and reconstruction objectives improves out-of-sample performance in autoencoder-based models. This was already somewhat attested previously \cite{tong_deep_2020,wissel_hierarchical_2022}, but neither of those studies performed joint integration directly, either using one autoencoder per omics layer \cite{tong_deep_2020}, which can be argued not to constitute integration at all, since the omics layers do not cross-contribute to the generated fingerprints, or having the generated fingerprints be de facto integrated only through a two-step hierarchical procedure \cite{wissel_hierarchical_2022}. Here we attest that models that do perform this direct multi-omics integration, such as \cite{chaudhary2018deep,zhang2018deep,asada2020uncovering,lee2020incorporating,ronen2019evaluation,uyar2021multi}, could greatly benefit from Cox supervision as a joint optimisation loss.
\begin{figure*}
\caption{Violin plot of the Concordance Index (c\_index) cross-validation test performance of each method on each tested cancer Subtype as well as the Violin plot over all results. We can see that, even though our CSAE model relies on simpler feature selection instead of multiple nonlinear combinations, it manages to perform on-par with most other baselines.}
\label{fig:c-index-comparison}
\end{figure*}
\subsection{SKCM Analysis}
We can see that the Cox-Supervised Autoencoder fingerprints for the SKCM dataset provide a very clear separation in terms of survival outcomes. In Figure~\ref{subfig:sae-whole-dataset-survival-separation}, we show the 2-clustering results on the survival-relevant fingerprints for this model, which clearly separate a group of high-risk patients from a group of low-risk patients; despite being 3rd place in terms of average C-index (only statistically significantly worse than PCA, $p=0.02333$), it provided a better separation in the Kaplan-Meier plot than what was previously reported with whole-dataset Kaplan-Meier plots in previous works (e.g., \cite{poirion2021deepprog}). The Concrete-SAE model, which was 4th place (but also only statistically significantly worse than PCA, $p=0.001685$), still manages an adequate survival stratification despite its ``fingerprints'' consisting of the original features, showing that it can provide a good survival separation solely on the basis of highly interpretable feature selection (Figure~\ref{subfig:concretesae-whole-dataset-survival-separation}).
\begin{figure*}
\caption{Survival curve separations for the SKCM dataset using our Supervised Autoencoder model (\subref{subfig:sae-whole-dataset-survival-separation}) and our Concrete Supervised Autoencoder model (\subref{subfig:concretesae-whole-dataset-survival-separation}).}
\label{fig:whole-dataset-survival-separation}
\end{figure*}
With regards to layer selections, we ran each of our models 32 times and analysed the most important feature for each of their fingerprint features. In the case of the SAE model, possibly because each fingerprint consists of a combination of features, one feature consistently outranked the others and was thus reported as the most important feature with regards to absolute neural path weight \cite{uyar2021multi}. The Concrete SAE model, however, since each fingerprint feature maps directly to an input feature, had a richer feature set selection, whose distribution can be seen in Figure~\ref{fig:layer-distr}; it shows a varied selection from multiple omics layers as well as a strong preference for a single clinical factor which is very relevant for survival.
\begin{figure*}
\caption{Layer feature selection distribution for the SKCM dataset, showing how often features from each omics layer were selected (\subref{subfig:layer-distr-counts}).}
\label{fig:layer-distr}
\end{figure*}
\section{Discussion}
In this paper we proposed two different models for multi-omics analysis. Our independently developed Cox-Supervised Autoencoder model, which is conceptually simpler than previously described models that also attempt survival-based multi-omics analysis \cite{tong_deep_2020,wissel_hierarchical_2022}, proved to be very effective, while providing true integration in the sense adopted in previous work \cite{chaudhary2018deep,zhang2018deep,asada2020uncovering,lee2020incorporating,ronen2019evaluation,uyar2021multi}. Our Concrete Cox-Supervised Autoencoder model, which instead performs multi-omics feature selection, also proved to be a strong alternative for cases where interpretability is more valuable than expressive power, being more interpretable than, while as powerful as, the PCA baseline, and not straying too far from its theoretical upper-bound baseline, our Cox-Supervised Autoencoder model.
Our proposed models, however, are not a one-size-fits-all solution to all survival-based multi-omics integration and feature selection challenges. Although one of our models ranked first or second with regards to survival separation on all but one dataset, the Concrete Supervised Autoencoder model is not expressive enough to capture cross-omics relationships, due to its simple feature selection method, and our Supervised Autoencoder model might still be less expressive than its more complicated counterpart, the Hierarchical Supervised Autoencoder \cite{wissel_hierarchical_2022}, a comparison which we leave for future work.
We believe that the Cox-Supervised Autoencoder model presented here provides a clear path forward, offering a simple method for survival-based multi-omics integration which can be further enriched with multitasking in its supervision, possibly also integrating drug responses, which could then lead to applications in drug discovery. Our Concrete Cox-Supervised Autoencoder model also makes use of recent advances \cite{maddison_concrete_2017,balin2019concrete} to provide end-to-end differentiable feature selection, whose ramifications range from finding specific sets of omics features that map to tasks other than survival to leveraging the power of differentiable programming techniques to discover new relationships in molecular assay datasets.
\end{document}
\begin{document}
\title{A note on generically stable measures and $fsg$ groups}
\begin{abstract} We prove (Proposition 2.1)
that if $\mu$ is a generically stable measure in an $NIP$ theory, and $\mu(\phi(x,b)) = 0$ for all $b$ then for some $n$,
$\mu^{(n)}(\exists y(\phi(x_{1},y)\wedge .. \wedge \phi(x_{n},y))) = 0$. As a consequence we show (Proposition 3.2) that if $G$ is a definable group with $fsg$ in an $NIP$ theory, and $X$ is a definable subset of $G$ then $X$ is generic if and only if every translate of $X$ does not fork over $\emptyset$, precisely as in stable groups, answering positively Problem 5.5 from \cite{NIP2}.
\end{abstract}
\section{Introduction and preliminaries}
This short paper is a contribution to the generalization of stability theory and stable group theory to $NIP$ theories, and also provides another example where we need to resort to measures to prove statements (about definable sets and/or types) which do not explicitly mention measures. The observations in the current paper can and will be used in the future to sharpen existing results around measure and $NIP$ theories (and this is why we wanted to record the observations here). Included in these sharpenings will be: (i) replacing average types by generically stable types in a characterization of strong dependence in terms of measure and weight in \cite{strongdep}, and (ii) showing the existence of ``external generic types" (in the sense of Newelski \cite{Newelski}), over any model, for $fsg$ groups in $NIP$ theories, improving on Lemma 4.14 and related results from \cite{Newelski}.
If $p(x)\in S(A)$ is a stationary type in a stable theory and $\phi(x,b)$ any formula, then we know that $\phi(x,b)\in p|\mathfrak C$ if and only if $\models \bigwedge_{i=1,..,n}\phi(a_{i},b)$ for some independent realizations $a_{1},..,a_{n}$ of $p$ (for some $n$ depending on $\phi(x,y)$). Hence $\phi(x,b)\notin p|\mathfrak C$ for all $b$ implies that
(and is clearly implied by) the inconsistency of $\bigwedge_{i=1,..,n}\phi(a_{i},y)$ for some (any) independent set
$a_{1},..,a_{n}$ of realizations of $p$. This also holds for generically stable types in $NIP$ theories (as well as for generically stable types in arbitrary theories, with definition as in \cite{PT}). In \cite{strongdep}, an analogous result was proved for ``average measures" in strongly dependent theories. Here we prove it (Proposition 2.1) for generically stable measures in arbitrary $NIP$ theories, as well as giving a generalization (Remark 2.2).
The $fsg$ condition on a definable group $G$ is a kind of ``definable compactness" assumption, and in fact means precisely this in $o$-minimal theories and suitable theories of valued fields (and of course stable groups are $fsg$). Genericity of a definable subset $X$ of $G$ means that finitely many translates of $X$ cover $G$. Proposition 2.1 is used to show that for $X$ a definable subset of an $fsg$ group $G$, $X$ is generic if and only if every translate of $X$ does not fork over $\emptyset$. This is a somewhat striking extension of stable group theory to the $NIP$ environment.
We work with an $NIP$ theory $T$ and inside some monster model $\mathfrak C$. If $A$ is any set of parameters, let $L_x(A)$ denote the Boolean algebra of $A$-definable sets in the variable $x$. A \emph{Keisler measure} over $A$ is a finitely additive probability measure on $L_x(A)$. Equivalently, it is a regular Borel probability measure on the compact space $S_x(A)$. We will denote by $\mathfrak M_x(A)$ the space of Keisler measures over $A$ in the variable $x$. We might omit $x$ when it is not needed or when it is included in the notation of the measure itself ({\it e.g.} $\mu_x$).
If $X$ is a sort, or more generally a definable set, we may also use notation such as $L_{X}(A)$, $S_{X}(A)$, $\mathfrak M_{X}(A)$, where for example $S_{X}(A)$ denotes the complete types over $A$ which contain the formula defining $X$ (or which ``concentrate on $X$'').
\begin{defi}
A type $p\in S_x(A)$ is \emph{weakly random} for $\mu_{x}$ if $\mu(\phi(x))>0$ for any $\phi(x)\in L(A)$ such that $p\vdash \phi(x)$. A point $b$ is weakly random for $\mu$ over $A$ if $\tp(b/A)$ is weakly random for $\mu$.
\end{defi}
We briefly recall some definitions and properties of Keisler measures, referring the reader to \cite{NIP3} for more details.
If $\mu \in \mathfrak M_x(\mathfrak C)$ is a global measure and $M$ a small model, we say that $\mu$ is $M$-invariant if $\mu(\phi(x,a) \triangle \phi(x,a'))=0$ for every formula $\phi(x,y)$ and $a,a'\in \mathfrak C$ having the same type over $M$. Such a measure admits a Borel defining scheme over $M$: For every formula $\phi(x,y)$, the value $\mu(\phi(x,b))$ depends only on $\tp(b/M)$ and for any Borel $B\subset [0,1]$, the set $\{p\in S_y(M) : \mu(\phi(x,b))\in B \text{ for some }b\models p\}$ is a Borel subset of $S_y(M)$.
Let $\mu_x \in \mathfrak M(\mathfrak C)$ be $M$-invariant. If $\lambda_y\in \mathfrak M(\mathfrak C)$ is any measure, then we can define the \emph{invariant extension} of $\mu_x$ over $\lambda_y$, denoted $\mu_x \otimes \lambda_y$. It is a measure in the two variables $x,y$ defined in the following way. Let $\phi(x,y) \in L(\mathfrak C)$. Take a small model $N$ containing $M$ and the parameters of $\phi$. Define
$\mu_x \otimes \lambda_y (\phi(x,y)) = \int f(p) d\lambda_y,$ the integral ranging over $S_y(N)$
where $f(p) = \mu_x(\phi(x,b))$ for $b\in \mathfrak C$, $b\models p$ (this function is Borel by Borel definability). It is easy to check that this does not depend on the choice of $N$.
If $\lambda_y$ is also invariant, we can also form the product $\lambda_y \otimes \mu_x$. In general it will not be the case that $\lambda_y \otimes \mu_x=\mu_x \otimes \lambda_y$.
If $\mu_x$ is a global $M$-invariant measure, we define by induction: $\mu^{(n)}_{x_1...x_n}$ by $\mu^{(1)}_{x_1}=\mu_{x_1}$ and $\mu^{(n+1)}_{x_1...x_{n+1}} = \mu_{x_{n+1}} \otimes \mu^{(n)}_{x_1...x_n}$. We let $\mu^{(\omega)}_{x_1x_2...}$ be the union and call it the \emph{Morley sequence} of $\mu_x$.
\\
Special cases of $M$-invariant measures include definable and finitely satisfiable measures. A global measure $\mu_x$ is \emph{definable} over $M$ if it is $M$-invariant and for every formula $\phi(x,y)$ and open interval $I\subset [0,1]$ the set $\{p\in S_y(M) : \mu(\phi(x,b))\in I \text{ for some }b\models p\}$ is open in $S_y(M)$. The measure $\mu$ is \emph{finitely satisfiable} in $M$ if $\mu(\phi(x,b))>0$ implies that $\phi(x,b)$ is satisfied in $M$. Equivalently, any weakly random type for $\mu$ is finitely satisfiable in $M$.
\begin{lemma} Let $\mu \in \mathfrak M_{x}(\mathfrak C)$ be definable over $M$, and $p(x)\in S_{x}(\mathfrak C)$ be weakly random for $\mu$. Let $\phi(x_{1},..,x_{n})$ be a formula over $\mathfrak C$. Suppose that
$\phi(x_{1},..,x_{n})\in p^{(n)}$. Then $\mu^{(n)}(\phi(x_{1},..,x_{n})) > 0$.
\end{lemma}
\begin{proof} We will carry out the proof in the case where $\mu$ is definable (over $M$), which is anyway the case we need. Note that $p^{(m)}$ is $M$-invariant for all $m$. The proof of the lemma is by induction on $n$. For $n=1$ it is just the definition of weakly random. Assume true for $n$ and we prove for $n+1$. So suppose $\phi(x_{1},..,x_{n},x_{n+1}) \in p^{(n+1)}$. This means that for $(a_{1},..,a_{n})$ realizing $p^{(n)}|M$, $\phi(a_{1},..,a_{n},x) \in p$. So as $p$ is weakly random for $\mu$, $\mu(\phi(a_{1},..,a_{n},x)) = r>0$. So as $\mu$ is $M$-invariant, $tp(a_{1}',..,a_{n}'/M) = tp(a_{1},..,a_{n}/M)$ implies $\mu(\phi(a_{1}',..,a_{n}',x)) = r$ and thus also
$r-\epsilon < \mu(\phi(a_{1}',..,a_{n}',x))$ for any small positive $\epsilon$. By definability of $\mu$ and compactness there is a formula $\psi(x_{1},..,x_{n}) \in tp(a_{1},..,a_{n}/M)$ such that
$\models\psi(a_{1}',..,a_{n}')$ implies $ 0 < r-\epsilon < \mu(\phi(a_{1}',..,a_{n}',x))$. By induction hypothesis,
$\mu^{(n)}(\psi(x_{1},..,x_{n})) > 0$. So by definition of $\mu^{(n+1)}$ we have that $\mu^{(n+1)}(\phi(x_{1},..,x_{n},x_{n+1})) > 0$ as required.
\end{proof}
A measure $\mu_{x_1,...,x_n}$ is \emph{symmetric} if for any permutation $\sigma$ of $\{1,...,n\}$ and any formula $\phi(x_1,...,x_n)$, we have $\mu(\phi(x_1,...,x_n))=\mu(\phi(x_{\sigma.1},...,x_{\sigma.n}))$. A special case of a symmetric measure is given by powers of a generically stable measure as we recall now. The following is Theorem 3.2 of \cite{NIP3}:
\begin{fact}\label{genstable}
Let $\mu_x$ be a global $M$-invariant measure. Then the following are equivalent:
\begin{enumerate}
\item $\mu_x$ is both definable and finitely satisfiable (necessarily over $M$),
\item $\mu^{(n)}_{x_1,...,x_n}|_M$ is symmetric for all $n<\omega$,
\item for any global $M$-invariant Keisler measure $\lambda_y$, $\mu_x \otimes \lambda_y=\lambda_y \otimes \mu_x$,
\item $\mu$ commutes with itself: $\mu_x \otimes \mu_y=\mu_y\otimes \mu_x$.
\end{enumerate}
If $\mu_x$ satisfies one of those properties, we say it is \emph{generically stable}.
\end{fact}
If $\mu\in \mathfrak M_x(A)$ and $D$ is a definable set such that $\mu(D)>0$, we can consider the \emph{localisation} of $\mu$ at $D$, which is the Keisler measure $\mu_D$ over $A$ defined by $\mu_D(X)=\mu(X\cap D)/\mu(D)$ for any definable set $X$.
We will use the notation $Fr(\theta(x),x_1,...,x_n)$ to mean $$\frac 1 n |\{i\in \{1,...,n\} : \models \theta(x_i)\}|.$$
The following is a special case of Lemma 3.4. of \cite{NIP3}.
\begin{prop}\label{genslemma}
Let $\phi(x,y)$ be a formula over $M$ and fix $r\in (0,1)$ and $\epsilon >0$.
Then there is $n$ such that for any symmetric measure $\mu_{x_1,...,x_{2n}}$, we have $$\mu_{x_1,...,x_{2n}}( \exists y (|Fr(\phi(x,y),x_1,....,x_n) - Fr(\phi(x,y),x_{n+1},...,x_{2n}) | > r)) \leq \epsilon.$$
\end{prop}
\section{Main result}
\begin{prop}
Let $\mu_x$ be a global generically stable measure. Let $\phi(x,y)$ be any formula in $L(\mathfrak C)$. Suppose that $\mu(\phi(x,b))=0$ for all $b\in \mathfrak C$. Then there is $n$ such that $\mu^{(n)}( \exists y (\phi(x_1,y) \wedge ... \wedge \phi(x_n,y)))=0$.
Moreover, $n$ depends only on $\phi(x,y)$ and not on $\mu$.
\end{prop}
\begin{proof}
Let $\mu_x$ be a global generically stable measure and $M$ a small model over which $\phi(x,y)$ is defined and such that $\mu_x$ is $M$-invariant. Assume that $\mu(\phi(x,b))=0$ for all $b\in \mathfrak C$. For any $k$, define $$W_k = \{(x_1,...,x_k) : \exists y (\bigwedge_{i=1,..,k} \phi(x_i,y))\}.$$ This is a definable set. We want to show that $\mu^{(n)}(W_n)=0$ for $n$ big enough. Assume for a contradiction that this is not the case.
Let $n$ be given by Proposition \ref{genslemma} for $r=1/2$ and $\epsilon=1/2$. Consider the measure $\lambda_{x_1,...,x_{2n}}$ over $M$ defined as being equal to $\mu^{(2n)}$ localised on the set $W_{2n}$ (by our assumption, this is well defined). As the measure $\mu^{(2n)}$ is symmetric and the set $W_{2n}$ is symmetric in the $2n$ variables, the measure $\lambda$ is symmetric. Let $\chi(x_1,...,x_{2n})$ be the formula ``$(x_1,...,x_{2n}) \in W_{2n} \wedge \forall y (|Fr(\phi(x,y),x_1,...,x_n)-Fr(\phi(x,y),x_{n+1},...,x_{2n})| \leq 1/2)$". By definition of $n$, we have
$\lambda ( \exists y (|Fr(\phi(x,y),x_1,....,x_n) - Fr(\phi(x,y),x_{n+1},...,x_{2n}) | > 1/2)) \leq 1/2$. Therefore $\mu^{(2n)} (\chi(x_1,...,x_{2n})) >0$.
As $\mu$ is $M$-invariant, we can write
$$\mu^{(2n)}(\chi(x_1,...,x_{2n})) = \int_{q \in S_{x_1,...,x_n}(M)} \mu^{(n)} (\chi(q,x_{n+1},...,x_{2n}))d\mu^{(n)},$$
where $\mu^{(n)} (\chi(q,x_{n+1},...,x_{2n}))$ stands for $\mu^{(n)}(\chi(a_1,...,a_n,x_{n+1},...,x_{2n}))$ for some (any) realization $(a_1,...,a_n)$ of $q$. As $\mu^{(2n)}(\chi(x_1,...,x_{2n})) >0$, there is $q\in S_{x_1,...,x_n}$ such that \\(*) $\mu^{(n)}(\chi(q,x_{n+1},...,x_{2n}))>0$.\\Fix some $(a_1,...,a_n)\models q$. By (*), we have $(a_1,...,a_n) \in W_n$. So let $b\in \mathfrak C$ such that $\models \bigwedge_{i=1...n} \phi(a_i,b)$. Again by (*), we can find some $(a_{n+1},...,a_{2n})$ weakly random for $\mu^{(n)}$ over $Ma_1...a_nb$ and such that
\\ (**) $\models \chi(a_1,...,a_n,a_{n+1},...,a_{2n})$.
\\ In particular, for $j=n+1,...,2n$, $a_j$ is weakly random for $\mu$ over $Mb$ hence $\models \neg \phi(a_j,b)$. But then $|Fr(\phi(x,b);a_1,...,a_n) - Fr(\phi(x,b);a_{n+1},...,a_{2n})| =1$. This contradicts (**).
\end{proof}
\begin{rem} The proof above adapts to showing the following generalization:
\newline
Let $\mu_{x}$ be a global generically stable measure, $\phi(x,y)$ a formula in $L(\mathfrak C)$. Let $\Sigma(x)$ be the partial type (over the parameters in $\phi$ together with a small model over which $\mu$ is definable) defining $\{b:\mu(\phi(x,b)) = 0\}$. Then for some $n$: $\mu^{(n)}(\exists y(\Sigma(y) \wedge \phi(x_{1},y)\wedge .. \wedge\phi(x_{n},y))) = 0$.
\end{rem}
\section{Generics in $fsg$ groups }
Let $G$ be a definable group, without loss defined over $\emptyset$. We call a definable subset $X$ of $G$ left (right) generic if finitely many left (right) translates of $X$ cover $G$, and a type $p(x)\in S_{G}(A)$ is left (right) generic if every formula in $p$ is. We originally defined (\cite{NIP1}) $G$ to have ``finitely satisfiable generics", or to be $fsg$, if there is some global complete type $p(x)\in S_{G}(\mathfrak C)$ of $G$ every left $G$-translate of which is finitely satisfiable in some fixed small model $M$.
The following summarizes the situation, where the reader is referred to Proposition 4.2 of \cite{NIP1} for (i) and Theorem 7.7 of \cite{NIP2} and Theorem 4.3 of \cite{NIP3} for (ii), (iii), and (iv).
\begin{fact} Suppose $G$ is an $fsg$ group. Then
\newline
(i) A definable subset $X$ of $G$ is left generic iff it is right generic, and the family of nongeneric definable sets is a (proper) ideal of the Boolean algebra of definable subsets of $G$,
\newline
(ii) There is a left $G$-invariant Keisler measure $\mu\in \mathfrak M_G(\mathfrak C)$ which is generically stable,
\newline
(iii) Moreover $\mu$ from (ii) is the unique left $G$-invariant global Keisler measure on $G$ as well as the unique right $G$-invariant global Keisler measure on $G$,
\newline
(iv) Moreover $\mu$ from (ii) is {\em generic} in the sense that for any definable set $X$, $\mu(X) > 0$ iff $X$ is generic.
\end{fact}
Remember that a definable set $X$ (or rather a formula $\phi(x,b)$ defining it) forks over a set $A$ if $\phi(x,b)$ implies a finite disjunction of formulas $\psi(x,c)$ each of which divide over $A$, and $\psi(x,c)$ is said to divide over $A$ if for some $A$-indiscernible sequence $(c_{i}:i<\omega)$ with $c_{0} = c$, $\{\phi(x,c_{i}):i<\omega\}$ is inconsistent.
\begin{prop} Suppose $G$ is $fsg$ and $X\subseteq G$ a definable set. Then $X$ is generic if and only if for all $g\in G$, $g\cdot X$ does not fork over $\emptyset$ (if and only if for all $g\in G$, $X\cdot g$ does not fork over $\emptyset$).
\end{prop}
\begin{proof} Left to right: It suffices to prove that any generic definable set $X$ does not fork over $\emptyset$, and as the set of nongenerics forms an ideal it is enough to prove that any generic definable set does not divide over $\emptyset$. This is carried out in (the proof of) Proposition 5.12 of \cite{NIP2}.
\noindent
Right to left: Assume that $X$ is nongeneric. We will prove that for some $g\in G$, $g\cdot X$ divides over $\emptyset$ (so also forks over $\emptyset$).
Let $\mu_{x}$ be the generically stable $G$-invariant global Keisler measure given by Fact 3.1. Let $M_{0}$ be a small model such that $\mu$ does not fork over $M_{0}$ (namely, as $\mu$ is generic, every generic formula does not fork over $M_{0}$) and $X$ is definable over $M_{0}$. Let $\phi(x,y)$ denote the formula defining $\{(x,y)\in G\times G: y\in x\cdot X\}$. So
$\phi$ has additional (suppressed) parameters from $M_{0}$. Note that for $b\in G$, $\phi(x,b)$ defines the set
$b\cdot X^{-1}$. As $X$ is nongeneric, so is $X^{-1}$ so also $b\cdot X^{-1}$ for all $b\in G$. Hence, as $\mu$ is generic, $\mu(\phi(x,b)) = 0$ for all $b$. By Proposition 2.1, for some $n$
$\mu^{(n)}(\exists y(\phi(x_{1},y)\wedge .. \wedge \phi(x_{n},y))) = 0$. Let $p$ be any weakly random type for $\mu$ (which in this case amounts to a global generic type, which note is $M_{0}$-invariant). So by Lemma 1.2 the formula
$\exists y(\phi(x_{1},y)\wedge .. \wedge \phi(x_{n},y))\notin p^{(n)}$. Let $(a_{1},..,a_{n})$ realize $p^{(n)}|M_{0}$.
Then $(a_{1},..,a_{n})$ extends to an $M_{0}$-indiscernible sequence $(a_{i}:i=1,2,....)$, a Morley sequence in $p$ over $M_{0}$, and $\models \neg \exists y(\phi(a_{1},y) \wedge ... \wedge \phi(a_{n},y))$. So in particular
$\{\phi(a_{i},y):i=1,2,...\}$ is inconsistent. Hence the formula $\phi(a_{i},y)$ divides over $M_{0}$, so also divides over $\emptyset$. But $\phi(a_{1},y)$ defines the set $a_{1}\cdot X$, so $a_{1}\cdot X$ divides over $\emptyset$ as required.
\end{proof}
Recall that we called a global type $p(x)$ of a $\emptyset$-definable group $G$, left $f$-generic if every left $G$-translate of $p$ does not fork over $\emptyset$.
We conclude the following (answering positively Problem 5.5 from \cite{NIP2} as well as strengthening Lemma 4.14 of \cite{CPI}):
\begin{cor} Suppose $G$ is $fsg$ and $p(x) \in S_{G}(\mathfrak C)$. Then the following are equivalent:
\newline
(i) $p$ is generic,
\newline
(ii) $p$ is left (right) $f$-generic,
\newline
(iii) (Left or right) $Stab(p)$ has bounded index in $G$ (where left $Stab(p) = \{g\in G:g\cdot p = p\}$).
\end{cor}
\begin{proof} The equivalence of (i) and (ii) is given by Proposition 3.2 and the definitions. We know from \cite{NIP1}, Corollary 4.3, that if $p$ is generic then $Stab(p)$ is precisely $G^{00}$. Now suppose that $p$ is nongeneric. Hence there is a definable set $X\in p$ such that $X$ is nongeneric. Let $M$ be a small model over which $X$ is defined. Note that the $fsg$ property is invariant under naming parameters. Hence $G$ is $fsg$ in
$Th(\mathfrak C,m)_{m\in M}$. By Proposition 3.2 (as well as what is proved in ``Right to left" there), for some $g\in G$, $g\cdot X$ divides over $M$. As $X$ is defined over $M$ this means that there is an $M$-indiscernible sequence
$(g_{\alpha}:\alpha < {\bar\kappa})$ (where $\bar\kappa$ is the cardinality of the monster model) and some $n$ such
that $g_{\alpha_{1}}\cdot X \cap ... \cap g_{\alpha_{n}}\cdot X = \emptyset$ whenever $\alpha_{1} < ... < \alpha_{n}$.
This clearly implies that among $\{g_{\alpha}\cdot p: \alpha < \bar\kappa\}$, there are $\bar\kappa$ many types, whereby $Stab(p)$ has unbounded index.
\end{proof}
\end{document}
\begin{document}
\title[$p $-variational Calculus]{$p $-variational Calculus}
\author{\.{I}lker Gen\c{c}t\"{u}rk}
\address{Department of Mathematics, K{\i }r{\i }kkale University,
Yahsihan,
71450 K{\i }r{\i }kkale, Turkey}
\email{[email protected]}
\date{\today }
\subjclass[2000]{Primary 49K05, 49K15; Secondary 47B39}
\keywords{$ p $-calculus, Euler-Lagrange difference equations, calculus of variations}
\begin{abstract}
The aim of this paper is to bring together a new type of quantum calculus, namely $p $-calculus, and variational calculus. We develop $p $-variational calculus and obtain a necessary
optimality condition of Euler-Lagrange type and a sufficient optimality
condition.
\end{abstract}
\maketitle
\section{Introduction}
One of the interesting fields in mathematics is quantum calculus, known as calculus without limits. It replaces the classical derivative by a quantum difference operator. It has important applications in physics and chemical physics. Moreover, it plays an important role in several fields of mathematics such as orthogonal polynomials, analytic number theory, geometric function theory, combinatorics, etc. \cite{AnnMan,HarmanI,HarmanII,KocaGenAyd,pashaev2014q}.
There are several types of quantum calculus, such as $ h $-calculus, $ q $-calculus, and others which are generalizations of $ h $-calculus and $ q $-calculus. At the beginning of the twentieth century, Jackson introduced the $ q $-calculus with the following expression $$ \frac{f(qt)-f(t)}{(q-1)t} $$ where $ q $ is a fixed number different from $ 1 $, $ t \neq 0 $ and $ f $ is a real function. It is clear that if $ f $ is differentiable at $ t \neq 0 $, then $f'(t)=\lim\limits_{q \to 1} \frac{f(qt)-f(t)}{(q-1)t}$. We refer the reader to \cite{KacCheung}, \cite{Ernstcomp} for basic concepts of quantum calculus and the history of $ q $-calculus.
In \cite{NeaMeh}, the authors introduced a new type of quantum calculus via the expression $$ \frac{f(t^p)-f(t)}{t^p-t}, $$ which is the definition of the $ p $-derivative. Moreover, some new properties of functions and a Steffensen inequality in $ p $-calculus \cite{NeaMeh2,yadollahzadeh2019steffensen}, as well as $ pq $-calculus, which is a generalization of $ p $-calculus \cite{genccturk2019new}, were given.
The calculus of variations is one of the classical subjects in mathematics and it establishes relations with other branches of mathematics such as differential equations, geometry, and physics. It also has important applications in mechanics, engineering, economics, biology and electrical engineering. The calculus of variations deals with finding extrema and, in this sense, one can say that it is a branch of optimization. Because of its importance, studies based on quantum calculus have appeared. In this regard, we refer the reader to \cite{aldwoah2011power,bangerezako2004variational,da2012q,da2018general,da2012higher} for details on the calculus of variations based on different quantum operators.
The main objective of this paper is to provide a necessary optimality
condition and a sufficient optimality condition for the $p$-variational
problem
\begin{align}
\mathcal{L}[y]
&=\int \limits_{a}^{b}L\bigg(t,y(t^{p}),D_{p}[y](t)
\bigg)d_{p}t\longrightarrow extremize \notag \\
& \tag*{(P)} \label{problem} \\
y &\in \mathcal{Y}^{{}}\big( [a, b ]_{p},
\mathbb{R}
\big),~y(a)=\alpha,~y(b)=\beta \notag
\end{align}
where $a$ and $b$ are fixed real numbers and by extremize we mean minimize or
maximize. Problem \ref{problem} will be described in detail in Section 3.
Additionally, the Lagrangian $L$ is assumed to satisfy the following hypotheses:
\begin{enumerate}
\item[(H1)\label{H1}] $(u,v)\rightarrow L(t,u,v)$ is a $C^{1}(
\mathbb{R}
^{2},
\mathbb{R}
)$ function for any $t\in I;$
\item[(H2)]\label{H2} $t\rightarrow L\big(t,y(t^{p}),D_{p}[y](t)\big)$ is continuous at $1$ for
any admissible function $y;$
\item[(H3)] \label{H3} the functions $t\rightarrow \partial _{i+2}L\big(t,y(t^{p}),D_{p}[y](t)\big)$
belong to $\mathcal{Y}^{{}}\big( [a,b]_{p},
\mathbb{R}
\big) $ for all admissible $y$, $i=0,1;$
\end{enumerate}
where $I$ is an interval of $
\mathbb{R}
$ containing 1; $a,b\in I,$ $a<b,$ and $\partial _{j}L$ denotes the partial
derivative of $L$ with respect to its $j$th argument.
This paper is organized as follows. In Section 2 we recall some necessary definitions and theorems about $ p $-calculus. In Section 3, we present our results for the $ p $-variational calculus.
\section{Preliminaries}
Let $p\in (0,1)$ and consider the interval $J=[0,\infty )$. We will
denote by $J_{p}$ the set $J_{p}:=\{x^{p}:x\in J\}$. Throughout this paper,
we assume that the function $f(x)$ is defined on $J$.
We need some definitions and fundamental results on $p$-calculus \cite{NeaMeh} to prove
our results.
\begin{definition}
Consider an arbitrary function $f(x)$. Its $p$-derivative is defined as
\begin{equation*}
D_{p}f(x)=\frac{f(x^{p})-f(x)}{x^{p}-x},\text{ if }x\neq 0,1,
\end{equation*}
and
\begin{equation*}
D_{p}f(0)=\lim_{x\rightarrow 0^{+}}D_{p}f(x),\text{ \ }D_{p}f(1)=\lim_{x
\rightarrow 1}D_{p}f(x).
\end{equation*}
\end{definition}
\begin{corollary}
If $f(x)$ is differentiable, then~$\lim \limits_{p\rightarrow
1}D_{p}f(x)=f^{\prime }(x),$ and also if $f^{\prime }(x)$ exists in a
neighborhood of $x=0$, $x=1$ and is continuous at $x=0$ and $x=1$, then we
have
\begin{equation*}
D_{p}f(0)=f_{+}^{\prime }(0),~D_{p}f(1)=f^{\prime }(1).
\end{equation*}
\end{corollary}
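For readers who wish to experiment numerically, the following short Python sketch (our own illustration, not taken from \cite{NeaMeh}; the function $f(x)=x^{3}$ and the point $x=2$ are arbitrary sample choices) evaluates the $p$-derivative directly from the definition and illustrates the limit $D_{p}f(x)\rightarrow f^{\prime }(x)$ as $p\rightarrow 1$.
\begin{verbatim}
# Illustrative sketch (assumes f smooth and x != 0, 1); sample data only.
def D_p(f, x, p):
    return (f(x ** p) - f(x)) / (x ** p - x)   # p-derivative from the definition

f = lambda x: x ** 3                           # sample function
fprime = lambda x: 3 * x ** 2                  # its classical derivative
x = 2.0
for p in (0.5, 0.9, 0.99, 0.999):
    print(p, D_p(f, x, p), fprime(x))          # values approach f'(2) = 12
\end{verbatim}
For $f(x)=x^{3}$ one can also check by hand that $D_{p}f(x)=x^{2p}+x^{p+1}+x^{2}$, which indeed tends to $3x^{2}$ as $p\rightarrow 1$.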
\begin{definition}
The $p$-derivative of higher order of function $f(x)$ is defined by
\begin{equation*}
\left( D_{p}^{0}f\right) (x)=f(x),~\left( D_{p}^{n}f\right)
(x)=D_{p}\left( D_{p}^{n-1}f\right) (x),\text{ }n\in
\mathbb{N}
.
\end{equation*}
\end{definition}
We note that the $p$-derivative has the following properties.
\begin{theorem}
Let $f$ and $g$ be $p$-differentiable on $J$, let $\alpha $ and $\beta \in
\mathbb{R}
$ and $x\in J_{p}$. Then
\begin{enumerate}
\item If $D_{p}f\equiv 0,$ then $f$ is a constant. Conversely, $D_{p}c\equiv 0$
for any $c.$
\item $D_{p}(\alpha f+\beta g)(x)=\alpha D_{p}f(x)+\beta D_{p}g(x).$
\item $D_{p}(f(x)g(x))=g(x^{p})D_{p}f(x)+f(x)D_{p}g(x).$
\item $$D_{p}\left( \frac{f}{g}\right) (x)=\frac{g(x)D_{p}f(x)-f(x)D_{p}g(x)}{
g(x)g(x^{p})}.$$
\end{enumerate}
\end{theorem}
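As a quick numerical sanity check of the product rule (property (3) above), one may run the following sketch; the functions and the evaluation point are arbitrary choices of ours, not taken from \cite{NeaMeh}.
\begin{verbatim}
# Numerical check of D_p(fg)(x) = g(x^p) D_p f(x) + f(x) D_p g(x); illustration only.
def D_p(h, x, p):
    return (h(x ** p) - h(x)) / (x ** p - x)

f = lambda x: x ** 2 + 1
g = lambda x: x ** 3
p, x = 0.7, 1.5
lhs = D_p(lambda t: f(t) * g(t), x, p)
rhs = g(x ** p) * D_p(f, x, p) + f(x) * D_p(g, x, p)
print(lhs, rhs)                                # the two values agree
\end{verbatim}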
\begin{definition}
A function $F(x)$ is a $p$-antiderivative of $f(x)$ if $D_{p}F(x)=f(x)$. It
is denoted by
\begin{equation*}
F(x)=\int f(x)d_{p}x.
\end{equation*}
\end{definition}
\begin{definition}
The $p$-integral of $f(x)$ is defined to be the series
\begin{equation*}
\sum \limits_{j=0}^{\infty }(x^{p^{j}}-x^{p^{j+1}})f(x^{p^{j}}).
\end{equation*}
\end{definition}
In \cite{NeaMeh}, the authors considered the following three cases to define the
definite $p $-integral.
\begin{enumerate}
\item[\textbf{Case 1.}] Let $1<a<b$ where $a,b\in
\mathbb{R}
$ and the function $f$ is defined on $(1,b].$ Notice that for any $j\in
\{0,1,2,3,\ldots \},$ $b^{p^{j}}\in (1,b].$
\begin{definition}
The $p$-integral of a function $f(x)$ on the interval $(1,b]$ is defined as
\begin{equation}
\int \limits_{1}^{b}f(x)d_{p}x=\lim_{N\rightarrow \infty }\sum
\limits_{j=0}^{N}(b^{p^{j}}-b^{p^{j+1}})f(b^{p^{j}})=\sum
\limits_{j=0}^{\infty }(b^{p^{j}}-b^{p^{j+1}})f(b^{p^{j}}),
\label{case1defint}
\end{equation}
and
\begin{equation*}
\int \limits_{a}^{b}f(x)d_{p}x:=\int \limits_{1}^{b}f(x)d_{p}x-\int
\limits_{1}^{a}f(x)d_{p}x.
\end{equation*}
\end{definition}
\item[\textbf{Case 2.}] Let $0<b<1$ where $b\in
\mathbb{R}
$. Notice that for any $j\in \{0,1,2,3,\ldots \},$ $b^{p^{j}}\in \lbrack
b,1)$ and $b^{p^{j}}<b^{p^{j+1}}.$
\begin{definition}
The $p$-integral of a function $f(x)$ on the interval $[b,1)$ is defined as
\begin{equation*}
\int \limits_{b}^{1}f(x)d_{p}x=\lim_{N\rightarrow \infty }\sum
\limits_{j=0}^{N}(b^{p^{j+1}}-b^{p^{j}})f(b^{p^{j}})=\sum
\limits_{j=0}^{\infty }(b^{p^{j+1}}-b^{p^{j}})f(b^{p^{j}}).
\end{equation*}
The $p$-integrals defined above are also denoted by
\begin{equation*}
\int \limits_{1}^{b}f(x)d_{p}x=I_{p^{+}}f(b),\text{ \ \ }\int
\limits_{b}^{1}f(x)d_{p}x=I_{p^{-}}f(b).
\end{equation*}
\end{definition}
\item[\textbf{Case 3.}] Let $0<a<b<1$ where $a,b\in
\mathbb{R}
$. Notice that for any $j\in \{0,1,2,3,\ldots \},$ $b^{p^{-j}}\in (0,b]$
and $b^{p^{-j-1}}<b^{p^{-j}}.$
\begin{definition}
The $p$-integral of a function $f(x)$ on the interval $(0,b](b<1)$ is
defined as
\begin{eqnarray*}
I_{p}f(b) &=&\int \limits_{0}^{b}f(x)d_{p}x=\lim_{N\rightarrow \infty }\sum
\limits_{j=0}^{N}(b^{p^{-j}}-b^{p^{-j-1}})f(b^{p^{-j-1}}) \\
&=&\sum \limits_{j=0}^{\infty }(b^{p^{-j}}-b^{p^{-j-1}})f(b^{p^{-j-1}})
\end{eqnarray*}
and
\begin{equation*}
\int \limits_{a}^{b}f(x)d_{p}x:=\int \limits_{0}^{b}f(x)d_{p}x-\int
\limits_{0}^{a}f(x)d_{p}x.
\end{equation*}
\end{definition}
\end{enumerate}
\begin{remark}\cite{NeaMeh}
If $p\in (0,1),$ then for any $j\in \{0,\pm 1,\pm 2,\pm 3,\ldots \},$ we
have $p^{p^{j}}\in (0,1),$ $p^{p^{j}}<p^{p^{j+1}}$ and
\begin{equation}\label{01int}
\int \limits_{0}^{1}f(x)d_{p}x=\sum \limits_{j=-\infty }^{\infty }\int
\limits_{p^{p^{j}}}^{p^{p^{j+1}}}f(x)d_{p}x=\sum \limits_{j=-\infty
}^{\infty }(p^{p^{j+1}}-p^{p^{j}})f(p^{p^{j}}).
\end{equation}
\end{remark}
Using the definitions of the $p $-integrals in \cite{NeaMeh}, we can give a more general formula:
\begin{corollary}[Cf. Corollary 4.12, \cite{NeaMeh}]
Suppose $0 \leq a<1<b $. The $p$-integral of a function $f(x)$ on the
interval $[a,b]$ is defined as
\begin{equation} \label{key}
\int \limits_{a}^{b}f(t)d_{p}t=\int \limits_{0}^{b}f(t)d_{p}t -\int
\limits_{0}^{a}f(t)d_{p}t
\end{equation}
where
\begin{equation}
\int \limits_{0}^{x}f(t)d_{p}t=\sum \limits_{j=0}^{\infty
}(x^{p^{-j}}-x^{p^{-j-1}})f(x^{p^{-j-1}}), ~\text{if } 0 \leq x <1,
\end{equation}
and
\begin{align}
\int \limits_{0}^{x}f(t)d_{p}t=&\int \limits_{0}^{1}f(t)d_{p}t+\int \limits_{1}^{x}f(t)d_{p}t\\
=&\sum \limits_{j=-\infty
}^{\infty }(p^{p^{j+1}}-p^{p^{j}})f(p^{p^{j}}) \\
&+\sum
\limits_{j=0}^{\infty }(x^{p^{j}}-x^{p^{j+1}})f(x^{p^{j}}), ~\text{if } x >1,
\end{align}
provided the series converges at $x=a $ and $x=b $. In that case, $f $ is
said to be $p $-integrable on $[a,b] $.
\end{corollary}
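To make the preceding case analysis concrete, the following Python sketch (our own illustration; the truncation level \texttt{terms} and the sample values $p=0.5$, $a=0.5$, $b=2$ are arbitrary choices) evaluates the truncated series defining $\int_{0}^{x}f(t)d_{p}t$ and the definite integral \eqref{key}. As consistency checks it verifies numerically that $\int_{a}^{b}1\,d_{p}t\approx b-a$ and that $\int_{a}^{b}(t^{p}+t)\,d_{p}t\approx b^{2}-a^{2}$; the latter is expected because $x^{2}$ is a $p$-antiderivative of $x^{p}+x$.
\begin{verbatim}
# Truncated evaluation of the p-integral; illustrative sketch with sample data.
def p_int_0_to_x(f, x, p, terms=60):
    s = 0.0
    if x < 1.0:
        for j in range(terms):                       # nodes x^{p^{-j-1}} in (0, x]
            hi, lo = x ** (p ** (-j)), x ** (p ** (-j - 1))
            if lo == 0.0 or hi == lo:                # stop once terms underflow
                break
            s += (hi - lo) * f(lo)
        return s
    for j in range(-terms, terms):                   # contribution of (0, 1)
        node, nxt = p ** (p ** j), p ** (p ** (j + 1))
        if node == 0.0 or nxt == node:
            continue
        s += (nxt - node) * f(node)
    for j in range(terms):                           # contribution of (1, x]
        node, nxt = x ** (p ** j), x ** (p ** (j + 1))
        if nxt == node:
            break
        s += (node - nxt) * f(node)
    return s

def p_int(f, a, b, p, terms=60):                     # definite p-integral on [a, b]
    return p_int_0_to_x(f, b, p, terms) - p_int_0_to_x(f, a, p, terms)

p, a, b = 0.5, 0.5, 2.0
print(p_int(lambda t: 1.0, a, b, p), b - a)               # approximately b - a
print(p_int(lambda t: t ** p + t, a, b, p), b**2 - a**2)  # approximately b^2 - a^2
\end{verbatim}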
\begin{definition}
The $p$-integral of higher order of a function $f$ is given by
\begin{equation*}
\left( I_{p}^{0}f\right) (x)=f(x),\text{ \ }\left( I_{p}^{n}f\right)
(x)=I_{p}\left( I_{p}^{n-1}f\right) (x),\text{ }n\in
\mathbb{N}
.
\end{equation*}
\end{definition}
The following lemmas were given by the authors in \cite{NeaMeh} to obtain the fundamental theorem of $ p $-calculus.
\begin{lemma}
If $x>1$ and $p\in (0,1)$, then $D_{p}I_{p^{+}}f(x)=f(x)$, and also if
function $f$ is continuous at $x=1$, then we have $
I_{p^{+}}D_{p}f(x)=f(x)-f(1).$
\end{lemma}
\begin{lemma}
If $x,p\in (0,1),$ then $D_{p}I_{p^{-}}f(x)=-f(x)$, and also if function $f$
is continuous at $x=1$, then we have $I_{p^{-}}D_{p}f(x)=f(1)-f(x).$
\end{lemma}
\begin{lemma}
If $x,p\in (0,1)$ and $I_{p}f(x)=\int \limits_{0}^{x}f(s)d_{p}s,$ then $
D_{p}I_{p}f(x)=f(x)$, and also if function $f$ is continuous at $x=0$, then
we have $I_{p}D_{p}f(x)=f(x)-f(0).$
\end{lemma}
\begin{theorem}[\textbf{Fundamental theorem of }$p$\textbf{-calculus},\cite{NeaMeh}]
Let $p\in (0,1)$. If $F(x)$ is a $p$-antiderivative of $f(x)$ and $F(x)$ is
continuous at $x=0$ and $x=1$, then for every $0\leq a<b\leq \infty $, we
have
\begin{equation*}
\int \limits_{a}^{b}f(x)d_{p}x=F(b)-F(a).
\end{equation*}
\end{theorem}
\begin{corollary}
If $f(x)$ is continuous at $x=0$ and $x=1$, then we have
\begin{equation*}
\int \limits_{a}^{b}D_{p}f(x)d_{p}x=f(b)-f(a).
\end{equation*}
\end{corollary}
\begin{corollary}
If $f(x)$ and $g(x)$ are continuous at $x=0$ and $x=1$, then we have
\begin{equation} \label{byparts}
\int
\limits_{a}^{b}f(x)D_{p}g(x)d_{p}x=f(b)g(b)-f(a)g(a)-\int
\limits_{a}^{b}g(x^{p})D_{p}f(x)d_{p}x.
\end{equation}
This formula is called $p$-integration by parts.
\end{corollary}
The $p$-integral has the following properties:
\begin{theorem}
Let $f,g:J\rightarrow
\mathbb{R}
$ be $p$-integrable on $J$, $a,b,c\in J$ and $\alpha ,\beta \in
\mathbb{R}
$. Then
\begin{enumerate}
\item $\int \limits_{a}^{a}f(t)d_{p}t=0;$
\item $\int \limits_{a}^{b}f(t)d_{p}t=-\int \limits_{b}^{a}f(t)d_{p}t;$
\item $\int \limits_{a}^{b}f(t)d_{p}t=\int \limits_{a}^{c}f(t)d_{p}t+\int
\limits_{c}^{b}f(t)d_{p}t;$
\item $\int \limits_{a}^{b}\left( \alpha f+\beta g\right) (t)d_{p}t=\alpha
\int \limits_{a}^{b}f(t)d_{p}t+\beta \int \limits_{a}^{b}g(t)d_{p}t.$
\end{enumerate}
\end{theorem}
\begin{proof}
The proof follows directly from the definitions of the definite $ p $-integral.
\end{proof}
In what follows, for a given $t\in J$, we denote
\begin{equation*}
\lbrack t]_{p}:=\{t^{p^{j}}:j\in \mathbb{N}_{0}\} \cup \{0,1\},
\end{equation*}
and
\begin{equation*}
\lbrack a,b]_{p}:=[a]_{p}\cup \lbrack b]_{p}.
\end{equation*}
Because the definition of the $ p $-integral depends on the interval, from now on we assume that $0<a<1<b$ for $a,b\in J$.
\begin{proposition}
Let $f$ and $g$ be $p$-integrable on $J$ and $a,b\in J$ such
that $0<a<1<b$. If $|f(t)|\leq g(t)$ for all $t\in \lbrack a,b]_{p}$, then
for $x,y\in \lbrack a,b]_{p}$, $x<1<y,$ we have
\begin{enumerate}
\item
\begin{equation*}
\left \vert \int \limits_{1}^{y}f(t)d_{p}t\right \vert \leq \int
\limits_{1}^{y}g(t)d_{p}t,
\end{equation*}
\item
\begin{equation*}
\left \vert \int \limits_{1}^{x}f(t)d_{p}t\right \vert \leq -\int
\limits_{1}^{x}g(t)d_{p}t.
\end{equation*}
\item
\begin{equation*}
\left \vert \int \limits_{x}^{y}f(t)d_{p}t\right \vert \leq \int
\limits_{x}^{y}g(t)d_{p}t.
\end{equation*}
Consequently, if $g(t)\geq 0$ for all $t\in \lbrack a,b]_{p}$, then the
inequalities $$\int \limits_{1}^{b}g(t)d_{p}t\geq 0 \quad \text{and} \quad \int
\limits_{a}^{b}g(t)d_{p}t\geq 0$$ hold.
\end{enumerate}
\end{proposition}
\begin{proof}
\begin{enumerate}
\item Since $y>1 $, we have $y^{p^{j+1}}<y^{p^{j}}$ for $j\in \mathbb{N}_{0}$ and $y\in
\lbrack a,b]_{p}$, so
\begin{eqnarray*}
\left \vert \int \limits_{1}^{y}f(t)d_{p}t\right \vert &=&\left \vert \sum
\limits_{j=0}^{\infty }(y^{p^{j}}-y^{p^{j+1}})f(y^{p^{j}})\right \vert \leq
\sum \limits_{j=0}^{\infty }(y^{p^{j}}-y^{p^{j+1}})\left \vert
f(y^{p^{j}})\right \vert \\
&\leq &\sum \limits_{j=0}^{\infty }(y^{p^{j}}-y^{p^{j+1}})g(y^{p^{j}})=\int
\limits_{1}^{y}g(t)d_{p}t.
\end{eqnarray*}
\item
\begin{eqnarray*}
\left \vert \int \limits_{x}^{1}f(t)d_{p}t\right \vert &=&\left \vert \sum
\limits_{j=0}^{\infty }(x^{p^{j+1}}-x^{p^{j}})f(x^{p^{j}})\right \vert \leq
\sum \limits_{j=0}^{\infty }(x^{p^{j+1}}-x^{p^{j}})\left \vert
f(x^{p^{j}})\right \vert \\
&\leq &\sum \limits_{j=0}^{\infty
}(x^{p^{j+1}}-x^{p^{j}})g(x^{p^{j}})=-\int \limits_{1}^{x}g(t)d_{p}t.
\end{eqnarray*}
\item The proof is similar to the previous ones.
\end{enumerate}
\end{proof}
\section{Main Results}
The main objective of this section is to introduce the $p$-variational
calculus. For this purpose, we consider the following variational
problem:
\begin{align}
\mathcal{L}[y]
&=\int \limits_{a}^{b}L\big(t,y(t^{p}),D_{p}[y](t)
\big)d_{p}t\longrightarrow extremize \notag \\
& \tag*{(P)} \label{problem} \\
y &\in \mathcal{Y}^{{}}\big( [a, b ]_{p},
\mathbb{R}
\big),~y(a)=\alpha,~y(b)=\beta \notag
\end{align}
where by extremize we mean minimize or maximize and $y\in \mathcal{Y}$, where
\begin{equation*}
\mathcal{Y}:=\{y:I\rightarrow
\mathbb{R}
\text{ }|\text{ }y\text{ and }D_{p}[y]\text{ are bounded on }[a , b
]_{p}\text{ and continuous at }0 \text{~and~} 1\}
\end{equation*}
equipped with the norm
\begin{equation*}
||y||=\sup_{t\in \lbrack a , b ]_{p}}|y(t)|+\sup_{t\in \lbrack
a , b ]_{p}}|D_{p}[y](t)|.
\end{equation*}
\begin{definition}
We say that $y$ is an admissible function for problem \ref{problem} if $y\in \mathcal{
Y }^{}\left( [a , b]_{p},
\mathbb{R}
\right) $ and $y$ satisfies the boundary conditions $y(a)=\alpha,~y(b)=\beta$.
\end{definition}
\begin{definition}
We say that $y_{\ast }$ is a local minimizer (resp. local maximizer) for
problem \ref{problem} if $y_{\ast }$ is an admissible function and there exists $
\delta >0$ such that
\begin{equation*}
\mathcal{L} \lbrack y_{\ast }]\leq \mathcal{L} \lbrack y]\text{ \ \ }(\text{resp. }\mathcal{L} \lbrack y_{\ast }]\geq \mathcal{L} \lbrack y])
\end{equation*}
for all admissible $y$ with $||y_{\ast }-y||_{}<\delta $.
\end{definition}
\begin{definition}
We say that $\eta \in \mathcal{Y}^{}\left( [a, b ]_{p},
\mathbb{R}
\right) $ is an admissible variation for problem \ref{problem} if $\eta (a)=0=\eta
(b). $
\end{definition}
\subsection{Basic Lemmas}
In order to obtain our results, we need the following important lemma:
\begin{lemma}[\textbf{Fundamental Lemma of }$p$\textbf{-variational Calculus}]\label{fundlemmavar}
Let $f\in \mathcal{Y}^{}([a,b]_{p},
\mathbb{R}
)$. One has
\begin{equation*}
\int \limits_{a}^{b}f(t)h(t^{p})d_{p}t=0
\end{equation*}
for all functions $h\in \mathcal{Y}^{}$ with $h(a)=h(b)=0$ if and only if $
f(t)=0$ for all $t\in \lbrack a,b]_{p}.$
\end{lemma}
\begin{proof}
The implication "$\Leftarrow $" is obvious. Let us prove the implication "$
\Rightarrow $". Conversely, suppose that exists $q\in \lbrack
a,b]_{p}$ such that $f(q)\neq 0$.
\begin{enumerate}
\item If $q\neq 0$, then $q=a^{p^{-j}}$ or $q=b^{p^{-j}}$ for some $j\in
\mathbb{N}
_{0}.$\newline
\begin{enumerate}
\item Suppose that $a\neq 0$ and $b\neq 0$. In this case we can assume,
without loss of generality, that $q=a^{p^{-j}}$. Define
\begin{equation*}
h(t)=\left \{
\begin{array}{cc}
f(a^{p^{-j-1}}),\text{ } & t=a^{p^{-j}}, \\
0, & \text{otherwise.}
\end{array}
\right.
\end{equation*}
Then
\begin{align*}
\int \limits_{a}^{b}f(t)h(t^{p})d_{p}t =&\int
\limits_{0}^{b}f(t)h(t^{p})d_{p}t-\int \limits_{0}^{a}f(t)h(t^{p})d_{p}t \\
=&\int
\limits_{0}^{1}f(t)h(t^{p})d_{p}t+\int
\limits_{1}^{b} f(t)h(t^{p})d_{p}t-\int \limits_{0}^{a}f(t)h(t^{p})d_{p}t \\
=&\sum \limits_{j=-\infty
}^{\infty }(p^{p^{j+1}}-p^{p^{j}})f(p^{p^{j}})h((p^{p^{j}})^p)+\sum \limits_{j=0}^{\infty
}(b^{p^{-j}}-b^{p^{-j-1}})f(b^{p^{-j-1}})h(\left( b^{p}\right)
^{p^{-j-1}})\\
&-\sum \limits_{j=0}^{\infty
}(a^{p^{-j}}-a^{p^{-j-1}})f(a^{p^{-j-1}})h(\left( a^{p}\right) ^{p^{-j-1}})
\\
=&-(a^{p^{-j}}-a^{p^{-j-1}})\left[ f(a^{p^{-j-1}})\right] ^{2}\neq 0,
\end{align*}
which is a contradiction.
\item Suppose that $a=0$ and $b\neq 0$; then $q=b^{p^{-j}}$ for some $
j\in
\mathbb{N}
_{0}.$ Define
\begin{equation*}
h(t)=\left \{
\begin{array}{cc}
f(b^{p^{-j-1}}),\text{ } & t=b^{p^{-j}}, \\
0, & \text{otherwise.}
\end{array}
\right.
\end{equation*}
and as in the proof (a), we obtain a contradiction.
\item The case $b=0$ and $a\neq 0$ is similar to the previous one.
\end{enumerate}
\item If $q=0$, without loss of generality, we can assume that $f(q)>0.$
Since
\begin{equation*}
\lim_{j\rightarrow \infty }a^{p^{-j-1}}=\lim_{j\rightarrow \infty
}b^{p^{-j-1}}=1
\end{equation*}
and $f$ is continuous at $1$, we have
\begin{equation*}
\lim_{j\rightarrow \infty }f(a^{p^{-j-1}})=\lim_{j\rightarrow \infty
}f(b^{p^{-j-1}})=f(1).
\end{equation*}
Therefore, there exists an order $j_{0}\in
\mathbb{N}
$ such that for all $j>j_{0}$ the inequalities
\begin{equation*}
f(a^{p^{-j-1}})>1\text{ and }f(b^{p^{-j-1}})>1
\end{equation*}
hold.
\begin{enumerate}
\item If $a,b\neq 0$, then for some $k>j_{0}$, we define
\begin{equation*}
h(t)=\left \{
\begin{array}{cc}
f(b^{p^{-j-1}}),\text{ } & \text{if }t=b^{p^{-j}}, \\
f(a^{p^{-j-1}}), & \text{if }t=a^{p^{-j}}, \\
0, & \text{otherwise.}
\end{array}
\right.
\end{equation*}
Hence
\begin{equation*}
\int
\limits_{a}^{b}f(t)h(t^{p})d_{p}t=(b^{p^{-j}}-b^{p^{-j-1}})f(b^{p^{-j-1}})-(a^{p^{-j}}-a^{p^{-j-1}})f(a^{p^{-j-1}})\neq 0.
\end{equation*}
\item If $a=0$, then we define
\begin{equation*}
h(t)=\left \{
\begin{array}{cc}
f(b^{p^{-j-1}}),\text{ } & \text{if }t=b^{p^{-j}}, \\
0, & \text{otherwise.}
\end{array}
\right.
\end{equation*}
Therefore
\begin{equation*}
\int \limits_{0}^{b}f(t)h(t^{p})d_{p}t=(b^{p^{-j}}-b^{p^{-j-1}})\left[
f(b^{p^{-j-1}})\right] ^{2}\neq 0.
\end{equation*}
\item If $b=0$, this follows by the same method as in the previous case.
\end{enumerate}
\end{enumerate}
\end{proof}
\begin{definition}\label{uniformdefn}
Let $\ s\in I$ and $g:I\times (-\theta ,\theta )\rightarrow
\mathbb{R}
.$ We say that $g(t,\cdot )$ is differentiable at $\theta _{0}$ uniformly in $
[s]_{p}$ if for every $\epsilon >0$ there exists $\delta >0$ such that
\begin{equation*}
0<|\theta -\theta _{0}|<\delta \Longrightarrow \left \vert \frac{g(t,\theta
)-g(t,\theta _{0})}{\theta -\theta _{0}}-\partial _{2}g(t,\theta _{0})\right
\vert <\epsilon
\end{equation*}
for all $t\in \lbrack s]_{p},$ where $\partial _{2}g=\frac{\partial g}{
\partial \theta }.$
\end{definition}
\begin{lemma}[Cf. \cite{malinowska2010hahn}]\label{uniformlemma}
Let $\ s\in I$ and \ assume that $g:I\times (-\theta ,\theta )\rightarrow
\mathbb{R}
$ is differentiable at $\theta _{0}$ uniformly in $[s]_{p}.$ If $\int
\limits_{0}^{s}g(t,\theta _{0})d_{p}t$ exists, then $G(\theta ):=\int
\limits_{0}^{s}g(t,\theta )d_{p}t$ \ for $\theta $ near $\theta _{0}$ is
differentiable at $\theta _{0}$ and
\begin{equation*}
G^{\prime }(\theta _{0})=\int \limits_{0}^{s}\partial _{2}g(t,\theta
_{0})d_{p}t.
\end{equation*}
\end{lemma}
\begin{proof}
\begin{enumerate}
\item[i.)] Let $ s<1 $. Since $g(t,\cdot)$ is differentiable at $\theta _{0}$ uniformly in $[s]_{p}$,
then for every $\epsilon >0$ there exists $\delta >0$ such that for
all $t\in $ $[s]_{p}$ and for $0<|\theta -\theta _{0}|<\delta $, the
following inequalities hold:
\begin{equation*}
\left \vert \frac{g(t,\theta )-g(t,\theta _{0})}{\theta -\theta _{0}}
-\partial _{2}g(t,\theta _{0})\right \vert <\frac{\epsilon }{2s},
\end{equation*}
\begin{eqnarray*}
\left \vert \frac{G(\theta )-G(\theta _{0})}{\theta -\theta _{0}}-G^{\prime
}(\theta _{0})\right \vert &\leq &\int \limits_{0}^{s}\left \vert \frac{
g(t,\theta )-g(t,\theta _{0})}{\theta -\theta _{0}}-\partial _{2}g(t,\theta
_{0})\right \vert d_{p}t \\
&<&\int \limits_{0}^{s}\frac{\epsilon }{2s}d_{p}t=\frac{\epsilon }{2}
<\epsilon .
\end{eqnarray*}
\item[ii.)]
Let $ s>1 $. Since $g(t,\cdot)$ is differentiable at $\theta _{0}$ uniformly in $[s]_{p}$,
then for every $\epsilon >0$ there exists $\delta >0$ such that for
all $t\in $ $[s]_{p}$ and for $0<|\theta -\theta _{0}|<\delta $, the
following inequalities hold:
\begin{equation*}
\left \vert \frac{g(t,\theta )-g(t,\theta _{0})}{\theta -\theta _{0}}
-\partial _{2}g(t,\theta _{0})\right \vert <\frac{\epsilon }{2s},
\end{equation*}
\begin{eqnarray*}
\left \vert \frac{G(\theta )-G(\theta _{0})}{\theta -\theta _{0}}-G^{\prime
}(\theta _{0})\right \vert &\leq &\int \limits_{0}^{s}\left \vert \frac{
g(t,\theta )-g(t,\theta _{0})}{\theta -\theta _{0}}-\partial _{2}g(t,\theta
_{0})\right \vert d_{p}t \\
&<&\int \limits_{0}^{s}\frac{\epsilon }{2s}d_{p}t=\frac{\epsilon }{2}
<\epsilon .
\end{eqnarray*}
\end{enumerate}
Hence, $G(\cdot)$ is differentiable at $\theta _{0}$ and $G^{\prime }(\theta
_{0})=\int \limits_{0}^{s}\partial _{2}g(t,\theta _{0})d_{p}t.$
\end{proof}
\subsection{$p$-variational Problem}
For an admissible variation $\eta $ and an admissible function $y$, \ we
define the real function $\phi $ by
\begin{equation*}
\phi (\varepsilon )=\phi (\varepsilon ,y,\eta ):=\mathcal{L} \lbrack
y+\varepsilon \eta ].
\end{equation*}
The first variation of the functional
$\mathcal{L} $ of the problem
\ref{problem} is defined by
\begin{equation*}
\delta \mathcal{L} \lbrack y,\eta ]:=\phi ^{\prime }(0).
\end{equation*}
Note that
\begin{align*}
\mathcal{L} \lbrack y+\varepsilon \eta ] =&\int
\limits_{a}^{b}L\bigg(t,y(t^{p})+\varepsilon \eta (t^{p}),D_{p}[y](t)+\varepsilon
D_{p}[\eta ](t)\bigg)d_{p}t \\
=&\mathcal{L} _{b}[y+\varepsilon \eta ]-\mathcal{L} _{a}[y+\varepsilon \eta
]
\end{align*}
where
\begin{equation*}
\mathcal{L} _{\xi }[y+\varepsilon \eta ]=\int \limits_{0}^{\xi
}L\bigg(t,y(t^{p})+\varepsilon \eta (t^{p}),D_{p}[y](t)+\varepsilon D_{p}[\eta
](t)\bigg)d_{p}t
\end{equation*}
with $\xi \in \{a,b\}$. Therefore,
\begin{equation*}
\delta \mathcal{L} \lbrack y,\eta ]=\delta \mathcal{L} _{b}[y,\eta ]-\delta
\mathcal{L} _{a}[y,\eta ].
\end{equation*}
The following lemma is direct consequence of Lemma \ref{uniformlemma}. In what follows $
\partial _{i}L$ denotes the partial derivative of $L$ with respect to its $i$
th argument.
\begin{lemma}\label{lemadm}
Let $\eta $ be an admissible variation and $y$ an admissible function, and let
\begin{equation*}
g(t,\varepsilon ):=L\bigg(t,y(t^{p})+\varepsilon \eta
(t^{p}),D_{p}[y](t)+\varepsilon D_{p}[\eta ](t)\bigg).
\end{equation*}
Assume that
\begin{enumerate}
\item $g(t,\cdot)$ is differentiable at $0$ uniformly in $[a , b ]_{p};$
\item $\mathcal{L} _{a}[y+\varepsilon \eta ]=\int
\limits_{0}^{a}g(t,\varepsilon )d_{p}t$ and $\mathcal{L} _{b}[y+\varepsilon
\eta ]=\int \limits_{0}^{b}g(t,\varepsilon )d_{p}t$ exist for $\varepsilon
\approx 0;$
\item $\int \limits_{0}^{a}\partial _{2}g(t,0)d_{p}t$ and $\int
\limits_{0}^{b}\partial _{2}g(t,0)d_{p}t$ exist.
\end{enumerate}
Then
\begin{align*}
\phi ^{\prime }(0)=&\delta \mathcal{L} \lbrack y,\eta ]\\
=&\int
\limits_{a}^{b}\Big( \partial _{2}L\big(t,y(t^{p}),D_{p}[y](t)\big)\eta
(t^{p})+\partial _{3}L\big(t,y(t^{p}),D_{p}[y](t)\big)D_{p}[\eta ](t)\Big)
d_{p}t.
\end{align*}
\end{lemma}
\subsection{Optimality conditions}
In this section, we will present a necessary condition (the $p$-Euler-Lagrange equation) and a sufficient condition to our Problem \ref{problem}.
\begin{theorem}[The $p$-Euler-Lagrange equation] \label{theopeulerlagrange}
Under hypotheses (H1)-(H3) and conditions 1-3 of Lemma \ref{lemadm} on the Lagrangian $
L $, if $y_{\ast }\in \mathcal{Y}^{}$ is a local extremizer for problem
\ref{problem}, then $y_{\ast }$ satisfies the $p$-Euler-Lagrange equation
\begin{equation}\label{peulerlagrange}
\partial _{2}L\big(t,y(t^{p}),D_{p}[y](t)\big)=D_{p}\big[ \partial _{3}L(\cdot
,y(\cdot ^{p}),D_{p}[y](\cdot ))\big] (t)
\end{equation}
for all $t\in \lbrack a, b]_{p}.$
\end{theorem}
\begin{proof}
Let $y_{\ast }$ be a local minimizer (resp. maximizer) for problem \ref{problem} and $
\eta $ an admissible variation. Define $\phi :
\mathbb{R}
\rightarrow
\mathbb{R}
$ by
\begin{equation*}
\phi (\varepsilon ):=\mathcal{L} \lbrack y_{\ast }+\varepsilon \eta ].
\end{equation*}
A necessary condition for $y_{\ast }$ to be an extremizer is given by $\phi
^{\prime }(0)=0.$ By Lemma \ref{lemadm}, we conclude that
\begin{equation*}
\int \limits_{a}^{b}\Big( \partial _{2}L\big(t,y(t^{p}),D_{p}[y](t)\big)\eta
(t^{p})+\partial _{3}L\big(t,y(t^{p}),D_{p}[y](t)\big)D_{p}[\eta ](t)\Big)
d_{p}t=0.
\end{equation*}
By integration by parts \eqref{byparts}, we get
\begin{eqnarray*}
&&\int \limits_{a}^{b}\partial _{3}L\big(t,y(t^{p}),D_{p}[y](t)\big)D_{p}[\eta
](t)d_{p}t \\
&=&\partial _{3}L\big(t,y(t^{p}),D_{p}[y](t)\big)\eta (t)|_{a}^{b}-\int
\limits_{a}^{b}D_{p}\partial _{3}L\big(\cdot ,y(\cdot ^{p}),D_{p}[y](\cdot
)\big)(t)\eta (t^{p})d_{p}t.
\end{eqnarray*}
Since $\eta (a)=\eta (b)=0$, we get
\begin{equation*}
\int \limits_{a}^{b}\bigg( \partial _{2}L\big(t,y(t^{p}),D_{p}[y](t)\big)\eta
(t^{p})-D_{p}\partial _{3}L\big(\cdot ,y(\cdot ^{p}),D_{p}[y](\cdot )\big)(t)\eta
(t^{p})\bigg) d_{p}t=0
\end{equation*}
Finally, by Lemma \ref{fundlemmavar}, for all $t\in \lbrack a, b ]_{p}$
\begin{equation*}
\partial _{2}L\bigg(t,y(t^{p}),D_{p}[y](t)\bigg)=D_{p}\partial _{3}L\bigg(\cdot ,y(\cdot
^{p}),D_{p}[y](\cdot )\bigg)(t).
\end{equation*}
\end{proof}
To conclude this section, we prove a sufficient optimality condition for the
Problem \ref{problem}.
\begin{definition}
Given a function $L$, we say that $L(t,u,v)$ is jointly convex (resp.\ concave)
in $(u,v)$ if and only if $\partial _{i}L$, $i=2,3$, exist, are continuous, and satisfy the
following condition:
\begin{equation*}
L(t,u+u_{1},v+v_{1})-L(t,u,v) ~\geq{(\text {resp.~}\leq )}~\partial
_{2}L(t,u,v)u_{1}+\partial _{3}L(t,u,v)v_{1}
\end{equation*}
for all $(t,u,v),(t,u+u_{1},v+v_{1})\in I\times
\mathbb{R}
^{2}.$
\end{definition}
\begin{theorem}\label{convextheo}
Suppose that $a<b$ and $a,b\in \lbrack c]_{p}$ for some $c\in I$. Also
assume that $L$ is a jointly convex (resp.\ concave) function in $(u,v).$ If $
y_{\ast }$ satisfies the $p$-Euler-Lagrange equation \eqref{peulerlagrange}, then $y_{\ast }$ is a
global minimizer (resp.\ maximizer) for problem \ref{problem}.
\end{theorem}
\begin{proof}
Let $L$ be a jointly convex function in $(u,v)$. Then for any admissible variation $\eta $, we have
\begin{align*}
\mathcal{L} \lbrack y_{\ast }+\eta ]-\mathcal{L} \lbrack y_{\ast }] &=\int
\limits_{a}^{b}\Bigg[ L\bigg(t,y_{\ast }(t^{p})+\eta (t^{p}),D_{p}[y_{\ast
}](t)+D_{p}[\eta ](t)\bigg)-L\bigg(t,y_{\ast }(t^{p}),D_{p}[y_{\ast }](t)\bigg)\Bigg]
d_{p}t \\
&\geq \int \limits_{a}^{b}\Bigg[ \partial _{2}L\bigg(t,y_{\ast
}(t^{p}),D_{p}[y_{\ast }](t)\bigg)\eta (t^{p})+\partial _{3}L\bigg(t,y_{\ast
}(t^{p}),D_{p}[y_{\ast }](t)\bigg)D_{p}[\eta ](t)\Bigg] d_{p}t.
\end{align*}
Using integration by parts, formula \eqref{byparts}, we get
\begin{align*}
\mathcal{L} \lbrack y_{\ast }+\eta ]-\mathcal{L} \lbrack y_{\ast }] &\geq
\int \limits_{a}^{b}\Big[ \partial _{2}L\big(t,y_{\ast }(t^{p}),D_{p}[y_{\ast
}](t)\big)\eta (t^{p})\Big] d_{p}t \\
&\quad +\int \limits_{a}^{b}\Big[ \partial
_{3}L\big(t,y_{\ast }(t^{p}),D_{p}[y_{\ast }](t)\big)D_{p}[\eta ](t)\Big] d_{p}t \\
&\geq \int \limits_{a}^{b}\Big[ \partial _{2}L\big(t,y_{\ast
}(t^{p}),D_{p}[y_{\ast }](t)\big)\eta (t^{p})\Big] d_{p}t \\
&\quad +\partial _{3}L\big(t,y_{\ast }(t^{p}),D_{p}[y_{\ast }](t)\big)\eta
(t)|_{a}^{b} \\
&\quad -\int \limits_{a}^{b}D_{p}\partial _{3}L\big(\cdot ,y(\cdot
^{p}),D_{p}[y](\cdot )\big)(t)\eta (t^{p})d_{p}t.
\end{align*}
Since $y_{\ast }$ satisfies the $p$-Euler-Lagrange equation of Theorem \ref{theopeulerlagrange} and $\eta $ is an admissible variation, we
obtain
\begin{equation*}
\mathcal{L} \lbrack y_{\ast }+\eta ]-\mathcal{L} \lbrack y_{\ast }]\geq 0,
\end{equation*}
proving that $y_{\ast }$ is a minimizer of Problem \ref{problem}.
The same conclusion can be drawn for the concave case.
\end{proof}
\section{An example}
As before, let $ p \in (0,1) $ be fixed. For fixed $ a<b $ in $ J $ with $ 0<a<1<b $, consider the following problem
\begin{equation}
\begin{cases}
\mathcal{L} \lbrack y ] = \int \limits_{a}^{b} \left(t+\frac{1}{2}\left( D_p [y](t)\right)^2\right)d_p t \rightarrow \text{~minimize}\\
y \in \mathcal{Y}^{}([a,b]_{p}, \mathbb{R}) \\
y(a)=a, \\
y(b)=b.
\end{cases}
\end{equation}
If $ y_{\ast } $ is a local minimizer of the problem, then $ y_{\ast } $ satisfies the $ p $-Euler-Lagrange equation:
\begin{equation}
D_p\big[ D_p[y]\big](t)=0 \text{~ for all } t \in [a,b]_{p}.
\end{equation}
It can easily be seen that the function $ y_{\ast }(t)=t $ satisfies this equation together with the boundary conditions, so it is a candidate solution of the problem. Since the Lagrangian is jointly convex in $ (u,v) $, by Theorem \ref{convextheo} it follows immediately that $ y_{\ast} $ is a minimizer of the problem.
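As an illustrative numerical experiment (our own sketch, not part of the theory above), one can compare the value of the functional at $y_{\ast }(t)=t$ with its values at perturbed admissible functions $y_{\ast }+\varepsilon \eta$, where $\eta (t)=(t-a)(b-t)$ vanishes at the endpoints; the sample data $p=0.5$, $a=0.5$, $b=2$ are arbitrary choices and the $p$-integral is truncated.
\begin{verbatim}
# Numerical illustration that y*(t) = t minimizes the functional; sketch only.
def D_p(y, t, p):
    return (y(t ** p) - y(t)) / (t ** p - t)          # t != 0, 1 at the nodes used

def p_int_0_to_x(f, x, p, terms=40):
    s = 0.0
    if x < 1.0:
        for j in range(terms):
            hi, lo = x ** (p ** (-j)), x ** (p ** (-j - 1))
            if lo == 0.0 or hi == lo:
                break
            s += (hi - lo) * f(lo)
        return s
    for j in range(-terms, terms):
        node, nxt = p ** (p ** j), p ** (p ** (j + 1))
        if node == 0.0 or nxt == node:
            continue
        s += (nxt - node) * f(node)
    for j in range(terms):
        node, nxt = x ** (p ** j), x ** (p ** (j + 1))
        if nxt == node:
            break
        s += (node - nxt) * f(node)
    return s

def functional(y, a, b, p):                           # L[y] for the example above
    f = lambda t: t + 0.5 * D_p(y, t, p) ** 2
    return p_int_0_to_x(f, b, p) - p_int_0_to_x(f, a, p)

p, a, b = 0.5, 0.5, 2.0
eta = lambda t: (t - a) * (b - t)                     # admissible variation
for eps in (-0.1, 0.0, 0.1):
    y = lambda t, e=eps: t + e * eta(t)
    print(eps, functional(y, a, b, p))                # eps = 0 gives the smallest value
\end{verbatim}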
\end{document}
\begin{document}
\begin{abstract} We show that the group ${\mathbb Q \rtimes \mathbb Q^*_+}$ of orientation-preserving affine transformations of the
rational numbers is quasi-lattice ordered by its subsemigroup $\mathbb N\rtimes\mathbb N^{\times}$.
The associated Toeplitz $C^*$-algebra $\mathcal T(\mathbb N\rtimes\mathbb N^{\times})$ is universal for
isometric representations which are covariant in the sense of Nica.
We give a presentation of $\mathcal T(\mathbb N\rtimes\mathbb N^{\times})$ in terms of generators and relations, and use this to show that the $C^*$-algebra $\mathcal Q_\mathbb N$ recently introduced by
Cuntz is the boundary quotient of $({\mathbb Q \rtimes \mathbb Q^*_+}, \mathbb N\rtimes\mathbb N^{\times})$ in the sense of
Crisp and Laca. The Toeplitz algebra $\mathcal T(\mathbb N\rtimes\mathbb N^{\times})$ carries a natural dynamics $\sigma$, which induces the one considered by Cuntz on the quotient
$\mathcal Q_\mathbb N$, and our main result is
the computation of the KMS$_\beta$ (equilibrium) states of the
dynamical system $(\mathcal T(\mathbb N\rtimes\mathbb N^{\times}), \mathbb R,\sigma)$
for all values of the inverse temperature $\beta$.
For $\beta \in [1, 2]$ there is a unique
KMS$_\beta$ state, and the KMS$_1$ state factors through the quotient map onto $\mathcal Q_\mathbb N$, giving the unique KMS state discovered by Cuntz. At
$\beta =2$ there is a phase transition, and for $\beta>2$ the KMS$_\beta$ states are indexed
by probability measures on
the circle. There is a further phase transition at $\beta=\infty$, where the KMS$_\infty$ states are indexed by the probability measures on the
circle, but the ground states are indexed by the states on the classical Toeplitz algebra~$\mathcal T(\mathbb N)$.
\end{abstract}
\maketitle
\section*{Introduction}
Cuntz has recently introduced and studied a $C^*$-algebra $\mathcal Q_\mathbb N$ which is generated by an isometric representation of the semidirect product $\mathbb N\rtimes\mathbb N^{\times}$ of the additive semigroup $\mathbb N$ by the natural action of the multiplicative semigroup $\mathbb N^\times$ \cite{cun2}. Cuntz proved that $\mathcal Q_\mathbb N$ is simple and purely infinite, and that it admits a natural dynamics for which there is a unique KMS state, which occurs at inverse temperature $1$. He also showed that $\mathcal Q_\mathbb N$ is closely related to other very interesting $C^*$-algebras, such as the Bunce-Deddens algebras and the Hecke $C^*$-algebra of Bost and Connes~\cite{bos-con}.
In recent years there has been a great deal of interest in other $C^*$-algebras generated by isometric representations of a semigroup $P$, such as the Toeplitz algebra $\mathcal T(P)$ which is generated by the canonical isometric representation on $\ell^2(P)$, and it is natural to ask how Cuntz's algebra relates to these other $C^*$-algebras. It is obviously not the same as $\mathcal T(\mathbb N\rtimes\mathbb N^{\times})$: in $\mathcal Q_\mathbb N$, the isometry associated to the additive generator is unitary. So one is led to guess that $\mathcal Q_\mathbb N$ might be a quotient of $\mathcal T(\mathbb N\rtimes\mathbb N^{\times})$. If so, the Toeplitz algebra $\mathcal T(\mathbb N\rtimes\mathbb N^{\times})$ looks very interesting indeed. There is a general principle, going back at least as far as \cite{eva} and used to effect in \cite{EL,lacaN}, which suggests that the Toeplitz algebra should have a much richer KMS structure than its simple quotient. (The principle is illustrated by the gauge action on the Cuntz algebra $\mathcal O_n$, where the Toeplitz-Cuntz analogue $\mathcal T\mathcal O_n$ has KMS states at every inverse temperature $\beta\geq \log n$, but only the one with $\beta=\log n$ factors through a state of $\mathcal O_n$.)
Our goal here is to answer these questions. We first prove that the pair consisting of the semigroup $\mathbb N\rtimes\mathbb N^{\times}$ and its enveloping group ${\mathbb Q \rtimes \mathbb Q^*_+}$ form a quasi-lattice ordered group in the sense of Nica \cite{nica}; this is itself a little surprising, since it is not one of the semi-direct product quasi-lattice orders discussed in \cite{nica}. However, once we have established that $({\mathbb Q \rtimes \mathbb Q^*_+},\mathbb N\rtimes\mathbb N^{\times})$ is quasi-lattice ordered, it follows
that the Toeplitz algebra $\mathcal T(\mathbb N\rtimes\mathbb N^{\times})$ is universal for Nica-covariant isometric representations of $\mathbb N\rtimes\mathbb N^{\times}$. We can then run this pair through the general theory of \cite{CL2}, and with some effort we can recognise $\mathcal Q_\mathbb N$ as the boundary quotient of $\mathcal T(\mathbb N\rtimes\mathbb N^{\times})$. From this we deduce that $\mathcal Q_\mathbb N$ is a partial crossed product, and thereby provide another proof that it is purely infinite and simple.
We then consider a natural dynamics $\sigma$ on $\mathcal T(\mathbb N\rtimes\mathbb N^{\times})$ which induces that
studied by Cuntz on $\mathcal Q_\mathbb N$, and compute the simplices of KMS${}_\beta$
states for every inverse temperature $\beta$. For
$\beta>2$ the KMS${}_\beta$ states are parametrised by probability
measures on the unit circle. For $\beta \in [1,2]$, only the one
corresponding to Lebesgue measure on the circle survives, and there is
a unique KMS${}_\beta$ state. This phase transition is associated to
the pole of the
partition function, which is the shifted Riemann zeta function $\zeta
(\beta-1)$.
Our system $(\mathcal T(\mathbb N\rtimes\mathbb N^{\times}),\mathbb R,\sigma)$ therefore exhibits some of the behaviour of other number-theoretic systems \cite{bos-con,diri, HL, CM,LvF, cmgl2}, even though our system lacks some of the features which make
the other number-theoretic systems so interesting, such as the
presence of a large symmetry group like
the idele class group of $\mathbb Q$ in \cite{bos-con} or
its two-dimensional analogue in \cite{CM}.
However, the KMS states in our system also display
several interesting phenomena which have not previously occurred for
dynamical systems of number-theoretic origin.
First, not all KMS states factor through the expectation onto the
commutative subalgebra spanned by the range projections of the
isometries: for $\beta>2$, the KMS$_\beta$ states do not necessarily
vanish on the additive generator, which for this system is fixed by
the dynamics. Second, the unique KMS${}_\beta$ states for $1\leq \beta
\leq 2$ have a circular symmetry, which is broken at $\beta=2$, but
this symmetry does not come from an action of the circle on the $C^*$-
algebra $\mathcal T(\mathbb N\rtimes\mathbb N^{\times})$. This phenomenon appears to be related to the
fact that the enveloping group ${\mathbb Q \rtimes \mathbb Q^*_+}$ is nonabelian, and the dual
coaction of ${\mathbb Q \rtimes \mathbb Q^*_+}$ on $\mathcal T(\mathbb N\rtimes\mathbb N^{\times})$ cannot be ``restricted'' to a
coaction of the additive subgroup $\mathbb Q$. And third, as foreshadowed in
\cite[Definition 3.7]{CM2}, the set of
KMS$_\infty$ states (the states that are limits of KMS$_\beta$ states
as $\beta \to \infty$),
which is isomorphic to the simplex of probability measures on the
circle, is much smaller than the set of ground states, which is
isomorphic to the state space of the classical Toeplitz algebra, and
hence is not a simplex.
We begin our paper with a brief discussion of notation and preliminaries from quasi-lattice ordered groups and number theory. Then in \secref{semigpisql}, we show that the semigroup $\mathbb N\rtimes\mathbb N^{\times}$ induces a quasi-lattice order on the group ${\mathbb Q \rtimes \mathbb Q^*_+}$,
and deduce that the associated Toeplitz $C^*$-algebra is generated by a universal Nica-covariant isometric representation (\corref{Toepl=univ}).
In \secref{euclideanalgorithm} we work out a version of the euclidean algorithm suitable for computations involving
Nica-covariant isometric representations of $\mathbb N\rtimes\mathbb N^{\times}$. Once this is done, we characterise in \secref{secpresentation} the Toeplitz
$C^*$-algebra $\mathcal T(\mathbb N\rtimes\mathbb N^{\times})$ of $\mathbb N\rtimes\mathbb N^{\times}$ by giving a presentation in terms of generators and relations (\thmref{toeplitzpresentation});
some of the relations are recognizably variants on Cuntz's relations for $\mathcal Q_\mathbb N$, but others are new.
To apply the structure theory of Toeplitz $C^*$-algebras of quasi-lattice orders, we need to understand the Nica spectrum of $\mathbb N\rtimes\mathbb N^{\times}$, and in \secref{nicaspectrum} we give an explicit parametrisation of this spectrum using integral adeles and supernatural numbers. This allows us to identify the \emph{boundary} of the spectrum, as defined in \cite{CL2}. The boundary in \cite{CL2} is the smallest of many boundaries: there are many ways one can ``go to infinity'' in the semigroup $\mathbb N\rtimes\mathbb N^{\times}$. Of particular interest is the {\em additive boundary}, which corresponds to going to infinity along the additive semigroup $\mathbb N$. In \proref{prodstructure} we show that the additive boundary has a direct product decomposition, which later plays a crucial role in our construction and analysis of KMS$_\beta$ states. In \secref{sectionqn}, we prove that Cuntz's $\mathcal Q_\mathbb N$ is isomorphic to the \emph{boundary quotient} studied in \cite{CL2} (Theorem~\ref{qnisboundaryquotient}), and we use the theory developed in \cite{CL2} to give a quick proof that $\mathcal Q_\mathbb N$ is simple and purely infinite.
In \secref{secKMS} we introduce a natural dynamics $\sigma$ on $\mathcal T(\mathbb N\rtimes\mathbb N^{\times})$,
and state our main result, which
describes the phase transition associated to this natural dynamics (\thmref{maintheorem}).
We also discuss the significance of this phase transition in relation to the symmetries and the structure of the $C^*$-algebra $\mathcal T(\mathbb N\rtimes\mathbb N^{\times})$.
We begin the proof of the main theorem in \secref{seccharacterisationKMSground}. We first show that there are no KMS states for $\beta <1$, and then we characterise the KMS$_\beta$ states by their behaviour on a spanning set for $\mathcal T(\mathbb N\rtimes\mathbb N^{\times})$. This characterisation implies that a KMS$_\beta$ state is determined by its behaviour on the $C^*$-subalgebra generated by the additive generator (\lemref{KMScharacterisationlemma}). In \lemref{lemmagroundcharacterisation}, we give a similar characterisation of ground states.
In \secref{secconstructionKMSground}, we construct KMS$_\beta$ states for $\beta \in [1,\infty]$ by inducing a probability measure
on the additive boundary to a state of $\mathcal T(\mathbb N\rtimes\mathbb N^{\times})$ via the conditional expectation of the dual coaction of ${\mathbb Q \rtimes \mathbb Q^*_+}$ (\proref{productmeasure}).
In \proref{constructKMS>2}, we consider $\beta \in (2,\infty]$, and give a spatial construction of a KMS$_\beta$ state for each probability measure on the circle.
A parallel construction also yields a ground state for each state of $\mathcal T(\mathbb N)$.
We complete the proof of \thmref{maintheorem} in \secref{secsurjectivity}, by showing that the explicit
constructions of \secref{secconstructionKMSground}
correspond exactly to the possibilities left open in \secref{seccharacterisationKMSground}.
The interesting case here is $\beta\in [1,2]$,
and our key idea is the reconstruction formula of
\lemref{phirestrictedtoQB}, which was inspired by Neshveyev's ergodicity proof of the uniqueness of KMS$_\beta$ states on the Hecke $C^*$-algebra of Bost and Connes \cite{nes}. Curiously, though, we can now see that the analogous reconstruction formula for the Bost-Connes system does not need ergodicity at all. We give this ``ergodicity-free" version of Neshveyev's proof in Appendix~ \ref{bcuniqueness}.
\section{Notation and preliminaries} \label{notationandpreliminaries}
\subsection{Toeplitz algebras}\label{Toeplitzalgs} Every cancellative semigroup $P$ has a natural \emph{Toeplitz representation} $T:P\to B(\ell^2(P))$, which is characterised in terms of the usual basis $\{e_x:x\in P\}$ by $T_ye_x=e_{yx}$. Notice that the operators $T_y$ are all isometries. The \emph{Toeplitz algebra} $\mathcal T(P)$ is the $C^*$-subalgebra of $B(\ell^2(P))$ generated by the operators $\{T_y\}$. Our semigroups $P$ will always be generating subsemigroups of a group $G$; as a motivating example, consider the subsemigroup $\mathbb N^2$ of $\mathbb Z^2$. Any isometric representation $V$ of $\mathbb N^2$ on Hilbert space is determined by the two commuting isometries $V_{(1,0)}$ and $V_{(0,1)}$. In the Toeplitz representation of $\mathbb N^2$, however, the two generators satisfy the extra relation $T_{(1,0)}^*T_{(0,1)}=T_{(0,1)}T_{(1,0)}^*$, and it turns out that this relation uniquely characterises the Toeplitz algebra $\mathcal T(\mathbb N^2)$ among $C^*$-algebras generated by non-unitary isometric representations of $\mathbb N^2$. Nica's theory of quasi-lattice ordered groups $(G,P)$ provides a far-reaching generalisation of this result.
A submonoid $P$ of a group $G$ naturally induces a left-invariant partial order on $G$ by $x\leq y$ iff $x^{-1} y \in P$.
Following Nica \cite{nica}, we say that $(G,P)$ is {\em quasi-lattice ordered}
if every pair of elements $x$ and $y$ in $G$ which have a common upper bound in $G$ have a least upper bound $x\vee y $. When they have a common upper bound we write $x\vee y < \infty$, and otherwise $x\vee y=\infty$. (This is not quite Nica's original definition, but it is equivalent. This and other reformulations are discussed in \cite[\S3]{CL1}.) An isometric representation $V:P\to B(\mathcal H)$ is \emph{Nica covariant} if
\[
V_xV_x^*V_yV_y^*=\begin{cases}V_{x\vee y}V_{x\vee y}^*&\text{if $x\vee y<\infty$}\\
0&\text{if $x\vee y=\infty$,}\end{cases}
\]
or equivalently, if
\begin{equation}\label{genNicacov}
V_x^*V_y=\begin{cases}V_{x^{-1}(x\vee y)}V_{y^{-1}(x\vee y)}^*&\text{if $x\vee y<\infty$}\\
0&\text{if $x\vee y=\infty$.}\end{cases}
\end{equation}
Nica showed that there is a $C^*$-algebra $C^*(G,P)$ which is generated by a universal Nica-covariant representation $w:P\to C^*(G,P)$, and we then have $C^*(G,P)=\overline{\operatorname{span}}\{w_xw_y^*:x,y\in P\}$. Nica identified an amenability condition which implies that the representation $\pi_T:C^*(G,P)\to\mathcal T(P)$ is faithful (see \cite{nica} or \cite[Theorem~3.7]{quasilat}). This amenability hypothesis is automatically satisfied when the group $G$ is amenable \cite[\S4.5]{nica}. Since the enveloping group ${\mathbb Q \rtimes \mathbb Q^*_+}$ of our semigroup $\mathbb N\rtimes\mathbb N^{\times}$ is amenable, we can use Nica's theorem to view our Toeplitz algebra $\mathcal T(\mathbb N\rtimes\mathbb N^{\times})$ as the $C^*$-algebra generated by a universal Nica-covariant representation $w:\mathbb N\rtimes\mathbb N^{\times}\to \mathcal T(\mathbb N\rtimes\mathbb N^{\times})$. (Here we use the lower case $w$ to denote the Toeplitz representation $T$ to emphasise that it has a universal property; the obvious letter $t$ is not available because it will later denote time.)
Nica studied $\mathcal T(P)$ by exploiting what he called its ``crossed product-like structure,'' which the present authors subsequently recognised as that of a semigroup crossed product $B_P\rtimes P$ \cite{quasilat}. The underlying algebra $B_P$ is the closed subspace of $\ell^\infty(P)$ spanned by the characteristic functions $1_x $ of the sets $x P=\{y\in P:y\geq x\}$, which is a $C^*$-subalgebra because
$1_x 1_y = 1_{ x \vee y}$. Nica showed that the homomorphisms from $B_P$ to $\mathbb C$ are given by the nonempty hereditary subsets $\omega$ of $P$ which are directed by the partial order on $G$: the corresponding homomorphism is defined by\[
\hat\omega(f):=\lim_{x\in \omega}f(x),
\]
which makes sense because $\omega$ is directed and $f$ is the uniform limit of functions which are eventually constant. Notice that $\hat\omega$ is characterised by the formula
\[
\hat \omega(1_x)=\begin{cases}1&\text{if $x\in \omega$}\\
0&\text{if $x\notin \omega$.}
\end{cases}
\]
The collection $\Omega$ of nonempty directed hereditary sets $\omega$, viewed as a subset of the compact space $\{0,1\}^P$, is now called the \emph{Nica spectrum} of $P$.
An important tool in our analysis will be the realisation of $C^*(G,P)$ as a partial crossed product $C(\Omega_{\mathcal N})\rtimes_\alpha ({\mathbb Q \rtimes \mathbb Q^*_+})$ obtained in \cite[\S6]{topfree}. The space $\Omega_{\mathcal N}$ used in \cite{topfree} looks slightly different: its elements are the hereditary subsets of $G$ which contain the identity $e$ of $G$. However, it was observed in \cite[Remark~6.5]{topfree} that $\omega\mapsto \omega\cap P$ is a homeomorphism of $\Omega_{\mathcal N}$ onto the Nica spectrum $\Omega$, so we can apply the general theories of \cite{topfree} and \cite{CL2} in our situation. For $x\in P$, the partial map $\theta_x$ has domain all of $\Omega$, and $\theta_x(\omega)$ is the hereditary closure $\operatorname{Her}(x\omega)$ of $x\omega:=\{xy:y\in \omega\}$; since the domain of $\theta_g$ is empty unless $g\in PP^{-1}$, this completely determines $\theta$. The action $\alpha$ is defined by $\alpha_g(f)(\omega)=f(\theta_{g^{-1}}(\omega))$ when this makes sense, and $\alpha_g(f)(\omega)=0$ otherwise.
\subsection{Number theory} We write $\mathbb N$ for the semigroup of natural numbers (including $0$) under addition and $\mathbb N^{\times}$ for the semigroup of positive integers under multiplication. We also write $\mathbb Q^*$ for the group of non-zero rational numbers under multiplication, and $\mathbb Q^*_+$ for the subgroup of positive rational numbers.
We denote the set of prime numbers by $\mathcal P$, and we write $e_p(a)$ for the exponent of $p$ in the prime factorization of $a\in \mathbb N^{\times}$, so that $a=\prod_{p\in\mathcal P}p^{e_p(a)}$; then $a\mapsto \{e_p(a)\}$ is an isomorphism of $\mathbb N^{\times}$ onto the direct sum $\bigoplus_{p\in\mathcal P}\mathbb N$. We also use the \emph{supernatural numbers}, which are formal products $N=\prod_{p\in\mathcal P}p^{e_p(N)}$ in which the exponents $e_p(N)$ belong to $\mathbb N\cup\{\infty\}$; thus the set $\mathcal N$ of supernatural numbers is the direct product $\mathcal N= \prod_{p \in\mathcal P} p^{\mathbb N\cup \{\infty\}}$. For $M,N\in\mathcal N$, we say that $M$ divides $N$ (written $M|N$) if $e_p(M)\leq e_p(N)$ for all $p$, and then any pair $M,N$ has a greatest common divisor $\gcd(M,N)$ in $\mathcal N$ and a lowest common multiple $\operatorname{lcm}(M,N)$.
For $N\in \mathcal N$, the \emph{$N$-adic integers} are the elements of the ring
\[
\mathbb Z/N := \varprojlim \big((\mathbb Z/a\mathbb Z):a\in\mathbb N^{\times},\;a|N\big).
\]
When $N$ is a positive integer, $\mathbb Z/N$ is the ring $\mathbb Z/N\mathbb Z$, and for each prime $p$, $\mathbb Z/p^\infty$ is the usual ring $\mathbb Z_p$ of $p$-adic integers. When $N=\nabla:=\prod_p p^\infty$ is the largest supernatural number, $\mathbb Z/\nabla$ is the ring $\widehat\mathbb Z$ of integral ad{\`e}les. If $M$ and $N$ are supernatural numbers and $M|N$, then there is a canonical topological-ring homomorphism of $\mathbb Z/N$ onto $\mathbb Z/M$, and we write $r(M)$ for the image of $r\in \mathbb Z/N$ in~$\mathbb Z/M$.
It is standard practice to freely identify $\widehat\mathbb Z$ with the product $\prod_p \mathbb Z_p$, and the next proposition gives a similar product decomposition for $\mathbb Z/N$. The main ingredient in the proof is the Chinese remainder theorem.
\begin{proposition}\label{prodvsinvlim}
Let $N =\prod_{p} p^{e_p(N)} $ be a supernatural number. Then the map $r\mapsto \{r(p^{e_p(N)})\}_{p\in \mathcal P}$ is a (topological ring) isomorphism
of $\mathbb Z/N$ onto $\prod_{p\in\mathcal P}\mathbb Z/p^{e_p(N)}$.
\end{proposition}
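For a finite supernatural number the proposition reduces to the classical Chinese remainder theorem, and it is easy to check in examples; the following tiny Python sketch (our own illustration, with the arbitrary choice $N=12=2^{2}\cdot 3$) verifies bijectivity of the map in that case.
\begin{verbatim}
# Finite illustration of the product decomposition: N = 12 = 2^2 * 3 (sample choice).
N, prime_powers = 12, [4, 3]
images = {tuple(r % q for q in prime_powers) for r in range(N)}
assert len(images) == N   # r -> (r mod 4, r mod 3) is injective, hence bijective
print(sorted(images))
\end{verbatim}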
Our arguments involve a fair bit of modular arithmetic, and we will often need to change base. So the next lemma will be useful.
\begin{lemma}\label{timesb}
Suppose that $a$ and $b$ are integers greater than $1$. Then the map $n\mapsto an$ induces a well-defined injection $\times a:\mathbb Z/b\mathbb Z\to \mathbb Z/ab\mathbb Z$; the image of this map is $\{m\in\mathbb Z/ab\mathbb Z:m\equiv 0\pmod a\}$, so we have a short exact sequence
\[
\xymatrix{
0\ar[r]&{\mathbb Z/b\mathbb Z}\ar[r]^{\times a} &\mathbb Z/ab\mathbb Z\ar[rr]^{\pmod a}&&\mathbb Z/a\mathbb Z\ar[r]&0.
}
\]
\end{lemma}
As a point of notation, when $b$ is clear from the context, we write $an$ for the image of $n\in \mathbb Z/b\mathbb Z$ under the map $\times a$.
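The content of \lemref{timesb} can also be checked numerically for small moduli; the sketch below (an illustration of ours, with arbitrary sample values $a=4$ and $b=6$) verifies that $\times a$ is injective and that its image consists of the multiples of $a$.
\begin{verbatim}
# Check of the times-a map Z/bZ -> Z/abZ for sample values; illustration only.
a, b = 4, 6
image = sorted((a * n) % (a * b) for n in range(b))
assert len(set(image)) == b                               # n -> an is injective
assert image == [m for m in range(a * b) if m % a == 0]   # image = multiples of a
print(image)
\end{verbatim}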
\section{A quasi-lattice order on ${\mathbb Q \rtimes \mathbb Q^*_+}$}\label{semigpisql}
Let ${\mathbb Q \rtimes \mathbb Q^*_+}$ denote the semidirect product of the additive rationals by the
multiplicative positive rationals, where the group operation and inverses are given by
\begin{align*}
(r,x)(s,y)&= (r+xs, xy) \qquad\text{ for } r,s \in \mathbb Q \text{ and } x,y \in \mathbb Q^*_+,\ \text{ and }\\
(r,x)^{-1}&= (-x^{-1} r , x^{-1}) \qquad \text{ for } r \in \mathbb Q \text{ and } x \in \mathbb Q^*_+.
\end{align*}
\begin{proposition}
The group ${\mathbb Q \rtimes \mathbb Q^*_+}$ is generated by the elements $(1,1)$ and $\{(0,p): p\in \mathcal P\}$ which satisfy the relations
\begin{equation}\label{presentationqxqx}
(0,p) (1,1) = (1,1)^p (0,p) \quad \text{ and } \quad (0,p) (0,q) = (0,q) (0,p) \qquad \text{ for all } p,q\in \mathcal P,
\end{equation}
and this is a presentation of ${\mathbb Q \rtimes \mathbb Q^*_+}$.
\end{proposition}
\begin{proof}
It is easy to check that the elements $(1,1)$ and $(0,p)$ satisfy \eqref{presentationqxqx}. Suppose $G$ is a group containing elements $s$ and $\{v_p:p\in \mathcal P\}$ satisfying the relations $v_ps=s^pv_p$ and $v_pv_q=v_qv_p$. Since $\mathbb Q^*_+$ is the free abelian group generated by $\mathcal P$ and $v_p$ commutes with $v_q$, the map $p\mapsto v_p$ extends to a homomorphism $v:\mathbb Q^*_+\to G$. Since $\mathbb Z$ is free abelian, for each $n\in \mathbb N^{\times}$ there is a homomorphism $\phi_n:n^{-1}\mathbb Z\to G$ satisfying $\phi_n(n^{-1}k)=
v_n^{-1}s^k v_n $, and these combine to give a well-defined homomorphism $\phi:\mathbb Q=\bigcup_nn^{-1}\mathbb Z$ into $G$. Now the first relation extends to $v_rs^k=s^{rk}v_r$, and it follows easily that $v$ and $\phi$ combine to give a homomorphism of the semidirect product ${\mathbb Q \rtimes \mathbb Q^*_+}$ into $G$.
\end{proof}
We shall consider the unital subsemigroup $\mathbb N\rtimes\mathbb N^{\times}$ of
${\mathbb Q \rtimes \mathbb Q^*_+}$, which has the same presentation, interpreted in the category of monoids. Since $(\mathbb N\rtimes\mathbb N^{\times})\cap(\mathbb N\rtimes\mathbb N^{\times})^{-1}=\{(0,1)\}$, the subsemigroup $\mathbb N\rtimes\mathbb N^{\times}$ induces a left-invariant partial order on ${\mathbb Q \rtimes \mathbb Q^*_+}$ as follows: for $(r,x)$ and $(s,y)$ in
${\mathbb Q \rtimes \mathbb Q^*_+}$,
\begin{align}
(r,x) \leq (s,y) & \iff (r,x)^{-1} (s,y) \in \mathbb N\rtimes\mathbb N^{\times} \nonumber \\
& \iff (-x^{-1} r, x^{-1}) (s,y) \in \mathbb N\rtimes\mathbb N^{\times} \nonumber \\
& \iff (-x^{-1} r + x^{-1} s , x^{-1} y) \in \mathbb N\rtimes\mathbb N^{\times} \nonumber \\
& \iff x^{-1}(s-r) \in \mathbb N \text{ \ and \ } x^{-1}y \in\mathbb N^{\times}. \label{lequseful}
\end{align}
Our first goal is to show that this ordering has the quasi-lattice property used in \cite{nica} and \cite{quasilat}.
\begin{proposition}\label{qlproperty}
The pair $({\mathbb Q \rtimes \mathbb Q^*_+}, \mathbb N\rtimes\mathbb N^{\times})$ is a quasi-lattice ordered group.
\end{proposition}
\begin{proof}
By \cite[Lemma 7]{CL1}, it suffices to show that if an element $(r,x)$ of $ {\mathbb Q \rtimes \mathbb Q^*_+}$ has an upper bound in $\mathbb N\rtimes\mathbb N^{\times}$, then it has a least upper bound in $\mathbb N\rtimes\mathbb N^{\times}$. Suppose $(k,c)\in \mathbb N\rtimes\mathbb N^{\times}$ and $(r,x)\leq (k,c)$. Then from \eqref{lequseful} we have $k\in r+x\mathbb N$, so $(r+x\mathbb N)\cap \mathbb N$ is nonempty; let $m$ be the smallest element of $(r+x\mathbb N)\cap \mathbb N$. Write $x=a/b$ with $a,b\in\mathbb N$ and $a$, $b$ coprime. We claim that $(m,a)$ is a least upper bound for $(r,x)$ in $\mathbb N\rtimes\mathbb N^{\times}$.
To see that $(r,x)\leq (m,a)$, note that $m\in r+x\mathbb N$ and $x^{-1}a=b\in\mathbb N^{\times}$, and apply \eqref{lequseful}. To see that $(m,a)$ is a \emph{least} upper bound, suppose that $(l,d)\in \mathbb N\rtimes\mathbb N^{\times}$ satisfies $(r,x)\leq (l,d)$, so that by \eqref{lequseful} we have (i) $x^{-1}d\in \mathbb N^{\times}$ and (ii) $x^{-1}(l-r)\in\mathbb N$. Property (i) says that $a^{-1}bd=x^{-1}d$ belongs to $\mathbb N^{\times}$, which since $(a,b)=1$ implies that $a^{-1}d\in\mathbb N^{\times}$. Property (ii) implies that $l\in r+x\mathbb N$, so that $l\geq m:=\min((r+x\mathbb N)\cap \mathbb N)$. Property (ii) also implies that
\[
a^{-1}b(l-m)=x^{-1}(l-m)\in x^{-1}((r+x\mathbb N)-(r+x\mathbb N))\subset \mathbb Z,
\]
which, since $(a,b)=1$, implies that $a^{-1}(l-m)\in \mathbb Z$. Since $l\geq m$, we have $a^{-1}(l-m)\in \mathbb N$. Now we have $a^{-1}d\in \mathbb N^{\times}$ and $a^{-1}(l-m)\in\mathbb N$, which by \eqref{lequseful} say that $(m,a)\leq (l,d)$, as required.
\end{proof}
\begin{remark}
Two elements $(m,a)$ and $(n,b)$ of $\mathbb N\rtimes\mathbb N^{\times}$ have a common upper bound if and only if the set $(m+a\mathbb N)\cap (n+b\mathbb N)$ is nonempty, and in fact it is easy to check that \begin{equation} \label{Vcharacterisation}
(m,a) \vee (n,b) = \begin{cases}
\infty & \text{ if } (m+a\mathbb N)\cap (n+b\mathbb N) = \emptyset, \\
( l , \operatorname{lcm}(a,b)) & \text{ if } (m+a\mathbb N)\cap (n+b\mathbb N)\not = \emptyset,
\end{cases}
\end{equation}
where $l$ is the smallest element of $(m+a\mathbb N)\cap (n+b\mathbb N)$. In the next section we will see that $(m+a\mathbb N)\cap (n+b\mathbb N)\neq\emptyset$ if and only if $m-n$ is divisible by the greatest common divisor $\gcd(a,b)$, and provide an algorithm for computing $l$ when it exists.
\end{remark}
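The description \eqref{Vcharacterisation} is easy to turn into a small computation; the following Python sketch (our own brute-force illustration, not part of the paper) returns the least upper bound or reports that none exists, using the divisibility criterion just mentioned.
\begin{verbatim}
# Least upper bound in the semidirect product N x N^times; illustrative sketch.
from math import gcd

def join(m, a, n, b):
    if (m - n) % gcd(a, b) != 0:
        return None                          # (m+aN) and (n+bN) are disjoint
    l = max(m, n)
    while (l - m) % a != 0 or (l - n) % b != 0:
        l += 1                               # smallest element of the intersection
    return (l, a * b // gcd(a, b))           # second coordinate is lcm(a, b)

print(join(1, 4, 3, 6))                      # (9, 12)
print(join(0, 2, 1, 4))                      # None: 0 + 2N and 1 + 4N are disjoint
\end{verbatim}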
As we remarked in the Introduction, we found Proposition~\ref{qlproperty} a little surprising, because the pair of semigroups $P=\mathbb N^{\times}$ and $Q=\mathbb N$ do not satisfy the hypotheses of \cite[Example~2.3.3]{nica}. Since its proof is really quite elementary, we stress that Proposition~\ref{qlproperty} has some important consequences. In particular, since the group ${\mathbb Q \rtimes \mathbb Q^*_+}$ is amenable, we can immediately deduce from the work of Nica discussed in \secref{Toeplitzalgs} that the Toeplitz algebra $\mathcal T(\mathbb N\rtimes\mathbb N^{\times})$ enjoys a universal property.
\begin{corollary}\label{Toepl=univ}
The Toeplitz algebra $\mathbb TT(\mathbb N^{\times}nx)$ is generated by a universal Nica-covariant isometric representation $w:\mathbb N^{\times}nx\to \mathbb TT(\mathbb N^{\times}nx)$.
\end{corollary}
\section{The euclidean algorithm}\label{euclideanalgorithm}
Suppose that $c,d\in \mathbb N$ are relatively prime. Then we know from the usual euclidean algorithm that, for every $k\in\mathbb N$, there are integers $\alpha$ and $\beta$ such that $k=\alpha c-\beta d$. Since $\alpha+md$ and $\beta+mc$ then have the same property for each $m\in \mathbb Z$, we can further assume that $\alpha$ and $\beta$ are non-negative. Since the set
\[
\{\alpha\in \mathbb N:\text{there exists $\beta\in \mathbb N$ such that } k=\alpha c-\beta d\}
\]
is nonempty, it has a smallest element, and then the corresponding $\beta$ is the smallest non-negative integer for which there exists an $\alpha$ with $k=\alpha c-\beta d$. Thus it makes sense to talk about the \emph{smallest non-negative solution} $(\alpha,\beta)$ of $k=\alpha c-\beta d$. In the proof of Theorem~\ref{toeplitzpresentation} we use the numbers $\alpha_i$ and $\beta_i$ arising in the following variation of the euclidean algorithm, which computes this smallest solution $(\alpha,\beta)$.
\begin{proposition}\label{Euclid}
Suppose $\gcd(c,d)=1$ and $k\in \mathbb N$. Define sequences $\alpha_n$, $\beta_n$ inductively as follows:
\begin{itemize}
\item define $\alpha_0$ to be the unique non-negative integer such that $-c<k-\alpha_0c\leq 0;$
\item given $\alpha_i$ for $0\leq i\leq n$ and $\beta_i$ for $0\leq i<n$, define $\beta_n$ by
\begin{equation}\label{defbetan}
0\leq k-\Big(\sum_{i=0}^n\alpha_i\Big)c+\Big(\sum_{i=0}^n\beta_i\Big)d<d;
\end{equation}
\item given $\alpha_i$ for $0\leq i\leq n$ and $\beta_i$ for $0\leq i\leq n$, define $\alpha_{n+1}$ by
\begin{equation}\label{defalphan+1}
-c< k-\Big(\sum_{i=0}^{n+1}\alpha_i\Big)c+\Big(\sum_{i=0}^n\beta_i\Big)d\leq 0.
\end{equation}
\end{itemize}
Then there exist $n(\alpha)$ and $n(\beta)$ (with $n(\beta)$ equal to either $n(\alpha)$ or $n(\alpha)-1$) such that $\alpha_i=0$ for $i>n(\alpha)$ and $\beta_i=0$ for $i>n(\beta)$, and the pair $(\alpha,\beta)=
\big(\sum_{i=0}^{n(\alpha)}\alpha_i,\sum_{i=0}^{n(\beta)}\beta_i\big)$
is the smallest non-negative solution of $k=\alpha c-\beta d$.
\end{proposition}
\begin{proof}
We know from the discussion at the start of the section that there is a smallest solution $(\alpha,\beta)$; our problem is to show that the sequences $\{\alpha_n\}$ and $\{\beta_n\}$ behave as described. We first note that if any $\alpha_n$ or any $\beta_n$ is zero, then so are all subsequent $\alpha_i$ and $\beta_i$. We deal with the cases $c>d$ and $c<d$ separately.
Suppose that $c>d$. Then for every $n\geq 0$, Equation~\eqref{defbetan} implies that
\[
-c\leq k-\Big(\sum_{i=0}^n\alpha_i\Big)c+\Big(\sum_{i=0}^n\beta_i\Big)d-c<d-c<0,
\]
so $\alpha_{n+1}$ is either $0$ (if we have equality at the left-hand end) or $1$. So the sequence $\{\alpha_n\}$ starts off $\{\alpha_0,1,1,\cdots\}$, and is eventually always $1$ or always $0$. Since $k=\alpha c-\beta d\leq \alpha c$, we have $\alpha_0\leq \alpha$. We define $n(\alpha)=\alpha-\alpha_0$, and claim that
$\alpha_n=1$ for $1\leq n\leq n(\alpha)$. To see this, suppose to the contrary that $\alpha_n=0$ for some $n$ satisfying $1\leq n\leq n(\alpha)$. Then \eqref{defbetan} and \eqref{defalphan+1} imply that
\[
k-\Big(\sum_{i=0}^{n-1}\alpha_i\Big)c+\Big(\sum_{i=0}^{n-1}\beta_i\Big)d= 0,
\]
which, since $\sum_{i=0}^{n-1}\alpha_i=\alpha_0+n-1<\alpha_0+(\alpha-\alpha_0)=\alpha$, contradicts the assumption that $(\alpha,\beta)$ is the smallest solution. So $\alpha_n=1$ for $1\leq n\leq n(\alpha)$, as claimed. But now $\alpha=\sum_{i=0}^{n(\alpha)}\alpha_i$, and \eqref{defbetan} becomes
\begin{equation}\label{slippery}
0\leq k-\Big(\sum_{i=0}^{n(\alpha)}\alpha_i\Big)c+\Big(\sum_{i=0}^{n(\alpha)}\beta_i\Big)d
=-\beta d+\Big(\sum_{i=0}^{n(\alpha)}\beta_i\Big)d<d,
\end{equation}
which is only possible if $-\beta +\sum_{i=0}^{n(\alpha)}\beta_i=0$; then we have equality in \eqref{slippery}, and this implies that $\alpha_n=0$ and $\beta_n=0$ for $n>n(\alpha)$. So when $c>d$, $n(\alpha)=\alpha-\alpha_0$ and either $n(\beta)=n(\alpha)-1$ (if $\beta_{n(\alpha)}=0$) or $n(\beta)=n(\alpha)$ (if $\beta_{n(\alpha)}\not=0$) have the required properties.
For $c<d$, a similar argument shows that $\beta_n=1$ for $0\leq n\leq \beta-1$, so $n(\beta):=\beta-1$ and either $n(\alpha)=n(\beta)$ or $n(\alpha)=n(\beta)+1$ have the required properties.
\end{proof}
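The recursion in Proposition~\ref{Euclid} is easy to run in practice. The following Python sketch is ours (the function name is hypothetical); it returns the sequences $(\alpha_i)$ and $(\beta_i)$ together with the smallest non-negative solution for $k\geq 0$, and the case $k<0$ is reduced to this one in the paragraph that follows.
\begin{verbatim}
from math import gcd

def smallest_solution(k, c, d):
    """Run the recursion of Proposition `Euclid' for coprime c, d and k >= 0.
    Returns (alpha, beta, alphas, betas), where (alpha, beta) is the smallest
    non-negative solution of k = alpha*c - beta*d."""
    assert k >= 0 and gcd(c, d) == 1
    alphas, betas = [-((-k) // c)], []   # alpha_0: least alpha with k - alpha*c <= 0
    t = k - alphas[0] * c                # running value; now -c < t <= 0
    while t != 0:
        betas.append(-(t // d))          # beta_n: least beta with t + beta*d >= 0
        t += betas[-1] * d               # now 0 <= t < d
        if t == 0:
            break
        alphas.append(-((-t) // c))      # alpha_{n+1}: least alpha with t - alpha*c <= 0
        t -= alphas[-1] * c              # back to -c < t <= 0
    return sum(alphas), sum(betas), alphas, betas

print(smallest_solution(7, 3, 5))        # (4, 1, [3, 1], [1]):  7 = 4*3 - 1*5
print(smallest_solution(1, 2, 3))        # (2, 1, [1, 1], [1]):  1 = 2*2 - 1*3
\end{verbatim}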
If $k\in \mathbb Z$ and $k<0$, we can apply Proposition~\ref{Euclid} to $-k$ and the pair $d$, $c$, obtaining a smallest non-negative solution of $-k=\beta d-\alpha c$. Notice that we then have $k=\alpha c-\beta d$. This situation occurs so often that it is worth making the following simplifying convention:
\begin{convention}\label{convsmallestsol}
When $k$ is an integer and we say that ``$(\alpha,\beta)$ is the smallest non-negative solution of $k=\alpha c-\beta d$,'' we mean that $(\alpha,\beta)$ is the smallest non-negative solution of $k=\alpha c-\beta d$ when $k\geq 0$ (as before), and that $(\beta,\alpha)$ is the smallest non-negative solution of $-k=\beta d-\alpha c$ when $k<0$.
\end{convention}
The next proposition explains why this discussion of the euclidean algorithm is relevant to the semigroup $\mathbb N^{\times}nx$. Recall that $\operatorname{lcm}(a,b)$ is the lowest common multiple of $a$ and $b$.
\begin{prop}\label{eucliduseful}
Suppose that $(m,a)$ and $(n,b)$ are in $\mathbb N^{\times}nx$. Then $(m+a\mathbb N)\cap (n+b\mathbb N)$ is nonempty if and only if $\gcd(a,b)|m-n$. If so, write $a'=a/\gcd(a,b)$, $b'=b/\gcd(a,b)$, and let $(\alpha,\beta)$ be the smallest non-negative solution of $(n-m)/\gcd(a,b)=\alpha a'-\beta b'$ (using Convention~\ref{convsmallestsol}). Then $l:=m+a\alpha =n+b\beta$ is the smallest element of $(m+a\mathbb N)\cap (n+b\mathbb N)$,
and we have
\begin{align*}
(m,a) \vee (n,b) & = (l,\operatorname{lcm}(a,b))\\
(m,a)^{-1} (l,\operatorname{lcm}(a,b))&= (a^{-1}(l-m), a^{-1}\operatorname{lcm}(a,b)) = (\alpha, b'),\\
(n,b)^{-1} (l,\operatorname{lcm}(a,b))&= (b^{-1}(l-n), b^{-1}\operatorname{lcm}(a,b)) = (\beta, a').
\end{align*}\end{prop}
\begin{proof}
The discussion at the start of the section shows that
\[
(m+a\mathbb N)\cap (n+b\mathbb N)\not=\emptyset\Longleftrightarrow (m+a\mathbb Z)\cap (n+b\mathbb Z)\not=\emptyset\Longleftrightarrow m\equiv n\pmod{\gcd(a,b)}.
\]
Then any solution of $(n-m)/\gcd(a,b)=\alpha a'-\beta b'$ will satisfy $m+a\alpha =n+b\beta$, and the smallest non-negative solution of $(n-m)/\gcd(a,b)=\alpha a'-\beta b'$ will give the smallest common value. The last two formulas are an easy calculation.
\end{proof}
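Proposition~\ref{eucliduseful} thus gives a concrete procedure for computing joins and the associated elements $(\alpha,b')$ and $(\beta,a')$. In the Python sketch below (ours, not part of the proof) the smallest non-negative solution is found with a modular inverse rather than with the recursion of Proposition~\ref{Euclid}; the two computations agree, and the first component of the output can be checked against the brute-force join given earlier.
\begin{verbatim}
from math import gcd, lcm, inf

def join_and_complements(m, a, n, b):
    """Returns ((l, lcm(a,b)), (alpha, b'), (beta, a')) as in Proposition
    `eucliduseful', or inf when (m,a) v (n,b) does not exist."""
    g = gcd(a, b)
    if (n - m) % g != 0:
        return inf
    ap, bp, k = a // g, b // g, (n - m) // g
    if k >= 0:   # least beta >= 0 with k + beta*b' divisible by a'
        beta = (-k * pow(bp, -1, ap)) % ap if ap > 1 else 0
        alpha = (k + beta * bp) // ap
    else:        # k < 0: least alpha >= 0 with alpha*a' = k (mod b')
        alpha = (k * pow(ap, -1, bp)) % bp if bp > 1 else 0
        beta = (alpha * ap - k) // bp
    l = m + a * alpha
    assert l == n + b * beta
    return (l, lcm(a, b)), (alpha, bp), (beta, ap)

print(join_and_complements(3, 4, 5, 6))
# ((11, 12), (2, 3), (1, 2)):  (3,4) v (5,6) = (11,12),
# (3,4)^{-1}(11,12) = (2,3)  and  (5,6)^{-1}(11,12) = (1,2)
\end{verbatim}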
\section{A presentation for the Toeplitz algebra of $\mathbb N^{\times}nx$}\label{secpresentation}
Our goal in this section is to verify the following presentation for $\mathbb TT(\mathbb N^{\times}nx)$. Recall from \secref{Toeplitzalgs} that $\mathbb TT(\mathbb N^{\times}nx)$ is generated by a universal Nica-covariant representation $w:\mathbb N^{\times}nx\to \mathbb TT(\mathbb N^{\times}nx)$.
\begin{theorem} \label{toeplitzpresentation}
Let $A$ be the universal $C^*$-algebra generated by isometries $s$ and $\{v_p:p\in \mathcal P\}$ satisfying the relations
\begin{itemize}
\item[] \begin{itemize}
\item[(T1)]\ $v_p s = s^p v_p$,
\item[(T2)]\ $v_p v_q = v_q v_p$,
\item[(T3)]\ $v_p^* v_q = v_q v_p^*$ when $p\neq q$,
\item[(T4)]\ $s^* v_p = s^{p-1} v_p s^*$, and
\item[(T5)]\ $v_p^* s^k v_p = 0$ for $1 \leq k < p$.
\end{itemize}
\end{itemize}
Then there is an isomorphism $\pi$ of $\mathbb TT(\mathbb N^{\times}nx)$ onto $A$ such that $\pi(w_{(1,1)})=s$ and $\pi(w_{(0,p)})=v_p$ for every $p\in \mathcal P$.
\end{theorem}
\begin{remark}
We usually use upper case $V$ or $W$ to denote isometric representations of semigroups, and lower case $v$ or $w$ if we are claiming that they have some universal property. Similarly, we usually write $S$ for a single isometry to remind us of the unilateral shift and $s$ for a single isometry with a universal property. We discovered towards the end of this project that our notation clashes with that used by Cuntz --- indeed, we couldn't have got it more different if we had tried. (He denotes his additive generator by $u$ and his isometric representation of $\mathbb N^{\times}$ by $s$.) By the time we noticed this,
the shift $s$ and the isometries $v_p$ were firmly embedded in our manuscript and in our minds, and it seemed to be asking for trouble to try to correct them all, so we didn't. But we apologise for any confusion this causes.
\end{remark}
To prove this theorem, we show
\begin{itemize}
\item[(a)] that the operators $S=w_{(1,1)}$ and $V_p=w_{(0,p)}$ satisfy the relations (T1--5), and hence there is a homomorphism $\rho_w:A\to \mathbb TT(\mathbb N^{\times}nx)$ such that $\rho_w(s)=w_{(1,1)}$ and $\rho_w(v_p)=w_{(0,p)}$; and
\item[(b)] that the formula
\[
X_{(m,a)}:=s^mv_a:=s^m{\textstyle \prod_{p\in\mathcal P} v_p^{e_p(a)}}
\]
defines a Nica-covariant isometric representation $X=X^{s,v}$ of $\mathbb N^{\times}nx$ in $A$, and hence induces a homomorphism $\pi_{s,v}:\mathbb TT(\mathbb N^{\times}nx)\to A$.
\end{itemize}
Given these, it is easy to check that $\rho_w$ and $\pi_{s,v}$ are inverses of each other, and $\pi:=\pi_{s,v}$ is the required isomorphism.
In view of \eqref{genNicacov} and Proposition~\ref{eucliduseful}, an isometric representation $W$ of $\mathbb N^{\times}nx$ is Nica covariant if and only if
\begin{equation}
\label{covarianceformula}
W_{(m,a)}^* W_{(n,b)} = \begin{cases}
0 & \text{ if } m\not\equiv n\pmod{\gcd(a,b)} \\
W_{(\alpha,b')} W_{(\beta,a')}^* & \text{ if } m\equiv n \pmod{\gcd(a,b)},
\end{cases}
\end{equation}
where $a' = a/\gcd(a,b)$, $b' = b/\gcd(a,b)$, and (using Convention~\ref{convsmallestsol}) $(\alpha,\beta)$ is the smallest non-negative solution of $(n-m)/\gcd(a,b)=\alpha a'-\beta b'$. The proof of Theorem~\ref{toeplitzpresentation} uses the euclidean algorithm of Proposition~\ref{Euclid} to recognise the $\alpha$ and $\beta$ appearing on the right-hand side of \eqref{covarianceformula}.
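Formula \eqref{covarianceformula} can also be checked numerically in the Toeplitz representation of $\mathbb N^{\times}nx$ on $\ell^2(\mathbb N^{\times}nx)$, in which $W_{(m,a)}e_{(n,b)}=e_{(m,a)(n,b)}$ and the adjoint $W_{(m,a)}^*$ kills $e_{(r,x)}$ unless $(m,a)\leq (r,x)$, when it returns $e_{(m,a)^{-1}(r,x)}$. The Python sketch below is ours and is purely a sanity check: vectors are encoded as dictionaries of coefficients, and the smallest non-negative solution is found by brute force.
\begin{verbatim}
from math import gcd
from itertools import product

def W(m, a, vec):       # W_{(m,a)} e_{(n,b)} = e_{(m+an, ab)}
    return {(m + a * n, a * b): c for (n, b), c in vec.items()}

def Wstar(m, a, vec):   # kills e_{(r,x)} unless a | x, r >= m and a | (r - m)
    return {((r - m) // a, x // a): c for (r, x), c in vec.items()
            if x % a == 0 and r >= m and (r - m) % a == 0}

def smallest_nonneg(k, ap, bp):
    # componentwise smallest (alpha, beta) >= 0 with k = alpha*ap - beta*bp
    alpha = next(t for t in range(10**6)
                 if t * ap >= k and (t * ap - k) % bp == 0)
    return alpha, (alpha * ap - k) // bp

def rhs(m, a, n, b, vec):   # right-hand side of the covariance formula
    g = gcd(a, b)
    if (m - n) % g != 0:
        return {}
    ap, bp = a // g, b // g
    alpha, beta = smallest_nonneg((n - m) // g, ap, bp)
    return W(alpha, bp, Wstar(beta, ap, vec))

sample = [(1, 2), (0, 3), (3, 6)]
for (m, a), (n, b), (r, x) in product(sample, repeat=3):
    e = {(r, x): 1}
    assert Wstar(m, a, W(n, b, e)) == rhs(m, a, n, b, e)
print("covariance formula verified on the sample")
\end{verbatim}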
To prove (a) we note that (T1) holds because $(0,p)(1,1)=(p,1)(0,p)$ and $w$ is a homomorphism, and (T2) holds because $(0,p)(0,q)=(0,pq)=(0,q)(0,p)$. Equations~(T3), (T4) and (T5) are the Nica covariance relation \eqref{covarianceformula} for $(m,a)=(0,p)$ and $(n,b)=(0,q)$; for $(m,a)=(1,1)$ and $(n,b)=(0,p)$; and for $(m,a)=(0,p)$ and $(n,b)=(k,p)$, respectively.
So now we turn to (b). The first observation, which will be used many times later, is that the relations (T1)--(T5) extend to the $v_a$, as follows.
\begin{lemma}\label{relsata}
Suppose that $s$ and $\{v_p:p\in \mathcal P\}$ are isometries satisfying the relations \textnormal{(T1)--(T5)}. Then the isometries $v_a:=\prod_{p\in\mathcal P} v_p^{e_p(a)}$ for $a\in \mathbb N^\times$ satisfy
\begin{itemize}
\item[] \begin{itemize}
\item[(T1')]\ $v_a s = s^a v_a$,
\item[(T2')]\ $v_a v_b = v_b v_a$,
\item[(T3')]\ $v_a^* v_b = v_b v_a^*$ whenever $\gcd(a,b)=1$,
\item[(T4')]\ $s^* v_a = s^{a-1} v_a s^*$, and
\item[(T5')]\ $v_a^* s^k v_a = 0$ for $1 \leq k < a$.
\end{itemize}
\end{itemize}
\end{lemma}
After we have proved this lemma, a reference to (T5), for example, could refer to either the original (T5) or to (T5').
\begin{proof}
Equations (T1'), (T2') and (T3') follow immediately from their counterparts for $a$ prime. We prove (T4') by induction on the number of prime factors of $a$. We know from (T4) that (T4') holds when $a$ is prime. Suppose that (T4') is true for every $a\in \mathbb N^{\times}$ with $n$ prime factors, and $b=aq\in \mathbb N^{\times}$ has $n+1$ prime factors. Then
\begin{align*}\label{2paragraphsabove}
s^*v_b&=s^* v_{aq} = s^* v_{a}v_{q} = s^{a-1} v_a s^* v_q = s^{a-1} v_a s^{q-1} v_q s^*\\
& = s^{a-1} s^{a(q-1)} v_av_q s^*= s^{aq -1} v_{aq} s^*=s^{b-1}v_bs^*,
\end{align*}
and we have proved (T4'). For (T5'), first prove by induction on $n$ (using (T1) as well as (T5)) that $v_{p}^{*n}s^kv_{p}^n\not=0$ implies $p^n|k$. Then
\begin{align*}
v_a^*s^kv_a\not=0&\Longrightarrow v_p^{*e_p(a)}s^kv_p^{e_p(a)}\not=0\ \text{ for all $p|a$}\\
&\Longrightarrow p^{e_p(a)}|k\ \text{ for all $p|a$}\\
&\Longrightarrow a|k,
\end{align*}
which is a reformulation of (T5').
\end{proof}
Relations (T1') and (T2') imply that
$X$ is an isometric representation of $\mathbb N^{\times}nx$, and it remains for us to prove that the representation $X$ in (b) satisfies the Nica-covariance relation \eqref{covarianceformula}. Since
\begin{equation}\label{theproduct}
X_{(m,a)}^*X_{(n,b)}=(s^mv_a)^* s^n v_b = v_a^* (s^*)^m s^n v_b,
\end{equation}
the following lemma gives the required Nica-covariance (the formula
\eqref{formofNica} which expresses this covariance in terms of generators will be useful later).
\begin{lemma}\label{covarianceonngenerators}
Suppose that $s$ and $\{v_p:p\in \mathcal P\}$ are isometries satisfying the relations \textnormal{(T1)--(T5)}.
For $m,n\in \mathbb N$ and $a,b\in \mathbb N^{\times}$, we let $a':=a/\gcd(a,b)$, $b':=b/\gcd(a,b)$, and suppose that $(\alpha, \beta)$ is the smallest non-negative solution of $(n-m)/{\gcd(a,b)}=\alpha a'-\beta b'$. Then
\begin{equation}\label{formofNica}
v_a^*s^{*m}s^nv_b= \begin{cases}0 & \text{ if } m\not\equiv n\pmod{\gcd(a,b)} \\
s^{\alpha}v_{b'}v_{a'}^*s^{*\beta}& \text{ if } m\equiv n \pmod{\gcd(a,b)}.
\end{cases}
\end{equation}
\end{lemma}
\begin{proof}
First suppose that $m\not\equiv n\pmod{\gcd(a,b)}$, so that
$(m,a) \vee (n,b)=\infty$. Then $\gcd(a,b)$ has a prime factor $p$ which does not divide $n-m$
and we can write $n-m=cp+k$ with $0< k<p$. Now we factor $a=a_0p$, $b=b_0p$ and apply (T1) and its adjoint to get
\begin{align*}
v_a^*s^{*m}s^nv_b&=\begin{cases}
v_{a_0}^*v_p^*s^ks^{cp}v_pv_{b_0}&\text{if $c\geq 0$}\\
v_{a_0}^*v_p^*s^{*|c|p}s^kv_pv_{b_0}&\text{if $c<0$}
\end{cases}\\
&=\begin{cases}
v_{a_0}^*v_p^*s^kv_ps^{c}v_{b_0}&\text{if $c\geq 0$}\\
v_{a_0}^*s^{*|c|}v_p^*s^kv_pv_{b_0}&\text{if $c<0$};
\end{cases}
\end{align*}
in both cases, the inside factor $v_p^* s^k v_p$ vanishes by (T5), and we have $v_a^*s^{*m}s^nv_b = 0$, as required.
Suppose now that $m\equiv n \pmod{\gcd(a,b)}$, so that $(m,a) \vee (n,b)<\infty$. Write $k=(n-m)/\gcd(a,b)$. As in the proof of (T4'), we can use (T1') to pull $s^{k\gcd(a,b)}$ or $s^{*|k|\gcd(a,b)}$ past $v_{\gcd(a,b)}$ or $v_{\gcd(a,b)}^*$, obtaining
\[
v_a^*s^{*m}s^nv_b=
\begin{cases}
v_{a'}^*s^kv_{b'}&\text{if $k\geq 0$}\\
v_{a'}^*s^{*|k|}v_{b'}&\text{if $k<0$.}
\end{cases}
\]
It suffices by symmetry to compute $v_{a'}^*s^kv_{b'}$ for $k>0$; when $k=0$, \eqref{formofNica} reduces to (T3').
Peeling one factor off $s^k$ and applying the adjoint of (T4') gives
\[
v_{a'}^* s^{k} v_{b'} =
sv_{a'}^* s^{*(a'-1)} s^{k-1}v_{b'}=
\begin{cases}
sv_{a'}^* s^{(k-a')}v_{b'}&\text{ if $k-a'>0$}\\
sv_{a'}^* s^{*(a'-k)}v_{b'}&\text{ if $k-a'\leq 0$.}
\end{cases}
\]
If $k-a'>0$, we can peel another $s$ off $s^{k-a'}$, and pull it across $v_{a'}^*$; we can do it yet again if $k-2a'>0$. The number of times we can do this is precisely the number $\alpha_0$ appearing in the euclidean algorithm of Proposition~\ref{Euclid}, applied to $a'$, $b'$ and $k$. We wind up with
\[
v_{a'}^* s^{k} v_{b'} =s^{\alpha_0}v_{a'}^*s^{*(\alpha_0 a'-k)}v_{b'}.
\]
Now we apply (T4') to pull factors of $s^*$ through $v_{b'}$: we can do this $\beta_0$ times, and obtain
\[
v_{a'}^* s^{k} v_{b'} =s^{\alpha_0}v_{a'}^*s^{(k-\alpha_0 a'+\beta_0 b')}v_{b'}s^{*\beta_0}.
\]
We can continue this process, using alternately the adjoint of (T4') to pull out factors of $s$ to the left and (T4') to pull out $s^*$ to the right. This finishes when there are none left, which is precisely when the euclidean algorithm terminates. Now the equations $\alpha=\sum_i\alpha_i$ and $\beta=\sum_j\beta_j$ from Proposition~\ref{Euclid} give
\[
v_{a'}^* s^{k} v_{b'} =s^{\alpha_0}s^{\alpha_1}\cdots s^{\alpha_{n(\alpha)}}v_{a'}^*v_{b'}s^{*\beta_0}\cdots s^{*\beta_{n(\beta)}}=s^\alpha v_{a'}^*v_{b'}s^{*\beta}.
\]
Finally, we observe that since $a'$ and $b'$ are coprime, (T3) implies that $v_{a'}^*v_{b'}=v_{b'}v_{a'}^*$. Thus, if $n-m>0$, we have $k>0$ and
\[
v_a^*s^{*m}s^nv_b =s^\alpha v_{a'}^*v_{b'}s^{*\beta}=s^\alpha v_{b'}v_{a'}^*s^{*\beta}.
\]
On the other hand, if $n-m<0$, we have
\begin{align*}
v_a^*s^{*m}s^nv_b &= v_{a'}^*s^{*|k|}v_{b'}= (v_{b'}^*s^{|k|}v_{a'})^*\\
& = (s^\beta v_{a'}v_{b'}^*s^{*\alpha})^*=s^\alpha v_{b'}v_{a'}^*s^{*\beta},
\end{align*}
where we now use Convention \ref{convsmallestsol} to interpret ``$(\alpha,\beta)$ is the smallest non-negative solution of $k=\alpha a' - \beta b'$''.
\end{proof}
It follows from \lemref{covarianceonngenerators} that the representation $X$ is Nica covariant, and we have proved (b). This completes the proof of Theorem~\ref{toeplitzpresentation}.
\section{The Nica spectrum of $({\mathbb Q \rtimes \mathbb Q^*_+}, \, \mathbb N^{\times}nx)$}\label{nicaspectrum}
To get a convenient parametrisation of the Nica spectrum, we need to identify the nonempty hereditary directed subsets of $\mathbb N^{\times}nx$. First we give some examples (which will turn out to cover all the possibilities).
\begin{proposition} \label{defAB}
Suppose $N$ is a supernatural number. For each $k\in \mathbb N$, we define
\[
A(k,N) : = \{ (m,a) \in \mathbb N^{\times}nx : a|N\text{ and }a^{-1}(k-m)\in \mathbb N\},
\]
and for each $r \in \mathbb Z/N$, we recall that $r(a)$ denotes the projection of $r$ in $\mathbb Z/a$ and we define
\[
B(r,N) : = \{ (m,a) \in \mathbb N^{\times}nx : a | N \text{ and } m \in r(a)\}.
\]
Then $A(k,N)$ and $B(r,N)$ are nonempty hereditary directed subsets of $\mathbb N^{\times}nx$.
\end{proposition}
\begin{remark}
The map $(k,c)\mapsto A(k,c)=\{(m,a)\in \mathbb N^{\times}nx:(m,a)\leq (k,c)\}$ is the standard embedding of the quasi-lattice ordered semigroup $\mathbb N^{\times}nx$ in its spectrum, see \cite[Section 6.2]{nica}.
\end{remark}
\begin{proof}[Proof of Proposition~\ref{defAB}]
Let $N$ be given.
If $(m,a)$ and $(n,b)$ are in $A(k,N)$, then $(k,\operatorname{lcm}(a,b)) \in A(k,N)$ is a common upper bound for $(m,a)$ and $(n,b)$, and hence $A(k,N)$ is directed. To see that $A(k,N)$ is hereditary, suppose $(m,a) \in A(k,N)$ and $(0,1) \leq (n,b) \leq (m,a)$. Then $b|a$ and $b^{-1}(m-n)\in \mathbb N$. Since $a|N$ and $a^{-1}(k-m)\in \mathbb N$ we have $b | a | N$, and thus $b^{-1}(k-n) = (b^{-1}a)a^{-1}(k-m) + b^{-1}(m-n)$ belongs to $\mathbb N$.
Thus $A(k,N)$ is hereditary.
We next prove that $B(r,N)$ is directed. Suppose $(m,a)$ and $(n,b)$ are in $B(r,N)$, so that $a$ and $b$ divide $N$ and $m\in r(a)$, $n\in r(b)$. Since $r(a)=r(\operatorname{lcm}(a,b))(a)$, there exists $k\in\mathbb Z$ such that $m+ak\in r(\operatorname{lcm}(a,b))$, and similarly there exists $l\in\mathbb Z$ such that $n+bl\in r(\operatorname{lcm}(a,b))$; by adding multiples of $\operatorname{lcm}(a,b)$ to both sides, we can further suppose that $k,l\in \mathbb N$ and that $m+ak=n+bl=t$, say. Then $t\in (m+a\mathbb N ) \cap (n+b\mathbb N)$, and $(t,\operatorname{lcm}(a,b))$ is an upper bound for $(m,a)$ and $(n,b)$. Since $t=m+ak\in r(\operatorname{lcm}(a,b))$, and $\operatorname{lcm}(a,b)$ divides $N$, this upper bound belongs to $B(r,N)$. Thus $B(r,N)$ is directed.
To see that $B(r,N)$ is hereditary, suppose $(0,1)\leq (n,b) \leq (m,a) \in B(r,N)$. Then we have $b|a$ and $b^{-1}(m-n) \in \mathbb N$. Then $m\in r(a)\subset r(b)$, and since $n$ has the form $n=m-bk$ for some $k\in \mathbb N$, we have $n\in r(b)$ also. Thus $(n,b)\in B(r,N)$, as required.
\end{proof}
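The membership conditions defining $A(k,N)$ and $B(r,N)$ are easy to test in examples. In the Python sketch below (ours; the encodings and the particular $N$ and $r$ are our own choices), a supernatural number is a dictionary of prime exponents, with \texttt{inf} allowed, and an element of $\mathbb Z/N$ is given as a compatible family $a\mapsto r(a)$.
\begin{verbatim}
from math import inf

def exponent(p, a):        # e_p(a) for a natural number a
    e = 0
    while a % p == 0:
        a //= p
        e += 1
    return e

def divides(a, N):         # a | N, with N a dict {prime: exponent}
    primes = [p for p in range(2, a + 1) if all(p % q for q in range(2, p))]
    return all(exponent(p, a) <= N.get(p, 0) for p in primes)

def in_A(m, a, k, N):      # (m,a) in A(k,N):  a | N and (k - m)/a in N
    return divides(a, N) and k >= m and (k - m) % a == 0

def in_B(m, a, r_of, N):   # (m,a) in B(r,N):  a | N and m in r(a)
    return divides(a, N) and m % a == r_of(a)

N = {2: inf, 3: 1}             # the supernatural number 2^infinity * 3
r_of = lambda a: (-1) % a      # the element "-1" of Z/N, as a compatible family
print(in_A(3, 4, 11, N))       # True:  4 | N and 11 - 3 = 2*4
print(in_B(5, 6, r_of, N))     # True:  6 | N and 5 = -1 mod 6
print(in_B(4, 6, r_of, N))     # False: 4 is not congruent to -1 mod 6
\end{verbatim}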
\begin{lemma}\label{kandN}
Suppose $\omega$ is a nonempty hereditary directed subset of $ \mathbb N^{\times}nx $.
For each prime $p$ let $e_p(\omega) :=\sup\{e_p(a):(m,a)\in\omega\}\in \mathbb N\cup\{\infty\}$, and define a supernatural number by $N_{\omega}:=\prod_p p^{e_p(\omega)}$. Define
$k_\omega \in \mathbb N \cup \{\infty\}$ by
\[
k_\omega := \sup \{ m : (m,a) \in \omega \text{ for some } a\in \mathbb N^{\times}\}.
\]
Suppose $a | N_\omega$. Then there exists $m \in \mathbb N$ such that $(m,a) \in \omega$, and moreover
\begin{enumerate}
\item if $k_\omega < \infty$, then $(k_\omega,a) \in \omega$;
\item if $k_\omega = \infty$, then there is a sequence $n_i \in \mathbb N$ such that $(n_i, a) \in \omega$ and $n_i \to \infty$.
\end{enumerate}
\end{lemma}
\begin{proof}
For each prime $p$ with $e_p(a)>0$, we have $e_p(a)\leq e_p(\omega)$, so there exists $(m_p,b) \in \omega$ such that
$e_p(a)\leq e_p(b)$. Then $(m_p,p^{e_p(a)}) \leq (m_p,p^{e_p(b)}) \leq (m_p,b)$, and $(m_p, p^{e_p(a)})$ belongs to $\omega$ because $\omega$ is hereditary. Since $\omega$ is directed, the finite set $\{(m_p,p^{e_p(a)}):p\in\mathcal P,\;e_p(a)>0\}$ has an upper bound in $\omega$, and since $\omega$ is hereditary, $(m,c):= \vee_p (m_p, p^{e_p(a)})$ also belongs to $\omega$. But $c=\prod_p p^{e_p(a)}=a$, so we have found $m$ such that $(m,a)\in\omega$.
When $k_\omega$ is finite, there exists $d\in\mathbb N^\times$ such that $(k_\omega,d) \in \omega$, and since $\omega$ is directed, it contains the element $(l,\operatorname{lcm}(a,d)) = (m,a) \vee (k_\omega,d)$, where $l := \min((m+a\mathbb N) \cap (k_\omega+d\mathbb N))$.
But then $l \leq k_\omega$ by definition of $k_\omega$, and since $l \in k_\omega+ d\mathbb N$, we conclude that $l = k_\omega$. Since
$(k_\omega,a) \leq (k_\omega,\operatorname{lcm}(a,d))$, we deduce that $(k_\omega,a) \in \omega$, proving part (1).
To prove part (2), suppose $a|N_\omega$, and choose $(n_1,a) \in \omega$. Assume that
we have obtained $n_1 < n_2 < \cdots < n_i$
such that $(n_i,a) \in \omega$.
Since $k_\omega=\infty$, the set $\{m\in \mathbb N: (m,b) \in \omega\text{ for some }b\in\mathbb N^{\times}\}$ is unbounded, so we may choose $(m,b)\in\omega$ with $m > n_i$.
Then $(n_{i+1}, \operatorname{lcm}(a,b)):=(n_i,a) \vee (m,b) $ belongs to $\omega$; since $(n_{i+1},a)\leq (n_{i+1},\operatorname{lcm}(a,b))$ and $\omega$ is hereditary, $(n_{i+1},a)$ belongs to $\omega$, and part (2) follows by induction.
\end{proof}
\begin{remark}\label{ABorder}
Part (2) of the lemma implies that $B(r,M)$ is never contained in $A(k,N)$. The possible inclusions are characterized as follows:
\begin{align*}
B(t,N) \subset B(r,M)\ &\iff\ N|M \text{ and }
t(a) = r(a) \text{ for every } a|N ;
\\
A(k,N) \subset B(r,M)\ &\iff\ N|M\text{ and } k\in r(a)\text{ for every }a|N ;\\
A(l,N) \subset A(k,M) \ &\iff \ N|M \text { and }
k - l \in a\mathbb N \text{ for every } a | N.
\end{align*}
For $N\in \mathcal N \setminus \mathbb N^{\times}$, we have $k - l \in a\mathbb N$ for every $a | N $ if and only if $k=l$, so for such $N$, $A(l,N) \subset A(k,M)$ implies $k = l$.
Notice also that it follows easily from these inclusions that the sets $A(k,N)$ and $B(r,N)$
are distinct for different values of the parameters.
\end{remark}
Next we show that every hereditary directed subset of $\mathbb N^{\times}nx$ is either an $A(k,N)$ or a $B(r,N)$.
\begin{proposition}\label{spectrum}
Suppose $\omega$ is a nonempty hereditary directed subset of $\mathbb N^{\times}nx$, and let $k_\omega$
and $N_\omega$ be as in \lemref{kandN};
\begin{enumerate}
\item if $k_\omega< \infty$, then $\omega = A(k_\omega,N_\omega)$;
\item if $k_\omega = \infty$, then there exists
$r_{\omega}\in \mathbb Z/N_{\omega}$ such that $r_{\omega}(a)=m$ for every $(m,a)\in \omega$, and we then have $\omega=B(r_{\omega},N_{\omega})$.
\end{enumerate}
\end{proposition}
\begin{proof}
Suppose first that $k_\omega < \infty$, and $(m,a) \in A(k_\omega,N_\omega)$. Then $a| N_\omega$ and
$a^{-1}(k_\omega-m) \in \mathbb N$. Then part (1) of \lemref{kandN} implies that
$(k_\omega,a)$ is in $\omega$, and since $\omega$ is assumed to be directed and
$(m,a) \leq (k_\omega,a)$, we conclude that $(m,a)\in\omega$ and $A(k_\omega,N_\omega) \subset \omega$. On the other hand, suppose that $(m,a) \in \omega$. Since $(k_\omega,a) \in \omega$ and $\omega$ is directed, $(m,a) \vee (k_\omega,a)$ belongs to $\omega$; but $m\leq k_\omega$ by definition of $k_\omega$, so $\min ((m+a\mathbb N) \cap (k_\omega+a\mathbb N)) = k_\omega$, $(m,a) \vee (k_\omega,a) = (k_\omega,a) $, and $(m,a) \leq (k_\omega,a)$. Since $A(k_\omega,N_\omega)$ is hereditary,
we conclude that $(m,a)$ is in $A(k_\omega,N_\omega)$, and $\omega \subset A(k_\omega,N_\omega)$.
Now suppose that $k_\omega =\infty$. We need to produce a suitable $ r_\omega \in \mathbb Z/N_\omega$. We know from \lemref{kandN} that for every $a|N_\omega$ there exists $(m,a) \in \omega$, and
we naturally want to take $r_\omega(a)$ to be the class of $m$ in $\mathbb Z/a$. To see that this is well defined, suppose $(m,a)$ and $(n,a)$ are both in $\omega$; since $ \omega $ is directed, they have a common upper bound $(l,b)$, and then $(l-m) \equiv 0 \equiv (l-n) \pmod a$, so $m \equiv n \pmod a$.
Next we have to show that $r_\omega :=(r_\omega(a))_{a|N_\omega}$ is an element of the inverse limit, or in other words that $a|b|N_\omega$ implies $r_\omega(a)= r_\omega(b)(a)$.
Let $m$ be such that $(m,b) \in \omega$, so that $m\in r_\omega(b)$. Since $a|b$, we have $(m,a) \leq (m,b)$, and $(m,a)$ also belongs to $\omega$. Thus we also have $m \in r_\omega(a)$, and
$r_\omega(b)(a) =[m]=r_\omega(a)$, as required. Thus there is a well-defined class $r_\omega$ in $\mathbb Z/N_\omega$ with the required property.
It is clear from the way we chose $r_\omega$ that $\omega \subset B(r_\omega,N_\omega)$, so it remains to show the reverse inclusion. Suppose $(m,a) \in B(r_\omega,N_\omega)$. Since $a|N_\omega$ and $k_\omega = \infty$, part (2) of \lemref{kandN} implies that we can choose
$n>m$ such that $(n,a) \in \omega$. Now both $m$ and $n$ are in $r_\omega(a)$, so $a|(n-m)$; since $n-m >0$, this implies that $a^{-1}(n-m)\in \mathbb N$, and we have $(m,a) \leq (n,a)$. Since $\omega$ is hereditary, $(m,a) \in \omega$. Thus $B(r_\omega,N_\omega)\subset \omega$, and we have proved (2).
\end{proof}
\begin{corollary}\label{descOmega}
The Nica spectrum of $\mathbb N^{\times}nx$ is
\[
\Omega=\{A(k,M):M\in \mathcal N,k\in\mathbb N\}\cup\{B(r,N):N\in \mathcal N,r\in \mathbb Z/N\}.
\]
\end{corollary}
To identify Cuntz's $\mathcal Q_\mathbb N$ as the boundary quotient of $\mathbb TT(\mathbb N^{\times}nx)$, we need to identify the \emph{boundary} $\partial \Omega$ of $\Omega$, as defined in \cite[Definition 3.3]{purelinf} or \cite[Lemma~3.5]{CL2}.
\begin{proposition}\label{boundary}
Let $\nabla := \prod_p p^\infty$ be the largest supernatural number. Then the map $r \mapsto B(r, \nabla)$ is a homeomorphism of the finite integral adeles $\widehat{\mathbb Z}$
onto the boundary $\partial \Omega$ of $({\mathbb Q \rtimes \mathbb Q^*_+},\mathbb N^{\times}nx)$. Under this homeomorphism, the left action of $(m,a)\in\mathbb N^{\times}nx$ on $\widehat{\mathbb Z}$ is given, in terms of the ring operations in $\widehat{\mathbb Z}$, by $(m,a)\cdot r=m+ar$.
\end{proposition}
A substantial part of the argument works in greater generality, and this generality will be useful for the construction of KMS states.
\begin{lemma}\label{omegaB}
The subset $\Omega_B := \{B{(r,N)} \in \Omega : N\in \mathcal N , \ r \in \mathbb Z/N\}$ is a closed subset of $\Omega$. For each fixed $N\in \mathcal N$, the map $r\mapsto B(r,N)$ is a homeomorphism of $\mathbb Z/N$ onto a closed subset of $\Omega_B$.
\end{lemma}
\begin{proof}
Suppose that $B(r_\lambda,N_\lambda)\to \omega$ in $\Omega$, so that
\[
B(r_\lambda,N_\lambda)^{^{\wedge}}(1_{m,a})\to \hat\omega(1_{m,a})\ \text{ for every $(m,a)\in\mathbb N^{\times}nx$.}
\]
Since the sets in $\Omega$ are non-empty, there exists $(m,a)$ such that $\hat\omega(1_{m,a})=1$. Then there exists $\lambda_0$ such that
\begin{align*}
\lambda\geq \lambda_0
&\Longrightarrow B(r_\lambda,N_\lambda)^{^{\wedge}}(1_{m,a})=1\\
&\Longrightarrow a|N_\lambda\text{ and } m\in r_\lambda(a)\\
&\Longrightarrow B(r_\lambda,N_\lambda)^{^{\wedge}}(1_{m+ka,a})=1\text{ for all $k\in \mathbb N$.}
\end{align*}
Passing to the limit, it follows that $\hat\omega(1_{m+ka,a})=1$ for every $k\in \mathbb N$, so the quantity $k_{\omega}$ of Lemma~\ref{kandN} is infinite, and Proposition~\ref{spectrum}(2) implies that $\omega=B(r_\omega,N_\omega)$. Thus $\Omega_B$ is closed.
Since $\mathbb Z/N$ is compact and $r\mapsto B(r,N)$ is injective (see Remark~\ref{ABorder}), it suffices to prove that $r\mapsto B(r,N)$ is continuous. So suppose that $r_\lambda\to r$ in $\mathbb Z/N$, and let $a\in \mathbb N^{\times}$; we need to show that
\begin{equation}\label{togetctuity}
B(r_\lambda,N)^{^{\wedge}}(1_{m,a})\to B(r,N)^{^{\wedge}}(1_{m,a})\ \text{ for every $m\in \mathbb N$.}
\end{equation}
If $a\nmid N$, then $B(r_\lambda,N)^{^{\wedge}}(1_{m,a})=0=B(r,N)^{^{\wedge}}(1_{m,a})$. So suppose $a|N$. Then
\[
B(r,N)^{^{\wedge}}(1_{m,a})
=\begin{cases}
1&\text{ if $m\in r(a)$,}\\
0&\text{ otherwise.}
\end{cases}
\]
Since the maps $r\mapsto r(a)$ are continuous, we can choose $\lambda_0$ such that $\lambda\geq \lambda_0\Longrightarrow r_\lambda(a)=r(a)$. But then $m\in r_{\lambda}(a)$ if and only if $m\in r(a)$, and
\[
\lambda\geq \lambda_0\Longrightarrow
B(r_\lambda,N)^{^{\wedge}}(1_{m,a})= B(r,N)^{^{\wedge}}(1_{m,a}),
\]
confirming \eqref{togetctuity}.
\end{proof}
\begin{proof}[Proof of Proposition~\ref{boundary}]
By definition, $\partial \Omega$ is the closure in the Nica spectrum $\Omega$ of the set of maximal hereditary directed subsets (see \cite[Definition 3.3]{purelinf} or \cite[Lemma~3.5]{CL2}). From \proref{spectrum} and the characterization of the inclusions given in
\remref{ABorder}, we see that a hereditary directed set is maximal if and only if it has the form $B(r,\nabla)$. Since $\{B(r,\nabla): r \in \widehat{\mathbb Z}\}$ is the image of the compact space $\widehat{\mathbb Z}$ under the homeomorphism
$r \mapsto B(r,\nabla)$ from \lemref{omegaB}, it is already closed and is equal to $\partial\Omega$.
The action $\theta_{(m,a)}$ on $\Omega$ satisfies
\begin{align}\label{calcaction}
\theta_{(m,a)}(B(r,\nabla)) & = \operatorname{Her}((m,a)B(r,\nabla))\\
&=\operatorname{Her} \{(m,a)(n,b): n\in r(b)\} \notag\\
& = \operatorname{Her} \{(m+an,ab):n\in r(b)\} \notag\\
& =\operatorname{Her} \{(k, ab) : k \in (m+ar)(ab)\notag\},
\end{align}
since $n\in r(b)\Longleftrightarrow an\in (ar)(ab)$. But this is precisely $\{(k,c):k\in (m+ar)(c)\}=B(m+ar,\nabla)$.
\end{proof}
\begin{remark}
We think of supernatural numbers as limits of (multiplicatively) increasing sequences in $\mathbb N^\times$, and of classes in $\mathbb Z/N$ as limits of (additively) increasing sequences in $\mathbb N$. So the set $\Omega_B$ lies ``at additive infinity'' and we call it the {\em additive boundary} of $\Omega$.
The set $\Omega_A:=\{A(k,N):N\notin\mathbb N^{\times}\}$ lies ``at multiplicative infinity'', and we call it the {\em multiplicative boundary}. Each of these defines a natural quotient of $\mathbb TT(\mathbb N^{\times}nx)$, and we plan to discuss these quotients elsewhere. The minimal boundary $\partial \Omega$ characterised in
\proref{boundary} lies at both additive and multiplicative infinity, and might be more descriptively called
the {\em affine boundary} of $\Omega$.
\end{remark}
In our construction of KMS states in \secref{secconstructionKMSground} we need a product decomposition of the additive boundary $\Omega_B$ over the set $\mathcal P$.
We describe the factors in the next Lemma, and the product decomposition in the following Proposition.
\begin{lemma}
For each prime $p$, the set
\[
X_p := \{B(r,p^k): k \in \mathbb N \cup\{\infty\}, \ r \in \mathbb Z/{p^k}\},
\]
is a closed subset of $\Omega$, and each singleton set $\{B(r,p^k)\}$ with $k<\infty$ is an open subset of $X_p$.
\end{lemma}
\begin{proof}
Suppose that the net $\{ B(r_{\lambda},p^{k_\lambda}) : \lambda \in \Lambda\}$ converges in $\Omega$; since $\Omega_B$ is closed in $\Omega$, the limit has the form $ B(r,N)$, and it suffices to prove that $B(r,N)\in X_p$, or, equivalently, that $N=p^k$ for some $k\in \mathbb N\cup \{\infty\}$. Suppose that $a|N$. Then $(r(a),a)\in B(r,N)$, so $B(r,N)^{^{\wedge}}(1_{r(a),a})=1$, and there exists $\lambda_0$ such that
\begin{equation}\label{N=p^k}
\lambda\geq \lambda_0\Longrightarrow B(r_\lambda,p^{k_\lambda})^{^{\wedge}}(1_{r(a),a})=1\Longrightarrow (r(a),a)\in B(r_\lambda,p^{k_\lambda})
\Longrightarrow a|p^{k_\lambda}.
\end{equation}
Since every divisor of $N$ is a power of $p$, so is $N$. Thus $X_p$ is closed.
Now suppose that $k<\infty$, and $r\in \mathbb Z/p^k$. To see that $\{B(r,p^k)\}$ is open, it suffices to prove that if $B(r_{\lambda},p^{k_\lambda})\to B(r,p^k)$, then $B(r_{\lambda},p^{k_\lambda})$ is eventually equal to $B(r,p^k)$ (for then the complement $X_p\setminus \{B(r,p^k)\}$ is closed). Choose an integer $n$ in the class $r$. Then the element $(n,p^k)$ of $\mathbb N^{\times}nx$ belongs to $B(r,p^k)$, so the argument in \eqref{N=p^k} implies that there exists $\lambda_1$ such that
\begin{align}\label{proplambda1}
\lambda\geq \lambda_1&\Longrightarrow(n,p^k)\in B(r_{\lambda},p^{k_\lambda})\\
&\Longrightarrow p^k|p^{k_\lambda}\text{ and }n\in r_\lambda(p^{k})\notag\\
&\Longrightarrow p^k|p^{k_\lambda}\text{ and }r=r_\lambda(p^{k});\notag
\end{align}
in particular, we have $k\leq k_\lambda$ for $\lambda\geq \lambda_1$. On the other hand, no element of the form $(m,p^{k+1})$ belongs to $B(r,p^k)$, so $B(r,p^k)^{^{\wedge}}(1_{m,p^{k+1}})=0$, and there exists $\lambda_2$ such that
\begin{equation}\label{negmembership}
\lambda \geq \lambda_2\Longrightarrow B(r_\lambda,p^{k_\lambda})^{^{\wedge}}(1_{m,p^{k+1}})=0\Longrightarrow (m,p^{k+1})\notin B(r_\lambda,p^{k_\lambda})\text{ for $0\leq m<p^{k+1}$;}
\end{equation}
since membership of an element $(m,a)$ in a set $B(t,M)$ depends only on the class of $m$ in $\mathbb Z/a$, at least one $m$ in the range belongs to $r_\lambda(p^{k+1})$, so we deduce from \eqref{negmembership} that
\[
\lambda \geq \lambda_2\Longrightarrow p^{k+1}\nmid p^{k_\lambda}\Longrightarrow k_\lambda\leq k.
\]
Now we choose $\lambda_3$ such that $\lambda_3\geq \lambda_1$ and $\lambda_3\geq \lambda_2$, and then $k_\lambda=k$ for $\lambda\geq \lambda_3$. Since \eqref{proplambda1} says that $r_\lambda(p^{k})=r$ for $\lambda\geq \lambda_3\geq\lambda_1$, we eventually have $r_\lambda=r_\lambda(p^{k_\lambda})=r_\lambda(p^k)=r$, and hence
\[
\lambda\geq \lambda_3\Longrightarrow B(r_\lambda,p^{k_\lambda})=B(r,p^k),
\]
as required.
\end{proof}
\begin{prop}\label{prodstructure} The map $f:B(r,N)\mapsto \{B(r(p^{e_p(N)}), p^{e_p(N)}):p\in\mathcal P\}$ is a homeomorphism of the additive boundary $\Omega_B$ onto the product space $\prod_{p\in \mathcal P} X_p$.
\end{prop}
\begin{proof}
For $p\in \mathcal P$ we define $f_p:\Omega_B\to X_p$ by $f_p(B(r,N))=B(r(p^{e_p(N)}), p^{e_p(N)})$. Then the maps $f_p$ are the coordinate maps of $f$, and $f$ is continuous if and only if all the $f_p$ are. So we fix $p$, and consider a convergent net $B(r_{\lambda},N_{\lambda})\to B(r,N)$ in $\Omega_B$. Let $(m,a)\in \mathbb N^{\times}nx$. Then eventually
\begin{equation}\label{given}
B(r_\lambda,N_\lambda)^{^{\wedge}}(1_{m,a})= B(r,N)^{^{\wedge}}(1_{m,a}),
\end{equation}
and we need to show that we eventually have
\begin{equation}\label{needed}
B(r_\lambda(p^{e_p(N_\lambda)}), p^{e_p(N_\lambda)})^{^{\wedge}}(1_{m,a})= B(r(p^{e_p(N)}), p^{e_p(N)})^{^{\wedge}}(1_{m,a}).
\end{equation}
Both sides of \eqref{needed} vanish unless $a=p^k$, so we just need to consider $a=p^k$. But then for all $B(t,M)$ we have
\[
B(t,M)^{^{\wedge}}(1_{m,p^k})=B(t(p^{e_p(M)}), p^{e_p(M)})^{^{\wedge}}(1_{m,p^k}),
\]
so for $a=p^k$, \eqref{needed} follows immediately from \eqref{given}. Thus $f_p$ is continuous, and so is $f$.
To see that $f$ is injective, we suppose $f(B(r,N))=f(B(t,M))$. Then
\begin{align*}
f(B(r,N))=f(B(t,M))
&\Longrightarrow f_p(B(r,N))=f_p(B(t,M))\text{ for all $p\in \mathcal P$}\\
&\Longrightarrow B(r(p^{e_p(N)}), p^{e_p(N)})=B(t(p^{e_p(M)}), p^{e_p(M)})\text{ for all $p\in \mathcal P$}\\
&\Longrightarrow p^{e_p(N)}=p^{e_p(M)}\text{ and }r(p^{e_p(N)})=t(p^{e_p(N)})\text{ for all $p\in \mathcal P$}\\
&\Longrightarrow N=M\text{ and }r(a)=t(a)\text{ for all $a$ such that $a|N$}\\
&\Longrightarrow N=M\text{ and }r=t\text{ in $\mathbb Z/N=\textstyle{\varprojlim_{a|N}}\mathbb Z/a\mathbb Z$.}
\end{align*}
To see that $f$ is surjective, suppose that $\{B(r_p,p^{k_p}):p\in \mathcal P\}$ is an element of $\prod_p X_p$. Take $N$ to be the supernatural number $\prod_p p^{k_p}$. Since the map $r\mapsto \{r(p^{k_p}):p\in \mathcal P\}$ is a homeomorphism of $\mathbb Z/N$ onto $\prod_{p\in\mathcal P}\mathbb Z/p^{e_p(N)}$ (by \proref{prodvsinvlim}), there exists $r\in \mathbb Z/N$ such that $r(p^{k_p})=r_p$ for all primes $p$. Then $\{B(r_p,p^{k_p})\}=f(B(r,N))$, and $f$ is onto.
We have now shown that $f$ is a bijective continuous map of the compact space $\Omega_B$ onto $\prod_p X_p$, and hence $f$ is a homeomorphism.
\end{proof}
\section{Cuntz's $\mathcal Q_\mathbb N$ as a boundary quotient}\label{sectionqn}
The $C^*$-algebra considered by Cuntz in \cite{cun2} is the universal $C^*$-algebra $\mathcal Q_\mathbb N$ generated by a unitary $s$ and isometries $\{u_a:a\in \mathbb N^{\times}\}$ satisfying
\begin{itemize}
\item[] \begin{itemize}
\item[(C1)]\ $u_a s = s^a u_a$ for $a\in \mathbb N^{\times}$,
\item[(C2)]\ $u_au_b=u_{ab}$ for $a,b\in\mathbb N^{\times}$, and
\item[(C3)]\ $\sum_{k=0}^{a-1} s^k u_au_a^*s^{*k}= 1$ for $a\in\mathbb N^{\times}$.
\end{itemize}
\end{itemize}
We aim to prove that $\mathcal Q_\mathbb N$ is the boundary quotient of the Toeplitz algebra $\mathbb TT(\mathbb N^{\times}nx)$, and it is helpful for this purpose to have a slightly different presentation of $\mathcal Q_\mathbb N$ which looks more like the presentation of $\mathbb TT(\mathbb N^{\times}nx)$ in \thmref{toeplitzpresentation}.
\begin{prop}\label{presentqn}
$\mathcal Q_\mathbb N$ is the universal $C^*$-algebra generated by isometries $s$ and $\{v_p: p\in \mathcal P\}$ satisfying
\begin{itemize}
\item[] \begin{itemize}
\item[(Q1)]\ $v_p s = s^p v_p$ for every $p\in\mathcal P$,
\item[(Q2)]\ $v_p v_q = v_q v_p$ for every $p,q\in\mathcal P$,
\item[(Q5)]\ $\sum_{k=0}^{p-1} (s^k v_p) (s^k v_p)^*= 1$ for every $p\in\mathcal P$, and
\item[(Q6)]\ $ss^*=1$.
\end{itemize}
\end{itemize}
We then also have
\begin{itemize}
\item[] \begin{itemize}
\item[(Q3)]\ $v_p^*v_q = v_qv_p^*$ for $p,q\in\mathcal P$ and $p\not=q$, and
\item[(Q4)]\ $s^*v_p=s^{p-1}v_ps^*$ for every $p\in\mathcal P$.
\end{itemize}
\end{itemize}
\end{prop}
\begin{proof}
If $s$ is unitary and $u_a$ satisfy (C1), (C2) and (C3), then clearly $s$ and $v_p:=u_p$ satisfy (Q1), (Q2), (Q5) and (Q6). Suppose, on the other hand, that $s$ and $v_p$ satisfy (Q1), (Q2), (Q5) and (Q6), and define $u_a:=\prod_{p\in\mathcal P} v_p^{e_p(a)}$. Then (Q1) implies that $v_ps^k=s^{kp}v_p$; thus $v_p^ns=s^{p^n}v_p^n$, and it follows that $v_as=s^av_a$ for all $a$, which is (C1). Equation (Q2) implies that the $v_p$ form a commuting family, and (C2) follows easily. To prove (C3), it suffices to show that if (C3) holds for $a=b$ and $a=c$, then it holds also for $a=bc$. So suppose (C3) holds for $a=b$ and $a=c$, and note that
\[
\{k:0\leq k<bc\}=\{l+mb: 0\leq l<b,\ 0\leq m<c \}.
\]
Thus, using (C1) and (C2), we have
\begin{align*}
\sum_{k=0}^{bc-1} s^k u_{bc}u_{bc}^*s^{*k}
&=\sum_{l=0}^{b-1}\sum_{m=0}^{c-1}s^ls^{mb}u_bu_cu_c^*u_b^*s^{*mb}s^{*l}\\
&=\sum_{l=0}^{b-1}s^lu_b\Big(\sum_{m=0}^{c-1}s^mu_cu_c^*s^{*m}\Big)u_b^*s^{*l},
\end{align*}
which equals $1$ because (C3) holds for $a=c$ and $a=b$. Thus $\{s,u_a\}$ satisfies (C1)--(C3), and the two presentations are equivalent.
Since $s$ is unitary, multiplying (Q1) on the left and right by $s^*$ gives (Q4). To see (Q3), we apply (C3) with $a=pq$ to get
\begin{equation}\label{Cuntz*commute}
v_p^*v_q=v_p^*\Big(\sum_{k=0}^{pq-1} s^ku_{pq}u_{pq}^*s^{*k}\Big)v_q
=\sum_{k=0}^{pq-1}v_p^*s^kv_pv_qv_p^*v_q^*s^{*k}v_q,
\end{equation}
where we used that $u_{pq}=v_pv_q=v_qv_p$. Since $v_p^*s^kv_p=0$ unless $p|k$, and $v_q^*s^kv_q=0$ unless $q|k$, the only non-zero term in the sum on the right of \eqref{Cuntz*commute} occurs when $k=0$, and we have
\[
v_p^*v_q=v_p^*v_pv_qv_p^*v_q^*v_q=v_q v_p^* .
\]
\end{proof}
Clearly condition (Q3) implies that $v_m^*v_n = v_nv_m^*$ for $m,n \in \mathbb N^{\times}$ with $\gcd(m,n)=1$
(this is \cite[Lemma 3.2(c)]{cun2}, and has already been observed as (T3) $\implies$ (T3') in Lemma~\ref{relsata}).
\begin{corollary}\label{qnquotient}
Cuntz's $C^*$-algebra $\mathcal Q_\mathbb N$
is the quotient of the Toeplitz algebra $\mathbb TT(\mathbb N^{\times}nx)= C^*(s,v_p : p\in\mathcal P ) $ by the ideal $I $ generated by the elements $1-ss^*$ and $\{1-\sum_{k=0}^{p-1} (s^k v_p) (s^k v_p)^*:p\in\mathcal P\}$.
\end{corollary}
\begin{proof}
Relations (Q1) and (Q2) are the same as (T1) and (T2), and hence hold in any quotient of $\mathbb TT(\mathbb N^{\times}nx)$; clearly (Q5) and (Q6) hold in $\mathbb TT(\mathbb N^{\times}nx) / I$.
So \proref{presentqn} gives a homomorphism $\pi: \mathcal Q_\mathbb N \to \mathbb TT(\mathbb N^{\times}nx) / I$.
On the other hand relations (T1--4) are the same as (Q1--4), and hence hold in $\mathcal Q_\mathbb N$; (Q5) implies that the isometries $\{s^k v_p: 0 \leq k <p\}$ have mutually orthogonal ranges, which is the content of (T5). So \thmref{toeplitzpresentation}
gives a homomorphism $\rho: \mathbb TT(\mathbb N^{\times}nx) \to \mathcal Q_\mathbb N$ that vanishes on $I$, and hence induces a
homomorphism $\tilde{\rho} : \mathbb TT(\mathbb N^{\times}nx) / I \to \mathcal Q_\mathbb N $ which is an inverse for $\pi$.
\end{proof}
Recall from \cite[Lemma 3.5]{CL2}
that the boundary $\partial \Omega$ of a quasi-lattice order $(G,P)$
is the spectrum (in the sense of \cite[Definition 4.2]{topfree}) of
the elementary relations $\prod_{x\in F} (1 - W_x W_x^*) = 0$
corresponding to the sets in the family
\begin{equation}\label{collectionF}
\mathcal F:= \{ F\subset P: |F|<\infty\text{ and }\forall\ y\in P \ \exists\ x\in F \text{ such that } x\vee y <\infty\}
\end{equation}
from \cite[Definition 3.4]{CL2},
taken together with the Nica relations from \cite[Proposition 6.1]{topfree}.
Since we are working with covariant isometric representations, we will carry the implicit assumption that the Nica relations always hold,
so the spectrum of a set $\mathcal R$ of extra relations
is always a subset, denoted $\Omega(\mathcal R)$, of the Nica spectrum $\Omega$. In this notation, \cite[Lemma 3.5]{CL2} says that $\partial\Omega=\Omega(\mathcal{F})$, and the set $\Omega_\mathcal{N}=\Omega(\emptyset)$ of \cite[\S6]{topfree} is just $\Omega$.
Since $(\mathbb TT(\mathbb N^{\times}nx),w)$ is universal for Nica-covariant representations, Theorem~6.4 of \cite{topfree} implies that $\mathbb TT(\mathbb N^{\times}nx)$ is canonically isomorphic to the partial crossed product $C(\Omega)\rtimes ({\mathbb Q \rtimes \mathbb Q^*_+})$. The \emph{boundary quotient} of \cite{CL2} is then the partial crossed product $C(\partial \Omega) \rtimes ({\mathbb Q \rtimes \mathbb Q^*_+})$, which by \cite[Theorem 4.4]{topfree} and \cite[Proposition 6.1]{topfree} is isomorphic to the universal $C^*$-algebra generated by
a Nica-covariant semigroup of isometries $W$ subject to the
extra (boundary) relations
\begin{equation*}
\prod_{x\in F} (1 - W_x W_x^*) = 0 \quad \text{ for } F\in \mathcal F.
\end{equation*}
\begin{theorem}\label{qnisboundaryquotient}
Cuntz's $C^*$-algebra $\mathcal Q_\mathbb N$ is the boundary quotient $C(\partial \Omega) \rtimes ({\mathbb Q \rtimes \mathbb Q^*_+})$ of the Toeplitz algebra $\mathbb TT(\mathbb N^{\times}nx)$.
\end{theorem}
\begin{proof}
Since $(\mathbb TT(\mathbb N^{\times}nx),w)$ is universal for Nica-covariant isometric representations of $\mathbb N^{\times}nx$, Corollary~\ref{qnquotient} implies that $\mathcal Q_\mathbb N$ is universal for Nica-covariant representations $(S,V)$ of $\mathbb N^{\times}nx$ which satisfy
\begin{equation}\label{extrarelsinqn}
1-SS^*=0\ \text{ and }\ 1-\sum_{k=0}^{p-1} (S^kV_p)(S^kV_p)^*=0\ \text{ for $p\in \mathcal P$.}
\end{equation}
Since the terms in the sum are mutually orthogonal projections, the relations \eqref{extrarelsinqn} are equivalent to
\begin{gather}
1-SS^*=0,\ \text{ and }\label{q4}\\
\prod_{k=0}^{p-1} \big(1- (S^k V_p) (S^k V_p)^*\big)= 0\ \text{ for every $p\in \mathcal P$.}\label{q5}
\end{gather}
We will prove that $\partial\Omega:=\Omega(\mathcal F)$ coincides with $\Omega(\{\eqref{q4},\eqref{q5}\})$.
To see that $\partial\Omega\subset\Omega(\{\eqref{q4},\eqref{q5}\})$, it suffices to show that $\{(1,1)\}$ and $\{(k,p) : 0 \leq k < p\}$ belong to $\mathcal F$.
Suppose $(m,a) \in \mathbb N^{\times}nx$. Since $(1,1) \vee (m,a) = (m,a)$ when $m > 0$ and
$(1,1) \vee (0,a) = (a,a)$, the set $\{(1,1) \}$ is in $ \mathcal F$.
On the other hand, $m$ is in exactly one coset modulo $p$, say $m \in k+p\mathbb N$,
and then $(m+a\mathbb N) \cap (k+p\mathbb N) \neq \emptyset$, so $(m,a) \vee (k,p) < \infty$.
Thus $\{(k,p) : 0 \leq k < p\}$ is in $\mathcal F$.
Hence $\partial\Omega\subset\Omega(\{\eqref{q4},\eqref{q5}\})$.
For the reverse inclusion, we use the parametrization of the spectrum obtained in \proref{spectrum}.
Suppose $\omega\in \Omega(\{\eqref{q4},\eqref{q5}\})$. Then, since
the spectrum of a set of relations is invariant by Proposition~4.1 of \cite{topfree}, $\omega$ is a hereditary directed subset of $\mathbb N^{\times}nx$
such that for all $(m,a) \in \mathbb N^{\times}nx$, the $(m,a)$-translates of the relations \eqref{q4} and \eqref{q5} (corresponding to conjugation of the relations by the isometry corresponding to $(m,a)$) hold at the point $\omega$. Thus
\begin{gather}\label{oldqp4ma}
\hat{\omega}(1_{(m,a)} - 1_{(m+1,a)})= 0 \quad \text{ for all } (m,a) \in \mathbb N^{\times}nx,\text{ and}\\
\label{oldqp5ma}
\displaystyle \hat{\omega}\Big(\prod_{k = 0}^{p-1} (1_{(m,a)} - 1_{(m+ak ,ap)}) \Big) = 0 \quad \text{ for all } (m,a) \in \mathbb N^{\times}nx.
\end{gather}
From \eqref{oldqp4ma} we see that if $(m,a) \in \omega$, then $(m+1,a) \in \omega$; since none of the
$A(k,N)$ have this property (take $m = k$), we must have $\omega=B(r,N)$ for some $N \in \mathcal N$ and $r \in \mathbb Z/N$. Now, using \eqref{oldqp5ma}, we get
\begin{equation}\label{oldqp4z}
\prod_{k = 0}^{p-1} \Big(B(r,N)^{^{\wedge}} (1_{(m,a)}) - B(r,N)^{^{\wedge}} (1_{(m+ak ,ap)} )\Big) =
0\quad \text{ for all } (m,a) \in \mathbb N^{\times}nx.
\end{equation}
Suppose now that $a | N$ and $p$ is a prime. Then for every $m\in r(a)$ we have $(m,a) \in B(r,N)$, and \eqref{oldqp4z} implies that there
exists $k $ such that $(m+ak, ap) \in B(r,N)$, which implies in particular that $ap|N$. Thus
$N$ is the largest supernatural number $\nabla$,
and $\omega = B(r,\nabla) \in \partial \Omega$
by \proref{boundary}. Thus $\partial \Omega\supset \Omega(\{\eqref{q4},\eqref{q5}\})$ and we have proved that $\Omega(\mathcal F) = \Omega(\{\eqref{q4},\eqref{q5}\})$, as required.
\end{proof}
The \emph{core} of a quasi-lattice ordered group $(G, P)$ is the subgroup $G_0$ of $G$ generated by the monoid
\[
P_0 = \{ x\in P: x\vee y <\infty \text{ for all } y\in P\}
\]
(see \cite[Definition 5.4]{CL2}). By \cite[Proposition 5.5]{CL2}, the partial action of $G$ on $\Omega$ is topologically free if and only if its restriction to the core $G_0$ is topologically free. So we want to identify the core:
\begin{lemma}
The core of $({\mathbb Q \rtimes \mathbb Q^*_+}, \mathbb N^{\times}nx)$ is $(\mathbb Z \rtimes \{1\} ,\mathbb N \rtimes \{1\})$.
\end{lemma}
\begin{proof}
Each $(m,1)$ is in the core,
because $k := \min\big((m+\mathbb N) \cap (n+b\mathbb N)\big)$ is always finite and, by \eqref{Vcharacterisation}, we have $(m,1) \vee (n,b) = (k,b) \in \mathbb N^{\times}nx$.
Suppose now $a \neq 1$; then $m \not\equiv m+1 \pmod a$, so $(m,a) \vee (m+1, a) = \infty$ by \eqref{Vcharacterisation},
and thus $(m,a)\notin P_0$.
\end{proof}
\begin{proposition}\label{paamentf}
The partial action of ${\mathbb Q \rtimes \mathbb Q^*_+}$ on the boundary $\partial \Omega$ is amenable and
topologically free.
\end{proposition}
\begin{proof}
The expectation of $C_0(\partial\Omega)\rtimes({\mathbb Q \rtimes \mathbb Q^*_+})$ onto $C_0(\partial\Omega)$ is obtained by averaging over the dual coaction of ${\mathbb Q \rtimes \mathbb Q^*_+}$, and hence is faithful (by the argument of \cite[Lemma~6.5]{quasilat}, for example).
Thus the partial action of ${\mathbb Q \rtimes \mathbb Q^*_+}$ on $\partial\Omega$ is amenable. Next, recall from Proposition~\ref{boundary} that $(k,1)B(r,\nabla)=B(r+k,\nabla)$; since $B(r +k, \nabla) = B(r, \nabla) $ implies $k = 0$, the core acts freely on $\partial \Omega$. The result now follows from \cite[Proposition 5.5]{CL2}.
\end{proof}
We can now recover \cite[Theorem 3.4]{cun2} from the analysis of \cite{CL2}.
\begin{corollary}[Cuntz]\label{cuntzsimple}
The $C^*$-algebra $\mathcal Q_\mathbb N$ is simple and purely infinite.
\end{corollary}
\begin{proof}
The boundary quotient is simple and purely infinite by \cite[Theorem 5.1]{CL2}, so
the result follows from \thmref{qnisboundaryquotient}.
\end{proof}
\begin{corollary}
There is a faithful representation $\pi$ of $\mathcal Q_\mathbb N$ on $\ell^2(\mathbb Z)$ such that $\pi(s)e_n=e_{n+1}$ and $\pi(v_p)e_n=e_{pn}$.
\end{corollary}
\begin{proof}
We define isometries $S$ and $V_p$ on $\ell^2(\mathbb Z)$ by $Se_n=e_{n+1}$ and $V_pe_n=e_{pn}$, and check easily that they satisfy (Q1), (Q2), (Q5) and (Q6). Thus \proref{presentqn} gives a representation $\pi$ of $\mathcal Q_\mathbb N$ such that $\pi(s)=S$ and $\pi(v_p)=V_p$. Since $S\not=0$, the representation is certainly not $0$, and hence by Corollary~\ref{cuntzsimple} is faithful.
\end{proof}
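Since $S$ and the $V_p$ act on basis vectors through the index maps $n\mapsto n+1$ and $n\mapsto pn$, relations (Q1), (Q2) and (Q5) reduce to elementary statements about integers; (Q6) holds because $n\mapsto n+1$ is a bijection of $\mathbb Z$, so that $S$ is unitary. The following Python sketch (ours, a quick sanity check on a finite sample of indices) makes this explicit.
\begin{verbatim}
def S(n): return n + 1        # S e_n = e_{n+1}
def V(p, n): return p * n     # V_p e_n = e_{pn}

primes = [2, 3, 5]
for p in primes:
    for n in range(-30, 30):
        # (Q1): V_p S e_n = S^p V_p e_n, i.e. p*(n+1) = p*n + p
        assert V(p, S(n)) == V(p, n) + p
        # (Q2): V_p V_q e_n = V_q V_p e_n
        for q in primes:
            assert V(p, V(q, n)) == V(q, V(p, n))
        # (Q5): e_n lies in the range of exactly one S^k V_p (0 <= k < p),
        # namely the one with k = n mod p, so the range projections sum to 1
        assert sum(1 for k in range(p) if (n - k) % p == 0) == 1
print("relations (Q1), (Q2) and (Q5) verified on the sample")
\end{verbatim}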
\section{The phase transition theorem }\label{secKMS}
Standard arguments using the presentation in \thmref{toeplitzpresentation} show that there is a strongly continuous action $\sigma$ of $\mathbb R$ on $\mathbb TT(\mathbb N^{\times}nx)$ such that
\begin{equation}\label{defsigma}
\sigma_t(s) = s\ \text{ and }\ \sigma_t(v_p) = p^{it} v_p\text{ for $p\in \mathcal P$ and $t\in \mathbb R$.}
\end{equation}
The action $\sigma$ is spatially implemented in the identity representation of
$\mathbb TT(\mathbb N^{\times}nx)$ on $\ell^2(\mathbb N^{\times}nx)$ by the unitary representation $U:\mathbb R\to {\mathcal U}(\ell^2(\mathbb N^{\times}nx))$ defined in terms of the usual basis by
\[
U_t e_{ (m,a)} : = a^{it} e_{(m,a)}.
\]
Our goal in this section is to describe the equilibrium states of the system $(\mathbb TT(\mathbb N^{\times}nx), \mathbb R,\sigma)$, which we do in \thmref{maintheorem},
and to discuss the implications of this theorem for the interplay between equilibrium and symmetries. The notion of equilibrium appropriate in
this context is that of KMS states; since there are some subtleties involved, we begin by recalling the relevant definitions.
Suppose that $\alpha$ is an action of $\mathbb R$ on a $C^*$-algebra $A$. An element $a$ of $A$ is \emph{analytic} for the action $\alpha$ if the function $t\mapsto\alpha_t(a)$ is the restriction to $\mathbb R$ of an entire function on $\mathbb C$; it is shown at the start of \cite[\S8.12]{ped}, for example, that the set $A^{\textnormal{a}}$ of analytic elements is a dense $*$-subalgebra of $A$. For $\beta\in (0,\infty)$, a state $\phi$ of $A$ is a \emph{KMS state at inverse temperature $\beta$ for $\alpha$}, or a \emph{KMS${}_\beta$ state for $\alpha$}, if it satisfies the following \emph{KMS$_\beta$ condition}:
\begin{equation}\label{defKMS}
\phi(dc) = \phi(c\alpha_{i\beta}(d))\ \text{ for all $c,d\in A^{\textnormal{a}}$.}
\end{equation}
In fact, it suffices to check \eqref{defKMS} for a set of analytic elements which spans a dense subspace of $A$ \cite[Proposition~8.12.3]{ped}, and hence this definition agrees with the one used in \cite[\S5.3]{bra-rob}. For $\beta>0$, every KMS${}_\beta$ state is $\alpha$-invariant \cite[Proposition~8.12.4]{ped}; for a state $\phi$ to be a KMS${}_0$ state, it is standard to require that $\phi$ satisfies \eqref{defKMS} with $\beta=0$ (so that $\phi$ is a trace), and that $\phi$ is $\alpha$-invariant.
For every system $(A,\mathbb R,\alpha)$, the set $K_\beta$ of KMS$_\beta$ states is a compact convex subset of the state space $S(A)$. The affine structure of the set $K_\beta$ is studied in \cite[\S5.2.3]{bra-rob}: it is always a simplex in the sense of Choquet, and the extremal KMS$_\beta$ states (that is, the extreme points of $K_\beta$) are always factor states. The same section in \cite{bra-rob} also discusses the relationship between KMS states and equilibrium states in models from quantum statistical mechanics.
For $\beta = \infty$ there are two possible notions of equilibrium. Following
Connes and Marcolli \cite[Definition 3.7]{CM2}, we distinguish between
\emph{KMS$_\infty$ states},
which are by definition the weak* limits of nets $\phi_i$ of KMS$_{\beta_i}$ states with $\beta_i\to\infty$, and \emph{ground states},
which are by definition states $\phi$ for which the entire functions
\[
z \mapsto \phi(d\alpha_z(c))\ \text{ for $c,d\in A^{\textnormal{a}}$}
\]
are bounded on the upper half-plane. With this distinction in mind, \cite[Proposition~5.3.23]{bra-rob} and \cite[Proposition 3.8]{CM2}
imply that the KMS$_\infty$ states form a compact convex subset of the ground states. As observed by Connes and Marcolli \cite[page 447]{CM2}, ground states need not be KMS$_\infty$
states, and our system provides another example of this phenomenon; see parts (3) and (4) of
Theorem~\ref{maintheorem} below. We point out that this relatively recent distinction
was not made in \cite{bra-rob} or \cite{ped}, where the terms ``ground state'' and ``KMS${}_\infty$ state'' are used interchangeably to refer to the ground states of \cite[Definition 3.7]{CM2}.
The definition of ground state in \cite{ped} looks different: there it is required that the functions $z \mapsto \phi(d\alpha_z(c))$ are bounded by $\|c\|\,\|d\|$. However, as pointed out in the proof of \cite[Proposition~5.3.19, $(2)\Longrightarrow(5)$]{bra-rob}, a variant\footnote{One suitable variant is formulated as Exercise~9 on page 264 of \cite{rud}.} of the Phragm\'en--Lindel\"of theorem implies that an entire function which is bounded on the upper half-plane is bounded by the sup-norm of its restriction to the real axis, which in this case is at most $\|c\|\,\|d\|$. It follows from the definition in \cite{ped} that it suffices to check boundedness for a set of elements which spans a dense subspace of $A^{\textnormal{a}}$.
For our system $(\mathbb TT(\mathbb N^{\times}nx),\mathbb R,\sigma)$, the spanning elements $s^mv_av_b^*s^{*n}$ for $\mathbb TT(\mathbb N^{\times}nx)$ satisfy
\[
\sigma_t(s^mv_av_b^*s^{*n})=(ab^{-1})^{it}s^mv_av_b^*s^{*n},
\]
and hence are all analytic. Thus to see that a state $\phi$ of $\mathbb TT(\mathbb N^{\times}nx)$ is a KMS${}_\beta$ state or ground state for $\sigma$, it suffices to check the appropriate condition for $c$ and $d$ of the form $s^mv_av_b^*s^{*n}$.
We can now state our main theorem. The function $\zeta$ appearing in the formulas is the Riemann zeta-function, defined for $r>1$ by $\zeta(r)=\sum_{n=1}^\infty n^{-r}$.
\begin{theorem}\label{maintheorem}
Let $\sigma$ be the dynamics on $\mathbb TT(\mathbb N^{\times}nx)$ which satisfies \eqref{defsigma}.
\begin{enumerate}
\item For $\beta\in [0,1)$ there are no KMS$_\beta$ states for $\sigma$.
\item For $\beta \in [1,2]$ there is a unique KMS$_\beta$ state $\psi_\beta$ for $\sigma$, and it is characterised by \[\psi_\beta (s^mv_av_b^*s^{*n}) = \begin{cases} 0 & \text{ if } a \neq b\text{ or } m \neq n\\
a^{-\beta} &\text{ if } a = b \text{ and } m=n.
\end{cases}
\]
\item For $\beta \in (2,\infty]$, the simplex of KMS$_\beta$ states for $\sigma$ is isomorphic to the simplex of probability measures on $\mathbb T$; for $z\in\mathbb T$, the extremal KMS$_\beta$ state $\psi_{\beta,z}$ corresponding to the point mass $\delta_z$ is a type I factor state satisfying
\[
\psi_{\beta, z}(s^m v_a v_b^* s^{*n}) = \begin{cases} \displaystyle 0 & \text{ if } a \neq b \text{ or } m \not\equiv n \pmod a,\\ \displaystyle
\frac{1}{a \zeta(\beta -1)} \sum_{\{x\,:\,a \mid x \mid (m-n)\}} x^{1-\beta} z^{({m-n})/{x}} & \text { if } a = b \text{ and } m \equiv n \pmod a.
\end{cases}
\]
\item If $\psi$ is a ground state for $\sigma$, then the restriction $\omega:=\psi|_{C^*(s)}$ is a state of $C^*(s)\cong \mathbb TT(\mathbb N)$, and we have
\begin{equation}\label{formground}
\psi(s^m v_a v_b^* s^{*n}) =
\begin{cases} 0 & \text{ unless } a = b = 1\\
\omega(s^ms^{*n}) & \text{ when } a = b = 1.
\end{cases}
\end{equation}
The map $\psi \mapsto \psi|_{C^*(s)}$ is an affine isomorphism of the compact convex set of ground states onto the state space of $\mathbb TT(\mathbb N)$, and a state $\psi$ is an extremal ground state if and only if $\psi|_{C^*(s)}$ is either a vector state of $\mathbb TT(\mathbb N)$ or is lifted from an evaluation map on the quotient $C(\mathbb T)$ of $\mathbb TT(\mathbb N)$.
\end{enumerate}
\end{theorem}
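The formula in part (3) is straightforward to evaluate numerically. The Python sketch below is ours (the truncation levels are arbitrary): it computes $\psi_{\beta,z}(s^mv_av_b^*s^{*n})$ for $\beta>2$ using a truncated zeta function. The first two outputs illustrate that the formula gives $\psi_{\beta,z}(1)=1$ and, more generally, the value $a^{-\beta}$ whenever $a=b$ and $m=n$; the third output is genuinely $z$-dependent.
\begin{verbatim}
import cmath

def zeta(s, terms=10**5):        # truncated Riemann zeta function
    return sum(j ** (-s) for j in range(1, terms))

def psi(beta, z, m, a, b, n, terms=10**5):
    """psi_{beta,z}(s^m v_a v_b^* s^{*n}) for beta > 2, following part (3)."""
    if a != b or (m - n) % a != 0:
        return 0.0
    if m == n:                   # x runs over all multiples of a
        xs = range(a, a * terms, a)
    else:                        # x runs over divisors of |m - n| divisible by a
        d = abs(m - n)
        xs = [x for x in range(a, d + 1, a) if d % x == 0]
    total = sum(x ** (1 - beta) * z ** ((m - n) // x) for x in xs)
    return total / (a * zeta(beta - 1, terms))

z = cmath.exp(2j * cmath.pi / 7)
print(abs(psi(3.0, z, 0, 1, 1, 0)))   # ~ 1.0:   psi_{beta,z}(1) = 1
print(abs(psi(3.0, z, 4, 2, 2, 4)))   # ~ 0.125 = 2^{-3} = a^{-beta}
print(psi(3.0, z, 6, 2, 2, 0))        # a z-dependent value
\end{verbatim}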
We will prove these assertions in the next three sections. Before we start, though, we make some
remarks on the significance of the theorem for symmetries and equilibrium.
\begin{remarks} (i) Although
a KMS$_\beta$ state $\psi$ (and in fact any state of $\mathbb TT(\mathbb N^{\times}nx)$) is uniquely determined by its value on products of the form
$s^{m} v_av_b^* s^{*n}$, it is not obvious that there are states satisfying the formulas in parts (2), (3), and (4). We will prove existence of such states in \S\ref{secconstructionKMSground} using spatial constructions.
(ii) The $C^*$-algebra $\mathbb TT(\mathbb N^{\times}nx)$ carries a dual action $\hat\tau$ of $({\mathbb Q^*_+})^\wedge$, which is characterised on generators by $\hat\tau_\gamma(s)=s$ and $\hat\tau_\gamma(v_p)=\gamma(p)v_p$, and a dual coaction $\delta$ of ${\mathbb Q \rtimes \mathbb Q^*_+}$ (see \cite[Proposition~6.1]{quasilat}). It may help coaction fans to observe that $\hat\tau$ is the action of $({\mathbb Q^*_+})^\wedge$ corresponding to the restriction $\delta|$ of the coaction $\delta$ to the quotient $\mathbb Q^*_+=({\mathbb Q \rtimes \mathbb Q^*_+})/\mathbb Q$. The coaction $\delta$ gives an expectation $E_{\mathbb Q \rtimes \mathbb Q^*_+}$ onto the fixed-point algebra $\overline{\operatorname{span}}\{s^{m} v_av_a^* s^{*m}:m \in \mathbb N, a \in \mathbb N^{\times}\}$, which is faithful because ${\mathbb Q \rtimes \mathbb Q^*_+}$ is amenable (see \cite[Lemma~6.5]{quasilat}), and $\hat\tau$ gives a faithful expectation $E_{\hat\tau}$ onto
\[
\mathbb TT(\mathbb N^{\times}nx)^{\hat\tau}=\overline{\operatorname{span}}\{s^{m} v_av_a^* s^{*n}: m, n \in \mathbb N, a \in \mathbb N^{\times}\}.
\]
The dynamics $\sigma$ is the composition of $\hat\tau$ with the embedding $t\mapsto (\gamma_t:r\mapsto r^{it})$ of $\mathbb R$ as a dense subgroup of $({\mathbb Q^*_+})^\wedge$. So $\mathbb TT(\mathbb N^{\times}nx)^{\hat\tau}=\mathbb TT(\mathbb N^{\times}nx)^{\sigma}$, and $E_{\hat\tau}$ is also the expectation onto the fixed-point algebra for $\sigma$.
(iii) It follows from \thmref{maintheorem}
that KMS$_\beta$ states vanish on the products $s^mv_av_b^*s^{*n}$ with $a \neq b$, and hence factor through
the conditional expectation $E_{\hat\tau}$ of the dual action of $({\mathbb Q^*_+})^\wedge$; for $\beta\in [1,2]$ they also vanish on the products
$s^{m} v_av_a^* s^{*n}$ with $m \neq n$, and hence
factor through the conditional expectation $E_{\mathbb Q \rtimes \mathbb Q^*_+}$ of the dual coaction of ${\mathbb Q \rtimes \mathbb Q^*_+}$.
Hence, for $\beta\in[1,2]$, the equilibrium states are symmetric with respect to the coaction of ${\mathbb Q \rtimes \mathbb Q^*_+}$, but for $\beta > 2$ they are symmetric only with respect to the (quotient) coaction of $\mathbb Q^*_+$.
Since the extreme states in part (3) are indexed by the circle,
there is a circular symmetry at the level of KMS states which is broken as $\beta$ increases through~$2$.
(iv) The relation (T1) makes it unlikely that there is an action of the
Pontryagin dual $\mathbb T$ of $\mathbb Z$ on $\mathbb TT(\mathbb N^{\times}nx)$ which sends $s\mapsto zs$ for $z\in \mathbb T$, and there is certainly no such action having any $v_p$ as an eigenvector. Thus the symmetry which is apparently being broken as $\beta$ passes from $2^-$ to $2^+$ in \thmref{maintheorem} does not obviously come from a group action on the $C^*$-algebra $\mathbb TT(\mathbb N^{\times}nx)$.
(v) There is a further phase transition ``at infinity'': the KMS$_\infty$ states form a proper subset of the ground states. Indeed, it follows from the formula in (3) that every KMS${}_\infty$ state satisfies $\psi(ss^*)=1$, and hence the extremal KMS$_\infty$ states are the ground states such that $\psi|_{C^*(s)}$ is lifted from an evaluation map on $C(\mathbb T)$. Notice also that the existence of the affine isomorphism in (4) implies that the ground states of $\mathbb TT(\mathbb N^{\times}nx)$ do not form a simplex, because the state space of the noncommutative subalgebra $C^*(s)\cong\mathbb TT(\mathbb N)$ is not a simplex (see, for example, \cite[Example 4.2.6]{bra-rob}).
(vi) (The partition function.) The extremal KMS$_\beta$ states (for $\beta>2$) are related to the KMS$_{\infty}$ states in the following way. Since each extremal KMS$_{\infty}$ state $\phi$ is $\mathbb R$-invariant, the dynamics is implemented in the GNS-representation $(\mathcal H_\phi,\pi_\phi,\xi_\phi)$ by a unitary group $U:\mathbb R\to U(\mathcal H_\phi)$. The \emph{Liouville operator} is the infinitesimal generator $H$ of this one-parameter group, which is an unbounded self-adjoint operator on $\mathcal H_\phi$. The functionals
\[
\phi_\beta:T\mapsto \frac{\operatorname{Tr} (e^{-\beta H}T)}{\operatorname{Tr} e^{-\beta H}}
\]
are then the extremal KMS$_\beta$ states; the normalising factor $\beta\mapsto \operatorname{Tr} e^{-\beta H}$ is called the \emph{partition function} of the system. On the face of it, the partition function will depend on the choice of KMS$_{\infty}$ state $\phi$, but in these number-theoretic systems it doesn't seem to. In the Bost-Connes system, for example, there is a large symmetry group of the underlying $C^*$-algebra which commutes with the dynamics and acts transitively on the extreme KMS$_\infty$ states, and all the Liouville operators in the associated GNS representations match up (see \cite[\S6]{bos-con}). The same thing happens for similar systems over more general number fields (see \cite[Remark~3.5]{LvF}). Here, even though there is no obvious symmetry group of $\mathbb TT(\mathbb N^{\times}nx)$ which implements the circular symmetry of the simplex of KMS$_\infty$ states, the GNS representations of the extreme KMS$_\infty$ states $\psi_{\infty,z}$ are all realisable on the same space $\ell^2(X)$, with the same cyclic vector $e_{0,1}$, the same unitary group implementing the dynamics, and the same Liouville operator (see the discussion at the start of the proof of Proposition~\ref{constructKMS>2}). That discussion shows also that the eigenvalues of $H$ are the numbers $\ln x$ for $x\in \mathbb N^{\times}$, and that the multiplicity of the eigenvalue $\ln x$ is $x$, so that $\operatorname{Tr} e^{-\beta H}=\zeta(\beta-1)$. So it makes sense for us to claim that: ``The partition function of the system $(\mathbb TT(\mathbb N^{\times}nx),\mathbb R,\sigma)$ is $\zeta(\beta-1)$.''
\end{remarks}
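To illustrate the claim about the partition function in Remark~(vi), here is a minimal numerical sketch in Python; the truncation level and the value of $\beta$ are arbitrary choices made purely for illustration. Summing $e^{-\beta\ln x}$ over the spectrum of $H$, with the eigenvalue $\ln x$ counted with multiplicity $x$, reproduces a truncation of $\zeta(\beta-1)$.
\begin{verbatim}
# Illustration only: Tr exp(-beta*H) for the Liouville operator of Remark (vi),
# whose eigenvalues are ln(x) with multiplicity x, is sum_x x * x^{-beta},
# i.e. zeta(beta - 1).  For beta = 3 this should be close to zeta(2) = pi^2/6.
import math

beta, N = 3.0, 100000                                 # sample values
trace = sum(x * math.exp(-beta * math.log(x)) for x in range(1, N + 1))
print(trace)                                          # ~ 1.64492...
print(math.pi ** 2 / 6)                               # zeta(2) = 1.64493...
\end{verbatim}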
\section{Characterisation of KMS and ground states of the system}\label{seccharacterisationKMSground}
We begin with the case $\beta <1$.
\begin{proposition}[\thmref{maintheorem}(1)]\label{noequilibriumatlowbeta}
The system $(\mathbb TT(\mathbb N^{\times}nx),\mathbb R, \sigma)$ has
no KMS$_\beta$ states for $\beta<1$.
\end{proposition}
\begin{proof}
(Notice that our argument also rules out the existence of KMS$_\beta$ states for $\beta<0$.)
Suppose $\psi$ is a KMS$_\beta$ state for $\sigma$. Then the KMS$_\beta$ condition implies that, for $a\in\mathbb N^{\times}$ and $0\leq k<a$, we have
\[
\psi (s^{k} v_av_a^* s^{*k}) =\psi (v_a^* s^{*k}\sigma_{i\beta}(s^{k} v_a))=\psi (v_a^* s^{*k}a^{-\beta}s^{k} v_a)=a^{-\beta}\psi(1)=a^{-\beta}.
\]
The relation (T5) (or strictly speaking, (T5') in Lemma~\ref{relsata}) implies that the projections $ s^{k} v_av_a^* s^{*k}$ for $0 \leq k <a$ are mutually orthogonal, and hence $1 \geq \sum_{k=0} ^{a-1} s^{k} v_av_a^* s^{*k}$. Now positivity of $\psi$ implies that
\[
1=\psi(1)\geq \psi \Big(\sum_{k=0} ^{a-1} s^{k} v_av_a^* s^{*k}\Big)=aa^{-\beta},
\]
which implies $\beta\geq 1$.
\end{proof}
For $\beta \geq 1$ we need the characterisation of the KMS$_\beta$ states in Lemma~\ref{KMScharacterisationlemma}. Here and later we use the following notational convention to simplify formulas.
\begin{convention}\label{convention(())}
We write $s^{((k))}$
to mean $s^k$ when $k\geq 0$ and $s^{*(-k)}$ when $k<0$.
\end{convention}
\begin{lemma}\label{KMScharacterisationlemma}
Let $\beta \in [1,\infty)$. A state $\phi$ of $\mathbb TT(\mathbb N^{\times}nx)$ is
a KMS$_\beta$ state for $\sigma$ if and only if for every $a, b \in \mathbb N^{\times}$ and $m,n \in \mathbb N$ we have
\begin{equation}\label{KMScharacterisation}
\phi(s^m v_a v_b^* s^{*n}) = \begin{cases} 0 & \text{ if } a\neq b \text{ or } m \not\equiv n \pmod a\\
a^{-\beta} \phi\big(s^{((\frac{m-n}{a}))}\big) & \text { if } a = b \text{ and } n \equiv m \pmod a . \end{cases}
\end{equation}
\end{lemma}
\begin{proof}
Suppose first that $\phi$ is a KMS$_\beta$ state.
Applying the KMS condition twice gives
\[
\phi(s^m v_a v_b^* s^{*n}) = a^{-\beta} \phi( v_b^* s^{*n} s^m v_a) = (a/b)^{-\beta} \phi(s^m v_a v_b^* s^{*n}),
\]
which implies that
\begin{equation}\label{intermedcalc}
\phi (s^mv_av_b^*s^{*n}) =
\begin{cases}0&\text{ if $a\neq b$}\\
a^{-\beta} \phi (v_a^* s^{((m-n))} v_a )&\text{ if $a= b$.}
\end{cases}
\end{equation}
When $m\not\equiv n \pmod a$, the relation (T5) implies that $v_a^* s^{((m-n))} v_a=0$,
and when $m \equiv n \pmod a$, relation (T1) implies that $v_a^* s^{((m-n))} v_a =s^{(((m-n)/a))}$. Thus \eqref{intermedcalc} says that $\phi$ satisfies \eqref{KMScharacterisation}.
Suppose now that $\phi$ satisfies \eqref{KMScharacterisation}. Since it suffices to check the KMS condition
\eqref{defKMS} on spanning elements, $\phi$ is a KMS${}_\beta$ state for $\sigma$ if and only if
\begin{equation}\label{KMSanalytic}
{a}^{\beta}\phi(s^m v_av_b^* s^{*n} \ s^q v_c v_d^* s^{*r} ) =
{b}^{\beta} \phi( s^q v_c v_d^* s^{*r}\ s^m v_av_b^* s^{*n} )
\end{equation}
for $a, b, c, d \in \mathbb N^{\times}$ and $m,n,q,r \in \mathbb N$. We prove this equality by computing both sides.
To compute the left-hand side of \eqref{KMSanalytic}, we first reduce
the expression using the covariance relation in \lemref{covarianceonngenerators}:
\begin{align*}
s^m v_av_b^* s^{*n} \ s^q v_c v_d^* s^{*r} &=
s^m v_av_b^* s^{((q-n))} v_cv_d^*s^{*r} \\
&= \begin{cases}
0 & \text{ if } q\not\equiv n \pmod {\gcd(b,c)} \\
s^m v_a ( s^\beta v_{c'} v^*_{b'} s^{*\gamma} ) v_d^* s^{*r} & \text{ if } q \equiv n \pmod {\gcd(b,c)}
\end{cases}
\\
&= \begin{cases}
0 & \text{ if } q\not\equiv n \pmod {\gcd(b,c)} \\
s^{m+\beta a} v_{ac'} v^*_{db'} s^{*r+\gamma d} & \text{ if } q \equiv n \pmod {\gcd(b,c)} ,
\end{cases}
\end{align*}
where $b' = b/\gcd(b,c)$, $c' = c/\gcd(b,c)$, and $( \beta, \gamma)$ is the smallest non-negative solution of $(q-n)/{\gcd(b,c)} = \beta b' - \gamma c'$ (here $\beta$ and $\gamma$ are the non-negative integers provided by the covariance relation, and should not be confused with the inverse temperature).
Now \eqref{KMScharacterisation} implies that the left-hand side of \eqref{KMSanalytic} vanishes unless
$ q \equiv n \pmod{\gcd(b,c)}$, $ac' = db'$, and $m+\beta a\equiv r + \gamma d \pmod {ac'}$,
in which case it equals
\begin{equation}\label{lhsofkms}
(c')^{-\beta}\phi\big( s^{((\frac{m+\beta a-r-\gamma d}{ac'}))}\big) .
\end{equation}
The analogous computation, with $a' := a/\gcd(d,a)$ and $d' := d/\gcd(d,a)$, shows that the right-hand side of
\eqref{KMSanalytic} vanishes unless
$ m \equiv r \pmod {\gcd(d,a)}$ and $ca' = bd'$. If so, we take $(\delta, \alpha)$ to be the smallest non-negative solution of $(m-r)/\gcd(d,a) = \delta d' - \alpha a'$. Now the right-hand side of \eqref{KMSanalytic} vanishes unless $q + \delta c\equiv n + \alpha b\pmod {bd'}$, and then equals
\begin{equation}\label{rhsofkms}
(d')^{-\beta}\phi( s^{((\frac{q + \delta c- n - \alpha b}{bd'}))} ).
\end{equation}
We need to verify that the conditions for a nonvanishing left-hand side
match those for the right-hand side,
and that when they hold, the values of \eqref{lhsofkms} and \eqref{rhsofkms} coincide. The situation is symmetric, so we suppose that $q \equiv n \pmod {\gcd(b,c)}$, that $ac' = db'$, and that, with $(\beta, \gamma)$ as defined two paragraphs above, $m+\beta a\equiv r + \gamma d \pmod {ac'}$.
Notice that
\[
ac' = db' \iff a/d = b'/c' \iff a'/d' = b'/c' \iff a'/d' = b/c \iff ca' = bd';
\]
these are all equivalent to $ac = bd$, and from the reduced form in the middle we deduce that $a' = b'$
and $c' = d'$. This implies in particular that the coefficients $(c')^{-\beta}$ in \eqref{lhsofkms} and $(d')^{-\beta}$ in \eqref{rhsofkms} coincide.
Next, notice that $m+\beta a\equiv r + \gamma d \pmod {ac'}$ implies that $m\equiv r \pmod {\gcd(d,a)}$, so it makes sense to take $(\delta, \alpha)$ to be the smallest non-negative solution of $(m-r)/{\gcd(d,a)} = \delta d' - \alpha a'$.
Consider now the exponent of $s$ on the left-hand side of \eqref{KMSanalytic}. The definition of $(\delta,\alpha)$ implies that $m-r=\delta d - \alpha a$, so, remembering that $a' = b'$ and $c' = d'$, we have
\begin{align*}
\frac{m+\beta a-r-\gamma d}{ac'}
&= \frac{ (\delta - \gamma) d + (\beta - \alpha) a}{\gcd(d,a)a'c'}
=\frac{ (\delta - \gamma) d' + (\beta - \alpha) a'}{a'c'}\\
&=\frac{ (\delta - \gamma) c' + (\beta - \alpha) b'}{b'd'}
=\frac{ \beta b - \gamma c + \delta c- \alpha b}{\gcd(b,c)b'd'}\\
&= \frac{q-n+\delta c- b\alpha}{bd'},
\end{align*}
which is the exponent of $s$ on the right-hand side of \eqref{KMSanalytic}.
Since $ac'$ divides $m+\beta a -r-\gamma d$, this calculation also shows that
$bd'$ divides $q + \delta c- n - \alpha b$, or equivalently that $q + \delta c\equiv n + \alpha b\pmod {bd'}$. This completes the proof of \eqref{KMSanalytic}, and hence we have shown that $\phi$ is a KMS$_\beta$ state.
\end{proof}
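Since the manipulation of the exponents in the last part of the proof is easy to get wrong, we record a small brute-force check. It is an illustration only, written in Python, and the Greek letters of the proof are renamed (\texttt{B}, \texttt{G}, \texttt{D}, \texttt{A}) to avoid any clash with the inverse temperature.
\begin{verbatim}
# Illustration only: brute-force verification of the identity
#   (m + B*a - r - G*d)/(a*c') = (q + D*c - n - A*b)/(b*d')
# used at the end of the proof, over all small parameter values with ac = bd.
from fractions import Fraction
from math import gcd
from itertools import product

def smallest_nonneg(t, u, v):
    """Smallest non-negative (x, y) with t = x*u - y*v, where gcd(u, v) = 1."""
    x = 0
    while (x * u - t) % v != 0 or x * u - t < 0:
        x += 1
    return x, (x * u - t) // v

for a, b, c, m, n, q, r in product(range(1, 5), repeat=7):
    if (a * c) % b != 0:
        continue
    d = a * c // b                       # enforce ac = bd, i.e. ac' = db'
    g, h = gcd(b, c), gcd(d, a)
    bp, cp, ap, dp = b // g, c // g, a // h, d // h
    if (q - n) % g or (m - r) % h:       # both sides of the relation vanish
        continue
    B, G = smallest_nonneg((q - n) // g, bp, cp)
    D, A = smallest_nonneg((m - r) // h, dp, ap)
    lhs = Fraction(m + B * a - r - G * d, a * cp)
    rhs = Fraction(q + D * c - n - A * b, b * dp)
    assert lhs == rhs
print("exponent identity confirmed on all small cases")
\end{verbatim}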
\begin{lemma}\label{lemmagroundcharacterisation}
A state $\phi$ of $\mathbb TT(\mathbb N^{\times}nx)$ is a ground state for $\sigma$ if and only if
\begin{equation}\label{groundcharacterisation}
\phi( s^m v_a v_b^* s^{*n}) = 0 \ \text{ whenever $a\not=1$ or $b\not=1$. }
\end{equation}
\end{lemma}
\begin{proof}
Let $\phi$ be a state of $\mathbb TT(\mathbb N^{\times}nx)$.
The expression
\begin{equation*}
\phi( s^q v_c v_d^* s^{*r} \ \sigma _{\alpha + i\beta}( s^m v_a v_b^* s^{*n})) = (a/b)^{i\alpha - \beta} \phi( s^q v_c v_d^* s^{*r} \ s^m v_a v_b^* s^{*n})
\end{equation*}
is bounded on the upper half plane ($\beta >0$) if and only if
\begin{equation}\label{groundcharact-old}
\phi( s^q v_c v_d^* s^{*r} \ s^m v_a v_b^* s^{*n}) = 0 \quad \text{ whenever } a < b .
\end{equation}
Suppose $\phi$ is a ground state and choose
$ r = m$ and $d = a = 1$;
then \eqref{groundcharact-old} implies $\phi( s^q v_c v_b^* s^{*n}) = 0 $
for $1 < b$. Taking adjoints gives the same for $1 < c$. This proves \eqref{groundcharacterisation}
(with $q$ in place of $m$ and $c$ in place of $a$).
Conversely, suppose $\phi( s^m v_a v_b^* s^{*n}) = 0 $ whenever $a$ or $b$ is not $1$ and
choose two analytic elements $X= s^m v_av_b^* s^{*n}$ and $Y$ for $\sigma$; then
the Cauchy-Schwarz inequality yields
\begin{align*}
| \phi (Y^* \sigma _{\alpha + i\beta}(s^m v_av_b^* s^{*n} ) )|^2
& =
|(a/b)^{i\alpha -\beta} \phi(Y^* s^m v_av_b^* s^{*n} ) |^2 \\
& \leq
(a/b)^{-2\beta} \phi(Y^* Y) \phi(s^n v_bv_a^* s^{*m} \ s^m v_av_b^* s^{*n} )\\
& = (b/a)^{2\beta} \phi(Y^* Y) \phi(s^n v_b v_b^* s^{*n} ).
\end{align*}
Since the last factor vanishes for $b \neq 1$, the function
$\alpha + i \beta \mapsto \phi(Y^* \sigma _{\alpha + i\beta}(X))$ is bounded for $\beta >0$,
so $\phi $ is a ground state.
\end{proof}
\section{Construction of KMS and ground states}\label{secconstructionKMSground}
To prove that there exists a KMS$_\beta$ state satisfying the formula in part (2) of \thmref{maintheorem}
we use a product measure arising from the factorization $\Omega_B\cong \prod_{p\in \mathcal P} X_p$ of \proref{prodstructure}.
The construction makes sense for $\beta \geq 1$, but the case $\beta = 1$ requires special consideration.
\begin{proposition}[\thmref{maintheorem}(2): existence of a KMS$_\beta$ state that factors through $E_{\mathbb Q \rtimes \mathbb Q^*_+}$]
\label{productmeasure} For $k\in \mathbb N$ and $r\in \mathbb Z/p^k$, let $\delta_{(r,p^k)}$ denote the unit point mass at $B(r,p^k) \in X_p$.
For $\beta >1$, the series
\[
\mu_{\beta,p} = (1-p^{1-\beta}) \sum_{(r,p^k)\in X_p} p^{- \beta k} \delta_{(r,p^k)}
\]
defines a Borel probability measure on $X_p$; for $\beta = 1$, we let $\mu_{1,p}$ be the probability measure on $X_p$ coming from
additive Haar measure on $\mathbb Z_p$ via the embedding $r\mapsto B(r,p^\infty)$ of $\mathbb Z_p$ in $X_p$ (see Lemma~\ref{omegaB}). Let $\mu_\beta$ be the measure on $\Omega_B$ coming from the product measure $\prod_{p\in\mathcal P}\mu_{\beta,p}$ on $\prod_{p\in\mathcal P} X_p$ via the homeomorphism of Proposition~\ref{prodstructure}, let $\mu_{\beta}^*:f\mapsto\int f\,d\mu_{\beta}$ be the associated state on $C(\Omega)$, and view $\mu_\beta^*$ as a state on
$\mathbb TT(\mathbb N^{\times}nx)^\delta$ using the isomorphism of
\[
\mathbb TT(\mathbb N^{\times}nx)^\delta=\overline{\operatorname{span}}\{s^mv_av_a^*s^{*m}:(m,a)\in\mathbb N^{\times}nx\}
\]
onto $C(\Omega)$ which takes $s^mv_av_a^*s^{*m}$ to $1_{m,a}$. Then $\psi_\beta :=\mu_\beta^* \circ E_{\mathbb Q \rtimes \mathbb Q^*_+}$ is a KMS${}_\beta$ state for $1 \leq \beta \leq \infty$, and it satisfies
\begin{equation}\label{charpsibeta}
\psi_\beta(s^mv_av_b^*s^{*n})=\begin{cases}
0&\text{ unless $a=b$ and $m=n$}\\
a^{-\beta}&\text{ if $a=b$ and $m=n$.}
\end{cases}
\end{equation}
\end{proposition}
\begin{proof}
Suppose first that $1< \beta < \infty$. Then the series
\[
\sum_{(r,p^k)\in X_p} p^{- \beta k} = \sum_{k \in \mathbb N} p^k p^{- \beta k}
\]
converges with sum $(1-p^{1-\beta})^{-1}$, so the sum defining $\mu_{\beta,p}$ converges in norm in $M(\Omega)$ to a probability measure.
To prove that $\psi_\beta$ is a KMS${}_\beta$ state, we compute $\psi_\beta (s^m v_a v_b^* s^{*n}) $ and apply Lemma~\ref{KMScharacterisationlemma}. Since $\psi_\beta$ factors through $E_{\mathbb Q \rtimes \mathbb Q^*_+}$, we have
$\psi_\beta (s^m v_a v_b^* s^{*n}) =0 $ whenever $m\neq n$ or $a\neq b$.
So suppose that $m=n$ and $a = b= \prod_{p|a} p^{e_p(a)}$. The isomorphism of $\mathbb TT(\mathbb N^{\times}nx)^\delta$ with $C(\Omega)$ carries $s^mv_av_a^*s^{*m}$ into $1_{m,a}$, which is the characteristic function of the set
$\{B(r,N): a|N,\ r(a)=m(a)\}$; the homeomorphism of Proposition~\ref{prodstructure} carries this set into
\begin{equation}\label{supp1ma}
\Big(\prod_{p|a}\{B(r,p^k):k\geq e_p(a),\ r(p^{e_p(a)})=m(p^{e_p(a)}) \}\Big)\times\Big(\prod_{q\nmid a}X_q\Big).
\end{equation}
Thus
\begin{align*}
\psi_\beta (s^m v_a v_a^* s^{*m})
&=\int 1_{m,a}\,d\mu_\beta=\mu_\beta\big(\{B(r,N):a|N,\ r(a)=m(a)\}\big)\\
&=\prod_{p|a}\mu_{\beta,p}\big(\{B(r,p^k):k\geq e_p(a),\ r(p^{e_p(a)})=m(p^{e_p(a)})\} \big)\times\Big(\prod_{q\nmid a}\mu_{\beta,q}(X_q)\Big)\\
&=\prod_{p|a}\mu_{\beta,p}\big(\{B(r,p^k):k\geq e_p(a),\ r(p^{e_p(a)})=m(p^{e_p(a)}) \}\big)\\
&=\prod_{p|a}(1-p^{1-\beta})\Big(\sum_{k=e_p(a)}^\infty p^{-\beta k}\big(\#\{r\in \mathbb Z/p^k:r(p^{e_p(a)})=
m(p^{e_p(a)})\}\big)\Big).
\end{align*}
For $k\geq e_p(a)$ there are $p^{k-e_p(a)}$ elements $r$ in $\mathbb Z/p^k$ such that $r(p^{e_p(a)})=m(p^{e_p(a)})$. Thus
\begin{align*}
\psi_\beta (s^m v_a v_a^* s^{*m})
&=\prod_{p|a}(1-p^{1-\beta})\Big(\sum_{k=e_p(a)}^\infty p^{(1-\beta)k}p^{-e_p(a)}\Big)\\
&=\prod_{p|a}(1-p^{1-\beta})p^{-\beta e_p(a)}\Big(\sum_{l=0}^\infty p^{(1-\beta)l}\Big)\\
&=\prod_{p|a}p^{-\beta e_p(a)}=\Big(\prod_{p|a}p^{e_p(a)}\Big)^{-\beta }=a^{-\beta}.
\end{align*}
Since the expectation $E_{\mathbb Q \rtimes \mathbb Q^*_+}$ kills the nonzero powers of $s$, this calculation shows that $\psi_\beta$ satisfies \eqref{KMScharacterisation}, and hence Lemma~\ref{KMScharacterisationlemma} implies that $\psi_\beta$ is a KMS${}_\beta$ state.
Now suppose $\beta = 1$. Then the measure $\mu_{1}$ is the product of normalized Haar measures on the $\mathbb Z_p$, which is the normalised Haar measure on $\widehat\mathbb Z \cong \partial\Omega$. This satisfies $\mu_1(aE)= a^{-1}\mu_1(E)$, and since the support of $1_{m,a}$ is $m+a \widehat\mathbb Z$, we have
\[
\psi_1(s^m v_a v_a^* s^{*m})=\int_{\widehat\mathbb Z} 1_{m,a}\,d\mu_1=\mu_1(m+a\widehat\mathbb Z)=a^{-1}\mu_1(\widehat\mathbb Z)=a^{-1}.
\]
So Lemma~\ref{KMScharacterisationlemma} also implies that $\psi_1$ is a KMS${}_1$ state.
When $\beta = \infty$, the usual interpretation $a^{-\infty} = 0$ for $a>1$ and $1^{-\infty} = 1$ yields probability measures $\mu_{\infty, p}$ on $X_p$
concentrated at the point $(0,1)\in X_p$, and their product corresponds to the unit point mass $\mu_\infty$ concentrated at the point $B(0,1) \in \Omega_B$.
Then $\psi_\infty := (\mu_\infty )_* \circ E_{\mathbb Q \rtimes \mathbb Q^*_+}$ satisfies
\begin{equation}\label{charpsiinfty}
\psi_\infty (s^mv_av_b^*s^{*n})=\begin{cases}
0&\text{ unless $a=b =1$ and $m=n$,}\\
1&\text{ if $a=b =1$ and $m=n$.}
\end{cases}
\end{equation}
and is a ground state by \lemref{lemmagroundcharacterisation}.
The characterisations of $\psi_\beta$ and $\psi_\infty$ show that
$\psi_\beta (c) \to \psi_\infty (c)$ as $\beta \to \infty$ for $c=s^mv_av_b^*s^{*n}$, and hence $\psi_\infty$ is a KMS$_\infty$ state.
\end{proof}
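As a quick illustration of the geometric-series computation in the proof (nothing here is used elsewhere), the following Python sketch checks the identity $(1-p^{1-\beta})\sum_{k\geq e}p^{-\beta k}p^{k-e}=p^{-\beta e}$ for sample values of $p$, $e$ and $\beta>1$; the truncation level \texttt{K} is an arbitrary choice.
\begin{verbatim}
# Illustration only: the mass that mu_{beta,p} gives to the balls appearing in
# the proof of Proposition productmeasure is p^{-beta*e}, as claimed.
beta, p, e, K = 1.7, 3, 2, 400          # sample values; K truncates the k-sum

total = (1 - p ** (1 - beta)) * sum(
    p ** (-beta * k) * p ** (k - e)      # weight p^{-beta k} times p^{k-e} balls
    for k in range(e, K))
print(total, p ** (-beta * e))           # agree up to the (tiny) truncated tail
\end{verbatim}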
To construct KMS$_{\beta}$ states for $\beta>2$, we use the Hilbert-space representation of $\mathbb N^{\times}nx$ described in the next lemma. For $2<\beta<\infty$, the state $\omega$ in the lemma will be lifted from a state on the quotient $C(\mathbb T)$ of $\mathbb TT(\mathbb N)$, hence given by a probability measure $\mu$ on $\mathbb T$, and then the isometry $U$ in the $GNS$ representation is the multiplication operator $(Uf)(z)=zf(z)$ on $L^2(\mathbb T,d\mu)$. When we construct ground states, $\omega$ can be any state of $\mathbb TT(\mathbb N)$.
\begin{lemma}\label{constructreps}
Let $\omega$ be a state of the Toeplitz algebra $\mathbb TT(\mathbb N)$, and let $U$ be the generating isometry for the GNS representation $(\mathcal H_\omega,\pi_\omega,\xi_\omega)$ of $\mathbb TT(\mathbb N)$. Set
\[
X:=\{(r,x):x\in \mathbb N^{\times},\ r\in \mathbb Z/x\}
\]
and let $e_{r,x}$ be the usual basis for $\ell^2(X)$.
Let $S$ and $V_p$ be the isometries on $\ell^2(X,\mathcal H_\omega)$ which are characterised by the following behaviour on elements of the form $fe_{r,x}$ for $f\in \mathcal H_\omega$:
\begin{align*}
S(fe_{r,x})&=\begin{cases} f e_{r +1,x} & \text{ if } r + 1 \not=0_{\mathbb Z/x},\
\\(Uf)e_{0,x} & \text{ if } r + 1 =0_{\mathbb Z/x}, \text{ and}\end{cases}\\
V_p(fe_{r,x})&=fe_{pr,px}.
\end{align*}
Then $S$ and $\{V_p:p\in\mathcal P \}$ satisfy the relations \textnormal{(T1)--(T5)} of \thmref{toeplitzpresentation}.
\end{lemma}
\begin{proof}
To verify (T1), first observe that
\begin{equation}\label{totp1}
V_p S(fe_{r,x})
= \begin{cases} fe_{pr +p,px} & \text{ if } r + 1\neq 0_{\mathbb Z/x}\\
(Uf)e_{0,px} & \text{ if } r + 1 = 0_{\mathbb Z/x}.\end{cases}
\end{equation}
To compute $S^pV_p$, first note that for $k\leq x$ we have
\[
S^k(fe_{r,x})
=\begin{cases}
fe_{r +k,x} & \text{ if $r +i\neq 0_{\mathbb Z/x}$ for $i$ satisfying $0<i\leq k$,} \\
(Uf)e_{k-i,x} & \text{ if there exists $i$ such that $0<i\leq k$ and $r +i= 0_{\mathbb Z/x}$,}\end{cases}
\]
which, since $p\leq px$, implies that
\begin{align*}
S^p V_p (fe_{r,x})
&=\begin{cases} fe_{pr +p,px} & \text{ if $pr +i\neq 0_{\mathbb Z/px}$ for $i$ satisfying $0<i\leq p$,} \\
(Uf)e_{p-i,px} & \text{ if there exists $i$ such that $0<i\leq p$ and $pr +i= 0_{\mathbb Z/px}$}\end{cases}\\
&=\begin{cases} fe_{pr +p,px} & \text{ if $pr +p\neq 0_{\mathbb Z/px}$,} \\
(Uf)e_{0,px} & \text{ if $pr +p= 0_{\mathbb Z/px}$,}\end{cases}
\end{align*}
which is the same as \eqref{totp1} because $\times p:\mathbb Z/x\to \mathbb Z/px$ is injective (see \lemref{timesb}). Thus (T1) holds.
To verify relation (T2), we just need to observe that $(\times p)\circ (\times q)=(\times q)\circ (\times p)$: indeed, both are just $\times pq:\mathbb Z/x\to \mathbb Z/pqx$. For (T3), we suppose that $p$ and $q$ are distinct primes. The adjoint $V_p^*$ is given by
\[
V_p^*(fe_{r,x})
=\begin{cases} fe_{w,p^{-1}x} & \text{ if $p|x$ and $r =pw$ for some $w\in \mathbb Z/p^{-1}x$,} \\
0 & \text{ otherwise.}\end{cases}
\]
Thus we have
\begin{equation}\label{vp*vq}
V_p^*V_q(fe_{r,x})=
\begin{cases} fe_{w,p^{-1}qx} & \text{ if $p|qx$ and $q r =pw$ for some $w\in \mathbb Z/p^{-1}qx$,} \\
0 & \text{ otherwise,}\end{cases}
\end{equation}
whereas
\begin{equation}\label{vqvp*}
V_qV_p^*(fe_{r,x})=
\begin{cases} fe_{q\zeta,qp^{-1}x} & \text{ if $p|x$ and $r =p\zeta$ for some $\zeta\in \mathbb Z/p^{-1}x$,} \\
0 & \text{ otherwise.}\end{cases}
\end{equation}
We know from \lemref{timesb} that $r=p\zeta\Longleftrightarrow r\equiv 0\pmod p$, which is equivalent to $q r\equiv 0\pmod{p}$ because $\gcd(q,p)=1$ ; thus the non-trivial cases in \eqref{vp*vq} and \eqref{vqvp*} coincide, with $w=q\zeta$, and we have $V_p^*V_q=V_qV_p^*$, which is (T3).
To verify (T4), we first compute the left-hand side:
\begin{equation}\label{lhst4}
S^* V_p f e_{r,x} = S^* f e_{pr,px}
=\begin{cases} f e_{pr -1, px} & \text{ if $pr \neq 0_{\mathbb Z/px}$} \\
(U^*f) e_{px -1,px} & \text{ if $pr = 0_{\mathbb Z/px}$.}\end{cases}\\
\end{equation}
For the right hand side, we have
\begin{align*}
S^{p-1} V_p S^* f e_{r,x}
&=\begin{cases} S^{p-1} V_p fe_{r -1 ,x} & \text{ if $r \neq 0_{\mathbb Z/x}$} \\
S^{p-1} V_p (U^*f)e_{x-1,x} & \text{if $r = 0_{\mathbb Z/x}$}\end{cases}\\
&=\begin{cases} S^{p-1} fe_{pr -p ,px} & \text{ if $r \neq 0_{\mathbb Z/x}$} \\
S^{p-1} (U^*f)e_{px-p,px} & \text{ if $r = 0_{\mathbb Z/x}$}\end{cases}\\
&=\begin{cases} fe_{pr -p +p-1,px} & \text{ if $pr \neq 0_{\mathbb Z/px}$} \\
(U^*f)e_{px-p+p-1,px} & \text{ if $pr = 0_{\mathbb Z/px}$,}\end{cases}
\end{align*}
which is the same as \eqref{lhst4}.
Finally, we verify (T5). Suppose $1\leq k<p$. Then
\[
V_p^*S^kV_p(fe_{r,x})
=\begin{cases}V_p^*(fe_{pr +k,px}) & \text{ if $pr +i\neq 0_{\mathbb Z/px}$ for $0<i\leq k$} \\
V_p^*((Uf)e_{k-i,px}) & \text{ if there exists $i$ such that $0<i\leq k$ and $pr +i= 0_{\mathbb Z/px}$.}\end{cases}
\]
Since $0<k<p$, the second possibility does not arise. Thus
\[
V_p^*S^kV_p(fe_{r,x})=\begin{cases}fe_{w,x}&\text{ if $pr +k=pw$}\\
0&\text{ otherwise,}
\end{cases}
\]
which has to be $0$ because $pr +k$ cannot be in the range of $\times p$ for $k$ in the given range. This confirms (T5), and completes the proof.
\end{proof}
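Readers who like to experiment may find the following label-level simulation helpful; it is an illustration only, and the encoding of the vectors $(U^j\xi)e_{r,x}$ as integer triples is an ad hoc choice made for the sketch. It checks the relation (T1) of the lemma on a range of sample basis vectors.
\begin{verbatim}
# Illustration only: encode (U^j xi) e_{r,x} as the triple (j, r, x) with
# 0 <= r < x, implement the formulas for S and V_p from Lemma constructreps,
# and check V_p S = S^p V_p (relation (T1)) on sample vectors.

def S(v):
    j, r, x = v
    return (j, r + 1, x) if (r + 1) % x != 0 else (j + 1, 0, x)

def V(p, v):
    j, r, x = v
    return (j, p * r, p * x)

def S_pow(k, v):
    for _ in range(k):
        v = S(v)
    return v

for p in (2, 3, 5, 7):
    for x in range(1, 12):
        for r in range(x):
            v = (0, r, x)
            assert V(p, S(v)) == S_pow(p, V(p, v))
print("relation (T1) holds on all sampled basis vectors")
\end{verbatim}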
We can now prove the existence of many KMS${}_\beta$ states for $\beta>2$.
\begin{proposition}[\thmref{maintheorem}(3): KMS$_\beta$ states from probability measures on $\mathbb T$]\label{constructKMS>2}
Suppose $\beta \in (2,\infty)$ and $\mu$ is a probability measure on $\mathbb T$. Then there is a state $\psi_{\beta,\mu}$ of
$\mathbb TT(\mathbb N^{\times}nx)$ such that
\begin{equation}\label{KMScomputation}
\psi_{\beta,\mu}(s^m v_a v_b^* s^{*n}) = \begin{cases} \displaystyle 0 & \text{ if } a \neq b \text{ or } m \not\equiv n \pmod a\\ \displaystyle
\frac{1}{a \zeta(\beta -1)} \sum_{a \mid x \mid (m-n)} x^{1-\beta} \int_{\mathbb T}z^{({m-n})/{x}}\,d\mu(z) & \text { if } a = b \text{ and } m \equiv n \pmod a.
\end{cases}
\end{equation}
There is also a KMS$_{\infty}$ state $\psi_{\infty,\mu}$ such that
\begin{equation}\label{KMSinftycomputation}
\psi_{\infty,\mu}(s^m v_a v_b^* s^{*n}) = \begin{cases} \displaystyle 0 & \text{ unless } a = b =1\\
\displaystyle \int_{\mathbb T} z^{m-n} \,d\mu(z) & \text { if } a = b = 1.\end{cases}
\end{equation}
For $\beta \in (2,\infty]$, the correspondence $\mu \to \psi_{\beta,\mu}$ is an affine map of the set $P(\mathbb T)$ of probability measures on the unit circle into the simplex of KMS$_\beta$ states.
Moreover, the extremal states $\psi_{\infty,z}$ for $z \in \mathbb T$ are pure and pairwise inequivalent.
\end{proposition}
\begin{proof}
As anticipated before \lemref{constructreps}, we apply that Lemma to the state $\omega$ of $\mathbb TT(\mathbb N)$ lifted from the measure $\mu$ on $\mathbb T$, so the Hilbert space $\mathcal H_\omega$ is $L^2(\mathbb T,d\mu)$ and $(Uf)(z)=zf(z)$. The resulting family $S,V$ gives us a representation $\pi_\mu:=\pi_{S,V}$ of $\mathbb TT(\mathbb N^{\times}nx)$ on the Hilbert space $\ell^2(X,L^2(\mathbb T,d\mu))$. We aim to use this representation to define the states $\psi_{\beta,\mu}$. For motivation, we suppose first that $\mu=\delta_z$; then $U$ is multiplication by $z$ on $\mathbb C$ and the Hilbert space is $\ell^2(X)$ with the usual orthonormal basis $\{e_{r,x}:(r,x)\in X\}$. In this special case, we can borrow a construction from \cite{bos-con}.
We first note that there is a unitary representation $W:\mathbb R\to U(\ell^2(X))$ such that $W_te_{r,x}=x^{it}e_{r,x}$, and this representation implements the dynamics $\sigma$ in the representation $\pi_\mu$ --- in other words, $(\pi_\mu,W)$ is a covariant representation of the system $(\mathbb TT(\mathbb N^{\times}nx),\mathbb R,\sigma)$. The infinitesimal generator $H$ of $W$ is the (unbounded) self-adjoint operator $H$ on $\ell^2(X)$ such that $W_t=e^{itH}$, and is diagonal with respect to the basis $\{e_{r,x}\}$, with eigenvalues $\ln x $ of multiplicity $x$. Then $e^{-\beta H}$ is a positive bounded operator which is also diagonalised by the $e_{r,x}$ and satisfies $e^{-\beta H}e_{r,x}=x^{-\beta}e_{r,x}$. Thus for $\beta>2$, $e^{-\beta H}$ is a trace-class operator with
\[
\operatorname{Tr} e^{-\beta H} = \sum_{(r,x)\in X} \langle x^{-\beta}e_{r,x},e_{r,x}\rangle =
\sum_{x\in \mathbb N^{\times}} x^{1 -\beta} = \zeta(\beta-1),
\]
and $\zeta(\beta- 1)^{-1} e^{-\beta H}$ is a bounded positive operator with trace one,
which defines a state $\psi_{\beta,\mu}$ on $\mathbb TT(\mathbb N^{\times}nx)$ through the representation $\pi_\mu$:
\begin{equation}\label{defpsit}
\psi_{\beta,\mu} (T):= \frac{1}{\zeta(\beta-1)} \operatorname{Tr}(e^{-\beta H} \pi_\mu(T))=\frac{1}{\zeta(\beta-1)}\sum_{(r,x)\in X}x^{-\beta}\langle\pi_\mu(T)e_{r,x},e_{r,x}\rangle.
\end{equation}
For more general $\mu$, we can still define $H$ formally by $H(fe_{r,x})=(\ln x)fe_{r,x}$, but now $e^{-\beta H}$ is no longer trace-class. (If we view $H_\mu$ as $\ell^2(X)\otimes L^2(\mathbb T,d\mu)$, the new $e^{-\beta H}$ is the tensor product $e^{-\beta H}\otimes 1$ of the old one with the identity operator on $L^2(\mathbb T,d\mu)$, which is not trace-class unless $L^2(\mathbb T,d\mu)$ is finite-dimensional.) Nevertheless, using \eqref{defpsit} as motivation, we can still define $\psi=\psi_{\beta,\mu}$ using the elements $e_{r,x}=1e_{r,x}$ by
\[
\psi(T)=\frac{1}{\zeta(\beta-1)}\sum_{(r,x)\in X}x^{-\beta}\langle\pi_\mu(T)e_{r,x},e_{r,x}\rangle,
\]
and verify directly that $\psi$ is a positive functional with $\psi(1)=1$, hence is a state. We want to show that $\psi$ is a KMS$_{\beta}$-state satisfying~\eqref{KMScomputation}.
We check \eqref{KMScomputation} first. We have
\[
\psi(s^m v_a v_b^* s^{*n})
=\frac{1}{\zeta(\beta-1)}\sum_{(r,x)\in X}x^{-\beta}\langle V_b^* S^{*n}e_{r,x},V_a^*S^{*m}e_{r,x}\rangle.
\]
Now $V_b^*S^{*n}e_{r,x}$ has the form $fe_{s,b^{-1}x}$ and $V_a^*S^{*m}e_{r,x}$ has the form $ge_{t,a^{-1}x}$, so the inner product in the $(r,x)$-summand is zero unless $a^{-1}x=b^{-1}x$ in $\mathbb N^{\times}$, or equivalently, unless $a=b$ and $a\mid x$. Similarly, since $S^{*n}e_{r,x}$ has the form $he_{s,x}$, it is either in the range of $V_aV_a^*$ or orthogonal to it.
Thus
\begin{align*}
\psi(s^m v_a v_a^* s^{*n})
&=\frac{1}{\zeta(\beta-1)}\sum_{\{(r,x)\in X\;:\;a|x\}}x^{-\beta}\langle S^m V_a V_a^* S^{*n}e_{r,x},e_{r,x}\rangle\\
&=\frac{1}{\zeta(\beta-1)}\sum_{\{(r,x)\in X\;:\;a|x,\ S^{*n}e_{r,x}\in V_aV_a^*(H_\mu)\}}x^{-\beta}\langle S^mS^{*n}e_{r,x},e_{r,x}\rangle.
\end{align*}
For each $x$ such that $a|x$, there are precisely $a^{-1}x$ elements $r \in \mathbb Z/x$ such that $S^{*n}e_{r,x}$ belongs to the range of $V_aV_a^*$. For each such element,
\[
\langle S^mS^{*n}e_{r,x},e_{r,x}\rangle=
\begin{cases}0&\text{ unless $x$ divides $m-n$}\\
\int_{\mathbb T}z^{(m-n)/x}\,d\mu(z)&\text{ if $x|(m-n)$.}
\end{cases}
\]
So $\psi(s^m v_a v_b^* s^{*n})$ vanishes unless $a=b$ and $a|(m-n)$ (which ensures that there exist $x$ satisfying $a|x|(m-n)$), and in that case
\begin{equation}\label{formulaforpsi}
\psi(s^m v_a v_a^* s^{*n})
=\frac{1}{\zeta(\beta-1)}\sum_{\{x\in\mathbb N^{\times}\;:\;a|x,\ x|(m-n)\}}(a^{-1}x)x^{-\beta}\int z^{(m-n)/x}\,d\mu (z),
\end{equation}
as required.
To check that $\psi$ is a KMS$_{\beta}$ state using \lemref{KMScharacterisationlemma}, we need to see that when $a|(m-n)$ we have
\[
\psi(s^m v_a v_a^* s^{*n})=a^{-\beta}\psi(s^{(((m-n)/a))}),
\]
where we use double parentheses according to Convention~\ref{convention(())}.
However, expanding out the right-hand side gives
\begin{align*}
a^{-\beta}\psi(s^{(((m-n)/a))})
&=\frac{a^{-\beta}}{\zeta(\beta-1)}\sum_{(w,y)\in X}y^{-\beta}\langle S^{(((m-n)/a))}e_{w,y},e_{w,y}\rangle\\
&=\frac{a^{-\beta}}{\zeta(\beta-1)}\sum_{\{y\in \mathbb N^{\times}\;:\;y|(m-n)/a\}}y^{-\beta}\Big(y\int_{\mathbb T}z^{(m-n)/ay}\,d\mu(z)\Big),
\end{align*}
which is another way of writing the right-hand side of~\eqref{formulaforpsi}.
For $\beta = \infty$, we use the same representation $\pi_\mu$, and set
\[
\psi_{\infty, \mu} (T) := \langle \pi_\mu( T) e_{0,1}, e_{0,1} \rangle;
\]
the state $\psi_{\infty,\mu}$ satisfies \eqref{KMSinftycomputation}, and is a ground state by \lemref{lemmagroundcharacterisation}.
To see that $\psi_{\infty,\mu}$ is a KMS$_\infty$ state,
notice first that the only term on the right of \eqref{KMScomputation}
which survives the limit as $\beta \to \infty$ has $x = 1$, and there is such a term only when $a = 1$.
Since $\lim_{\beta \to \infty} \zeta(\beta-1) = 1$, we deduce that $\psi_{\beta,\mu}$ converges weak* to $\psi_{\infty,\mu}$ as $\beta\to\infty$, and hence $\psi_{\infty,\mu}$ is a KMS$_\infty$ state.
Formula~\eqref{KMScomputation}, or formula \eqref{KMSinftycomputation} for $\beta = \infty$, shows that
the map $\mu\mapsto \psi_{\beta,\mu}$
is affine and weak*-continuous from $P(\mathbb T)$ into the simplex of KMS$_{\beta}$ states.
We claim that the states $\psi_{\infty, z}$ for $ z \in \mathbb T$ are pure and mutually inequivalent. The vector $e_{0,1} \in H_z = \ell^2(X)$ is cyclic for $\pi_z$ for each $z \in \mathbb T$, and thus $\pi_z$ can be regarded as the GNS representation of the corresponding vector state
\[
\psi_{\infty,z} (T) := \langle \pi_z(T)e_{0,1},e_{0,1}\rangle.
\]
So it suffices to show that if $A \in \mathcal B( \ell^2(X))$ intertwines $\pi_z$ and $\pi_w$ for some $z, w\in \mathbb T$, then either $A=0$, or $z = w$ and $A$ is a multiple of the identity. Before we do this, we observe that the product
\[
Q: =\prod_{p\in \mathcal P} \prod_{j = 0}^{p-1} \big(1 - \pi_z(s^j)V_p V_p^*\pi_z(s^{*j})\big),
\]
converges in the weak-operator topology on $\ell^2(X)$ to the rank-one projection onto $\mathbb C e_{0,1}$.
Indeed, we have $V_p^*\pi_z(s^{*j})e_{0,1}= \bar z^j V_p^*e_{0,1}=0$ for every $(j,p)$, and hence for each finite subset $F$ of $\mathcal P$ we have $(1 - \pi_z(s^j)V_p V_p^*\pi_z(s^{*j}))e_{0,1}=e_{0,1}$ for all $p\in F$ and $j<p$;
on the other hand, if $b \neq 1$, there are a prime $p$ that divides $b$
and a value of $j$ such that $(j,p) \leq (n,b)$ in the quasi-lattice order
(see the proof of \thmref{qnisboundaryquotient}), and then
$(1 - \pi_z(s^j)V_p V_p^*\pi_z(s^{*j})) e_{n,b} = 0$.
More generally, for each $a\in \mathbb N^{\times}$ and $0\leq m < a$,
\[
Q_{m,a} := \pi_z(s^m)V_a Q V_a^* \pi_z(s^{*m}),
\]
is the rank-one projection onto the vector $e_{m,a}$, and is in $\pi_z(\mathbb TT(\mathbb N^{\times}nx))''$ for every $z\in \mathbb T$. Notice that the operator $Q_{m,a}$ on $\ell^2(X)$ is the same for every $z$.
Suppose now that $A \in \mathcal B( \ell^2(X))$ intertwines $\pi_z$ and $\pi_w$ for some $z, w\in \mathbb T$.
Since the operator $Q_{m,a}$ is the same for $\pi_z$ and $\pi_w$, and belongs to both $\pi_z(\mathbb TT(\mathbb N^{\times}nx))''$ and $\pi_w(\mathbb TT(\mathbb N^{\times}nx))''$, the intertwining relation implies that $Q_{m,a}$ commutes with $A$. This implies that there are scalars $\lambda_{k,a}$ such that $Ae_{k,a} = \lambda_{k,a}e_{k,a}$. Then we have
\begin{align*}
z^n \lambda_{k,a}e_{k,a} &= A(z^ne_{k,a}) = A (\pi_z(s^{na})e_{k,a})=A (\pi_z(s^{na+k})V_a e_{0,1})\\
&= \pi_w(s^{na+k}) V_a (Ae_{0,1})=
\pi_w(s^{na+k}) V_a(\lambda_{0,1}e_{0,1}) = \lambda_{0,1}w^ne_{k,a}.
\end{align*}
Thus
$\lambda_{k,a} = (w/z)^n \lambda_{0,1} $ for every $n\in \mathbb N$,
and this implies that either $\lambda_{k,a} = 0$ for every $(k,a)$, or
$z =w$, in which case $\lambda_{k,a} = \lambda_{0,1}$ for every $(k,a)$. Either way, $A$ is a multiple of the identity, and the representations $\pi_z$ are irreducible and mutually inequivalent. Thus the corresponding vector states $\psi_{\infty,z}$ are pure
and mutually inequivalent.
\end{proof}
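The last step in the verification of the KMS$_\beta$ condition above amounts to the change of variables $x=ay$ in a divisor sum. For illustration only, the following Python sketch confirms numerically, for sample values of $\beta$, $a$ and $z$, that the divisor-sum formula \eqref{KMScomputation} (for the point mass $\delta_z$ and $m\neq n$) satisfies the characterisation of Lemma~\ref{KMScharacterisationlemma}.
\begin{verbatim}
# Illustration only: with t = m - n and a | t, check that
#   sum_{a|x|t} (x/a) x^{-beta} z^{t/x}
#     =  a^{-beta} sum_{y|t/a} y^{1-beta} z^{(t/a)/y}.
import cmath

beta = 2.7
z = cmath.exp(2j * cmath.pi * 0.3)       # a sample point of the circle

def divisors(n):
    return [d for d in range(1, abs(n) + 1) if n % d == 0]

for a in (1, 2, 3, 4, 6):
    for k in range(-8, 9):               # k plays the role of (m - n)/a
        if k == 0:
            continue
        t = a * k
        lhs = sum((x // a) * x ** (-beta) * z ** (t // x)
                  for x in divisors(t) if x % a == 0)
        rhs = a ** (-beta) * sum(y ** (1 - beta) * z ** (k // y)
                                 for y in divisors(k))
        assert abs(lhs - rhs) < 1e-12
print("the divisor-sum formula satisfies the KMS characterisation")
\end{verbatim}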
We now prove the parts of \thmref{maintheorem} which describe the ground states on $\mathbb TT(\mathbb N^{\times}nx)$.
\begin{proof}[Proof of part (4) of \thmref{maintheorem}]
Since the additive generator $s$ of $\mathbb TT(\mathbb N^{\times}nx)$ is a proper isometry, Coburn's Theorem implies that $C^*(s)$ is naturally isomorphic to $\mathbb TT(\mathbb N)$. The restriction $\omega:=\psi|_{C^*(s)}$ is then a positive functional satisfying $\omega(1)=1$, and hence is a state of $C^*(s)\cong \mathbb TT(\mathbb N)$. So $\psi\mapsto \psi|_{C^*(s)}$ maps ground states to states of $\mathbb TT(\mathbb N)$. \lemref{lemmagroundcharacterisation} implies that $\psi$ satisfies \eqref{formground}, which implies that $\psi\mapsto \psi|_{C^*(s)}$ is injective on ground states.
To see that $\psi\mapsto \psi|_{C^*(s)}$ is surjective, let $\omega$ be a state of $\mathbb TT(\mathbb N)$, let $\pi_{S,V}$ be the representation of $\mathbb TT(\mathbb N^{\times}nx)$ on $\ell^2(X,\mathcal H_\omega)$ constructed in \lemref{constructreps}, and define
\[
\psi_{\omega}(T):=\langle \pi_{S,V}(T) \xi_\omega e_{0,1}, \xi_\omega e_{0,1}\rangle\ \text{ for $T\in \mathbb TT(\mathbb N^{\times}nx)$.}
\]
We then have
\begin{equation}\label{comppsiomega}
\psi_\omega(s^m v_a v_b^* s^{*n})=
\langle S^m V_aV_b^*S^{*n} \xi_\omega e_{0,1}, \xi_\omega e_{0,1}\rangle = \langle V_b^*S^{*n}\xi_\omega e_{0,1}, V_a^*S^{*m}\xi_\omega e_{0,1}\rangle.
\end{equation}
Since $V_b^*S^{*n}\xi_\omega e_{0,1}$ vanishes unless $b=1$, the right-hand side of \eqref{comppsiomega} vanishes unless $a=b=1$, and \lemref{lemmagroundcharacterisation} implies that $\psi_\omega$ is a ground state. On the other hand, if $a=b=1$, then \eqref{comppsiomega} gives
\[
\psi_\omega(s^m v_a v_b^* s^{*n})=
\langle S^m S^{*n} \xi_\omega e_{0,1}, \xi_\omega e_{0,1}\rangle
=\langle \pi_{\omega}(s^m s^{*n}) \xi_\omega, \xi_\omega\rangle=\omega(s^ms^{*n}),
\]
which implies that $\psi_\omega|_{C^*(s)}=\omega$. We now know that $\psi\mapsto \psi|_{C^*(s)}$ is a bijection from the set of ground states onto the state space of $\mathbb TT(\mathbb N)$.
The map $\psi\mapsto \psi|_{C^*(s)}$ is obviously affine. Equation~\eqref{formground} implies that it is a homeomorphism for the respective weak* topologies, and hence it is an affine isomorphism of compact convex sets. This implies in particular that the extremal ground states are those of the form $\psi_\omega$ where $\omega$ is a pure state of $\mathbb TT(\mathbb N)$. Since the GNS representation $\pi_\omega$ of $\mathbb TT(\mathbb N)$ is irreducible, the Wold decomposition for the isometry $\pi_\omega(s)$ implies that $\pi_\omega$ is either equivalent to the identity representation of $\mathbb TT(\mathbb N)$ on $\ell^2$ or is lifted from an irreducible representation of $C(\mathbb T)$. Thus, since $\omega$ is a vector state in its GNS representation, $\omega$ is either a vector state for the identity representation or is lifted from an evaluation map on $C(\mathbb T)$.
\end{proof}
\section{Surjectivity of the parametrisation of KMS states}\label{secsurjectivity}
To show that all KMS states
arise via the above construction, we need to show that in any GNS representation, there are analogues of the projection $Q$ which we used in the proof of \proref{constructKMS>2}. To deal with the case where $\zeta(\beta-1)$ does not converge, we need to use also analogous projections involving products over finite sets of primes. For each subset $E$ of $ \mathcal P$, let $\mathbb N^{\times}_E$ denote the
semigroup of positive integers with all prime factors in $E$;
the corresponding zeta function and Euler product are given by
\begin{equation}\label{eulerproductB}
\zeta_E(\beta) := \sum_{a\in \mathbb N^{\times}_E} a^{-\beta}= \prod_{p\in E} (1-p^{-\beta})^{-1}.
\end{equation}
For every $E$, the series converges for $\beta>1$, but if $E$ is finite it also converges
for $\beta > 0$.
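For illustration (this is not needed anywhere), one can check the Euler product \eqref{eulerproductB} for a finite set $E$ numerically; the set of primes, the value of $\beta$ and the truncation of the exponents in the following Python sketch are arbitrary sample choices.
\begin{verbatim}
# Illustration only: enumerate N^x_E for E = {2, 3, 7} by exponent triples and
# compare the (truncated) series with the finite Euler product.
E = (2, 3, 7)
beta = 1.3                                # any beta > 0 works for finite E

series = sum((2 ** i * 3 ** j * 7 ** k) ** (-beta)
             for i in range(60) for j in range(40) for k in range(25))
prod = 1.0
for p in E:
    prod /= 1 - p ** (-beta)
print(series, prod)                       # agree to high precision
\end{verbatim}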
The reconstruction formula in part (3) of the following lemma is one of our main technical innovations. We will see in the Appendix how this technique also simplifies the proof of uniqueness for the KMS states of the Bost-Connes systems, and we believe that it is likely to be useful elsewhere.
\begin{lemma}\label{phirestrictedtoQB}
Let $\beta >1$ and suppose $\phi$ is a KMS$_{\beta}$ state.
Form the GNS-representation $(H_\phi,\pi_\phi,\xi_\phi)$ of $\mathbb TT(\mathbb N^{\times}nx)$, so that $\phi(\cdot)=\langle \pi_\phi(\cdot)\xi_\phi,\xi_\phi\rangle$, and
denote by $\tilde\phi$ the vector state extending $\phi$ to all bounded operators on $H_\phi$. Write $S=\pi_\phi(s)$, $V_p=\pi_\phi(v_p)$, and let $E$ be a subset of $\mathcal P$. Then the product
\[
Q_E: =\prod_{p\in E} \prod_{j = 0}^{p-1} (1 - S^j V_p V_p^*S^{*j})
\]
converges in the weak-operator topology to a projection $Q_E$ in $\pi_\phi(\mathbb TT(\mathbb N^{\times}nx))''$, which satisfies
\begin{enumerate}
\item $\tilde\phi(Q_E) ={ \zeta_E(\beta-1)}^{-1}$;
\item if $E$ is a subset of $\mathcal P$ such that $\zeta_E(\beta -1) < \infty$, then $\phi_{Q_E} (T) := \zeta_E(\beta -1) \tilde \phi(Q_E \pi_\phi(T) Q_E)$ defines a state $\phi_{Q_E}$ of $\mathbb TT(\mathbb N^{\times}nx)$, called the \emph{conditional state of $\phi$ with respect to $Q_E$};
\item if $\zeta_E(\beta -1) < \infty$, then $\phi$ can be reconstructed from its conditional state $\phi_{Q_E}$ by the formula
\begin{equation}\label{reconstruct}
\phi (T) = \sum_{a\in \mathbb N^{\times}_E}\sum_{k=0}^{a-1} \frac{a^{-\beta}}{\zeta_E(\beta-1)} \phi_{Q_E}(v_a^* s^{*k} \, T \, s^k v_a);
\end{equation}
in particular, for $n \geq 0$ we have
\begin{equation}\label{reconstructionequationSn}
\phi(s^n)=\frac{1}{\zeta_E(\beta-1)}\sum_{\{a\in \mathbb N^{\times}_E\;:\;a|n\}}a^{1-\beta}\phi_{Q_E}(s^{n/a}).
\end{equation}
\end{enumerate}
\end{lemma}
\begin{proof}
When $E$ is finite, the product is finite and belongs to $\pi_\phi(\mathbb TT(\mathbb N^{\times}nx))$. When $E$ is infinite, $Q_E$ is the weak-operator limit of a decreasing family of projections in the range of $\pi_\phi$, and therefore belongs to $\pi_\phi(\mathbb TT(\mathbb N^{\times}nx)) ''$.
Suppose $p$ and $q$ are relatively prime. Since for each $a$ the projections $s^j v_av_a^* s^{*j}$ with $0\leq j < a$
have mutually orthogonal ranges, we have
\begin{align*}
\phi\Big(\prod_j (1 - s^j v_pv_p^* s^{*j}) \, &\prod_k (1 - s^k v_qv_q^* s^{*k}) \Big)
= \phi\Big(\Big(1 - \sum_j s^j v_pv_p^* s^{*j}\Big)\Big(1- \sum_k s^k v_qv_q^* s^{*k}\Big)\Big)\\
&= \phi\Big(1 - \sum_j s^j v_pv_p^* s^{*j}- \sum_k s^k v_qv_q^* s^{*k}+\sum_{j,k}s^j v_pv_p^* s^{*j}\;s^k v_qv_q^* s^{*k}\Big).
\end{align*}
The covariance relation in Lemma~\ref{covarianceonngenerators} implies that
\[
v_p^*s^{*j}s^kv_q=s^\alpha v_qv_p^*s^{*\beta},
\]
where $(\alpha,\beta)$ is the smallest non-negative solution of $k-j=\alpha p-\beta q$. So
\begin{align*}
\phi\Big(\prod_j (1 - s^j v_pv_p^* s^{*j}) \, &\prod_k (1 - s^k v_qv_q^* s^{*k}) \Big) \\
&= 1 - \sum_j \phi(s^j v_pv_p^* s^{*j}) - \sum_k \phi(s^k v_qv_q^* s^{*k}) + \sum_{j,k}
\phi( s^j v_pv_p^* s^{*j}\;s^kv_qv_q^* s^{*k} )\\
&= 1 - \sum_j p^{-\beta} - \sum_k q^{-\beta} + \sum_{j,k}
\phi\big( s^{j+p\alpha}v_pv_qv_q^*v_p^* s^{*(k+q\beta)}\big)\\
&= 1 - p(p^{-\beta}) - q(q^{-\beta}) + \sum_{j,k} (pq)^{-\beta}\\
&= (1-p^{1-\beta}) \, (1-q^{1-\beta})\\
&=\phi\Big(\prod_j (1 - s^j v_pv_p^* s^{*j})\Big)\phi\Big(\prod_k (1 - s^k v_qv_q^* s^{*k})\Big),
\end{align*}
where we have used formula \eqref{KMScharacterisation} in the third equality.
From this we deduce that for every finite subset $F$ of $E$, we have
\[
\phi\Big(\prod_{p\in F} \prod_{j=0}^{p-1} (1-s^jv_{p} v_{p}^* s^{*j}) \Big)
= \prod_{p\in F} (1-p^{1-\beta}),
\]
and (1) follows on taking limits and using the product formula
\eqref{eulerproductB} for $\zeta_E$.
Since $A \mapsto Q_E AQ_E$ is positive and linear, $\phi_{Q_E }$ is a positive linear functional; part (1) implies that $\phi_{Q_E }(1) = 1$, and we have proved (2).
We next claim that the projections
\[
\{Q_{E ,k,a}:= S^k V_a Q_E V_a^* S^{*k}: a\in \mathbb N^{\times}_E,\ 0\leq k<a\}
\]
are mutually orthogonal. Suppose $a,b \in \mathbb N^{\times}_E $, $0 \leq k <a$ and $0\leq l < b$ satisfy $(k,a)\not=(l, b)$. Then
\begin{equation}\label{prodorthog}
Q_{E ,k,a} Q_{E ,l,b} = S^k V_a Q_E (V_a^* S^{*k} S^l V_b) Q_E V_b^* S^{*l},
\end{equation}
and the covariance relation of Lemma~\ref{covarianceonngenerators} implies that the factor in parentheses either vanishes, in which case we are done, or has the form $S^\gamma V_{b'} V_{a'}^* S^{*\delta}$, where now $a'=a/\gcd(a,b)$ and $b'=b/\gcd(a,b)$. These cannot both equal $1$: if $a'=b'=1$ then $a=b$, and nonvanishing of the factor forces $k=l$, contradicting $(k,a)\not=(l, b)$. We can now use (T1) to extract from either $S^\gamma V_{b'}$ or $S^{\delta}V_{a'}$ a factor
of the form $S^jV_p$ with $p \in E $ and $0\leq j<p$; since $Q_E \leq 1 - S^j V_p V_p^* S^{*j}$, this implies that the right-hand side of \eqref{prodorthog} vanishes, and the claim is proved.
If $\zeta_E (\beta -1) < \infty$, then
\[
\tilde\phi(\sum_{k,a} Q_{E ,k,a}) = \sum_{k,a} a^{-\beta} \tilde\phi(Q_E ) = \sum_a aa^{-\beta}\tilde\phi(Q_E )= \tilde\phi(Q_E ) \zeta_E (\beta -1) = 1,
\]
so that $\tilde \phi$ is carried by the projection $\sum_{k,a} Q_{E ,k,a}$. Thus we have
\[
\phi(T)
=\tilde\phi\Big(\Big(\sum_{k,a}Q_{E ,k,a}\Big)\pi_\phi(T) \Big(\sum_{l,b}Q_{E ,l,b}\Big)\Big)
=\sum_{k,a, l,b} \tilde\phi\big(Q_{E ,k,a} \pi_\phi(T) Q_{E ,l,b}\big).
\]
Now the orthogonality of the projections $Q_{E,k,a}$ and the KMS$_\beta$ condition imply that
\[
\phi(T) = \sum_{k,l,a,b}a^{-\beta} \tilde\phi \big(Q_E V_a^* S^{*k} \pi_\phi(T) S^l V_b Q_E V_b^*S^{*l}S^kV_aQ_E\big);
\]
as in \eqref{prodorthog}, we have $Q_E V_b^*S^{*l}S^kV_aQ_E=0$ unless $(k,a)=(l,b)$, and hence
\[
\phi(T) = \sum_{k=l, \, a=b}a^{-\beta} \tilde\phi \big(Q_E V_a^* S^{*k} \pi_\phi(T) S^l V_b Q_E \big),
\]
which implies the reconstruction formula \eqref{reconstruct}. To get the formula \eqref{reconstructionequationSn} for $\phi(s^n)$, we deduce from the formulas in Lemma~\ref{relsata} that
\[
v_a^* s^{*k}s^ns^k v_a =
\begin{cases} v_a^*s^nv_a=s^{n/a}&\text{ if $a|n$,}\\
0&\text{ otherwise,}
\end{cases}
\]
so that for each $a|n$ there are $a$ equal summands on the right-hand side of \eqref{reconstruct}. This completes the proof of part (3).
\end{proof}
\begin{proposition}[\thmref{maintheorem}(2): uniqueness for $1\leq \beta \leq 2$]\label{unique1-2}
The state $\psi_\beta$ constructed in \proref{productmeasure} is the unique
KMS$_\beta$ state for $1 \leq \beta \leq 2$.
\end{proposition}
Before proving Proposition~\ref{unique1-2} we need to do some preliminary work.
\begin{lemma}\label{checkprojOK}
Suppose $\beta\geq 1$ and $\phi$ is a KMS$_{\beta}$ state of $\mathbb TT(\mathbb N^{\times}nx)$. If $P$ is a projection in the span of $\{s^mv_av^*_bs^{*n}\}$ such that $\sigma_t(P)=P$ for all $t\in \mathbb R$ and $\phi(P)=0$, then $\phi(RPT)=0$ for all $R,T\in \mathbb TT(\mathbb N^{\times}nx)$.
\end{lemma}
\begin{proof}
We first observe that for every $T\in \mathbb TT(\mathbb N^{\times}nx)$ we have
\[
0\leq \phi(PT^*TP)\leq \phi(P\|T\|^2P)=\|T\|^2\phi(P)=0,
\]
and hence $\phi$ vanishes on the corner $P \mathbb TT(\mathbb N^{\times}nx) P$. Next, we consider analytic elements $R=s^mv_av_b^*s^{*n}$ and $T=s^qv_cv_d^*s^{*r}$. Since $z\mapsto P-\sigma_{z}(P)$ is analytic and vanishes on $\mathbb R$, it vanishes everywhere. Thus the KMS$_{\beta}$ condition gives
\[
\phi(RPT)=\phi((RP)(PT))=\phi(PT\sigma_{i\beta}(RP))=(a/b)^{-\beta}\phi(PTRP)=0,
\]
and this extends to arbitrary $R$ and $T$ by continuity of $\phi$.
\end{proof}
\begin{lemma}\label{KMSfactors}
Suppose that $\phi$ is a KMS$_{\beta}$ state of $\mathbb TT(\mathbb N^{\times}nx)$ for some $\beta\geq 1$. Then $\phi$ vanishes on the ideal in $\mathbb TT(\mathbb N^{\times}nx)$ generated by $1-ss^*$. If $\beta=1$, then $\phi$ also vanishes on the ideal generated by $\{1-\sum_{j=0}^{p-1} s^jv_pv_p^*s^{*j}:p\in \mathcal P\}$.
\end{lemma}
\begin{proof}
From \eqref{KMScharacterisation} we have $\phi(1-ss^*)=\phi(1)-\phi(ss^*)=1-1=0$, so the first assertion follows from Lemma~\ref{checkprojOK}. Now suppose $\beta=1$. Then another application of \eqref{KMScharacterisation} shows that
\[
\phi\Big(1-\sum_{j=0}^{p-1} s^jv_pv_p^*s^{*j}\Big)=1-\sum_{j=0}^{p-1}p^{-1}=1-p(p^{-1})=0,
\]
so the second assertion also follows from Lemma~\ref{checkprojOK}.
\end{proof}
\begin{proof}[Proof of Proposition~\ref{unique1-2}]
Let $\phi$ be a KMS$_\beta$ state, and
suppose first that $1< \beta \leq 2$. For every finite set $E\subset\mathcal P$ and every $n > 0$, the sum in \eqref{reconstructionequationSn}
has finitely many summands, each satisfying $a^{1-\beta}|\phi_{Q_E } (s^{n/a}) | \leq a^{1-\beta}$. Thus, since $\zeta_E (\beta-1) \to \infty$ as $E $ increases,
the right-hand side of \eqref{reconstructionequationSn} tends to zero as $E $ increases through a listing of $\mathcal P$. Thus $\phi(s^n) = 0$ for every $n>0$, and taking adjoints gives $\phi(s^{*n})=0$ as well; now \lemref{KMScharacterisationlemma} implies that
\[
\phi(s^nv_bv_a^*s^{*m})=\begin{cases}
0&\text{ unless $a=b$ and $m=n$}\\
a^{-\beta}&\text{ if $a=b$ and $m=n$.}
\end{cases}
\]
Comparing this with \eqref{charpsibeta} shows that $\phi = \psi_\beta$.
Now suppose $\beta = 1$. Then Lemma~\ref{KMSfactors} implies that $\phi$
factors through the boundary quotient, and thus comes from a state of Cuntz's $\mathcal Q_\mathbb N$. Thus the result follows from \cite[Theorem 4.3]{cun2}.
\end{proof}
For $\beta >2$ we can take $E = \mathcal P$ in Lemma~\ref{phirestrictedtoQB}, and deduce that a KMS$_\beta$ state is determined by its conditioning to $Q:=Q_\mathcal P$. We shall use this to prove that the map described in part (3) of \thmref{maintheorem} is surjective.
Since KMS states vanish on the ideal generated by $1-ss^*$, the state $\phi$, the GNS-representation $\pi_\phi$, and the conditional state $\phi_{Q}$ all vanish on that ideal. In particular, this implies that the restriction of $\phi_{Q}$
to $C^*(s)$ factors through the quotient map $q:C^*(s)\to C(\mathbb T)$, and hence there is a probability measure $\mu = \mu_\phi$ on $\mathbb T$ such that
\begin{equation}\label{phiQ}
\phi_{Q}(s^n)=\int_{\mathbb T}q(s^n)\,d\mu=\int_{\mathbb T} z^n\,d\mu(z)\ \text{ for $n\in\mathbb Z$}.
\end{equation}
\begin{proposition}[\thmref{maintheorem}(3): the map $\mu \to \psi_{\beta,\mu}$
is a bijection] \label{KMSfromrestriction}
Let $\beta>2$ and take $Q:= Q_\mathcal P$. If $\phi$ is a KMS$_\beta$ state and
$\mu_\phi$ is the probability measure on $\mathbb T$ such that \eqref{phiQ} holds, then $\phi = \psi_{\beta,\mu_\phi}$. Conversely, if $\mu$ is a probability measure on $\mathbb T$, then $\mu = \mu_{\psi_{\beta, \mu}}$.
\end{proposition}
\begin{proof}
By \lemref{KMScharacterisationlemma}, to prove the first assertion
it suffices to check that $\phi$ and $ \psi_{\beta,\mu}$ agree on positive powers of $s$ (taking adjoints then shows that they also agree on powers of $s^*$). Since $\zeta(\beta -1) <\infty$, the reconstruction formula
\eqref{reconstructionequationSn} gives
\begin{align*}
\phi(s^n)&=\frac{1}{\zeta(\beta-1)}\sum_{\{a\in \mathbb N^{\times}\;:\;a|n\}}a^{1-\beta}\phi_Q(s^{n/a})\\
&=\frac{1}{\zeta(\beta-1)}\sum_{\{a\in \mathbb N^{\times}\;:\;a|n\}} a^{1-\beta}\int_{\mathbb T} z^{n/a}\,d\mu(z),
\end{align*}
which by \eqref{KMScomputation} is precisely $\psi_{\beta,\mu}(s^n)$.
For the converse, we show that the moment sequences
$\int_\mathbb T z^m \, d\mu(z) $ and $\int_\mathbb T z^m \, d \mu_{\psi_{\beta, \mu}}(z)$
for the two measures coincide, and then an application of the Riesz representation theorem shows that $\mu=\mu_{\psi_{\beta, \mu}}$. Since the measures are positive it suffices to deal with
$m\geq 0$. We know that \eqref{KMScomputation} holds for both measures, for
$\mu$ by definition and for $ \mu_{\psi_{\beta, \mu}}$ by the first part.
When $m = 1$, we take $n = 0$ and $a = b = 1$ in \eqref{KMScomputation}; the sum then reduces to the single term with $x=1$, and we can deduce that the first moments coincide:
\[
\int_\mathbb T z \, d\mu(z) = \zeta(\beta-1) {\psi_{\beta, \mu}}(s) =\int_\mathbb T z \, d \mu_{\psi_{\beta, \mu}}(z).
\]
When $m= p \in \mathcal P$, equation \eqref{KMScomputation} gives
\begin{align}
\int_\mathbb T z^p \, d\mu(z) + p^{1-\beta} \int_\mathbb T z \, d\mu(z)& = \zeta(\beta-1) {\psi_{\beta, \mu}}(s^p)\label{momentcalc}\\
&=\int_\mathbb T z^p \, d \mu_{\psi_{\beta, \mu}}(z) + p^{1-\beta} \int_\mathbb T z \, d \mu_{\psi_{\beta, \mu}}(z).\notag
\end{align}
Since we already know that $\int_\mathbb T z \, d\mu(z) = \int_\mathbb T z \, d \mu_{\psi_{\beta, \mu}}(z)$,
we conclude that
\[
\int_\mathbb T z^p \, d\mu(z) = \int_\mathbb T z^p \, d \mu_{\psi_{\beta, \mu}}(z).
\]
We can extend this result to non-prime $m\in \mathbb N^{\times}$ by an induction argument on the number of prime factors of $m$ (counting multiplicity); the key inductive step is established by an argument like that of \eqref{momentcalc}. Thus the moments are equal for all $m$, and the result follows.
\end{proof}
This concludes the proof of \thmref{maintheorem}.
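To close this section, here is a small numerical illustration (in Python; illustration only) of the induction in the proof of Proposition~\ref{KMSfromrestriction}: the values $\psi_{\beta,\mu}(s^m)$ determine the moments of $\mu$ recursively. The measure $\mu$ below (an equal mixture of two point masses) and the truncation of $\zeta(\beta-1)$ are arbitrary sample choices.
\begin{verbatim}
# Illustration only: recover the moments of mu from the numbers psi(s^m) using
#   zeta(beta-1) * psi(s^m) = sum_{x | m} x^{1-beta} * c_{m/x},
# where c_k denotes the k-th moment of mu, by solving for c_m recursively.
import cmath

beta = 2.6
zs = [cmath.exp(2j * cmath.pi * t) for t in (0.15, 0.4)]    # mu = (d_z1 + d_z2)/2

def moment(k):
    return sum(z ** k for z in zs) / len(zs)

zeta = sum(x ** (1 - beta) for x in range(1, 200000))       # ~ zeta(beta-1)

def psi(m):   # formula (KMScomputation) with a = b = 1 and exponent m
    return sum(x ** (1 - beta) * moment(m // x)
               for x in range(1, m + 1) if m % x == 0) / zeta

recovered = {}
for m in range(1, 13):
    c = zeta * psi(m) - sum(x ** (1 - beta) * recovered[m // x]
                            for x in range(2, m + 1) if m % x == 0)
    recovered[m] = c
    assert abs(c - moment(m)) < 1e-9
print("moments of mu recovered from the values psi(s^m)")
\end{verbatim}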
\appendix
\section{Uniqueness of equilibrium for the Bost-Connes algebra}\label{bcuniqueness}
The Hecke $C^*$-algebra $\mathcal C_{\Q}$ of Bost and Connes \cite{bos-con} is the universal unital $C^*$-algebra generated by a unitary representation $e:\mathbb Q/\mathbb Z\to U(\mathcal C_{\Q})$ and an isometric representation $\mu:\mathbb N^{\times}\to \mathcal C_{\Q}$ satisfying
\[
\frac{1}{n}\sum_{\{s\in\mathbb Q/\mathbb Z\,:\,ns=r\}} e(s)=\mu_ne(r)\mu_n^*
\]
(see \cite[Corollary~2.10]{bcalg}). The isometric representation $\mu$ is then automatically Nica covariant \cite[Proposition~2.8]{bcalg}, so there is a natural homomorphism $\pi_\mu:\mathbb TT(\mathbb N^{\times})\to \mathcal C_{\Q}$, and the main theorem of \cite{quasilat} implies that $\pi_\mu$ is injective. The unitary representation $e$ induces a unital homomorphism $\pi_e:C^*(\mathbb Q/\mathbb Z)\to \mathcal C_{\Q}$. Since $\mathbb Q/\mathbb Z$ is an abelian group with dual isomorphic to the additive group $\widehat\mathbb Z$ of integral ad\`eles, we can view $\pi_e$ as a homomorphism $\pi:C(\widehat\mathbb Z)\to\mathcal C_{\Q}$. There is an action $\alpha$ of $\mathbb N^{\times}$ on $C(\widehat \mathbb Z)$ defined by
\[
\alpha_n(f)(z)=\begin{cases}f(n^{-1}z)&\text{if $n$ divides $z$ in $\widehat\mathbb Z$}\\
0&\text{otherwise,}\end{cases}
\]
and then the relations defining $\mathcal C_{\Q}$ say that $(\pi,\mu)$ satisfies
\begin{equation}\label{semigpcov}
\pi(\alpha_n(f))=\mu_n\pi(f)\mu_n^*,
\end{equation}
and that $(\pi,\mu)$ is universal for such pairs (see, for example, \cite[Proposition~32]{diri}). Next, note that the endomorphism $\gamma_n$ of $C(\widehat\mathbb Z)$ defined by $\gamma_n(f)(z)=f(nz)$ satisfies $\alpha_n\circ\gamma_n(f)=\alpha_n(1)f$, so the embedding $\pi:C(\widehat\mathbb Z)\to\mathcal C_{\Q}$ satisfies
\begin{align}\label{revcov}
\mu_n^*\pi(f)\mu_n&=\mu_n^*\mu_n\mu_n^*\pi(f)\mu_n=\mu_n^*\pi(\alpha_n(1)f)\mu_n=\mu_n^*\pi(\alpha_n\circ\gamma_n(f))\mu_n\\
&=\mu_n^*\big(\mu_n\pi(\gamma_n(f))\mu_n^*\big)\mu_n=\pi(\gamma_n(f)).\notag
\end{align}
\begin{example}
In Theorem~\ref{qnisboundaryquotient} we identified $\mathcal Q_\mathbb N$ as the boundary quotient $C(\partial\Omega)\rtimes({\mathbb Q \rtimes \mathbb Q^*_+})$. Proposition~\ref{boundary} shows that the homeomorphism of $\widehat\mathbb Z$ onto $\partial\Omega$ carries the action of $\mathbb N^{\times}\subset \widehat\mathbb Z$ by left multiplication (in the ring $\widehat\mathbb Z$) into the left action of $\mathbb N^{\times}\subset \mathbb Q_+^*\subset {\mathbb Q \rtimes \mathbb Q^*_+}$ on $\partial\Omega$. Thus if we use this homeomorphism to define a homomorphism $\pi:C(\widehat\mathbb Z)\to C(\partial\Omega)\rtimes({\mathbb Q \rtimes \mathbb Q^*_+})$, then the pair $(\pi,v|_{\mathbb N^{\times}})$ satisfies \eqref{semigpcov}, and hence gives a homomorphism $\pi\times v|_{\mathbb N^{\times}}$ of $\mathcal C_{\Q}$ into $\mathcal Q_\mathbb N$. Theorem~3.7 of \cite{bcalg} implies that $\pi\times v|_{\mathbb N^{\times}}$ is injective. (Cuntz gave a slightly different description of this embedding in \cite[Remark~3.5]{cun2}.)
\end{example}
The universal property of $\mathcal C_{\Q}$ implies that there is an action $\sigma:\mathbb R\to \operatorname{Aut}\mathcal C_{\Q}$ which fixes the subalgebra $C^*(\mathbb Q/\mathbb Z)\cong C(\widehat\mathbb Z)$ and satisfies $\sigma_t(\mu_n)=n^{it}\mu_n$. Our goal in this short appendix is to use the ideas of \secref{secsurjectivity} to give a relatively elementary proof of the following theorem, which is a key part of the Bost-Connes analysis of $\mathcal C_{\Q}$. This approach bypasses the technical proofs
of \cite[Lemmas~27(b) and~28]{bos-con} and of \cite[Lemma~45]{diri}.
\begin{theorem}\label{BCbeta<1}
For $\beta\in (0,1]$, the system $(\mathcal C_{\Q},\mathbb R,\sigma)$ has at most one KMS$_\beta$ state.
\end{theorem}
As in \cite{bos-con}, the idea is to prove that a KMS$_\beta$ state is invariant under the action of a large symmetry group as well as the dynamics. When we view $\mathcal C_{\Q}$ as being generated by $C(\widehat\mathbb Z)$ and an isometric representation $\mu$ of $\mathbb N^{\times}$, the symmetry group is the multiplicative group $\widehat\mathbb Z^*$ of invertible elements in the ring $\widehat\mathbb Z$, which acts on $C(\widehat \mathbb Z)$ by $\tau_u(f)(z)=f(uz)$. The automorphisms $\tau_u$ commute with the endomorphisms $\alpha_n$, and hence give an action $\theta:\widehat\mathbb Z^*\to \operatorname{Aut}\mathcal C_{\Q}$. This action commutes with the dynamics $\sigma$, and we know from Proposition~21 of \cite{bos-con} (or Propositions 30 and 32 of \cite{diri}) that the fixed-point algebra $\mathcal C_{\Q}^\theta$ is the copy of the Toeplitz algebra $\mathbb TT(\mathbb N^{\times})$ in $\mathcal C_{\Q}$.
The key lemma is:
\begin{lemma}\label{KMSinv}
Any KMS$_\beta$ state for $0< \beta \leq 1$ is $\widehat\mathbb Z^*$-invariant, cf. \cite[Lemma 27(c)]{bos-con}.
\end{lemma}
Given this lemma, we know that any KMS$_\beta$ state $\psi$ factors through the expectation $E_\theta$ onto the fixed point algebra $\mathcal C_{\Q}^\theta=\mathbb TT(\mathbb N^{\times})$, and hence is determined by its values on $\mathbb TT(\mathbb N^{\times})$. Since $\mathbb N^{\times}$ is quasi-lattice ordered (in fact it is lattice ordered), we have $\mathbb TT(\mathbb N^{\times})=\overline{\operatorname{span}}\{\mu_m\mu_n^*\}$. Since the KMS state $\psi$ is invariant for the dynamics $\sigma$, and since $\sigma_t(\mu_m\mu_n^*)=(m/n)^{it}\mu_m\mu_n^*$, we must have $\psi(\mu_m\mu_n^*)=0$ for $m\not= n$, and $\psi$ is determined by its values on $\operatorname{span}\{\mu_n\mu_n^*\}$. But there it is completely determined by the KMS condition:
\[
\psi(\mu_n\mu_n^*)=\psi(\mu_n^*\sigma_{i\beta}(\mu_n))=n^{-\beta}\psi(\mu_n^*\mu_n)=n^{-\beta}\psi(1)=n^{-\beta}.
\]
So there can only be one such state, and Lemma~\ref{KMSinv} implies Theorem~\ref{BCbeta<1}.
It remains for us to prove \lemref{KMSinv}. The key ingredient is an analogue of
\lemref{phirestrictedtoQB} for the Bost-Connes system, which stems from the observation that the reconstruction formula from \cite[Theorem 20]{diri}
also works for small $\beta$ if one restricts to finitely many primes, as Neshveyev did in the proof of the Proposition in \cite{nes}.
Suppose that $\phi$ is a KMS$_\beta$ state of $(\mathcal{C}_{\mathbb Q}, \sigma)$. For $E\subset \mathcal P$ finite, we set $Q_E := \prod_{p\in E } (1- \mu_p \mu_p^*)$. For distinct primes $p,q$ we have
\[
\phi(\mu_p \mu_p^* \mu_q \mu_q^*) = \phi(\mu_{pq}\mu_{pq}^*) = (pq)^{-\beta} =
p^{-\beta} q^{-\beta} = \phi(\mu_p \mu_p^*) \phi( \mu_q \mu_q^*) ,
\]
and thus
\[
\phi(Q_E ) = \prod_{p\in E } (1 - \phi(\mu_p\mu_p^*)) = \prod_{p\in E } (1 - p^{-\beta}) = \zeta_E (\beta)^{-1},
\]
as defined in \eqref{eulerproductB}. We define the conditional state $\phi_{Q_E }$ ($\phi$ given $Q_E $) by
\[
\phi_{Q_E }(\cdot) := \zeta_E (\beta) \phi(Q_E \, \cdot \, Q_E ).
\]
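As a purely numerical aside (it plays no part in the argument), the finite Euler product $\zeta_E(\beta)=\sum_{n\in \mathbb N^{\times}_E } n^{-\beta}=\prod_{p\in E }(1-p^{-\beta})^{-1}$ underlying this computation is easy to check by machine. In the sketch below the set $E$, the exponent $\beta$ and the truncation level $K$ are arbitrary illustrative choices.
\begin{verbatim}
# Illustrative check of the finite Euler product
#   zeta_E(beta) = sum over E-smooth n of n^{-beta}
#                = prod over p in E of (1 - p^{-beta})^{-1}.
from itertools import product
from math import prod

E = [2, 3, 5]    # a finite set of primes (any finite E works)
beta = 0.8       # 0 < beta <= 1
K = 60           # exponent cutoff; the neglected tails are geometrically small

lhs = sum(prod(p ** e for p, e in zip(E, exps)) ** (-beta)
          for exps in product(range(K + 1), repeat=len(E)))
rhs = prod(1.0 / (1.0 - p ** (-beta)) for p in E)
print(lhs, rhs)  # the two values agree to many digits
\end{verbatim}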
\begin{lemma}\label{reconstructionlemma}
If $\phi$ is a KMS$_\beta$ state of $(\mathcal{C}_{\mathbb Q},\sigma)$ and $E$ is a finite subset of $\mathcal P$, then
\[
\phi(T) = \sum_{n\in \mathbb N^{\times}_E } \frac{n^{-\beta}}{\zeta_E (\beta)} \phi_{Q_E } (\mu_n^* T \mu_n)\ \text{ for $T\in \mathcal{C}_{\mathbb Q}$.}
\]
\end{lemma}
\begin{proof}
We first claim that the projections $\mu_n Q_E \mu_n^*$ for $n\in \mathbb N^{\times}_E $ are mutually orthogonal. To see this, suppose $m,n\in \mathbb N^{\times}_E$ and $m\not=n$. Then there exists $q\in E$ such that $e_q(m)\not= e_q(n)$; say we have $e_q(m)<e_q(n)$, and write $m=m'q^{e_q(m)}$, $n=n'q^{e_q(n)}$. Then $\gcd(q,m')=1$, so $\mu_{m'}^*\mu_q= \mu_q\mu_{m'}^*$ and
\begin{align*}
( \mu_m Q_E \mu_m^*) ( \mu_n Q_E \mu_n^*)
&= \mu_m Q_E \mu_{m'}^* \mu_q^{e_q(n)-e_q(m)}\mu_{n'} Q_E \mu_n^*\\
&=\mu_m Q_E \mu_q^{e_q(n)-e_q(m)} \mu_{m'}^*\mu_{n'} Q_E \mu_n^*
\end{align*}
vanishes because the factor $(1-\mu_q\mu_q^*)\mu_q$ of $Q_E \mu_q^{e_q(n)-e_q(m)}$ does.
The normal extension $\tilde\phi$ of $\phi$ to $\pi_\phi(\mathcal{C}_{\mathbb Q})''$ satisfies
\[
\tilde\phi \Big(\sum_{n \in \mathbb N^{\times}_E } \pi_\phi\big(\mu_n Q_E \mu_n^*\big)\Big) = \sum_{n \in \mathbb N^{\times}_E } \phi(\mu_n Q_E \mu_n^*)
= \sum_{n \in \mathbb N^{\times}_E } n^{-\beta} \phi(Q_E ) = 1,
\]
so the state $\tilde\phi$ is supported by the projection $\sum_{m \in \mathbb N^{\times}_E } \mu_m Q_E \mu_m^*$, and
\begin{align*}
\phi(T) &=\tilde\phi(\pi_\phi(T))\\
&=\tilde \phi\bigg(\Big(\sum_{m \in \mathbb N^{\times}_E }\pi_\phi\big( \mu_m Q_E \mu_m^*\big)\Big) \pi_\phi(T)\Big(\sum_{n \in \mathbb N^{\times}_E } \pi_\phi\big(\mu_n Q_E \mu_n^*\big)\Big)\bigg)\\
&= \sum_{m,n \in \mathbb N^{\times}_E } \phi(\mu_m Q_E \mu_m^* T \mu_n Q_E \mu_n^*).
\end{align*}
The KMS condition \eqref{defKMS} (with $c = \mu_m Q_E \mu_m^*$) and the orthogonality of the $\{\mu_n Q_E \mu_n^*\}$ imply that the terms with $m \not=n$ vanish, and another application of the KMS condition gives
\[
\phi(T) =\sum_{n \in \mathbb N^{\times}_E } \phi( Q_E \mu_n^* T \mu_n Q_E \mu_n^*\,n^{-\beta}\mu_n) = \sum_{n \in \mathbb N^{\times}_E } n^{-\beta} \phi( Q_E \mu_n^* T \mu_n Q_E ).\qedhere
\]
\end{proof}
\begin{proof}[Proof of \lemref{KMSinv}]
As indicated at the beginning of \cite[Section 7]{bos-con}, to prove that a state is $\widehat\mathbb Z^*$-invariant it suffices to show that
it vanishes on the spectral subspaces
\[
C(\widehat\mathbb Z)_\chi : = \{f \in C(\widehat\mathbb Z) : \theta_g ( f ) = \chi(g) f \text{ for all } g \in \widehat\mathbb Z^*\}
\]
for nontrivial characters $\chi $ of $\widehat\mathbb Z^*$. So suppose that\footnote{Sorry about the hats. The one in $\widehat\mathbb Z^*$ is the hat in $\widehat\mathbb Z$, which is standard number-theoretic notation for the integral ad\`eles, and the outside one in $(\widehat\mathbb Z^*)^\wedge$ is the standard harmonic-analytic notation for the Pontryagin dual.} $\chi\in(\widehat\mathbb Z^*)^\wedge$ and $\chi\not=1$. Since $\widehat\mathbb Z^*$ is the inverse limit
$\varprojlim(\mathbb Z/n\mathbb Z)^*$, the dual $(\widehat\mathbb Z^*)^\wedge$ is the direct limit $\varinjlim((\mathbb Z/n\mathbb Z)^*)^\wedge$, and there exists $m$ such that $\chi$ belongs to $((\mathbb Z/m\mathbb Z)^*)^\wedge$ --- in other words, such that $\chi$ factors through the canonical map $r\mapsto r(m)$ from $\widehat\mathbb Z^*$ to $(\mathbb Z/m\mathbb Z)^*$. Let $F$ be a finite set of primes containing all the prime factors of $m$, so that $m\in\mathbb N^{\times}_F$. When we identify $\widehat\mathbb Z$ with $\prod_{p\in\mathcal P}\mathbb Z_p$, the subalgebras $C_F:=C(\prod_{p\in F}\mathbb Z_p)\otimes 1$ span a dense subspace of $C(\widehat\mathbb Z)$; since the projection $E_\chi:f\mapsto \int_{\widehat\mathbb Z^*} \theta_u(f)\overline{\chi(u)}\,du$ of $C(\widehat\mathbb Z)$ onto $C(\widehat\mathbb Z)_\chi$ is continuous, the union $\bigcup_{F}(C_F\cap C(\widehat\mathbb Z)_\chi)$ is dense in $C(\widehat\mathbb Z)_\chi$. Thus it suffices to prove that $\phi(\pi(f))=0$ for every $f\in C_F\cap C(\widehat\mathbb Z)_\chi$ (where $\pi$ is the embedding of $C(\widehat\mathbb Z)$ in $\mathcal C_{\Q}$ discussed at the beginning of the section).
As in the proof of \cite[Lemma 27]{bos-con}, see also \cite[pages~369--370]{diri}, we modify the embedding of $\mathbb N^{\times}$ in $\widehat\mathbb Z$ so that every positive integer lands in $\widehat\mathbb Z^*$: for $q\in \mathcal P$ we take $u_q$ to be the element of $\prod_p \mathbb Z_p^*$ defined by
\[
(u_q)_p =
\begin{cases}q & \text{ if $p \neq q$} \\
1 & \text{ if } p = q,
\end{cases}
\]
and extend the map $q\mapsto u_q$ to $\mathbb N^{\times}$ by prime factorisation. Notice that if $p\nmid n$, then $(u_n)_p=n$ in $\mathbb Z_p$, so for functions $f\in C_F$ and $n\in \mathbb N^{\times}_{\mathcal P\setminus F}$, we have $\theta_{u_n}(f)=\gamma_n(f)$ (where $\gamma_n$ is the left inverse for $\alpha_n$ discussed at the start of the section), and hence \eqref{revcov} implies that $\mu_n^*\pi(f)\mu_n=\pi(\theta_{u_n}(f))$.
Now suppose that $F$ is a fixed finite set of primes containing the prime factors of $m$, and take $f\in C_F\cap C(\widehat\mathbb Z)_\chi$. Then for each finite subset $E$ of $\mathcal P\setminus F$, \lemref{reconstructionlemma} implies that
\begin{align}\label{calcuserecontrs}
\phi(\pi(f))&= \sum_{n\in \mathbb N^{\times}_E } \frac{n^{-\beta}}{\zeta_E (\beta)} \phi_{Q_E } (\mu^*_n\pi(f)\mu_n)\\
&=\sum_{n\in \mathbb N^{\times}_E } \frac{n^{-\beta}}{\zeta_E (\beta)} \phi_{Q_E } (\pi(\theta_{u_n}(f)))\notag\\
&= \phi_{Q_E } (\pi(f)) \sum_{n\in \mathbb N^{\times}_E } \frac{n^{-\beta}}{\zeta_E (\beta)} \chi(u_n).\notag
\end{align}
Since $n \mapsto \chi(u_n)$ is a nontrivial Dirichlet character modulo $m$, we have
\[
\sum_{n\in \mathbb N^{\times}_E } n^{-\beta} \chi(u_n) = \prod_{p\in E } (1 - p^{-\beta} \chi(u_p))^{-1}\ \text{ for $\beta>0$;}
\]
as $E $ increases through a listing of $\mathcal P \setminus F$, this product converges to
$\prod_{p\in \mathcal P \setminus F} (1 - p^{-\beta} \chi(u_p))^{-1}$, which is finite (by, for example, Theorem 5 on page 161 of \cite{lan}). On the other hand, since $\beta \leq 1$, we have
$\zeta_E (\beta) \to \zeta_{\mathcal P \setminus F} (\beta) = \infty$ as $E$ increases. Thus \eqref{calcuserecontrs} implies that $\phi(\pi(f)) = 0$.
\end{proof}
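The limit used in the last step of the proof is also easy to observe numerically. The sketch below is an illustration only (it is not used anywhere above): purely for concreteness it takes $m=4$ and $F=\{2\}$, so that $n\mapsto\chi(u_n)$ is the nontrivial Dirichlet character modulo $4$, and $\beta=1$; the partial products $\prod_{p\in E}(1-p^{-\beta})/(1-\chi(u_p)p^{-\beta})$ from \eqref{calcuserecontrs} then decrease towards $0$ as $E$ runs through longer and longer initial segments of the odd primes.
\begin{verbatim}
# Numerical illustration: the factor multiplying phi_{Q_E}(pi(f)) in
# (calcuserecontrs) tends to 0.  Here chi is the nontrivial character mod 4,
# beta = 1, and E runs through initial segments of the odd primes.
from sympy import primerange

beta = 1.0
chi = lambda p: 1 if p % 4 == 1 else -1   # nontrivial Dirichlet character mod 4

ratio = 1.0
for i, p in enumerate(primerange(3, 200000), start=1):
    ratio *= (1 - p ** (-beta)) / (1 - chi(p) * p ** (-beta))
    if i % 2000 == 0:
        print(i, ratio)                   # decreases (slowly) towards 0
\end{verbatim}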
\end{document}
|
\begin{document}
\preprint{APS/123-QED}
\title{All-silicon quantum light source by embedding an atomic emissive center in a nanophotonic cavity}
\author{Walid Redjem}
\altaffiliation{Equal contribution}
\affiliation{Department of Electrical Engineering and Computer Sciences, University of California Berkeley, Berkeley, CA 94720, USA}
\author{Yertay Zhiyenbayev}
\altaffiliation{Equal contribution}
\affiliation{Department of Electrical Engineering and Computer Sciences, University of California Berkeley, Berkeley, CA 94720, USA}
\author{Wayesh Qarony}
\altaffiliation{Equal contribution}
\affiliation{Department of Electrical Engineering and Computer Sciences, University of California Berkeley, Berkeley, CA 94720, USA}
\author{Vsevolod Ivanov}
\affiliation{Accelerator Technology and Applied Physics Division, Lawrence Berkeley National Laboratory, Berkeley, California 94720, USA}
\author{Christos Papapanos}
\affiliation{Department of Electrical Engineering and Computer Sciences, University of California Berkeley, Berkeley, CA 94720, USA}
\author{Wei Liu}
\affiliation{Accelerator Technology and Applied Physics Division, Lawrence Berkeley National Laboratory, Berkeley, California 94720, USA}
\author{Kaushalya Jhuria}
\affiliation{Accelerator Technology and Applied Physics Division, Lawrence Berkeley National Laboratory, Berkeley, California 94720, USA}
\author{Zakaria Al Balushi}
\affiliation{Department of Materials Science and Engineering, University of California Berkeley, Berkeley, California 94720, USA}
\affiliation{Materials Sciences Division, Lawrence Berkeley National Laboratory, Berkeley, California 94720, USA}
\author{Scott Dhuey}
\affiliation{Molecular Foundry, Lawrence Berkeley National Laboratory, Berkeley, California 94720, USA}
\author{Adam Schwartzberg}
\affiliation{Molecular Foundry, Lawrence Berkeley National Laboratory, Berkeley, California 94720, USA}
\author{Liang Tan}
\affiliation{Molecular Foundry, Lawrence Berkeley National Laboratory, Berkeley, California 94720, USA}
\author{Thomas Schenkel}
\affiliation{Accelerator Technology and Applied Physics Division, Lawrence Berkeley National Laboratory, Berkeley, California 94720, USA}
\author{Boubacar Kanté}
\email{Corresponding author: [email protected]}
\affiliation{Department of Electrical Engineering and Computer Sciences, University of California Berkeley, Berkeley, CA 94720, USA}
\affiliation{Materials Sciences Division, Lawrence Berkeley National Laboratory, Berkeley, California 94720, USA}
\date{\today}
\begin{abstract}
Silicon is the most scalable optoelectronic material, and it has revolutionized our lives in many ways. The prospect of quantum optics in silicon is an exciting avenue because it has the potential to address the scaling and integration challenges, the most pressing questions facing quantum science and technology. We report the first all-silicon quantum light source based on a single atomic emissive center embedded in a silicon-based nanophotonic cavity. We observe a more than 30-fold enhancement of luminescence, a near unity atom-cavity coupling efficiency, and an 8-fold acceleration of the emission from the quantum center. Our work opens avenues for large-scale integrated all-silicon cavity quantum electrodynamics and quantum photon interfaces with applications in quantum communication, sensing, imaging, and computing.
\end{abstract}
\maketitle
Quantum science and technologies promise to revolutionize our societies \cite{nielsen_quantum_2002,macfarlane_quantum_2003}. In the search for the ideal quantum information processing platform, “scaling” is perhaps the most challenging question due to the fundamental but contradictory requirements that quantum systems be simultaneously isolated from the environment and controllable in large arrays of interacting qubits \cite{de_leon_materials_2021,arute_quantum_2019}. Among many quantum information platforms ranging from superconducting qubits to trapped ions, quantum photons play a fundamental role because they are necessary for future quantum networks to enable communication between distant quantum nodes \cite{bouwmeester_experimental_1997,clarke_superconducting_2008,devoret_superconducting_2013,monroe_scaling_2013,kimble_quantum_2008}. Quantum photons have been generated from an extensive range of platforms, including quantum dots, color centers in diamond such as NV, SiV, and SnV, and defects in two-dimensional materials such as hBN \cite{santori_indistinguishable_2002,gruber_scanning_1997,lee_deterministic_2014,hayee_revealing_2020,xu_creating_2021}. The scaling challenge is currently being addressed using hybrid material platforms and metamaterials in which quantum light sources are optimized and integrated into more complex scalable systems, following the example of heterogeneous integration in the classical domain \cite{elshaari_hybrid_2020,wan_large-scale_2020,carter_quantum_2013,santiago-cruz_resonant_2022}. However, the challenge for integrating quantum devices is more significant than for classical systems because each interface introduces losses and decoherence that need to be minimized. It is thus fundamental to minimize the number of interfaces by deeply integrating intrinsically scalable platforms.
Silicon is currently the most scalable optoelectronic material. Despite the lack of efficient classical light sources based on silicon, emissive centers have been observed in silicon since the end of the 1980s \cite{davies_optical_1989}. It is only during the last two years that single centers in silicon have been isolated \cite{redjem_single_2020,hollenbach_engineering_2020, durand_broad_2021,higginbottom_optical_2022}. Since then, emissive centers in silicon have been coupled to waveguides, and more recently, an ensemble of centers has been integrated into ring resonators \cite{prabhu_individually_2022,deabreu_waveguide-integrated_2022,komza_indistinguishable_2022,lefaucher_cavity-enhanced_2022}. However, deterministic single photon sources based on silicon emissive centers have remained elusive due to the lack of controlled manufacturing approaches and the complexity of materials interfaces after device fabrication. We report the first all-silicon quantum light source based on an atomic emissive center in a silicon nanophotonic cavity. The manufacturing of the centers in silicon-on-insulator substrates, with controlled densities and dipole orientations, enables their overlap with designed nanophotonic cavities. We demonstrate the successful alignment of a quantum defect and nanophotonic cavity dipole moments and tune the nanophotonic cavity to overlap its resonance with the zero-phonon line of the silicon-based quantum defect. We achieve a more than 30-fold enhancement of the luminescence intensity and an 8-fold acceleration of the single photon emission rate. Our results open the door to large-scale integrated all-silicon quantum optics devices and systems for applications in quantum communication, sensing, imaging, and computing.
The proposed all-silicon atom-cavity system, presented in Fig. \ref{fig:fig1}A, consists of a single defect in silicon embedded in a photonic crystal (PhC) defect cavity. The PhC cavity consists of three missing holes in a suspended triangular lattice of holes \cite{sakoda_optical_2005}. The atomic defect, the G-center in silicon, is made of two substitutional carbon atoms (black spheres) bound to the same silicon self-interstitial (blue sphere). The manufacturing process starts with the implantation of carbon ($^{13}$C) with an energy of 36 keV in a commercial 230 nm thick silicon-on-insulator (SOI) wafer. The implantation is followed by electron beam lithography, dry etching, thermal annealing, and wet etching (see Supplementary Materials). Secondary ion mass spectroscopy (SIMS) measurements indicate that the implanted carbon and the atomic centers created during the annealing process are located in the middle of the silicon layer (see Supplementary Materials).
The dipole moment of the center is computed by density functional theory \cite{ivanov_effect_2022}. It is in the plane as indicated by the red arrow in the inset of Fig.\ref{fig:fig1}A. The G-center is one of a broad diversity of recently observed emissive centers in silicon and its electronic structure, presented in Fig. \ref{fig:fig1}B, comprises a ground singlet state, a dark excited triplet state, and an excited singlet state \cite{udvarhelyi_identification_2021}. The computed electromagnetic mode of the cavity for the transverse electric polarization is superimposed on the sketch of the PhC, evidencing the high confinement of the electromagnetic field in the region of missing holes in the triangular lattice. This polarization matches the orientation of the atomic defect dipole moment. The deterministic positioning of atomic-scale defects in photonic cavities has been challenging for most platforms and has not yet been achieved for silicon-emissive centers. It requires not only overlap of the quantum defect with highly confined optical modes but also the alignment of the dipole moments of the atom and the cavity. To overcome this challenge in our platform, we first investigated the scalable manufacturing of single emissive centers with controllable densities and inhomogeneous broadening. We identified an annealing time window below which only ensembles of centers are created and beyond which all single centers are destroyed (see Supplementary Materials). We also find that shorter annealing time within that window minimizes the inhomogeneous broadening of the zero-phonon line (ZPL) of the quantum emitters, a critical requirement for overlapping the ZPL with a designed nanophotonic resonance to enhance light-matter interaction (see Supplementary Materials). The controlled density and inhomogeneous broadening of quantum centers increase the probability of overlap with an array of finite-size photonic crystal cavities. We subsequently investigated the polarization response of created emissive centers, and a statistical analysis presented in the supplementary materials indicates a preferential orientation of the emitters in silicon. We then fabricate PhC cavities so that the dipole moments of the cavities and centers align. Fig. \ref{fig:fig1}C presents a scanning electron microscope (SEM) image of a fabricated silicon-based atom-cavity system. The inset presents the cavity with a mode volume of $0.66(\lambda_{\text{cav}}/n)^3$. The successful embedding of a single center in a cavity involved a controlled sequence of CMOS-compatible fabrication steps.
Fig. \ref{fig:fig2}A presents the photoluminescence (PL) raster scan of a device with bright emission from a color center within the boundaries of the cavity. The dashed white line indicates the boundary of the finite PhC, and the suspended PhC is surrounded by the silicon-on-insulator (SOI) wafer. The photoluminescence of the photonic device, presented in Fig. \ref{fig:fig2}B, exhibits a sharp peak at ~1275 nm and a blue-shifted broader peak at ~1272 nm, corresponding to the ZPL of the color-center and the cavity resonance, respectively. The cavity is further characterized in reflectivity using resonant scattering measurements in Fig. \ref{fig:fig2}C. The cavity is illuminated with a linearly polarized white light source (white arrow) at 45 degrees with respect to the cavity axis that is along the X-direction. The signal polarized perpendicular to the excitation is collected (red arrow) to probe the cavity mode, and resonance is observed at ~1272 nm, in perfect agreement with the PL measurement. The reflectivity is fitted with a Fano resonance line shape giving an intrinsic quality factor ($Q$) of 3209. The experimental value is comparable to the theoretical $Q$ of 6000 and the discrepancy is attributed to fabrication imperfections. Fig. \ref{fig:fig2}D presents the polarization diagram of the cavity mode detuned from the ZPL in orange and the polarization diagram of a quantum emitter alone in black. The polarizations agree well with a dipolar model (solid lines) and have been successfully aligned.
In Fig. \ref{fig:fig3}A, the spectrum of the quantum emitter over a broad range of energy shows the zero-phonon line (ZPL) of the silicon emissive center and its phonon sideband. Fig. \ref{fig:fig3}B presents the spectrum of the quantum emitter using a high-resolution grating. The ZPL is located at 972.43 meV and has a linewidth of 8.3 GHz. To demonstrate that the bright emission from the middle of the cavity corresponds to a single emissive center, we performed quantum coherence measurements of the emitter in the cavity. Autocorrelation measurements, shown in Fig. \ref{fig:fig3}C, are performed using a Hanbury-Brown and Twiss interferometer with superconducting nanowire single-photon detectors (see Supplementary Materials). The second-order correlation measurements of the emission from the cavity under continuous excitation exhibit antibunching at zero delay, $g^{(2)}(0) = 0.30 \pm 0.07$, confirming the successful spatial overlap of a single silicon emissive center with the nanophotonic cavity. Autocorrelation measurements under pulsed excitation at a repetition rate of 10 MHz are presented in Fig. \ref{fig:fig3}D and demonstrate on-demand single-photon generation from the all-silicon platform.
The enhancement of the single center in the cavity requires spatial and spectral overlap. Spatial overlap was achieved in Fig. \ref{fig:fig2} and Fig. \ref{fig:fig3}. To achieve spectral overlap, the nanophotonic cavity is tuned using cycles of argon gas injection. The injected gas condenses on the surface of the PhC and modifies the effective index of the cavity mode, tuning the resonance wavelength of the cavity from ~1269 nm to ~1275 nm. In Fig. \ref{fig:fig4}A, as the cavity resonance is shifted towards the ZPL of the quantum center, the photoluminescence is enhanced to reach a maximum at ~1275 nm, where the spectral overlap is achieved. In Fig. \ref{fig:fig4}B, the ZPL intensity as a function of the cavity detuning shows an enhancement larger than 30 achieved on resonance. For cavity detuning varying from $\delta$ = 2.40 nm to $\delta$ = 0.00 nm, the excited-state lifetime shortens from 53.6 ns to 6.7 ns. An 8-fold reduction in the lifetime is experimentally observed when the overlap is achieved compared to the off-resonance case. Light-matter interaction in cavities is usually quantified using the Purcell factor ($F_p$) that measures the decay rate enhancement of the atom from free space to the cavity ($\gamma_{\text{cav}} = F_p\gamma_0$). It can be estimated by $F_p = (\tau_{\text{bulk}}/\tau_{\text{on}}-\tau_{\text{bulk}}/\tau_{\text{off}})/\eta$, where $\tau_{\text{bulk}}$ is the lifetime of a quantum emitter outside the PhC (dark yellow dots in Fig. \ref{fig:fig4}C), $\tau_{\text{on}}$ is the lifetime on resonance, and $\tau_{\text{off}}$ is the lifetime for a detuning of 2.4 nm. The lifetime measured off-resonance is slightly longer than the one in the bulk because of the reduced density of states in the PhC gap \cite{yablonovitch_inhibited_1987}. The percentage of photons emitted at the ZPL wavelength of the emitter (Debye-Waller factor) is $\eta = 15\%$, which was measured by comparing the count rate with and without the ZPL bandpass filter. The experimental Purcell factor of the defect cavity is $F_p \sim 29.0$. The coupling efficiency of the center to the cavity mode ($\beta$ factor) can be estimated by $\beta = (1/\tau_{\text{on}})/(1/\tau_{\text{on}}+1/\tau_{\text{off}})$, yielding a value of $\beta \sim 89\%$.
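As a quick consistency check of these numbers, the short sketch below evaluates the two formulas with the lifetimes and Debye-Waller factor quoted above; since $\tau_{\text{bulk}}$ is only shown graphically in Fig. \ref{fig:fig4}C and is not quoted numerically in the text, the value used here is a hypothetical placeholder chosen to make the arithmetic concrete.
\begin{verbatim}
# Back-of-the-envelope evaluation of the Purcell factor F_p and the beta factor.
# tau_on, tau_off and eta are quoted in the text; tau_bulk is a hypothetical
# placeholder (read off a figure, not quoted numerically).
tau_on = 6.7       # ns, lifetime with the cavity tuned to the ZPL
tau_off = 53.6     # ns, lifetime at detuning delta = 2.40 nm
tau_bulk = 33.0    # ns, assumed lifetime of an emitter outside the PhC
eta = 0.15         # Debye-Waller factor

F_p = (tau_bulk / tau_on - tau_bulk / tau_off) / eta
beta_factor = (1 / tau_on) / (1 / tau_on + 1 / tau_off)

print(f"F_p  ~ {F_p:.1f}")          # ~ 29 for tau_bulk near 33 ns
print(f"beta ~ {beta_factor:.0%}")  # ~ 89%
\end{verbatim}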
We thus reported the first all-silicon quantum light source based on an atomic emissive center in a silicon-based nanophotonic cavity. The quantum light source in silicon is one of a broad diversity of recently discovered centers that we successfully embedded in a silicon-on-insulator photonic crystal cavity. The quantum center is manufactured in silicon using a sequence of CMOS compatible steps that control the density, inhomogeneous broadening, and orientation of the dipolar moment of the emitters, enabling their efficient overlap with designed nanophotonic cavities. The performance of the quantum light source can be further improved by developing cavities with higher quality factors as well as more deterministic positioning methods to further improve the emitter-cavity spatial overlap. Our results will enable all-silicon quantum optics interfaces with silicon-emissive centers for scalable and integrated quantum optoelectronics.
\vbox{}
\typeout{}
\begin{figure*}
\caption{\textbf{A single atomic emissive center embedded in a silicon-photonic cavity.}}
\label{fig:fig1}
\end{figure*}
\begin{figure*}
\caption{\textbf{Experimental characterization of the silicon-based quantum emitter and cavity.}}
\label{fig:fig2}
\end{figure*}
\begin{figure*}
\caption{\textbf{Quantum coherence measurements of the emitter in the cavity.}}
\label{fig:fig3}
\end{figure*}
\begin{figure*}
\caption{\textbf{Spectral tuning of the nanocavity and enhanced atom-cavity interaction.}}
\label{fig:fig4}
\end{figure*}
\end{document}
|
\begin{document}
\title{Bounds for a solution set of linear complementarity problems over Hilbert spaces }
\begin{abstract}
Let $H$ be a real Hilbert space. In this short note, using some of the properties of bounded linear operators with closed range defined on $H$, certain bounds for a specific convex subset of the solution set of infinite linear complementarity problems are established.
\end{abstract}
{\bf AMS Subject Classification(2010):} {47A99, 90C48.}
{\bf Keywords.} {Linear complementarity problem, Hilbert space, Closed range operator, Positive semidefinite operator.}
\section{Introduction}\label{notation}
Let $H$ be a Hilbert space over the real field $\mathbb{R}$. Let $\mathcal{B}(H)$ denote the set of bounded linear operators on the Hilbert space $H$. A subset $K$ of a Hilbert space $H$ is said to be a cone if $x, y \in K$ and $\lambda \geq 0$ imply that $x+ y \in K$ and $\lambda x \in K$, and if $K \cap (-K) = \{0\}.$ Let $H^{'}$ denote the space of all continuous linear functionals on $H$. The dual cone $K^*$ of a cone $K$ is defined as follows: $K^{*}=\{f \in H^{'} : f(x) \geq 0 , \forall x \in K \}$. Let $H_+$ be a cone in $H$. For a given vector $q \in H$ and an operator $T \in \mathcal{B}(H)$ the linear complementarity problem, written as $LCP(T,q)$, is to find a vector $z \in H_+$ such that $Tz + q \in H_+^*$ and $\langle z, Tz+q \rangle = 0 $. For the case $H=\mathbb{R}^n$ and $H_+ =\mathbb{R}_+^n$ (entry-wise nonnegative vectors in $\mathbb{R}^n$), this problem is well studied \cite{cot-pan-sto}. The linear programming problem in $\mathbb{R}^n$ is defined as follows: For a given vector $p \in \mathbb{R}^n$, find a vector $x \in \mathbb{R}_+^n$ which minimizes $p^Tx$ subject to $Tx + q \in \mathbb{R}_+^n$, where $T$ is an $n \times n$ real matrix, $q$ is a vector in $\mathbb{R}^n$ and $p^T$ denotes the transpose of the vector $p$. In \cite{manga1}, the author proved that, under certain assumptions, each solution of the linear programming problem is also a solution of the linear complementarity problem. In \cite{cry-demp}, the authors extended the results of \cite{manga1} to infinite dimensional Hilbert spaces. Linear complementarity problems are also closely related to variational inequality problems. In \cite{cry-demp}, the authors established the equivalence between the linear complementarity problem and the variational inequality problem. In \cite{isac-numerical}, the author studied the boundedness of the solution set of linear complementarity problems over infinite dimensional Hilbert spaces. One of the main objectives of this article is to study some of the properties of closed range operators. Using these properties, we shall extend some of the bounds of the solution set of linear complementarity problems established in \cite{isac-numerical}. Also, we provide an alternate simple proof for one of the main results (Theorem $3.1$) of \cite{isac-numerical}.
This article is organized as follows: In section \ref{ndp}, we collect some of the known results. In section \ref{prop-clos}, we establish some of the properties of closed range operators. In Theorem \ref{posi-closed} and Theorem \ref{posi-closed-conv}, we derive an equivalent condition for an operator to be a closed range positive semidefinite operator. In section \ref{bound_lcp}, we derive bounds for a convex subset of the solution set of linear complementarity problems.
\section{Notation, Definitions and Preliminary Results}\label{ndp}
For an operator $T \in \mathcal{B}(H)$, let $T^{*}, R(T)$ and $N(T)$ denote the adjoint, range space and null space of $T$, respectively. The \emph{Moore-Penrose inverse} of an operator $T \in \mathcal{B}(H)$ is the unique operator, if it exists, $S \in \mathcal{B}(H)$ satisfying the following conditions: $(1)~ T = TST$, $(2)~S = STS$, $(3)~(TS)^{*} = TS$ and $(4)~(ST)^{*} = ST$, and is denoted by $T^{\dagger}$. For a subset $M$ of $H$, $\overline{M}$ denotes the topological closure of $M$. An operator $T \in \mathcal{B}(H)$ is said to be a \emph{closed range operator} if $R(T)$ is closed. It is well known that an operator $T \in \mathcal{B}(H)$ has a Moore-Penrose inverse $S \in \mathcal{B}(H)$ if and only if $R(T)$ is closed \cite{benis, kato}. A self-adjoint operator $T \in \mathcal{B}(H)$ is said to be \emph{positive semidefinite} if for all $x \in H$, $\langle Tx, x \rangle \geq 0$, and $T$ is said to be \emph{positive definite} if $\langle Tx , x \rangle > 0$ for all nonzero $x \in H$ \cite{isac-numerical}. The positive semidefinite operators are known as positive operators in the literature \cite{reed-simon}. If $T$ is a positive semidefinite operator, then there exists a unique positive semidefinite operator $S$ such that $S^2 = T$ \cite[Lemma 3.2]{bott}. The operator $S$ is called the positive square root of the operator $T$ and is denoted by $T^\frac{1}{2}.$
If $T \in \mathcal{B}(H)$ and $R(T)$ is closed, then the following holds: $(a)~ R(T^*) = R(T^\dagger)$, $(b)~TT^\dagger y = y$ for all $y \in R(T)$ and $(c)~TT^\dagger = T^\dagger T$, whenever $R(T) = R(T^*)$. Further, if $T$ is positive semidefinite, then $R(T) = R(T^\frac{1}{2}) , N(T) = N(T^\frac{1}{2})$.
Let $X$ be a real linear space. Then $X$ is called a {\it partially ordered vector space} if there is a partial order $"\leq"$ defined on $X$ such that the following compatibility conditions are satisfied: $(i)~ x \leq y \Longrightarrow x + z \leq y + z$ for all $z \in X$ and $(ii)~ x \leq y \Longrightarrow \alpha x \leq \alpha y$ for all $\alpha \geq 0$. A subset $X_+$ of a real linear space $X$ is said to be a {\it cone} if, $X_+ + X_+ \subseteq X_+,$ $\alpha X_+ \subseteq X_+ $ for all $ \alpha \geq 0,$ $X_+ \cap {-X_+} = \{0\}$ and $X_+ \neq \{0\}$. A vector $x \in X$ is said to be {\it nonnegative}, if $x \in X_+$. This is denoted by $x \geq 0$. We define $x \leq y$ if and only if $y-x \in X_+$. Then $"\leq"$ is a partial order (induced by $X_+$) on $X$. Conversely, if $X$ is a partially ordered normed linear space with the partial order $"\leq"$, then the set $X_
+ =\{x \in X : x \geq 0\}$ is a cone, and it is called the {\it positive cone} of $X$. By a {\it partial ordered real normed linear space} $X$ we mean a real normed linear space $X$ together with a {\it closed} positive cone $X_+$. Let $X'$ denote the space of all continuous linear functionals on $X$. The dual cone $X_+^*$ of $X_+$, is defined as follows: $X_+^{*}=\{f \in X' : f(x) \geq 0 , \forall x \in X_+ \}$.
A partially ordered real normed linear space which is also a Banach space is called a {\it partially ordered Banach space}. A partially ordered real normed linear space which is also a Hilbert space is called a {\it partially ordered Hilbert space}.
A cone $X_+$ on a real normed linear space $X$ is said to be solid if $\inte(X_+) \neq \emptyset $, where $\inte(X_+)$ denotes the set of all interior points of $X_+$. If $X$ is a Hilbert space, then a cone $X_+$ is said to be self-dual, if $X_+ = X_+^*$.
\section{Properties of closed range operators}\label{prop-clos}
In this section we study some of the properties of closed range operators.
\begin{defn}
For an operator $T \in \mathcal{B}(H)$ , define $M(T) = \sup \{\langle Tx, x \rangle : x \in H , ||x|| = 1\},$ $m(T) = \inf \{\langle Tx, x \rangle : x \in H , ||x|| = 1\}$ and $m_r(T) = \inf \{\langle Tx, x \rangle : x \in R(T^*) , ||x|| = 1\}.$
\end{defn}
\begin{pro}
Let $T \in \mathcal{B}(H)$. Then the following statements hold:
\begin{enumerate}
\item[(i)] An operator $T$ is self adjoint if and only if $T^\dagger$ is self adjoint,
\item[(ii)] For an operator $T$, $m(T) \geq 0 $ if and only if $T$ is positive semidefinite,
\item[(iii)] A closed range operator $T$ is self-adjoint positive semidefinite if and only if $T^\dagger$ is self-adjoint positive semidefinite.
\item[(iv)] For a closed range operator $T$, $m_r(T) > 0$ if and only if $m_r(T^{\dagger})>0$.
\end{enumerate}
\end{pro}
\begin{proof}
Proofs of part $(i)$ and part $(ii)$ are easy to verify.
\textbf{$(iii)$} Let $T$ be a self adjoint positive semidefinite operator with closed range. Then, $T^{\dagger}$ exists, and $T^\dagger$ is self adjoint. Let $x \in H$. Then, $\langle T^\dagger x, x \rangle = \langle T^\dagger T T^\dagger x , x \rangle=\langle T T^\dagger x , T^\dagger x \rangle \geq0$, since $T$ is positive semidefinite. Thus $T^\dagger$ is positive semidefinite. The converse can be proved in a similar way.
\textbf{$(iv)$} Let $x \in R(T^{*})$, then $x = T^{\dagger}y$ for some $y \in R(T)$. Now, $\langle Tx , x \rangle = \langle TT^\dagger y, T^\dagger y\rangle = \langle y, T^\dagger y\rangle$. Since, $H$ is a Hilbert space over the field of real numbers, we have $ \langle y, T^\dagger y\rangle= \langle T^\dagger y, y\rangle$. Thus, $\langle Tx , x \rangle = \langle T^\dagger y, y\rangle$, for some $y \in R(T)$, and hence we have $m_r(T)>0$ if and only if $m_r(T^\dagger) >0$.
\end{proof}
The following lemma will be useful in the proof of the subsequent result.
\begin{lemma}\label{mrT-closure}
Let $T \in \mathcal{B}(H)$ be a self-adjoint operator. Then $m_r(T) = \inf \{\langle Tx, x \rangle : x \in \overline{R(T)} , ||x|| = 1\}.$
\end{lemma}
\begin{proof}
Let $x \in \overline{R(T)}$ and $||x||=1$. Then, there exists a sequence of nonzero vectors $\{x_n\}$ in $R(T)$ such that $x_n$ converges to $x$. Now, since $||x|| =1 $, $x_n$ converges to $x$ implies that $\frac{x_n}{||x_n||}$ converges to $x$. So, without loss of generality, we can assume that $||x_n|| = 1$ for all $n$. Now, $|\langle Tx_n, x_n \rangle - \langle Tx, x \rangle|= |\langle Tx_n, (x_n-x)\rangle + \langle T(x_n-x), x \rangle |\leq ||T|| ||x_n||||x_n-x|| + ||T||||x_n-x||||x||.$ Thus, $x_n$ converges to $x$ implies $\langle Tx_n, x_n \rangle$ converges to $\langle Tx, x \rangle$, and hence $\langle Tx, x \rangle \geq m_r(T)$.
\end{proof}
In the next theorem, we establish a sufficient condition for the positive semidefiniteness of the operator $T$ in terms of $m_r(T)$.
\begin{theorem}\label{posi-closed}
Let $T \in \mathcal{B}(H)$ be a self-adjoint operator. If $m_r(T) > 0$, then $T$ is positive semidefinite and $R(T)$ is closed.
\end{theorem}
\begin{proof}
By Lemma \ref{mrT-closure}, $\langle Tx, x \rangle \geq 0$ for all $x \in \overline{R(T)}$. Now, we shall prove that $T$ is positive semidefinite. Let $x \in H$, then $x = x_1 + x_2$, where $x_1 \in N(T)$ and $x_2 \in N(T)^\perp = \overline{R(T)}$. Now, $\langle Tx, x \rangle = \langle T(x_1+x_2), (x_1+x_2) \rangle = \langle Tx_2, x_2 \rangle \geq 0$. Thus $T$ is positive semidefinite.
To complete the proof, let us show that $R(T)$ is closed. Assume that the sequence $\{Tx_n\}$ converges to $y$. We can assume that the elements of the sequence $\{x_n\}$ are distinct and belong to $\overline{R(T)}$. Then, by Lemma \ref{mrT-closure}, we have $$m_r(T) \leq \frac{\langle Tx_n - T x_m, x_n-x_m \rangle}{||x_n - x_m||^2} \leq \frac{||Tx_n - T x_m||~||x_n-x_m||}{||x_n - x_m||^2} $$ so that $$m_r(T) ||x_n - x_m|| \leq ||Tx_n - Tx_m||.$$ Since $\{Tx_n\}$ is a Cauchy sequence, it follows that $\{x_n\}$ is also a Cauchy sequence and hence $\{x_n \}$ converges to some $x \in H$. Thus $Tx_n$ converges to $Tx$, and hence $Tx=y$. Thus $R(T)$ is closed.
\end{proof}
Next, we show that the converse of Theorem \ref{posi-closed} is true.
\begin{theorem}\label{posi-closed-conv}
Let $T \in \mathcal{B}(H)$ be a self-adjoint operator. If $T$ is positive semidefinite and $R(T)$ is closed, then $m_r(T) > 0$.
\end{theorem}
\begin{proof}
Suppose that $m_r(T) =0$. Then, there exists a sequence $\{x_n\}$ in $R(T)$ such that $||x_n|| =1$ and $\langle Tx_n, x_n \rangle \rightarrow 0$. We have $\langle Tx_n , x_n \rangle = \langle T^{\frac{1}{2}}x_n , T^{\frac{1}{2}}x_n \rangle = ||T^{\frac{1}{2}}x_n||^2$ and so $T^{\frac{1}{2}}x_n \rightarrow 0$ which, in turn, implies that $Tx_n \rightarrow 0$. Since $T$ has closed range, $T^\dagger$ is bounded and hence $T^\dagger T x_n \rightarrow 0$. But $T^\dagger T x_n = x_n$ for all $n$. So $x_n$ converges to $0$, which is not possible. Thus $m_r(T) >0$.
\end{proof}
Next, let us establish a property of closed range operators with $m_r(T)>0$.
\begin{theorem}\label{norm_rel}
Let $T \in \mathcal{B}(H)$ be a self adjoint operator with $m_r(T) >0 $. If
\begin{itemize}
\item[(a)] $R_1 = \sup \{\langle Tx, x \rangle : x \in R(T)$ and $||x|| =1 \},$
\item[(b)] $R_2 = \sup \{\langle Tx, x \rangle : x \in R(T)$ and $||x|| \leq 1 \},$
\item[(c)] $R_3 = \sup \{\langle Tx, x \rangle : ||x|| =1 \},$ and
\item[(d)] $R_4 = \sup \{\langle Tx, x \rangle : ||x|| \leq 1 \}, $
\end{itemize}
then $R_1 =R_2 =R_3 =R_4$.
\end{theorem}
\begin{proof}
It is easy to verify that $R_1 = R_2$ and $R_3 =R_4$. To complete the proof, let us prove $R_2 = R_4$. From the definition, it is clear that $R_4 \geq R_2$. Let $x \in H$ such that $||x||\leq 1.$ Then, $x = x_1 + x_2$ with $x_1 \in N(T)^\perp = R(T)$, $x_2 \in N(T)$ such that $||x_1|| \leq 1$ and $||x_2|| \leq 1$. Thus, for any $x \in H$, we have $\langle Tx , x \rangle = \langle Tx_1, x_1\rangle$ for some $x_1 \in R(T)$ with $||x_1|| \leq 1$. Thus, we have $R_4 = R_2$.
\end{proof}
In the next theorem, we derive relationships between $M(T^\dagger)$ and $m_r(T)$, and between $M(T)$ and $m_r(T^\dagger)$.
\begin{theorem}\label{mMinv}
Let $T \in \mathcal{B}(H)$ be a self adjoint operator such that $m_r(T)>0$. Then the following holds:
\begin{enumerate}
\item $M(T^\dagger) = [m_r(T)]^{-1}$, and
\item $m_r(T^\dagger) = [M(T)]^{-1}.$
\end{enumerate}
\end{theorem}
\begin{proof}
Let $y \in R(T)$ with $||y||=1$. Then $y = Tx$, for some $x \in R(T)$. Now, $ \langle T^\dagger y, y \rangle = \langle T^\dagger T x, T x \rangle = \langle Tx, x \rangle$. Since, $m_r(T) \leq \frac{1}{||x||^2} \langle Tx, x\rangle$, we have $ \frac{1}{\langle Tx, x\rangle} \leq \frac{1}{||x||^2 m_r(T)}$ and hence $\langle Tx, x \rangle \leq \frac{\langle Tx , x \rangle^2}{||x||^2 m_r(T)} \leq \frac{||Tx||^2}{m_r(T)}$. Thus $\langle T^\dagger y, y\rangle \leq \frac{1}{m_r(T)}$, and hence $\sup\{\langle T^\dagger y , y \rangle: y \in R(T), ||y|| = 1\} \leq \frac{1}{m_r(T)}$. Now, by Theorem \ref{norm_rel}, we get $M(T^\dagger) \leq \frac{1}{m_r(T)}.$
If $x \in N(T)$ or $y \in N(T)$, then $\langle Tx, y \rangle = \langle x , Ty \rangle = 0.$ Let $x, y \in R(T)$; then, by the Cauchy-Schwarz inequality for a semi-inner product, we have $|\langle Tx, y \rangle|^2 \leq \langle Tx, x\rangle \langle Ty, y\rangle.$ If $y = T^\dagger x$, then $|\langle TT^\dagger x, x \rangle|^2 \leq \langle Tx, x\rangle \langle T^\dagger x, x\rangle$. Hence, we have $$\langle Tx, x \rangle \langle T^\dagger x, x \rangle \geq 1,$$ whenever $x \in R(T)$ and $||x|| =1.$ Now, we have $\langle Tx, x \rangle \geq \frac{1}{\langle T^\dagger x, x \rangle}$ and hence $\inf \{ \langle Tx, x \rangle: x \in R(T), ||x||=1\} \geq \frac{1}{\sup \{ \langle T^\dagger x, x \rangle: x \in R(T), ||x|| = 1\}} $. Thus, by Theorem \ref{norm_rel}, we get $M(T^\dagger ) \geq \frac{1}{ m_r(T)}.$ The proof of the second assertion is similar.
\end{proof}
In the next corollary, we establish a relationship between $m_r(T)$ and norm of the operator $T^\dagger$.
\begin{cor}\label{comp-m-M}
If $T \in \mathcal{B}(H)$ is a self adjoint operator such that $m_r(T) >0 $, then $\displaystyle{||T^\dagger|| =\frac{1}{m_r(T)}}$.
\end{cor}
\begin{proof}
Since $T^\dagger$ is self-adjoint and positive semidefinite, $||T^\dagger|| = \sup \{\displaystyle \langle T^\dagger x, x\rangle : x \in H, ||x|| =1 \} = M(T^\dagger)$.
Thus, by Theorem \ref{mMinv}, we have $ ||T^\dagger|| = \displaystyle{\frac{1}{m_r(T)}}.$
\end{proof}
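The identity in Corollary \ref{comp-m-M} is easy to check numerically in finite dimensions, where every operator has closed range. The sketch below is an illustration only: a random positive semidefinite matrix with nontrivial kernel stands in for a self-adjoint closed range operator on $H$, and $m_r(T)$ is its smallest nonzero eigenvalue.
\begin{verbatim}
# Finite-dimensional illustration of ||T^dagger|| = 1 / m_r(T) for a
# self-adjoint positive semidefinite T with nontrivial kernel.
import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((5, 3))
T = A @ A.T                                     # rank-3 PSD matrix on R^5

eigenvalues = np.linalg.eigvalsh(T)
m_r = min(v for v in eigenvalues if v > 1e-10)  # smallest nonzero eigenvalue
T_dag = np.linalg.pinv(T)                       # Moore-Penrose inverse

print(np.linalg.norm(T_dag, 2), 1 / m_r)        # the two values coincide
\end{verbatim}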
\section{Bounds for solution set of linear complementarity problems} \label{bound_lcp}
In this section, we establish bounds for a certain specific convex subset of the solution set of linear complementarity problems. For an operator $T \in \mathcal{B}(H)$ and $b \in H$, the solution set of the associated linear complementarity problem LCP$(T, b)$, denoted by $SOL(T,b)$, is defined as the set of all solutions of the LCP$(T, b)$.
In general, the solution set of a linear complementarity problem need not be bounded. In the next theorem, we give a sufficient condition under which the solution set is unbounded.
\begin{theorem}
Let $H$ be a real Hilbert space and let $K$ be a cone. Let $T \in \mathcal{B}(H)$ and $b \in H$. If $ x \in N(T) \cap SOL(T, b)$ for some nonzero $x \in H$, then $SOL(T, b)$ is unbounded.
\end{theorem}
\begin{proof}
Let $ x \in N(T) \cap SOL(T, b)$ and $x \neq 0$. Then $\alpha x \in N(T) \cap SOL(T, b)$, for all $ \alpha \geq 0.$ Hence $SOL(T, b)$ is unbounded.
\end{proof}
In the next theorem we establish a bound for those solutions which do not belong to the null space of the operator $T$.
\begin{theorem}\label{bounded1}
Let $H$ be a real Hilbert space and let $K$ be a cone. Let $T \in \mathcal{B}(H)$ and $b \in H$. If $m_r(T)>0$, then the solution set $SOL(T, b)\cap R(T^*)$ of $LCP(T, b)$ is a subset of $B(0, \frac{||b||}{m_r(T)}) \cap K$.
\end{theorem}
\begin{proof}For $x \in R(T^*)$,
we have,
\begin{align*}
\langle Tx+b, x\rangle &= \langle Tx, x\rangle + \langle b, x\rangle, \\
&\geq m_r(T)||x||^2 - ||b|| || x||, \\
& = (m_r(T)||x|| - ||b||) || x||.
\end{align*}
Now, if $(m_r(T)||x|| - ||b||) >0 $, then $\langle Tx+b, x\rangle >0$. Thus $x \notin SOL(T, b)$. Hence, if $ x \in SOL(T, b)\cap R(T^*) $, then $(m_r(T)||x|| - ||b||) \leq 0$. Thus $||x|| \leq \frac{||b||}{m_r(T)}$, which proves the claim.
\end{proof}
In the next theorem, we derive a bound for solution of the linear complementarity problem, whenever the solution vector belongs to the range space of the operator $T$.
\begin{theorem}
Let $H$ be a real Hilbert space and $K$ be a self-dual cone in $H$. If $T\in \mathcal{B}(H)$ is a self adjoint operator such that $m_r(T) >0 $, then for every solution $x \in R(T)$ of $LCP(T, b)$ where $b \in R(T)$, one has $||x|| \leq \frac{M(T)}{m_r(T)} ||T^{\dagger}(b)||.$
\end{theorem}
\begin{proof}
Since $b\in R(T)$ and $T$ is self adjoint, we have $b=TT^\dagger b$ and $M(T)=\lvert \lvert T \rvert \rvert$. Thus $\lvert \vert b\rvert \rvert\leq \lvert \lvert T \rvert \rvert \lvert \lvert T^\dagger b \rvert \rvert$. Suppose that $x\in R(T)$ is a solution of $LCP (T,b)$. By Theorem \ref{bounded1},
\begin{align*}
\lvert \lvert x\rvert \rvert & \leq \frac{||b||}{m_r(T)},\\
& \leq \frac{M(T)}{m_r(T)} ||T^{\dagger}(b)||.
\end{align*}
\end{proof}
The proof of the above theorem gives an alternate simple proof to \cite[Theorem 3.1]{isac-numerical}.
\begin{cor}
Let $H$ be a real Hilbert space and $K$ be a self-dual cone in $H$. If $T\in \mathcal{B}(H)$ is a self adjoint operator such that $m(T) >0 $, then for every solution $x$ of $LCP(T, b)$, one has $||x|| \leq \frac{M(T)}{m(T)} ||T^{-1}(b)||.$
\end{cor}
\begin{theorem}\label{last}
Let $H$ be a real Hilbert space and let $T\in \mathcal{B}(H)$ be a self adjoint closed range operator. If $x$ is a solution of $LCP(T, b)$, where $b \in R(T)$, then
\begin{center}
$\langle x-x_b, T(x-x_b)\rangle=\frac{1}{4}\langle b, T^\dagger b\rangle ,$
\end{center}
where $x_b=-\frac{1}{2} T^\dagger b$.
\end{theorem}
\begin{proof}
Let $b\in R(T)$ and $x_b=-\frac{1}{2}T^\dagger b$. Suppose that $x$ is a solution of $LCP(T, b)$. Then
\begin{eqnarray}
\langle x-x_b, T(x-x_b)\rangle & =& \langle x,Tx\rangle + \frac{1}{2} \langle x,TT^\dagger b\rangle + \frac{1}{2} \langle T^\dagger b, T x\rangle + \frac{1}{4}\langle T^\dagger b, TT^\dagger b\rangle \nonumber \\
&=& \langle x,T x\rangle + \langle x, b\rangle +\frac{1}{4}\langle T^\dagger b, b\rangle \nonumber \\
&=& \frac{1}{4}\langle b, T^\dagger b\rangle. \nonumber
\end{eqnarray}
\end{proof}
\begin{theorem}
Let $H$ be a real Hilbert space and $T\in \mathcal{B}(H)$ be a self adjoint operator such that $m_r(T) >0 .$ Let $b \in R(T)$ and $x_b=-\frac{1}{2} T^\dagger b$. Then for every solution $x\in R(T)$ of $LCP(T, b)$, where $x\neq x_b$, one has
\begin{center}
$\frac{\| b \|}{2M(T)}\leq \| x-x_b \| \leq \frac{\| b \|}{2m_r(T)}.$
\end{center}
\end{theorem}
\begin{proof}
By Theorem \ref{last}, we have $\langle x-x_b, T(x-x_b)\rangle=\frac{1}{4}\langle b, T^\dagger b\rangle.$
Also, $\frac{1}{4} m_r(T^\dagger)\|b\|^2\leq \|x-x_b\|^2M(T)$, by the definitions of $M(T)$ and $m_r(T^\dagger)$.
Now, by Theorem \ref{mMinv}, we have
$\frac{1}{4M(T)}\|b\|^2\leq \|x-x_b\|^2M(T).$
Thus, we can conclude that
\begin{equation}
\frac{1}{2M(T)}\|b\|\leq \|x-x_b\|. \label{1}
\end{equation}
Similarly, by Theorem \ref{mMinv} and \ref{last}, we have
$m_r(T)\|x-x_b\|^2\leq \frac{1}{4}M(T^{\dagger})\|b\|^2= \frac{1}{4m_r(T)}\|b\|^2$.
Thus \begin{equation}
\|x-x_b\|\leq \frac{1}{2m_r(T)}\|b\|. \label{2}
\end{equation}
From equation \eqref{1} and \eqref{2},
\begin{center}
$\frac{\| b \|}{2M(T)}\leq \| x-x_b \| \leq \frac{\| b \|}{2m_r(T)}.$
\end{center}
\end{proof}
\subsection{Example}
Let $H = l^2(\mathbb{Z})$ and $H_+ = \{(\dots, x_{-2}, x_{-1},\fbox{$x_{0}$},x_{1},x_{2}, \dots): x_i \geq 0 ~\mbox{for all}~ i\}.$ Then $H_+$ is a self-dual cone. Define $T : H \rightarrow H$ as $T((\dots, x_{-2}, x_{-1},\fbox{$x_{0}$},x_{1},x_{2}, \dots)) = (\dots, 0, 0, \fbox{$x_{0}$},x_{1},x_{2}, \dots)$. Then $T$ is an orthogonal projection onto the space $l^2(\mathbb{N})$. So, $T = T^2 = T^*$ and $T = T^\dagger$. Consider the vector $b = (\dots, 0, 0, \fbox{$0$}, \frac{-1}{3},1,\frac{1}{2}, \frac{1}{3}, \dots) \in R(T)$; it is easy to verify that $x = (\dots, 0, 0, \fbox{$0$}, \frac{1}{3},0,0, \dots)$ solves the associated LCP. It is clear that $\Vert x \Vert \leq \Vert b \Vert$. Also, $\Vert x - x_b \Vert = \frac{\Vert b \Vert}{2}$. Indeed, in the vector $b$, the entry $1$ may be replaced by any real number.
It may be observed that, more generally, any orthogonal projection satisfies the assumptions of our theorem.\\
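The bounds above can also be checked in a small finite-dimensional example, where $H=\mathbb{R}^3$, $H_+$ is the nonnegative orthant and the LCP can be solved by brute force over complementary index sets. The sketch below is an illustration only; the matrix $T$ (symmetric positive semidefinite with nontrivial kernel) and the vector $b\in R(T)$ are arbitrary choices. For the solution it finds (which lies in $R(T)$) it checks the bound of Theorem \ref{bounded1} and the two-sided bound involving $x_b=-\frac{1}{2}T^\dagger b$.
\begin{verbatim}
# Brute-force solution of LCP(T, b) on R^3 with the nonnegative orthant, and a
# check of  ||x|| <= ||b|| / m_r(T)  and
#           ||b||/(2 M(T)) <= ||x - x_b|| <= ||b||/(2 m_r(T)).
import itertools
import numpy as np

T = np.array([[2.0, 0.5, 0.0],
              [0.5, 1.0, 0.0],
              [0.0, 0.0, 0.0]])
b = np.array([-1.0, 0.5, 0.0])

eigenvalues = np.linalg.eigvalsh(T)
m_r = min(v for v in eigenvalues if v > 1e-10)   # smallest nonzero eigenvalue
M = max(eigenvalues)                             # M(T) = ||T|| for PSD T
x_b = -0.5 * np.linalg.pinv(T) @ b

for k in range(4):
    for S in itertools.combinations(range(3), k):
        x = np.zeros(3)
        if S:                        # solve (Tx + b)_S = 0 with x = 0 off S
            sub = T[np.ix_(S, S)]
            if abs(np.linalg.det(sub)) < 1e-12:
                continue
            x[list(S)] = np.linalg.solve(sub, -b[list(S)])
        w = T @ x + b
        if (x >= -1e-9).all() and (w >= -1e-9).all() and abs(x @ w) < 1e-9:
            print("solution:", x)
            print("||x|| <= ||b||/m_r :",
                  np.linalg.norm(x) <= np.linalg.norm(b) / m_r)
            print("two-sided bound    :",
                  np.linalg.norm(b) / (2 * M)
                  <= np.linalg.norm(x - x_b)
                  <= np.linalg.norm(b) / (2 * m_r))
\end{verbatim}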
\textbf{Acknowledgement:}
Projesh Nath Choudhury was partially supported by National Post-Doctoral Fellowship(PDF/2019/000275), the SERB, Department of Science and Technology, India, and the NBHM Post-Doctoral Fellowship (0204/11/2018/R$\&$D-II/6437) from DAE (Govt. of India).
M. Rajesh Kannan would like to thank the SRIC, IIT Kharagpur, the SERB, Department of Science and Technology, India, for financial support through the projects ISIRD, MATRICS (MTR/2018/000986) and Early Career Research Award (ECR/2017/000643).
\end{document}
|
\begin{document}
\def0.5\textwidth{0.5\textwidth}
\title{Almost complex structures \\on $(n-1)$-connected $2n$-manifolds}
\author{Huijun Yang}
\address{Hua Loo-Keng Key laboratory of Mathematics, Academy of Mathematics and Systems Science,
Chinese Academy of Sciences, Beijing 100190, China}
\email{[email protected]}
\subjclass[2000]{55N15, 19L64.}
\keywords{ Almost complex structure, stable almost complex structure, reduced $KU$-group, reduced $KO$-group, real reduction}
\begin{abstract} Let $M$ be a closed $(n-1)$-connected $2n$-dimensional smooth
manifold with $n\geq 3$. In terms of the system of invariants for
such manifolds introduced by Wall, we obtain necessary
and sufficient conditions for $M$ to admit an almost complex
structure.
\end{abstract}
\maketitle
\section{Introduction}
First we introduce some notations. For a topological space $X$, let $Vect_{C}(X)$ (resp. $Vect_{R}(X)$)
be the set of isomorphism classes of complex (resp. real) vector
bundles on $X$, and let $r\colon Vect_{C}(X)\rightarrow Vect_{R}(X)$
be the real
reduction, which induces the real reduction homomorphism $\tilde{r}\colon \widetilde{K}
(X)\rightarrow \widetilde{KO}(X)$ from the reduced $KU$-group to the
reduced $KO$-group of $X$.~For a map $f\colon X\rightarrow Y$
between topological spaces $X$~and~$Y$,~denote by $f_{u}^{*}\colon
\widetilde{K}(Y)\rightarrow \widetilde{K}(X)$~and~$f_{o}^{*}\colon
\widetilde{KO}(Y)\rightarrow \widetilde{KO}(X)$~the induced
homomorphisms.
Let $M$ be a $2n$-dimensional smooth manifold with tangent bundle
$TM$. We say that $M$ admits an \emph{almost complex structure}
(resp.~a \emph{stable almost complex structure}) if $TM \in
\mathrm{Im} r$ (resp.~$TM \in \mathrm{Im} \tilde{r}$). Clearly, $M$
admits an almost complex structure implies that $M$ admits a stable
almost complex structure. It is a classical topic in geometry to
determine which $M$ admits an almost complex structure. See for
instance \cite{wu,eh,mg,su}.
In this paper we determine those closed $(n-1)$-connected $2n$-dimensional smooth manifolds $M$ with $
n\geq 3$ that admit an almost complex structure.
Throughout this paper, $M$ will be a closed oriented
$(n-1)$-connected $2n$-dimensional smooth manifold with $ n\ge3$.
In \cite{wa}, C.T.C. Wall assigned to each $M$ a system of invariants
as follows.
\shortstack[l]{
1) $H=H^{n}(M;\mathbb{Z})\cong Hom(H_{n}(M;\mathbb{Z});\mathbb{Z}
)\cong\oplus _{j=1}^{k}\mathbb{Z}$, the cohomology group of $M$, \\\quad with $k$ the $n$-th Betti number of $M$,
}
\shortstack[l]{
2) $I\colon H\times
H\rightarrow \mathbb{Z}$, the intersection form of $M$, which is unimodular and $(-1)^n$-sym-\\\quad metric, defined by\\
\qquad \qquad\qquad\qquad\qquad $I(x,y)=<x\cup y,[M]>$,\\
\quad where
the homology class $[M]$ is the orientation class of $M$,\\
3) A map $\alpha \colon H_{n}(M;\mathbb{Z})\rightarrow \pi
_{n-1}(SO_{n})$ that assigns each element $x\in H_{n}(M;\mathbb{Z})$
to\\ \quad the
characteristic map $\alpha (x)$ for the normal bundle of the embedded $n$-sphere \\\quad$
S_{x}^n$ representing $x$.
}
These invariants satisfy the relation (\cite[Lemma 2]{wa})
\begin{equation}\label{gx}
\alpha(x+y)=\alpha(x)+\alpha(y)+I(x,y)~\partial\iota_n,
\end{equation}
where $\partial$ is the boundary homomorphism in the exact sequence
\begin{equation}\label{tl}
\cdots\rightarrow\pi_n(S^n)\xrightarrow{\partial}\pi_{n-1}(SO_n)\xrightarrow{S}\pi_{n-1}(SO_{n+1})\rightarrow\cdots
\end{equation}
of the fiber bundle $SO_n\hookrightarrow SO_{n+1}\rightarrow S^n$,
and $\iota_n\in\pi_n(S^n)$ is the class of the identity map.
Denote by $\chi = S\circ ~\alpha\colon
H_{n}(M;\mathbb{Z})\rightarrow \pi _{n-1}(SO_{n+1})\cong
\widetilde{KO}(S^{n})$ the composition map, then from (\ref{gx}) and
(\ref{tl})
\begin{equation}\label{chi}
\chi =S\circ \alpha \in H^{n}(M;\widetilde{KO}(S^{n}))=Hom(H_{n}(M;
\mathbb{Z});\widetilde{KO}(S^{n}))
\end{equation}
can be viewed as an $n$-dimensional cohomology class of $M$, with
coefficient in $\widetilde{KO}(S^n)$. It follows from
Kervaire~\cite[Lemma~1.1]{ke}~and~Hirzebruch index Theorem \cite{hi}
that the Pontrjagin classes $p_{j}(M)\in H^{4j}(M;\mathbb{Z})$ of
$M$ can be expressed in terms of the cohomology class $\chi$ and the
index $\tau$ of the intersection form $I$ (when $n$ is even) as
follows (cf. Wall~\cite[p.~179-180]{wa}).
\begin{lemma}{Let $M$ be a closed oriented
$(n-1)$-connected $2n$-dimensional smooth manifold with $ n\ge3$. Then
\begin{equation*} p_{j}(M)=
\begin{cases}
\pm a_{n/4}(n/2-1)!\chi, & n\equiv
0(mod~4), j=n/4, \\
\frac{a_{n/4}^{2}}{2}((n/2-1)!)^{2}\{1-\frac{
(2^{n/2-1}-1)^{2}}{2^{n-1}-1}\binom{n}{n/2}\frac{B_{n/4}^{2}}{B_{n/2}}\}I(\chi,\chi)
\\+\frac{n!}{2^{n}(2^{n-1}-1)B_{n/2}}\tau, & n\equiv
0(mod~4), j=n/2,\\
\frac{n!}{2^{n}(2^{n-1}-1)B_{n/2}} \tau, & n\equiv 2(mod~4), j=n/2,
\end{cases}
\end{equation*}
where
\begin{equation*}a_{n/4}=
\begin{cases}
1, & n\equiv 0~(mod~8),\\
2, & n\equiv 4~(mod~8),
\end{cases}
\end{equation*}
$B_m$ is the $m$-th Bernoulli number.}
\end{lemma}
Now we can state the main results as follows.
\begin{theorem1} {Let $M$ be a closed oriented $(n-1)$-connected
$2n$-dimensional smooth manifold with $ n\ge3$, $\chi$ be the
cohomology class defined in (\ref{chi}), $\tau$ the index of the intersection form $I$ (when $n$ is even). Then the necessary and
sufficient conditions for $M$ to admit a stable almost complex
structure are:
\shortstack[l]{
1)~$n\equiv 2,~3,~5,~6,~7~(mod~8)$,~or\\
2)~if~$n\equiv 0~(mod~8)\colon \chi \equiv 0~(mod~2)$~and~$\frac{
(B_{n/2}-B_{n/4})}{B_{n/2}B_{n/4}}\cdot \frac{n\tau }{2^{n}}\equiv 0~(mod
~2)$,\\
3)~if~$n\equiv
4~(mod~8)\colon \frac{(B_{n/2}+B_{n/4})}{B_{n/2}B_{n/4}}\cdot
\frac{\tau
}{2^{n-2}}\equiv 0~(mod~2)$,\\
4)~if~$n\equiv 1~(mod~8)\colon \chi =0$.}}
\end{theorem1}
\begin{theorem2}{ Let $M$ be a closed oriented $(n-1)$-connected
$2n$-dimensional smooth manifold with $ n\ge3$, $k$ be the $n$-th
Betti number, $I$ be the intersection form, and $p_j(M)$ be the
Pontrjagin class of $M$ as in Lemma 1.1. Then $M$ admits an almost complex
structure if and only if $M$ admits a stable almost complex
structure and one of the following conditions is satisfied:
\shortstack[l]{
1)~If~$n\equiv 0~(mod~4)\colon$~$
4p_{n/2}(M)-I(p_{n/4}(M),~p_{n/4}(M))=8~(k+2)$,\\
2)~if~$n\equiv 2~(mod~8)\colon$ there exists an element~$x\in H^{n}(M;\mathbb{Z
})$ such that \\ \qquad $x\equiv \chi ~(mod~2)$ and $I(x,
x)=(2(k+2)+p_{n/2}(M))/((n/2-1)!)^{2}$,\\
3)~if~$n\equiv 6~(mod~8)\colon$~there exists an element~$x\in H^{n}(M;\mathbb{Z
})$ such that \\ \qquad $I(x, x)=(2(k+2)+p_{n/2}(M))/((n/2-1)!)^{2}$,\\
4)~if~$n\equiv 1~(mod~4)\colon$~$2((n-1)!)\mid(2-k)$,\\
5)~if~$n\equiv 3~(mod~4)\colon$~$(n-1)!\mid(2-k)$.}}
\end{theorem2}
\begin{remark}{ i) Since the rational numbers $\frac{(B_{n/2}-B_{n/4})}{
B_{n/2}B_{n/4}}\cdot \frac{n\tau }{2^{n}}$ and $\frac{(B_{n/2}+B_{n/4})}{
B_{n/2}B_{n/4}}\cdot \frac{\tau }{2^{n-2}}$ in Theorem 1 can be viewed as $2$-adic integers (see the proof of Theorem 1), it makes sense to take congruence classes modulo 2.
ii) In the cases 2) and 3) of Theorem 2, when the conditions are
satisfied, the almost complex structure on $M$ depends on the
choice of $x$.
}
\end{remark}
This paper is arranged as follows. In \S 2 we obtain presentations
for the groups $\widetilde{KO}(M)$, $\widetilde{K}(M)$ and determine
the real reduction $\tilde{r}\colon \widetilde{K}(M)\rightarrow
\widetilde{KO}(M)$ accordingly. In \S 3 we determine the expression
of $TM\in\widetilde{KO}(M)$ with respect to the presentation of
$\widetilde{KO}(M)$ obtained in \S 2. With these preliminary
results, Theorem 1 and Theorem 2 are established in \S 4.
I would like to thank my supervisor H. B. Duan, Dr. Su and Dr. Lin for their
help with the preparation of this paper.
\section{The real reduction $\tilde{r}\colon\protect\widetilde{K}(M)\rightarrow
\protect\widetilde{KO}(M)$}
According to Wall \cite{wa}, $M$ is homotopic to
a $CW$ complex $(\vee _{\lambda=1}^{k}S_{\lambda}^{n})\cup
_{f}\mathbb{D}^{2n}$, where $k$ is the $n$-th Betti number of $M$, $\vee _{\lambda=1}^{k}S_{\lambda}^{n}$
is the wedge sum of $n$-spheres which is the $n$-skeleton of $M$ and $f \in \pi_{2n-1}(\vee
_{\lambda=1}^{k}S_{\lambda}^{n})$ is the attaching map of $\mathbb{D}^{2n}$ which is
determined by the intersection form $I$ and the map $\alpha$~(cf. Duan and Wang \cite{dw}).
Let $i\colon \vee _{\lambda=1}^{k}S_{\lambda}^{n}\rightarrow M$ be
the inclusion map of the $n$-skeleton of $M$ and $p\colon M\rightarrow S^{2n}$ be the
map collapsing the $n$-skeleton $\vee _{\lambda=1}^{k}S_{\lambda}^{n}$ to the base point. Then by the
naturality of the Puppe sequence, we have the following exact ladder:
\begin{table}[!htbp]
\begin{tabular}[b]{c@{\hspace{4pt}}c@{\hspace{4pt}}c@{\hspace{4pt}}c@{\hspace{4pt}}c@{\hspace{4pt}}c@{\hspace{4pt}}c@{\hspace{4pt}}c@{\hspace{4pt}}c@{\hspace{4pt}}c}
&$\widetilde{K}(\vee_{\lambda=1}^k S_{\lambda}^{n+1})$ & $
\overset{\Sigma f_{u}^{\ast }}{\rightarrow } $&
$\widetilde{K}(S^{2n})$ & $\overset{p_u^{\ast }}{ \rightarrow }$ &
$\widetilde{K}(M)$ & $\overset{i_u^{\ast }}{\rightarrow }$ &
$\widetilde{K}(\vee_{\lambda=1}^k S_{\lambda}^{n})$ &
$\overset{f_u^{\ast }}{\rightarrow }$ &
$\widetilde{K}(S^{2n-1})$ \\
(2.1)\quad\quad&$\tilde{r}\downarrow$ & & $\tilde{r}\downarrow$ & & $\tilde{r}
\downarrow$ & & $\tilde{r}\downarrow$ & & $\tilde{r}\downarrow $\\
&$\widetilde{KO}(\vee_{\lambda=1}^k S_{\lambda}^{n+1})$ & $\overset{\Sigma f_{o}^{\ast }}{
\rightarrow }$ & $\widetilde{KO}(S^{2n})$ & $\overset{p_o^{\ast
}}{\rightarrow }$
& $\widetilde{KO}(M)$ & $\overset{i_o^{\ast }}{\rightarrow }$ & $\widetilde{KO}
(\vee_{\lambda=1}^k S_{\lambda}^{n})$ & $\overset{f_o^{\ast }}{\rightarrow }$ & $\widetilde{KO}
(S^{2n-1})$
\end{tabular}
\end{table}
where the horizontal homomorphisms $\Sigma f_{u}^{\ast}$, $\Sigma
f_{o}^{\ast }$, $p_u^{\ast}$, $p_o^{\ast }$, $i_u^{\ast}$,
$i_o^{\ast }$ and $f_{u}^{\ast }$, $f_{o}^{\ast }$ are induced by
$\Sigma f$, $p$, $i$ and $f$ respectively, and where $\Sigma$
denotes the suspension.
Let $\mathbb{Z}\beta$ (resp. $\mathbb{Z}_2\beta$) be the infinite
cyclic group (resp. finite cyclic group of order 2) generated by
$\beta$. Then the generators $\omega_{C}^m$ (resp. $\omega_{R}^m$) of the cyclic group $\widetilde{K}(S^m)$ (resp. $\widetilde{KO}(S^m)$) with $m > 0$ can be chosen such that the
real reduction $\tilde{r}\colon \widetilde{K} (S^m)\rightarrow
\widetilde{KO}(S^m)$ can be summarized as in Table 1 (cf. Mimura~and~Toda~\cite[Theorem~6.1,~p.~211]{mt}).
\begin{table}[!htbp]
\centering
\begin{tabular}[b]{llll}
\multicolumn{4}{c}{Table~1.~ Real reduction~$\tilde{r}\colon \widetilde{K}
(S^m)\rightarrow
\widetilde{KO}(S^m)$}\\[0pt] \hline
\multicolumn{1}{|c}{\rule{0pt}{13pt} $m~(mod~8)$} &
\multicolumn{1}{|c}{$\widetilde{K}(S^{m})$} &
\multicolumn{1}{|c}{$\widetilde{KO}(S^{m})$} &
\multicolumn{1}{|c|}{$\tilde{r}\colon \widetilde{K} (S^m)\rightarrow
\widetilde{KO}(S^m)$}
\\[0pt]
\hline
\multicolumn{1}{|c}{$0$} &
\multicolumn{1}{|l}{$\mathbb{Z}\omega_{C}^m$} &
\multicolumn{1}{|l}{$\mathbb{Z}\omega_{R}^m$} &
\multicolumn{1}{|l|}{$\tilde{r}(\omega_{C}^m)=2\omega_{R}^m$}
\\ \hline
\multicolumn{1}{|c}{$1$} & \multicolumn{1}{|l}{$0$} &
\multicolumn{1}{|l}{$\mathbb{Z}_{2}\omega_{R}^m$} & \multicolumn{1}{|l|}{$\tilde{r}=0$} \\
\hline
\multicolumn{1}{|c}{$2$} &
\multicolumn{1}{|l}{$\mathbb{Z}\omega_{C}^m$} &
\multicolumn{1}{|l}{$\mathbb{Z}_{2}\omega_{R}^m$} &
\multicolumn{1}{|l|}{$\tilde{r}(\omega_{C}^m)=\omega_{R}^m$}
\\ \hline
\multicolumn{1}{|c}{$4$} &
\multicolumn{1}{|l}{$\mathbb{Z}\omega_{C}^m$} &
\multicolumn{1}{|l}{$\mathbb{Z}\omega_{R}^m$} & \multicolumn{1}{|l|}{$\tilde{r}(\omega_{C}^m)=\omega_{R}^m$} \\
\hline
\multicolumn{1}{|c}{$6$} &
\multicolumn{1}{|l}{$\mathbb{Z}\omega_{C}^m$} &
\multicolumn{1}{|l}{$0$} & \multicolumn{1}{|l|}{$\tilde{r}=0$} \\
\hline
\multicolumn{1}{|c}{$3$, $5$, $7$} & \multicolumn{1}{|l}{$0$} &
\multicolumn{1}{|l}{$0$} & \multicolumn{1}{|l|}{$\tilde{r}=0$} \\
\hline
\end{tabular}
\end{table}
Denote by $t_{ju}^{\ast}\colon
\widetilde{K}(S_j^n)\rightarrow\widetilde{K}(\vee_{\lambda=1}^kS_{\lambda}^n)$~and~$t_{jo}^{\ast}\colon
\widetilde{KO}(S_j^n)\rightarrow\widetilde{KO}(\vee_{\lambda=1}^kS_{\lambda}^n)$
the homomorphisms induced by $t_{j}\colon
\vee_{\lambda=1}^kS_{\lambda}^n\rightarrow S_{j}^n$ which collapses
$\vee_{\lambda\neq j}S_{\lambda}^{n}$ to the base point. Then we
have:
\begin{lemma}{ Let $M$ be a closed oriented
$(n-1)$-connected $2n$-dimensional smooth manifold with $ n\ge3$. Then the presentations of the groups
$\widetilde{K}(M)$ and $
\widetilde{KO}(M)$ as well as the real reduction $\tilde{r}\colon \widetilde{K}
(M)\rightarrow \widetilde{KO}(M)$ can be given as in Table 2.
\begin{table}[!htbp]
\centering
\begin{tabular}{llll} \multicolumn{4}{c}{Table~2.~Real reduction~$\tilde{r}\colon \widetilde{K}
(M)\rightarrow\widetilde{KO}(M)$}\\[0pt] \hline
\multicolumn{1}{|c}{\rule{0pt}{14pt} $n~(mod\text{ }8)$} &
\multicolumn{1}{|c}{$\widetilde{K}(M)$} &
\multicolumn{1}{|c}{$\widetilde{KO}(M)$} &
\multicolumn{1}{|c|}{$\tilde{r}\colon \widetilde{K} (M)\rightarrow
\widetilde{KO}(M)$} \\
\hline
\multicolumn{1}{|c}{$0$} &
\multicolumn{1}{|l}{$\mathbb{Z}\xi\oplus \bigoplus
_{j=1}^{k}\mathbb{Z}\eta _{j}$} & \multicolumn{1}{|l}{$\mathbb{Z}
\gamma \oplus \bigoplus _{j=1}^{k}\mathbb{Z}\zeta _{j}$} &
\multicolumn{1}{|l|}{$\tilde{r}(\xi )=2\gamma $, $\tilde{r}(\eta
_{j})=2\zeta _{j}$}
\\ \hline
\multicolumn{1}{|c}{ $1$} &
\multicolumn{1}{|l}{$\mathbb{Z}\xi$} &
\multicolumn{1}{|l}{$\mathbb{Z}_2
\gamma \oplus \bigoplus _{j=1}^{k}\mathbb{Z}_2\zeta _{j}$} & \multicolumn{1}{|l|}{$\tilde{r}(\xi )=\gamma$} \\
\hline
\multicolumn{1}{|c}{ $2$} &
\multicolumn{1}{|l}{$\mathbb{Z}\xi\oplus \bigoplus
_{j=1}^{k}\mathbb{Z}\eta _{j}$} & \multicolumn{1}{|l}{$\mathbb{Z}
\gamma \oplus \bigoplus _{j=1}^{k}\mathbb{Z}_2\zeta _{j}$} &
\multicolumn{1}{|l|}{$\tilde{r}(\xi )=\gamma $, $\tilde{r}(\eta
_{j})=\zeta _{j}$}
\\ \hline
\multicolumn{1}{|c}{ $4$} &
\multicolumn{1}{|l}{$\mathbb{Z}\xi\oplus \bigoplus
_{j=1}^{k}\mathbb{Z}\eta _{j}$} &
\multicolumn{1}{|l}{$\mathbb{Z}
\gamma \oplus \bigoplus _{j=1}^{k}\mathbb{Z}\zeta _{j}$} &
\multicolumn{1}{|l|}{$\tilde{r}(\xi)=2\gamma $, $\tilde{r}(\eta _{j})=\zeta _{j}$} \\
\hline
\multicolumn{1}{|c}{ $5$} &
\multicolumn{1}{|l}{$\mathbb{Z}\xi$} &
\multicolumn{1}{|l}{$\mathbb{Z}_{2}\gamma$} &
\multicolumn{1}{|l|}{$\tilde{r}(\xi )=\gamma$} \\ \hline
\multicolumn{1}{|c}{$6$} &
\multicolumn{1}{|l}{$\mathbb{Z}\xi\oplus \bigoplus
_{j=1}^{k}\mathbb{Z}\eta _{j}$} & \multicolumn{1}{|l}{$\mathbb{Z}
\gamma$} & \multicolumn{1}{|l|}{$\tilde{r}(\xi )=\gamma$,
$\tilde{r}(\eta _{j})=0$} \\ \hline
\multicolumn{1}{|c}{ $3$, $7$} &
\multicolumn{1}{|l}{$\mathbb{Z}\xi$} &
\multicolumn{1}{|l}{$0$} & \multicolumn{1}{|l|}{$\tilde{r}=0$} \\\hline
\end{tabular}
\end{table}\\
where the generators $\xi$,~$\eta_j$,~$\gamma$,~$\zeta_j$,~$1\le j \le
k$, satisfy:
\begin{equation*}
\left\{\begin{aligned}\xi=p_{u}^{\ast}(\omega_{C}^{2n}),~
i_{u}^{\ast}(\eta_j)=t_{ju}^{\ast}(\omega_{C}^n);\\
\gamma=p_{o}^{\ast}(\omega_{R}^{2n}),~
i_{o}^{\ast}(\zeta_j)=t_{jo}^{\ast}(\omega_{R}^n).
\end{aligned}\right.
\end{equation*}
}
\end{lemma}
\begin{proof}{ We assert that
a) the induced homomorphisms $f^{\ast }_u$, $f^{\ast }_o$, $\Sigma
f^{\ast }_u$ and $\Sigma f^{\ast }_o$ in (2.1) are trivial, moreover,
b) the short exact sequences
\begin{gather}
0\rightarrow\widetilde{K}(S^{2n})\xrightarrow{p_u^{\ast
}}\widetilde{K}(M)\xrightarrow{i_u^{\ast
}}\widetilde{K}(\vee_{\lambda=1}^k S_{\lambda}^{n})\rightarrow0\tag{i} \notag \\
0\rightarrow\widetilde{KO}(S^{2n})\xrightarrow{p_o^{\ast
}}\widetilde{KO}(M)\xrightarrow{i_o^{\ast
}}\widetilde{KO}(\vee_{\lambda=1}^k S_{\lambda}^{n})\rightarrow0\tag{ii} \notag
\end{gather}
split.
Denote by $c\colon \widetilde{KO}(X)\rightarrow\widetilde{K}(X)$ the
complexification. Then, combining these assertions with (2.1) and
the fact that $\tilde{r}\circ c=2$, one easily verifies all the results in Table 2.
Now we prove assertions a) and b).
Firstly, by the Bott periodicity theorem \cite{bo}, we may assume
that the horizontal homomorphisms $\Sigma f_{u}^{\ast}$, $\Sigma
f_{o}^{\ast }$, $p_u^{\ast}$, $p_o^{\ast }$, $i_u^{\ast}$,
$i_o^{\ast }$ and $f_{u}^{\ast }$, $f_{o}^{\ast }$ in (2.1) are
induced by $\Sigma^9f$, $\Sigma^8p$, $\Sigma^8i$ and $\Sigma^8f$
respectively, where $\Sigma^j$ denotes the $j$-th iterated
suspension. Note that $\Sigma^9f\in \pi_{2n+8}(\vee
_{\lambda=1}^{k}S_{\lambda}^{n+9})$ and $\Sigma^8f\in
\pi_{2n+7}(\vee _{\lambda=1}^{k}S_{\lambda}^{n+8})$, and the groups
$\pi_{2n+8}(\vee _{\lambda=1}^{k}S_{\lambda}^{n+9})$ and
$\pi_{2n+7}(\vee _{\lambda=1}^{k}S_{\lambda}^{n+8})$ are all in
their stable range, that is, $\pi_{2n+8}(\vee
_{\lambda=1}^{k}S_{\lambda}^{n+9})\cong \pi_{2n+7}(\vee
_{\lambda=1}^{k}S_{\lambda}^{n+8})\cong
\oplus_{\lambda=1}^{k}\pi_{n-1}^s$, where $\pi_{n-1}^s$ is the
$(n-1)$-th stable homotopy group of spheres. Thus the fact that
$\Sigma f_{u}^{\ast}$ and $f^{\ast }_u$ are trivial can be deduced
easily from Table 1 and Adams \cite[Proposition 7.1]{ad}; the fact
that $\Sigma f^{\ast }_o$ and $f^{\ast }_o$ are trivial when
$n\nequiv{1}~(mod~8)$ follows from Table 1, while the fact that
$\Sigma f^{\ast }_o$ and $f^{\ast }_o$ are trivial when
$n\equiv~1~(mod~8)$ follows from Adams \cite[Proposition 7.1]{ad}.
This proves assertion a).
Secondly, (i) of assertion b) is true since
the abelian group $\widetilde{K}(\vee_{\lambda=1}^k
S_{\lambda}^{n})$ is free.
Finally we prove (ii) of assertion b). For the cases
$n\nequiv{1,2}~(mod~8)$ the proof is similar to that of (i).
Case $n\equiv 1~(mod~8)$. From (2.1), Table 1 and (i) we get that $\widetilde{K}(M)\cong\mathbb{Z}$ and
$\widetilde{KO}(M)$ is a finite group. Therefore, for each $x\in
\widetilde{KO}(M)$, we have $2x=\tilde{r}\circ c(x)=0$, which
implies (ii) of assertion b) in this case.
Case $n\equiv 2~(mod~8)$. By (i), we may write $\widetilde{K}(M)$ as
\begin{equation*}
\widetilde{K}(M)=\mathbb{Z}\xi\oplus \bigoplus
_{j=1}^{k}\mathbb{Z}\eta _{j},
\end{equation*}
where the generators $\xi$,
$\eta_j$, $1\le j\le k$, satisfy
$\xi=p_{u}^{\ast}(\omega_{C}^{2n})$,
$i_{u}^{\ast}(\eta_j)=t_{ju}^{\ast}(\omega_{C}^n)$. By the Hilton-Milnor theorem \cite[p. 511]{wh} we know that the group $\pi_{2n-1}(\vee_{j=1}^kS_j^{n})$ can be decomposed as:
\begin{equation*}
\pi_{2n-1}(\vee_{j=1}^kS_j^{n})\cong\bigl(\oplus_{j=1}^k \pi_{2n-1}(S_{j}^n)\bigr)\oplus\bigl(\oplus_{1\le i < j\le k}\pi_{2n-1}(S_{ij}^{2n-1})\bigr),
\end{equation*}
where $S_{ij}^{2n-1}=S^{2n-1}$, the group $\pi_{2n-1}(S_{j}^n)$ is embedded in $\pi_{2n-1}(\vee_{j=1}^kS_j^{n})$ by the natural inclusion, and the group $\pi_{2n-1}(S_{ij}^{2n-1})$ is embedded by composition with the Whitehead product of certain elements in $\pi_{n}(\vee_{j=1}^kS_j^{n})$. Hence by Duan and Wang \cite[Lemma 3]{dw}, the attaching map $f$ can be decomposed accordingly as:
\begin{equation*}
f=\Sigma_{j=1}^k f_{j} + g,
\end{equation*}
where
\begin{equation*}
f_j\in Im J \subset \pi_{2n-1}(S^n)
\end{equation*}
$J$ being the $J$-homomorphism and $g\in\oplus_{1\le i< j\le
k}\pi_{2n-1}(S_{ij}^{2n-1})$. Moreover, since the suspension of the Whitehead
product is trivial, it follows that the homotopy group
$\pi_{2n+7}(\vee_{j=1}^kS_j^{n+8})$ can be decomposed as:
\begin{equation*}
\pi_{2n+7}(\vee_{j=1}^kS_j^{n+8})\cong \oplus_{j=1}^k
\pi_{2n+7}(S_j^{n+8}),
\end{equation*}
and accordingly $\Sigma^8f$ can be decomposed as:
\begin{equation*}
\Sigma^8f=\oplus_{j=1}^k \Sigma^8 f_j \in\oplus_{j=1}^k
\pi_{2n+7}(S_j^{n+8})
\end{equation*}
with
\begin{equation*}
\Sigma^{8} f_j\in Im J \subset \pi_{2n+7}(S_j^{n+8})\cong \pi_{n-1}^s.
\end{equation*}
Denote by
$e_{C}(\Sigma^8 f_j)$ the $e_{C}$-invariant of $\Sigma^8 f_j$ defined in Adams \cite{ad}, and by $\Psi_{C}^{-1}\colon\widetilde{K}(M)\rightarrow\widetilde{K}(M)$ and
$\Psi_{R}^{-1}=id\colon\widetilde{KO}(M)\rightarrow\widetilde{KO}(M)$ the Adams operations,
where $id$ is the identity map.
Then it follows from Adams \cite[Proposition 7.19]{ad} that
\begin{equation*}
e_{C}(\Sigma^8 f_j)=0,
\end{equation*}
for each $1\le j\le k$.
Hence, by considering the map
\begin{equation*}
\tilde{t}_j\colon(\vee _{\lambda=1}^{k}S_{\lambda}^{n+8})\cup
_{\Sigma^8f}\mathbb{D}^{2n+8}\rightarrow S_{j}^{n+8}\cup
_{\Sigma^8f_j}\mathbb{D}^{2n+8}
\end{equation*}
which collapses $\vee_{\lambda\neq j}S_{\lambda}^{n+8}$ to a point, it is easy to see from \cite[Propositions 7.5 and 7.8]{ad} and the naturality of the Adams operations that
\begin{equation*}
\Psi_{C}^{-1}(\eta_j)=(-1)^{n/2}\eta_j+l\cdot((-1)^n-(-1)^{n/2})\xi\in\widetilde{K}(M)
\end{equation*}
for each $\eta_j$, and for some $l\in\mathbb{Z}$.
Therefore from
\begin{equation*}
\tilde{r}\circ\Psi_{C}^{-1}=\Psi_{R}^{-1}\circ\tilde{r},
\end{equation*}
we have
\begin{equation*}
\Psi_{R}^{-1}\tilde{r}(\eta_j)=-\tilde{r}(\eta_j)+2l\tilde{r}(\xi).
\end{equation*}
That is
\begin{equation*}
2\tilde{r}(\eta_j-l\xi)=0.
\end{equation*}
But from (2.1) and Table 1, we get
\begin{equation*}
i_{o}^{\ast}\tilde{r}(\eta_j-l\xi)=t_{jo}^{\ast}(\omega_{R}^n).
\end{equation*}
That is
\begin{equation*}
\tilde{r}(\eta_j-l\xi)\neq0\in\widetilde{KO}(M).
\end{equation*}
Thus (ii) of assertion b) in this case is established and the proof
is finished. }
\end{proof}
\begin{remark}\label{re}{ Since the induced homomorphisms $i^{\ast}\colon H^{n}(M;\mathbb{Z})\rightarrow
H^{n}(\vee _{\lambda=1}^{k}S_{\lambda}^{n};\mathbb{Z})$ and $p^{\ast}\colon H^{2n}(S^{2n};\mathbb{Z})
\rightarrow H^{2n}(M;\mathbb{Z})$ are both isomorphisms, and the
generator $\omega_{C}^{2n}\in\widetilde{K}(S^{2n})$ can be chosen
such that its $n$-th Chern class satisfies $c_n(\omega_{C}^{2n})=(n-1)!$ (cf. Hatcher \cite[p. 101]{ha}), from the naturality of Chern classes we get
\begin{equation*}c_{i}(\xi)=
\begin{cases}
(n-1)!, & i=n,\\
0, &\text{others}.
\end{cases}
\end{equation*}
Similarly,
when $n$ is even, $\eta_j$, $1\le j\le k$, can be chosen such that
\begin{align*}
c_{n/2}(\Sigma_{j=1}^kx_j\eta_j)=(n/2-1)!(x_1,x_2,...,x_k)\in
H^n(M;\mathbb{Z}),
\end{align*}
where $x_j\in\mathbb{Z}$ for all $1\le j\le k$ (since $H^n(M;\mathbb{Z})\cong\oplus_{j=1}^k\mathbb{Z}$, we can write an element $x\in H^n(M;\mathbb{Z})$, under the isomorphism $i^*$, in the form $(x_1,x_2,...,x_k)$).}
\end{remark}
\begin{remark}{ As in Remark \ref{re}, if we write $\chi$ as $(\chi_1,...,\chi_k)\in
H^{n}(M;\widetilde{KO}(S^{n}))$, where
\begin{equation*}\chi_j\in\widetilde{KO}(S^{n})\cong
\begin{cases}
\mathbb{Z}, & n\equiv 0~(mod~4),\\
\mathbb{Z}_2, & n\equiv 1,2~(mod~8),\\
0, &\text{others},
\end{cases}
\end{equation*}
then, since the tangent bundle of
the sphere is stably trivial, it follows that
\begin{align*}
i_o^{\ast }(TM)=\Sigma_{j=1}^k\chi_jt_{jo}^{\ast}(\omega_{R}^n).
\end{align*}}
\end{remark}
\section{The tangent bundle of $M$}
Denote by $\dim_{c}\alpha$ the complex dimension of $\alpha\in Vect_{C}(M)$.
When $n\equiv0~(mod~4)$, we set
\begin{align*}
\hat{A}(M)&=\langle\hat{\mathfrak{A}}(M),[M]\rangle, \\
\hat{A}_{C}(M)&=\langle ch(TM\otimes \mathbb{C})\cdot
\hat{\mathfrak{A}}(M),[M]\rangle,\\
\hat{A}_{\chi}(M)&=\langle ch(\Sigma_{j=1}^{k}\chi_j\eta_j)\cdot
\hat{\mathfrak{A}}(M),[M]\rangle,
\end{align*} where $ch$ denotes the
Chern character, and $\hat{\mathfrak{A}}(M)$ is the
$\hat{\mathfrak{A}}$-class of $M$ (cf. Atiyah and Hirzebruch \cite{ah}).
It follows from the differentiable Riemann-Roch theorem (cf. Atiyah
and Hirzebruch \cite{ah}) that $\hat{A}(M)$, $\hat{A}_{C}(M)$ and
$\hat{A}_{\chi}(M)$ are all integers. In particular,
$\hat{A}_{\chi}(M)$ is even when $\chi\equiv0~(mod~2)$.
Using the notation above, we get
\begin{lemma}{ Let $M$ be a closed oriented
$(n-1)$-connected $2n$-dimensional smooth manifold with $ n\ge3$. Then $TM$ can be expressed in terms of the generators $\gamma$,~$\zeta_j$,~$1\le j\le
k$ of $\widetilde{KO}(M)$ as
follows:
\begin{equation*}TM=
\begin{cases}
l\gamma+\Sigma_{j=1}^k \chi_j\zeta_j, & n\equiv0, 2, 4~(mod~8),\\
l\gamma, & n\equiv6~(mod~8),\\
\Sigma_{j=1}^k \chi_j\zeta_j, & n\equiv1~(mod~8),\\
0, & n\equiv3,5,7~(mod~8),
\end{cases}
\end{equation*}
where
\begin{equation*}l=
\begin{cases}
\hat{A}_{C}(M)+(\Sigma_{j=1}^ka_{n/4}\chi_j\dim_{c}\eta_j-2n)\hat{A}(M)-a_{n/4}\hat{A}_{\chi}(M),
& n\equiv0~(mod~4),\\
-\frac{p_{n/2}(M)}{2((n-1)!)}, & n\equiv2~(mod~4).
\end{cases}
\end{equation*}}
\end{lemma}
\begin{proof}{ Case $n\equiv0~(mod~8)$. By Remark 2.3, we may
suppose that
\begin{equation*}
TM=l\gamma+\Sigma_{j=1}^k \chi_j\zeta_j\in\widetilde{KO}(M),
\end{equation*}
where $l\in\mathbb{Z}$. Hence from $\tilde{r}\circ c=2$ and Table 2, we have
\begin{align}\label{c}
c(TM)&=TM\otimes\mathbb{C}\\
&=l\xi+\Sigma_{j=1}^k
\chi_j\eta_j\in\widetilde{K}(M).\notag
\end{align}
Now if we regard $\xi$ and $\eta_j$ as complex vector bundles, then
from (\ref{c}) we have
\begin{equation*}
TM\otimes\mathbb{C}\oplus\varepsilon^s\cong l\xi\oplus\bigoplus_{j=1}^k\chi_j\eta_j\oplus\varepsilon^t,
\end{equation*}
for some $s,~t\in\mathbb{Z}$ satisfying
\begin{equation*}
s-t~=~l\cdot
\dim_{c}\xi+\Sigma_{j=1}^k\chi_j\dim_{c}\eta_j-2n,
\end{equation*}
where $\varepsilon^j$ is the trivial complex vector bundle of dimension $j$. Thus we have
\begin{align*}
\hat{A}_{C}(M)=&-(l\cdot
\dim_{c}\xi+\Sigma_{j=1}^k\chi_j\dim_{c}\eta_j-2n)\hat{A}(M)\\
&\text{ } +\langle ch(l\xi+\Sigma_{j=1}^k \chi_j\eta_j)\cdot
\hat{\mathfrak{A}}(M),[M]\rangle,
\end{align*}
that is
\begin{equation*}
l=\hat{A}_{C}(M)+(\Sigma_{j=1}^k\chi_j\dim_{c}\eta_j-2n)\hat{A}(M)-\hat{A}_{\chi}(M).
\end{equation*}
Cases $n\equiv2, 4, 6~(mod~8)$ can be proved in the same way
as above. Note that in the case
$n\equiv2~(mod~4)$ the calculation of $\hat{A}_{C}(M)$ is
replaced by the calculation of the $n$-th Chern class of $TM\otimes\mathbb{C}$.
Case $n\equiv1~(mod~4)$. From Milnor and Kervaire~\cite[Lemma~1]{mk} and Adams~\cite[Theorem~1.3]{ad}, we get that $\chi=0$ implies $TM=0\in\widetilde{KO}(M)$. Then
i) case $n\equiv5~(mod~8)$. $TM=0\in\widetilde{KO}(M)$ because
$\chi=0$ in this case.
ii) case $n\equiv1~(mod~8)$. By Remark 2.3, we may suppose that
\begin{align*}
TM=l\gamma+\Sigma_{j=1}^k \chi_j\zeta_j,
\end{align*}where
$l\in\mathbb{Z}_2$. Then if $\chi=0$, we have $l=0$ because $TM=0$.
If $\chi\neq0$ and $l\neq0$, suppose that $\chi_{\lambda}\neq0$ for
some $1\le \lambda \le k$, set
\begin{equation*}\zeta_{j}^{\prime}=
\begin{cases}
\zeta_{j} & \text{if } j\neq\lambda, \\
\zeta_{j}+\gamma & \text{if } j=\lambda. \\
\end{cases}
\end{equation*}
Hence $\gamma$, $\zeta_{j}^{\prime}$, $1\le j\le k$, which satisfy
the conditions in Lemma 2.1, are also the generators of
$\widetilde{KO}(M)$, and we have $TM=\Sigma_{j=1}^k
\chi_j\zeta_j^{\prime}$. This implies that the generators $\gamma$,
$\zeta_{j}$, $1\le j\le k$, of $\widetilde{KO}(M)$ in Lemma 2.1 can
always be chosen such that $TM=\Sigma_{j=1}^k \chi_j\zeta_j$.
Case $n\equiv3~(mod~4)$. $TM=0$ because $\widetilde{KO}(M)=0$
in this case.
}
\end{proof}
\section{Almost complex structure on $M$}
We are now ready to prove Theorem 1 and Theorem 2.
\begin{proof}[Proof of Theorem 1.]{ Cases 1) and 2) $n\equiv0~(mod~4)$. In these cases, we get that (cf. Wall~\cite[pp.~179--180]{wa})
\begin{align*}
\hat{A}(M)& =-\frac{B_{n/2}}{2(n!)}p_{n/2}(M)+\frac{1}{2}\{\frac{B_{n/4}^{2}
}{4((n/2)!)^{2}}+\frac{B_{n/2}}{2(n!)}\}I(p_{n/4}(M),p_{n/4}(M)), \\
\hat{\mathfrak{A}}(M)&
=1-\frac{B_{n/4}}{2((n/2)!)}p_{n/4}(M)+\hat{A}(M),\\
ch(TM\otimes \mathbb{C})& =2n+(-1)^{n/4+1}\frac{p_{n/4}(M)}{(n/2-1)!}+\frac{
I(p_{n/4}(M), p_{n/4}(M))-2p_{n/2}(M)}{2( (n-1)!)}.
\end{align*}
Hence by Lemma 1.1 we have
\begin{align}\label{a}
\hat{A}_{C}(M)= \text{ }& 2n\{1+\frac{1}{B_{n/2}}+\frac{(2^{n-1}-1)}{(2^{n/2}-1)^{2}}
\cdot \frac{(-1)^{n/4}B_{n/2}-B_{n/4}}{B_{n/2}B_{n/4}}\}\hat{A}(M) \\
& +\frac{1}{(2^{n/2}-1)^{2}}\cdot \frac{(-1)^{n/4}B_{n/2}-B_{n/4}}{
B_{n/2}B_{n/4}}\cdot \frac{n\tau }{2^{n}}.\notag
\end{align}
Moreover, since the denominator of $B_{m}$, when written in lowest terms, is always square-free and divisible
by 2 (cf. Milnor~\cite[p.
284]{ms}), we may set $B_m~=~b_m/(2c_m)$, where $c_{m}$ and $b_{m}$ are odd integers. Then, multiplying each side of (\ref{a}) by $(2^{n/2}-1)^2\cdot b_{n/2} \cdot b_{n/4}$, we get that
\begin{align*}
(2^{n/2}-1)^2 b_{n/2} b_{n/4} \hat{A}_{C}(M)=&~2n\{(2^{n/2}-1)^2\cdot b_{n/2} \cdot b_{n/4}
+ 2 (2^{n/2}-1)^2 b_{n/4} c_{n/2}\\
& + 2(2^{n-1}-1)((-1)^{n/4}b_{n/2}c_{n/4}-b_{n/4}c_{n/2})\}\hat{A}(M)\\
& +2((-1)^{n/4}b_{n/2}c_{n/4}-b_{n/4}c_{n/2})\frac{n\tau}{2^n}.
\end{align*}
Since $\hat{A}_{C}(M)$ and $\hat{A}(M)$ are integers and $(2^{n/2}-1)^2\cdot b_{n/2} \cdot b_{n/4}$ is an odd integer, it follows that
$$\frac{(-1)^{n/4}B_{n/2}-B_{n/4}}{
B_{n/2}B_{n/4}}\cdot \frac{n\tau }{2^{n}}$$
is a 2-adic integer, and hence
\begin{align*}
\hat{A}_{C}(M)\equiv 0~(mod~ 2)\iff \frac{(-1)^{n/4}B_{n/2}-B_{n/4}}{
B_{n/2}B_{n/4}}\cdot \frac{n\tau }{2^{n}}\equiv 0~(mod~2) .
\end{align*}
Then by combining these facts with Lemma 2.1 and Lemma 3.1, one verifies the
results in these cases.
Cases 3) and 4) $n\nequiv{0}~(mod~4)$ can be deduced easily
from Lemma 2.1 and Lemma 3.1. }
\end{proof}
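For illustration (a remark added here, in the Bernoulli number convention of Milnor~\cite{ms}): for $n=8$ one has $B_{n/2}=B_{4}=1/30$ and $B_{n/4}=B_{2}=1/30$, so that, in the notation of the proof above, $b_{4}=b_{2}=1$ and $c_{4}=c_{2}=15$ are indeed odd.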
To prove Theorem 2, we need the following lemma (see Sutherland~\cite{su}
for the proof).
\begin{lemma}{Let $N$ be a closed smooth $2n$
-manifold. Then $N$ admits an almost complex structure if and only if it admits a stable almost complex
structure $\alpha$ satisfying
$c_{n}(\alpha )=e(N)$, where $e(N)$ is the Euler class of $N$.}
\end{lemma}
\begin{proof}[Proof of Theorem 2.]{Firstly,~it follows from Lemma~4.1 that~$M$~admits an almost complex structure if and only if there exists an element~$\alpha\in\widetilde{K}(M)$~such that
\begin{equation}
\label{jf} \left\{ \begin{aligned} \tilde{r}(\alpha)&=TM
\in\widetilde{KO}(M),\\
c_n(\alpha)&=e(M).
\end{aligned} \right.
\end{equation}
Secondly, if there exists an element~$\alpha \in \widetilde{K}(M)$~such that~$\tilde{r}(\alpha
)=TM\in
\widetilde{KO}(M)$,~then we have the following identity~(cf.~Milnor~\cite[p.~177]{ms}):
\begin{equation}\label{ch}
(\sum_j(-1)^jc_j(\alpha))\cdot(\sum_jc_j(\alpha))=\sum_j(-1)^jp_j(M).
\end{equation}
Now we prove Theorem 2 case by case.
Case 1) $n\equiv0~(mod~4)$. In this case $e(M)=k+2$.~From Lemma 4.1
we know that $M$ admits an almost complex structure if and only if
there exists an element~$\alpha\in\widetilde{K}(M)$~such that
(\ref{jf}) is satisfied. Now (\ref{ch}) becomes
\begin{equation*}
(1+c_{n/2}(\alpha)+c_{n}(\alpha))\cdot(1+c_{n/2}(\alpha)+c_{n}(\alpha))=1+(-1)^{n/4}p_{n/4}(M)+p_{n/2}(M),
\end{equation*}
from which it follows that
\begin{equation*}
c_{n/2}(\alpha)=(-1)^{n/4}\frac{1}{2}p_{n/4}(M),
\end{equation*}
hence
\begin{equation*}
c_{n}(\alpha)=\frac{1}{2}p_{n/2}(M)-\frac{1}{8}I(p_{n/4}(M),~p_{n/4}(M)).
\end{equation*}
Therefore from~(\ref{jf})~we get that $M$
admits an almost complex structure if and only if $M$ admits a stable almost complex structure and satisfies
\begin{equation*}
4p_{n/2}(M)-I(p_{n/4}(M), p_{n/4}(M))=8(k+2).
\end{equation*}
Case 2) $n\equiv2~(mod~8)$. In this case $e(M)=k+2$.~Set $\alpha
=l\xi+\Sigma_{j=1}^kx_j\eta_j\in\widetilde{K}(M)$ where
$l\in\mathbb{Z}$ is the integer as in Lemma 3.1 and
$x_j\in\mathbb{Z}$,~such that~$x_j\equiv\chi_j~(mod~2)$. Then from
Lemma 2.1 and Lemma 3.1, we know that
$\tilde{r}(\alpha)=TM\in\widetilde{KO}(M)$. Hence by~(\ref{jf}), we
see that $M$~admits an almost complex structure if and only if
\begin{equation*}
\left\{ \begin{aligned} &\alpha =l\xi+\Sigma_{j=1}^kx_j\eta_j\in\widetilde{K}(M),\\
&c_n(\alpha)=e(M).
\end{aligned} \right.
\end{equation*}
Let~$x=(x_1,x_2,...,x_k)\in H^n(M;\mathbb{Z})$.~Then by Remark~2.2
\begin{equation*}
c_{n/2}(\alpha)=(n/2-1)!x.
\end{equation*}
Now (\ref{ch}) is
\begin{equation*}
(1-c_{n/2}(\alpha)+c_{n}(\alpha))\cdot(1+c_{n/2}(\alpha)+c_{n}(\alpha))=1-p_{n/2}(M),
\end{equation*}
therefore
\begin{align*}
c_{n}(\alpha)&=\frac{1}{2}(I(c_{n/2}(\alpha),c_{n/2}(\alpha))-p_{n/2}(M))\\
&=\frac{1}{2}\{((n/2-1)!)^{2}I(x,~x)-p_{n/2}(M)\}.
\end{align*}
Thus it follows from ~(\ref{jf}) that~$M$~admits an almost complex structure if and only if there exists an element~$x\in
H^n(M;\mathbb{Z})$~such that
\begin{equation*}
\left\{ \begin{aligned}&x\equiv\chi~(mod~2),\\
&I(x, x)=(2(k+2)+p_{n/2}(M))/((n/2-1)!)^{2}.
\end{aligned} \right.
\end{equation*}
Case 3) $n\equiv6~(mod~8)$. The proof is similar to the proof of case 2).
Case 4) $n\equiv1~(mod~4)$. Now $e(M)=2-k$.~
From~(\ref{jf}),~Lemma~2.1,~Lemma~3.1~and Remark~2.2, we see that
$M$ admits an almost complex structure if and only if
\begin{equation*}
\left\{ \begin{aligned}
&\chi =0,\\
&\alpha =2a\xi,\\
&2a(n-1)! =2-k,
\end{aligned} \right.
\end{equation*}
where~$a\in\mathbb{Z}$. Hence by Lemma 3.1 and Lemma 2.1, $M$~admits
an almost complex structure if and only if~$M$~admits a stable
almost complex structure and
\begin{equation*}
2(n-1)! \mid (2-k).
\end{equation*}
Case 5) $n\equiv3~(mod~4)$. The proof is similar to the proof of case 4).
}
\end{proof}
\end{document}
\begin{document}
\title{Tits construction of the exceptional simple Lie algebras}
\author[Alberto Elduque]{Alberto Elduque$^{\star}$}
\thanks{$^{\star}$ Supported by the Spanish Ministerio de
Educaci\'on y Ciencia
and FEDER (MTM 2007-67884-C04-02) and by the
Diputaci\'on General de Arag\'on (Grupo de Investigaci\'on de
\'Algebra)}
\address{Departamento de Matem\'aticas e
Instituto Universitario de Matem\'aticas y Aplicaciones,
Universidad de Zaragoza, 50009 Zaragoza, Spain}
\email{[email protected]}
\dedicatory{Dedicated to Professor Jacques Tits on the occasion of his eightieth birthday}
\date{\today}
\subjclass[2000]{Primary: 17B60. Secondary: 17B50, 17B25, 17C50, 17A75}
\keywords{Tits construction, Lie algebra, exceptional, simple, Jordan, composition}
\begin{abstract}
The classical Tits construction of the exceptional simple Lie algebras has been extended in a couple of directions by using either Jordan superalgebras or composition superalgebras. These extensions are reviewed here. The outcome has been the discovery of some new simple modular Lie superalgebras.
\end{abstract}
\maketitle
\section{Introduction}\label{se:Introduction}
In 1966, Tits provided a beautiful unified construction of the exceptional simple Lie algebras $F_4$, $E_6$, $E_7$ and $E_8$ over fields of characteristic $\ne 2,3$ \cite{Tits66}. This construction depends on two algebras: an alternative algebra of degree $2$ and a Jordan algebra of degree $3$. The most interesting cases appear when semisimple alternative algebras and central simple Jordan algebras are considered in this construction. Then the alternative algebra becomes a composition algebra, and hence its dimension is restricted to $1$, $2$, $4$ or $8$, while the Jordan algebra becomes a form of the Jordan algebra of hermitian $3\times 3$ matrices over a second composition algebra. Freudenthal's Magic Square (\cite{Freu64}) is obtained with these ingredients.
In recent years, some extensions of the Tits construction have been considered. Benkart and Zelmanov \cite{BZ96} considered Lie algebras graded by root systems. They realized that the Lie algebras graded over either the root system $G_2$ or $F_4$ could be described in terms of a generalized Tits construction, and that some specific simple Jordan superalgebras (of dimension $3$ and $4$) could be plugged into this Tits construction, instead of Jordan algebras (see also \cite{BE03}). The outcome is that Freudenthal's Magic Square can be enlarged to a rectangle that includes, in characteristic $\ne 2,3,5$, the exceptional simple Lie superalgebras $D(2,1;t)$, $G(3)$ and $F(4)$ in Kac's classification \cite{Kac77}. Moreover, in characteristic $5$ another simple Jordan superalgebra, Kac's ten-dimensional algebra, can be used too, and this led to the discovery of a new simple modular Lie superalgebra specific to this characteristic \cite{EldModular}, whose even part is of type $B_5$ and whose odd part is the spin module for the even part.
In a different direction, Freudenthal's Magic Square presents a symmetry which is not obvious from the Tits construction. More symmetric constructions have been considered by different authors (\cite{V}, \cite{AF}, \cite{LM1}, \cite{LM2}, \cite{BS1}, \cite{BS2} or \cite{EldIbero1}). These constructions are based on two composition algebras and their triality Lie algebras, and they are symmetric in the two composition algebras involved, whence the symmetry of the outcome: the Magic Square. Besides, these constructions are also valid over fields of characteristic $3$.
But over fields of characteristic $3$ there are composition superalgebras with nontrivial odd part, as this phenomenon is specific to characteristics $2$ and $3$ (in characteristic $2$, these superalgebras are just composition algebras with a grading over ${\mathbb Z}_2$). These composition superalgebras in characteristic $3$ can be plugged into the symmetric construction of the Magic Square. The outcome is a larger square, which extends the Magic Square with the addition of two rows and columns filled with (mostly) simple Lie superalgebras. With one exception, the Lie superalgebras that appear have no counterpart in Kac's classification. There are $10$ such new simple modular Lie superalgebras in characteristic $3$.
Moreover, it turns out that the simple Lie superalgebra in characteristic $5$ mentioned above, obtained by means of the Tits construction with ingredients a Cayley algebra and the ten-dimensional Kac Jordan superalgebra, and the $10$ simple Lie superalgebras that appear in the larger square in characteristic $3$ almost exhaust (there are just $3$ other superalgebras) the list of exceptional simple modular Lie superalgebras with a Cartan matrix in characteristic $>2$ in the recent classification by Bouarroudj, Grozman and Leites \cite{BGLgordo}.
The aim of this paper is to review these two directions in which the Tits construction has been extended.
In the next section, the classical Tits construction will be recalled. Then a section will be devoted to see how some Jordan superalgebras can be used instead of Jordan algebras. This will result in a larger \emph{Supermagic Rectangle} which contains the exceptional simple classical Lie superalgebras in Kac's classification and a new simple Lie superalgebra in characteristic $5$.
The fourth section will be devoted to explaining the more symmetric construction of Freudenthal's Magic Square in terms of two composition algebras and then the fifth section will show how this symmetric construction allows one, in characteristic $3$, to enlarge the Magic Square to a \emph{Supermagic Square} with a whole family of new simple Lie superalgebras which appear only in this characteristic. Some related comments and remarks will be given in a final section.
No proofs will be given, but references will be provided throughout. For the sake of simplicity, all the vector spaces and algebras and superalgebras considered will be defined over an algebraically closed ground field ${\mathbb F}$ of characteristic $\ne 2$.
\section{Tits construction}\label{se:Tits}
As mentioned in the introduction, in 1966 Tits (\cite{Tits66}) gave a unified construction of the exceptional simple classical Lie algebras by means of two
ingredients: a composition algebra and a degree three simple
Jordan algebra. Here this construction will be reviewed following closely
\cite[\S 1]{EldTits3} and the approach used by Benkart and Zelmanov in
\cite{BZ96}.
Let $C$ be a composition algebra over the ground field ${\mathbb F}$
with norm $n$. Thus, $C$ is a finite dimensional unital algebra over ${\mathbb F}$,
endowed with the nondegenerate quadratic form $n:C\rightarrow {\mathbb F}$ such that
$n(ab)=n(a)n(b)$ for any $a,b\in C$. Then, each element satisfies
the degree $2$ equation
\[
a^2-t_C(a)a+n(a)1=0,
\]
where $t_C(a)=n(a,1)\,\bigl(=n(a+1)-n(a)-1\bigr)$ is called the
\emph{trace}. The subspace of trace zero elements will be denoted by
$C^0$.
Moreover, for any $a,b\in C$, the linear map $D_{a,b}:C\rightarrow
C$ given by
\[
D_{a,b}(c)=[[a,b],c]+3(a,c,b)
\]
where $[a,b]=ab-ba$ is the commutator, and $(a,c,b)=(ac)b-a(cb)$ the
associator, is a derivation: the \emph{inner derivation} determined
by the elements $a,b$ (see \cite[Chapter III, \S 8]{Schafer}). These
derivations satisfy
\[
D_{a,b}=-D_{b,a},\quad D_{ab,c}+D_{bc,a}+D_{ca,b}=0,
\]
for any $a,b,c\in C$. The linear span of these derivations will be
denoted by $\inder C$. It is an ideal of the whole Lie algebra of
derivations $\der C$ and, if the characteristic is $\ne 3$, it is
the whole $\der C$.
The dimension of $C$ is restricted to $1$, $2$, $4$ (quaternion
algebras) and $8$ (Cayley algebras). Over our
algebraically closed field ${\mathbb F}$, the only composition algebras are, up to
isomorphism, the ground field ${\mathbb F}$, the cartesian product of two
copies of the ground field ${\mathbb F}\times {\mathbb F}$, the algebra of two by two
matrices $\Mat_2({\mathbb F})$ (where the norm of a matrix is its determinant), and the split Cayley algebra $C({\mathbb F})$. (See, for
instance, \cite[Chapter 2]{ZSSS}.)
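For instance (a quick illustration added here), for $C=\Mat_2({\mathbb F})$ the norm is $n(a)=\det(a)$, the trace is $t_C(a)=\mathrm{tr}(a)$, and the degree $2$ equation above is just the Cayley-Hamilton identity $a^2-\mathrm{tr}(a)a+\det(a)1=0$ for $2\times 2$ matrices.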
Now, let $J$ be a unital Jordan algebra with a \emph{normalized
trace} $t_J:J\rightarrow {\mathbb F}$. That is, $J$ is a commutative algebra over ${\mathbb F}$ which satisfies the Jordan identity:
\[
x^2(yx)=(x^2y)x
\]
for any $x,y\in J$, and $t_J$ is a linear map such
that $t_J(1)=1$ and $t_J\bigl((xy)z\bigr)=t_J\bigl(x(yz)\bigr)$ for
any $x,y,z\in J$. The archetypical example of a Jordan algebra is the subspace of symmetric elements in an associative algebra with an involution. This subspace is not closed under the associative product, but it is indeed closed under the symmetrized product $\frac{1}{2}(xy+yx)$. With this symmetrized product, it becomes a Jordan algebra. (One may consult \cite{Jac68} for the main properties of finite dimensional Jordan algebras.)
Then, for such a unital Jordan algebra, there is the decomposition $J={\mathbb F} 1\oplus J^0$, where $J^0=\{x\in J:
t_J(x)=0\}$. For any $x,y\in J^0$, the product $xy$ splits as
\[
xy=t_J(xy)1+x*y,
\]
with $x*y\in J^0$. Then $x*y=xy-t_J(xy)1$ gives a commutative
multiplication on $J^0$. The linear map $d_{x,y}:J\rightarrow J$
defined by
\[
d_{x,y}(z)=x(yz)-y(xz),
\]
is the \emph{inner derivation} of $J$ determined by the elements $x$ and
$y$. Since $d_{1,x}=0$ for any $x$, it is enough to deal with the
inner derivations $d_{x,y}$, with $x,y\in J^0$. The linear span of
these derivations will be denoted by $\inder J$, which is an ideal
of the whole Lie algebra of derivations $\der J$.
Given $C$ and $J$ as before, consider the space
\[
{\mathcal T}(C,J)=\inder C\oplus \bigl(C^0\otimes J^0\bigr)\oplus
\inder J
\]
(unadorned tensor products are always considered over ${\mathbb F}$), with the
anticommutative multiplication $[.,.]$ specified by:
\begin{equation}\label{eq:TCJproduct}
\begin{split}
\bullet&\ \textrm{$\inder C$ and $\inder J$ are Lie subalgebras,}\\
\bullet&\ [\inder C,\inder J]=0,\\
\bullet&\ [D,a\otimes x]=D(a)\otimes x,\ [d,a\otimes x]=a\otimes
d(x),\\
\bullet&\ [a\otimes x,b\otimes y]=t_J(xy)D_{a,b}+\bigl([a,b]\otimes
x*y\bigr)+2t_C(ab)d_{x,y},
\end{split}
\end{equation}
for all $D\in \inder C$, $d\in \inder J$, $a,b\in C^0$, and
$x,y\in J^0$.
The conditions for ${\mathcal T}(C,J)$ to be a Lie algebra are the
following:
\[
\begin{split}
\textrm{(i)}&\quad \displaystyle{\sum_{\circlearrowleft}
t_C\bigl([a_{1}, a_{2}] a_{3}\bigr)\,
d_{(x_1 * x_2), x_3}}=0,\\[6pt]
\textrm{(ii)}&\quad \displaystyle{\sum_{\circlearrowleft}
t_J\bigl( (x_1 * x_2) x_{3}\bigr)
\,D_{[a_1, a_2], a_3}}=0,\\[6pt]
\textrm{(iii)}&\quad \displaystyle{\sum_{\circlearrowleft}
\Bigl(D_{a_1,a_2}(a_3) \otimes t_J\bigl(x_1
x_2\bigr) x_3} + [[a_1, a_2],a_3] \otimes (x_1 * x_2)* x_3\\[-6pt]
&\null\hspace{2in} +2
t_C(a_1 a_2) a_3\otimes d_{x_1, x_2}(x_3)\Bigr)=0,
\end{split}
\]
\noindent
for any $a_1,a_2,a_{3} \in C^0$ and any $x_1,x_2,x_3 \in J^0$. The
notation ``$\displaystyle{\sum_\circlearrowleft}$'' indicates
summation over the cyclic permutation of the variables.
These conditions appear in \cite[Proposition 1.5]{BE03}, but there
they are stated in the more general setting of superalgebras, a
setting we will deal with later on. In particular, over fields of
characteristic $\ne 3$, these conditions are fulfilled if $J$ is a
separable Jordan algebra of degree three over ${\mathbb F}$ and
$t_J=\frac{1}{3}T$, where $T$ denotes the generic trace of $J$ (see
for instance \cite{Jac68}).
If the characteristic of our algebraically closed field ${\mathbb F}$ is $\ne 3$,
the degree $3$ simple Jordan algebras are, up to isomorphism, the
algebras of $3\times 3$ hermitian matrices over a unital composition
algebra: $H_3(C')$ (see \cite{Jac68}). By varying $C$ and $C'$,
${\mathcal T}(C,H_3(C'))$ is a classical Lie algebra, and
Freudenthal's Magic Square (Table \ref{ta:FMS}) is obtained.
\begin{table}[h!]
\begin{center}
\begin{tabular}{c|cccc}
${\mathcal T}(C,J)$&\vrule height 14pt width 0ptdepth 2pt$H_3({\mathbb F})$&$H_3({\mathbb F}\times{\mathbb F})$
&$H_3(\Mat_2({\mathbb F}))$&$H_3(C({\mathbb F}))$\\
\cline{1-5}
\vrule height 14pt width 0ptdepth 2pt${\mathbb F}$&$A_1$&$A_2$&$C_3$&$F_4$\\
\vrule height 14pt width 0ptdepth 2pt${\mathbb F}\times {\mathbb F}$&$A_2$&$A_2\oplus A_2$&$A_5$&$E_6$\\
\vrule height 14pt width 0ptdepth 2pt$\Mat_2({\mathbb F})$&$C_3$&$A_5$&$D_6$&$E_7$\\
\vrule height 14pt width 0ptdepth 2pt$C({\mathbb F})$&$F_4$&$E_6$&$E_7$&$E_8$
\end{tabular}
\null
\end{center}
\caption{Freudenthal's Magic Square}\label{ta:FMS}
\end{table}
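As an illustrative dimension count (added here; it uses that $\dim\inder C({\mathbb F})=14$ in characteristic $\ne 3$ and that $\dim\inder H_3(C({\mathbb F}))=52$): for $C=C({\mathbb F})$ and $J=H_3(C({\mathbb F}))$ one gets
\[
\dim{\mathcal T}(C,J)=14+7\cdot 26+52=248,
\]
which is indeed the dimension of $E_8$, in agreement with the last entry of Table \ref{ta:FMS}.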
Let us have a look at the rows in this Tits construction of
Freudenthal's Magic Square.
\noindent\textbf{First row:}\quad Here $C={\mathbb F}$, so $C^0=0$ and
$\inder C=0$. Thus, ${\mathcal T}(C,J)$ is just $\inder J$. In
particular, ${\mathcal T}({\mathbb F},J)$ makes sense and is a Lie algebra for any
Jordan algebra $J$.
\noindent\textbf{Second row:}\quad Here $C={\mathbb F}\times {\mathbb F}$, so $C^0$
consists of the scalar multiples of $(1,-1)$, and thus ${\mathcal T}(C,J)$
can be identified with $J^0\oplus \inder J$. The elements in $J^0$
multiply as $[x,y]=4d_{x,y}$ because
$t_C\bigl((1,-1)^2\bigr)=t_C\bigl((1,1)\bigr)=2$. Given any unital Jordan
algebra $J$ with a normalized trace, its \emph{Lie multiplication algebra} ${\mathcal L}(J)$ (see
\cite{Schafer}) is the Lie subalgebra of the general linear Lie
algebra $\mathfrak{gl}(J)$ generated by $l_J=\{l_x:x\in J\}$, where
$l_x:y\mapsto xy$ denotes the left multiplication by $x$. Then
${\mathcal L}(J)=l_J\oplus\inder J$. The map
\[
\begin{split}
{\mathcal T}(C,J)&\rightarrow {\mathcal L}(J)\\
(1,-1)\otimes x+d&\mapsto 2l_x+d,
\end{split}
\]
is a monomorphism. Its image is the Lie subalgebra ${\mathcal L}^0(J)=l_{J^0}\oplus \inder J$. Again this shows that ${\mathcal T}({\mathbb F}\times {\mathbb F},J)$ makes sense and is a
Lie algebra for any Jordan algebra with a normalized trace. Given
any separable Jordan algebra of degree $3$ in
characteristic $\ne 3$, ${\mathcal L}^0(J)$ is precisely the derived
algebra $[{\mathcal L}(J),{\mathcal L}(J)]$. This latter Lie algebra makes sense
for any Jordan algebra over any field. (Recall that the
characteristic is assumed to be $\ne 2$ throughout.)
\noindent\textbf{Third row:}\quad Here $C=\Mat_2({\mathbb F})$. Under these circumstances, $C^0$ is the simple
three-dimensional Lie algebra of $2\times 2$ trace zero matrices $\mathfrak{sl}_2({\mathbb F})$ under the commutator $[a,b]=ab-ba$.
Besides, for any $a,b\in C^0$, the inner derivation $D_{a,b}$ is
just $\ad_{[a,b]}$, since $C$ is associative. Hence, $\inder C$
can be identified with $C^0$, and ${\mathcal T}(C,J)$ with
\[
C^0\oplus \bigl(C^0\otimes J^0\bigr)\oplus\inder
J\simeq\bigl(C^0\otimes ({\mathbb F}1\oplus J^0)\bigr)\oplus\inder J\simeq
\bigl(C^0\otimes J\bigr)\oplus \inder J,
\]
and the Lie bracket \eqref{eq:TCJproduct} in ${\mathcal T}(C,J)$ becomes
the bracket in $\bigl(\mathfrak{sl}_2({\mathbb F})\otimes J\bigr)\oplus \inder J$ given by
\[
\begin{split}
\bullet&\ \textrm{$\inder J$ is a Lie subalgebra,}\\
\bullet&\ [d,a\otimes x]=a\otimes d(x),\\
\bullet&\ [a\otimes x,b\otimes y]=\bigl([a,b]\otimes
xy\bigr)+2t_C(ab)d_{x,y},
\end{split}
\]
for any $a,b\in \mathfrak{sl}_2({\mathbb F})$, $x,y\in J$, and $d\in\inder J$, since
$t_J(xy)1+x*y=xy$ for any $x,y\in J$. This bracket makes sense for
any Jordan algebra (not necessarily endowed with a normalized
trace), it goes back to \cite{Tits62}, and the resulting Lie algebra is the well-known Tits-Kantor-Koecher Lie algebra of the Jordan algebra $J$ (\cite{Koecher}, \cite{Kantor}).
\noindent\textbf{Fourth row:}\quad In the last row, $C$ is a Cayley
algebra over ${\mathbb F}$. If the characteristic of ${\mathbb F}$ is
$\ne 3$, the Lie algebra $\der C=\inder C$ is a simple Lie
algebra of type $G_2$ (dimension $14$, and note that this is no longer true in characteristic $3$ \cite{AMEN}), and $C^0$ is its unique
seven dimensional irreducible module. In particular, the Lie
algebra ${\mathcal T}\bigl(C({\mathbb F}),J\bigr)$ is a Lie algebra graded over the
root system $G_2$. These $G_2$-graded Lie algebras contain a simple
subalgebra isomorphic to $\der C({\mathbb F})$ such that, as modules for
this subalgebra, they are direct sums of copies of modules of three
types: adjoint, the irreducible seven dimensional module, and the
trivial one dimensional module. These Lie algebras have been
determined in \cite{BZ96} and the possible Jordan algebras involved
are essentially the degree $3$ Jordan algebras.
\section{Jordan superalgebras}\label{se:JordanSuperalgebras}
In Kac's classification of the simple finite dimensional Lie superalgebras over an algebraically closed field of characteristic $0$, there appear three exceptional situations (that is, the superalgebras do not belong to families where the dimension grows indefinitely).
A description of these
simple Lie superalgebras can be found in Kac's seminal paper \cite{Kac77}. Over an algebraically closed field ${\mathbb F}$
of characteristic $\ne 2,3$, these exceptional Lie superalgebras
may be characterized by the following properties (see
\cite[Proposition 2.1.1]{Kac77}, where the characteristic is assumed to be $0$, but this restriction is not necessary here):
\begin{enumerate}
\item There is a unique $40$-dimensional simple classical Lie
superalgebra $F(4)$ for which the even part $F(4)_{\bar 0}$ is a
Lie algebra of type $B_3\oplus A_1$, and the
representation of $F(4)_{\bar 0}$ on the odd part $F(4)_{\bar 1}$
is the tensor product of the $8$-dimensional spin representation of $B_3$
with the natural $2$-dimensional representation of $A_1$.
\item There is a unique $31$-dimensional simple classical Lie
superalgebra $G(3)$ for which $G(3)_{\bar 0}$ is a
Lie algebra of type $G_2\oplus A_1$, and its
representation on $G(3)_{\bar 1}$ is the tensor product of
the $7$-dimensional irreducible representation of $G_2$
with the natural $2$-dimensional representation of $A_1$.
\item There is a one-parameter family of $17$-dimensional simple
classical Lie superalgebras $D(2,1;t)$, $t\in
{\mathbb F}\setminus\{0,-1\}$, consisting of all simple Lie superalgebras
for which the even part is a Lie algebra of type $A_1\oplus
A_1\oplus A_1$, and its representation on
the odd part is the tensor product of the natural $2$-dimensional
representations of the three $A_1$-direct summands.
\end{enumerate}
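As a quick consistency check of the dimensions quoted above (added for illustration): $\dim F(4)=(21+3)+8\cdot 2=40$, $\dim G(3)=(14+3)+7\cdot 2=31$ and $\dim D(2,1;t)=3\cdot 3+2\cdot 2\cdot 2=17$.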
It turns out that all these exceptional simple Lie superalgebras can be constructed by means of the Tits construction reviewed in Section \ref{se:Tits}, if the Jordan algebra $J$ there is replaced by suitable Jordan superalgebras.
First, a general fact about superalgebras. Consider the Grassmann (or exterior) algebra $G$ on a countable number of variables. That is, $G$ is the unital associative algebra over ${\mathbb F}$ generated by elements $e_i$, $i=1,2,\ldots$, subject to the relations $e_i^2=0$, $e_ie_j=-e_je_i$ for any $i,j$. This Grassmann algebra is naturally graded over ${\mathbb Z}_2$, with all the generators $e_i$ in $G_{\bar 1}$. In this way, $G$ becomes an associative superalgebra.
Given any other superalgebra $A=A_{\bar 0}\oplus A_{\bar 1}$ over ${\mathbb F}$, that is, any ${\mathbb Z}_2$-graded algebra, the \emph{Grassmann envelope} of $A$ is defined to be the ${\mathbb Z}_2$-graded algebra
\[
G(A)=(A_{\bar 0}\otimes G_{\bar 0})\oplus(A_{\bar 1}\otimes G_{\bar 1}).
\]
This is a subalgebra of the algebra $A\otimes G$ with its natural multiplication, in particular $G(A)$ is an algebra over the commutative associative ring $G_{\bar 0}$.
Given any variety ${\mathcal V}$ of algebras, the superalgebra $A$ is said to be a superalgebra in the variety ${\mathcal V}$ if its Grassmann envelope (as an algebra over $G_{\bar 0}$) is an algebra in the variety ${\mathcal V}$. In particular, a superalgebra $J=J_{\bar 0}\oplus J_{\bar 1}$ is a \emph{Jordan superalgebra} if $G(J)$ is a Jordan algebra over $G_{\bar 0}$. (This is equivalent to the superalgebra $J$ being supercommutative and satisfying the superized versions of the identity $x^2(yx)=(x^2y)x$ and its linearizations.)
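For instance (an illustration added here): for the variety of commutative algebras, the Grassmann envelope $G(A)$ of a superalgebra $A$ is commutative if and only if $xy=(-1)^{xy}yx$ for homogeneous $x,y\in A$ (where $(-1)^{xy}$ is $-1$ if both $x$ and $y$ are odd, and $1$ otherwise), that is, if and only if $A$ is supercommutative; the sign arises because the odd generators $e_i$ of $G$ anticommute.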
Let $J=J_{\bar 0}\oplus J_{\bar 1}$ be now a unital Jordan superalgebra
with a normalized trace $t_J:J\rightarrow {\mathbb F}$. That is, $t_J$ is a linear
map such that $t_J(1)=1$, and $t_J(J_{\bar 1})=0=t_J\bigl((J,J,J)\bigr)$
(see \cite[\S 1]{BE03}). Then $J={\mathbb F} 1\oplus J^0$, where $J^0=\{x\in
J: t_J(x)=0\}$, which contains $J_{\bar 1}$. For $x,y\in J^0$,
$xy=t_J(xy)1+x*y$, where $x*y=xy-t_J(xy)1$ is a supercommutative
multiplication on $J^0$. Since $(J,J,J)=[l_J,l_J](J)$ is contained
in $J^0$, the subspace $J^0$ is invariant under $\inder J=[l_J,l_J]$,
the Lie superalgebra of inner derivations. (As for Jordan algebras, $l_x:y\mapsto xy$ denotes the multiplication by the element $x$, but now the Lie bracket is the bracket of the Lie superalgebra of endomorphisms of $J$, so $d_{x,y}=[l_x,l_y]=l_xl_y-(-1)^{xy}l_yl_x$ for homogeneous elements $x,y\in J$, where $(-1)^{xy}$ is $1$ if either $x$ or $y$ is even, and it is $-1$ if both $x$ and $y$ are odd.)
Given a composition algebra $C$ and a unital Jordan
superalgebra with a normalized trace $J$, consider the superspace
\[
{\mathcal T}(C,J)=\inder C\oplus(C^0\otimes J^0)\oplus\inder J,
\]
with the superanticommutative product given by exactly the same formulas in \eqref{eq:TCJproduct}.
If the Grassmann envelope $G(J)$ satisfies the Cayley-Hamilton
equation $ch_3(x)=0$ of $3\times 3$-matrices (this is the condition that reflects the fact that the algebra is of degree $3$), where
\[
ch_3(x)=x^3-3t_J(x)x^2+\Bigl(\frac{9}{2}t_J(x)^2-\frac{3}{2}t_J(x^2)\Bigr)x-
\Bigl(t_J(x^3)-\frac{9}{2}t_J(x^2)t_J(x)+\frac{9}{2}t_J(x)^3\Bigr)1,
\]
and where we use the same notation $t_J$ to denote the natural extension of the normalized trace in $J$ to a linear form on $G(J)$ over $G_{\bar 0}$,
then with the same arguments given by Tits, ${\mathcal T}(C,J)$ is shown to be a Lie superalgebra (see \cite[Sections 3 and 4]{BE03}).
However, the arguments in the previous section concerning the first three rows in the Magic Square are valid too for superalgebras and show that if the dimension of the composition algebra is $1$, $2$ or $4$ (that is, if the composition algebra $C$ is associative), then ${\mathcal T}(C,J)$ is a Lie superalgebra for any Jordan superalgebra $J$.
Only a few simple Jordan superalgebras satisfy the condition on the Cayley-Hamilton equation of degree $3$. The finite dimensional simple Jordan superalgebras have been classified in \cite{KacJordan} and \cite{MZ} (see also \cite{HK} and \cite{RZ}).
Given a vector superspace $V=V_{\bar 0}\oplus V_{\bar 1}$ endowed with a nondegenerate supersymmetric bilinear form $b$ (so that $b$ is symmetric on $V_{\bar 0}$, skew-symmetric on $V_{\bar 1}$ and $b(V_{\bar 0},V_{\bar 1})=0=b(V_{\bar 1},V_{\bar 0})$), the Jordan superalgebra $J(V,b)$ of this form is defined as the unital superalgebra $J=J_{\bar 0}\oplus J_{\bar 1}$ with $J_{\bar 0}={\mathbb F}1 \oplus V_{\bar 0}$, $J_{\bar 1}=V_{\bar 1}$, with the multiplication determined by $uv=b(u,v)1$ for any $u,v\in V$. This is always a simple Jordan superalgebra. Denote by $V^{0\vert 2}$ the vector superspace with $V_{\bar 0}=0$ and $\dim V_{\bar 1}=2$, endowed with a nondegenerate supersymmetric bilinear form $b$ (unique up to scaling), and by $J^{0\vert 2}$ the corresponding three-dimensional simple Jordan superalgebra $J(V^{0\vert 2},b)$. This superalgebra is trivially endowed with a normalized trace form given by $t_J(1)=1$, $t_J(u)=0$ for any $u\in V^{0\vert 2}$, and it is easy to check that its Grassmann envelope satisfies the Cayley-Hamilton equation of degree $3$.
Among the simple Jordan superalgebras, there is a one-parameter
family of 4-dimensional algebras $D_t \ (t \neq 0)$, having
$(D_t)_{\bar 0} = {\mathbb F} e \oplus {\mathbb F} f$ and $(D_t)_{\bar 1} = {\mathbb F} x \oplus {\mathbb F} y$ where
\[
\begin{gathered}
e^2 = e, \qquad f^{2} = f, \qquad ef = 0 \\
xy = e+t f \,(= -yx), \qquad ex = \frac{1}{2} x = fx, \qquad
ey = \frac{1}{2} y = fy.
\end{gathered}
\]
For $t\ne 0,-1$, $D_t$ admits a normalized trace given by:
\[
t_J(e)=\frac{t}{1+t},\quad t_J(f)=\frac{1}{1+t},\quad t_J\bigl((D_t)_{\bar 1}\bigr)=0,
\]
and it is shown in \cite[Lemma 2.3]{BE03} that the Grassmann envelope satisfies the Cayley-Hamilton equation of degree $3$ if and only if $t$ is either $2$ or $\frac{1}{2}$. (Note that $D_t$ is isomorphic to $D_{\frac{1}{t}}$.)
Another simple Jordan superalgebra must enter into our discussion: the $10$-dimensional Kac superalgebra $K_{10}$. An easy way to describe this Jordan superalgebra appeared in \cite{BE02} in terms of the tiny
Kaplansky superalgebra. The tiny Kaplansky superalgebra is
the three dimensional Jordan superalgebra $K_3=K_{\bar 0}\oplus K_{\bar 1}$,
with $K_{\bar 0}={\mathbb F} e$ and $K_{\bar 1} =U$, a two dimensional vector space
endowed with a nonzero alternating bilinear form $(.\vert .)$, and
multiplication given by
\[
e^2=e,\quad ex=xe=\frac{1}{2}x,\quad xy=(x\vert y)e,
\]
for any $x,y\in U$. The bilinear form $(.\vert .)$ can be extended
to a supersymmetric bilinear form by means of $(e\vert
e)=\frac{1}{2}$ and $(K_{\bar 0}\vert K_{\bar 1})=0$.
The Kac Jordan superalgebra is
\begin{equation}\label{eq:K10}
K_{10}={\mathbb F} 1\oplus (K_3\otimes K_3),
\end{equation}
with unit element $1$ and product determined \cite[(2.1)]{BE02} by
\[
(a\otimes b)(c\otimes d)=(-1)^{bc}\Bigl(ac\otimes
bd-\frac{3}{4}(a\vert c)(b\vert d)1\Bigr),
\]
for homogeneous elements $a,b,c,d\in K_3$. This superalgebra is simple if the characteristic is $\ne 2,3$. In characteristic $3$, it contains the simple ideal $K_9=K_3\otimes K_3$. Besides, the Jordan superalgebra $K_{10}$ is endowed with a unique normalized trace given by $t_J(1)=1$ and $t_J(K_3\otimes K_3)=0$.
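Note (an added observation): counting dimensions in \eqref{eq:K10}, the even part of $K_{10}$ is ${\mathbb F} 1\oplus(K_{\bar 0}\otimes K_{\bar 0})\oplus(K_{\bar 1}\otimes K_{\bar 1})$, of dimension $1+1+4=6$, and the odd part is $(K_{\bar 0}\otimes K_{\bar 1})\oplus(K_{\bar 1}\otimes K_{\bar 0})$, of dimension $4$, so that $\dim K_{10}=10$, as the notation suggests.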
\begin{proposition}\label{pr:degree3}
Assume that the characteristic of our ground field ${\mathbb F}$ is $\ne 2,3$. Then the only finite-dimensional simple
unital Jordan superalgebras $J$ with $J_{\bar 1} \neq 0$ whose Grassmann envelope $G(J)$ satisfies
the Cayley-Hamilton equation ${ch}_3(x) = 0$ relative to a normalized
trace on $J$ are, up to isomorphism, $J^{0\vert 2}$, $D_2$ and, only if the characteristic of ${\mathbb F}$ is $5$, $K_{10}$.
\end{proposition}
\begin{proof}
This is proved in \cite[Proposition 5.1]{BE03}, where $K_{10}$ is missing (the argument has a flaw for $K_{10}$ as the subalgebra considered there to get a contradiction is not a unital subalgebra). The situation for $K_{10}$ is settled in \cite{McC}.
\end{proof}
Therefore, we can plug the three superalgebras in Proposition \ref{pr:degree3} into the Tits construction ${\mathcal T}(C,J)$ (and in all the rows but the last one, we can also plug in the Jordan superalgebras $D_t$ for arbitrary values of $t\ne 0,-1$), to obtain the \emph{Supermagic Rectangle} in Table \ref{ta:SuperRectangle}. The entries in the last three columns are computed in \cite{BE03} and \cite{EldModular}.
{\tiny
\begin{table}[h!]
\begin{center}
\begin{tabular}{c|cccc|ccc}
${\mathcal T}(C,J)$&\vrule height 14pt width 0ptdepth 2pt$H_3({\mathbb F})$&$H_3({\mathbb F}\times {\mathbb F})$&$H_3(\Mat_2({\mathbb F}))$&$H_3(C({\mathbb F}))$&$J^{0\vert 2}$& $D_t$&$K_{10}$\\
\cline{1-8}
\vrule height 14pt width 0ptdepth 2pt${\mathbb F}$&$A_1$&$A_2$&$C_3$&$F_4$&$A_1$&$B(0,1)$&$B(0,1)\oplus B(0,1)$\\
\vrule height 14pt width 0ptdepth 2pt${\mathbb F}\times {\mathbb F}$&$A_2$&$A_2\oplus A_2$&$A_5$&$E_6$&$B(0,1)$&$A(1,0)$&$C(3)$\\
\vrule height 14pt width 0ptdepth 2pt$\Mat_2({\mathbb F})$&$C_3$&$A_5$&$D_6$&$E_7$&$B(1,1)$&$D(2,1;t)$&$F(4)$\\
\vrule height 14pt width 0ptdepth 2pt$C({\mathbb F})$&$F_4$&$E_6$&$E_7$&$E_8$&$G(3)$&$F(4)$ &${\mathcal T}(C({\mathbb F}),K_{10})$\\[-2pt]
&&&&&&($t=2$)&($\charac 5$)
\end{tabular}
\null
\end{center}
\caption{A Supermagic Rectangle}\label{ta:SuperRectangle}
\end{table}
}
Therefore, the exceptional simple classical Lie superalgebras $D(2,1;t)$, $G(3)$ and $F(4)$ can be obtained too by means of the Tits construction.
And in characteristic $5$ there appears another Lie superalgebra, ${\mathcal T}\bigl(C({\mathbb F}),K_{10}\bigr)$. This superalgebra turns out to be a new simple modular Lie superalgebra specific to this characteristic. Its dimension is $87$, its even part is the classical simple Lie algebra $B_5$, and its odd part is the spin module for the even part. This Lie superalgebra appeared for the first time in \cite{EldModular}, where all these features are proved.
In the notation of \cite{BGLgordo}, this superalgebra appears as $\mathfrak{el}(5;5)$.
\section{A symmetric construction of the Magic Square}\label{se:SymCons}
Given any composition algebra $C$ over ${\mathbb F}$ with norm $n$ and standard
involution $x\mapsto \bar x=n(1,x)1-x$, the algebra $H_3(C,*)$ of
$3\times 3$ hermitian matrices over $C$, where $(a_{ij})^*=(\bar
a_{ji})$, is a Jordan algebra under the symmetrized product
\begin{equation}\label{eq:Jproduct}
x\circ y= \frac{1}{2}\bigl( xy+yx\bigr).
\end{equation}
Then,
\[
\begin{split}
J=H_3(C,*)&=\left\{ \begin{pmatrix} \alpha_0 &\bar a_2& a_1\\
a_2&\alpha_1&\bar a_0\\ \bar a_1&a_0&\alpha_2\end{pmatrix} :
\alpha_0,\alpha_1,\alpha_2\in {\mathbb F},\ a_0,a_1,a_2\in C\right\}\\[6pt]
&=\bigl(\oplus_{i=0}^2 {\mathbb F} e_i\bigr)\oplus
\bigl(\oplus_{i=0}^2\iota_i(C)\bigr),
\end{split}
\]
where
\[
\begin{aligned}
e_0&= \begin{pmatrix} 1&0&0\\ 0&0&0\\ 0&0&0\end{pmatrix}, &
e_1&=\begin{pmatrix} 0&0&0\\ 0&1&0\\ 0&0&0\end{pmatrix}, &
e_2&= \begin{pmatrix} 0&0&0\\ 0&0&0\\ 0&0&1\end{pmatrix}, \\
\iota_0(a)&=2\begin{pmatrix} 0&0&0\\ 0&0&\bar a\\
0&a&0\end{pmatrix},&
\iota_1(a)&=2\begin{pmatrix} 0&0&a\\ 0&0&0\\
\bar a&0&0\end{pmatrix},&
\iota_2(a)&=2\begin{pmatrix} 0&\bar a&0\\ a&0&0\\
0&0&0\end{pmatrix},
\end{aligned}
\]
for any $a\in C$. Identify ${\mathbb F} e_0\oplus {\mathbb F} e_1\oplus {\mathbb F} e_2$ with ${\mathbb F}^3$ by
means of $\alpha_0e_0+\alpha_1e_1+\alpha_2e_2\simeq
(\alpha_0,\alpha_1,\alpha_2)$. Then the commutative
multiplication \eqref{eq:Jproduct} becomes:
\begin{equation}\label{eq:Jniceproduct}
\left\{\begin{aligned}
&(\alpha_0,\alpha_1,\alpha_2)\circ(\beta_0,\beta_1,\beta_2)=
(\alpha_0\beta_0,\alpha_1\beta_1,\alpha_2\beta_2),\\
&(\alpha_0,\alpha_1,\alpha_2)\circ \iota_i(a)
=\frac{1}{2}(\alpha_{i+1}+\alpha_{i+2})\iota_i(a),\\
&\iota_i(a)\circ\iota_{i+1}(b)=\iota_{i+2}(\bar a\bar b),\\
&\iota_i(a)\circ\iota_i(b)=2n(a,b)\bigl(e_{i+1}+e_{i+2}\bigr),
\end{aligned}\right.
\end{equation}
for any $\alpha_i,\beta_i\in {\mathbb F}$, $a,b\in C$, $i=0,1,2$, and where
indices are taken modulo $3$.
Note that \eqref{eq:Jniceproduct} shows that $J$ is graded over
${\mathbb Z}_2\times {\mathbb Z}_2$ with:
\[
J_{(0,0)}={\mathbb F}^3,\quad J_{(1,0)}=\iota_0(C),\quad
J_{(0,1)}=\iota_1(C),\quad J_{(1,1)}=\iota_2(C)
\]
and, therefore, $\der J$ is accordingly graded over
${\mathbb Z}_2\times{\mathbb Z}_2$:
\[
(\der J)_{(i,j)}=\{ d\in\der J: d\bigl(J_{(r,s)}\bigr)\subseteq
J_{(i+r,j+s)}\ \forall r,s=0,1\}.
\]
It is not difficult to prove (see \cite[Lemma 3.4]{CunEld2}) that
$(\der J)_{(0,0)}=\{ d\in \der J : d(e_i)=0\ \forall
i=0,1,2\}$.
Now, for any $d\in(\der J)_{(0,0)}$, there are linear maps $d_i\in \End_{\mathbb F}(C)$, $i=0,1,2$ such that
$d\bigl(\iota_i(a)\bigr)=\iota_i\bigl(d_i(a)\bigr)$ for any $a\in C$
and $i=0,1,2$. Now, for any $a,b\in C$ and $i=0,1,2$:
\[
\begin{split}
0&=2n(a,b)d(e_{i+1}+e_{i+2})=d\bigl(\iota_i(a)\circ\iota_i(b)\bigr)\\
&=\iota_i\bigl(d_i(a)\bigr)\circ\iota_i(b)
+\iota_i(a)\circ\iota_i\bigl(d_i(b)\bigr)\\
&=2\bigl(n(d_i(a),b)+n(a,d_i(b))\bigr)(e_{i+1}+e_{i+2}),
\end{split}
\]
so $d_i$ belongs to the orthogonal Lie algebra
$\mathfrak{so}(C,n)$. Also, if we write $a\bullet b=\bar a\bar b$, we have:
\[
\begin{split}
\iota_i\bigl(d_i(a\bullet b)\bigr)&=
d\bigl(\iota_i(a\bullet b)\bigr)=d\bigl(\iota_{i+1}(a)\circ
\iota_{i+2}(b)\bigr)\\
&=d\bigl(\iota_{i+1}(a)\bigr)\circ \iota_{i+2}(b) +
\iota_{i+1}(a)\circ d\bigl(\iota_{i+2}(b)\bigr)\\
&=\iota_{i+1}\bigl(d_{i+1}(a)\bigr)\circ \iota_{i+2}(b) +
\iota_{i+1}(a)\circ \iota_{i+2}\bigl(d_{i+2}(b)\bigr)\\
&=\iota_i\Bigl(d_{i+1}(a)\bullet b+ a\bullet d_{i+2}(b)\Bigr),
\end{split}
\]
which shows that the triple $(d_0,d_1,d_2)$ satisfies the following condition:
\[
d_i(\bar a\bar b)=\overline{d_{i+1}(a)}\bar b+\bar a\overline{d_{i+2}(b)}
\]
for any $a,b\in C$ and $i=0,1,2$ (indices modulo $3$). One can check that the condition above for $i=0$ implies the conditions for $i=1$ and $i=2$. This leads us to the \emph{triality Lie algebra} of $C$, which is the Lie subalgebra of the direct sum of three copies of the orthogonal Lie algebra $\mathfrak{so}(C,n)$ given by the following equation:
\begin{equation}\label{eq:triC}
\mathfrak{tri}(C)=\{(d_0,d_1,d_2)\in \mathfrak{so}(C,n)^3: d_0(\bar a\bar b)=\overline{d_{1}(a)}\bar b+\bar a\overline{d_{2}(b)}\ \forall a,b\in C\}.
\end{equation}
It turns out that $(\der J)_{(0,0)}$ is then isomorphic to the triality Lie algebra $\mathfrak{tri}(C)$, by means of the linear map:
\[
\begin{split}
\mathfrak{tri}(C)&\longrightarrow (\der J)_{(0,0)}\\
(d_0,d_1,d_2)&\mapsto D_{(d_0,d_1,d_2)},
\end{split}
\]
such that
\[
\left\{\begin{aligned} &D_{(d_0,d_1,d_2)}(e_i)=0,\\
&D_{(d_0,d_1,d_2)}\bigl(\iota_i(a)\bigr)=
\iota_i\bigl(d_i(a)\bigr)
\end{aligned}\right.
\]
for any $i=0,1,2$ and $a\in C$.
Also, for any $i=0,1,2$ and $a\in C$, consider the following inner
derivation of the Jordan superalgebra $J$:
\[
D_i(a)=2\bigl[ l_{\iota_i(a)},l_{e_{i+1}}\bigr]
\]
(indices modulo $3$) where, as before, $l_x$ denotes the multiplication by $x$
in $J$. Note that the restriction of $l_{e_i}$ to
$\iota_{i+1}(C)\oplus\iota_{i+2}(C)$ is half the identity, so the
inner derivation $\bigl[ l_{\iota_i(a)},l_{e_{i}}\bigr]$ is trivial
on $\iota_{i+1}(C)\oplus\iota_{i+2}(C)$, which generates $J$. Hence
\[
\bigl[l_{\iota_i(a)},l_{e_{i}}\bigr]=0
\]
for any $i=0,1,2$ and $a\in C$. Also, $l_{e_0+e_1+e_2}$ is the
identity map, so we have $\bigl[ l_{\iota_i(a)},l_{e_0+e_{1}+e_2}\bigr]=0$,
and hence
\[
D_i(a)=2\bigl[ l_{\iota_i(a)},l_{e_{i+1}}\bigr]=
-2\bigl[l_{\iota_i(a)},l_{e_{i+2}}\bigr].
\]
A straightforward computation with \eqref{eq:Jniceproduct} gives
\[
\begin{split}
&D_i(a)(e_i)=0,\ D_i(a)(e_{i+1})=\frac{1}{2} \iota_i(a),\
D_i(a)(e_{i+2})=-\frac{1}{2}\iota_i(a),\\
&D_i(a)\bigl(\iota_{i+1}(b)\bigr)=-\iota_{i+2}(a\bullet b),\\
&D_i(a)\bigl(\iota_{i+2}(b)\bigr)=\iota_{i+1}(b\bullet a),\\
&D_i(a)\bigl(\iota_i(b)\bigr)=2n(a,b)(-e_{i+1}+e_{i+2}),
\end{split}
\]
for any $i=0,1,2$ and any homogeneous elements $a,b\in C$.
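For instance (just to make explicit how such formulas follow from \eqref{eq:Jniceproduct} and the definition of $D_i(a)$), for the value at $e_{i+1}$ one has
\[
D_i(a)(e_{i+1})=2\Bigl(\iota_i(a)\circ\bigl(e_{i+1}\circ e_{i+1}\bigr)-e_{i+1}\circ\bigl(\iota_i(a)\circ e_{i+1}\bigr)\Bigr)
=2\Bigl(\tfrac{1}{2}\iota_i(a)-\tfrac{1}{4}\iota_i(a)\Bigr)=\tfrac{1}{2}\iota_i(a),
\]
since $e_{i+1}\circ e_{i+1}=e_{i+1}$ and $e_{i+1}\circ\iota_i(a)=\frac{1}{2}\iota_i(a)$ by \eqref{eq:Jniceproduct}.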
Denote by $D_{\mathfrak{tri}(C)}$ the linear span of the $D_{(d_0,d_1,d_2)}$'s, $(d_0,d_1,d_2)\in \mathfrak{tri}(C)$, and by $D_i(C)$ the linear span of the $D_i(a)$'s, $a\in C$. Then $D_{\mathfrak{tri}(C)}=(\der J)_{(0,0)}$,
$D_0(C)=(\der J)_{(1,0)}$, $D_1(C)=(\der J)_{(0,1)}$, and
$D_2(C)=(\der J)_{(1,1)}$ (see \cite[Lemma 3.11]{CunEld2}).
Therefore, the ${\mathbb Z}_2\times{\mathbb Z}_2$-grading of $\der J$ becomes
\[
\der J=D_{\mathfrak{tri}(C)}\oplus\bigl(\oplus_{i=0}^2 D_i(C)\bigr).
\]
At least as vector spaces, and assuming that the characteristic of ${\mathbb F}$ is $\ne 2,3$, we obtain isomorphisms:
\begin{align*}
&J=H_3(C)\simeq
{\mathbb F}^3\oplus\bigl(\oplus_{i=0}^2\iota_i(C)\bigr),\\[2pt]
&J^0\simeq {\mathbb F}^2\oplus \bigl(\oplus_{i=0}^2\iota_i(C)\bigr),\\[4pt]
&\der J\simeq \mathfrak{tri}(C)\oplus\bigl(\oplus_{i=0}^2\iota_i(C)\bigr).
\end{align*}
Whence, if $C$ is a composition algebra with norm $n$, and $J$ is the simple Jordan algebra of hermitian $3\times 3$ matrices over a second composition algebra $C'$ with norm $n'$, the Lie algebra considered in the Tits construction (note that $\inder J=\der J$) can be split into pieces and then rearranged as follows, at least as a vector space:
\begin{align*}
{\mathcal T}(C,J)&=\inder C\oplus (C^0\otimes J^0)\oplus\inder J\\[4pt]
&\simeq \der C\oplus (C^0\otimes {\mathbb F}^2)\oplus\bigl(\oplus_{i=0}^2C^0\otimes
\iota_i(C')\bigr)\oplus\bigl(\mathfrak{tri}(C')\oplus(\oplus_{i=0}^2\iota_i(C'))\bigr)\\[4pt]
&\simeq\bigl(\der C\oplus (C^0\otimes {\mathbb F}^2)\bigr)\oplus\mathfrak{tri}(C')\oplus
\bigl(\oplus_{i=0}^2\iota_i(C\otimes C')\bigr)\\[4pt]
&\simeq\bigl(\mathfrak{tri}(C)\oplus\mathfrak{tri}(C')\bigr)\oplus\bigl(\oplus_{i=0}^2\iota_i(C\otimes
C')\bigr)
\end{align*}
where $\iota_i(C\otimes C')$ indicates a copy of the tensor product $C\otimes C'$, and where we have used that $\der C$ identifies canonically with the subalgebra of those elements in $\mathfrak{tri}(C)$ with equal components, and then $\mathfrak{tri}(C)$ decomposes as the direct sum of this subalgebra and two copies of the module for $\der C$ formed by the subspace of trace zero elements in $C$ (this follows, for instance, from the arguments in \cite[Chapter III.8]{Schafer} in dimension $8$ and \cite[Corollary 3.4]{EldIbero1} in dimension $\leq 4$).
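For later reference, this decomposition also gives a quick dimension count (nothing more than bookkeeping under the identifications just described):
\[
\dim\mathfrak{tri}(C)=\dim\der C+2\bigl(\dim C-1\bigr),
\]
so that $\dim\mathfrak{tri}(C)=0$, $2$, $9$ or $28$ for $\dim C=1$, $2$, $4$ or $8$, since $\dim\der C=0$, $0$, $3$ or $14$ in the respective cases.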
Then (see \cite{BS1}, \cite{BS2}, \cite{LM1}, \cite{LM2} or \cite{EldIbero1}) the Lie bracket in ${\mathcal T}(C,J)$ can be transferred to the vector space
\[
{\mathfrak g}(C,C')=\bigl(\mathfrak{tri}(C)\oplus\mathfrak{tri}(C')\bigr)\oplus\bigl(\oplus_{i=0}^2
\iota_i(C\otimes C')\bigr),
\]
as follows:
\begin{equation}\label{eq:gCC'}
\begin{split}
\bullet&\ \textrm{$\mathfrak{tri}(C)\oplus\mathfrak{tri}(C')$ is a Lie subalgebra of ${\mathfrak g}(C,C')$,}\\
\bullet&\ [(d_0,d_1,d_2),\iota_i(x\otimes
x')]=\iota_i\bigl(d_i(x)\otimes x'\bigr),\\
\bullet&\
[(d_0',d_1',d_2'),\iota_i(x\otimes
x')]=\iota_i\bigl(x\otimes d_i'(x')\bigr),\\
\bullet&\ [\iota_i(x\otimes x'),\iota_{i+1}(y\otimes y')]=
\iota_{i+2}\bigl((\bar x\bar y)\otimes (\bar x' \bar y')\bigr)\ \textrm{(indices modulo $3$),}\\
\bullet&\ [\iota_i(x\otimes x'),\iota_i(y\otimes y')]=
n'(x',y')\theta^i(t_{x,y}) +
n(x,y)\theta'^i(t'_{x',y'}),
\end{split}
\end{equation}
for any $x,y\in C$, $x',y'\in C'$, $(d_0,d_1,d_2)\in \mathfrak{tri}(C)$, and $(d_0',d_1',d_2')\in \mathfrak{tri}(C')$, where
\[
t_{x,y}=\bigl(n(x,.)y-n(y,.)x,\tfrac{1}{2}n(x,y)1-R_{\bar x}R_y,\tfrac{1}{2}n(x,y)1-L_{\bar x}L_y\bigr),
\]
$L$ and $R$ denote, respectively, left and right multiplications, and $\theta$ is the automorphism of $\mathfrak{tri}(C)$ such that $\theta\bigl((d_0,d_1,d_2)\bigr)=(d_2,d_0,d_1)$. Similarly for $t'_{x',y'}$ and $\theta'$.
In this way, the Tits construction becomes a construction that depends symmetrically on the two composition algebras involved.
A further advantage of this symmetric construction is that the definition above of the Lie bracket in ${\mathfrak g}(C,C')$ also makes sense in characteristic $3$. One thus obtains the Magic Square in characteristic $3$ (Table \ref{ta:MS3}).
\begin{table}[h!]
\begin{center}
\begin{tabular}{rc|ccccc}
\multicolumn{2}{c}{}&\multicolumn{4}{c}{$\dim C'$}&\null\qquad\qquad\null\\[4pt]
&${\mathfrak g}(C,C')$&$1$&$2$&$4$&$8$\\
\cline{2-6}
&\vrule height 14pt width 0pt depth 2pt$1$&$A_1$&$\tilde A_2$&$C_3$&$F_4$\\
&\vrule height 14pt width 0pt depth 2pt$2$&$\tilde A_2$&$\tilde A_2\oplus \tilde A_2$&$\tilde A_5$&$\tilde E_6$\\
\smash{\raise 6pt\hbox{$\dim C$}}&\vrule height 14pt width 0pt depth 2pt$4$&$C_3$&$\tilde A_5$&$D_6$&$E_7$\\
&\vrule height 14pt width 0pt depth 2pt$8$&$F_4$&$\tilde E_6$&$E_7$&$E_8$
\end{tabular}
\end{center}
\null
\caption{Magic Square in characteristic $3$}\label{ta:MS3}
\end{table}
In Table \ref{ta:MS3}, $\tilde A_2$ (respectively $\tilde A_5$) denotes the projective general Lie algebra ${\mathfrak{pgl}}_3({\mathbb F})$ (respectively ${\mathfrak{pgl}}_6({\mathbb F})$), which is not simple, but contains the codimension one simple ideal ${\mathfrak{psl}}_3({\mathbb F})$ (respectively ${\mathfrak{psl}}_6({\mathbb F})$). In the same vein, $\tilde E_6$ denotes a $78$-dimensional Lie algebra which contains a unique codimension one simple ideal: the simple Lie algebra of type $E_6$ in characteristic $3$ (whose dimension is $77$!).
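As a rough consistency check on Table \ref{ta:MS3} (only a dimension count, using the values $\dim\mathfrak{tri}(C)=0,2,9,28$ for $\dim C=1,2,4,8$ recorded above), the construction gives
\[
\dim{\mathfrak g}(C,C')=\dim\mathfrak{tri}(C)+\dim\mathfrak{tri}(C')+3\dim C\,\dim C'.
\]
For instance, $\dim{\mathfrak g}\bigl(C({\mathbb F}),C({\mathbb F})\bigr)=28+28+3\cdot 64=248=\dim E_8$, $\dim{\mathfrak g}\bigl(\Mat_2({\mathbb F}),C({\mathbb F})\bigr)=9+28+3\cdot 32=133=\dim E_7$, and $\dim{\mathfrak g}\bigl({\mathbb F}\times{\mathbb F},C({\mathbb F})\bigr)=2+28+3\cdot 16=78$, which is precisely the dimension of $\tilde E_6$.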
\section{Composition superalgebras}\label{se:ComSuper}
A quadratic superform on a ${\mathbb Z}_2$-graded vector space
$U=U_{\bar 0}\oplus U_{\bar 1}$ over our field ${\mathbb F}$ is a pair
$q=(q_{\bar 0},{\textup{b}})$ where $q_{\bar 0} :U_{\bar 0}\rightarrow {\mathbb F}$ is a quadratic
form, and
${\textup{b}}:U\times U\rightarrow {\mathbb F}$ is a supersymmetric even bilinear form
such that ${\textup{b}}\vert_{U_{\bar 0}\times U_{\bar 0}}$ is the polar form of $q_{\bar 0}$:
\[
{\textup{b}}(x_{\bar 0},y_{\bar 0})=q_{\bar 0}(x_{\bar 0}+y_{\bar 0})-q_{\bar 0}(x_{\bar 0})-q_{\bar 0}(y_{\bar 0})
\]
for any $x_{\bar 0},y_{\bar 0}\in U_{\bar 0}$.
The quadratic superform $q=(q_{\bar 0},{\textup{b}})$ is said to be
\emph{regular} if the bilinear form ${\textup{b}}$ is
nondegenerate.
Then a unital superalgebra $C=C_{\bar 0}\oplus C_{\bar 1}$, endowed with
a regular quadratic superform $q=(q_{\bar 0},{\textup{b}})$, called the
\emph{norm}, is said to be a \emph{composition superalgebra} (see
\cite{EldOkuCompoSuper}) in case
\begin{subequations}
\begin{align}
&q_{\bar 0}(x_{\bar 0} y_{\bar 0})=q_{\bar 0}(x_{\bar 0})q_{\bar 0}(y_{\bar 0}),\label{eq:qcompo1}\\
&{\textup{b}}(x_{\bar 0} y,x_{\bar 0} z)=q_{\bar 0}(x_{\bar 0}){\textup{b}}(y,z)={\textup{b}}(yx_{\bar 0},zx_{\bar 0}),\label{eq:qcompo2}\\
&{\textup{b}}(xy,zt)+(-1)^{xy+xz+yz}{\textup{b}}(zy,xt)=(-1)^{yz}{\textup{b}}(x,z){\textup{b}}(y,t),\label{eq:qcompo3}
\end{align}
\end{subequations}
for any $x_{\bar 0},y_{\bar 0}\in C_{\bar 0}$ and homogeneous elements
$x,y,z,t\in C$, where an exponent such as $xy$ denotes the product of the degrees (parities) of the homogeneous elements involved. (As we are working in characteristic $\ne 2$, it is enough to consider equation \eqref{eq:qcompo3}.)
Nontrivial composition superalgebras appear only over fields of characteristic $3$ (see \cite{EldOkuCompoSuper}):
\begin{itemize}
\item Let $V$ be a $2$-dimensional vector space over ${\mathbb F}$,
endowed with a nonzero alternating bilinear form $\langle .\vert
.\rangle$ (that is, $\langle v\vert v\rangle =0$ for any $v\in V$). Consider the superspace $B(1,2)$ (see \cite{She97}) with
\[
B(1,2)_{\bar 0} ={\mathbb F} 1,\qquad\text{and}\qquad B(1,2)_{\bar 1}= V,
\]
endowed with the supercommutative multiplication given by
\[
1x=x1=x\qquad\text{and}\qquad uv=\langle u\vert v\rangle 1
\]
for any $x\in B(1,2)$ and $u,v\in V$, and with the quadratic
superform $q=(q_{\bar 0},{\textup{b}})$ given by:
\[
q_{\bar 0}(1)=1,\quad {\textup{b}}(u,v)=\langle u\vert v\rangle,
\]
for any $u,v\in V$. If the characteristic of ${\mathbb F}$ is $3$, then
$B(1,2)$ is a composition superalgebra (\cite[Proposition
2.7]{EldOkuCompoSuper}).
\item Moreover, with $V$ as before, let $f\mapsto \bar f$ be the
associated symplectic involution on $\End_{\mathbb F}(V)$ (so $\langle
f(u)\vert v\rangle =\langle u\vert\bar f(v)\rangle$ for any $u,v\in
V$ and $f\in\End_{\mathbb F}(V)$). Consider the superspace $B(4,2)$ (see
\cite{She97}) with
\[
B(4,2)_{\bar 0}=\End_{\mathbb F}(V),\qquad\text{and}\qquad B(4,2)_{\bar 1}=V,
\]
with multiplication given by the usual one (composition of maps) in
$\End_{\mathbb F}(V)$, and by
\[
\begin{split}
&v\cdot f=f(v)=\bar f\cdot v \in V,\\
&u\cdot v=\langle .\vert u\rangle v\in \End_{\mathbb F}(V)
\end{split}
\]
for any $f\in\End_{\mathbb F}(V)$ and $u,v\in V$, where $\langle .\vert u\rangle v$ denotes the endomorphism $w\mapsto \langle w\vert u\rangle v$; and with quadratic superform
$q=(q_{\bar 0},{\textup{b}})$ such that
\[
q_{\bar 0}(f)=\det(f),\qquad{\textup{b}}(u,v)=\langle u\vert v\rangle,
\]
for any $f\in \End_{\mathbb F}(V)$ and $u,v\in V$. If the
characteristic is $3$, $B(4,2)$ is a composition superalgebra
(\cite[Proposition 2.7]{EldOkuCompoSuper}).
\end{itemize}
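To illustrate where the restriction to characteristic $3$ comes from, here is a short verification (a sketch, dealing only with the case of four odd arguments) of \eqref{eq:qcompo3} for the superalgebra $B(1,2)$ above. For $x,y,z,t\in V$ one has $xy=\langle x\vert y\rangle 1$, $zy=\langle z\vert y\rangle 1$ and ${\textup{b}}(1,1)=q_{\bar 0}(1+1)-2q_{\bar 0}(1)=2$, so the left-hand side of \eqref{eq:qcompo3} equals
\[
2\langle x\vert y\rangle\langle z\vert t\rangle-2\langle z\vert y\rangle\langle x\vert t\rangle,
\]
while the right-hand side equals $-\langle x\vert z\rangle\langle y\vert t\rangle$. Since $\dim V=2$, the alternating form satisfies $\langle x\vert y\rangle z-\langle x\vert z\rangle y+\langle y\vert z\rangle x=0$, and pairing this with $t$ yields $\langle x\vert y\rangle\langle z\vert t\rangle-\langle z\vert y\rangle\langle x\vert t\rangle=\langle x\vert z\rangle\langle y\vert t\rangle$. Hence, on odd elements, \eqref{eq:qcompo3} amounts to $2\langle x\vert z\rangle\langle y\vert t\rangle=-\langle x\vert z\rangle\langle y\vert t\rangle$, which holds for all choices if and only if the characteristic of ${\mathbb F}$ is $3$.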
Given any composition superalgebra $C$ with norm $q=(q_{\bar 0},{\textup{b}})$, its
standard involution is given by
\[
x\mapsto \bar x={\textup{b}}(x,1)1-x,
\]
and its \emph{triality Lie superalgebra} $\mathfrak{tri}(C)=\mathfrak{tri}(C)_{\bar 0}\oplus\mathfrak{tri}(C)_{\bar 1}$ is
defined by:
\begin{multline*}
\mathfrak{tri}(C)_{\bar\imath}=\{ (d_0,d_1,d_2)\in\mathfrak{osp}(C,{\textup{b}})^3_{\bar\imath}:\\
d_0(x\bullet y)=d_1(x)\bullet y+(-1)^{ix}x\bullet
d_2(y)\ \forall x,y\in C_{\bar 0}\cup C_{\bar 1}\},
\end{multline*}
for $\bar \imath= \bar 0,\bar 1$, where $x\bullet y=\bar x\bar y$ for any $x,y\in C$, and $\mathfrak{osp}(C,{\textup{b}})$ denotes the
associated orthosymplectic Lie superalgebra. The bracket in
$\mathfrak{tri}(C)$ is given componentwise.
Now the construction given in Section \ref{se:SymCons} of the Lie algebra ${\mathfrak g}(C,C')$ is valid in this setting. Therefore, given two composition superalgebras $C$ and $C'$, consider the Lie superalgebra:
\[
{\mathfrak g}={\mathfrak g}(C,C')=\bigl(\mathfrak{tri}(C)\oplus\mathfrak{tri}(C')\bigr)\oplus\bigl(\oplus_{i=0}^2
\iota_i(C\otimes C')\bigr),
\]
where $\iota_i(C\otimes C')$ is just a copy of $C\otimes C'$
($i=0,1,2$), with bracket given by the superversion of \eqref{eq:gCC'}:
\begin{itemize}
\item the Lie bracket in $\mathfrak{tri}(C)\oplus\mathfrak{tri}(C')$, which thus becomes a Lie subsuperalgebra of ${\mathfrak g}$,
\item $[(d_0,d_1,d_2),\iota_i(x\otimes
x')]=\iota_i\bigl(d_i(x)\otimes x'\bigr)$,
\item
$[(d_0',d_1',d_2'),\iota_i(x\otimes
x')]=(-1)^{d_i'x}\iota_i\bigl(x\otimes d_i'(x')\bigr)$,
\item $[\iota_i(x\otimes x'),\iota_{i+1}(y\otimes y')]=(-1)^{x'y}
\iota_{i+2}\bigl((x\bullet y)\otimes (x'\bullet y')\bigr)$ (indices modulo
$3$),
\item $[\iota_i(x\otimes x'),\iota_i(y\otimes y')]=
(-1)^{xx'+xy'+yy'}
{\textup{b}}'(x',y')\theta^i(t_{x,y})$ \newline \null\hspace{2.5 in} $+
(-1)^{yx'}
{\textup{b}}(x,y)\theta'^i(t'_{x',y'})$,
\end{itemize}
\noindent
for any $i=0,1,2$ and homogeneous $x,y\in C$, $x',y'\in C'$,
$(d_0,d_1,d_2)\in\mathfrak{tri}(C)$, and $(d_0',d_1',d_2')\in\mathfrak{tri}(C')$. Here
$\theta$ denotes the natural automorphism
$\theta:(d_0,d_1,d_2)\mapsto (d_2,d_0,d_1)$ in $\mathfrak{tri}(C)$, while $t_{x,y}$ is defined now by
\[
t_{x,y}=\bigl(\sigma_{x,y},\tfrac{1}{2}{\textup{b}}(x,y)1-r_xl_y,\tfrac{1}{2}{\textup{b}}(x,y)1-l_xr_y\bigr)
\]
with $l_x(y)=x\bullet y$, $r_x(y)=(-1)^{xy}y\bullet x$, and
\[
\sigma_{x,y}(z)=(-1)^{yz}{\textup{b}}(x,z)y-(-1)^{x(y+z)}{\textup{b}}(y,z)x
\]
for homogeneous $x,y,z\in C$. Also $\theta'$
and $t'_{x',y'}$ denote the analogous elements for $\mathfrak{tri}(C')$.
Assuming the characteristic of ${\mathbb F}$ is $3$, the Lie superalgebras ${\mathfrak g}(C,C')$, where $C,C'$ run over $\{{\mathbb F},{\mathbb F}\times{\mathbb F},\Mat_2({\mathbb F}),C({\mathbb F}), B(1,2),B(4,2)\}$, appear in Table \ref{ta:supermagicsquare}, which has been obtained in \cite{CunEld1}, and which extends Table \ref{ta:MS3} with the addition of two rows and columns filled with Lie superalgebras.
{\small
\begin{table}[h!]
\begin{center}
\begin{tabular}{c|cccc|cc}
${\mathfrak g}(C,C')$&${\mathbb F}$&${\mathbb F}\times {\mathbb F}$&$\Mat_2({\mathbb F})$&$C({\mathbb F})$&$B(1,2)$&$B(4,2)$\\
\hline
\vrule height 14pt width 0pt depth 2pt${\mathbb F}$&$A_1$&$\tilde A_2$&$C_3$&$F_4$&${\mathfrak{psl}}_{2,2}$&$\mathfrak{sp}_6\oplus (14)$\\
\vrule height 14pt width 0pt depth 2pt${\mathbb F}\times {\mathbb F}$&&$\tilde A_2\oplus \tilde A_2$&$\tilde A_5$&$\tilde E_6$&$\bigl({\mathfrak{pgl}}_3\oplus\mathfrak{sl}_2\bigr)\oplus\bigl({\mathfrak{psl}}_3\otimes (2)\bigr)$&
${\mathfrak{pgl}}_6\oplus (20)$\\
\vrule height 14pt width 0pt depth 2pt$\Mat_2({\mathbb F})$&&&$D_6$&$E_7$&
$\bigl(\mathfrak{sp}_6\oplus\mathfrak{sl}_2\bigr)\oplus\bigl((13)\otimes (2)\bigr)$
&$\mathfrak{so}_{12}\oplus spin_{12}$\\
\vrule height 14pt width 0pt depth 2pt$C({\mathbb F})$&&&&$E_8$&
$\bigl(\mathfrak{f}_4\oplus\mathfrak{sl}_2\bigr)\oplus\bigl((25)\otimes (2)\bigr)$&
$\mathfrak{e}_7\oplus (56)$\\
\hline
\vrule height 14pt width 0pt depth 2pt$B(1,2)$&&&&&
$\mathfrak{so}_7\oplus 2spin_7$ &$\mathfrak{sp}_8\oplus(40)$\\
\vrule height 14pt width 0pt depth 2pt$B(4,2)$&&&&&&$\mathfrak{so}_{13}\oplus spin_{13}$\\
\end{tabular}
\null
\end{center}
\caption{A Supermagic Square in characteristic
$3$}\label{ta:supermagicsquare}
\end{table}
}
Since the construction of ${\mathfrak g}(C,C')$ is symmetric, only the
entries above the diagonal are needed. In Table
\ref{ta:supermagicsquare}, the even and odd parts of the nontrivial superalgebras in the
table which have no counterpart in the classification in
characteristic $0$ (\cite{Kac77}) are displayed, $spin$ denotes the
spin module for the corresponding orthogonal Lie algebra, while
$(n)$ denotes a module of dimension $n$, whose precise description is given in \cite{CunEld1}. Thus, for example,
${\mathfrak g}\bigl(\Mat_2({\mathbb F}),B(1,2)\bigr)$ is a Lie superalgebra whose even part is
(isomorphic to) the direct sum of the symplectic Lie algebra
$\mathfrak{sp}_6$ and of $\mathfrak{sl}_2$, while its odd part is the tensor
product of a $13$-dimensional module for $\mathfrak{sp}_6$ and the
natural $2$-dimensional module for $\mathfrak{sl}_2$.
A precise description of these modules and of the Lie superalgebras
as Lie superalgebras with a Cartan matrix is given in \cite{CunEld1}. All the
inequivalent Cartan matrices for these Lie superalgebras are listed in \cite{BGL}.
Denote by ${\mathfrak g}(n,m)$ the Lie superalgebra in Table \ref{ta:supermagicsquare} with $\dim C=n$ and $\dim C'=m$. Then all the Lie superalgebras ${\mathfrak g}(n,m)$ are simple unless either $n$ or $m$ is equal to $2$. The Lie superalgebra ${\mathfrak g}(2,3)$ has a unique simple ideal of codimension one whose even part is ${\mathfrak{psl}}_3\oplus\mathfrak{sl}_2$, while ${\mathfrak g}(2,6)$ has a unique simple ideal of codimension one whose even part is ${\mathfrak{psl}}_6$.
In this way, a family of $10$ new simple Lie superalgebras over algebraically closed fields of characteristic $3$ appears in Table \ref{ta:supermagicsquare}.
Actually, with the exception of ${\mathfrak g}(3,6)$, all these superalgebras had appeared for the first time in \cite{EldNew3}, where the ${\mathfrak g}(n,3)$ and ${\mathfrak g}(n,6)$ for $n=1,2,4$, or $8$ were described in terms of some instances of the so called symplectic and orthogonal triple systems, and in \cite{EldModular}, where ${\mathfrak g}(3,3)$ and ${\mathfrak g}(6,6)$ appeared in a different way.
\section{Some further results and remarks}
In this section we mention several recent results related with the Tits construction and its extensions.
\subsection{Bouarroudj, Grozman and Leites classification}
The finite dimensional modular Lie superalgebras over algebraically closed fields with indecomposable symmetrizable Cartan matrices (or contragredient Lie superalgebras) have been classified in \cite{BGLgordo} under some technical hypotheses, even in characteristic $2$, where a suitable definition of Lie superalgebra is given first.
In characteristic $p\geq 3$, apart from the Lie superalgebras obtained as the analogues of the Lie superalgebras in Kac's classification in characteristic $0$ by reducing the Cartan matrices modulo $p$, there are only the following exceptions:
\begin{enumerate}
\item Two exceptions in characteristic $5$: $\mathfrak{br}(2;5)$ of dimension $10\vert 12$ (that is, the even part has dimension $10$ and the odd part dimension $12$), and $\mathfrak{el}(5;5)$ of dimension $55\vert 32$.
\item The family of exceptions given by the Lie superalgebras in the Supermagic Square in characteristic $3$ (Table \ref{ta:supermagicsquare}).
\item Another two exceptions in characteristic $3$, similar to the ones in characteristic $5$: $\mathfrak{br}(2;3)$ of dimension $10\vert 8$, and $\mathfrak{el}(5;3)$ of dimension $39\vert 32$.
\end{enumerate}
Besides the superalgebras in the Supermagic Square in characteristic $3$ (Table \ref{ta:supermagicsquare}), it turns out that the superalgebra $\mathfrak{el}(5;5)$ is the Lie superalgebra ${\mathcal T}(C({\mathbb F}),K_{10})$ in the Supermagic Rectangle (Table \ref{ta:SuperRectangle}), while the superalgebra $\mathfrak{el}(5;3)$ lives (as a natural maximal subalgebra) in the Lie superalgebra ${\mathfrak g}(3,8)$ of the Supermagic Square in characteristic $3$ (see \cite{EldModels}).
Therefore only two of the exceptions, $\mathfrak{br}(2;3)$ and $\mathfrak{br}(2;5)$, do not seem to be connected to an extension of the Tits construction.
The simple Lie superalgebra $\mathfrak{br}(2;3)$ appeared for the first time in \cite{EldNew3}, related to an eight-dimensional \emph{symplectic triple system}. Finally, a simple model of the Lie superalgebra $\mathfrak{br}(2;5)$ appears in \cite{EldModels}.
\subsection{Jordan algebras and superalgebras}
The Lie superalgebras that appear in the Supermagic Square in characteristic $3$ in Table \ref{ta:supermagicsquare} are strongly related to some Jordan algebras and superalgebras.
Thus the even part of the Lie superalgebras ${\mathfrak g}\bigl(B(1,2),C\bigr)$, for a composition algebra $C$, is the direct sum of the three-dimensional simple Lie algebra $\mathfrak{sl}_2$ and the Lie algebra of derivations of the simple Jordan algebra $J=H_3(C,*)$ of $3\times 3$ hermitian matrices over $C$, while its odd part is the tensor product of the two-dimensional natural irreducible module for $\mathfrak{sl}_2$ with the module for $\der J$ consisting of the quotient of the subspace $J^0$ of trace zero elements in $J$ modulo the subspace spanned by the identity matrix. (Since the characteristic is $3$, the trace of the identity matrix is $0$.)
On the other hand, the Lie superalgebras ${\mathfrak g}\bigl(B(4,2),C\bigr)$, with $C$ as above, are related to the simple Freudenthal triple system defined on the space of $2\times 2$ matrices with diagonal entries in ${\mathbb F}$ and off-diagonal entries in $J=H_3(C,*)$. (See \cite{CunEld2} for the details.)
From a different perspective (see \cite{CunEldIvan}), the Lie superalgebras ${\mathfrak g}\bigl(B(1,2),C\bigr)$ (respectively ${\mathfrak g}\bigl(B(4,2),C\bigr)$), for an associative composition algebra $C$, are related to the simple Jordan superalgebra of the hermitian $3\times 3$ matrices over the composition superalgebra $B(1,2)$ (respectively $B(4,2)$). In particular, if $C=\Mat_2({\mathbb F})$ then we get the Tits-Kantor-Koecher Lie superalgebras of these simple Jordan superalgebras.
Moreover, the simple Lie superalgebra ${\mathfrak g}\bigl(B(1,2),B(1,2)\bigr)$ is the Tits-Kantor-Koecher Lie superalgebra of the $9$-dimensional simple Kac Jordan superalgebra $K_9$ (recall that the $10$-dimensional Kac superalgebra in \eqref{eq:K10} is no longer simple in characteristic $3$, but contains a $9$-dimensional simple ideal, which is the tensor product (in the graded sense) of two copies of the tiny Kaplansky superalgebra $K_3$.)
\subsection{The fourth row of Tits construction in characteristic $3$}
If the characteristic is $3$, the Lie algebra of derivations of the Cayley algebra $C({\mathbb F})$ is no longer simple, but contains the simple ideal consisting of its inner derivations, which consists just of the adjoint maps $\ad_x:y\mapsto [x,y]$. This ideal is isomorphic to the projective special linear algebra ${\mathfrak{psl}}_3({\mathbb F})$ (see \cite{AMEN}). It makes sense then to consider a modified Tits construction:
\[
\begin{split}
\tilde{\mathcal T}\bigl(C({\mathbb F}),J\bigr)&=\ad_{C^0}\oplus (C^0\otimes J^0)\oplus\inder J\\
&\simeq (C^0\otimes J)\oplus \inder J,
\end{split}
\]
with a bracket like the one in \eqref{eq:TCJproduct}. Then (see \cite{EldTits3}) $\tilde{\mathcal T}\bigl(C({\mathbb F}),J\bigr)$ becomes a Lie algebra if and only if $J$ is a commutative alternative algebra (these two conditions imply that $J$ is a Jordan algebra).
The simple commutative alternative algebras are just the fields, so nothing interesting appears here.
But there are two types of simple commutative alternative superalgebras
\cite{She97} in characteristic $3$, apart from the fields. The easiest simple commutative alternative superalgebra is the Jordan superalgebra $J^{0\vert 2}$ which has already appeared in the construction of the Supermagic Rectangle (Table \ref{ta:SuperRectangle}) in Section \ref{se:JordanSuperalgebras}.
It follows that the simple Lie superalgebra $\tilde{\mathcal T}(C({\mathbb F}),J^{0\vert 2})$ coincides with the unique simple ideal of codimension one in the Lie superalgebra ${\mathfrak g}(2,3)={\mathfrak g}({\mathbb F}\times{\mathbb F},B(1,2))$ in the Supermagic Square in characteristic $3$ (Table \ref{ta:supermagicsquare}), so nothing new appears here.
However, the other family of simple commutative alternative superalgebras in characteristic $3$ consists of the superalgebras $B=B(\Gamma,d)$, where:
\begin{itemize}
\item $\Gamma$ is a commutative associative algebra,
\item $d\in\der\Gamma$ is a derivation such that there is no proper ideal of $\Gamma$ invariant under the action of $d$ (that is, $\Gamma$ is $d$-simple),
\item $B=\Gamma\oplus\Gamma u$ as a superspace, with even part $\Gamma$ and odd part $\Gamma u$, and the multiplication is determined by $a(bu)=(ab)u=(au)b$,\quad $(au)(bu)=ad(b)-d(a)b$, for any
$a,b\in \Gamma$.
\end{itemize}
The most natural example of this situation is obtained if $\Gamma$ is taken to be the algebra of divided powers
\[
{\mathcal O}(1;n)=\espan{t^{(r)}: 0\leq r\leq 3^n-1},
\]
where $t^{(r)}t^{(s)}=\binom{r+s}{r}t^{(r+s)}$, and with $d$ the derivation given by $d(t^{(r)})=t^{(r-1)}$.
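As a small check (routine, but perhaps worth recording), $d$ is indeed a derivation of ${\mathcal O}(1;n)$ and $\Gamma={\mathcal O}(1;n)$ is $d$-simple: on the one hand,
\[
d\bigl(t^{(r)}t^{(s)}\bigr)=\binom{r+s}{r}t^{(r+s-1)}
=\Bigl(\binom{r+s-1}{r-1}+\binom{r+s-1}{r}\Bigr)t^{(r+s-1)}
=d\bigl(t^{(r)}\bigr)t^{(s)}+t^{(r)}d\bigl(t^{(s)}\bigr)
\]
by Pascal's rule (with the convention $t^{(-1)}=0$); on the other hand, if a nonzero $d$-invariant ideal contains an element whose highest nonzero component is $\alpha t^{(r)}$, then applying $d$ exactly $r$ times produces the nonzero scalar $\alpha t^{(0)}=\alpha$, so the ideal is all of $\Gamma$.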
The Lie superalgebras $\tilde{\mathcal T}\bigl(C({\mathbb F}),B({\mathcal O}(1;n),d)\bigr)$ ($n\geq 1$) are then simple Lie superalgebras with no counterparts in Kac's classification. These simple Lie superalgebras have appeared first in \cite{BGL} in a completely different way, and were denoted by $\textrm{Bj}(1;n\vert 7)$. (See \cite{EldTits3} for the details.)
\subsection{Symmetric composition (super)algebras}
In Section \ref{se:SymCons} a symmetric construction of the Magic Square has been reviewed. It depends on two composition algebras, and uses their triality Lie algebras. However, nicer formulas for triality are obtained if one takes the so called \emph{symmetric composition algebras} instead of the traditional unital composition algebras (see \cite[Chapter VIII]{KMRT}).
An algebra $S$, with product denoted by $*$, endowed with a regular quadratic form $n$ permitting composition: $n(x*y)=n(x)n(y)$, and satisfying that $n(x*y,z)=n(x,y*z)$ for any $x,y,z\in S$, is called a \emph{symmetric composition algebra}.
The easiest way to obtain a symmetric composition algebra is to start with a classical composition algebra $C$ and consider the new multiplication defined by $x\bullet y=\bar x\bar y$. Then $C$, with its norm but with this new product, is a symmetric composition algebra, called a \emph{para-Hurwitz algebra}.
Actually, if one looks at the definition of the triality Lie algebra $\mathfrak{tri}(C)$, one may notice that, in terms of this new multiplication, the definition in \eqref{eq:triC} becomes:
\[
\mathfrak{tri}(C)=\{(d_0,d_1,d_2)\in\mathfrak{so}(C,n)^3: d_0(x\bullet y)=d_1(x)\bullet y+x\bullet d_2(y)\ \forall x,y\in C\}.
\]
(This has already been used in the definition of the triality Lie superalgebra in the previous section.) Also, the Lie bracket in ${\mathfrak g}(C,C')$ is better expressed in terms of the para-Hurwitz product.
The interesting fact about symmetric composition algebras is that, apart from the para-Hurwitz algebras, there appear the so called \emph{Okubo algebras}, which were introduced, under a different name, in \cite{Oku78}.
Over our algebraically closed ground field ${\mathbb F}$ there is a unique Okubo algebra. If the characteristic of ${\mathbb F}$ is $\ne 2,3$, this Okubo algebra is defined as the subspace $\mathfrak{sl}_3({\mathbb F})$ of trace zero $3\times 3$ matrices, with the multiplication given by:
\[
x*y=\mu xy+(1-\mu)yx-\frac{1}{3}\tr(xy)1,
\]
where $\mu$ is a solution of the equation $3X(1-X)=1$, and the norm is given by $n(x)=\frac{1}{6}\tr(x^2)$. In characteristic $3$ a different definition is required (see \cite{EP96} and the references therein).
Our definition of the Lie algebra ${\mathfrak g}(C,C')$ is valid for $C$ and $C'$ being symmetric composition algebras. For para-Hurwitz algebras this is the same construction, but Okubo algebras introduce some extra features. Actually, over algebraically closed fields, no new simple Lie algebras appear, but some new properties of the simple exceptional Lie algebras can be explained in these new terms. For instance, some interesting gradings on these algebras appear naturally induced from natural gradings on Okubo algebras (see \cite{EldGradSym}).
In conclusion, I hope to have convinced the reader that the beautiful construction given by Tits \cite{Tits66} of the exceptional simple Lie algebras has continued to be, more than forty years after its publication, a source of inspiration for further work.
\end{document}
\begin{document}
\title{Critical branching processes evolving in an unfavorable random
environment\thanks{
This work was supported by the Russian Science Foundation under grant
no.19-11-00111 https://rscf.ru/en/project/19-11-00111/ }}
\author{V.~A.~Vatutin\thanks{
Steklov Mathematical Institute, Gubkin street 8, 119991 Moscow, Russia. Email:
[email protected]}, E.~E.~Dyakonova\thanks{
Steklov Mathematical Institute, Gubkin street 8, 119991 Moscow, Russia. Email:
[email protected]}}
\maketitle
\begin{abstract}
Let $\left\{ Z_{n},n=0,1,2,...\right\} $ be a critical branching process in
random environment and let $\left\{ S_{n},n=0,1,2,...\right\} $ be its
associated random walk. It is known that if the increments of this random
walk belong (without centering) to the domain of attraction of a stable law,
then there exists a sequence $a_{1},a_{2},...,$ regularly varying at infinity
such that the conditional distributions
\begin{equation*}
\mathbf{P}\left( \frac{S_{n}}{a_{n}}\leq x\Big|Z_{n}>0\right) ,\quad x\in
(-\infty ,+\infty ),
\end{equation*}
converge weakly, as $n\rightarrow \infty $, to the distribution of a
strictly positive and proper random variable. In this paper we supplement
this result with a description of the asymptotic behavior of the probability
\begin{equation*}
\mathbf{P}\left( S_{n}\leq \varphi (n);Z_{n}>0\right) ,
\end{equation*}
if $\varphi (n)\rightarrow \infty $ \ as $n\rightarrow \infty $ in such a
way that $\varphi (n)=o(a_{n})$.
\textbf{Key words}: branching process, unfavorable random environment,
survival probability
\end{abstract}
\section{Introduction and main result}
We consider critical branching processes evolving in an unfavorable random
environment. To formulate the problem under consideration and to give a
detailed description of our main result we start by recalling some basic
properties of such processes.
Let $\mathcal{M}$ be the space of all probability measures on $\mathbf{N}
_{0}:=\{0,1,2,\ldots \}.$ Equipped with a metric, $\mathcal{M}$ becomes a
Polish space. Let $F$ be a random variable taking values in $\mathcal{M}$,
and let $F_{n},n\in \mathbf{N}:=\mathbf{N}_{0}\backslash \left\{ 0\right\} $
be a sequence of independent copies of $F$. The infinite sequence $\mathcal{E
}=\left\{ F_{n},n\in \mathbf{N}\right\} $ is called a random environment.
Given the environment $\mathcal{E}$, we may construct the i.i.d. sequence of
generating functions
\begin{equation*}
F_{n}(s):=\sum_{j=0}^{\infty }F_{n}\left( \left\{ j\right\} \right)
s^{j},\quad s\in \lbrack 0,1].
\end{equation*}
In the sequel we make no difference between an element $F_{n}\in \mathcal{M}$
and the respective generating function $F_{n}(s)$ and use for a random
variable $F,$ taking values in $\mathcal{M}$, the representation
\begin{equation*}
F(s):=\sum_{j=0}^{\infty }F\left( \left\{ j\right\} \right) s^{j},\quad s\in
\lbrack 0,1].
\end{equation*}
A sequence of nonnegative random variables $\mathcal{Z}=\left\{ Z_{n},\ n\in
\mathbf{N}_{0}\right\} $ specified on\ a probability space $(\Omega ,
\mathcal{F},\mathbf{P})$ is called a branching process in random environment
(BPRE), if $Z_{0}$ is independent of $\mathcal{E}$ and, given $\mathcal{E}$,
the process $\mathcal{Z}$ is a Markov chain with
\begin{equation*}
\mathcal{L}\left( Z_{n}|Z_{n-1}=z_{n-1},\mathcal{E}=(f_{1},f_{2},...)\right)
=\mathcal{L}(\xi _{n1}+\ldots +\xi _{nz_{n-1}})
\end{equation*}
for all $n\in \mathbf{N}$, $z_{n-1}\in \mathbf{N}_{0}$ and $
f_{1},f_{2},...\in \mathcal{M}$, where $\xi _{n1},\xi _{n2},\ldots $ is a
sequence of i.i.d. random variables with distribution $f_{n}.$ Thus, $
Z_{n-1} $ is the $(n-1)$th generation size of the population of the
branching process and $f_{n}$ is the offspring distribution of an individual
at generation $n-1$.
We denote $X_{i}=\log F_{i}^{\prime }(1),i=1,2,...$ and introduce the
sequence
\begin{equation*}
S_{0}=0,\quad S_{n}=X_{1}+...+X_{n},\ n\geq 1,
\end{equation*}
which is called the associated random walk for the process $\mathcal{Z}$.
We need the subset
\begin{equation*}
\mathcal{A}=\{0<\alpha <1;\,|\beta |<1\}\cup \{1<\alpha <2;|\beta |\leq
1\}\cup \{\alpha =1,\beta =0\}\cup \{\alpha =2,\beta =0\}
\end{equation*}
of the set $\mathbb{R}^{2}.$ For a pair $(\alpha ,\beta )\in \mathcal{A}$
and a random variable $X$ we write $X\in \mathcal{D}\left( \alpha ,\beta
\right) $ if the distribution of $X$ belongs (without centering) to the
domain of attraction of a stable law with density $g_{\alpha ,\beta
}(x),x\in (-\infty ,+\infty )$ and the characteristic function
\begin{equation*}
G_{\alpha ,\beta }(w)=\int_{-\infty }^{+\infty
}e^{iwx}g_{\alpha ,\beta }(x)\,dx=\exp \left\{ -c|w|^{\,\alpha }\left(
1-i\beta \frac{w}{|w|}\tan \frac{\pi \alpha }{2}\right) \right\} ,\quad c>0.
\end{equation*}
This implies, in particular, that there is an increasing sequence of
positive numbers
\begin{equation}
a_{n}\ =\ n^{1/\alpha }\ell (n), \label{defA}
\end{equation}
where $\ell (1),\ell (2),\ldots $ is a slowly varying sequence, such that,
as $n\rightarrow \infty $
\begin{equation*}
\mathcal{L}\left\{ \frac{S_{nt}}{a_{n}},t\geq 0\right\} \overset{D}{
\rightarrow }\mathcal{L}\left\{ Y_{t},t\geq 0\right\} ,
\end{equation*}
where the symbol \ \ $\overset{D}{\rightarrow }$ stands for the convergence
in distribution in the space $D[0,+\infty )$ with Skorokhod topology and the
process $\mathcal{Y}=\left\{ Y_{t},t\geq 0\right\} $ is strictly stable and
has marginal distributions specified by the characteristic functions
\begin{equation*}
\mathbf{E}e^{iwY_{t}}=G_{\alpha ,\beta }(wt^{1/\alpha }),\quad t\geq 0.
\end{equation*}
Observe that if $X_{n}\overset{d}{=}X\in \mathcal{D}\left( \alpha ,\beta
\right) $ for all $n\in \mathbf{N}$ then (see \cite{Zol57} or \cite[p.
380]{BGT87}) the limit
\begin{equation*}
\lim_{n\rightarrow \infty }\mathbf{P}\left( S_{n}>0\right) =\rho =\mathbf{P}
\left( Y_{1}>0\right)
\end{equation*}
exists, where
\begin{equation*}
\displaystyle\rho =\frac{1}{2}+\frac{1}{\pi \alpha }\arctan \left( \beta
\tan \frac{\pi \alpha }{2}\right) .
\end{equation*}
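For orientation we record the simplest symmetric case (a standard specialization, stated here only as an illustration): if $\beta =0$ and $\alpha \neq 1$, then $\arctan \left( \beta \tan \frac{\pi \alpha }{2}\right) =0$ and, therefore, $\rho =1/2$; the same value $\rho =1/2$ holds in the symmetric Cauchy case $\alpha =1,\beta =0$. In particular, for $\alpha =2,\beta =0$, when the limiting process $\mathcal{Y}$ is a Brownian motion, we have $a_{n}=n^{1/2}\ell (n)$ and the exponent $1-\rho $ in the survival asymptotics recalled below equals $1/2$.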
We now formulate our first restriction on the properties of the BPRE.
\paragraph{Condition B1.}
\emph{The random variables }$X_{n}=\log F_{n}^{\prime }(1),n\in \mathbf{N}$
\emph{\ are independent copies of a random variable }$X\in \mathcal{D}\left(
\alpha ,\beta \right) $\emph{,} $\left\vert \beta \right\vert <1,$ \emph{
whose distribution is absolutely continuous. }
Our second assumption on the environment concerns the standardized truncated
second moment of the generating function $F$:
\begin{equation*}
\zeta (b)=\frac{\sum_{k=b}^{\infty }k^{2}F\left( \left\{ k\right\} \right) }{
\left( \sum_{i=b}^{\infty }iF\left( \left\{ i\right\} \right) \right) ^{2}}.
\end{equation*}
\paragraph{Condition B2.}
\emph{There exist $\varepsilon >0$ and $b\in $}$\mathbf{N}$ \emph{such that}
\begin{equation*}
\mathbf{E}[(\log ^{+}\zeta (b))^{\alpha +\varepsilon }]\ <\ \infty \ ,
\end{equation*}
\emph{where }$\log ^{+}x=\log (\max (x,1))$\emph{.}
It is known (see \cite[Theorem 1.1 and Corollary 1.2]{agkv}) that if
Conditions B1, B2 are valid then there exist a number $\theta \in (0,\infty
) $ and a sequence $l(1),l(2)...$ slowly varying at infinity such that, as $
n\rightarrow \infty $
\begin{equation*}
\mathbf{P}\left( Z_{n}>0\right) \sim \theta \mathbf{P}\left( \min \left(
S_{1},...,S_{n}\right) \geq 0\right) \sim \theta n^{-(1-\rho )}l(n).
\end{equation*}
Besides (see \cite[Theorem 1.5]{agkv}), for any $t\in \left[ 0,1\right] $
and any $x\geq 0$
\begin{equation*}
\lim_{n\rightarrow \infty }\mathbf{P}\left( \frac{S_{nt}}{a_{n}}\leq
x|Z_{n}>0\right) =\mathbf{P}\left( Y^{+}(t)\leq x\right) ,
\end{equation*}
where $\mathcal{Y}^{+}=\left\{ Y^{+}(t),0\leq t\leq 1\right\} $ denotes the
meander of the strictly stable process $\mathcal{Y}$.
Thus, the associated random walk that provides survival of the population up
to a distant moment $n$ grows like $a_{n}$ times a random positive multiplier.
Our aim is to investigate the behavior of the probability of the event $
\left\{ Z_{n}>0\right\} $ in an unfavorable environment, namely, when $0\leq
S_{n}=o(a_{n})$ as $n\rightarrow \infty $.
To formulate the desired statement we denote
\begin{equation*}
M_{n}=\max \left( S_{1},...,S_{n}\right) ,\quad L_{n}=\min \left(
S_{1},...,S_{n}\right) ,
\end{equation*}
and introduce right-continuous renewal functions
\begin{align*}
U(x)& =I\left\{ x\geq 0\right\} +\sum_{n=1}^{\infty }\mathbf{P}\left(
S_{n}\geq -x,M_{n}<0\right) ,\ x\in \mathbb{R}, \\
V(x)\ & =\ I\left\{ x<0\right\} +\sum_{n=1}^{\infty }\mathbf{P}\left(
S_{n}<-x,L_{n}\geq 0\right) ,\ x\in \mathbb{R}.
\end{align*}
It is well-known that $U(x)=O(x)$ and $V(-x)=O(x)$ as $x\rightarrow \infty $.
\textbf{Remark 1.} Observe that if Condition B1 is valid then
\begin{equation*}
V(x)\ =\tilde{V}(x)\ =\ I\left\{ x<0\right\} +\sum_{n=1}^{\infty }\mathbf{P}
\left( S_{n}<-x,L_{n}>0\right) ,\ x\in \mathbb{R}.
\end{equation*}
We use this fact many times below referring to the results of \cite{VW09}.
We now formulate the main result of the note.
\begin{theorem}
\label{T_smallDevi}Let Conditions B1, B2 be valid. If a function $\varphi
(n)\rightarrow \infty $ as $n\rightarrow \infty $ and $\varphi (n)=o(a_{n})$
then
\begin{equation*}
\mathbf{P}\left( Z_{n}>0;S_{n}\leq \varphi (n)\right) \sim \Theta \frac{
g_{\alpha ,\beta }(0)}{na_{n}}\int_{0}^{\varphi (n)}V(-z)dz
\end{equation*}
as $n\rightarrow \infty $, where $\Theta $ is a positive constant specified
in formula (\ref{Def_Theta}) below.
\end{theorem}
Theorem \ref{T_smallDevi} complements Theorem 1.1 in \cite{agkv}, where the
asymptotic behavior of the survival probability $\mathbf{P}\left(
Z_{n}>0\right) $ was investigated as $n\rightarrow \infty $.
In the sequel we denote by $C,C_{1},C_{2},...,$ some positive constants that
do not necessarily coincide in different formulas.
\section{Auxiliary results}
Proving Theorem \ref{T_smallDevi} we will use random walks that start from
any point $x\in \mathbb{R}$. In such cases we write the respective
probabilities as $\mathbf{P}_{x}\left( \cdot \right) $. We also write $
\mathbf{P}$ instead of $\mathbf{P}_{0}$.
We now formulate a number of statements that show importance of the
functions $U$ and $V$.
\begin{lemma}
\label{L_estimV} If Condition B1 is valid then for any $\lambda \in
(0,\infty )$ there exists a constant $C(\lambda )$ such that
\begin{equation*}
\int_{0}^{\lambda \varphi (n)}V(-z)dz\leq C(\lambda )\int_{0}^{\varphi
(n)}V(-z)dz
\end{equation*}
for all $n\geq 1$.
\end{lemma}
\textbf{Proof}. If $X\in \mathcal{D}(\alpha ,\beta ),$ then (compare with
Lemma 13 in \cite{VW09})
\begin{equation*}
V(-x)=x^{\alpha \rho }l_{2}(x)
\end{equation*}
as $x\rightarrow \infty $, where $l_{2}(x)$ is a slowly varying function.
Therefore, the function
\begin{equation*}
\int_{0}^{x}V(-z)dz
\end{equation*}
is regularly varying with index $\alpha \rho +1$ as $x\rightarrow \infty $
(see \cite[Ch. VIII, Sec. 9, Theorem 1]{Fel2008}). Now the statement of the
lemma follows from the properties of regularly varying functions with a
positive index.
The lemma is proved.
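We note in passing (a reformulation only, based on the regular variation just established and the cited theorem from \cite{Fel2008}) that
\begin{equation*}
\int_{0}^{x}V(-z)dz\sim \frac{xV(-x)}{\alpha \rho +1},\quad x\rightarrow \infty ,
\end{equation*}
so that the statement of Theorem \ref{T_smallDevi} may be rewritten in the equivalent form
\begin{equation*}
\mathbf{P}\left( Z_{n}>0;S_{n}\leq \varphi (n)\right) \sim \Theta \frac{g_{\alpha ,\beta }(0)}{(\alpha \rho +1)na_{n}}\,\varphi (n)V(-\varphi (n)),\quad n\rightarrow \infty .
\end{equation*}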
Let
\begin{equation}
b_{n}=\frac{1}{a_{n}n}=\frac{1}{\ n^{1/\alpha +1}\ell (n)}. \label{Def_b}
\end{equation}
\begin{lemma}
\label{L_double} If the distribution of the random \ variable $X$ satisfies
Condition B1 then there is a constant $C>0$ such that for all $n\geq 1$ and
for all $x,y\geq 0$
\begin{equation}
\mathbf{P}_{x}\left( 0\leq S_{n}<y,L_{n}\geq 0\right) \ \leq \
C\,b_{n}\,U(x)\int_{0}^{y}V(-z)dz\ , \label{Rough1}
\end{equation}
and for all $x,y\leq 0$
\begin{equation}
\mathbf{P}_{x}\left( y\leq S_{n}<0,M_{n}<0\right) \ \leq \
C\,b_{n}\,V(x)\int_{y}^{0}U(-z)\ dz. \label{Rough2}
\end{equation}
\end{lemma}
\textbf{Proof}. According to Proposition 2.3 in \cite{ABGV2011} there is a
constant $C>0$ such that for all $n\geq 1$ and for all $x,z\geq 0$
\begin{equation*}
\mathbf{P}_{x}\left( z-1\leq S_{n}<z,L_{n}\geq 0\right) \ \leq \
C\,b_{n}\,U(x)V(-z)\ ,
\end{equation*}
and for all $x,z\leq 0$
\begin{equation*}
\mathbf{P}_{x}\left( z\leq S_{n}<z+1,M_{n}<0\right) \ \leq \
C\,b_{n}\,V(x)U(-z)\ .
\end{equation*}
Integration with respect to $z$ of the first inequality over the interval $
[0,y),y\geq 0,$ and the second inequality over the interval $(y,0],y<0,$
gives the desired statement.
The lemma is proved.
The next theorem is a restatement of Theorem 4 in \cite{VW09} in our
notation and refines (\ref{Rough1}) for $x=0$.
\begin{theorem}
\label{T_VatWach} Suppose that the distribution of the random variable $X$ satisfies
Condition B1. Then for any $\Delta >0$
\begin{equation*}
\mathbf{P}\left( S_{n}\in \lbrack y,y+\Delta ),L_{n}\geq 0\right) \sim
g_{\alpha ,\beta }(0)b_{n}\int_{y}^{y+\Delta }V(-w)dw
\end{equation*}
uniformly in \thinspace $y\in (0,\delta _{n}a_{n}]$ where $\delta
_{n}\rightarrow 0$ as $n\rightarrow \infty $.
\end{theorem}
Integrating the relation shown in Theorem \ref{T_VatWach} over $y\in (0,x]$
leads to the following important conclusion.
\begin{corollary}
\label{C_IntegVW} Under the conditions of Theorem \ref{T_VatWach}
\begin{equation*}
\mathbf{P}(S_{n}\leq x,L_{n}\geq 0)\sim g_{\alpha ,\beta
}(0)b_{n}\int_{0}^{x}V(-w)dw
\end{equation*}
uniformly in \thinspace $x\in (0,\delta _{n}a_{n}],$ where $\delta
_{n}\rightarrow 0$ as $n\rightarrow \infty $.
\end{corollary}
We need some refinements of Theorem \ref{T_VatWach} established by Doney.
For $x\geq 0$ and $y\geq 0$ we write for brevity $x_{n}$ for $x/a_{n}$
and $y_{n}$ for $y/a_{n}$.
\begin{lemma}
\label{DonLocal} (\cite[Proposition 18]{Don12}) If the distribution of the
random \ variable $X$ satisfies Condition B1, then for any $\Delta >0$
\begin{equation*}
\mathbf{P}_{x}\left( S_{n}\in \lbrack y,y+\Delta ),L_{n}\geq 0\right) \sim
g_{\alpha ,\beta }(0)b_{n}U(x)\int_{y}^{y+\Delta }V(-w)dw
\end{equation*}
uniformly with respect to $x,y\geq 0$ such that $\max
(x_{n},y_{n})\rightarrow 0$ as $n\rightarrow \infty .$
\end{lemma}
In view of the identities
\begin{equation*}
\begin{array}{rl}
\mathbf{E}[U(x+X);X+x\geq 0]\ =\ U(x)\ , & x\geq 0\ , \\
\mathbf{E}[V(x+X);X+x<0]\ =\ V(x)\ , & x\leq 0\ ,
\end{array}
\end{equation*}
which hold for any oscillating random walk (see \cite[Ch. 4.4.3]{KV2017}), $U$
and $V$ give rise to two new probability measures $\mathbf{P}^{+}$ and $
\mathbf{P}^{-}$. The construction procedure is standard and explained for $
\mathbf{P}^{+}$ and $\mathbf{P}^{-}$ in detail in \cite{agkv} and \cite
{ABGV2011}, respectively (see also \cite[Ch. 5.2]{KV2017}). We recall here
only some basic definitions related to the construction.
We assume that the random walk $\mathcal{S}=\left\{ S_{n},n\geq 0\right\} $
is adapted to some filtration $\mathcal{F}=(\mathcal{F}_{n},n\geq 0)$ and
construct probability measures $\mathbf{P}_{x}^{+}$, $x\geq 0,$ satisfying
for any bounded and measurable function $g:\mathcal{S}^{n+1}\rightarrow
\mathbb{R}$ the equality
\begin{equation*}
\mathbf{E}_{x}^{+}[g(R_{0},\ldots ,R_{n})]\ =\ \frac{1}{U(x)}\mathbf{E}
_{x}[g(R_{0},\ldots ,R_{n})U(S_{n});L_{n}\geq 0]\ ,\ n\in \mathbf{N}_{0},
\end{equation*}
where $R_{0},R_{1},\ldots $ is a sequence of $\mathcal{S}$-valued random
variables, adapted to the filtration $\mathcal{F}$.
Similarly $V$ gives rise to probability measures $\mathbf{P}_{x}^{-}$, $
x\leq 0$, characterized for each $n\in \mathbf{N}_{0}$ by the equation
\begin{equation*}
\mathbf{E}_{x}^{-}[g(R_{0},\ldots ,R_{n})]\ :=\ \frac{1}{V(x)}\mathbf{E}
_{x}[g(R_{0},\ldots ,R_{n})V(S_{n});M_{n}<0]\ .
\end{equation*}
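For example (a simple consistency check rather than a new fact), taking $g\equiv 1$ in the definition of $\mathbf{P}_{x}^{+}$ and using $\mathbf{E}_{x}^{+}[1]=1$ shows that
\begin{equation*}
\mathbf{E}_{x}\left[ U(S_{n});L_{n}\geq 0\right] =U(x),\quad n\in \mathbf{N}_{0},\ x\geq 0,
\end{equation*}
which is the multi-step extension of the first identity displayed above; similarly, $\mathbf{E}_{x}\left[ V(S_{n});M_{n}<0\right] =V(x)$ for $x\leq 0$, in accordance with the definition of $\mathbf{P}_{x}^{-}$.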
Now we prove the following statement which generalizes Lemma 2.5 in \cite
{agkv}.
\begin{lemma}
\label{L_cond} Assume Condition $B1$. Let $H_{1},H_{2},...,$ be a uniformly
bounded sequence of random variables adapted to the filtration
$\tilde{\mathcal{F}}=\left\{ \tilde{\mathcal{F}}_{k},k\in \mathbf{N}\right\} $, which
converges $\mathbf{P}^{+}$-a.s. to a random variable $H_{\infty }$ as $
n\rightarrow \infty $. Suppose that $\varphi (n),$ $n\in \mathbf{N}$ is a
function such that $\inf_{n\in \mathbf{N}}\varphi (n)\geq C>0$ and $\varphi
(n)=o(a_{n})$ as $n\rightarrow \infty $. Then
\begin{equation*}
\lim_{n\rightarrow \infty }\frac{\mathbf{E}\left[ H_{n};S_{n}\leq \varphi
(n),L_{n}\geq 0\right] }{\mathbf{P}\left( S_{n}\leq \varphi (n),L_{n}\geq
0\right) }=\mathbf{E}^{+}\left[ H_{\infty }\right] .
\end{equation*}
\end{lemma}
\textbf{Proof}. For a fixed $k<n$ we have
\begin{equation*}
\frac{\mathbf{E}\left[ H_{k};S_{n}\leq \varphi (n),L_{n}\geq 0\right] }{
\mathbf{P}\left( S_{n}\leq \varphi (n),L_{n}\geq 0\right) }=\mathbf{E}\left[
H_{k}\frac{\mathbf{P}_{S_{k}}(S_{n-k}^{\prime }\leq \varphi
(n),L_{n-k}^{\prime }\geq 0)}{\mathbf{P}\left( S_{n}\leq \varphi
(n),L_{n}\geq 0\right) };L_{k}\geq 0\right] ,
\end{equation*}
where $\mathcal{S}^{\prime }=\left\{ S_{n}^{\prime },n=0,1,2,...\right\} $
is a random walk which is a probabilistic copy of the random walk $\mathcal{S}$
and is independent of the set $\left\{ S_{j},j=0,1,...,k\right\} $
. We know by Lemma \ref{L_double}, Corollary \ref{C_IntegVW} and the
condition $\inf_{n\in \mathbf{N}}\varphi (n)\geq C>0$ that there exist
constants $C,C_{1}$ and $C_{2}$ such that for any fixed $k$ and all $n\geq k$
\begin{equation*}
\frac{\mathbf{P}_{S_{k}}(S_{n-k}^{\prime }\leq \varphi (n),L_{n-k}^{\prime
}\geq 0)}{\mathbf{P}\left( S_{n}\leq \varphi (n),L_{n}\geq 0\right) }\leq
\frac{C\,b_{n-k}\,U(S_{k})\int_{0}^{\varphi (n)}V(-z)dz}{C_{1}\,b_{n}\,
\int_{0}^{\varphi (n)}V(-z)dz}\leq C_{2}U(S_{k}).
\end{equation*}
Further, recalling Corollary \ref{C_IntegVW} and the definition (
\ref{Def_b}) we see that, for each fixed $x\geq 0$ and $k\in \mathbf{N}$
\begin{equation*}
\lim_{n\rightarrow \infty }\frac{\mathbf{P}_{x}(S_{n-k}^{\prime }\leq
\varphi (n),L_{n-k}^{\prime }\geq 0)}{\mathbf{P}\left( S_{n}\leq \varphi
(n),L_{n}\geq 0\right) }=\lim_{n\rightarrow \infty }\frac{
\,b_{n-k}\,U(x)\int_{0}^{\varphi (n)}V(-z)dz}{b_{n}\,\int_{0}^{\varphi
(n)}V(-z)dz}=U(x).\text{ }
\end{equation*}
Since
\begin{equation*}
\mathbf{E}\left[ H_{k}U(S_{k});L_{k}\geq 0\right] =\mathbf{E}^{+}\left[ H_{k}
\right] <\infty ,
\end{equation*}
it follows by the dominated convergence theorem that, for each fixed $k$
\begin{eqnarray*}
&&\lim_{n\rightarrow \infty }\frac{\mathbf{E}\left[ H_{k};S_{n}\leq \varphi
(n),L_{n}\geq 0\right] }{\mathbf{P}\left( S_{n}\leq \varphi (n),L_{n}\geq
0\right) } \\
&=&\mathbf{E}\left[ H_{k}\times \lim_{n\rightarrow \infty }\frac{\mathbf{P}
_{S_{k}}(S_{n-k}^{\prime }\leq \varphi (n),L_{n-k}^{\prime }\geq 0)}{\mathbf{
P}\left( S_{n}\leq \varphi (n),L_{n}\geq 0\right) };L_{k}\geq 0\right] \\
&=&\mathbf{E}\left[ H_{k}U(S_{k});L_{k}\geq 0\right] =\mathbf{E}^{+}\left[
H_{k}\right] .
\end{eqnarray*}
Further, in view of the estimate (\ref{Rough1}) for each $\lambda >1$ we
have
\begin{eqnarray*}
&&\left\vert \mathbf{E}\left[ \left( H_{n}-H_{k}\right) ;S_{\lambda n}\leq
\varphi (n),L_{\lambda n}\geq 0\right] \right\vert \\
&\leq &\mathbf{E}\left[ \left\vert H_{n}-H_{k}\right\vert \mathbf{P}
_{S_{n}}(S_{n(\lambda -1)}^{\prime }\leq \varphi (n),L_{n(\lambda
-1)}^{\prime }\geq 0);L_{n}\geq 0\right] \\
&\leq &Cb_{n(\lambda -1)}\int_{0}^{\varphi (n)}V(-z)dz\times \mathbf{E}\left[
\left\vert H_{n}-H_{k}\right\vert \,\,U(S_{n}),L_{n}\geq 0\right] \\
&=&Cb_{n(\lambda -1)}\int_{0}^{\varphi (n)}V(-z)dz\times \mathbf{E}^{+}\left[
\left\vert H_{n}-H_{k}\right\vert \,\right] .
\end{eqnarray*}
Hence, using (\ref{Def_b}) and Corollary \ref{C_IntegVW} we conclude that
\begin{eqnarray*}
\left\vert \mathbf{E}\left[ (H_{n}-H_{k})|S_{\lambda n}\leq \varphi
(n),L_{\lambda n}\geq 0\right] \right\vert &\leq &C\mathbf{E}^{+}\left[
\left\vert H_{n}-H_{k}\right\vert \right] \frac{b_{n(\lambda
-1)}\int_{0}^{\varphi (n)}V(-z)dz}{C_{1}\,b_{n\lambda }\,\int_{0}^{\varphi
(n)}V(-z)dz} \\
&\leq &C_{2}\left( \frac{\lambda }{\lambda -1}\right) ^{1+1/\alpha }\mathbf{E
}^{+}\left[ \left\vert H_{n}-H_{k}\right\vert \right] .
\end{eqnarray*}
Letting first $n$ and then $k$ tend to infinity we see that for each $\lambda >1$
the right-hand side of the previous relation vanishes by the dominated
convergence theorem.
Using this result we see that
\begin{eqnarray*}
\lim_{n\rightarrow \infty }\mathbf{E}\left[ H_{n}|S_{\lambda n}\leq \varphi
(n),L_{\lambda n}\geq 0\right] &=&\lim_{k\rightarrow \infty
}\lim_{n\rightarrow \infty }\frac{\mathbf{E}\left[ \left( H_{n}-H_{k}\right)
;S_{\lambda n}\leq \varphi (n),L_{\lambda n}\geq 0\right] }{\mathbf{P}\left(
S_{\lambda n}\leq \varphi (n),L_{\lambda n}\geq 0\right) } \\
&&+\lim_{k\rightarrow \infty }\lim_{n\rightarrow \infty }\frac{\mathbf{E}
\left[ H_{k};S_{\lambda n}\leq \varphi (n),L_{\lambda n}\geq 0\right] }{
\mathbf{P}\left( S_{\lambda n}\leq \varphi (n),L_{\lambda n}\geq 0\right) }
\\
&=&\lim_{k\rightarrow \infty }\mathbf{E}^{+}\left[ H_{k}\right] =\mathbf{E}
^{+}\left[ H_{\infty }\right] ,
\end{eqnarray*}
which we rewrite as
\begin{equation*}
\mathbf{E}\left[ H_{n};S_{\lambda n}\leq \varphi (n),L_{\lambda n}\geq 0
\right] =\left( \mathbf{E}^{+}\left[ H_{\infty }\right] +o(1)\right) \mathbf{
P}\left( S_{\lambda n}\leq \varphi (n),L_{\lambda n}\geq 0\right) .
\end{equation*}
Assuming without loss of generality that $\mathbf{E}^{+}\left[ H_{\infty }
\right] \leq 1$ we conclude that
\begin{eqnarray*}
&&\left\vert \mathbf{E}\left[ H_{n};S_{n}\leq \varphi (n),L_{n}\geq 0\right]
-\mathbf{E}^{+}\left[ H_{\infty }\right] \mathbf{P}\left( S_{\lambda n}\leq
\varphi (n),L_{\lambda n}\geq 0\right) \right\vert \\
&\leq &\left\vert \mathbf{E}\left[ H_{n};S_{\lambda n}\leq \varphi
(n),L_{\lambda n}\geq 0\right] -\mathbf{E}^{+}\left[ H_{\infty }\right]
\mathbf{P}\left( S_{\lambda n}\leq \varphi (n),L_{\lambda n}\geq 0\right)
\right\vert \\
&&+\left\vert \mathbf{P}\left( S_{\lambda n}\leq \varphi (n),L_{\lambda
n}\geq 0\right) -\mathbf{P}\left( S_{n}\leq \varphi (n),L_{n}\geq 0\right)
\right\vert .
\end{eqnarray*}
We have proved that the first summand on the right-hand side of the
inequality is of order $o\left( \mathbf{P}\left( S_{\lambda n}\leq \varphi
(n),L_{\lambda n}\geq 0\right) \right) $ as $n\rightarrow \infty $, and,
therefore, of order $o\left( \mathbf{P}\left( S_{n}\leq \varphi
(n),L_{n}\geq 0\right) \right) ,$ since
\begin{equation*}
\lim_{n\rightarrow \infty }\frac{\mathbf{P}\left( S_{\lambda n}\leq \varphi
(n),L_{\lambda n}\geq 0\right) }{\mathbf{P}\left( S_{n}\leq \varphi
(n),L_{n}\geq 0\right) }=C(\lambda )<\infty
\end{equation*}
by Corollary \ref{C_IntegVW} and Lemma \ref{L_estimV}.
Further, again by Corollary \ref{C_IntegVW} and the definition (\ref{Def_b})
we have
\begin{eqnarray*}
&&\left\vert \mathbf{P}\left( S_{\lambda n}\leq \varphi (n),L_{\lambda
n}\geq 0\right) -\mathbf{P}\left( S_{n}\leq \varphi (n),L_{n}\geq 0\right)
\right\vert \\
&\leq &\left\vert \mathbf{P}\left( S_{\lambda n}\leq \varphi (n),L_{\lambda
n}\geq 0\right) -g_{\alpha ,\beta }(0)b_{n\lambda }\int_{0}^{\varphi
(n)}V(-z)dz\right\vert \\
&&+\left\vert \mathbf{P}\left( S_{n}\leq \varphi (n),L_{n}\geq 0\right)
-g_{\alpha ,\beta }(0)b_{n}\int_{0}^{\varphi (n)}V(-z)dz\right\vert \\
&&+g_{\alpha ,\beta }(0)\left\vert b_{n\lambda }-b_{n}\right\vert
\int_{0}^{\varphi (n)}V(-z)dz \\
&=&o\left( b_{n}\int_{0}^{\varphi (n)}V(-z)dz\right) +g_{\alpha ,\beta
}(0)b_{n}\left\vert \frac{b_{n\lambda }}{b_{n}}-1\right\vert
\int_{0}^{\varphi (n)}V(-z)dz.
\end{eqnarray*}
Hence, letting $\lambda \downarrow 1$ we see that
\begin{equation*}
\lim_{\lambda \downarrow 1}\lim_{n\rightarrow \infty }\frac{\left\vert
\mathbf{P}\left( S_{\lambda n}\leq \varphi (n),L_{\lambda n}\geq 0\right) -
\mathbf{P}\left( S_{n}\leq \varphi (n),L_{n}\geq 0\right) \right\vert }{
b_{n}\int_{0}^{\varphi (n)}V(-z)dz}=0.
\end{equation*}
Combining the obtained estimates we get the statement of the lemma.
\section{Proof of Theorem \protect\ref{T_smallDevi}}
Introduce iterations of the probability generating functions
$F_{1}(\cdot),F_{2}(\cdot),\ldots$ by setting
\begin{equation*}
F_{k,n}(s)=F_{k+1}(F_{k+2}(\ldots (F_{n}(s))\ldots ))
\end{equation*}
for $0\leq k\leq n-1$, $0\leq s\leq 1$, and letting $F_{n,n}(s)=s.$ Using
this notation we write
\begin{equation*}
\mathbf{P}\left( Z_{n}>0|\ F_{k+1},\ldots ,F_{n};Z_{k}=1\right)
=1-F_{k,n}(0).
\end{equation*}
In particular, for any $j\leq n$
\begin{eqnarray*}
1-F_{0,n}(0) &=&\mathbf{P}\left( Z_{n}>0|\ F_{1},\ldots ,F_{n};Z_{0}=1\right)
\\
&\leq &\mathbf{P}\left( Z_{j}>0|\ F_{1},\ldots ,F_{j};Z_{0}=1\right) \\
&=&1-F_{0,j}(0)\leq e^{S_{j}}.
\end{eqnarray*}
Here the last inequality follows from Markov's inequality, since
$\mathbf{E}\left[ Z_{j}\,|\,F_{1},\ldots ,F_{j};Z_{0}=1\right]
=F_{1}^{\prime }(1)\cdots F_{j}^{\prime }(1)=e^{S_{j}}$.
Therefore, $\lim_{n\rightarrow \infty }F_{0,n}(0)=F_{0,\infty }(0)$ exists
a.s.
We set
\begin{equation*}
\tau _{n}=\min \left\{ 0\leq k\leq n:S_{k}=\min (0,L_{n})\right\}
\end{equation*}
and write for $1<J<n$ the representation
\begin{equation*}
\mathbf{P}\left( Z_{n}>0;S_{n}\leq \varphi (n)\right) =\sum_{j=0}^{J}\mathbf{
P}\left( Z_{n}>0;S_{n}\leq \varphi (n),\tau _{n}=j\right) +R(J,n),
\end{equation*}
where
\begin{equation*}
R(J,n)=\sum_{j=J+1}^{n}\mathbf{P}\left( Z_{n}>0;S_{n}\leq \varphi (n),\tau
_{n}=j\right) .
\end{equation*}
\begin{lemma}
\label{L_remainder} If $\varphi (n)\rightarrow \infty $ then under the
condition of Theorem \ref{T_smallDevi}
\begin{equation*}
\lim_{J\rightarrow \infty }\lim_{n\rightarrow \infty }\frac{R(J,n)}{\mathbf{P
}\left( S_{n}\leq \varphi (n),L_{n}\geq 0\right) }=0.
\end{equation*}
\end{lemma}
\textbf{Proof}. For each $j\in \mathbf{N}_{0}$ we have
\begin{eqnarray*}
&&\mathbf{P}\left( Z_{n}>0;S_{n}\leq \varphi (n),\tau _{n}=j\right) =\mathbf{
E}\left[ 1-F_{0,n}(0);S_{n}\leq \varphi (n),\tau _{n}=j\right] \\
&\leq &\mathbf{E}\left[ e^{S_{j}};S_{n}\leq \varphi (n);\tau _{n}=j\right] .
\end{eqnarray*}
By the duality property of random walks and relations (\ref{Rough1}) and (
\ref{Rough2})\ we get for $k>0$ and $y>0$ the inequalities
\begin{equation}
\mathbf{P}\left( S_{n}\in \lbrack -k,-k+1),\tau _{n}=n\right) =\mathbf{P}
\left( S_{n}\in \lbrack -k,-k+1),M_{n}<0\right) \leq C\,b_{n}\,U(k)
\label{Rough0}
\end{equation}
and
\begin{equation}
\mathbf{P}\left( 0\leq S_{n}<y,L_{n}\geq 0\right) \ \leq \
C\,b_{n}\,\int_{0}^{y}V(-z)dz\ . \label{Rough22}
\end{equation}
By these inequalities we deduce the estimates
\begin{eqnarray}
&&\mathbf{E}\left[ e^{S_{j}};S_{n}\leq \varphi (n);\tau _{n}=j\right] \notag
\\
&=&\mathbf{E}\left[ e^{S_{j}}\mathbf{P}\left( S_{n-j}^{\prime }\leq \varphi
(n)-S_{j};L_{n-j}^{\prime }\geq 0|S_{j}\right) ;\tau _{j}=j\right] \notag \\
&\leq &\sum_{k=1}^{\infty }e^{-k+1}\mathbf{P}\left( S_{j}\in \lbrack
-k,-k+1),\tau _{j}=j\right) \mathbf{P}\left( S_{n-j}\leq \varphi
(n)+k;L_{n-j}\geq 0\right) \notag \\
&\leq &Cb_{j}\sum_{k=1}^{\infty }e^{-k+1}U(k)\mathbf{P}\left( S_{n-j}\leq
\varphi (n)+k;L_{n-j}\geq 0\right) \label{RightTail0} \\
&\leq &Cb_{j}b_{n-j}\sum_{k=1}^{\infty }e^{-k}U(k)\int_{0}^{\varphi
(n)+k}V(-z)dz.\ \label{RightTail}
\end{eqnarray}
According to Lemma \ref{L_estimV} there exists a constant $C(2)>0$ such that
\begin{equation*}
\int_{0}^{2\varphi (n)}V(-z)dz\leq C(2)\int_{0}^{\varphi (n)}V(-z)dz.
\end{equation*}
Besides, $b_{j}\leq Cb_{n}$ for all $n/2\leq j\leq n$ in view of (\ref{Def_b}
). Using the inequality $V(x+y)\leq V(x)+V(y)$ valid for $x\leq 0,y\leq 0$
(see, for instance, the proof of Corollary 2.4 in \cite{ABGV2011}) and (\ref
{RightTail0}), we conclude that
\begin{eqnarray*}
&&\sum_{j\geq n/2}\mathbf{P}\left( Z_{n}>0;S_{n}\leq \varphi (n),\tau
_{n}=j\right) \\
&\leq &\sum_{j\geq n/2}\mathbf{E}\left[ e^{S_{j}},S_{n}\leq \varphi (n);\tau
_{n}=j\right] \\
&\leq &C\sum_{k=1}^{\infty }e^{-k}U(k)\sum_{j\geq n/2}b_{j}\mathbf{P}\left(
S_{n-j}\leq \varphi (n)+k;L_{n-j}\geq 0\right) \\
&\leq &Cb_{n}\sum_{k=1}^{\infty }e^{-k}U(k)V(-\varphi (n)-k) \\
&\leq &C_{1}b_{n}\left( \sum_{k=1}^{\infty }e^{-k}U(k)(V(-\varphi
(n))+V(-k))\right) \\
&\leq &C_{1}b_{n}\left( C_{2}V(-\varphi (n))+C_{3}\right) .
\end{eqnarray*}
Since $V(-x),x>0,$ is a regularly varying function as $x\rightarrow \infty $
with a positive index, it follows by Corollary \ref{C_IntegVW} that
\begin{eqnarray}
&&\sum_{j\geq n/2}\mathbf{E}\left[ e^{S_{\tau _{n}}},S_{n}\leq \varphi
(n);\tau _{n}=j\right] \leq C_{3}b_{n}V(-\varphi (n))=o\left(
b_{n}\int_{0}^{\varphi (n)}V(-z)dz\right) \notag \\
&=&o(\mathbf{P}\left( S_{n}\leq \varphi (n),L_{n}\geq 0\right) )
\label{Big_j}
\end{eqnarray}
as $n\rightarrow \infty $. Further, in view of\ (\ref{RightTail})
\begin{eqnarray}
&&\sum_{J+1\leq j<n/2}\mathbf{E}\left[ e^{S_{j}},S_{n}\leq \varphi (n);\tau
_{n}=j\right] \notag \\
&\leq &C\sum_{J\leq j<n/2}b_{j}b_{n-j}\sum_{k=1}^{\infty
}e^{-k}U(k)\int_{0}^{\varphi (n)+k}V(-z)dz \notag \\
&\leq &Cb_{n}\sum_{J\leq j<n/2}b_{j}\left( \sum_{k=1}^{\infty
}e^{-k}U(k)\left( \int_{0}^{2\varphi (n)}V(-z)dz+\int_{0}^{2k}V(-z)dz\right)
\right) \notag \\
&\leq &Cb_{n}\sum_{J\leq j<n/2}b_{j}\left( C_{1}\int_{0}^{2\varphi
(n)}V(-z)dz+C_{2}\right) \leq C_{3}\sum_{j\geq J}b_{j}\times
b_{n}\int_{0}^{\varphi (n)}V(-z)dz \notag \\
&=&\varepsilon _{J}\mathbf{P}\left( S_{n}\leq \varphi (n),L_{n}\geq 0\right)
, \label{Small_j}
\end{eqnarray}
where $\varepsilon _{J}\rightarrow 0$ as $J\rightarrow \infty $. Combining (
\ref{Big_j}) and (\ref{Small_j}) and letting first $n$ and then $
J$ tend to infinity, we see that
\begin{equation*}
\lim_{J\rightarrow \infty }\lim_{n\rightarrow \infty }\frac{R(J,n)}{\mathbf{P
}\left( S_{n}\leq \varphi (n),L_{n}\geq 0\right) }\leq C\lim_{J\rightarrow
\infty }\varepsilon _{J}=0.
\end{equation*}
Lemma \ref{L_remainder} is proved.
\begin{lemma}
\label{L_fixed_j}Under the conditions of Theorem \ref{T_smallDevi} for each
fixed $j$
\begin{equation*}
\mathbf{P}\left( Z_{n}>0,S_{n}\leq \varphi (n),\tau _{n}=j\right) \sim
\Theta (j)g_{\alpha ,\beta }(0)b_{n}\int_{0}^{\varphi (n)}V(-z)dz
\end{equation*}
as $n\rightarrow \infty ,$ where
\begin{equation*}
\Theta (j)=\sum_{k=1}^{\infty }\mathbf{P}(Z_{j}=k,\tau _{j}=j)\mathbf{E}^{+}
\left[ 1-F_{0,\infty }^{k}(0)\right] \leq \mathbf{P}(\tau _{j}=j)\leq 1.
\end{equation*}
\end{lemma}
\textbf{Proof}. First observe that for $0\leq j<n$ and any $\varepsilon \in
(0,1)$
\begin{eqnarray*}
&&\mathbf{P}\left( Z_{n}>0,S_{n}\leq \varphi (n);\tau _{n}=j;S_{j}\leq
-\varepsilon \varphi (n)\right) \\
&=&\mathbf{E}\left[ 1-F_{0,n}(0),S_{n}\leq \varphi (n);\tau _{n}=j;S_{j}\leq
-\varepsilon \varphi (n)\right] \\
&\leq &\mathbf{E}\left[ e^{S_{\tau _{n}}},S_{n}\leq \varphi (n);\tau
_{n}=j;S_{j}\leq -\varepsilon \varphi (n)\right] \\
&=&\int_{-\infty }^{-\varepsilon \varphi (n)}e^{x}\mathbf{P}\left( S_{j}\in
dx,\tau _{j}=j\right) \mathbf{P}\left( S_{n-j}\leq \varphi (n)-x;L_{n-j}\geq
0\right) .
\end{eqnarray*}
Using the decomposition $(-\infty ,-\varepsilon \varphi (n)]=(-\infty
,-\varphi (n)]\cup (-\varphi (n),-\varepsilon \varphi (n)]$ we have
\begin{eqnarray*}
&&\int_{-\infty }^{-\varepsilon \varphi (n)}e^{x}\mathbf{P}\left( S_{j}\in
dx,\tau _{j}=j\right) \mathbf{P}\left( S_{n-j}\leq \varphi (n)-x;L_{n-j}\geq
0\right) \\
&\leq &\mathbf{P}\left( S_{n-j}\leq 2\varphi (n);L_{n-j}\geq 0\right)
\int_{-\varphi (n)}^{-\varepsilon \varphi (n)}e^{x}\mathbf{P}\left( S_{j}\in
dx,\tau _{j}=j\right) \\
&&+\int_{-\infty }^{-\varphi (n)}e^{x}\mathbf{P}\left( S_{j}\in dx,\tau
_{j}=j\right) \mathbf{P}\left( S_{n-j}\leq -2x;L_{n-j}\geq 0\right) .
\end{eqnarray*}
Since $V(-x),x\geq 0,$ is a renewal function, $V(-x)\leq C(\left\vert
x\right\vert +1)$. Using this estimate and Lemma \ref{L_double} we conclude
that
\begin{eqnarray*}
&&\int_{-\infty }^{-\varphi (n)}e^{x}\mathbf{P}\left( S_{j}\in dx,\tau
_{j}=j\right) \mathbf{P}\left( S_{n-j}\leq -2x;L_{n-j}\geq 0\right) \\
&\leq &Cb_{n-j}\int_{-\infty }^{-\varphi (n)}e^{x}\mathbf{P}\left( S_{j}\in
dx,\tau _{j}=j\right) \int_{0}^{-2x}V(-z)dz \\
&\leq &C_{1}b_{n-j}\int_{-\infty }^{-\varphi (n)}e^{x}\left\vert
x\right\vert ^{2}\mathbf{P}\left( S_{j}\in dx,\tau _{j}=j\right) \\
&\leq &C_{1}b_{n-j}\mathbf{E}\left[ e^{S_{j}}\left\vert S_{j}\right\vert
^{2}I\left\{ S_{j}\leq -\varphi (n)\right\} \right] =o(b_{n-j}).
\end{eqnarray*}
This and the estimate for $V(-x)$ given above imply
\begin{eqnarray*}
&&\mathbf{P}\left( S_{n-j}\leq 2\varphi (n);L_{n-j}\geq 0\right)
\int_{-\varphi (n)}^{-\varepsilon \varphi (n)}e^{x}\mathbf{P}\left( S_{j}\in
dx,\tau _{j}=j\right) \\
&\leq &Cb_{n-j}\int_{0}^{2\varphi (n)}V(-z)dz\,e^{-\varepsilon \varphi (n)} \\
&\leq &Cb_{n-j}\int_{0}^{2\varphi (n)}\left( z+1\right) dz\,e^{-\varepsilon
\varphi (n)}=o(b_{n-j})
\end{eqnarray*}
as $n\rightarrow \infty $. Therefore, for each fixed $j$ and $\varepsilon
\in (0,1)$
\begin{equation*}
\mathbf{P}\left( Z_{n}>0,S_{n}\leq \varphi (n);\tau _{n}=j;S_{j}\leq
-\varepsilon \varphi (n)\right) =o(b_{n-j}).
\end{equation*}
Further, we have
\begin{eqnarray*}
&&\mathbf{P}\left( Z_{n}>0,S_{n}\leq \varphi (n);\tau
_{n}=j,S_{j}>-\varepsilon \varphi (n)\right) \\
&=&\mathbf{E}\left[ \mathbf{P}\left( Z_{n}>0,S_{n}\leq \varphi (n);\tau
_{n}=j|S_{j},Z_{j}\right) ;S_{j}>-\varepsilon \varphi (n)\right] \\
&=&\sum_{k=1}^{\infty }\int_{-\varepsilon \varphi (n)}^{0}\mathbf{P}
(Z_{j}=k,S_{j}\in dx,\tau _{j}=j)\mathbf{E}\left[ 1-F_{0,n-j}^{k}(0),S_{n-j}
\leq \varphi (n)-x,L_{n-j}\geq 0\right] .
\end{eqnarray*}
Given $\varphi (n)=o(a_{n})$ we see that for any $\varepsilon >0$ and all $
x\in (-\varepsilon \varphi (n),0]$ and each $k\in \mathbf{N}$
\begin{eqnarray*}
&&\mathbf{E}\left[ 1-F_{0,n-j}^{k}(0),S_{n-j}\leq \varphi (n),L_{n-j}\geq 0
\right] \\
&\leq &\mathbf{E}\left[ 1-F_{0,n-j}^{k}(0),S_{n-j}\leq \varphi
(n)-x,L_{n-j}\geq 0\right] \\
&\leq &\mathbf{E}\left[ 1-F_{0,n-j}^{k}(0),S_{n-j}\leq (1+\varepsilon
)\varphi (n),L_{n-j}\geq 0\right] .
\end{eqnarray*}
Hence, using Lemma \ref{L_cond}, Corollary \ref{C_IntegVW} and the fact that
$\int_{0}^{x}V(-z)dz$ is a regularly varying function with index $\alpha
\rho +1$, we conclude that
\begin{equation*}
\lim_{n\rightarrow \infty }\inf_{x\in (-\varepsilon \varphi (n),0]}\frac{
\mathbf{E}\left[ 1-F_{0,n-j}^{k}(0),S_{n-j}\leq \varphi (n)-x,L_{n-j}\geq 0
\right] }{\mathbf{P}\left( S_{n-j}\leq \varphi (n),L_{n-j}\geq 0\right) }
\geq \mathbf{E}^{+}\left[ 1-F_{0,\infty }^{k}(0)\right]
\end{equation*}
and
\begin{eqnarray*}
&&\lim_{n\rightarrow \infty }\sup_{x\in (-\varepsilon \varphi (n),0]}\frac{
\mathbf{E}\left[ 1-F_{0,n-j}^{k}(0),S_{n-j}\leq \varphi (n)-x,L_{n-j}\geq 0
\right] }{\mathbf{P}\left( S_{n-j}\leq \varphi (n),L_{n-j}\geq 0\right) } \\
&\leq &\left( 1+\varepsilon \right) ^{\alpha \rho +1}\mathbf{E}^{+}\left[
1-F_{0,\infty }^{k}(0)\right] .
\end{eqnarray*}
Therefore, for each fixed $k$ and $j$
\begin{eqnarray*}
\lim_{\varepsilon \downarrow 0}\lim_{n\rightarrow \infty }\int_{-\varepsilon
\varphi (n)}^{0}\mathbf{P}(Z_{j} &=&k;S_{j}\in dx,\tau _{j}=j) \\
&&\times \frac{\mathbf{E}\left[ 1-F_{0,n-j}^{k}(0),S_{n-j}\leq \varphi
(n)-x,L_{n-j}\geq 0\right] }{\mathbf{P}\left( S_{n}\leq \varphi
(n),L_{n}\geq 0\right) } \\
&=&\int_{-\infty }^{0}\mathbf{P}(Z_{j}=k;S_{j}\in dx,\tau _{j}=j)\mathbf{E}
^{+}\left[ 1-F_{0,\infty }^{k}(0)\right] \\
&=&\mathbf{P}(Z_{j}=k,\tau _{j}=j)\mathbf{E}^{+}\left[ 1-F_{0,\infty }^{k}(0)
\right] .
\end{eqnarray*}
Further,
\begin{eqnarray*}
\sum_{k=K+1}^{\infty }\int_{-\varepsilon \varphi (n)}^{0}\mathbf{P}(Z_{j}
&=&k;S_{j}\in dx,\tau _{j}=j)\mathbf{E}\left[ 1-F_{0,n-j}^{k}(0),S_{n-j}\leq
\varphi (n)-x,L_{n-j}\geq 0\right] \\
&\leq &\sum_{k=K+1}^{\infty }\mathbf{P}(Z_{j}=k,\tau _{j}=j)\mathbf{P}\left(
S_{n-j}\leq (1+\varepsilon )\varphi (n),L_{n-j}\geq 0\right) \\
&\leq &\mathbf{P}(Z_{j}\geq K+1)\mathbf{P}\left( S_{n-j}\leq (1+\varepsilon
)\varphi (n),L_{n-j}\geq 0\right)
\end{eqnarray*}
and, by Lemma \ref{L_estimV} the right-hand side of this relation is
\begin{equation*}
o\left( \mathbf{P}\left( S_{n}\leq \varphi (n),L_{n}\geq 0\right) \right)
\end{equation*}
as $K\rightarrow \infty $. Hence, as $n\rightarrow \infty $
\begin{eqnarray*}
\mathbf{P}\left( Z_{n}>0,S_{n}\leq \varphi (n),\tau _{n}=j\right) &\sim
&\Theta (j)\mathbf{P}\left( S_{n}\leq \varphi (n),L_{n}\geq 0\right) \\
&\sim &\Theta (j)g_{\alpha ,\beta }(0)b_{n}\int_{0}^{\varphi (n)}V(-z)dz
\end{eqnarray*}
for each fixed $j$.
Lemma \ref{L_fixed_j} is proved.
\textbf{Proof of Theorem \ref{T_smallDevi}}. Combining Lemmas \ref{L_remainder} and \ref{L_fixed_j} we see that
\begin{equation*}
\mathbf{P}\left( Z_{n}>0,S_{n}\leq \varphi (n)\right) \sim \Theta g_{\alpha
,\beta }(0)b_{n}\int_{0}^{\varphi (n)}V(-z)dz,
\end{equation*}
where
\begin{equation}
\Theta =\sum_{j=0}^{\infty }\Theta (j)=\sum_{j=0}^{\infty
}\sum_{k=1}^{\infty }\mathbf{P}(Z_{j}=k,\tau _{j}=j)\mathbf{E}^{+}\left[
1-F_{0,\infty }^{k}(0)\right] . \label{Def_Theta}
\end{equation}
Positivity and finiteness of $\Theta $ were proved in \cite{agkv}, Theorem
1.1 (see, in particular, formula (4.10)).
Theorem \ref{T_smallDevi} is proved.
\end{document}
\begin{document}
\title{Strong polygamy of quantum correlations in multi-party quantum systems}
\author{Jeong San Kim}
\email{[email protected]} \affiliation{
Department of Mathematics, University of Suwon, Kyungki-do 445-743, Korea
}
\date{\today}
\begin{abstract}
We propose a new type of polygamy inequality for multi-party quantum
entanglement. We first consider the possible amount of bipartite entanglement distributed
between a fixed party and any subset of the rest parties in a multi-party quantum system.
By using the summation of these distributed entanglements, we provide an upper bound of the
distributed entanglement between a party and the rest in multi-party quantum systems.
We then show that this upper bound also serves as a lower bound of the usual polygamy inequality,
thereby establishing the strong polygamy of multi-party quantum
entanglement. For the case of multi-party pure states, we further show
that the strong polygamy of entanglement implies the strong polygamy of quantum discord.
\end{abstract}
\pacs{
03.67.Mn,
03.65.Ud
}
\maketitle
\section{Introduction}
Entanglement is one of the most remarkable features in the field of quantum
information and computation theory with many useful
applications such as quantum teleportation and quantum key distribution~\cite{tele,BB84,Eke91}.
One of the essential differences of quantum entanglement from other
classical correlations is in its restricted shareability; if a pair of parties in a multi-party quantum system
share maximal entanglement, they cannot have any entanglement or
classical correlations with the rest. This restricted shareability
of entanglement is known as the {\em monogamy of entanglement}
(MoE)~\cite{T04, JGS}, which does not have any classical counterpart, and this makes
quantum physics fundamentally different from classical physics.
In the seminal paper by Coffman, Kundu and Wootters~\cite{CKW}, MoE
was mathematically characterized in the form of a trade-off inequality; for a
three-qubit state $\rho_{ABC}$ with two-qubit reduced density
matrices $\rho_{AB}=\mbox{$\mathrm{tr}$}_C \rho_{ABC}$ and $\rho_{AC}=\mbox{$\mathrm{tr}$}_B
\rho_{ABC}$,
\begin{align}
\tau\left(\rho_{A(BC)}\right)\geq \tau\left(\rho_{AB}\right)+\tau\left(\rho_{AC}\right),
\label{CKW}
\end{align}
where $\tau\left(\rho_{A(BC)}\right)$ is the entanglement of
$\rho_{ABC}$ with respect to the bipartition
between A and BC measured by tangle~\cite{CKW}, and $\tau\left(\rho_{AB}\right)$ and
$\tau\left(\rho_{AC}\right)$ are the tangles of $\rho_{AB}$ and
$\rho_{AC}$ respectively. Later, Inequality~(\ref{CKW}) was
generalized for multi-qubit systems~\cite{OV} and some classes
of multi-qudit systems in terms of various entanglement
measures~\cite{Kim123}. It was recently shown that squashed
entanglement is a faithful entanglement measure that also satisfies
the monogamy inequality of entanglement in arbitrary dimensional
quantum systems~\cite{BCY10}.
Whereas MoE shows the restricted shareability of bipartite
entanglement in multi-party quantum systems, the possible amount of
bipartite entanglement distribution assisted by the third party is
known to have a dually monogamous (thus polygamous) property in
multi-party quantum systems; for three-qubit systems, {\em
polygamy of entanglement} (PoE) was first characterized as a polygamy inequality
\begin{align}
\tau\left(\rho_{A(BC)}\right)\leq
\tau_a\left(\rho_{AB}\right)+\tau_a\left(\rho_{AC}\right),
\label{poly1}
\end{align}
where $\tau_a\left(\rho_{AB}\right)$ and
$\tau_a\left(\rho_{AC}\right)$ are the tangle of assistance of
$\rho_{AB}$ and $\rho_{AC}$ respectively~\cite{GMS, GBS}.
Inequality~(\ref{poly1}) was generalized for various classes of
multi-party, higher dimensional quantum systems~\cite{BGK}, and
a general polygamy inequality of entanglement was recently shown in
terms of {\em entanglement of assistance} in arbitrary
dimensional multi-party quantum systems~\cite{gpoly}.
The study of shareability and distribution of quantum correlations,
especially quantum entanglement, in multi-party quantum systems is
the key ingredient of many quantum information and communication
protocols. For example, due to the mutually-exclusive relation of
entanglement sharing characterized by monogamy inequality, one can
possibly quantify how much information an eavesdropper could
potentially obtain about the secret key to be extracted in quantum
cryptography~\cite{Paw}. In other words, the security of quantum
key distribution protocols, which prohibit an eavesdropper from obtaining any
information without causing a disturbance, is guaranteed by MoE, a law of quantum
physics, rather than by assumptions on the difficulty of
computation.
Here, we propose a new type of polygamy inequality for quantum
entanglement; in multi-party quantum systems, we first consider the
possible amount of bipartite entanglement distributed between a fixed party
and any subset of the rest parties. By using the summation of these
distributed entanglements, we provide an upper bound of the
distributed entanglement between a party and the rest. We then show that this upper bound
also serves as a lower bound of the general polygamy inequality of multi-party quantum entanglement,
thereby establishing the strong polygamy of multi-party quantum
entanglement. For the case of multi-party pure states, we further show
that the strong polygamy of entanglement implies that
of quantum discord.
This paper is organized as follows. In Section~\ref{Sec: Bipartite
Quantum Correlations}, we briefly recall the definitions and some
properties of bipartite quantum correlations such as entanglement of
assistance, quantum discord, one-way unlocalizable entanglement and
one-way unlocalizable quantum discord. In Section~\ref{sub: spoly
entanglement}, we establish the strong polygamy of distributed
entanglement in terms of EoA, and we also show a close relation between the strong
polygamy of entanglement and quantum discord for multi-party pure states in Section~\ref{sub:
spoly discord}. In Section~\ref{Sec:Con}, we summarize our results.
\section{Bipartite Quantum Correlations}
\label{Sec: Bipartite Quantum Correlations}
For a bipartite quantum state $\rho_{AB}$, its one-way classical
correlation $\mathcal
{J}^\leftarrow(\rho_{AB})$ is
\begin{equation}\begin{split}
\mathcal {J}^\leftarrow(\rho_{AB}) &= \max_{\{M_x\}} \left[S(\rho_A)-\sum_x p_x S(\rho^x_A)\right],
\end{split}
\label{HVI}
\end{equation}
where $p_x\equiv \mbox{$\mathrm{tr}$}[(I_A\otimes M_x)\rho_{AB}]$ is the probability
of the outcome $x$, $\rho^x_A\equiv \mbox{$\mathrm{tr}$}_B[(I_A\otimes
{M_x})\rho_{AB}]/p_x$ is the state of system $A$ when the outcome
was $x$, and the maximum is taken over all the measurements
$\{M_x\}$ applied on system $B$~\cite{Henderson-Vedral01}.
For a tripartite pure state $\ket{\psi}_{ABC}$ with reduced density
matrices $\rho_{A}=\mbox{$\mathrm{tr}$}_{BC}\ket{\psi}_{ABC}\bra{\psi}$, $\rho_{AB}
=\mbox{$\mathrm{tr}$}_{C}\ket{\psi}_{ABC}\bra{\psi}$, and $\rho_{AC}
=\mbox{$\mathrm{tr}$}_{B}\ket{\psi}_{ABC}\bra{\psi}$,
a trade-off relation between quantum entanglement and classical correlation was shown~\cite{KW}
\begin{align}
S(\rho_A)&={\mathcal J}^\leftarrow(\rho_{AB})+E_f(\rho_{AC}),
\label{KWmain1}
\end{align}
where
\begin{equation}
E_f(\rho_{AC})=\min \sum_{i}p_i S(\rho^{i}_{A}) \label{eof}
\end{equation}
is the {\em entanglement of formation} (EoF) of $\rho_{AC}$~\cite{BDSW},
whose minimization is taken over all pure state decompositions
of $\rho_{AC}$,
\begin{equation}
\rho_{AC}=\sum_{i} p_i |\phi^i\rangle_{AC}\langle\phi^i|,
\label{decomp}
\end{equation}
with $\mbox{$\mathrm{tr}$}_{C}|\phi^i\rangle_{AC}\langle\phi^i|=\rho^{i}_{A}$.
From the definition, $E_f(\rho_{AC})$ is considered as the minimum average entanglement needed
to prepare $\rho_{AC}$, and the term {\em formation} naturally arises. Furthermore, Eq.~(\ref{KWmain1})
can be interpreted as follows; for any tripartite pure state $\ket{\psi}_{ABC}$ (a three-party closed quantum system),
the total correlation between subsystems $A$ and $BC$ quantified by the entropy $S\left(\rho_A\right)$ consists of
the classical correlation ${\mathcal J}^\leftarrow(\rho_{AB})$ between subsystems $A$ and $B$, and the formation of entanglement $E_f(\rho_{AC})$
between $A$ and $C$.
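For instance, for the three-qubit GHZ state $\ket{\psi}_{ABC}=\left(\ket{000}+\ket{111}\right)/\sqrt{2}$, we have $S(\rho_A)=1$, ${\mathcal J}^\leftarrow(\rho_{AB})=1$ (a measurement of $B$ in the computational basis removes all the entropy of $A$), and $E_f(\rho_{AC})=0$ (the reduced state $\rho_{AC}$ is separable), consistently with Eq.~(\ref{KWmain1}).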
As a dual quantity to EoF, the {\em entanglement of assistance} (EoA) is defined as the maximum average entanglement,
\begin{equation}
E_a(\rho_{AC})=\max \sum_{i}p_i S(\rho^{i}_{A}), \label{EoA}
\end{equation}
over all possible pure state decompositions of $\rho_{AC}$~\cite{LVvE03}.
EoA is clearly a mathematical dual to EoF because one takes the maximum average entanglement whereas the other
takes the minimum.
We also note that for a pure state $\ket{\psi}_{ABC}$, all possible pure state
decompositions of $\rho_{AC}$ can be realized by rank-1 measurements
of subsystem $B$, and conversely, any rank-1 measurement can be
induced from a pure state decomposition of $\rho_{AC}$~\cite{BGK}.
Thus $E_a(\rho_{AC})$ can be considered as the possible maximum average entanglement that can be distributed between $A$ and $C$ with
the assistance of the environment $B$. This makes the duality between EoF and EoA clearer because one is the formation of entanglement
whereas the other is the possible entanglement distribution.
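As a simple illustration of this duality (given here only as an example), consider the two-qubit maximally mixed state $\rho_{AC}=I_{AC}/4$: decomposing it into product states gives $E_f\left(\rho_{AC}\right)=0$, whereas decomposing it into the four Bell states gives the maximal average entanglement, so that $E_a\left(\rho_{AC}\right)=1$.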
Similarly to the duality between EoF and EoA, we have a dual quantity to $\mathcal{J}^\leftarrow(\rho_{AB})$;
for a bipartite state $\rho_{AB}$,
the {\em one-way unlocalizable entanglement}~(UE) is defined as
\begin{equation}
\begin{split}
E_u^{\leftarrow}(\rho_{AB}) &:= \min_{\{M_x\}} \left[S(\rho_A)-\sum_x p_x S(\rho^x_A)\right],\\
\end{split}
\label{UE}
\end{equation}
where the minimum is taken over all possible rank-1 measurements
$\{M_x\}$ applied on system $B$~\cite{BGK}.
Moreover, the trade-off relation in
Eq.~(\ref{KWmain1}) was also shown to have a dual relation in terms of EoA
and UE in three-party quantum systems~\cite{BGK}. For a three-party
pure state $\ket{\psi}_{ABC}$,
\begin{align}
S(\rho_A)&=E_u^{\leftarrow}(\rho_{AB})+E_a(\rho_{AC}).
\label{eq: 3UEEA}
\end{align}
For a bipartite state $\rho_{AB}$, quantum discord (QD) is defined as the difference between the mutual
information and one-way classical correlation~\cite{OZ,HV},
\begin{equation}
{\delta}^{\leftarrow}\left(\rho_{AB}\right)={\mathcal I}\left(\rho_{AB}\right)-{\mathcal J}^\leftarrow\left(\rho_{AB}\right),
\label{QD}
\end{equation}
where
\begin{equation}
{\mathcal I}\left(\rho_{AB}\right)=S\left(\rho_A\right)+S\left(\rho_B\right)-S\left(\rho_{AB}\right),
\label{mutual}
\end{equation}
is the mutual information of $\rho_{AB}$ with reduced density matrices $\rho_A$ and $\rho_B$ on its subsystems $A$ and $B$
respectively.
Based on the duality between one-way classical correlation and UE, a dual quantity to QD was introduced; for a bipartite
quantum state $\rho_{AB}$ its one-way unlocalizable quantum discord (UD) is defined as~\cite{UD}
\begin{equation}
{\delta}_u^{\leftarrow}\left(\rho_{AB}\right)={\mathcal I}\left(\rho_{AB}\right)-E_u^\leftarrow\left(\rho_{AB}\right),
\label{UD}
\end{equation}
where $E_u^\leftarrow\left(\rho_{AB}\right)$ is the UE of $\rho_{AB}$ in Eq.~(\ref{UE}).
\section{Strong Polygamy of Quantum Correlations}
\label{Sec: Strong Polygamy}
\subsection{Strong Polygamy of Quantum Entanglement}
\label{sub: spoly entanglement}
In multi-party quantum systems, the distribution of bipartite entanglement quantified by EoA has
a polygamous relation as follows; for an $(n+1)$-party quantum state
$\rho_{AB_1\cdots B_n}$ with reduced density matrices
$\rho_{AB_i}$ on bipartite subsystems $AB_i$ for $i=1,\cdots,n$,
\begin{align}
E_a\left(\rho_{A(B_1\cdots B_n)}\right)
\leq& E_a\left(\rho_{AB_1}\right)+\cdots +E_a\left(\rho_{AB_n}\right)\nonumber\\
=&\sum_{i=1}^{n}E_a\left(\rho_{AB_i}\right),
\label{npolymixed}
\end{align}
where $E_a\left(\rho_{A(B_1\cdots B_n)}\right)$ is EoA of $\rho_{AB_1\cdots B_n}$
with respect to the bipartition between
$A$ and the rest, and $E_a\left(\rho_{AB_i}\right)$ is EoA of $\rho_{AB_i}$ for $i=1,\cdots,n$~\cite{gpoly}.
Let us denote $\mbox{$\mathbb B$} = \{B_1,\cdots,B_n \}$, that is, the set of
subsystems $B_i$'s, and consider a nonempty proper subset
$\mbox{$\mathbb X$}=\{B_{i_1},\cdots,B_{i_k}\}$ of $\mbox{$\mathbb B$}$ for $1 \leq k \leq n-1$.
Together with the complement $\mbox{$\mathbb X$}^c=\mbox{$\mathbb B$}-\mbox{$\mathbb X$}$ of $\mbox{$\mathbb X$}$ in $\mbox{$\mathbb B$}$,
$\rho_{AB_1\cdots B_n}$ can also be considered as a three-party
quantum state $\rho_{A\mbox{$\mathbb X$}\mbox{$\mathbb X$}^c}$. Furthermore, the polygamy inequality in~(\ref{npolymixed}) implies
\begin{align}
E_a\left(\rho_{A\mbox{$\mathbb B$}}\right)=&E_a\left(\rho_{A(\mbox{$\mathbb X$}\mbox{$\mathbb X$}^c)}\right)\nonumber\\
\leq& E_a\left(\rho_{A\mbox{$\mathbb X$}}\right)+E_a\left(\rho_{A\mbox{$\mathbb X$}^c}\right),
\label{polyAXXC}
\end{align}
where $E_a\left(\rho_{A\mbox{$\mathbb X$}}\right)$ and $E_a\left(\rho_{A\mbox{$\mathbb X$}^c}\right)$ are EoA of reduced density matrices
$\rho_{A\mbox{$\mathbb X$}}$ and $\rho_{A\mbox{$\mathbb X$}^c}$, respectively.
Because Inequality~(\ref{polyAXXC}) holds for any proper subset $\mbox{$\mathbb X$}$ of $\mbox{$\mathbb B$}$, we consider all possible nonempty proper subsets $\mbox{$\mathbb X$}$ of $\mbox{$\mathbb B$}$,
which leads us to the following inequality,
\begin{align}
E_a\left(\rho_{A\mbox{$\mathbb B$}}\right)&\leq \frac{1}{2^n-2}\sum_{\mbox{$\mathbb X$}}\left(E_a\left(\rho_{A\mbox{$\mathbb X$}}\right)+E_a\left(\rho_{A\mbox{$\mathbb X$}^c}\right)\right),
\label{3polyall}
\end{align}
where the summation is over all possible nonempty proper subsets $\mbox{$\mathbb X$}$'s.
Here we note that the set of all nonempty proper subsets of $\mbox{$\mathbb B$}$ is the same as the set of their complements;
\begin{align}
\{\mbox{$\mathbb X$} | \mbox{$\mathbb X$} \subset \mbox{$\mathbb B$} \}=\{\mbox{$\mathbb X$}^c| \mbox{$\mathbb X$} \subset \mbox{$\mathbb B$} \},
\label{sets}
\end{align}
thus we have
\begin{align}
\sum_{\mbox{$\mathbb X$}}E_a\left(\rho_{A\mbox{$\mathbb X$}^c}\right)=\sum_{\mbox{$\mathbb X$}}E_a\left(\rho_{A\mbox{$\mathbb X$}}\right),
\label{setsumequal}
\end{align}
and Inequality~(\ref{3polyall}) becomes
\begin{align}
E_a\left(\rho_{A\mbox{$\mathbb B$}}\right)\leq\frac{1}{2^{n-1}-1}\sum_{\mbox{$\mathbb X$}}E_a\left(\rho_{A\mbox{$\mathbb X$}}\right).
\label{spolyE}
\end{align}
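As an illustration (given here only as an example), for $n=2$ the only nonempty proper subsets of $\mbox{$\mathbb B$}$ are $\{B_1\}$ and $\{B_2\}$, and Inequality~(\ref{spolyE}) reduces to the usual polygamy inequality $E_a\left(\rho_{A(B_1B_2)}\right)\leq E_a\left(\rho_{AB_1}\right)+E_a\left(\rho_{AB_2}\right)$, whereas for $n=3$ it reads
\begin{align}
E_a\left(\rho_{A(B_1B_2B_3)}\right)\leq\frac{1}{3}\Big[&E_a\left(\rho_{AB_1}\right)+E_a\left(\rho_{AB_2}\right)+E_a\left(\rho_{AB_3}\right)\nonumber\\
&+E_a\left(\rho_{A(B_1B_2)}\right)+E_a\left(\rho_{A(B_1B_3)}\right)+E_a\left(\rho_{A(B_2B_3)}\right)\Big],\nonumber
\end{align}
the average being taken over the $2^3-2=6$ nonempty proper subsets of $\mbox{$\mathbb B$}$.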
For a nonempty proper subset $\mbox{$\mathbb X$}=\{B_{i_1},\cdots,B_{i_k}\}$ of $\mbox{$\mathbb B$}$ and its complement $\mbox{$\mathbb X$}^c=\{B_{i_{k+1}},\cdots,B_{i_n}\}$,
Inequality~(\ref{npolymixed}) also implies
\begin{align}
E_a\left(\rho_{A\mbox{$\mathbb X$}}\right)+&E_a\left(\rho_{A\mbox{$\mathbb X$}^c}\right)\nonumber\\
&\leq\sum_{j=1}^{k}E_a\left(\rho_{AB_{i_j}}\right)+\sum_{j=k+1}^{n}E_a\left(\rho_{AB_{i_j}}\right)\nonumber\\
&=\sum_{i=1}^{n}E_a\left(\rho_{AB_i}\right).
\label{upper}
\end{align}
By considering all possible nonempty proper subsets $\mbox{$\mathbb X$}$ of $\mbox{$\mathbb B$}$ and using Eqs.~(\ref{sets}) and (\ref{setsumequal}), we have
\begin{align}
\frac{1}{2^{n-1}-1}\sum_{\mbox{$\mathbb X$}}E_a\left(\rho_{A\mbox{$\mathbb X$}}\right)\leq \sum_{i=1}^{n}E_a\left(\rho_{AB_i}\right).
\label{upper2}
\end{align}
From inequalities~(\ref{spolyE}) and (\ref{upper2}), we have the following {\em strong polygamy inequalities}
of distributed entanglement in multi-party quantum systems;
for any multi-party state
$\rho_{AB_1\cdots B_n}$ (pure or mixed),
\begin{align}
E_a\left(\rho_{A\mbox{$\mathbb B$}}\right)\leq&\frac{1}{2^{n-1}-1}\sum_{\mbox{$\mathbb X$}}E_a\left(\rho_{A\mbox{$\mathbb X$}}\right)\nonumber\\
\leq& \sum_{i=1}^{n}E_a\left(\rho_{AB_i}\right), \label{spolyE2}
\end{align}
where the first summation is over all nonempty proper subsets $\mbox{$\mathbb X$}$ of $\mbox{$\mathbb B$} = \{B_1,\cdots,B_n \}$.
Here, the meaning of the term {\em strong} is twofold. First, Inequality~(\ref{spolyE2}) is in fact tighter than the usual polygamy inequality
in (\ref{npolymixed}). Moreover, we have considered the entanglement distribution (EoA) between the single party $A$ and all possible subsets $\mbox{$\mathbb X$}$'s
of $\mbox{$\mathbb B$}$ to obtain a tighter polygamy inequality whereas the usual polygamy inequality only considers EoA between $A$ and each single party ($B_i$'s)
in $\mbox{$\mathbb B$}$.
\subsection{Strong Polygamy of Quantum Discord}
\label{sub: spoly discord}
Let us now consider the strong polygamy inequality of
quantum discord in multi-party quantum systems in terms of UD. We first note that the
definition of UD in Eq.~(\ref{UD}) and the relation between UE and
EoA in Eq.~(\ref{eq: 3UEEA}) lead us to the following relation
between UD and EoA; for a three-party pure state $\ket{\psi}_{ABC}$
with its reduced density matrices $\rho_{AB}$ and $\rho_{AC}$,
\begin{align}
E_a\left(\rho_{AB}\right)={\delta}_u^{\leftarrow}\left(\rho_{AC}\right)+S\left(\rho_{A|C}\right),
\label{EDEoA}
\end{align}
where $S\left(\rho_{A|C}\right)=S\left(\rho_{AC}\right)-S\left(\rho_{C}\right)$ is the conditional entropy of $\rho_{AC}$.
For a multi-party pure state
$\ket{\psi}_{A\mbox{$\mathbb B$}}=\ket{\psi}_{AB_1\cdots B_n}$ and a nonempty proper
subset $\mbox{$\mathbb X$}$ of $\mbox{$\mathbb B$}$, Eq.~(\ref{EDEoA}) implies
\begin{align}
E_a\left(\rho_{A\mbox{$\mathbb X$}}\right)={\delta}_u^{\leftarrow}\left(\rho_{A\mbox{$\mathbb X$}^c}\right)+S\left(\rho_{A|\mbox{$\mathbb X$}^c}\right),
\label{EDEoA2}
\end{align}
where $\rho_{A\mbox{$\mathbb X$}}$ and $\rho_{A\mbox{$\mathbb X$}^c}$ are the reduced density matrices of $\ket{\psi}_{A\mbox{$\mathbb B$}}$ onto subsystems $A\mbox{$\mathbb X$}$ and $A\mbox{$\mathbb X$}^c$, respectively.
Now we consider the above equality for all possible nonempty proper subsets $\mbox{$\mathbb X$}$ of $\mbox{$\mathbb B$} = \{B_1,\cdots,B_n \}$ to obtain
\begin{align}
\sum_{\mbox{$\mathbb X$}}E_a\left(\rho_{A\mbox{$\mathbb X$}}\right)=&\sum_{\mbox{$\mathbb X$}}\left({\delta}_u^{\leftarrow}\left(\rho_{A\mbox{$\mathbb X$}^c}\right)+S\left(\rho_{A|\mbox{$\mathbb X$}^c}\right)\right)\nonumber\\
=&\sum_{\mbox{$\mathbb X$}}{\delta}_u^{\leftarrow}\left(\rho_{A\mbox{$\mathbb X$}^c}\right)+\sum_{\mbox{$\mathbb X$}}S\left(\rho_{A|\mbox{$\mathbb X$}^c}\right)\nonumber\\
=&\sum_{\mbox{$\mathbb X$}}{\delta}_u^{\leftarrow}\left(\rho_{A\mbox{$\mathbb X$}}\right)+\sum_{\mbox{$\mathbb X$}}S\left(\rho_{A|\mbox{$\mathbb X$}}\right),
\label{EDEoAsum}
\end{align}
where the last equality is due to Eq.~(\ref{sets}).
Furthermore, due to the complementary property of conditional entropy, we have
\begin{align}
S\left(\rho_{A|\mbox{$\mathbb X$}}\right)+S\left(\rho_{A|\mbox{$\mathbb X$}^c}\right)=0
\label{compcondent}
\end{align}
for any three-party pure state $\ket{\psi}_{A\mbox{$\mathbb X$}\mbox{$\mathbb X$}^c}$, and this implies
\begin{align}
\sum_{\mbox{$\mathbb X$}}S\left(\rho_{A|\mbox{$\mathbb X$}}\right)=0,
\label{compcondent2}
\end{align}
where the summation is over all nonempty proper subsets of $\mbox{$\mathbb B$}$.
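This cancellation simply reflects the pairing of each subset with its complement; for example, for $n=3$ the six nonempty proper subsets group into the pairs $\left(\{B_1\},\{B_2,B_3\}\right)$, $\left(\{B_2\},\{B_1,B_3\}\right)$ and $\left(\{B_3\},\{B_1,B_2\}\right)$, and each pair contributes zero to the sum by Eq.~(\ref{compcondent}).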
From Eqs.~(\ref{EDEoAsum}) and (\ref{compcondent2}), we have
\begin{align}
\sum_{\mbox{$\mathbb X$}}E_a\left(\rho_{A\mbox{$\mathbb X$}}\right)=\sum_{\mbox{$\mathbb X$}}{\delta}_u^{\leftarrow}\left(\rho_{A\mbox{$\mathbb X$}}\right),
\label{sumEDEU}
\end{align}
for any multi-party pure state $\ket{\psi}_{A\mbox{$\mathbb B$}}$ and its reduced density matrix $\rho_{A\mbox{$\mathbb X$}}$.
Let us now consider UD of a bipartite pure state $\ket{\psi}_{A\mbox{$\mathbb B$}}$; the definition of UD in Eq.~(\ref{UD}) leads us to
\begin{align}
{\delta}_u^{\leftarrow}\left(\ket{\psi}_{A\mbox{$\mathbb B$}}\right)={\mathcal I}\left(\ket{\psi}_{A\mbox{$\mathbb B$}}\right)-E_u^\leftarrow\left(\ket{\psi}_{A\mbox{$\mathbb B$}}\right).
\label{UDpure}
\end{align}
For a bipartite pure state $\ket{\psi}_{A\mbox{$\mathbb B$}}$, we have
\begin{align}
{\mathcal I}\left(\ket{\psi}_{A\mbox{$\mathbb B$}}\right)=&S\left(\rho_A\right)+S\left(\rho_{\mbox{$\mathbb B$}}\right)-S\left(\ket{\psi}_{A\mbox{$\mathbb B$}}\right)\nonumber\\
=&2S\left(\rho_A\right),
\label{mutualpure}
\end{align}
thus Eq.~(\ref{UDpure}) becomes
\begin{align}
{\delta}_u^{\leftarrow}\left(\ket{\psi}_{A\mbox{$\mathbb B$}}\right)=2S\left(\rho_A\right)-E_u^\leftarrow\left(\ket{\psi}_{A\mbox{$\mathbb B$}}\right).
\label{UDpure1}
\end{align}
We note that any purification of $\ket{\psi}_{A\mbox{$\mathbb B$}}$ in
three-party quantum systems $A\mbox{$\mathbb B$} C$ is trivially a product state
$\ket{\psi}_{A\mbox{$\mathbb B$}}\otimes\ket{\phi}_C$ for some pure state
$\ket{\phi}_C$. From the definition of UE in Eq.~(\ref{UE}), we have
\begin{align}
E_u^\leftarrow\left(\ket{\psi}_{A\mbox{$\mathbb B$}}\right)=S\left(\rho_{A}\right)-E_a\left(\rho_{AC}\right),
\label{UE2}
\end{align}
where $\rho_{AC}$ is the reduced density matrix of
$\ket{\psi}_{A\mbox{$\mathbb B$}}\otimes\ket{\phi}_C$ on subsystems $AC$, which is
\begin{align}
\rho_A\otimes\ket{\phi}_C\bra{\phi}.
\label{redAC}
\end{align}
Because $E_a\left(\rho_{AC}\right)=0$ for the product state
$\rho_{AC}$, we have
\begin{align}
E_u^\leftarrow\left(\ket{\psi}_{A\mbox{$\mathbb B$}}\right)=S\left(\rho_{A}\right),
\label{UE3}
\end{align}
for the bipartite pure state $\ket{\psi}_{A\mbox{$\mathbb B$}}$, therefore
Eqs.~(\ref{UDpure1}) and (\ref{UE3}) lead us
to
\begin{align}
{\delta}_u^{\leftarrow}\left(\ket{\psi}_{A\mbox{$\mathbb B$}}\right)=S\left(\rho_{A}\right).
\label{UDpure2}
\end{align}
We also note that EoA of $\ket{\psi}_{A\mbox{$\mathbb B$}}$ is just the entropy of
its subsystems, thus
\begin{align}
{\delta}_u^{\leftarrow}\left(\ket{\psi}_{A\mbox{$\mathbb B$}}\right)=E_a\left(\ket{\psi}_{A\mbox{$\mathbb B$}}\right).
\label{UDEoA}
\end{align}
Now, from Eqs.~(\ref{sumEDEU}) and (\ref{UDEoA}) together with
Inequality~(\ref{spolyE}), we have
\begin{align}
{\delta}_u^{\leftarrow}\left(\ket{\psi}_{A\mbox{$\mathbb B$}}\right)\leq\frac{1}{2^{n-1}-1}\sum_{\mbox{$\mathbb X$}}{\delta}_u^{\leftarrow}\left(\rho_{A\mbox{$\mathbb X$}}\right),
\label{spolyD}
\end{align}
where the summation is over all non-empty proper subsets of $\mbox{$\mathbb B$}$.
In other words, the strong polygamy inequality of entanglement in
(\ref{spolyE}) also implies the strong polygamy of quantum discord
for the case of multi-party pure states $\ket{\psi}_{A\mbox{$\mathbb B$}}$, that is,
closed quantum systems.
\section{Summary}
\label{Sec:Con}
We have proposed a strong polygamy inequality for multi-party quantum
entanglement; by considering the possible amount of entanglement distribution in terms of EoA
between a fixed party and any subset of the rest parties in a multi-party quantum system,
we have provided an upper bound of the distributed entanglement between a party and the rest. We have also shown that
this upper bound also serves as a lower bound of the usual polygamy inequality.
We have further shown that the strong polygamy of entanglement implies that
of quantum discord for the case of multi-party pure states.
Our results strengthen the characterization of the polygamous nature of
entanglement in multi-party quantum systems. Moreover, our results show a close relation
between PoE and quantum discord, which provides a strong clue for possible relations
between PoE and other quantum correlation measures.
Given the importance of the study of multipartite quantum correlations, our results can serve as a useful reference for future
work on quantum correlations in multi-party quantum systems.
\section*{Acknowledgments}
This research was supported by Basic Science Research Program through the National Research Foundation of Korea(NRF)
funded by the Ministry of Education, Science and Technology(2012R1A1A1012246).
\end{document}
\begin{document}
\title{Coherence of resonant light-matter interaction in the strong-coupling limit}
\author{Th. K. Mavrogordatos}
\email[Email address(es): ]{[email protected]; [email protected]}
\affiliation{Department of Physics, Stockholm University, SE-106 91, Stockholm, Sweden}
\date{\today}
\begin{abstract}
We explore the role of quantum fluctuations in the strong-coupling limit of the dissipative Jaynes-Cummings oscillator driven on resonance. For weak excitation, we derive analytical expressions for the spectrum and the intensity correlation function for the photons scattered by the two-state atom coupled to the coherently driven cavity mode. We do so by writing down a birth-death process adding the higher orders in the excitation strength needed to go beyond the pure-state factorization, following the method introduced in [H. J. Carmichael, {\it Statistical Methods in Quantum Optics 2}, Springer, 2008, Sec. 16.3.4]. Our results for the first and second-order correlation functions are complemented by the numerical investigation of the waiting-time distribution for the photon emissions directed sideways, and the comparison with ordinary resonance fluorescence. To close out our discussion, we increase the driving field amplitude and approach the critical point organizing a second-order dissipative quantum phase transition by depicting the excitation pathways in the intracavity field distribution for a finite system size.
\end{abstract}
\pacs{32.50.+d, 42.50.Lc, 42.50.Ar, 03.65.Yz, 42.50.-p}
\keywords{strong-coupling limit, cavity QED, open driven Jaynes-Cummings model, squeezing-induced linewidth narrowing, spontaneous dressed-state polarization}
\maketitle
\section{Introduction}
Single-atom quantum electrodynamics (QED) in its strong-coupling limit occupies a central position in the study of multi-photon quantum-nonlinear optics, providing the ground where the invalidation of the small-noise assumption, upon which the linear theory of fluctuations rests, is played out. Absorptive optical bistability for a single atom inside a resonant cavity is an elementary and illustrative example of a quantum dissipative system operating at the interface between the quantum and classical limit \cite{Savage1988}. The exquisite control acquired over cavity and circuit QED architectures has prompted an extensive investigation of the intrinsically quantum nonlinearity of coherent radiation-matter interaction most commonly encapsulated in the Jaynes-Cummings (JC) model \cite{JCpaper1963} and its extended versions (see e.g., \cite{Bishop2009}). In particular, the strong-coupling conditions attained in the circuit QED experiment of \cite{Fink2008} allowed the observation of vacuum Rabi splitting for up to two photons, demonstrating that ``the system is quantum mechanical in nature'' by going one step beyond the mode splitting that is in principle explainable by the treatment of two linearly coupled oscillators. More recently, photon correlation functions of second and third order were employed in pursuit of an experimental signature of two-photon blockade, via the demonstration of three-photon antibunching with simultaneous two-photon bunching \cite{Hamsen2017}.
Our interest here is with the resonant excitation of the JC oscillator in its strong-coupling limit. On resonance, photon blockade breaks down by means of a second-order dissipative quantum phase transition \textemdash{a} continuous transition organized around a critical point in the space of drive amplitude and detuning \cite{Carmichael2015}. It was as well in the strong-coupling limit that one of the earliest applications of quantum trajectory theory was given [see Sec. 5 of \cite{Alsing1991}], presenting a novel framework to account for the translation of microscopic spontaneous emission events to the accumulated macroscopic cavity-field switching effect as a diffusion process. Central to the demonstration of spontaneous dressed-state polarization is the emergence of new semiclassical states organizing the asymptotic dynamics (also called {\it neoclassical} states) when the length of the Bloch vector is conserved [see Sec. 4 of \cite{Alsing1991} and Sec. 16.3.1 for an extension of the Maxwell-Bloch solutions to $N$ atoms]. The quantum dissipative dynamics responsible for spontaneous symmetry breaking is elucidated by the demonstration of quantum jumps induced by the extraction of homodyne photocurrent records \textemdash{a} partial yet continuous observation \textemdash{in} the experiment of \cite{Armen2009} following the theoretical grounding provided by \cite{Mabuchi1998}.
Instead of unravelling the density operator in quantum trajectories, the work reported in this brief Communication deals with ensemble-averaged quantities obtained from the master equation and the quantum regression formula. Initially, we carry on with the formalism developed in Sec. 16.3.4. of \cite{QO2} to account for the squeezing-induced linewidth narrowing in the fluorescence spectrum, as has been done for the transmitted light. After constructing an effective model within the secular approximation in the basis of dressed JC eigenstates for weak excitation in Sec. \ref{sec:MEsec}, we derive analytical results for the first and second-order correlation functions of atomic emission in Sec. \ref{sec:cohwel}. Finally, to reveal the role of quantum fluctuations close to the critical point, in Sec. \ref{sec:phasebmd} we consider a driving-field amplitude of the same order of magnitude as the light-matter coupling strength. We focus on the development of phase bimodality when the JC oscillator is driven such as to maintain a constant empty-cavity excitation. For this drive, the Maxwell-Bloch equations place us on the upper branch of the bistability curve, predicting that empty-cavity amplitude as a steady-state output.
\section{Master equation, dressed states and reduced model}
\label{sec:MEsec}
Our starting point is the familiar Lindblad master equation (ME) for single-atom cavity QED with coherent driving of the cavity mode on resonance:
\begin{equation}\label{eq:ME1}
\begin{aligned}
\frac{d\rho}{dt}&\equiv\mathcal{L}\rho=-i[\omega_0(\sigma_{+}\sigma_{-} + a^{\dagger}a)+ig(a^{\dagger}\sigma_{-}-a\sigma_{+}),\rho]\\
&-i[\overline{\mathcal{E}}_0 e^{-i\omega_0 t}a^{\dagger} + \overline{\mathcal{E}}_0^{*} e^{i\omega_0 t}a,\rho]\\
&+\kappa (2 a \rho a^{\dagger} -a^{\dagger}a \rho - \rho a^{\dagger}a)\\
&+\frac{\gamma}{2}(2\sigma_{-}\rho \sigma_{+} - \sigma_{+}\sigma_{-}\rho - \rho \sigma_{+}\sigma_{-}),
\end{aligned}
\end{equation}
where $\rho$ is the system density operator, $a$ ($a^{\dagger}$) are the annihilation (creation) operators for the cavity photons, and $\sigma_{+}$ ($\sigma_{-}$) are the raising (lowering) operators for the two-state atom dipole-coupled to the cavity mode with strength $g$. In what follows, this coupling constant is appreciably larger than the dissipation rates, $2\kappa$, which is the photon loss rate from the cavity, and $\gamma$, the rate at which the atom is damped to modes other than the privileged cavity mode, resonantly driven with amplitude $\overline{\mathcal{E}}_0$. The condition $g \gg \kappa, \gamma/2$ defines the strong-coupling limit, while the inequality $2(|\overline{\mathcal{E}}_0|/g) \ll 1$ defines the weak-driving regime which will concern us for the largest part of this Communication. We write Eq. \eqref{eq:ME1} in matrix form using the truncated basis $\{\ket{2}_A\ket{n}_a, \ket{1}_A\ket{n}_a; n=0,1,\ldots,N\}$ (the two states of the atom are denoted by the subscript $A$ and the Fock-states for the cavity field with the subscript $a$) and then solve the resulting set of linear ordinary differential equations using an explicit Runge-Kutta method of eighth order, checking the invariance of obtained results with respect to $N$. The steady states obtained have also been checked against the unique eigenstate corresponding to the zero eigenvalue of the Liouvillian super-operator. We have used the computational toolbox for quantum optics detailed in \cite{Tan1999} alongside codes developed ad hoc in the programming languages {\it Matlab} and {\it Python}.
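As an illustration of this procedure, a minimal steady-state computation can be sketched as follows (this fragment is given for orientation only: it uses the open-source QuTiP package rather than the toolbox of \cite{Tan1999}, takes $\hbar=1$ and a real drive amplitude in the frame rotating at the drive frequency, and the truncation and parameter values are arbitrary choices, not those used for the figures):
\begin{verbatim}
# Illustrative sketch: steady state of the resonantly driven JC
# master equation written above. Truncation N and parameters are
# arbitrary demonstration values.
import numpy as np
import qutip as qt

N = 30                              # cavity Fock-space truncation
g, kappa, gamma = 1.0, 0.01, 0.01   # strong coupling: g >> kappa, gamma/2
E0 = 0.05 * g                       # weak drive: 2|E0|/g << 1

a  = qt.tensor(qt.destroy(N), qt.qeye(2))   # cavity annihilation operator
sm = qt.tensor(qt.qeye(N), qt.sigmam())     # atomic lowering operator

# Hamiltonian in the frame rotating at the (resonant) drive frequency
H = 1j * g * (a.dag() * sm - a * sm.dag()) + E0 * (a + a.dag())
c_ops = [np.sqrt(2 * kappa) * a, np.sqrt(gamma) * sm]  # damping channels

rho_ss = qt.steadystate(H, c_ops)   # null eigenstate of the Liouvillian
print(qt.expect(sm.dag() * sm, rho_ss))   # ~ (|E0|/g)^2 at weak excitation
print(qt.expect(a.dag() * a, rho_ss))     # ~ 2(|E0|/g)^4 at weak excitation
\end{verbatim}
The two printed expectation values can be compared with the weak-excitation estimates obtained below, $\braket{\sigma_{+}\sigma_{-}}_{\rm ss}\simeq(|\overline{\mathcal{E}}_0|/g)^2$ and $\braket{a^{\dagger}a}_{\rm ss}\simeq 2(|\overline{\mathcal{E}}_0|/g)^4$.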
The stationary states of the resonantly driven JC model in the interaction picture \cite{DynamicStarkEffect, QO2} are the ``ground'' state
\begin{equation}\label{eq:groundstate}
\ket{\tilde{\psi}_G}=S(\eta) \ket{O_{12}(r)}_A \ket{0}_a,
\end{equation}
with {\it quasi}energy
\begin{equation}\label{eq:quasienergiesJC1}
E_G=0,
\end{equation}
and the ``excited'' state doublets ($n=1,2, \ldots$)
\begin{equation}
\ket{\tilde{\psi}_{n,U (L)}}=D[\alpha(E_{n,U(L)})]S(\eta)\frac{1}{\sqrt{2}}\ket{\tilde{U}(\tilde{L})}
\end{equation}
with $\ket{\tilde{U}(\tilde{L})}\equiv \ket{O_{21}(r)}_A \ket{n-1}_a \pm i \ket{O_{12}(r)}_A \ket{n}_a$ (we take $U \to +, \, L \to -$) and {\it quasi}energies
\begin{equation}\label{eq:quasienergiesJC2}
E_{n,U(L)}=\pm e^{-3r} \sqrt{n} \hbar g.
\end{equation}
The atomic-state superpositions, with coefficients depending on the degree of squeezing, $r$, have the form
\begin{equation}
\begin{aligned}
\ket{O_{12}(r)}_A \equiv &\frac{1}{\sqrt{2}}\Bigg(\sqrt{1 + e^{-2r}} \ket{1}_A \\
&+ i e^{i{\rm arg}(\overline{\mathcal{E}}_0)} \sqrt{1 - e^{-2r}} \ket{2}_A\Bigg),
\end{aligned}
\end{equation}
\begin{equation}
\begin{aligned}
\ket{O_{21}(r)}_A \equiv &\frac{1}{\sqrt{2}}\Bigg(\sqrt{1 + e^{-2r}} \ket{2}_A\\
&- i e^{-i{\rm arg}(\overline{\mathcal{E}}_0)} \sqrt{1 - e^{-2r}} \ket{1}_A\Bigg),
\end{aligned}
\end{equation}
where $D(\alpha) \equiv \exp(\alpha a^{\dagger}-\alpha^{*}a)$ is the displacement operator with an energy-dependent argument
$$\alpha(E;r)=-e^{i{\rm arg}(\overline{\mathcal{E}}_0)}[E/(\hbar g)] e^{4r}\sqrt{1-e^{-4r}};$$ $r$ is defined by
\begin{equation}
e^{-2r} \equiv \sqrt{1 - (2|\overline{\mathcal{E}}_0|/g)^2}
\end{equation}
and features in the argument of the squeeze operator $S(\eta) \equiv \exp [\frac{1}{2}(\eta^{*}a^2-\eta a^{\dagger 2})]$ as $\eta=-r e^{2i {\rm arg}(\overline{\mathcal{E}}_0)}$. The states placed in quotes are stationary in the interaction and not in the Schr\"{o}dinger picture; they correspond to {\it quasi}energies and not actual energies.
Equipped with the general properties of the dressed JC eigenstates, we will delineate the procedure for obtaining some analytical results in the weak-driving regime. We first make the approximation
\begin{equation}
r=-\frac{1}{2}\ln\left[\sqrt{1-\left(2\frac{|\overline{\mathcal{E}}_0|}{g}\right)^2}\,\right]=-\frac{1}{4}\ln\left[1-\left(2\frac{|\overline{\mathcal{E}}_0|}{g}\right)^2\right] \simeq \left(\frac{|\overline{\mathcal{E}}_0|}{g}\right)^2,
\end{equation}
which is consistent with $\eta \approx - (\overline{\mathcal{E}}_0/g)^2$ and
\begin{equation}
\begin{aligned}
\alpha[E_{n,U(L)};r]& \approx -{\rm sgn}[E_{n,U(L)}] e^{i{\rm arg}(\overline{\mathcal{E}}_0)} \sqrt{n} (1+r) \sqrt{4r}\\
&\approx -{\rm sgn}[E_{n,U(L)}] \sqrt{n} (2 \overline{\mathcal{E}}_0 /g),
\end{aligned}
\end{equation}
for $2(|\overline{\mathcal{E}}_0|/g) \ll 1$. Further, we resort to the secular approximation in the basis of dressed JC eigenstates, writing the matrix elements of the density operator in the interaction picture as $\overline{\rho}_{E E^{\prime}}$, where $E, E^{\prime}$ are the {\it quasi}energies defined in Eqs. \eqref{eq:quasienergiesJC1}, \eqref{eq:quasienergiesJC2}. We adopt the notation used in Secs. 16.3.3 and 16.3.4 of \cite{QO2}. When transforming the ME \eqref{eq:ME1} in the dressed-state basis [considering only the nontrivial time evolution due to cavity and atomic damping], for the diagonal matrix elements we write down
\begin{equation}
\dot{\overline{\rho}}_{E E}=\sum_{\epsilon} \left(\gamma_{\epsilon, E} \overline{\rho}_{\epsilon \epsilon} - \gamma_{E, \epsilon}\overline{\rho}_{E E}\right),
\end{equation}
with
\begin{equation}\label{eq:gammaratesdef}
\gamma_{\epsilon, E}=\gamma \left|\braket{E|\sigma_{-}|\epsilon}\right|^2 + 2\kappa \left|\braket{E|a|\epsilon}\right|^2,
\end{equation}
while the off-diagonal elements obey the rate equations
\begin{equation}\label{eq:nondiag}
\begin{aligned}
\dot{\overline{\rho}}_{E E^{\prime}}&=-\left(\sum_{\epsilon} \frac{\gamma_{E, \epsilon} + \gamma_{E^{\prime}, \epsilon}}{2} - K^{(1)}_{E,E^{\prime}}\right) \overline{\rho}_{E E^{\prime}}\\
&+ K^{(2)}_{E,E^{\prime}} \overline{\rho}_{-E^{\prime}-E},
\end{aligned}
\end{equation}
with
\begin{subequations}\label{eq:kapparatesdef}
\begin{align}
&K^{(1)}_{E,E^{\prime}} \equiv \gamma \braket{E|\sigma_{-}|E} \braket{E^{\prime}|\sigma_{+}|E^{\prime}} + 2\kappa \braket{E|a|E} \braket{E^{\prime}|a^{\dagger}|E^{\prime}}, \\
&K^{(2)}_{E,E^{\prime}} \equiv \gamma \braket{E|\sigma_{-}|-E^{\prime}} \braket{-E|\sigma_{+}|E^{\prime}} \notag \\
&+ 2\kappa \braket{E|a|-E^{\prime}} \braket{-E|a^{\dagger}|E^{\prime}}.
\end{align}
\end{subequations}
The ``ground'' state of the time-independent JC Hamiltonian [see Eq. \eqref{eq:groundstate} and the approximations made in Eqs. 16.184-16.186 of \cite{QO2}], dressed by the weak drive, is approximated as
\begin{equation}\label{eq:gs}
\ket{\tilde{\psi}_G}=\ket{G} + i\frac{\overline{\mathcal{E}}_0}{g}\ket{2}_A \ket{0}_a + \frac{1}{\sqrt{2}}\left(\frac{\overline{\mathcal{E}}_0}{g}\right)^2\ket{1}_A \ket{2}_a,
\end{equation}
with $\ket{G} \equiv \ket{1}_A \ket{0}_a$, while the first ``excited'' doublet acquires a contribution from the ground state,
\begin{subequations}\label{eq:es}
\begin{align}
&\ket{\tilde{\psi}_{1,U}}=\ket{1,U} + i \frac{1}{\sqrt{2}}\frac{\overline{\mathcal{E}}^{*}_0}{g}\ket{1}_A \ket{0}_a,\\
& \ket{\tilde{\psi}_{1,L}}=\ket{1,L} + i \frac{1}{\sqrt{2}}\frac{\overline{\mathcal{E}}^{*}_0}{g}\ket{1}_A \ket{0}_a,
\end{align}
\end{subequations}
where
\begin{subequations}
\begin{align}
\ket{1,U} \equiv \frac{1}{\sqrt{2}}(\ket{2}_A \ket{0}_a +i \ket{1}_A\ket{1}_a), \\
\ket{1,L} \equiv \frac{1}{\sqrt{2}}(\ket{2}_A \ket{0}_a -i \ket{1}_A\ket{1}_a),
\end{align}
\end{subequations}
is the first excited doublet of the JC ladder. Eqs. \eqref{eq:gs} and \eqref{eq:es} follow from a perturbative expansion of the dressed JC eigenstates \textemdash{linear} combinations of displaced and squeezed Fock states \cite{DynamicStarkEffect} \textemdash{in} powers of the ratio between the external driving-field amplitude and the light-matter coupling constant, since all operations upon which the treatment is based rely on the degree of squeezing. This treatment is followed in Sec. 16.3.4. of \cite{QO2} for the determination of the optical spectrum in the strong-coupling limit of cavity QED building upon the initial sketch of \cite{cavityQEDBerman}. As we will find out, all relevant observables and correlation functions we will be dealing with and this method can account for, are expressed in powers of $r \simeq (|\overline{\mathcal{E}}_0|/g)^2$.
\begin{figure}
\caption{{\it Schematic transition diagram in the effective three-level model.}}
\label{fig:levels}
\end{figure}
Within the lowest-order approximation at weak excitation, we limit our attention to the transitions occurring between the ``ground'' state and the first ``excited'' state doublet, as depicted in Fig. \ref{fig:levels}. To specify the equations of motion for the matrix elements of the density matrix on the basis of these three dressed states, we use Eqs. \eqref{eq:gammaratesdef} and \eqref{eq:kapparatesdef} to determine the rates
\begin{subequations}\label{eq:gammarates}
\begin{align}
&\gamma_{G;G}=\gamma|\braket{\tilde{\psi}_G|\sigma_{-}|\tilde{\psi}_G}|^2 + 2\kappa|\braket{\tilde{\psi}_G|a|\tilde{\psi}_G}|^2= \gamma \left(\frac{|\overline{\mathcal{E}}_0|}{g}\right)^2, \\
&\gamma_{G;1,U}=\gamma_{G;1,L}=(\kappa+\gamma/2)\left(\frac{|\overline{\mathcal{E}}_0|}{g}\right)^4, \\
&\gamma_{1,U;G}=\gamma_{1,L;G}=(\kappa + \gamma/2),
\end{align}
\end{subequations}
together with
\begin{subequations}\label{eq:kapparates}
\begin{align}
&K^{(1)}_{G;1,U}=K^{(1)}_{G;1,L}=K^{(1)}_{1,U;G}=K^{(1)}_{1,L;G}=\gamma\left(i\frac{\overline{\mathcal{E}}_0}{g}\right) \left(i\frac{1}{2}\frac{\overline{\mathcal{E}}^{*}_0}{g}\right) \notag \\
&=-\frac{\gamma}{2}\left(\frac{|\overline{\mathcal{E}}_0|}{g}\right)^2,\\
&K^{(2)}_{G;1,U}=K^{(2)}_{G;1,L}=\gamma\braket{\tilde{\psi}_G|\sigma_{-}|\tilde{\psi}_{1,L}} \braket{\tilde{\psi}_G|\sigma_{+}|\tilde{\psi}_{1,U}}\notag \\
&+2\kappa\braket{\tilde{\psi}_G|a|\tilde{\psi}_{1,L}}\braket{\tilde{\psi}_G|a^{\dagger}|\tilde{\psi}_{1,U}}=(\kappa + \gamma/2)\left(\frac{\overline{\mathcal{E}}^{*}_0}{g}\right)^2,\\
&K^{(2)}_{1,U;G}=K^{(2)}_{1,L;G}=(\kappa + \gamma/2)\left(\frac{\overline{\mathcal{E}}_0}{g}\right)^2.
\end{align}
\end{subequations}
Hence, within the secular approximation, the rate equations for the diagonal elements are
\begin{subequations}
\begin{align}
&\dot{\overline{\rho}}_{1,U;1,U}=-(\kappa + \gamma/2)\overline{\rho}_{1,U;1,U} + (\kappa + \gamma/2)\left(\frac{|\overline{\mathcal{E}}_0|}{g}\right)^4 \overline{\rho}_{GG},\\
&\dot{\overline{\rho}}_{1,L;1,L}=-(\kappa + \gamma/2)\overline{\rho}_{1,L;1,L} + (\kappa + \gamma/2)\left(\frac{|\overline{\mathcal{E}}_0|}{g}\right)^4 \overline{\rho}_{GG},
\end{align}
\end{subequations}
with the stationary solution $\overline{\rho}_{1,U;1,U}=\overline{\rho}_{1,L;1,L}=(|\overline{\mathcal{E}}_0|/g)^4 \overline{\rho}_{GG}$. These matrix elements determine the steady-state density matrix (assuming $\overline{\rho}_{GG}=1$)
\begin{equation}\label{eq:rhoss}
\overline{\rho}_{\rm ss}= |\tilde{\psi}_{G}\rangle \langle \tilde{\psi}_{G}| + \left(\frac{|\overline{\mathcal{E}}_0|}{g}\right)^4\left(|\tilde{\psi}_{1,U}\rangle \langle \tilde{\psi}_{1,U}| + |\tilde{\psi}_{1,L}\rangle \langle \tilde{\psi}_{1,L}| \right),
\end{equation}
going beyond the pure-state factorization ($\overline{\rho}_{\rm ss}= |\tilde{\psi}_{G}\rangle \langle \tilde{\psi}_{G}|$) by including a birth-death process contributing by higher-order terms to a mixed state. We also note that the three dressed states under consideration are normalized to dominant order, with $\braket{\tilde{\psi}_{G}|\tilde{\psi}_{1,(U,L)}}=0$ while $\braket{\tilde{\psi}_{1,(U,L)}|\tilde{\psi}_{1,(U,L)}}=(1/2)(|\overline{\mathcal{E}}_0|/g)^2$; the latter, however, combined with the ``excited'' state occupation probability $p_{1,\, {\rm ss}}=(|\overline{\mathcal{E}}_0|/g)^4$, leads to negligible contributions to the dominant terms in the steady-state observables extracted from the three-level model depicted in Fig. \ref{fig:levels}. Likewise, terms correcting the {\it ansatz} $\overline{\rho}_{GG}=1$, required by the normalization of $\overline{\rho}_{\rm ss}$, have a negligible contribution.
As a first example based on the form of Eq. \eqref{eq:rhoss}, we note that the steady-state photon number $\braket{a^{\dagger}a}_{\rm ss}$ comprises two equal parts: one originates from the ``ground'' state (the last term on the right-hand side of Eq. \eqref{eq:gs}) and the other from the ``excited'' doublet, which is equal to $p_{1,\, {\rm ss}}$. Next, we briefly point to a characteristic property of atomic emission deriving from the form of \eqref{eq:rhoss}. We define $\braket{\tilde{\sigma}_{\pm}} \equiv {\rm tr}(\tilde{\rho}_{\rm ss} \sigma_{\pm})$, and compute the steady-state normal-ordered variance of the fluctuation $\Delta \tilde{\sigma}_{\theta} \equiv (1/2)(e^{-i\theta}\Delta\tilde{\sigma}_{-} + e^{i\theta}\Delta\tilde{\sigma}_{+})$ for an adjustable phase $\theta$ of the local oscillator employed in a common scheme for detecting squeezing \cite{Mandel1982}. We then find
\begin{equation}
\begin{aligned}
\braket{:\left(\Delta \tilde{\sigma}_{\theta} \right)^2:}&=\frac{1}{2} \left(\frac{|\overline{\mathcal{E}}_0|}{g}\right)^2 \cos[2(\theta - {\rm arg}(\overline{\mathcal{E}}_0))] \\
&+ \mathcal{O}[(|\overline{\mathcal{E}}_0|/g)^4].
\end{aligned}
\end{equation}
Hence, squeezing of steady-state fluctuations attains its most negative value, $\braket{:(\Delta \tilde{\sigma}_{\theta=\pi/2 + {\rm arg}(\overline{\mathcal{E}}_0)})^2:} \simeq -r/2$, when the local oscillator is in phase with the mean polarization amplitude $\braket{\tilde{\sigma}_{-}}_{\rm ss} \approx i \overline{\mathcal{E}}_0/g$. The dependence of squeezing on the square of the driving field amplitude is also encountered in ordinary resonance fluorescence for weak excitation [where $|\overline{\mathcal{E}}_0|$ is instead scaled by $\gamma$, see Sec. 2.3.6 of \cite{QO1}].
\section{Coherence at weak excitation}
\label{sec:cohwel}
Having now extracted some indicative steady-state results in the weak excitation regime, let us see what can be learned about the coherence of the atomic emission from the effective three-level transition model, depicted in Fig. \ref{fig:levels}, originating from the master equation in the secular approximation.
\begin{figure*}
\caption{{\it First and second-order coherence of fluorescence in the weak-excitation limit.}}
\label{fig:g1g2plots}
\end{figure*}
\subsection{The fluorescence spectrum}
For this purpose, we turn to the off-diagonal matrix elements leading to dominant-order terms in $|\overline{\mathcal{E}}_0|/g$ (for instance, we do not take into account transitions within the ``excited'' state doublet). Invoking the quantum regression formula, the first-order correlation function required for the determination of the fluorescence spectrum is ($\tau \geq 0$)
\begin{equation}\label{eq:g1general}
\begin{aligned}
\braket{\sigma_{+}(0)\sigma_{-}(\tau)}_{\rm ss}&=e^{-i\omega_0 \tau} {\rm tr}[\sigma_{-} e^{\mathcal{\tilde{L}}\tau}(\tilde{\rho}_{\rm ss}\sigma_{+})]\\
&=e^{-i\omega_0 \tau} {\rm tr}[\sigma_{-} \tilde{R}(\tau)],
\end{aligned}
\end{equation}
with
\begin{equation}
\tilde{R}(\tau) \equiv e^{\mathcal{\tilde{L}}\tau}(\tilde{\rho}_{\rm ss}\sigma_{+}),
\end{equation}
where $\mathcal{\tilde{L}}$ is the Liouvillian super-operator in the interaction picture. We therefore need to evaluate the matrix elements of $\tilde{R}(\tau)$ for all relevant $E, E^{\prime}$. To eliminate the trivial time dependence, as we did before, we transform
\begin{equation}
\overline{R}_{EE^{\prime}} \equiv e^{(i/\hbar)(E-E^{\prime})t} \tilde{R}_{E E^{\prime}}.
\end{equation}
The initial values of the relevant matrix elements, which are to determine the dominant order in the perturbative expansion in powers of $(|\overline{\mathcal{E}}_0|/g)$, are
\begin{equation}
\begin{cases}
&(\tilde{\rho}_{\rm ss}\sigma_{+})_{G;1,U}=-\frac{1}{\sqrt{2}} \left(\displaystyle \frac{|\overline{\mathcal{E}}_0|}{g} \right)^2, \\
&(\tilde{\rho}_{\rm ss}\sigma_{+})_{1,U;G}=\frac{1}{\sqrt{2}} \left(\displaystyle \frac{|\overline{\mathcal{E}}_0|}{g}\right)^4, \\
&(\tilde{\rho}_{\rm ss}\sigma_{+})_{G;1,L}=-\frac{1}{\sqrt{2}} \left( \displaystyle \frac{|\overline{\mathcal{E}}_0|}{g} \right)^2,\\
&(\tilde{\rho}_{\rm ss}\sigma_{+})_{1,L;G}= \frac{1}{\sqrt{2}} \left(\displaystyle \frac{|\overline{\mathcal{E}}_0|}{g}\right)^4.
\end{cases}
\end{equation}
At later times, $\tau >0$, the matrix elements written above satisfy the equations of motion corresponding to Eqs. \eqref{eq:nondiag} for the density-matrix elements:
\begin{subequations}\label{eq:generalSA}
\begin{align}
&\dot{\overline{R}}_{G;1,U}=-\left[\frac{1}{2}(\kappa + \gamma/2) - K^{(1)}_{G; 1,U}\right] \overline{R}_{G; 1,U} + K^{(2)}_{G;1,U} \overline{R}_{1,L;G}, \label{eq:generalSA1} \\
&\dot{\overline{R}}_{G;1,L}=-\left[\frac{1}{2}(\kappa + \gamma/2) - K^{(1)}_{G; 1,L}\right] \overline{R}_{G; 1,L} + K^{(2)}_{G;1,L} \overline{R}_{1,U;G},\label{eq:generalSA2} \\
&\dot{\overline{R}}_{1,U;G}=-\left[\frac{1}{2}(\kappa + \gamma/2) - K^{(1)}_{1,U;G}\right] \overline{R}_{1,U;G} + K^{(2)}_{1,U;G} \overline{R}_{G;1,L}, \label{eq:generalSA3} \\
&\dot{\overline{R}}_{1,L;G}=-\left[\frac{1}{2}(\kappa + \gamma/2) - K^{(1)}_{1,L;G}\right] \overline{R}_{1,L;G} + K^{(2)}_{1,L;G} \overline{R}_{G;1,U} \label{eq:generalSA4}
\end{align}
\end{subequations}
and
\begin{subequations}
\begin{align}
&\dot{\overline{R}}_{1,U(U);1,U(L)}=-(\kappa + \gamma/2)\overline{R}_{1,U(U);1,U(L)} \notag\\
&+ (\kappa + \gamma/2)\left(\frac{|\overline{\mathcal{E}}_0|}{g}\right)^4 \overline{R}_{G;G}, \\
&\dot{\overline{R}}_{1,L(L);1,L(U)}=-(\kappa + \gamma/2)\overline{R}_{1,L(L);1,L(U)} \notag \\
&+ (\kappa + \gamma/2)\left(\frac{|\overline{\mathcal{E}}_0|}{g}\right)^4 \overline{R}_{G;G}.
\end{align}
\end{subequations}
We note that in the set of Eqs. \eqref{eq:generalSA}, all coefficients $K^{(1)}_{E, E^{\prime}}$ are of order $(|\overline{\mathcal{E}}_0|/g)^2$ for all relevant $E, E^{\prime}$ values labeling the states involved in the dynamics within the interaction picture. Hence, we neglect their contribution to the damping rate $(1/2)(\kappa + \gamma/2)$. At the same time, the coefficients $K^{(2)}_{G;1,(U,L)}$ in Eqs. \eqref{eq:generalSA1} and \eqref{eq:generalSA2} multiply terms which are already of order higher than those on the left-hand side. Keeping then terms of the same order on both sides of the equations leads to
\begin{subequations}\label{eq:g1eqscoupled}
\begin{align}
& \dot{\overline{R}}_{G;1,U}=-\frac{1}{2}(\kappa + \gamma/2)\overline{R}_{G;1,U} \\
& \dot{\overline{R}}_{G;1,L}=-\frac{1}{2}(\kappa + \gamma/2)\overline{R}_{G;1,L} \\
&\dot{\overline{R}}_{1,U;G}=-\frac{1}{2}(\kappa + \gamma/2)\overline{R}_{1,U;G} + (\kappa + \gamma/2) \left(\frac{|\overline{\mathcal{E}}_0|}{g}\right)^2 \overline{R}_{G;1,L},\\
&\dot{\overline{R}}_{1,L;G}=-\frac{1}{2}(\kappa + \gamma/2) \overline{R}_{1,L;G} + (\kappa + \gamma/2) \left(\frac{|\overline{\mathcal{E}}_0|}{g}\right)^2 \overline{R}_{G;1,U}.
\end{align}
\end{subequations}
Their solutions read
\begin{subequations}\label{eq:Reqsfinal}
\begin{align}
&\overline{R}_{G;1,U}(\tau)=-\frac{1}{\sqrt{2}} \left(\displaystyle \frac{|\overline{\mathcal{E}}_0|}{g} \right)^2 e^{-\frac{1}{2}(\kappa + \gamma/2)\tau}, \\
&\overline{R}_{G;1,L}(\tau)=-\frac{1}{\sqrt{2}} \left(\displaystyle \frac{|\overline{\mathcal{E}}_0|}{g} \right)^2 e^{-\frac{1}{2}(\kappa + \gamma/2)\tau}, \\
&\overline{R}_{1,U;G}(\tau)=\frac{1}{\sqrt{2}}\left(\frac{|\overline{\mathcal{E}}_0|}{g}\right)^4 \{-1+2 [1 + (1/2)(\kappa + \gamma/2)\tau]\} \notag \\
&\times e^{-\frac{1}{2}(\kappa + \gamma/2)\tau}, \\
&\overline{R}_{1,L;G}(\tau)=\frac{1}{\sqrt{2}}\left(\frac{|\overline{\mathcal{E}}_0|}{g}\right)^4 \{-1+2 [1 + (1/2)(\kappa + \gamma/2)\tau]\} \notag \\
&\times e^{-\frac{1}{2}(\kappa + \gamma/2)\tau}.
\end{align}
\end{subequations}
It remains to find the matrix elements of $\sigma_{-}$ for the states involved in the transitions specified above:
\begin{equation}\label{eq:sigmame}
\begin{cases}
&\braket{\tilde{\psi}_G|\sigma_{-}|\tilde{\psi}_{1,U}}=1/\sqrt{2},\\
&\braket{\tilde{\psi}_{1,U}|\sigma_{-}|\tilde{\psi}_{G}}=-(1/\sqrt{2})(|\overline{\mathcal{E}}_0|/g)^2, \\
&\braket{\tilde{\psi}_G|\sigma_{-}|\tilde{\psi}_{1,L}}=1/\sqrt{2}, \\
&\braket{\tilde{\psi}_{1,L}|\sigma_{-}|\tilde{\psi}_{G}} =-(1/\sqrt{2})(|\overline{\mathcal{E}}_0|/g)^2.
\end{cases}
\end{equation}
Substituting the expressions of Eqs. \eqref{eq:Reqsfinal} and \eqref{eq:sigmame} into Eq. \eqref{eq:g1general}, we arrive at
\begin{equation}
\begin{aligned}
e^{i\omega_0 \tau} \braket{\sigma_{+}(0)\sigma_{-}(\tau)}_{\rm ss}=&\braket{\tilde{\psi}_G|\sigma_{-}|\tilde{\psi}_{G}} \overline{R}_{G;G} \notag \\
&+\braket{\tilde{\psi}_G|\sigma_{-}|\tilde{\psi}_{1,U}} \overline{R}_{1,U;G}(\tau) e^{-ig\tau} \notag \\
&+\braket{\tilde{\psi}_G|\sigma_{-}|\tilde{\psi}_{1,L}} \overline{R}_{1,L;G}(\tau)e^{ig\tau}\notag \\
&+ \braket{\tilde{\psi}_{1,U}|\sigma_{-}|\tilde{\psi}_{G}} \overline{R}_{G;1,U}(\tau)e^{ig\tau} \notag \\
&+ \braket{\tilde{\psi}_{1,L}|\sigma_{-}|\tilde{\psi}_{G}} \overline{R}_{G;1,L}(\tau)e^{-ig\tau}
\end{aligned}
\end{equation}
and, finally, it follows that ($\tau \geq 0$)
\begin{equation}\label{eq:firsorderCorr}
\begin{aligned}
&\braket{\sigma_{+}(0)\sigma_{-}(\tau)}_{\rm ss}=\left(\frac{|\overline{\mathcal{E}}_0|}{g}\right)^2 e^{-i\omega_0 \tau} + 2 \left(\frac{|\overline{\mathcal{E}}_0|}{g}\right)^4\\
& \times e^{-[\frac{1}{2}(\kappa + \gamma/2)+i\omega_0]\tau} \left[1 + \frac{1}{2}(\kappa + \gamma/2)\tau\right] \cos(g\tau).
\end{aligned}
\end{equation}
The numerically-evaluated first-order correlation function $\braket{\sigma_{+}(0)\sigma_{-}(\tau)}_{\rm ss}$ is depicted in Fig. \ref{fig:g1g2plots}(a) and compared to the analytical expression of Eq. \eqref{eq:firsorderCorr}. The level at $(|\overline{\mathcal{E}}_0|/g)^2$, which is the coherent part of the spectrum, coincides with the prediction of the Maxwell-Bloch equations for the product $\braket{\sigma_{+}}_{\rm ss} \braket{\sigma_{-}}_{\rm ss}$ in the lower branch of the absorptive bistability curve when $\gamma \to 0$, as well as with the corresponding neoclassical expressions \cite{Alsing1991, QO2}. The deviation caused by quantum fluctuations coincides with the first-order correlation function of the intracavity field [Eq. (16.203) of \cite{QO2}]. This can be anticipated from the similar form of the expressions for $a^{\dagger}a=(1/2)(|1,U \rangle \langle 1,U| +|1,L \rangle \langle 1,L| +|1,U \rangle \langle 1,L|+ |1,L \rangle \langle 1,U|)$ and $\sigma_{+}\sigma_{-}=(1/2)(|1,U \rangle \langle 1,U| +|1,L \rangle \langle 1,L| -|1,U \rangle \langle 1,L|- |1,L \rangle \langle 1,U|)$ within the effective three-level model (in its ``bare'' JC-dressed form) we are here considering \cite{Shamailov2010}.
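As an illustrative aside, Eq. \eqref{eq:firsorderCorr} can be cross-checked numerically. The following minimal Python sketch uses the open-source QuTiP toolbox; the Lindblad operators $\sqrt{2\kappa}\,a$ and $\sqrt{\gamma}\,\sigma_-$, the resonant-drive Hamiltonian in the frame rotating at $\omega_0$, and the particular parameter values are assumptions chosen only so that the damping combination $\kappa+\gamma/2$ of the text appears.
\begin{verbatim}
# Minimal sketch (assumed conventions): <sigma_+(0) sigma_-(tau)>_ss for the
# weakly driven JC oscillator, compared with the analytic expression above
# (written in the frame rotating at omega_0, so the e^{-i omega_0 tau} factor drops).
import numpy as np
import qutip as qt

N = 10                                        # Fock-space truncation
g, kappa, gamma, E0 = 1.0, 0.01, 0.012, 0.05  # illustrative values, E0/g << 1

a  = qt.tensor(qt.destroy(N), qt.qeye(2))
sm = qt.tensor(qt.qeye(N), qt.destroy(2))     # two-level lowering operator
H  = g * (a.dag() * sm + a * sm.dag()) + E0 * (a + a.dag())
c_ops = [np.sqrt(2 * kappa) * a, np.sqrt(gamma) * sm]

taulist = np.linspace(0.0, 40.0 / g, 801)
# quantum regression theorem; reverse=True selects <A(0)B(tau)> in QuTiP's
# convention (check the ordering convention of the installed version)
corr = qt.correlation_2op_1t(H, None, taulist, c_ops, sm.dag(), sm, reverse=True)

r2 = (E0 / g) ** 2
G  = 0.5 * (kappa + gamma / 2)
analytic = r2 + 2 * r2**2 * np.exp(-G * taulist) * (1 + G * taulist) * np.cos(g * taulist)
print(np.max(np.abs(corr - analytic)))        # small for E0/g << 1
\end{verbatim}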
The fluorescence spectrum, then, to dominant order in $|\overline{\mathcal{E}}_0|/g$ and including both coherent and incoherent parts, reads
\begin{equation}
\begin{aligned}
T(\omega)&=\frac{1}{2\pi}\int_{-\infty}^{\infty}d\tau \exp(i\omega\tau)\braket{\sigma_{+}(0)\sigma_{-}(\tau)}_{\rm ss}\\
&=\frac{1}{\pi}{\rm Re}\left[\int_{0}^{\infty}d\tau \exp(i\omega\tau)\braket{\sigma_{+}(0)\sigma_{-}(\tau)}_{\rm ss}\right]\\
&=\left(\frac{|\overline{\mathcal{E}}_0|}{g}\right)^2\delta(\omega-\omega_0) + \left(\frac{|\overline{\mathcal{E}}_0|}{g}\right)^4 \frac{2}{\pi}\\
&\times \Bigg\{\frac{[\frac{1}{2}(\kappa + \gamma/2)]^3}{\{[\frac{1}{2}(\kappa + \gamma/2)]^2 + (\omega-\omega_0 + g)^2\}^2}\\
&+\frac{[\frac{1}{2}(\kappa + \gamma/2)]^3}{\{[\frac{1}{2}(\kappa + \gamma/2)]^2 + (\omega-\omega_0 - g)^2\}^2}\Bigg\}.
\end{aligned}
\end{equation}
As we expect, the incoherent part of the spectrum coincides with the spectrum of the transmitted light, both revealing a vacuum Rabi doublet with squeezing-induced linewidth narrowing. The coefficient of the delta function in the coherent part of the spectrum coincides with the prediction of the Maxwell-Bloch equations for $\gamma \to 0$ and $|\overline{\mathcal{E}}_0|/g \leq 1/(2\sqrt{2})$, namely $\braket{\tilde{\sigma}_{+}}_{\rm ss}\braket{\tilde{\sigma}_{-}}_{\rm ss}=(|\overline{\mathcal{E}}_0|/g)^2$. For the transmitted light, the mean-field prediction in that limit is $\braket{\tilde{a}}_{\rm ss}=\braket{\tilde{a}^{\dagger}}_{\rm ss}=0$, consistent with the absence of a coherent part in the spectrum within our perturbative treatment in powers of $|\overline{\mathcal{E}}_0|/g$.
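As a quick arithmetic check, each squared Lorentzian above carries unit weight, $\int_{-\infty}^{\infty}\frac{2}{\pi}\,\Gamma^3/(\Gamma^2+x^2)^2\,dx=1$ with $\Gamma=\frac{1}{2}(\kappa+\gamma/2)$, so the incoherent part of $T(\omega)$ integrates to $2(|\overline{\mathcal{E}}_0|/g)^4$, i.e., to the $\tau=0$ value of the fluctuation term in Eq. \eqref{eq:firsorderCorr}. A short sketch of this bookkeeping (the parameter values are placeholders):
\begin{verbatim}
# Sanity check: the incoherent doublet integrates to 2*(|E0|/g)^4.
import numpy as np
from scipy.integrate import quad

kappa, gamma, r2 = 0.01, 0.012, 0.0025      # r2 = (|E0|/g)^2, illustrative values
Gam = 0.5 * (kappa + gamma / 2)

weight, _ = quad(lambda x: Gam**3 / (Gam**2 + x**2) ** 2, -np.inf, np.inf)
incoherent_power = r2**2 * (2 / np.pi) * 2 * weight    # two peaks, at +g and -g
print(incoherent_power, 2 * r2**2)                     # both ~ 2*(|E0|/g)^4
\end{verbatim}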
\subsection{Intensity correlation function and waiting-time distribution}
We will now extend our analysis to the second-order coherence of fluorescence. The intensity correlation function for the photons scattered by the two-level atom is defined as
\begin{equation}\label{eq:g2A}
\begin{aligned}
g^{(2)}_{\rm ss}(\tau)& \equiv \frac{\braket{\sigma_{+}(0)\sigma_{+}(\tau)\sigma_{-}(\tau)\sigma_{-}(0)}_{\rm ss}}{\braket{\sigma_{+}\sigma_{-}}_{\rm ss}^2}\\
&=\frac{{\rm tr} \left\{[e^{\tilde{\mathcal{L}}\tau}\rho_{\rm cond}] \sigma_{+}\sigma_{-}\right\}}{\braket{\sigma_{+}\sigma_{-}}_{\rm ss}}\\
&=\frac{{\rm tr} [\sigma_{+}\sigma_{-} \tilde{D}(\tau)]}{\braket{\sigma_{+}\sigma_{-}}_{\rm ss}},
\end{aligned}
\end{equation}
where $\rho_{\rm cond}\equiv (\sigma_{-}\rho_{\rm ss}\sigma_{+})/[{\rm tr}(\sigma_{-}\rho_{\rm ss}\sigma_{+})]$ is the conditional density matrix following the emission of one photon from the atom, evolving in time as $\tilde{D}(\tau) \equiv e^{\tilde{\mathcal{L}}\tau}\rho_{\rm cond}$. As before, we first compute the matrix elements of the (unnormalized) conditional operator $\sigma_{-}\rho_{\rm ss}\sigma_{+}$ in the dressed-state basis. We find,
\begin{equation}
\begin{aligned}
&\overline{D}_{1,(U,L);G}\equiv \braket{\tilde{\psi}_{1,(U,L)}|\sigma_{-}\rho_{\rm ss}\sigma_{+}|\tilde{\psi}_{G}}\\
&=\langle\tilde{\psi}_{1,(U,L)}|\sigma_{-}\rho_{\rm ss}| 2 \rangle_{A} |0 \rangle_{a}\\
&=-i\frac{1}{\sqrt{2}} \frac{\overline{\mathcal{E}}_0}{g} \left[ \left(\frac{|\overline{\mathcal{E}}_0|}{g}\right)^2 + \left(\frac{|\overline{\mathcal{E}}_0|}{g}\right)^4 \right].
\end{aligned}
\end{equation}
Similarly,
\begin{equation}
\begin{aligned}
&\overline{D}_{G; 1,(U,L)}\equiv \braket{\tilde{\psi}_{G}|\sigma_{-}\rho_{\rm ss}\sigma_{+}|\tilde{\psi}_{1,(U,L)}}\\
&=\frac{i}{\sqrt{2}}\frac{\overline{\mathcal{E}}_0^{*}}{g}\langle\tilde{\psi}_{G}|\sigma_{-}\rho_{\rm ss}| 2 \rangle_{A} |0 \rangle_{a}\\
&=\frac{i}{\sqrt{2}}\frac{\overline{\mathcal{E}}_0^{*}}{g}\left[ \left(\frac{|\overline{\mathcal{E}}_0|}{g}\right)^2 + \left(\frac{|\overline{\mathcal{E}}_0|}{g}\right)^4 \right].
\end{aligned}
\end{equation}
As for the matrix elements of the conditional density matrix within the same family of dressed states, we have
\begin{equation}
\overline{D}_{G;G} \equiv \braket{\tilde{\psi}_{G}|\sigma_{-}\rho_{\rm ss}\sigma_{+}|\tilde{\psi}_{G}}=\left(\frac{|\overline{\mathcal{E}}_0|}{g}\right)^2 + \left(\frac{|\overline{\mathcal{E}}_0|}{g}\right)^4
\end{equation}
and
\begin{equation}
\begin{aligned}
&\overline{D}_{1,(U,L); 1,(U,L)}\equiv\braket{\tilde{\psi}_{1,(U,L)}|\sigma_{-}\rho_{\rm ss}\sigma_{+}|\tilde{\psi}_{1,(U,L)}}\\
&=\frac{i}{\sqrt{2}}\frac{\overline{\mathcal{E}}_0^{*}}{g}\langle\tilde{\psi}_{1,(U,L)}|\sigma_{-}\rho_{\rm ss}| 2 \rangle_{A} |0 \rangle_{a}\\
&\approx \left(\frac{i}{\sqrt{2}}\frac{\overline{\mathcal{E}}_0^{*}}{g}\right)\langle\tilde{\psi}_{1,(U,L)}|\sigma_{-}|\tilde{\psi}_{G}\rangle \langle \tilde{\psi}_{G}| 2 \rangle_{A} |0 \rangle_{a} =\frac{1}{2}\left(\frac{|\overline{\mathcal{E}}_0|}{g}\right)^4.
\end{aligned}
\end{equation}
The matrix elements for the two-level excitation operator read
\begin{equation}
\begin{cases}
&\braket{\tilde{\psi}_G|\sigma_{+}\sigma_{-}|\tilde{\psi}_{1,U}}=-(i/\sqrt{2})(\overline{\mathcal{E}}_0^{*}/g), \\ &\braket{\tilde{\psi}_{1,U}|\sigma_{+}\sigma_{-}|\tilde{\psi}_{G}}=(i/\sqrt{2})(\overline{\mathcal{E}}_0/g), \\
&\braket{\tilde{\psi}_G|\sigma_{+}\sigma_{-}|\tilde{\psi}_{1,L}}=-(i/\sqrt{2})(\overline{\mathcal{E}}_0^{*}/g), \\ &\braket{\tilde{\psi}_{1,L}|\sigma_{+}\sigma_{-}|\tilde{\psi}_{G}} =(i/\sqrt{2})(\overline{\mathcal{E}}_0/g), \\
&\braket{\tilde{\psi}_G|\sigma_{+}\sigma_{-}|\tilde{\psi}_{G}}=(|\overline{\mathcal{E}}_0|/g)^2, \\ &\braket{\tilde{\psi}_{1,(U,L)}|\sigma_{+}\sigma_{-}|\tilde{\psi}_{1,(U,L)}}=1/2.
\end{cases}
\end{equation}
Now, the matrix elements accounting for the excitation and de-excitation of the first ``excited'' states obey one and the same equation of motion, since the second terms on the right-hand side of the four equations corresponding to the set \eqref{eq:generalSA} (with $R \to D$) only contribute further higher-order corrections. In particular, the equations in question reduce to
\begin{equation}\label{eq:melbetg2}
\dot{\overline{D}}_{G (1,[U,L]); 1,[U,L] (G)}=-\frac{1}{2}(\kappa + \gamma/2)\overline{D}_{G (1,[U,L]); 1,[U,L] (G)},
\end{equation}
giving rise to a simple exponential decay of correlations. Putting all these pieces together, we obtain
\begin{equation}\label{eq:g2DS}
g^{(2)}_{\rm ss}(\tau)=1+e^{-(\kappa+\gamma/2)\tau}-2e^{-\frac{1}{2}(\kappa+\gamma/2)\tau}\cos(g\tau).
\end{equation}
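As with the first-order coherence, Eq. \eqref{eq:g2DS} can be checked against the full master equation through the regression theorem. A minimal Python/QuTiP sketch (same assumed conventions and illustrative parameters as in the sketch above) is:
\begin{verbatim}
# Sketch: g2(tau) of the side-scattered light from the conditional state
# rho_cond = sm rho_ss sp / <sp sm>_ss, evolved with the full master equation.
import numpy as np
import qutip as qt

N, g, kappa, gamma, E0 = 10, 1.0, 0.01, 0.012, 0.05
a  = qt.tensor(qt.destroy(N), qt.qeye(2))
sm = qt.tensor(qt.qeye(N), qt.destroy(2))
H  = g * (a.dag() * sm + a * sm.dag()) + E0 * (a + a.dag())
c_ops = [np.sqrt(2 * kappa) * a, np.sqrt(gamma) * sm]

rho_ss = qt.steadystate(H, c_ops)
n_ss   = qt.expect(sm.dag() * sm, rho_ss)
rho_cond = sm * rho_ss * sm.dag() / n_ss          # state after one sideways emission

taulist = np.linspace(0.0, 40.0 / g, 801)
out = qt.mesolve(H, rho_cond, taulist, c_ops, e_ops=[sm.dag() * sm])
g2_num = out.expect[0] / n_ss

G = 0.5 * (kappa + gamma / 2)
g2_an = 1 + np.exp(-2 * G * taulist) - 2 * np.exp(-G * taulist) * np.cos(g * taulist)
print(np.max(np.abs(g2_num - g2_an)))             # small deep in the strong-coupling limit
\end{verbatim}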
For one atom in a cavity, the pure-state factorization within the two-quanta basis yields the following correlation function of the side-scattered light in the weak-excitation limit:
\begin{equation}\label{eq:g2PSfact}
\begin{aligned}
& g^{(2)}_{\rm ss}(\tau)=\Bigg\{1-e^{-\frac{1}{2}(\kappa+\gamma/2)\tau} \\
&\times \left[\cos(g^{\prime}\tau) + \frac{\kappa-\gamma^{\prime}/2}{\kappa+\gamma/2}\frac{\frac{1}{2}(\kappa + \gamma/2)}{g^{\prime}} \sin(g^{\prime}\tau)\right]\Bigg\}^2,
\end{aligned}
\end{equation}
where
$g^{\prime}\equiv \sqrt{g^2-\frac{1}{4}(\kappa-\gamma/2)^2}$ and $\gamma^{\prime}\equiv \gamma (1+2C_1)$ is the cavity-enhanced emission rate featuring the (single-atom) co-operativity parameter $2C_1 \equiv 2g^2/(\kappa \gamma)$. In the strong-coupling limit, $g \gg \kappa, \gamma/2$, the two expressions of Eqs. \eqref{eq:g2DS} and \eqref{eq:g2PSfact} \textemdash{both} independent of the drive strength \textemdash{practically} coincide as we can see in frames (b)-(d) of Fig. \ref{fig:g1g2plots}. The former expression is an expansion of the square in the latter when ignoring the second term in the square brackets. The closeness between the two expressions in the strong-coupling limit may be traced to the fact that the ``ground'' state of Eq. \eqref{eq:gs} agrees with the pure state in the two-quanta basis [see Eq. (2) of \cite{Carmichael1991}] and it is the coefficient of $\ket{2}_A \ket{0}_a$, of order $|\overline{\mathcal{E}}_0|/g$, that ultimately determines the intensity correlation function. The coefficients $\tilde{\alpha}(t)$ and $\tilde{\beta}(t)$ featuring in the one-quantum expansion $\ket{\tilde{\psi}(\tau)}=\ket{1}_{A}\ket{0}_a + \tilde{\alpha}(\tau) \ket{1}_{A}\ket{1}_a + \tilde{\beta}(\tau)\ket{2}_{A}\ket{0}_a,$ obey linear coupled oscillator equations solved by the familiar vacuum Rabi oscillations between the one-quantum amplitudes. Since there is a separation of orders in Eqs. \eqref{eq:melbetg2} determining the transitions between the ``ground'' and ``excited'' states, the damped Rabi oscillation is essentially all that remains; in that sense, there is nothing in Eq. \eqref{eq:g2DS} which is inaccessible to the theory of linear coupled oscillators.
\begin{figure}
\caption{{\it Detecting photons scattered from an atom driven by a cavity mode vs. free-space resonance fluorescence.}}
\label{fig:wtau}
\end{figure}
\begin{figure*}
\caption{{\it Approaching the critical point of a second-order dissipative quantum phase transition for a finite system size.}}
\label{fig:Qfuncs}
\end{figure*}
\begin{figure}
\caption{{\it Transient intracavity field distribution above the critical point.}}
\label{fig:Qfuncevol}
\end{figure}
This reduction, however, provides a limited perspective: the correlation function of Eq. \eqref{eq:g2PSfact} extends beyond the strong-coupling limit unlike its counterpart, as long as we remain in the weak-excitation regime. When we operate closer to the critical point, there is an obvious deviation between the numerically-evaluated intensity correlation function and the prediction of Eqs. \eqref{eq:g2DS} and \eqref{eq:g2PSfact}, first and foremost in terms of their frequency content; the main spectral peak in the Fourier transform of $g_{\rm ss}^{(2)}(\tau)$ splits on either side of $\omega=g$ [see Fig. \ref{fig:g1g2plots}(d)]. When we increase $|\overline{\mathcal{E}}_0|/g$ further, oscillations at $\omega \sim g$ fade away in favor of a more collective response involving competing states spanning the two distinct branches of the split JC ladder \cite{Chough1996}.
To conclude this section on second-order coherence, we note that the current scheme does not allow for the calculation of the intensity correlation function of the forwards-scattered light, since terms of order $(|\overline{\mathcal{E}}_0|/g)^8$ are involved \textemdash{an} observation that calls for the inclusion of the second ``excited''-state doublet. Nevertheless, we find that a rough estimate of $g^{(2)}_{{\rm ss}, \rightarrow}(0)$ coming from $\ket{\tilde{\psi}_G}$,
\begin{equation}\label{eq:g2forward}
\frac{\braket{\tilde{\psi}_G|(a^{\dagger})^2a^2|\tilde{\psi}_G}}{\braket{\tilde{\psi}_G|a^{\dagger}a|\tilde{\psi}_G}^2}=\frac{1}{4} \left(\frac{g}{|\overline{\mathcal{E}}_0|}\right)^4=\frac{1}{4r^2},
\end{equation}
is reasonably close to the result obtained numerically from the full solution of the ME, predicting the presence of significant photon bunching. For a large spontaneous emission enhancement factor $2C_1 \equiv 2g^2/(\kappa \gamma)$, the forwards photon scattering may also be highly bunched in the weak-excitation regime of the bad-cavity limit, with $g_{\rightarrow}^{(2)}(0)=(1-4C_1^2)^2$ \cite{RiceCarmichaelIEEE}, as well as for the forwards-scattering channel comprising the emission of a free two-state atom excited by the output of a coherently driven cavity with low flux in a cascaded open quantum systems formulation \cite{Carmichael1993}. In both these instances, however, the zero-delay intensity correlation function is independent of the drive amplitude in the limit $|\overline{\mathcal{E}}_0| \to 0$, unlike the asymptotic behavior anticipated by Eq. \eqref{eq:g2forward} for the strong-coupling limit.
Next, let us trace how the coherence associated with a well-defined JC ladder is manifested in a quantity closely related to $g^{(2)}_{\rm ss}(\tau)$, the photoelectron waiting-time distribution; this is the distribution of time intervals $\tau$ between two successive photoelectric detection events. The waiting-time distribution for side-scattered photons at unit detection efficiency is defined as \cite{QO1}
\begin{equation}\label{eq:wss}
w_{\rm ss}(\tau) \equiv \gamma \frac{{\rm tr}[\sigma_{+}\sigma_{-}\, e^{\overline{\mathcal{L}}\tau}(\sigma_{-}\rho_{\rm ss} \sigma_{+})]}{\braket{\sigma_{+}\sigma_{-}}_{\rm ss}},
\end{equation}
where $\overline{\mathcal{L}}\equiv \mathcal{L}-\gamma \sigma_{-} \cdot \sigma_{+}$, imposing the condition that no photons are emitted sideways in the interval $\tau$. It is instructive to compare the numerically computed function from Eq. \eqref{eq:wss} against the waiting-time distribution of ordinary resonance fluorescence from a free atom driven by a coherent field. Such a field is to be assigned an effective amplitude $\overline{\mathcal{E}}_0^{\prime}=g \braket{a}_{\rm ss}$, where $\braket{a}_{\rm ss}$ is the steady-state intracavity amplitude numerically obtained from the ME \eqref{eq:ME1}:
\begin{equation}\label{eq:resflwt}
\tilde{w}_{\rm ss}(\tau)=\gamma e^{-(\gamma/2)\tau}\frac{Y^2}{1-2Y^2}\left[\cosh\left(\frac{\gamma \tau}{2}\sqrt{1-2Y^2}\right)-1\right].
\end{equation}
In Eq. \eqref{eq:resflwt}, $Y \equiv 2\sqrt{2} |\overline{\mathcal{E}}_0^{\prime}|/\gamma$ is the dimensionless drive parameter whose square determines the ratio of the incoherent to the coherent scattering intensities. For the weakly resonantly driven JC oscillator, on the other hand, this ratio is solely determined by $(|\overline{\mathcal{E}}_0|/g)^2$. In Fig. \ref{fig:wtau}, we observe that the decaying Rabi oscillations in $w_{\rm ss}(\tau)$ are bounded from below by $\tilde{w}_{\rm ss}(\tau)$ plotted for $Y \simeq 0.07$, with a peak occurring at about $\gamma \tau=12$. Increasing the dissipation rates by one order of magnitude, we find that the effective ordinary resonance fluorescence model maintains the same value of $Y^2 \ll 1$ and therefore the same position where $\tilde{w}_{\rm ss}(\tau)$ attains its maximum. However, Rabi oscillations survive only up to $g\tau \simeq 50$. As we move away from the strong-coupling limit at weak excitation, numerical simulations show that the mean time interval between photopulses is in good agreement with the prediction of the theory for resonance fluorescence, namely $\tau_{\rm av}=(2/\gamma)(1+Y^2)/Y^2 \simeq 2/(\gamma Y^2)$. Nevertheless, the initial high-frequency oscillations at $g$ produce a continual disagreement with respect to the position and the value of the maximum in the probability distribution \textemdash{a} distinct mark left by the dressed JC energy levels.
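For reference, the waiting-time distribution of Eq. \eqref{eq:wss} can be evaluated directly from the modified generator $\overline{\mathcal{L}}$. A minimal Python/QuTiP sketch (same assumed conventions and placeholder parameters as in the earlier sketches, evaluated only over the first Rabi periods) is:
\begin{verbatim}
# Sketch of Eq. (wss): propagate the conditional state under
# Lbar = L - gamma * (sm . sp), i.e. with the sideways feeding term removed,
# and record gamma * <sigma_+ sigma_->.
import numpy as np
import qutip as qt

N, g, kappa, gamma, E0 = 10, 1.0, 0.01, 0.012, 0.05
a  = qt.tensor(qt.destroy(N), qt.qeye(2))
sm = qt.tensor(qt.qeye(N), qt.destroy(2))
H  = g * (a.dag() * sm + a * sm.dag()) + E0 * (a + a.dag())
c_ops = [np.sqrt(2 * kappa) * a, np.sqrt(gamma) * sm]

rho_ss   = qt.steadystate(H, c_ops)
rho_cond = sm * rho_ss * sm.dag() / qt.expect(sm.dag() * sm, rho_ss)

Lbar = qt.liouvillian(H, c_ops) - gamma * qt.sprepost(sm, sm.dag())

taulist = np.linspace(0.0, 100.0 / g, 4001)   # only the initial Rabi oscillations
# mesolve accepts a Liouvillian in place of H; the evolution is not trace preserving
out = qt.mesolve(Lbar, rho_cond, taulist, [], e_ops=[sm.dag() * sm])
w_ss = gamma * out.expect[0]
print(w_ss.max())
\end{verbatim}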
\section{Organization of phase bimodality for a finite system size}
\label{sec:phasebmd}
We will close our discussion on the coherence properties of resonant light-matter interaction by making a brief detour to the onset of the well-known dissipative quantum phase transition associated with the critical point $|\overline{\mathcal{E}}_0|=g/2$ and the emergence of spontaneous dressed state polarization in the limit of ``zero system size'' $\gamma \to 0$~\cite{Carmichael2015}. We will focus specifically on the distribution of the intracavity field for increasing values of the ratio $\gamma/(2\kappa)$ as we approach the critical point from below. To that end, in Fig. \ref{fig:Qfuncs} we plot the steady-state {\it quasi}probability distribution $Q_{\rm ss}(\alpha)$ (with $\alpha=x+iy$) calculated from \cite{Alsing1991}
\begin{equation}\label{eq:Qdef}
Q_{\rm ss}(\alpha)=\frac{1}{\pi}\braket{\alpha|\tilde{\rho}_{\rm ss,\, c}|\alpha}=\frac{1}{\pi}e^{-|\alpha|^2}\sum_{n,m}\frac{\alpha^{*n}\alpha^m}{\sqrt{n!m!}} \braket{n|\tilde{\rho}_{\rm ss,\, c}|m},
\end{equation}
where
\begin{equation}
\braket{n|\tilde{\rho}_{\rm ss,\, c}|m} \equiv \lim_{t \to \infty} {}_{a}\langle n|[{}_{A}\langle 2| \tilde{\rho}(t) |2\rangle _{A} + {}_{A}\langle 1| \tilde{\rho}(t) |1\rangle _{A}]|m\rangle_{a}
\end{equation}
and $\tilde{\rho}(t)$ is the system density operator in a frame rotating at $\omega_0$. Motivated by Figs. 2-4 of \cite{Alsing1991}, all of which are plotted for a constant $g/\kappa=10$, we instead coherently drive the cavity mode such that a constant empty-cavity steady-state excitation amplitude $|\overline{\mathcal{E}}_0|/\kappa=10$ is maintained throughout while the system size parameter $n_{\rm sc}=\gamma^2/(8g^2)$ (the saturation photon number of absorptive optical bistability) changes from $0$ to $4.1 \times 10^{-3}$ in Panel I, and from $0$ to $5 \times 10^{-3}$ in Panel II of Fig. \ref{fig:Qfuncs}. For all the operating conditions used in this figure, the Maxwell-Bloch equations predict a single output state (according to the state equation for absorptive optical bistability \cite{Savage1988}) with $|\alpha_{\rm ss}|=|\overline{\mathcal{E}}_0|/\kappa=10$ \textemdash{the} steady-state cavity amplitude in the absence of the two-state atom \textemdash{while} the neoclassical equations predict $|\alpha_{\rm ss}|=0$, since $|\overline{\mathcal{E}}_0| \leq g/2$ in both panels. We also note that for $|\overline{\mathcal{E}}_0|/g \geq 1/(2\sqrt{2})$, which is the case for the drive amplitude used in Fig. \ref{fig:Qfuncs}, the Maxwell-Bloch equations in the limit $\gamma \to 0$ also yield a steady state with the empty-cavity excitation (see Sec. 16.3.1 of \cite{QO2}).
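For orientation, the distribution of Eq. \eqref{eq:Qdef} amounts to the Husimi function of the reduced intracavity field. It can be generated numerically along the following lines (Python/QuTiP sketch; the drive phase, the Lindblad conventions and the parameter values, chosen below the critical point and smaller than those of Fig. \ref{fig:Qfuncs} so as to keep the Fock basis small, are assumptions):
\begin{verbatim}
# Sketch of Eq. (Qdef): steady-state Q function of the reduced cavity field.
import numpy as np
import qutip as qt

N, g, kappa, gamma = 60, 10.0, 1.0, 0.5   # illustrative, not the values of the figure
E0 = 1j * 4.0                             # |E0| < g/2, below the critical point

a  = qt.tensor(qt.destroy(N), qt.qeye(2))
sm = qt.tensor(qt.qeye(N), qt.destroy(2))
H  = g * (a.dag() * sm + a * sm.dag()) + (np.conj(E0) * a + E0 * a.dag())
c_ops = [np.sqrt(2 * kappa) * a, np.sqrt(gamma) * sm]

rho_ss    = qt.steadystate(H, c_ops)
rho_field = rho_ss.ptrace(0)              # trace out the two-level atom

xvec = np.linspace(-8, 8, 161)
Q = qt.qfunc(rho_field, xvec, xvec)       # Q_ss(alpha = x + i y)
print(Q.max())
\end{verbatim}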
As we can observe in these distributions, the quantum picture differs substantially from the mean-field predictions: the steady-state intracavity excitation increases with $n_{\rm sc}$ while the field distribution always maintains a symmetry with respect to the real axis. The distribution peaks occur at complex conjugate amplitudes which are always bounded by $|\overline{\mathcal{E}}_0|/\kappa$ along the two orthogonal directions set by $\overline{\mathcal{E}}_0$; those peaks mark the attractors organizing symmetry breaking in the background of appreciable spontaneous emission. The excitation path joining the two symmetric states is eventually occupied with the largest excitation probability in the form of a single elongated peak inscribed along the ``bridge'' formed between the two branches of the JC split ladder, as we can see in frames (d) of both panels; the two branches are mixed by increasingly more frequent spontaneous emission events [see also Fig. 6 of \cite{Carmichael2015}, depicting complex-amplitude bimodality, as well as Fig. 3 of \cite{Kilin91} for one of the earliest accounts on phase bimodality]. While the excitation landscape has been set up below the critical point, as we see in Panel I, the occurrence of spontaneous dressed-state polarization at $|\overline{\mathcal{E}}_0|=g/2$ decreases substantially the vacuum-state occupation probability for every value of $n_{\rm sc}$.
To get a better appreciation of the dynamics associated with spontaneous symmetry breaking, in Fig. \ref{fig:Qfuncevol} we plot the intracavity field distribution along the transient evolution leading to the steady state, when driving above the critical point and starting from the ground state of the JC oscillator. The distribution peaks follow a path constrained to the interior of the neoclassical curve parametrized by $(x(\overline{\varepsilon}_0), y(\overline{\varepsilon}_0))$ with $x(\overline{\varepsilon}_0)=(\overline{\varepsilon}_0/\kappa) \{1-[g/(2\overline{\varepsilon}_0)]^2\}$ and $y(\overline{\varepsilon}_0)=\pm [g/(2\kappa)] \sqrt{1-[g/(2\overline{\varepsilon}_0)]^2}$, where $0\leq \overline{\varepsilon}_0 \leq |\overline{\mathcal{E}}_0|$ (here we take $\overline{\mathcal{E}}_0=i|\overline{\mathcal{E}}_0|$). In this transient manifestation of quantum criticality, spontaneous symmetry breaking occurs from the very beginning of the evolution to the steady-state distribution depicted in the inset. In fact, the later stages of this evolution which are not imprinted on the phase space primarily establish the role of spontaneous emission in promoting switching between the two ladders; the two main peaks occur almost at the same position but are significantly elongated along the excitation pathway.
\section{Concluding remarks}
In conclusion, we have investigated various aspects of the coherence associated with strong resonant light-matter interaction when transitioning from the weak excitation regime to the onset of a dissipative quantum phase transition of second order. For weak excitation, $2(|\overline{\mathcal{E}}_0|/g) \ll 1$, all calculated observables and correlation functions are directly expressed in terms of the degree of squeezing of the intracavity field. The incoherent spectra for both forwards and side-scattered light are sums of the same squared Lorentzian distributions \textemdash{evidence} of fluctuation squeezing. Moreover, the decoherence effects due to cavity photon loss and spontaneous emission are placed on equal footing, since the correlation functions of first and second order are only functions of the sum $\kappa + \gamma/2$. This is the case for both the (incoherent) fluorescence spectrum and the spectrum of the transmitted light.
For stronger driving, however, when the higher ranks of the two branches forming the split JC ladder are being populated, the situation is markedly different. The two decoherence channels are assessed in terms of their ability to precipitate ladder-switching events, and spontaneous emission is special in that respect, as reflected in the role of the system-size parameter \textemdash{a} function of the ratio $\gamma/g$. Pictorially, what we are in essence dealing with is the behavior of quantum fluctuations along the line of zero detuning ($\Delta \omega=0$) in visualizations of out-of-equilibrium quantum dynamics like Fig. 1 or Fig. 2(a) of \cite{Carmichael2015}. Those visualizations are shaped in a fundamental way by the inputs and outputs in the scattering configuration modeled by the JC oscillator. It remains for the experiment to tie the various pieces together while tracing this line, from the squeezing-induced linewidth narrowing of the weak-excitation regime to the occurrence of spontaneous symmetry breaking at high excitation.
The data underlying this Communication are available at: \href{http://dx.doi.org/10.17632/s5srj363gp.1}{http://dx.doi.org/10.17632/s5srj363gp.1}.
\begin{acknowledgments}
I thank P. Rabl and M. Schuler for providing an initial efficient program solving a system of linear differential equations as the matrix form of a related Lindblad master equation. I am also grateful to H. J. Carmichael for valuable discussions. This work was supported by the Swedish Research Council (VR) alongside the Knut and Alice Wallenberg foundation (KAW).
\end{acknowledgments}
\end{document}
\begin{document}
\title{Dynamics of entanglement of a three-level atom in motion interacting with two coupled modes including parametric down conversion}
\newcommand{\norm}[1]{\left\Vert#1\right\Vert}
\newcommand{\abs}[1]{\left\vert#1\right\vert}
\newcommand{\set}[1]{\left\{#1\right\}}
\begin{abstract}
In this paper, a model by which we study the interaction between a moving three-level atom and a two-mode field injected simultaneously into a bichromatic cavity is considered; the three-level atom is assumed to be in a $\Lambda$-type configuration. As a result, both the atom-field and the field-field interaction (parametric down conversion) appear. It is shown that, by applying a canonical transformation, the introduced model can be reduced to a well-known form of the generalized Jaynes-Cummings model. Under particular initial conditions, which may be prepared for the atom and the field, the time evolution of the state vector of the entire system is analytically evaluated. Then, the dynamics of the atom is discussed in detail by considering the `atomic population inversion' and two different measures of entanglement, i.e., the `von Neumann entropy' and the `idempotency defect'. It is deduced from the numerical results that the duration and the maximum amount of the considered physical quantities can be suitably tuned by selecting the proper field-mode structure parameter $p$ and the detuning parameters.
\end{abstract}
\section{Introduction}\label{sec-intro}
Entanglement, an unbreakable quantum correlation between parts of a multipartite system, is one of the most essential characteristics of quantum mechanical systems and plays a key role within new information technologies \cite{ benenti}. Also, it is an important resource in many interesting applications in fields related to quantum computation as well as quantum information \cite{bennett}. The appearance of entanglement in the interaction between light and matter in cavity QED is of special interest, since the atom-field interaction naturally produces entangled states. \\
There is a fully quantum mechanical and of course an exact (in the rotating wave approximation) model that describes a two-level atom interacting with a single-mode field, as a very simplified version of atom-field interaction, which is called the Jaynes-Cummings model (JCM) \cite{JCM1,JCM2}.
Many generalizations have been proposed to modify the JCM in the literature \cite{GJCM1,GJCM2,GJCM3,GJCM4,GJCM5,GJCM6,GJCM7,GJCM8,GJCM9}. In addition, various studies have been published to quantify the atom-field entangled states, using the standard JCM (and also the generalized JCM). For instance, a general formalism for a $\Lambda$-type three-level atom interacting with a correlated two-mode field is presented by Abdel-Aty {\it et al} \cite{aty}. The authors found the degree of entanglement (DEM) for their system by obtaining the density matrix operator. The entanglement properties of a cavity field generated from a laser-driven collective three-level atomic ensemble (in a $V$-type configuration) inside a two-mode cavity including the spontaneously generated coherence have been investigated by Tang {\it et al} \cite{tang}. Entanglement dynamics, as measured by concurrence, of the tripartite system of one atom and the two cavity modes has been discussed by Abdel-Aty {\it et al} \cite{abdel-aty}. Dynamics of entanglement and other nonclassical properties of a $V$- and a $\Lambda$-type three-level atom interacting with a single-mode field in a Kerr medium with intensity-dependent coupling and in the presence of the detuning parameters have been studied in \cite{zait} and \cite{us} by Zait and us, respectively. In particular, we illustrated that the strength and time interval of nonclassicality aspects are more visible in $\Lambda$-type than $V$-type three-level atoms \cite{us}. Recently, entanglement dynamics of the nonlinear interaction between a three-level atom (in a $\Lambda$ configuration) and a two-mode cavity field in the presence of a cross-Kerr medium and its deformed counterpart \cite{newhonarasa}, intensity-dependent atom-field coupling and the detuning parameters has been discussed by us \cite{usJOSA,usJOSA1}. \\
On one hand, various generalizations to modify the JCM have been proposed in the literature. For instance, one may consider a two-mode field injected simultaneously into a high-Q two-mode bichromatic cavity \cite{abd-AOP} so that the atom interacts with each field individually as well as with both fields. As a result, other nonlinearities may occur during the atom-field interaction; for example, one may refer to switching, modulation and frequency selection of radiation in optical communication networks \cite{net}. One of the main goals of this research is to investigate the effect of the field-field interaction, namely, parametric down conversion, on the atomic population inversion as well as on the entanglement dynamics between the subsystems (atom and field).
On the other hand, due to the fact that in any atom-field interaction the atom may not be exactly static during the interaction, the influence of atomic motion on the interaction dynamics should be taken into account. For instance, this effect, together with that of the field-mode structure, on the atomic dynamics (atomic population inversion) has been examined by Joshi {\it et al} \cite{joshi1}. Also, a model in which a moving atom undergoes a two-photon transition in a two-mode coherent state field has been studied by Joshi \cite{joshi2}. The authors then compared their own results with those of Schlicher \cite{schlicher}, in which an atom undergoes a one-photon transition. As a further comment about the realization of atomic motion, note that there exist some experiments that are comparable to the interaction of an atom with an electromagnetic pulse \cite{experiment1,experiment2}, in which the interaction of an atom with cavity eigenmodes of different shape functions is studied. \\
In this paper, we try to present a model that describes a moving three-level atom (in a $\Lambda$-configuration) interacting with a two-mode field injected simultaneously in a bichromatic cavity in the presence of the detuning parameters.
Apart from other new features of our work, in particular, we investigate individually and simultaneously the effects of atomic motion (via varying the field-mode structure parameter) and the detuning parameters on some physical criteria which will be studied in detail.
Indeed, the main goal of the present paper is to discuss the effects of these parameters on the physical features of the state vector of the whole system. To achieve this purpose, we examine the dynamics of the atomic population inversion, the time evolution of the field entropy, by which the degree of entanglement (DEM) between the subsystems is determined, and the decoherence (coherence loss), which is quantified by the idempotency defect.\\
The remainder of the paper is organized as follows: In the next section, we obtain the state vector of the whole system using the generalized JCM. In section 3, the atomic dynamics is discussed by considering the atomic population inversion. Then, using the von Neumann approach, the time evolution of the field entropy is studied in section 4, and section 5 deals with the decoherence by considering the linear entropy. Finally, section 6 contains a summary and concluding remarks.
\section{Introducing the model and its solution}
This section is devoted to describe the interaction between a moving three-level atom and two coupled modes of the cavity field, taking into account the field-field interaction by considering parametric down conversion. So, let us assume a model in which the two-mode quantized electromagnetic field oscillating with frequencies $\Omega_{1}$ and $\Omega_{2}$ in an optical cavity interacts with the $\Lambda$-type three-level atom which is free to move in the cavity. In this atomic configuration, the atomic levels are indicated by $|j\rangle$ with energies $\omega_{j}$, where $j=1,2,3$, the transitions $|1\rangle\rightarrow|2\rangle$ and $|1\rangle\rightarrow|3\rangle$ are allowed and the transition $|2\rangle\rightarrow|3\rangle$ is forbidden in the electric-dipole approximation \cite{zubairy}. Anyway, the Hamiltonian describing the dynamics of our above system in the RWA can be written as ($\hbar = c = 1$):
\begin{eqnarray}\label{H}
\hat{H} =\hat{H}_{A}+\hat{H}_{F}+ \hat{H}_{AF} + \hat{H}_{FF},
\end{eqnarray}
where the atomic and field parts of the Hamiltonian read as
\begin{eqnarray}\label{H-PA}
\hat{H}_{A}= \sum_{j=1}^{3} \omega_{j}\hat{\sigma}_{jj}, \;\;\;\;\; \hat{H}_{F}= \sum_{j=1}^{2} \Omega_{j} \hat{a}^{\dag}_{j} \hat{a}_{j},
\end{eqnarray}
and the atom-field and field-field interactions are given by
\begin{eqnarray}\label{H-PB}
\hat{H}_{AF} &=& \sum_{j=1}^{2} \left[ g_{1}^{(j)}f_{1}^{(j)}(z)(\hat{a}_{j}\; \hat{\sigma}_{12}+\hat{\sigma}_{21}\hat{a}_{j}^{\dag})
+ g_{2}^{(j)}f_{2}^{(j)}(z)(\hat{a}_{j}\;\hat{\sigma}_{13}+\hat{\sigma}_{31}\hat{a}_{j}^{\dag}) \right], \nonumber \\
\hat{H}_{FF} &=& \mathbf{g} \left( \hat{a}_{1}^{\dag}\hat{a}_{2} + \hat{a}_{1}\hat{a}_{2}^{\dag} \right),
\end{eqnarray}
where $\hat{\sigma}_{ij}$ is the atomic ladder operator between the levels $|i\rangle$ and $|j\rangle$ defined by $\hat{\sigma}_{ij}=|i\rangle \langle j|,(i,j=1,2,3),\hat{a}_{j}$ ($\hat{a}_{j}^{\dag}$) is the bosonic annihilation (creation) operator of the field mode $j$, the constants $g_{1}^{(j)}, g_{2}^{(j)}$ determine the strength of the atom-field couplings for the mode $j$, and $\mathbf{g}$ denotes the field-field coupling constant. Notice that the influence of atomic motion enters the model through the shape functions $f_{1}^{(j)}(z)$ and $f_{2}^{(j)}(z)$. \\
It is worth noting that a closer look at the atom-field interaction Hamiltonian shows that it may be obtained from the JCM with the atomic motion neglected by changing $g_{i}^{(j)},(i,j=1,2),$ to $g_{i}^{(j)}f_{i}^{(j)}(z)$; i.e., in our model, the atom-field coupling depends on the atomic motion through the shape functions $f_{i}^{(j)}(z)$. \\
Now, in order to obtain the solution of the atom-field system described by the above Hamiltonians, there are three different but equivalent methods, namely the probability amplitudes, the Heisenberg operators and the unitary time evolution operator approaches \cite{zubairy}. Here, the presented formalism for the considered system is based on the time evolution operator method. Before using this approach to reach our goal, it is necessary to introduce the canonical transformations
\begin{eqnarray}\label{Can-Trans}
\hat{a}_{1} = \hat{b}_{1} \cos \theta + \hat{b}_{2} \sin \theta,\;\;\;\;\;\hat{a}_{2} = \hat{b}_{2} \cos \theta - \hat{b}_{1} \sin \theta,
\end{eqnarray}
which is the well-known Bogoliubov-Valatin transformation \cite{BVT1,BVT2,BVT3} and has been introduced in the context of the Bardeen-Cooper-Schrieffer model of superconductivity \cite{HBVT}. In this transformation, the operators $\hat{b}_{i} (\hat{b}_{i}^{\dag}), i=1,2,$ have the same meaning of the operators $\hat{a}_{i} (\hat{a}_{i}^{\dag})$ while $\theta$ is the rotation angle which will be determined later. It is worthwhile to mention that under these transformations, the sum of the photon number of the field is invariant, that is, $\hat{a}_{1}^{\dag} \hat{a}_{1} + \hat{a}_{2}^{\dag} \hat{a}_{2} = \hat{b}_{1}^{\dag} \hat{b}_{1} + \hat{b}_{2}^{\dag} \hat{b}_{2}$.\\
Inserting the canonical transformations in (\ref{Can-Trans}) into the whole Hamiltonian in (\ref{H}) leads us to the following Hamiltonian
\begin{eqnarray}\label{H-F}
\hat{\mathcal{H}} = \hat{H}_{0} + \hat{H}_{1},
\end{eqnarray}
where
\begin{eqnarray}\label{H-0}
\hat{H}_{0} = \sum_{j=1}^{3} \omega_{j}\hat{\sigma}_{jj} + \sum_{j=1}^{2} \mathbf{\Omega}_{j} \hat{b}^{\dag}_{j} \hat{b}_{j},
\end{eqnarray}
and
\begin{eqnarray}\label{H-1}
\hat{H}_{1} = \sum_{j=1}^{2} \left[ \mu_{1}^{(j)}f_{1}^{(j)}(z)(\hat{b}_{j}\; \hat{\sigma}_{12}+\hat{\sigma}_{21}\hat{b}_{j}^{\dag})
+ \mu_{2}^{(j)}f_{2}^{(j)}(z)(\hat{b}_{j}\;\hat{\sigma}_{13}+\hat{\sigma}_{31}\hat{b}_{j}^{\dag}) \right],
\end{eqnarray}
with
\begin{eqnarray}\label{H-Coeff}
\mathbf{\Omega}_{1} &=& \Omega_{1} \cos^{2} \theta + \Omega_{2} \sin^{2} \theta - \mathbf{g} \sin 2 \theta, \nonumber \\
\mathbf{\Omega}_{2} &=& \Omega_{1} \sin^{2} \theta + \Omega_{2} \cos^{2} \theta + \mathbf{g} \sin 2 \theta, \nonumber \\
\mu_{k}^{(1)} &=& g_{k}^{(1)} \cos \theta - g_{k}^{(2)} \sin \theta, \nonumber \\
\mu_{k}^{(2)} &=& g_{k}^{(1)} \sin \theta + g_{k}^{(2)} \cos \theta, \;\;\; k=1,2,
\end{eqnarray}
in which the rotation angle $\theta$ is still unknown and must be determined. To attain this aim, the cross terms coupling the two rotated modes, which originate from the field and field-field parts of the Hamiltonian, should be eliminated. Therefore, one may set the particular choice of angle $\theta$ which reads as
\begin{eqnarray}\label{r}
\theta = \frac{1}{2} \tan^{-1} \left( \frac{2 \mathbf{g}}{ \Omega_{2} - \Omega_{1} } \right).
\end{eqnarray}
With the above selection of $\theta$, the field-field coupling parameter $\mathbf{g}$ will then be in the form
\begin{eqnarray}\label{g}
\mathbf{g} = \frac{\delta \left( \Omega_{2} - \Omega_{1} \right) }{1 - \delta^{2}},
\end{eqnarray}
where $ g_{1}^{(1)}/g_{1}^{(2)} = \delta = g_{2}^{(1)}/g_{2}^{(2)} $.
Looking closely at the relations (\ref{H-0}) and (\ref{H-1}) and comparing them with (\ref{H})-(\ref{H-PB}) shows clearly that the applied canonical transformations simplify the interaction Hamiltonian by eliminating the field-field interaction. In this way, our presented model is reduced to the usual form of the JCM. \\
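As a quick numerical illustration (with arbitrarily chosen values for $\Omega_{1}$, $\Omega_{2}$ and $\mathbf{g}$), one can verify that the rotation angle of Eq. (\ref{r}) indeed removes the $\hat{b}_{1}^{\dag}\hat{b}_{2}$ cross coupling and reproduces the rotated frequencies of Eq. (\ref{H-Coeff}); a short Python sketch reads:
\begin{verbatim}
# Check that theta of Eq. (r) cancels the b1^dag b2 coupling (illustrative values).
import numpy as np

Omega1, Omega2, gff = 1.0, 1.7, 0.25     # field frequencies and field-field coupling g
theta = 0.5 * np.arctan(2 * gff / (Omega2 - Omega1))

# coefficient of b1^dag b2 after a1 = b1 cos + b2 sin, a2 = b2 cos - b1 sin
cross = (Omega1 - Omega2) * np.sin(theta) * np.cos(theta) + gff * np.cos(2 * theta)
print(cross)                             # ~ 0 up to rounding

# rotated mode frequencies, cf. Eq. (H-Coeff)
bOmega1 = Omega1 * np.cos(theta)**2 + Omega2 * np.sin(theta)**2 - gff * np.sin(2 * theta)
bOmega2 = Omega1 * np.sin(theta)**2 + Omega2 * np.cos(theta)**2 + gff * np.sin(2 * theta)
print(bOmega1, bOmega2, bOmega1 + bOmega2, Omega1 + Omega2)   # the trace is preserved
\end{verbatim}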
Anyway, for the next purpose, it is convenient to rewrite the Hamiltonian (\ref{H-F}) in the interaction picture; accordingly one may arrive at
\begin{eqnarray}\label{VI}
V_{I}(t) = \mu f(z) \left[ \hat{b}_{2} \hat{\sigma}_{12} \exp (- i \Delta_{2} t ) + \gamma \hat{b}_{2} \hat{\sigma}_{13} \exp (- i \Delta_{3} t ) \right] + \mathrm{c.c.},
\end{eqnarray}
with
\begin{eqnarray}\label{VI-Coeff}
\mu = g \sqrt{1 + \delta^{2}},\;\;\; \gamma = \mu_{2}^{(2)}/ \mu_{1}^{(2)}, \;\;\; \Delta_{k} = \mathbf{\Omega}_{2} - (\omega_{1} - \omega_{k}),\;\;\;k=2,3,
\end{eqnarray}
where we redefined $g_{1}^{(2)} = g$. In the last step, we have to determine the explicit form of the functions $f_{j}(z)$, $j=1,2$, which correspond to the atomic motion. To achieve this purpose, we restrict the atomic motion to the $z$-axis direction, which is consistent with cavity QED experiments. Therefore, only the $z$-dependence of the field-mode function needs to be taken into account and so the atomic motion may be specified as
\begin{eqnarray}\label{flamb}
f_{i}(z) \rightarrow f_{i}(vt),\;\;\;\;i=1,2,
\end{eqnarray}
where $v$ denotes the atomic velocity. To make the latter discussion more convenient, one may define a $\mathrm{TEM_{mnp_{i}}}$ mode as \cite{joshi1,joshi2,schlicher}
\begin{eqnarray}\label{fz}
f_{i}(z)=\sin(p_{i}\pi vt/L ),
\end{eqnarray}
where $p_{i}$ represents the number of half-wavelengths of the field mode inside a cavity with a length $L$. \\
We can now find the explicit form of the wave function of the entire system by using the standard technique. So let us consider the initial state of the whole system to be in the following form:
\begin{eqnarray}\label{sayi}
|\psi(0)\rangle_{\mathrm{A-F}}=|1\rangle \otimes \sum_{n=0}^{+\infty} \sum_{m=0}^{+\infty} q_{n} q_{m} |n,m \rangle = \sum_{n = 0}^{+\infty} \sum_{m = 0}^{+\infty} q_{n} q_{m} |1, n,m \rangle,
\end{eqnarray}
where $q_{n}$ and $q_{m}$ are the probability amplitudes of the initial radiation field of the cavity. Keeping in mind all the above assumptions and setting $p_{1} =p_{2} \equiv p$ (without loss of generality), the action of the time evolution operator (with the Hamiltonian in (\ref{VI})) on the initial state vector of the system in (\ref{sayi}) leads to the explicit form of the wave function as follows
\begin{eqnarray}\label{say}
\hspace{-2cm} |\psi(t)\rangle &=& \sum_{n=0}^{+\infty} \sum_{m=0}^{+\infty} q_{n} q_{m} \Big[ A(n,m,t) |1,n,m \rangle \nonumber \\
&+& B(n,m+1,t) |2,n,m+1\rangle + C(n,m+1,t)|3,n,m+1\rangle \Big]
\end{eqnarray}
where $A,B$ and $C$ are the atomic probability amplitudes which may be evaluated by a lengthy but straightforward procedure as
\begin{eqnarray}\label{saycoef}
A(n,m,t)&=&\cos \left[ \sqrt{ \left( |\Theta_{1}|^{2} + |\Theta_{2}|^{2} \right) (m+1) } \right], \nonumber \\
B(n,m+1,t)&=& \frac{\Theta_{1}^{*}}{i \sqrt{ \left( |\Theta_{1}|^{2} + |\Theta_{2}|^{2} \right) }} \sin \left[ \sqrt{ \left( |\Theta_{1}|^{2} + |\Theta_{2}|^{2} \right) (m+1) } \right], \nonumber \\
C(n,m+1,t)&=& \frac{\Theta_{2}^{*}}{i \sqrt{ \left( |\Theta_{1}|^{2} + |\Theta_{2}|^{2} \right) }} \sin \left[ \sqrt{ \left( |\Theta_{1}|^{2} + |\Theta_{2}|^{2} \right) (m+1) } \right],
\end{eqnarray}
with the following definitions for $\Theta_{i}(t) (i=1,2)$
\begin{eqnarray}\label{theta1}
\Theta_{1}(t) &=& \mu \int_{0}^{t}f(vt') \exp(- i \Delta_{2} t' ) dt', \nonumber \\
\Theta_{2}(t) &=& \gamma \mu \int_{0}^{t}f(vt') \exp(- i \Delta_{3} t' ) dt'.
\end{eqnarray}
For a particular choice of the atomic motion, we take the atomic velocity to have the special value $v=g L/\pi$ and hence the equation (\ref{fz}) is reduced to $f(z)=\sin(p g t)$ (recall that we assumed $p_{1} = p_{2} = p$). Consequently, the exact forms of $\Theta_{i}(t)$ are as below:
\begin{eqnarray}\label{thetaf}
\Theta_{1}(t) &=& \mu \left[ \frac{\sin \left( \frac{ p - \Delta_{2} }{ 2 } \; t \right) }{i(p - \Delta_{2})} \exp \left( i \; \frac{ p - \Delta_{2} }{ 2 } \; t \right) - \frac{\sin \left( \frac{ p + \Delta_{2} }{ 2 } \; t \right) }{i(p + \Delta_{2})} \exp \left( - i \; \frac{ p + \Delta_{2} }{ 2 } \; t \right) \right], \nonumber \\
\Theta_{2}(t) &=& \gamma \mu \left[ \frac{\sin \left( \frac{ p - \Delta_{3} }{ 2 } \; t \right) }{i(p - \Delta_{3})} \exp \left( i \; \frac{ p - \Delta_{3} }{ 2 } \; t \right) - \frac{\sin \left( \frac{ p + \Delta_{3} }{ 2 } \; t \right) }{i(p + \Delta_{3})} \exp \left( - i \; \frac{ p + \Delta_{3} }{ 2 } \; t \right) \right].
\end{eqnarray}
It is now necessary to emphasize the fact that the obtained state vector of the entire system in (\ref{say}) is valid for arbitrary amplitudes of the initial states of the field, such as number, phase, coherent or squeezed states. However, since the coherent state is more accessible than other typical field states (recall that the laser field far above the threshold condition is known as a coherent state \cite{zubairy}), we shall consider the fields to be initially in the coherent states
\begin{eqnarray}\label{amplitude}
|\alpha, \beta \rangle = \sum_{n=0}^{+\infty} \sum_{m=0}^{+\infty} q_{n} q_{m} |n,m \rangle, \;\;\;\;q_{n} = \exp \left( -\frac{ |\alpha|^{2} }{2}\right) \frac{\alpha^{n}}{\sqrt{n!}}, \;q_{m} = \exp \left( -\frac{ |\beta|^{2} }{2}\right) \frac{\beta^{m}}{\sqrt{m!}},
\end{eqnarray}
in which $|\alpha|^{2}$ and $|\beta|^{2}$ denote the mean photon number (intensity of light) of mode $1$ and $2$, respectively. \\
Inserting (\ref{thetaf}) in the time-dependent coefficients (\ref{saycoef}) leads one to the explicit form of the state vector and consequently the density matrix of the entire atom-field system.
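As a practical remark, the coefficients (\ref{saycoef}) and (\ref{thetaf}) are straightforward to evaluate numerically; the short Python sketch below does so on a grid of times (the parameter values are placeholders, the degenerate case $p=\Delta_{2,3}$ would need the appropriate limit of Eq. (\ref{thetaf}), and the grid is chosen to avoid the isolated zeros of $|\Theta_{1,2}|$):
\begin{verbatim}
# Sketch: evaluate Theta_1, Theta_2 of Eq. (thetaf) and A, B, C of Eq. (saycoef).
import numpy as np

delta, gam, p = 0.5, 1.0, 2.0            # delta = g1^(1)/g1^(2), gam = mu2^(2)/mu1^(2)
mu = np.sqrt(1 + delta**2)               # mu in units of g (we set g = 1)
D2, D3 = 0.0, 0.0                        # detunings Delta_2, Delta_3 (resonant case)

def Theta_term(D, t):
    # the bracket of Eq. (thetaf), valid for p != D
    return (np.sin((p - D) * t / 2) / (1j * (p - D)) * np.exp(1j * (p - D) * t / 2)
            - np.sin((p + D) * t / 2) / (1j * (p + D)) * np.exp(-1j * (p + D) * t / 2))

t = np.linspace(0.05, 25.0, 500)
Theta1 = mu * Theta_term(D2, t)
Theta2 = gam * mu * Theta_term(D3, t)

m = 4                                    # photon number of the relevant mode
norm2 = np.abs(Theta1)**2 + np.abs(Theta2)**2
root = np.sqrt(norm2 * (m + 1))
A = np.cos(root)
B = np.conj(Theta1) / (1j * np.sqrt(norm2)) * np.sin(root)
C = np.conj(Theta2) / (1j * np.sqrt(norm2)) * np.sin(root)
print(np.max(np.abs(np.abs(A)**2 + np.abs(B)**2 + np.abs(C)**2 - 1)))  # normalization
\end{verbatim}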
\section{Atomic population inversion}\label{S-inversion}
We are now in a position to examine the atomic dynamics, in particular the dynamics of an important quantity, namely atomic population inversion. For the present model (including a `three-level' atom in the $\Lambda$-type configuration), the atomic inversion, which is introduced as the difference between the excited-state and ground-state probabilities, may be defined as follows \cite{Inv1,Inv2}:
\begin{eqnarray}\label{inversion}
\langle \hat{\sigma}_{z}(t) \rangle = \rho_{11}(t) - (\rho_{22}(t) + \rho_{33}(t)),
\end{eqnarray}
in which the matrix elements of atomic density operator are generally given by
\begin{eqnarray}\label{rhoshekl}
\rho_{i j}(t) = \sum_{n=0}^{+\infty} \sum_{m=0}^{+\infty} \langle n,m, i | \psi(t) \rangle \langle \psi(t) | n,m, j \rangle,\;\;\; i , j = 1,2,3.
\end{eqnarray}
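Before turning to the figures, we remark that for the initial coherent fields of Eq. (\ref{amplitude}) the coefficients (\ref{saycoef}) do not depend on $n$, so the sum over $n$ in (\ref{rhoshekl}) collapses and the inversion reduces to $\langle\hat{\sigma}_{z}(t)\rangle=\sum_{m}|q_{m}|^{2}\cos\big[2\sqrt{(|\Theta_{1}|^{2}+|\Theta_{2}|^{2})(m+1)}\big]$. A minimal Python sketch of this sum in the exact-resonance case, where Eq. (\ref{thetaf}) reduces to $\Theta_{1}=(2\mu/p)\sin^{2}(p t/2)$ and $\Theta_{2}=\gamma\Theta_{1}$ (parameter values are placeholders), is:
\begin{verbatim}
# Sketch: atomic inversion of Eq. (inversion) for initially coherent fields,
# exact resonance (Delta_2 = Delta_3 = 0), truncating the photon sum.
import numpy as np
from scipy.stats import poisson

delta, gam, p = 0.5, 1.0, 2.0            # delta = g1^(1)/g1^(2), gam = coupling ratio
mu = np.sqrt(1 + delta**2)               # in units of g
beta2, mmax = 10.0, 80                   # |beta|^2 and Fock truncation for mode 2
pm = poisson.pmf(np.arange(mmax), beta2) # |q_m|^2 of the coherent state
m = np.arange(mmax)

tau = np.linspace(0.0, 25.0, 1000)
Theta1 = (2 * mu / p) * np.sin(p * tau / 2) ** 2      # Eq. (thetaf) at resonance
Theta_sq = (1 + gam**2) * Theta1**2                   # |Theta_1|^2 + |Theta_2|^2
W = np.array([np.sum(pm * np.cos(2 * np.sqrt(Ts * (m + 1)))) for Ts in Theta_sq])
print(W.min(), W.max())                  # oscillates between -1 and +1, cf. the left plots
\end{verbatim}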
Figure \ref{Ainversion} shows the evolution of the atomic population inversion against the scaled time $\tau = g t$ for initial mean number of photons fixed at $|\alpha|^{2} = 10=|\beta|^{2}$. The left plots of this figure show the influence of atomic motion by selecting the fixed value of the field-mode structure parameter $p = 2$. Also, the effect of this parameter by considering different values of $p$ in the shape function $f(z)$ is discussed in the right plots.
In figure \ref{Ainversion}(a), the exact resonant case is assumed ($\Delta_{2} = \Delta_{3} =0$) and the coupling constants are equal ($\gamma = 1$).
Figure \ref{Ainversion}(b) is plotted to indicate the effect of different atom-field couplings by considering $\gamma = 2$.
The effect of detuning parameters ($\Delta_{2}/g =7, \; \Delta_{3}/g = 15$) together with the situations in which the atom-field couplings are equal ($\gamma = 1$) and different ($\gamma = 2$) is depicted in figure \ref{Ainversion}(c) and \ref{Ainversion}(d), respectively. \\
In detail, it is seen from the left plots that, in the resonance case (figures \ref{Ainversion}(a) and \ref{Ainversion}(b)), the atomic inversion oscillates between its minimum ($-1$) and maximum ($+1$) values. The temporal behaviour of this quantity is changed when the effect of the detuning parameters is included (figures \ref{Ainversion}(c) and \ref{Ainversion}(d)), in which the atomic population inversion is observed at all times.
In the right plots, the effect of field-mode structure parameter is studied by choosing different values of $p$. Figures \ref{Ainversion}(a) and \ref{Ainversion}(b) indicate that, by an increase of $p$, the fluctuation between minima and maxima becomes faster. It is understood from figures \ref{Ainversion}(c) and \ref{Ainversion}(d) that, in spite of the fact that the atomic population inversion is seen at all times for fixed $p = 2$ (left plot), this quantity periodically varies between the negative and positive values (the atomic inversion is observed, but not always), when the value of $p$ goes up (right one).
Finally, it is found from figures \ref{Ainversion}(b) and \ref{Ainversion}(d) that, the ratio of atom-field coupling constants ($\gamma$) has no considerable effect on the amount of the population inversion.
\section{Quantum mutual information and the DEM}
In order to study the dynamics of entanglement and obtain the DEM, the quantum mutual information (quantum entropy) is a useful quantity that leads us to the amount of entanglement \cite{chuang}. In other words, the temporal behaviour of the quantum entropy of the subsystems, here atom and field, indicates the time evolution of the DEM between them. In the present formalism, we use the von Neumann entropy \cite{pk1}. Before this, it is valuable to recall the important theorem by Araki and Lieb \cite{araki}. Based on this theorem, in a bipartite quantum system, the system and subsystem entropies, at any time $t$, are confined by the following triangle inequality
\begin{eqnarray}\label{valen}
|S_{A}(t)-S_{F}(t)|\leq S_{AF}(t) \leq S_{A}(t)+S_{F}(t),
\end{eqnarray}
where $S_{A}$, $S_{F}$ and $S_{AF}$ refer to the atom, the field and the total entropy, respectively. As a result, if the initial state of the whole system is a pure state, the total entropy of the system is zero and remains constant. In other words, if a system is initially prepared in a pure state, the time evolutions of the reduced entropies of the subsystems are exactly identical. This fact means that, for our consideration, $S_{A}(t)=S_{F}(t)$ at any time $t$ \cite{phoenix}. Therefore, by calculating the reduced entropy of the atom, the DEM will be obtained. \\
Anyway, according to the von Neumann entropy, as a measure of entanglement, the entropy of the atom is defined by
\begin{eqnarray}\label{ventd}
S_{A}(t)=-\mathrm{Tr}_{A} \left(\hat{\rho}_{A}(t) \ln \hat{\rho}_{A}(t) \right) = S_{F}(t),
\end{eqnarray}
in which $\hat{\rho}_{A}(t)=\mathrm{Tr}_{F}\left( |\psi(t) \rangle \langle \psi(t) | \right)$ where $|\psi(t) \rangle$ has been introduced in (\ref{say}).
Following the same procedure of \cite{usJOSA}, the entropy of the field (and so the atom, too) can be expressed by
the relation \cite{us,chuang,pk3}
\begin{eqnarray}\label{sff}
\mathrm{DEM}(t) = S_{F}(t) = - \sum_{j=1}^{3}\zeta_{j} \ln \zeta_{j},
\end{eqnarray}
where $\zeta_{j}$, the eigenvalues of the reduced atomic density matrix, are given by Cardano's method as \cite{kardan}
\begin{eqnarray}\label{ventkardan}
\zeta_{j}&=&-\frac{1}{3}\xi_{1}+\frac{2}{3}\sqrt{\xi_{1}^{2}-3\xi_{2}}\cos\left[\varrho+\frac{2}{3}(j-1)\pi \right],\;\;\;j=1,2,3, \nonumber \\
\varrho &=& \frac{1}{3}\cos^{-1}\left[ \frac{9\xi_{1}\xi_{2}-2\xi_{1}^{3}-27\xi_{3}}{2(\xi_{1}^{2}-3\xi_{2})^{3/2}}\right],
\end{eqnarray}
with
\begin{eqnarray}\label{vzal}
\xi_{1} &=& -\rho_{11}-\rho_{22}-\rho_{33}, \nonumber \\
\xi_{2} &=& \rho_{11}\rho_{22}+\rho_{22}\rho_{33}+\rho_{33}\rho_{11} -\rho_{12}\rho_{21}-\rho_{23}\rho_{32}-\rho_{31}\rho_{13}, \nonumber \\
\xi_{3} &=& -\rho_{11}\rho_{22}\rho_{33}-\rho_{12}\rho_{23}\rho_{31}-\rho_{13}\rho_{32}\rho_{21} +\rho_{11}\rho_{23}\rho_{32}+\rho_{22}\rho_{31}\rho_{13}+\rho_{33}\rho_{12}\rho_{21},
\end{eqnarray}
where the above matrix elements have been introduced in (\ref{rhoshekl}).
It is instructive to note that, since the parameter $\xi_{1}$ in (\ref{vzal}) is precisely the trace of the density matrix with a minus sign, the exact value of this parameter is clearly equal to $-1$. Using equations (\ref{sff})-(\ref{vzal}), one is able to find the DEM between the atom and field. We would like to state that a zero value of the DEM in equation (\ref{sff}) ($S_{A(F)}=0$) means that the subsystems are unentangled (the atom-field system is separable). \\
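As a numerical aside, the Cardano roots above can be cross-checked against a direct diagonalization of the reduced atomic density matrix. A short Python sketch follows; the randomly generated valid $3\times 3$ density matrix is used purely for illustration, whereas in practice $\hat{\rho}_{A}$ follows from (\ref{rhoshekl}):
\begin{verbatim}
# Cross-check of Eqs. (sff)-(vzal): Cardano roots vs. direct diagonalization.
import numpy as np

rng = np.random.default_rng(0)
M = rng.normal(size=(3, 3)) + 1j * rng.normal(size=(3, 3))
rho = M @ M.conj().T
rho /= np.trace(rho).real                     # a random 3x3 density matrix

xi1 = -np.trace(rho).real                     # equals -1
xi2 = sum(rho[i, i] * rho[j, j] - rho[i, j] * rho[j, i]
          for i, j in [(0, 1), (1, 2), (2, 0)]).real
xi3 = -np.linalg.det(rho).real
phi = np.arccos((9 * xi1 * xi2 - 2 * xi1**3 - 27 * xi3)
                / (2 * (xi1**2 - 3 * xi2) ** 1.5)) / 3
zeta = np.array([-xi1 / 3 + (2 / 3) * np.sqrt(xi1**2 - 3 * xi2)
                 * np.cos(phi + 2 * np.pi * j / 3) for j in range(3)])

S_cardano = -np.sum(zeta * np.log(zeta))
S_direct = -np.sum([lam * np.log(lam) for lam in np.linalg.eigvalsh(rho)])
print(S_cardano, S_direct)                    # the two entropies agree
\end{verbatim}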
Our presented results in figure \ref{dem} indicate the evolution of the field entropy against the scaled time $\tau$, from which the DEM is studied. In this figure, we have considered the parameters similar to figure \ref{Ainversion}.
Comparing the left plots of figures \ref{dem}(a) and \ref{dem}(b) shows that changing the ratio of coupling constants ($\gamma$) from $1$ to $2$ has no remarkable effect on the variation of the field entropy in the absence of the detuning parameters.
However, as is seen from figures \ref{dem}(c) and \ref{dem}(d), in the presence of the detuning ($\Delta_{2}/g =7, \; \Delta_{3}/g = 15$), the amount of the DEM is obviously diminished. Also, it is valuable to notice that, unlike figure \ref{dem}(b), selecting appropriate values of atom-field coupling constants may improve the DEM.
Paying attention to the right plot of figure \ref{dem}(a) indicates that, by an increase in $p$, the time intervals of entanglement between the atom and field become shorter. In other words, the variation between the maxima and minima of the field entropy is faster for large $p$ than for small $p$.
The right plot of figure \ref{dem}(b) shows that for the case $\gamma = 2$, the amount of the DEM is decreased with increasing the parameter $p$.
It is observed from figure \ref{dem}(c) that, by an increase of $p$, the amount of DEM is clearly enhanced when the detuning is present. In addition, comparing the right plots of figures \ref{dem}(c) and \ref{dem}(d) shows that the amount of entanglement in the presence of the detuning can be enriched by changing $ \gamma $ from $1$ to $2$.
\section{Linear entropy and coherence loss}
A simple and direct measure to obtain the degree of decoherence (coherence loss) is called the ``idempotency defect'' \cite{idempotency1,idempotency2,idempotency3}.
This quantity, which may be considered as the lowest order approximation to the von Neumann entropy, is a good measure to understand the purity loss of the state of a quantum system. This parameter can be easily measured by the linear entropy, defined as \cite{idempotency4}
\begin{eqnarray}\label{LinEnt}
S (\hat{\rho}) = \mathrm{Tr} \left[ \hat{\rho}_{A}(t) (1 - \hat{\rho}_{A}(t)) \right],
\end{eqnarray}
where $\hat{\rho}_{A}(t)$ is the atomic density operator. According to the latter equation, the linear entropy is zero for a pure state, that is, when $\mathrm{Tr} \; \hat{\rho}_{A}(t) = \mathrm{Tr} \; \hat{\rho}_{A}^{2}(t)$. Consequently, nonzero values of the linear entropy indicate the non-purity of the state of the system. Also, it is valuable to notice that maximal entanglement, and consequently the most mixed state, is revealed when the linear entropy reaches its maximum value. \\
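In practice, Eq. (\ref{LinEnt}) is simply $1-\mathrm{Tr}\,\hat{\rho}_{A}^{2}(t)$; as a short numerical illustration (again on a randomly generated $3\times 3$ density matrix, purely for illustration):
\begin{verbatim}
# Idempotency defect S = Tr[rho (1 - rho)] = 1 - Tr(rho^2) for a sample 3x3 state.
import numpy as np

rng = np.random.default_rng(1)
M = rng.normal(size=(3, 3)) + 1j * rng.normal(size=(3, 3))
rho = M @ M.conj().T
rho /= np.trace(rho).real
S_lin = 1.0 - np.trace(rho @ rho).real
print(S_lin)   # 0 for a pure state; at most 1 - 1/3 for a three-level reduced state
\end{verbatim}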
In figure \ref{loss} we have plotted the idempotency defect, from which the coherence loss is studied, in terms of scaled time $\tau$ for different chosen parameters assumed in figure \ref{Ainversion}.
The left plots of figures \ref{loss}(a) and \ref{loss}(b) indicate that the $\gamma$ parameter (the ratio of the coupling constants) plays no outstanding role for the coherence in the resonance case. When the detuning parameters are included ($\Delta_{2}/g =7, \; \Delta_{3}/g = 15$), decoherence is rapidly attenuated (the left plots of figures \ref{loss}(c) and \ref{loss}(d)).
We should now concentrate on the right plots, where the effect of the field-mode structure parameter is investigated by considering different values of $p$. It is observed that, in the resonance case, an increase of $p$ sharply decreases the decoherence and consequently enhances the purity of the state of the system (the right plots of figures \ref{loss}(a) and \ref{loss}(b)). However, this behaviour is quite different when the detuning is present (figures \ref{loss}(c) and \ref{loss}(d)). In this case, increasing $p$ evidently increases the linear entropy.
\section{Summary and conclusion}\label{summary}
In this paper, we have proposed a model to discuss the interaction between a moving $\Lambda$-type three-level atom and a two-mode field injected simultaneously into the cavity in the presence of the detuning parameters. As a result of the presented model, we have considered the field-field interaction, through which parametric down conversion was taken into account. Using a particular canonical transformation, an analytical and also exact solution for our interaction model has been presented and the explicit form of the state vector of the whole atom-field system has been obtained. Next, the atomic dynamics has been numerically investigated by considering the atomic population inversion. Also, two different measures of entanglement, consisting of the von Neumann entropy (to study the DEM) and the idempotency defect or linear entropy (to discuss the decoherence), have been studied. In each case, the effect of atomic motion, via considering different values of the field-mode structure parameter $p$, has been examined in the presence/absence of the detuning parameters with equal/different values of the atom-field coupling constants. The main results of the paper are listed in what follows. \\
$i)$ {\it Atomic population inversion:} It is found that the presence of the detuning parameters leads to the visibility of the atomic population inversion at all times. Adding the effect of field-mode structure parameter $p$, which is studied by choosing different values of $p$, implies the fact that the population inversion changes between the negative and positive values (the atomic inversion is appeared, not at all times), when the value of $p$ grows up. It is also worthwhile to mention that the ratio of atom-field coupling ($\gamma$) has no effective role in appearing the population inversion. \\
$ii)$ {\it Dynamics of entanglement:} It is observed that the effect of the detuning parameters is sharply pulled down the entanglement between the atom and the field. Although, by increasing the field-mode structure parameter $p$, the DEM clearly grows up. In addition, it may be useful to state that the amount of entanglement in the presence of the detuning can be improved by setting $\gamma = 2$ relative to $\gamma = 1$. \\
$iii)$ {\it Coherence loss:} It is shown that the coherence between subsystems in resonance case can be enriched by changing the $\gamma$ parameter, i.e., the ratio of the coupling constants. Also, adding the effect of the detuning rapidly weakens the decoherence. Paying attention to the effect of field-mode structure parameter $p$, it is observed that in resonance case, by an increase of this parameter, decoherence and consequently the purity of the state of the system can be sharply descended. Although, in the presence of the detuning parameters, by increasing $p$ the decoherence is obviously ascended. \\
Finally, in general, according to the obtained numerical results, the depth and domain of each of the considered properties can be appropriately controlled by choosing the suitable field-mode structure parameter $p$ and the detuning parameters, when initial states of the subsystems are fixed.\\
\vspace {2 cm}
\end{document}
\begin{document}
\baselineskip=17pt
\title[A characterization of Fourier transforms]{A characterization of Fourier transforms}
\author{Philippe Jaming}
\address{Universit\'e d'Orl\'eans\\
Facult\'e des Sciences\\
MAPMO - F\'ed\'eration Denis Poisson\\ BP 6759\\ F 45067 Orl\'eans Cedex 2\\
France}
\email{[email protected]}
\begin{abstract}
The aim of this paper is to show that, in various situations, the only continuous linear map that transforms a convolution product into a pointwise product is a Fourier transform.
We focus on the cyclic groups ${\mathbb{Z}}/n{\mathbb{Z}}$, the integers ${\mathbb{Z}}$, the Torus ${\mathbb{T}}$ and the real line.
We also ask a related question for the twisted convolution.
\end{abstract}
\subjclass{42A38;42A85;42B10;43A25}
\keywords{Fourier transform;convolution}
\thanks{The author wishes to thank S. Alesker for sending him the preprint \cite{AAAV2}
and O. Guedon for pointing to that paper.
This work was partially financed by the French ANR project {\sl AHPI} (ANR-07-BLAN-0247-01)}
\maketitle
{\it In memory of A. Hulanicki.}
\section{Introduction}
The aim of this paper is to characterize the Fourier transform by some of its properties.
Indeed, the Fourier transform is well known to change a translation into
a modulation (multiplication by a character) and vice versa, and to change a convolution into a pointwise product. Moreover, these are among its main features and are fundamental properties in many of its applications. The aim of this paper is to show that the Fourier transform is, to some extent,
uniquely determined by some of these properties.
Before going on, let us introduce some notation. Let $G$ be a locally compact Abelian group with Haar measure $\nu$ and let $\hat G$ be the dual group. Operations on $G$ will be denoted
additively. Let us recall that the convolution on $G$ is defined
for $f,g\in L^1(G)$ by
$$
f*g(x)=\int_G f(t)g(x-t)\,\mbox{d}\nu(t)
$$
(and $f*g\in L^1(G)$) while the Fourier transform is defined by
$$
{\mathcal F}(f)(\gamma)=\hat f(\gamma)=\int_G f(t)\overline{\gamma(t)}\,\mbox{d}\nu(t).
$$
We will here mainly focus on the four following cases:
$G=\hat G={\mathbb{Z}}/n{\mathbb{Z}}$; $G={\mathbb{Z}}$ and $\hat G={\mathbb{T}}$, and vice versa; and $G=\hat G={\mathbb{R}}$
(our results then easily extend to products of such groups).
We will here focus on two types of results. The first one concerns the characterization of
the Fourier transform as being essentially the only continuous linear transform that changes a convolution product into a pointwise product. To our knowledge the first results in that direction appear in the work of Lukacs \cite{Lu1,Lu2}, pursued in \cite{Em}, and an essentially complete result appeared in \cite{Fi}
for all LCA groups, under the mild additional constraint that the transform has a reasonable kernel.
We will show here that this hypothesis can be lifted. Further, a striking result, recently proved by
Alesker, Artstein-Avidan and Milman \cite{AAAV1,AAAV2}, is that, to some extent, continuity and
linearity may be removed as well. More precisely, let us denote by ${\mathcal S}({\mathbb{R}}^d)$ the Schwartz functions on ${\mathbb{R}}^d$ and by ${\mathcal S}'({\mathbb{R}}^d)$ the Schwartz (tempered) distributions.
\noindent{\bf Theorem (Alesker, Artstein-Avidan, Milman)}\\
{\sl Let $T\,:{\mathcal S}({\mathbb{R}}^d)\to{\mathcal S}({\mathbb{R}}^d)$ be a mapping that extends to a mapping
$T\,:{\mathcal S}'({\mathbb{R}}^d)\to{\mathcal S}'({\mathbb{R}}^d)$ that is bijective and such that
\begin{enumerate}
\renewcommand{\theenumi}{\roman{enumi}}
\item for every $f\in{\mathcal S}({\mathbb{R}}^d)$ and $g\in{\mathcal S}'({\mathbb{R}}^d)$, $T(f*g)=T(f).T(g)$;
\item for every $f\in{\mathcal S}({\mathbb{R}}^d)$ and $g\in{\mathcal S}'({\mathbb{R}}^d)$, $T(f.g)=T(f)*T(g)$.
\end{enumerate}
Then there exists $B\in{\mathcal M}_d({\mathbb{R}})$ with $\det B=1$ such that $T(f)={\mathcal F}(f)\circ B$.}
Note that $T$ is neither assumed to be linear nor to be continuous. We will adapt the proof
of this theorem to obtain an analogous result on the cyclic group. This has the advantage of highlighting
the main features which come into the proof of this theorem. The main difference is that in this theorem,
one assumes that $T$ sends smooth functions into smooth functions. In the case of the cyclic group,
we do not have such functions at hand and are therefore led to assume some mild continuity;
{\it see} Theorem \ref{th:A} for a precise statement.
A second set of results has its origin in the work of Cooper \cite{Co1,Co2}. Here one considers
the Fourier transform as an intertwining operator between two groups of transforms
acting on $L^p$-spaces. In order to state the precise result, let us define,
for $\alpha\in{\mathbb{R}}$ and $f$ a function on ${\mathbb{R}}$, $\tau_\alpha f(t)=f(t+\alpha)$.
Further, if $\varphi\,:{\mathbb{R}}\to{\mathbb{R}}$, let $M^{(\varphi)}_\alpha f(t)=e^{i\alpha\varphi(t)}f(t)$.
It is easy to see that ${\mathcal F}\tau_\alpha=M^{(t)}_\alpha{\mathcal F}$ and
${\mathcal F} M^{(-t)}_\alpha=\tau_\alpha{\mathcal F}$ {\it i.e.} the Fourier transform intertwines
translations and modulations and vice versa. The converse is also true. More precisely:
\noindent{\bf Theorem (Cooper)}\\
{\sl Let $T\,: L^2({\mathbb{R}})\to L^2({\mathbb{R}})$ be a continuous linear transformation such that
there exist two measurable functions $\varphi,\psi\,:{\mathbb{R}}\to{\mathbb{R}}$ for which
$$
T\tau_\alpha=M^{(\varphi)}_\alpha T\quad\mathrm{and}\quad TM^{(\psi)}_\alpha=\tau_\alpha T.
$$
Then $\varphi(t)=bt+c$, $\psi(t)=bt+d$ with $b,c,d\in{\mathbb{R}}$ and $T={\mathcal F}$.}
We will extend this theorem to ${\mathbb{Z}}/n{\mathbb{Z}}$ and ${\mathbb{Z}}$.
The remainder of the article is organized as follows. In the next section, we will prove
the results for the groups $G={\mathbb{Z}}$ and $G={\mathbb{Z}}/n{\mathbb{Z}}$ while Section \ref{sec:3} is devoted to the cases
$G={\mathbb{T}}^d$ and $G={\mathbb{R}}^d$. We conclude with some questions concerning the twisted convolution.
Before going on, let us introduce some more notation.
If $E\subset G$, we will denote by $\chi_E$ the function on $G$ given by $\chi_E(k)=1$ if $k\in E$ and $\chi_E(k)=0$ otherwise. The Kronecker symbol is denoted by $\delta_{j,k}$ where
$\delta_{j,k}=0$ or $1$ according to $j\not=k$ or $j=k$.
\section{The cyclic group and the integers}
In this section, we consider $G={\mathbb{Z}}/n{\mathbb{Z}}$ or $G={\mathbb{Z}}$.
We will write $\mathcal{C}(\hat G)$ for the set of $n$-periodic sequences
when $G=\hat G={\mathbb{Z}}/n{\mathbb{Z}}$ or of continuous functions on $\hat G={\mathbb{T}}$ if $G={\mathbb{Z}}$.
Our first result is the following:
\begin{theorem}\label{th:1}
Let $G={\mathbb{Z}}/n{\mathbb{Z}}$ or $G={\mathbb{Z}}$.
Let $T$ be a linear continuous map $T\,:L^1(G)\to \mathcal{C}(\hat G)$ such that
$T(f*g)=T(f).T(g)$. Then there exists $E\subset \hat G$ and a map
$\sigma\,:\hat G\to\hat G$ such that, for
$f\in L^1(G)$ and almost every $\eta\in\hat G$,
$T(f)(\eta)=\chi_E(\eta)\widehat{f}\bigl(\sigma(\eta)\bigr)$. Moreover, $\sigma$ is measurable if $G={\mathbb{Z}}$.
\end{theorem}
\begin{proof}
Let $\delta_k=(\delta_{j,k})_{j \in G}\in L^1(G)$. Then $\delta_k*\delta_\ell=\delta_{k+\ell}$, so that
\begin{equation}
\label{eq:Tdelta}
T(\delta_{k+\ell})=T(\delta_k*\delta_\ell)=T(\delta_k)T(\delta_\ell).
\end{equation}
In particular, for each $\eta\in \hat G$, the map $\pi_\eta\,:k\mapsto T(\delta_k)(\eta)$ is a homomorphism from $G$ to the multiplicative semigroup $({\mathbb{C}},\cdot)$, i.e. $\pi_\eta(k+\ell)=\pi_\eta(k)\pi_\eta(\ell)$.
First note $\pi_\eta(0)=\pi_\eta(k)\pi_\eta(-k)$
so that if $\pi_\eta$ vanishes somewhere, it vanishes at $0$. Conversely
$\pi_\eta(k)=\pi_\eta(k)\pi_\eta(0)$
so that if $\pi_\eta$ vanishes at $0$, it vanishes everywhere. Further
$\pi_\eta(0)=\pi_\eta(0)^2$ so that $\pi_\eta(0)=0$ or $1$.
We will now assume that $\pi_\eta(0)=1$ and exploit
$\pi_\eta(k+1)=\pi_\eta(k)\pi_\eta(1)$ which implies that
$\pi_\eta(k)=\pi_\eta(1)^k$.
We now need to distinguish the two cases:
--- if $G={\mathbb{Z}}/n{\mathbb{Z}}$, then $1=\pi_\eta(0)=\pi_\eta(n)=\pi_\eta(1)^n$, so that
$\pi_\eta(1)$ is an $n$-th root of unity, {\it i.e.}
$T(\delta_1)(\eta)=e^{2i\pi \sigma(\eta)/n}$ for some $\displaystyle\sigma(\eta)\in \{0,1,\ldots,n-1\}={\mathbb{Z}}/n{\mathbb{Z}}$.
It follows that $T(\delta_k)(\eta)=e^{2i\pi k\sigma(\eta)/n}$.
--- if $G={\mathbb{Z}}$, as $T$ was assumed to be continuous $L^1(G)\to \mathcal{C}(\hat G)$, there is a constant $C>0$ such that, for every $f\in L^1(G)$, $\norm{Tf}_\infty\leq C\norm{f}_1$.
In particular, for every $k\in{\mathbb{Z}}$ and every $\eta\in\hat G={\mathbb{T}}$
$$
|\pi_\eta(1)^k|=|[T(\delta_1)(\eta)]^k|=
|T(\delta_k)(\eta)|\leq C\norm{\delta_k}_1=C
$$
thus, by letting $k\to\pm\infty$, we obtain that $\pi_\eta(1)$ is a complex number of modulus $1$
(it is not $0$ since otherwise $\pi_\eta(0)=\pi_\eta(1)\pi_\eta(-1)$ would vanish). We may thus write
$T(\delta_1)(\eta)=e^{2i\pi \sigma(\eta)}$ for some $\sigma(\eta)\in[0,1)\simeq{\mathbb{T}}=\hat G$.
Moreover, as $\eta\to T(\delta_1)(\eta)$ is measurable, we may assume that $\sigma$ is measurable as well.
Let us now define $E=\{\eta\in \hat G\,:\ T(\delta_k)(\eta)\not=0\ \forall\ k\in G\}$. Then, by linearity and
continuity of $T$, for $f\in L^1(G)$,
\begin{eqnarray*}
Tf(\eta)&=&T\left(\sum_{k\in G}f(k)\delta_k\right)(\eta)=\sum_{k\in G}f(k)T(\delta_k)(\eta)\\
&=&\begin{cases}\displaystyle\sum_{k\in G}f(k)\chi_E(\eta)e^{2i\pi k\sigma(\eta)/n}&\mbox{if }G={\mathbb{Z}}/n{\mathbb{Z}}\\
\displaystyle\sum_{k\in G}f(k)\chi_E(\eta)e^{2i\pi k\sigma(\eta)}&\mbox{if }G={\mathbb{Z}}\end{cases}\\
&=&\chi_E(\eta)\widehat{f}\bigl(\sigma(\eta)\bigr),
\end{eqnarray*}
which completes the proof.
\end{proof}
\begin{remark}
Using tensorization, we may extend the result with no difficulty to $G=\prod_{i\in I}{\mathbb{Z}}/n_i{\mathbb{Z}}\times{\mathbb{Z}}^d$.
\end{remark}
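Let us also record the easy converse (added here as a sanity check; it is not needed in the sequel): any operator of the above form transforms convolution into pointwise product. Indeed, if $T(f)(\eta)=\chi_E(\eta)\widehat{f}\bigl(\sigma(\eta)\bigr)$ for some $E\subset\hat G$ and some map $\sigma\,:\hat G\to\hat G$ (and if $T$ does map into $\mathcal{C}(\hat G)$, e.g. when $E=\hat G$ and $\sigma$ is continuous in the case $G={\mathbb{Z}}$), then for $f,g\in L^1(G)$,
$$
T(f*g)(\eta)=\chi_E(\eta)\widehat{f*g}\bigl(\sigma(\eta)\bigr)=\chi_E(\eta)^2\widehat{f}\bigl(\sigma(\eta)\bigr)\widehat{g}\bigl(\sigma(\eta)\bigr)=T(f)(\eta)T(g)(\eta),
$$
since $\chi_E^2=\chi_E$. Hence the description given in Theorem \ref{th:1} cannot be sharpened.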
We will now adapt the proof of \cite{AAAV1,AAAV2} to show that on ${\mathbb{Z}}/n{\mathbb{Z}}$, a bijective transform
that maps a product into a convolution is essentially a Fourier transform.
We will need some notation:
\begin{notation}
We will consider the following particular
elements of $L^1({\mathbb{Z}}/n{\mathbb{Z}})$\,: $\mathbf{0}=(0,\ldots,0)$ and $\mathbf{1}=(1,\ldots,1)$.
Further, if $a\in L^1({\mathbb{Z}}/n{\mathbb{Z}})$ we will write $\displaystyle{\mathbb{E}}[a]=\sum_{j=0}^{n-1}a_j$.
\end{notation}
We can now state the main theorem:
\begin{theorem}\label{th:A}
Let ${\mathcal T}\,:L^1({\mathbb{Z}}/n{\mathbb{Z}})\to L^1({\mathbb{Z}}/n{\mathbb{Z}})$ be a bijective transformation (not necessarily linear) such
that the map ${\mathbb{C}}\to L^1({\mathbb{Z}}/n{\mathbb{Z}})$, $c\to {\mathcal T}(c\mathbf{1})$ is continuous. Assume that
\begin{enumerate}
\renewcommand{\theenumi}{\roman{enumi}}
\item for every $a,b\in L^1({\mathbb{Z}}/n{\mathbb{Z}})$, ${\mathcal T}(a.b)={\mathcal T}(a).{\mathcal T}(b)$;
\item for every $a,b\in L^1({\mathbb{Z}}/n{\mathbb{Z}})$, ${\mathcal T}(a*b)={\mathcal T}(a)*{\mathcal T}(b)$.
\end{enumerate}
Then there exists $\eta\in\{1,\ldots,n-1\}$ that has no common divisor with $n$ such that
-- either, for every $j\in{\mathbb{Z}}/n{\mathbb{Z}}$ and every $a\in L^1({\mathbb{Z}}/n{\mathbb{Z}})$, ${\mathcal T}(a)(\eta j)=a(j)$;
-- or, for every $j\in{\mathbb{Z}}/n{\mathbb{Z}}$ and every $a\in L^1({\mathbb{Z}}/n{\mathbb{Z}})$, ${\mathcal T}(a)(\eta j)=\overline{a(j)}$.
\end{theorem}
\begin{remark}
The fact that $\eta$ has no common divisor with $n$ implies that the map $j\to j\eta$
is a permutation of $\{0,\ldots,n-1\}$ so that the map ${\mathcal T}$ is actually fully determined.
\end{remark}
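As a quick sanity check (not part of the original statement), note that such ``twisted identities'' do satisfy both hypotheses. For instance, take $n=5$ and $\eta=2$, so that ${\mathcal T}(a)(2j)=a(j)$, i.e. ${\mathcal T}(a)(j)=a(3j)$ since $3$ is the inverse of $2$ modulo $5$. Then ${\mathcal T}(a.b)={\mathcal T}(a).{\mathcal T}(b)$ trivially, and
$$
{\mathcal T}(a*b)(j)=(a*b)(3j)=\sum_{k\in{\mathbb{Z}}/5{\mathbb{Z}}}a(k)b(3j-k)=\sum_{\ell\in{\mathbb{Z}}/5{\mathbb{Z}}}a(3\ell)b\bigl(3(j-\ell)\bigr)=\bigl({\mathcal T}(a)*{\mathcal T}(b)\bigr)(j),
$$
where we substituted $k=3\ell$. This shows in particular that the conclusion of Theorem \ref{th:A} cannot be improved to $\eta=1$.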
\begin{corollary}\label{cor:A}
Let ${\mathcal T}\,:L^1({\mathbb{Z}}/n{\mathbb{Z}})\to L^1({\mathbb{Z}}/n{\mathbb{Z}})$ be a bijective transformation (not necessarily linear) such
that the map ${\mathbb{C}}\to L^1({\mathbb{Z}}/n{\mathbb{Z}})$, $c\to {\mathcal T}(c\mathbf{1})$ is continuous. Assume that
\begin{enumerate}
\renewcommand{\theenumi}{\roman{enumi}}
\item for every $a,b\in L^1({\mathbb{Z}}/n{\mathbb{Z}})$, ${\mathcal T}(a.b)={\mathcal T}(a)*{\mathcal T}(b)$;
\item for every $a,b\in L^1({\mathbb{Z}}/n{\mathbb{Z}})$, ${\mathcal T}(a*b)={\mathcal T}(a).{\mathcal T}(b)$.
\end{enumerate}
Then there exists $\eta\in\{1,\ldots,n-1\}$ that has no common divisor with $n$ such that
either, for every $j\in{\mathbb{Z}}/n{\mathbb{Z}}$ and every $a\in L^1({\mathbb{Z}}/n{\mathbb{Z}})$, ${\mathcal T}(a)(\eta j)=\hat a(j)$
or, for every $j\in{\mathbb{Z}}/n{\mathbb{Z}}$ and every $a\in L^1({\mathbb{Z}}/n{\mathbb{Z}})$, ${\mathcal T}(a)(\eta j)=\overline{\hat a(j)}$.
\end{corollary}
\begin{proof}[Proof of Corollary \ref{cor:A}]
It is enough to apply Theorem \ref{th:A} to $\tilde {\mathcal T}={\mathcal F}^{-1}{\mathcal T}$.
\end{proof}
\begin{corollary}\label{cor:B}
Let ${\mathcal T}\,:L^1({\mathbb{Z}}/n{\mathbb{Z}})\to L^1({\mathbb{Z}}/n{\mathbb{Z}})$ be a bijective transformation (not necessarily linear) such
that the map ${\mathbb{C}}\to L^1({\mathbb{Z}}/n{\mathbb{Z}})$, $c\to {\mathcal T}(c\mathbf{1})$ is continuous. Assume that for
every $a\in L^1({\mathbb{Z}}/n{\mathbb{Z}})$, ${\mathcal T}^2a(k)=a(-k)$ and that one of the following two identities holds:
\begin{enumerate}
\renewcommand{\theenumi}{\roman{enumi}}
\item for every $a,b\in L^1({\mathbb{Z}}/n{\mathbb{Z}})$, ${\mathcal T}(a.b)={\mathcal T}(a)*{\mathcal T}(b)$;
\item for every $a,b\in L^1({\mathbb{Z}}/n{\mathbb{Z}})$, ${\mathcal T}(a*b)={\mathcal T}(a).{\mathcal T}(b)$.
\end{enumerate}
Then there exists $\eta\in\{1,\ldots,n-1\}$ that has no common divisor with $n$ such that
either, for every $j\in{\mathbb{Z}}/n{\mathbb{Z}}$ and every $a\in L^1({\mathbb{Z}}/n{\mathbb{Z}})$, ${\mathcal T}(a)(\eta j)=\hat a(j)$
or, for every $j\in{\mathbb{Z}}/n{\mathbb{Z}}$ and every $a\in L^1({\mathbb{Z}}/n{\mathbb{Z}})$, ${\mathcal T}(a)(\eta j)=\overline{\hat a(j)}$.
\end{corollary}
\begin{proof}[Proof of Corollary \ref{cor:B}]
If ${\mathcal T}^2a(k)=a(-k)$ then if one of the identities holds, so does the second one, so that
Corollary \ref{cor:A} gives the result.
\end{proof}
\begin{proof}[Proof of Theorem \ref{th:A}]
The proof goes in several steps that are similar to those in \cite{AAAV2}. The first one consists in identifying
the image by ${\mathcal T}$ of some particular elements of $L^1({\mathbb{Z}}/n{\mathbb{Z}})$:
\noindent{\bf Step 1.} {\sl We have ${\mathcal T}(\delta_0)=\delta_0$, ${\mathcal T}(\mathbf{0})=\mathbf{0}$ and ${\mathcal T}(\mathbf{1})=\mathbf{1}$.
Moreover, there is a $k\in\{-1,1\}$ and an $\alpha\in{\mathbb{C}}$ with $\mbox{Re}\,\alpha>0$ such that,
if we define $\beta\,:{\mathbb{C}}\to{\mathbb{C}}$ by $\beta(0)=0$ and $\displaystyle\beta(c)=\left(\frac{c}{|c|}\right)^k|c|^\alpha$ for $c\not=0$, then ${\mathcal T}(c\mathbf{1})=\beta(c)\mathbf{1}$.}
Indeed, as ${\mathcal T}(a.b)={\mathcal T}(a).{\mathcal T}(b)$, we immediately get the following:
$$
{\mathcal T}(c_1c_2\mathbf{1})={\mathcal T}(c_1\mathbf{1}).{\mathcal T}(c_2\mathbf{1})\quad\mbox{and}\quad
{\mathcal T}(c_1\delta_j)={\mathcal T}(c_1\mathbf{1}).{\mathcal T}(\delta_j)
$$
while from ${\mathcal T}(a*b)={\mathcal T}(a)*{\mathcal T}(b)$ we deduce that
$$
{\mathcal T}(\delta_{j+k})={\mathcal T}(\delta_j)*{\mathcal T}(\delta_k)\quad\mbox{and}\quad
{\mathcal T}(a)={\mathcal T}(\delta_0*a)={\mathcal T}(\delta_0)*{\mathcal T}(a).
$$
Applying this last identity to $a={\mathcal T}^{-1}(\delta_0)$ we get $\delta_0={\mathcal T}(\delta_0)*\delta_0={\mathcal T}(\delta_0)$.
Further $a=a.\mathbf{1}$ thus ${\mathcal T}(a)={\mathcal T}(a).{\mathcal T}(\mathbf{1})$
and, applying this again to $a={\mathcal T}^{-1}(b)$, we have $b=b.{\mathcal T}(\mathbf{1})$ for all $b\in\ell^2_n$, thus
${\mathcal T}(\mathbf{1})=\mathbf{1}$.
Similarly, $\mathbf{0}=a.\mathbf{0}$ thus ${\mathcal T}(\mathbf{0})={\mathcal T}(a).{\mathcal T}(\mathbf{0})$ and, applying this to $a={\mathcal T}^{-1}(\mathbf{0})$ we get ${\mathcal T}(\mathbf{0})=\mathbf{0}.{\mathcal T}(\mathbf{0})=\mathbf{0}$.
Finally, ${\mathbb{E}}[a]\mathbf{1}=a*\mathbf{1}$ thus ${\mathcal T}({\mathbb{E}}[a]\mathbf{1})={\mathcal T}(a*\mathbf{1})={\mathcal T}(a)*\mathbf{1}={\mathbb{E}}[{\mathcal T}(a)]\mathbf{1}$.
As every $c\in{\mathbb{C}}$ may be written $c={\mathbb{E}}\left[\frac{c}{n}\mathbf{1}\right]$, we may define
$\beta(c)={\mathbb{E}}\left[{\mathcal T}\left(\frac{c}{n}\mathbf{1}\right)\right]$
so that ${\mathcal T}(c\mathbf{1})=\beta(c)\mathbf{1}$. Note that $\beta$ is continuous
since we have assumed that ${\mathcal T}$ acts continuously on constants and, as ${\mathcal T}$ is one-to-one, so is $\beta$.
Moreover, $\beta$ is multiplicative\,:
$$
\beta(c_1c_2)\mathbf{1}={\mathcal T}(c_1c_2\mathbf{1})
={\mathcal T}(c_1\mathbf{1}).{\mathcal T}(c_2\mathbf{1})=\beta(c_1)\beta(c_2)\mathbf{1}.
$$
It is then easy to check that there is a $k\in\{-1,1\}$ and an $\alpha\in{\mathbb{C}}$ with $\mbox{Re}\,\alpha>0$
such that $\beta(0)=0$ and $\beta(c)=\displaystyle\left(\frac{c}{|c|}\right)^k|c|^\alpha$.
We will now take care of the image of $\delta_j$, $j=0,\ldots,n-1$.
\noindent{\bf Step 2.} {\sl There is an $\eta\in\{1,\ldots,n-1\}$ with no common divisor with $n$
such that ${\mathcal T}(\delta_j)=\delta_{\eta j}$.}
Assume that there exist $k\not=\ell$ in ${\mathrm{supp}\,} {\mathcal T}(\delta_j)$, so that $\delta_k.{\mathcal T}(\delta_j)\not=\mathbf{0}$
and $\delta_\ell.{\mathcal T}(\delta_j)\not=\mathbf{0}$. Let $a={\mathcal T}^{-1}(\delta_k)$,
$b={\mathcal T}^{-1}(\delta_\ell)$, then
$$
a.\delta_j={\mathcal T}^{-1}(\delta_k).{\mathcal T}^{-1}\bigl({\mathcal T}(\delta_j)\bigr)
={\mathcal T}^{-1}\bigl(\delta_k.{\mathcal T}(\delta_j))\not={\mathcal T}^{-1}(\mathbf{0})
$$
since ${\mathcal T}$ is one-to-one. From Step 1, we know that ${\mathcal T}^{-1}(\mathbf{0})=\mathbf{0}$,
therefore $a.\delta_j\not=\mathbf{0}$.
For the same reason, $b.\delta_j\not=\mathbf{0}$. In particular, both $a$ and $b$ are non-zero at $j$, so that
$a.b\not=\mathbf{0}$, thus ${\mathcal T}(a.b)\not=\mathbf{0}$. But this contradicts ${\mathcal T}(a.b)={\mathcal T}(a).{\mathcal T}(b)=\delta_k.\delta_\ell=\mathbf{0}$, which holds since $k\not=\ell$.
It follows that, for each $j\in\{1,\ldots,n-1\}$, there exists $c_j\in{\mathbb{C}}\setminus\{0\}$
and $\sigma(j)\in\{0,\ldots,n-1\}$ such that ${\mathcal T}(\delta_j)=c_j\delta_{\sigma(j)}$.
But then
$$
\mathbf{1}={\mathcal T}(\mathbf{1})={\mathcal T}(\mathbf{1}*\delta_j)={\mathcal T}(\mathbf{1})*{\mathcal T}(\delta_j)
=c_j\mathbf{1}*\delta_{\sigma(j)}=c_j\mathbf{1}
$$
thus $c_j=1$. As ${\mathcal T}$ is one-to-one, it follows that $\sigma(j)\in\{1,\ldots,n-1\}$
and that $\sigma$ is a permutation.
Next
$$
\delta_{\sigma(j+k)}={\mathcal T}(\delta_{j+k})={\mathcal T}(\delta_j*\delta_k)={\mathcal T}(\delta_j)*{\mathcal T}(\delta_k)=\delta_{\sigma(j)}*
\delta_{\sigma(k)}=\delta_{\sigma(j)+\sigma(k)}.
$$
Thus $\sigma(j+k)=\sigma(j)+\sigma(k)$ and therefore $\sigma(j)=j\sigma(1)$. Further, the
fact that $\sigma$ is a permutation implies that $\sigma(1)$ has no common divisor with $n$
(Bezout's Theorem).
\noindent{\bf Step 3.} {\sl Conclusion.}
We can now prove that ${\mathcal T}$ is of the expected form: fix $j\in\{0,\ldots,n-1\}$ and
$a\in\ell^2_n$. Let $r=\sigma^{-1}(j)$ so that ${\mathcal T}(\delta_r)=\delta_j$. Then
\begin{eqnarray*}
{\mathcal T}(a)(j)\delta_j&=&{\mathcal T}(a).\delta_j={\mathcal T}(a).{\mathcal T}(\delta_r)={\mathcal T}(a.\delta_r)\\
&=&{\mathcal T}\bigl(a(r)\mathbf{1}.\delta_r\bigr)
=\beta\bigl(a(r)\bigr)\mathbf{1}.{\mathcal T}(\delta_r)\\
&=&\beta\bigl(a(r)\bigr)\delta_j.
\end{eqnarray*}
It follows that ${\mathcal T}(a)(j)=\beta\bigl(a\circ \sigma^{-1}(j)\bigr)=
\displaystyle\left(\frac{a\circ \sigma^{-1}(j)}{\abs{a\circ \sigma^{-1}(j)}}\right)^k|a\circ \sigma^{-1}(j)|^\alpha$. We want to prove that $\alpha=1$. But
\begin{eqnarray*}
{\mathbb{E}}[{\mathcal T}(a)]\mathbf{1}&=&{\mathcal T}(a)*\mathbf{1}={\mathcal T}(a)*{\mathcal T}(\mathbf{1})={\mathcal T}(a*\mathbf{1})={\mathcal T}({\mathbb{E}}[a]\mathbf{1})\\
&=&\left(\frac{{\mathbb{E}}[a]}{\abs{{\mathbb{E}}[a]}}\right)^k|{\mathbb{E}}[a]|^\alpha\mathbf{1}
\end{eqnarray*}
so that
${\mathbb{E}}[{\mathcal T}(a)]=\left(\frac{{\mathbb{E}}[a]}{\abs{{\mathbb{E}}[a]}}\right)^k|{\mathbb{E}}[a]|^\alpha$ or, in other words,
\begin{eqnarray*}
\sum_{\ell=0}^{n-1}\left(\frac{a(\ell)}{\abs{a(\ell)}}\right)^k\abs{a(\ell)}^\alpha
&=&\sum_{j=0}^{n-1}\left(\frac{a\bigl(\sigma^{-1}(j)\bigr)}{\abs{a\bigl(\sigma^{-1}(j)\bigr)}}\right)^k
\abs{a\bigl(\sigma^{-1}(j)\bigr)}^\alpha\\
&=&
\left(\frac{\sum_{j=0}^{n-1}a_j}{\abs{\sum_{j=0}^{n-1}a_j}}\right)^k\abs{\sum_{j=0}^{n-1}a_j}^\alpha.
\end{eqnarray*}
Taking $a(0)=1$, $a(1)=t>0$ and $a(j)=0$ for $j=2,\ldots,n-1$, this reduces to
$1+t^\alpha=(1+t)^\alpha$. This implies that $\alpha=1$ (which is most easily seen by differentiating and letting $t\to0$). It follows that $\beta(c)=c$ or $\bar c$ according to $k=1$ or $-1$.
\end{proof}
\begin{remark}
The proof adapts with no difficulty to any finite Abelian group. To prove the same result on ${\mathbb{Z}}$,
the best is to first compose ${\mathcal T}$ with a Fourier transform and then to adapt the proof
in \cite{AAAV2} from the real line to the torus. We refrain from giving the details here.
The lines of proof given here follow those given in \cite{AAAV2} (up to the ordering and the removal of technicalities that are useless in the finite group setting). The main difference is that we need to assume
that ${\mathcal T}$ acts continuously on constants. In \cite{AAAV2} this hypothesis is replaced by the fact that
${\mathcal T}$ sends smooth functions into smooth functions.
Finally, it should also be noted that Hypotheses (i) and (ii) are only used when either $a$ or $b$
is either a constant $c\mathbf{1}$ or a Dirac $\delta_j$.
\end{remark}
We will conclude this section with a Cooper-like theorem. Let us first introduce some notation:
\begin{notation}
Let $\varphi\,:{\mathbb{Z}}/n{\mathbb{Z}}\to {\mathbb{C}}$. For $k\in{\mathbb{Z}}/n{\mathbb{Z}}$, we define the two following linear operators $L^1({\mathbb{Z}}/n{\mathbb{Z}})\to L^1({\mathbb{Z}}/n{\mathbb{Z}})$\,:
$$
\tau_ka(j)=a(j+k)\quad\mbox{and}\quad M^{(\varphi)}_k a(j)=e^{k\varphi(j)}a(j).
$$
Note that actually $\varphi\,:{\mathbb{Z}}/n{\mathbb{Z}}\to {\mathbb{C}}/2i\pi{\mathbb{Z}}$.
As is well known, if $\varphi(j)=2i\pi j/n$, then for every $k\in{\mathbb{Z}}/n{\mathbb{Z}}$, ${\mathcal F}\tau_{-k}=M^{(\varphi)}_k{\mathcal F}$ and
${\mathcal F} M^{(\varphi)}_k=\tau_k{\mathcal F}$.
\end{notation}
We can now state the following:
\begin{theorem}
Let ${\mathcal T}\,:L^1({\mathbb{Z}}/n{\mathbb{Z}})\to L^1({\mathbb{Z}}/n{\mathbb{Z}})$ be a continuous linear operator such that there exist
two maps $\varphi,\psi\,:{\mathbb{Z}}/n{\mathbb{Z}}\to {\mathbb{C}}$ for which
$$
{\mathcal T}\tau_k=M_k^{(\varphi)}{\mathcal T}\quad\mbox{and}\quad {\mathcal T} M_k^{(\psi)}=\tau_k{\mathcal T}.
$$
Then there exist $k_0,m_0,m_1\in{\mathbb{Z}}/n{\mathbb{Z}}$, $c\in{\mathbb{C}}$ such that
$\varphi(j)=\displaystyle\frac{2i\pi}{n}(k_0j+m_0)$, $\psi(j)=\frac{2i\pi}{n}(-k_0j+m_1)$ and
$$
{\mathcal T}(a)(\ell)=ce^{2i\pi\ell m_1/n}\hat a(k_0\ell+m_0).
$$
\end{theorem}
\begin{proof} Without loss of generality, we may assume that ${\mathcal T}\not=0$.
First note that the conditions are equivalent to
\begin{equation}
\label{eq:cooper}
a)\ {\mathcal T}(\delta_k*a)(\ell)=e^{-k\varphi(\ell)}{\mathcal T}(a)(\ell)\quad\mbox{and}\quad b)\ {\mathcal T}(e^{-k\psi(\cdot)}a)=\delta_k*{\mathcal T}(a).
\end{equation}
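For the reader's convenience, here is the short computation behind this equivalence. Since $(\delta_k*a)(j)=a(j-k)=\tau_{-k}a(j)$, applying ${\mathcal T}\tau_{-k}=M^{(\varphi)}_{-k}{\mathcal T}$ gives
$$
{\mathcal T}(\delta_k*a)(\ell)={\mathcal T}(\tau_{-k}a)(\ell)=e^{-k\varphi(\ell)}{\mathcal T}(a)(\ell),
$$
which is a). Similarly, $e^{-k\psi(\cdot)}a=M^{(\psi)}_{-k}a$, so ${\mathcal T} M^{(\psi)}_{-k}=\tau_{-k}{\mathcal T}$ gives ${\mathcal T}(e^{-k\psi(\cdot)}a)=\tau_{-k}{\mathcal T}(a)=\delta_k*{\mathcal T}(a)$, which is b).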
Note that these two expressions are $n$-periodic in $k$ so that $\varphi$ and $\psi$ take their values
in $\{0,\frac{2i\pi}{n},\ldots,\frac{2i\pi(n-1)}{n}\}$.
First a) implies
$$
{\mathcal T}(\delta_j)(\ell)={\mathcal T}(\delta_j*\delta_0)(\ell)=e^{-j\varphi(\ell)}{\mathcal T}(\delta_0)(\ell).
$$
Next \eqref{eq:cooper} b) implies that
\begin{eqnarray*}
e^{-k\psi(j)}{\mathcal T}(\delta_j)(\ell)&=&{\mathcal T}(e^{-k\psi(j)}\delta_j)(\ell)={\mathcal T}(e^{-k\psi(\cdot)}\delta_j)(\ell)\\
&=&\delta_k*{\mathcal T}(\delta_j)(\ell)={\mathcal T}(\delta_j)(\ell-k).
\end{eqnarray*}
In particular, ${\mathcal T}(\delta_j)(\ell)=e^{\ell\psi(j)}{\mathcal T}(\delta_j)(0)$, thus
$$
{\mathcal T}(\delta_j)(\ell)=e^{\ell\psi(j)-j\varphi(0)}{\mathcal T}(\delta_0)(0).
$$
From linearity, we thus get that for $a\in\ell^2_n$
$$
{\mathcal T}(a)(\ell)=\sum_{j=0}^{n-1}a(j){\mathcal T}(\delta_j)(\ell)=
\left(\sum_{j=0}^{n-1}a(j)e^{\ell\psi(j)-j\varphi(0)}\right){\mathcal T}(\delta_0)(0).
$$
As we assumed that ${\mathcal T}\not=0$, we thus have ${\mathcal T}(\delta_0)(0)\not=0$. Then \eqref{eq:cooper} a) reads
$$
\sum_{j=0}^{n-1}a(j)e^{\ell\psi(j+k)-(j+k)\varphi(0)}=
\sum_{j=0}^{n-1}a(j)e^{\ell\psi(j)-j\varphi(0)-k\varphi(\ell)}
$$
thus $\ell\psi(j+k)-(j+k)\varphi(0)=\ell\psi(j)-j\varphi(0)-k\varphi(\ell)$ for all $j,k,\ell\in{\mathbb{Z}}/n{\mathbb{Z}}$
(modulo $2i\pi/n$).
Taking $k=1$, we get
$$
\varphi(\ell)-\varphi(0)=\bigl(\psi(j)-\psi(j+1)\bigr)\ell
$$
so that $\varphi$ and $\psi$ are ``affine''. More precisely, $\varphi(\ell)=\bigl(\psi(0)-\psi(1)\bigr)\ell+\varphi(0)$ modulo $2i\pi/n$ and, as $\varphi$ takes its values
in $\frac{2i\pi}{n}{\mathbb{Z}}/n{\mathbb{Z}}$,
$\varphi(\ell)=\frac{2i\pi}{n}(k_0\ell+m_0)$ (modulo $2i\pi/n$) with $k_0,m_0\in\{0,\ldots,n-1\}$.
Further $\psi(j+1)=\psi(j)+\varphi(0)-\varphi(1)$ thus $\psi(j)=\psi(0)+j\bigl(\varphi(0)-\varphi(1)\bigr)=\frac{2i\pi }{n}(-k_0j+m_1)$ (again modulo $2i\pi/n$).
We thus conclude that
$$
{\mathcal T}(a)(\ell)={\mathcal T}(\delta_0)(0)\,e^{2i\pi\frac{\ell m_1}{n}}\sum_{j=0}^{n-1}a(j)e^{-2i\pi\frac{k_0\ell+m_0}{n}j},
$$
which is the announced formula with $c={\mathcal T}(\delta_0)(0)$.
\end{proof}
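As a quick consistency check (not part of the original argument), operators of this form do satisfy the two intertwining relations. With the normalisation $\widehat{a}(j)=\sum_{t\in{\mathbb{Z}}/n{\mathbb{Z}}}a(t)e^{-2i\pi jt/n}$ one has $\widehat{\tau_k a}(j)=e^{2i\pi jk/n}\widehat{a}(j)$ and $\widehat{M^{(\psi)}_k a}(j)=e^{2i\pi km_1/n}\widehat{a}(j+kk_0)$, so that
$$
{\mathcal T}(\tau_k a)(\ell)=e^{\frac{2i\pi k}{n}(k_0\ell+m_0)}{\mathcal T}(a)(\ell)=M^{(\varphi)}_k{\mathcal T}(a)(\ell)
\quad\mbox{and}\quad
{\mathcal T}(M^{(\psi)}_k a)(\ell)={\mathcal T}(a)(\ell+k)=\tau_k{\mathcal T}(a)(\ell).
$$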
\section{The real line and the Torus}\label{sec:3}
We now consider the case $G={\mathbb{R}}^d$ resp. $G={\mathbb{T}}^d$ so that $\hat G={\mathbb{R}}^d$ resp. $\hat G={\mathbb{Z}}^d$. To simplify notation, we write $\mathcal{C}({\mathbb{Z}}^d)=L^\infty({\mathbb{Z}}^d)$.
\begin{theorem}\label{prop:fourR}
Let $d\geq 1$ be an integer and $G={\mathbb{R}}^d$ or $G={\mathbb{T}}^d$.
Let $T$ be a continuous linear operator $L^1(G)\to \mathcal{C}(\hat G)$ such that
$T(f*g)=T(f)T(g)$. Then there exist a set $E\subset \hat G$ and a function
$\varphi\,: \hat G\to \hat G$ such that $T(f)(\xi)=\chi_E(\xi)\widehat{f}\bigl(\varphi(\xi)\bigr)$.
\end{theorem}
\begin{proof}
Let us fix $\xi\in\hat G$ and consider the continuous linear functional $T_\xi$
on $L^1(G)$ given by $T_\xi(f)=T(f)(\xi)$. Then there exists a bounded function $h_\xi$ on $G$
such that $T_\xi(f)=\int_G f(t)h_\xi(t)\,\mbox{d}t$. There is no loss of generality in assuming that
$h_\xi\not=0$.
Let us now take $A,B$ to be sets of finite measure. Then Fubini's Theorem implies that
\begin{eqnarray}
\int_{A\times B}h_\xi(s+t)\,\mbox{d}s\,\mbox{d}t&=&\int_{G} \chi_A*\chi_B(t)h_\xi(t)\,\mbox{d}t
=T(\chi_A*\chi_B)(\xi)\nonumber\\
&=&T(\chi_A)(\xi)T(\chi_B)(\xi)=\int_Ah_\xi(t)\,\mbox{d}t\,\int_Bh_\xi(t)\,\mbox{d}t.\label{eq:ffi}
\end{eqnarray}
Now let $\varphi_n$ be defined on $G^2$ by
$$
\varphi_n(x,y)=\begin{cases}\bigl(h_\xi(x+y)-h_\xi(x)h_\xi(y)\bigr)\chi_{[-n,n]^d}(x)\chi_{[-n,n]^d}(y)
&\mbox{if }G={\mathbb{R}}^d\\
h_\xi(x+y)-h_\xi(x)h_\xi(y)&\mbox{if }G={\mathbb{T}}^d\end{cases}.
$$
As $\varphi_n$ is bounded (since $h_\xi$ is) and has compact support, $\varphi_n\in L^1(G^2)$
and \eqref{eq:ffi} implies that
$$
\int_{A\times B}\varphi_n(x,y)\,\mbox{d}x\,\mbox{d}y=0
$$
for all sets $A,B$ of finite measure, so that $\varphi_n=0$ for every $n$. That is
\begin{equation}
\label{eq:h}
h_\xi(x+y)=h_\xi(x)h_\xi(y)\quad\mbox{for almost every }x,y\in G.
\end{equation}
If $h_\xi$ were continuous, this would imply that $h_\xi(x)=e^{i\scal{a_\xi,x}}$ and, by boundedness of $h_\xi$,
that $a_\xi\in{\mathbb{R}}^d$. We will now overcome this difficulty by introducing
$$
H_{\xi,j}(x)=\int_0^xh_\xi(t\mathbf{e}_j)\,\mbox{d}t
$$
where $j=1,\ldots,d$ and $\mathbf{e}_j=(\delta_{j,k})_{k=1,\ldots,d}$ is the $j$-th vector in the standard basis.
Clearly $H_{\xi,j}$ is continuous and satisfies
$$
H_{\xi,j}(x)H_{\xi,j}(y)=\int_0^x\bigl(H_{\xi,j}(y+t)-H_{\xi,j}(t)\bigr)\,\mbox{d}t.
$$
From this, we immediately deduce that $H_{\xi,j}$ is smooth, that $H_{\xi,j}^\prime(t)=h_\xi(t\mathbf{e}_j)$
almost everywhere and that $H_{\xi,j}^\prime(x+y)=H_{\xi,j}^\prime(x)H_{\xi,j}^\prime(y)$ everywhere. Thus, for almost every $x\in{\mathbb{R}}$ or ${\mathbb{T}}$,
$h_\xi(x\mathbf{e}_j)=e^{ia_{\xi,j} x}$ with $a_{\xi,j}$ real. Finally for $x\in G$,
$$
h_\xi(x)=h_\xi(x_1\mathbf{e}_1+\cdots+x_d\mathbf{e}_d)=h_\xi(x_1\mathbf{e}_1)\cdots h_\xi(x_d\mathbf{e}_d)
=e^{i\scal{a_{\xi},x}}
$$
where $a_\xi=(a_{\xi,1},\ldots,a_{\xi,d})$.
We have thus proved that there exist a map $\varphi\,:\hat G\to\hat G$ and a set $E\subset\hat G$ such that
\begin{equation}
\label{eq:f}
Tf(\xi)=\chi_E(\xi)\widehat{f}\bigl(\varphi(\xi)\bigr)
\end{equation}
which completes the proof.
\end{proof}
\begin{remark}
If $T$ extends to a unitary operator from $L^2({\mathbb{R}}^d)$ onto $L^2({\mathbb{R}}^d)$ then $E={\mathbb{R}}^d$
and $\varphi\,:{\mathbb{R}}^d\to{\mathbb{R}}^d$ is a bijection and is measure preserving, {\it i.e.} $|\varphi^{-1}(A)|=|A|$
for every set $A\subset {\mathbb{R}}^d$ of finite measure. This last fact is a corollary of \cite{Si} ({\it see also} \cite{No}).
\end{remark}
Note that in this theorem, we have only used the $L^1-L^\infty$ duality to show that the operator
is a kernel operator. A slightly more evolved theorem allows one to obtain this directly. More precisely,
this is a consequence of the following theorem that dates back at least to Gelfand \cite{Ge}
and Kantorovich-Vullich \cite{KV} ({\it see} also \cite[Theorem 2.2.5]{DP} or \cite[Theorem 1.3]{AT}):
\begin{theorem}
Let $(\Omega_1,\mu_1)$ and $(\Omega_2,\mu_2)$ be two $\sigma$-finite measure spaces. There is a one-to-one correspondence
between bounded linear operators $T\,:L^1(\Omega_1)\to L^\infty(\Omega_2)$ and kernels $k\in L^\infty(\Omega_1\times\Omega_2)$. This correspondence is given by $T=T_k$ where $T_k$ is defined by
$$
T_kf(\omega)=\int_{\Omega_1}k(\zeta,\omega)f(\zeta)\,\mbox{d}\mu_1(\zeta),\quad f\in L^1(\Omega_1).
$$
\end{theorem}
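Let us also record (a standard fact, added here for context) that this correspondence is isometric: $\norm{T_k}_{L^1(\Omega_1)\to L^\infty(\Omega_2)}=\norm{k}_{L^\infty(\Omega_1\times\Omega_2)}$, the inequality $\leq$ being immediate from $|T_kf(\omega)|\leq\norm{k}_\infty\norm{f}_1$.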
It follows that this proposition then essentially reduces to the results in \cite{Lu1,Lu2}.
However, a non-explicit condition in those papers is that $k$ be defined everywhere, as it is applied to Dirac masses.
\section{The twisted convolution}
In this section, we consider the case of the twisted convolution (for background on this transform we refer to \cite{Fo}). Recall that this is defined for
$f,g\in L^1({\mathbb{R}}^{2d})$ by
$$
f{\natural\,} g(x,y)=
\int_{{\mathbb{R}}^{d}}\int_{{\mathbb{R}}^{d}}f(x-s,y-t)g(s,t)e^{i\pi(\scal{x,t}-\scal{y,s})}\,\mbox{d}s\,\mbox{d}t.
$$
This defines a new $L^1({\mathbb{R}}^{2d})$ function.
Note also that this operation is non commutative.
Next, for $p,q\in{\mathbb{R}}^d$, let us define the following operator that acts on functions on ${\mathbb{R}}^d$:
$$
\rho(p,q)\varphi(x)=e^{2i\pi \scal{q,x}+i\pi\scal{p,q}}\varphi(x+p).
$$
For $f\in L^1({\mathbb{R}}^d\times{\mathbb{R}}^d)$ we define the (bounded linear) operator on $L^1({\mathbb{R}}^d)\to L^1({\mathbb{R}}^d)$
\begin{eqnarray*}
\rho(f)\varphi(x)&=&\int_{{\mathbb{R}}^d}\int_{{\mathbb{R}}^d}f(p,q)\rho(p,q)\varphi(x)\,\mbox{d}p\,\mbox{d}q\\
&=&\int_{{\mathbb{R}}^d}K_f(x,y)\varphi(y)\,\mbox{d}y
\end{eqnarray*}
where $\displaystyle K_f(x,y)=\int_{{\mathbb{R}}^d}f(y-x,q)e^{i\pi\scal{q,x+y}}\,\mbox{d}q={\mathcal F}_2^{-1}[f]\left(y-x,\frac{x+y}{2}\right)$
and ${\mathcal F}_2$ stands for the Fourier transform in the second variable.
One then checks through a cumbersome computation that $\rho(f{\natural\,} g)=\rho(f)\rho(g)$ (here the product stands for composition of operators) or, for the kernels
$$
K_{f{\natural\,} g}(x,y)=\int_{{\mathbb{R}}^d}K_f(x,z)K_g(z,y)\,\mbox{d}z.
$$
\noindent{\bf Question.} {\sl To what extent does this characterize the transform $f\mapsto\rho(f)$?}
\end{document}
\begin{document}
\title[The asymptotic profile of an eta-theta quotient]{The asymptotic profile of an eta-theta quotient related to entanglement entropy in string theory}
\author{Joshua Males}
\address{University of Cologne, Department of Mathematics and Computer Science, Weyertal 86-90, 50931
Cologne, Germany}
\email{[email protected]}
\begin{abstract}
In this paper we investigate a certain eta-theta quotient which appears in the partition function of entanglement entropy. Employing Wright's circle method, we give its bivariate asymptotic profile.
\end{abstract}
\blfootnote{Mathematics Subject Classification 2010: 11F50}
\blfootnote{\textit{Keywords}: Eta-theta quotient; asymptotic profile; Wright's circle method.}
\maketitle
\section{Introduction and Statement of Results}
Modern mathematical physics in the direction of string theory and black holes is intricately linked to number theory. For example, work of Dabholkar, Murthy, and Zagier relates certain mock modular forms to physical phenomena such as quantum black holes and wall crossing \cite{dabholkar2012quantum}. Similarly, the connections between automorphic forms and a second quantised string theory are described in \cite{dijkgraaf1997elliptic}, and modular forms for certain elliptic curves and their realisation in string theory are discussed in \cite{kondo2019string}. Further, the recent paper \cite{harvey2019ramanujans} discusses in-depth the links between work of the enigmatic Ramanujan in relation to modular forms and their generalisations and string theoretic objects (and indeed, why such links should be expected).
Knowledge of the behaviour of the modular objects aids the descriptions of physical phenomena. For instance, in \cite{gliozzi1977supersymmetry}, the authors use the classical number-theoretic Jacobi triple product identity to demonstrate the supersymmetry of the open-string spectrum using RNS fermions in light-cone gauge (see also \cite{witten2019open}).
In particular, parts of physical partition functions are often modular or mock modular objects. For example, the partition functions of the Melvin model \cite{RUSSO1996131} and the conical entropy of both the open and closed superstring \cite{he2015notes} both involve the weight $-3$ and index $0$ meromorphic Jacobi form
\begin{equation*}
f(z;\tau) \coloneqq \frac{\vartheta(z;\tau)^4}{ \eta(\tau)^9 \vartheta(2z;\tau)},
\end{equation*}
where $\eta$ is the Dedekind eta function given by
\begin{equation*}
\eta(\tau) \coloneqq q^{\frac{1}{24}} \prod_{n \geq 1} \left( 1-q^n \right),
\end{equation*}
and
\begin{equation*}
\vartheta(z;\tau) \coloneqq i \zeta^{\frac{1}{2}} q^{\frac{1}{8}} \prod_{n\geq 1} (1-q^n)(1-\zeta q^n)(1-\zeta^{-1} q^{n-1})
\end{equation*}
is the Jacobi theta function, with $\zeta \coloneqq e^{2 \pi i z}$ for $z \in \mathbb{C}$, and $q \coloneqq e^{2 \pi i \tau}$ with $\tau \in \mathbb{H}$, the upper half-plane.
We are particularly interested in the coefficients of the $q$-expansion of $f$ where $0 \leq z \leq 1$, away from the pole at $z = 1/2$, where the residue of $f$ is calculated in \cite{witten2019open} - the other residues may be calculated using the elliptic transformation formulae for $f$. For instance, the asymptotic behaviour of the coefficients is required in order to investigate the UV limit. For a fixed value of $z$ the problem of finding the asymptotics of the coefficients is elementary, as \cite{he2015notes} notes. In particular, fixing $z = \frac{h}{k}$ a rational number with $\gcd(h,k) = 1$ and $0 \leq h < \frac{k}{2}$, then classical results in the theory of modular forms (see Theorem 15.10 of \cite{bringmann2017harmonic} for example) give that the coefficients of $f(\frac{h}{k};\tau) = \sum_{n \geq 0} a_{h,k}(n)q^n$ behave asymptotically as
\begin{equation*}
a_{h,k}(n) \sim \frac{\left( \frac{h}{k} \right)^{\frac{7}{4}}}{2 \sqrt{2} \pi} n^{-\frac{9}{4}} e^{4 \pi \sqrt{ \frac{hn}{k}}}.
\end{equation*}
In the present paper, we let
\begin{equation*}
f(z;\tau) \eqqcolon \sum_{\substack{n \geq 0 \\ m \in \mathbb{Z}}} b(m,n) \zeta^m q^n,
\end{equation*}
and investigate the coefficients $b(m,n)$; in particular we want to compute the bivariate asymptotic profile of $b(m,n)$ for a certain range of $m$.
In \cite{bringmann2016dyson}, the authors introduce techniques in order to compute the bivariate asymptotic behaviour of coefficients for a Jacobi form in order to answer Dyson's conjecture on the bivariate asymptotic behaviour of the partition crank. This method is used in numerous other papers - for example, in relation to the rank of a partition \cite{dousse2014asymptotic}, ranks and cranks of cubic partitions \cite{kim2016asymptotic}, and certain genera of Hilbert schemes \cite{manschot2014asymptotic} (a result that has recently been extended to a complete classification with exact formulae using the Hardy-Ramanujan circle method \cite{gillman2019partitions}), along with many other partition-related statistics.
Using Wright's circle method \cite{wright1934asymptotic,wright1971stacks} and following the same approach as \cite{bringmann2016dyson} we show the following theorem.
\begin{theorem}\label{Theorem: main}
For $\beta \coloneqq \pi \sqrt{\frac{2}{n}}$ and $|m| \leq \frac{1}{6 \beta} \log(n)$ we have that
\begin{equation*}
b(m,n) =(-1)^{m+\delta+\frac{3}{2}} \frac{\beta^6 m}{8 \pi^5 (2n)^{\frac{1}{4} }} e^{2 \pi \sqrt{2n}} + O \left( m n^{-\frac{15}{4}} e^{2 \pi \sqrt{2n}} \right)
\end{equation*}
as $n \rightarrow \infty$. Here, $\delta \coloneqq 1$ if $m <0$ and $\delta = 0$ otherwise.
\end{theorem}
\begin{remark}
Although our approach is similar to \cite{bringmann2016dyson,dousse2014asymptotic}, in some places we require a little more care since finding the Fourier coefficients requires taking an integral over a path where $f$ has a pole. In this case, we turn to the framework of \cite{dabholkar2012quantum} - this is explained explicitly in Section \ref{Section: asymptotics}.
\end{remark}
We begin in Section \ref{Section: prelims} by recalling relevant results that are pertinent to the rest of the paper. In Section \ref{Section: bounds toward dominant pole} we investigate the behaviour of $f$ toward the dominant pole $q = 1$. We follow this in Section \ref{Section: bounds away from dominant pole} by bounding the contribution away from the pole at $q=1$. We finish in Section \ref{Section: Circle method} by applying Wright's circle method to find the asymptotic behaviour of $b(m,n)$ and hence prove Theorem \ref{Theorem: main}.
\section{Preliminaries}\label{Section: prelims}
Here we recall relevant definitions and results which will be used throughout the rest of the paper.
\subsection{Properties of $\vartheta$ and $\eta$}
When determining the asymptotic behaviour of $f$ we will require the modularity behaviour of both $\vartheta$ and $\eta$. It is well-known that $\vartheta$ satisfies the following lemma (see e.g. \cite{mumford2007tata}).
\begin{lemma}\label{Lemma: transformation of theta}
The function $\vartheta$ satisfies the following transformation properties.\\
\begin{enumerate}
\item $\vartheta(-z ; \tau) = -\vartheta(z;\tau)$\\
\item $\vartheta(z+1;\tau) = -\vartheta(z;\tau)$\\
\item $ \vartheta(z; \tau) = \frac{i}{\sqrt{-i \tau}} e^{ \frac{- \pi i z^2}{\tau}} \vartheta\left( \frac{z}{\tau} ; -\frac{1}{\tau} \right)$
\end{enumerate}
\end{lemma}
Further, we have the following well-known modular transformation formula of $\eta$ (see e.g. \cite{siegel_1954}).
\begin{lemma}\label{Lemma: transformation of eta}
We have that
\begin{equation*}
\begin{split}
\eta\left( -\frac{1}{\tau} \right) = \sqrt{-i\tau} \eta(\tau).
\end{split}
\end{equation*}
\end{lemma}
\subsection{Euler Polynomials}
We will also make use of properties of the Euler polynomials $E_r$, defined by the generating function
\begin{equation*}
\frac{2e^{xt}}{1+e^t} \eqqcolon \sum_{r \geq 0} E_r(x) \frac{t^r}{r!}.
\end{equation*}
Lemma 2.2 of \cite{bringmann2016dyson} shows that the following lemma holds.
\begin{lemma}\label{Lemma: sech in terms of E}
We have
\begin{equation*}
-\frac{1}{2} \textnormal{sech}^2 \left( \frac{t}{2} \right) = \sum_{r \geq 0} E_{2r+1} (0) \frac{t^{2r}}{(2r)!}.
\end{equation*}
\end{lemma}
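As a quick sanity check of the normalisation (these values are classical), the first odd Euler polynomials evaluated at $0$ are
\begin{equation*}
E_1(0)=-\frac{1}{2}, \qquad E_3(0)=\frac{1}{4}, \qquad E_5(0)=-\frac{1}{2},
\end{equation*}
so the right-hand side of the lemma begins $-\frac{1}{2}+\frac{t^2}{8}-\frac{t^4}{48}+\cdots$, in agreement with the Taylor expansion of $-\frac{1}{2}\textnormal{sech}^2\left(\frac{t}{2}\right)$.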
Further, Lemma 2.3 of \cite{bringmann2016dyson} gives the following integral representation for the Euler polynomials.
\begin{lemma}
We have that
\begin{equation*}
\mathcal{E}_j \coloneqq \int_{0}^{\infty} \frac{w^{2j+1}}{\sinh(\pi w)} dw = \frac{(-1)^{j+1} E_{2j+1} (0)}{2}.
\end{equation*}
\end{lemma}
\subsection{A particular bound}
In Section \ref{Section: bounds away from dominant pole} we require a bound on the size of
\begin{equation*}
P(q) \coloneqq \frac{q^{\frac{1}{24}}}{\eta(\tau)},
\end{equation*}
away from the pole at $q=1$. For this we use the following lemma which is shown to hold in Lemma 3.5 of \cite{bringmann2016dyson}.
\begin{lemma}
Let $\tau = u +iv \in \mathbb{H}$ with $Mv \leq u \leq \frac{1}{2}$ for $u>0$ and $v \rightarrow 0$. Then
\begin{equation*}
|P(q)| \ll \sqrt{v} \exp \left[ \frac{1}{v} \left(\frac{\pi}{12} - \frac{1}{2\pi} \left(1- \frac{1}{\sqrt{1+M^2}}\right)\right) \right].
\end{equation*}
\end{lemma}
In particular, with $v = \frac{\beta}{2\pi}$, $u=\frac{\beta m^{-\frac{1}{3}} x}{2 \pi}$ and $M = m^{-\frac{1}{3}}$ this gives for $1 \leq x \leq \frac{\pi m^{\frac{1}{3}}}{\beta}$ the bound
\begin{equation}\label{Equation: bound on P(q)}
|P(q)| \ll n^{-\frac{1}{4}} \exp \left[ \frac{2 \pi}{\beta} \left( \frac{\pi}{12} - \frac{1}{2\pi} \left( 1- \frac{1}{\sqrt{1 + m^{-\frac{2}{3}}}} \right) \right) \right].
\end{equation}
\subsection{$I$-Bessel functions}
Here we recall relevant results on the $I$-Bessel function defined by
\begin{equation*}
I_{\ell} (x) \coloneqq \frac{1}{2 \pi i} \int_{\Gamma} t^{-\ell - 1}e^{\frac{x}{2} \left(t+\frac{1}{t} \right)} dt,
\end{equation*}
where $\Gamma$ is a contour which starts in the lower half plane at $-\infty$, surrounds the origin counterclockwise and returns to $-\infty$ in the upper half-plane. We are particularly interested in the asymptotic behaviour of $I_\ell$, given in the following lemma (see e.g. (4.12.7) of \cite{andrews_askey_roy_1999}).
\begin{lemma}\label{Lemma: asymptotic of I Bessel}
For fixed $\ell$ we have
\begin{equation*}
I_\ell(x) = \frac{e^x}{\sqrt{2 \pi x}} + O\left(\frac{e^x}{x^{\frac{3}{2}}}\right)
\end{equation*}
as $x \rightarrow \infty$.
\end{lemma}
We also require the behaviour of an integral related to the $I$-Bessel function. Define
\begin{equation*}
P_{s} \coloneqq \frac{1}{2 \pi i} \int_{1-im^{-\frac{1}{3}}}^{1+im^{-\frac{1}{3}}} v^s e^{\pi \sqrt{2n} \left(v + \frac{1}{v}\right)} dv.
\end{equation*}
Then Lemma 4.2 of \cite{bringmann2016dyson} reads as follows.
\begin{lemma}\label{Lemma: P in terms of I Bessel}
For $|m| \leq \frac{1}{6 \beta} \log(n)$ we have
\begin{equation*}
P_s = I_{-s-1}\left( 2 \pi \sqrt{2n}\right) + O\left( \exp\left(\pi \sqrt{2n}\left(1+ \frac{1}{1+ |m|^{-\frac{2}{3}}}\right)\right) \right)
\end{equation*}
as $n \rightarrow \infty$.
\end{lemma}
\section{Asymptotic behaviour of $f$}\label{Section: asymptotics}
The aim of this Section is to determine the asymptotic behaviour of $f$. To do so we consider two separate cases: when $q$ tends toward the pole $q=1$, and when $q$ is away from this pole. It is shown that the behaviour toward the pole at $q=1$ gives the dominant contribution when applying the circle method in Section \ref{Section: Circle method}.
First note that Lemma \ref{Lemma: transformation of theta} implies that $f(-z;\tau) = -f(z;\tau)$, and so $b(-m,n) = -b(m,n)$. We now restrict our attention to the case of $m\geq 0$.
We next find the Fourier coefficient of $\zeta^m$ of $f$, following the framework of \cite{dabholkar2012quantum}. Since there is a pole of $f$ at $z = \frac{1}{2}$, we define
\begin{equation*}
\begin{split}
f_m^\pm (\tau) \coloneqq & \int_{0}^{\frac{1}{2} - a} f(z;\tau) e^{-2 \pi i m z} dz + \int_{\frac{1}{2} + a}^{1} f(z;\tau) e^{-2 \pi i m z} dz + G^\pm \\
= & -2i \int_{0}^{\frac{1}{2} - a} f(z;\tau) \sin(2 \pi m z) dz + G^\pm,
\end{split}
\end{equation*}
where $a>0$ is small, and
\begin{equation*}
G^\pm \coloneqq \int_{\frac{1}{2}-a}^{\frac{1}{2}+a} f(z;\tau) e^{-2 \pi i m z} dz.
\end{equation*}
For $G^+$ the integral is taken over a semi-circular path passing above the pole. Similarly, $G^-$ is taken over a semi-circular path passing below the pole. Then the Fourier coefficient of $\zeta^m$ of $f$ is
\begin{equation}\label{Equation: f_m split into two integrals}
f_m (\tau) \coloneqq \frac{f_m^+ + f_m^-}{2} = -2 i \lim_{a \rightarrow 0^+} \int_{0}^{\frac{1}{2} - a} f(z;\tau) \sin(2 \pi m z) dz + \frac{G^+ +G^-}{2}.
\end{equation}
Letting $z \mapsto 1-z$ maps the path of integration from $0$ to $1$ passing above the pole to the path from $1$ to $0$ passing below the pole. One then sees directly that
\begin{align}\label{f_m(tau)}
f_m (\tau) = -2 i \int_{0}^{\frac{1}{2}} f(z;\tau) \sin(2 \pi m z) dz.
\end{align}
In the following two subsections we determine the asymptotic behaviour of $f$ toward and away from the dominant pole at $q=1$ respectively. Throughout, we will let $\tau = \frac{i \varepsilon}{2 \pi}$, $\varepsilon \coloneqq \beta(1 + ix m^{-\frac{1}{3}})$ and $\beta \coloneqq \pi \sqrt{\frac{2}{n}}$. We determine asymptotics as $n \rightarrow \infty$.
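It may help to record two elementary consequences of this parametrisation (a direct computation, stated here for later convenience):
\begin{equation*}
\textnormal{Re}(\varepsilon)\, n=\beta n=\pi\sqrt{2n}, \qquad \textnormal{Re}\left(\frac{1}{\varepsilon}\right)=\frac{1}{\beta\left(1+x^2m^{-\frac{2}{3}}\right)}.
\end{equation*}
In particular, at the centre of the major arc ($x=0$) we have $\varepsilon n+\frac{2\pi^2}{\varepsilon}=2\pi\sqrt{2n}$, which is the exponential growth appearing in Theorem \ref{Theorem: main}.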
\subsection{Bounds towards the dominant pole}\label{Section: bounds toward dominant pole}
Here we find the asymptotic behaviour of $f$ toward the dominant pole at $q=1$, shown in the following lemma.
\begin{lemma}\label{Lemma: main asmyptotic term of f at 0}
Let $\tau = \frac{i \varepsilon}{2\pi}$, with $0 < \textnormal{Re}(\varepsilon) \ll 1$, and $0 < z < \frac{1}{2}$. Then we have that
\begin{equation*}
f\left(z;\frac{i \varepsilon}{2 \pi} \right) = -\frac{\varepsilon^3}{\pi^3} \frac{\sinh\left( \frac{2 \pi^2 z}{\varepsilon} \right)^4}{ \sinh\left( \frac{4 \pi^2 z}{\varepsilon} \right) } \left( 1 + e^{ -4\pi^2 \textnormal{Re}\left(\frac{1}{\varepsilon}\right) (1-2z)} + O\left( e^{ -4\pi^2 \textnormal{Re}\left(\frac{1}{\varepsilon}\right) (1-z) } \right) \right).
\end{equation*}
\end{lemma}
\begin{proof}
Using the modularity of $f$ (which follows from Lemmas \ref{Lemma: transformation of theta} and \ref{Lemma: transformation of eta}) and setting $q_0 \coloneqq e^{-\frac{2 \pi i}{\tau}}$, we have that
\begin{equation*}
\begin{split}
f(z;\tau) & = \frac{\tau^3 \zeta^{\frac{2}{\tau}} \prod_{n \geq 1} \left(1 - \zeta^{\frac{1}{\tau}} q_0^n\right)^4 \left( 1-\zeta^{-\frac{1}{\tau}} q_0^{n-1} \right)^4 }{ i \zeta^{\frac{1}{\tau}} \prod_{n \geq 1} \left( 1-q_0^n \right)^6 \left( 1-\zeta^{\frac{2}{\tau}} q_0^n \right) \left( 1-\zeta^{\frac{-2}{\tau}} q_0^{n-1} \right) } \\
& = \frac{\tau^3 \left( \zeta^{\frac{1}{2\tau}} - \zeta^{-\frac{1}{2\tau}} \right)^4 }{ i \left( \zeta^{\frac{1}{\tau}} - \zeta^{\frac{-1}{\tau}} \right) } \prod_{n \geq 1} \frac{ \left(1 - \zeta^{\frac{1}{\tau}} q_0^n\right)^4 \left( 1-\zeta^{-\frac{1}{\tau}} q_0^n \right)^4 }{ \left( 1-q_0^n \right)^6 \left( 1-\zeta^{\frac{2}{\tau}} q_0^n \right) \left( 1-\zeta^{\frac{-2}{\tau}} q_0^n \right) }.
\end{split}
\end{equation*}
This gives
\begin{equation*}
-\frac{\varepsilon^3}{ \pi^3} \frac{\sinh\left( \frac{2 \pi^2 z}{\varepsilon} \right)^4}{ \sinh\left( \frac{4 \pi^2 z}{\varepsilon} \right) } \prod_{n \geq 1} \frac{ \left(1 - e^{\frac{4\pi^2}{\varepsilon}(z-n) }\right)^4 \left( 1-e^{\frac{4\pi^2}{\varepsilon}(-z-n) } \right)^4 }{ \left( 1-e^{\frac{-4\pi^2 n}{\varepsilon} } \right)^6 \left( 1-e^{\frac{4\pi^2}{\varepsilon}(2z-n) } \right) \left( 1-e^{\frac{4\pi^2}{\varepsilon}(-2z-n) } \right) }.
\end{equation*}
In order to find a bound we expand the denominator using geometric series. For $0 < z < \frac{1}{2}$ we see that $| e^{\frac{4\pi^2}{\varepsilon} (\pm 2z - n)} |<1$ for all $n \geq 1$, and so we expand the denominator to obtain the product as
\begin{equation*}
\prod_{n \geq 1} \left(1- e^{\frac{4\pi^2 }{\varepsilon} (z-n)} \right)^4 \left( 1 - e^{\frac{-4\pi^2 }{\varepsilon} (z+n) } \right)^4 \sum_{j \geq 0} e^{\frac{4 j \pi^2}{\varepsilon} (2z - n)} \sum_{k \geq 0} e^{\frac{- 4 k \pi^2}{\varepsilon} (2z + n)} \left(\sum_{\ell \geq 0} e^{\frac{-4 \pi^2 \ell n}{\varepsilon}} \right)^6,
\end{equation*}
which, for $0 < \textnormal{Re}(\varepsilon) \ll 1$, is of order
\begin{equation*}
1 + e^{ -4\pi^2 \textnormal{Re}\left(\frac{1}{\varepsilon}\right) (1-2z)} + O\left( e^{ -4\pi^2 \textnormal{Re}\left(\frac{1}{\varepsilon}\right) (1-z) } \right).
\end{equation*}
Hence overall we find that
\begin{equation*}
f\left(z;\frac{i \varepsilon}{2 \pi} \right) = -\frac{\varepsilon^3}{\pi^3} \frac{\sinh\left( \frac{2 \pi^2 z}{\varepsilon} \right)^4}{ \sinh\left( \frac{4 \pi^2 z}{\varepsilon} \right) } \left( 1 + e^{ -4\pi^2 \textnormal{Re}\left(\frac{1}{\varepsilon}\right) (1-2z)} + O\left( e^{ -4\pi^2 \textnormal{Re}\left(\frac{1}{\varepsilon}\right) (1-z) } \right) \right),
\end{equation*}
yielding the claim.
\end{proof}
\begin{remark}
It is easy to see that this gives the same main term as noted in Section 4.5 of \cite{he2015notes} (up to sign, which the authors there do not make use of).
\end{remark}
Since $f(z;\tau) = - f(1-z;\tau)$ we see this immediately also implies the following lemma.
\begin{lemma}
Let $\tau = \frac{i \varepsilon}{2\pi}$, with $0 < \textnormal{Re}(\varepsilon) \ll 1$, and $\frac{1}{2} < z < 1$. Then we have that
\begin{equation*}
f\left(z;\frac{i \varepsilon}{2 \pi} \right) = \frac{\varepsilon^3}{\pi^3} \frac{\sinh\left( \frac{2 \pi^2 (1-z)}{\varepsilon} \right)^4}{ \sinh\left( \frac{4 \pi^2 (1-z)}{\varepsilon} \right) } \left( 1 + e^{ -4\pi^2 \textnormal{Re}\left(\frac{1}{\varepsilon}\right) (2z-1)} + O\left( e^{ -4\pi^2 \textnormal{Re}\left(\frac{1}{\varepsilon}\right) z } \right) \right).
\end{equation*}
\end{lemma}
We then obtain the following theorem.
\begin{theorem}\label{Theorem: behaviour of f_m at dominant pole}
For $|x| \leq 1$ we have that
\begin{equation*}
f_m\left( \frac{i \varepsilon}{2 \pi} \right) = (-1)^{m+\frac{3}{2}} \frac{m}{4 \pi^4} \varepsilon^5 e^{\frac{2\pi^2}{\varepsilon}} +O(\varepsilon^3)
\end{equation*}
as $n \rightarrow \infty$.
\end{theorem}
\begin{proof}
We have
\begin{align*}
-\frac{\varepsilon^3}{\pi^3} \frac{\sinh\left(\frac{2\pi^2z}{\varepsilon}\right)^4}{\sinh\left(\frac{4\pi^2z}{\varepsilon}\right)} =- \frac{\varepsilon^3}{\pi^3} e^{\frac{4\pi^2z}{\varepsilon}} \left(1+O(e^{-\frac{4 \pi^2z}{\varepsilon}}) \right).
\end{align*}
Plugging this into \eqref{f_m(tau)} and integrating explicitly, using the formula
\begin{align*}
\int_0^{\frac{1}{2}} e^{xz} \sin(2\pi m z) dz = - \frac{2 \pi m}{4\pi^2 m^2 + x^2} e^{\frac{x}{2}} \cos(\pi m) + \frac{2 \pi m}{4\pi^2m^2 + x^2},
\end{align*}
shows directly that
\begin{align*}
f_m(\tau) = & -2\frac{\varepsilon^3}{\pi^3}(-1)^{m+\frac{1}{2}} \frac{2 \pi m}{\left(\frac{16 \pi^2}{\varepsilon^2}\right)} e^{\frac{2 \pi^2}{\varepsilon}} + O(\varepsilon^3)= (-1)^{m+\frac{3}{2}} \frac{m}{4 \pi^4} \varepsilon^5 e^{\frac{2\pi^2}{\varepsilon}} +O(\varepsilon^3).
\end{align*}
\end{proof}
\subsection{Bounds away from the dominant pole}\label{Section: bounds away from dominant pole}
We next investigate the behaviour of $f_m$ away from the pole $q=1$, by assuming that $1 \leq x \leq \frac{\pi m^{\frac{1}{3}}}{\beta}$. In the following lemma we bound the term
\begin{equation*}
\frac{\eta(2\tau)^8}{\eta(\tau)^{16}},
\end{equation*}
away from the pole $q = 1$.
\begin{lemma}
For $1 \leq x \leq \frac{\pi m^{\frac{1}{3}}}{\beta}$ we have that
\begin{equation*}
\left| \frac{\eta\left( \frac{i \varepsilon}{\pi}\right)^8}{\eta\left( \frac{i \varepsilon}{2 \pi} \right)^{16}} \right| \ll n^{-2} \exp\left[ \pi \sqrt{2n} - \frac{8 \sqrt{2n}}{\pi} \left(1 - \frac{1}{\sqrt{1+ m^{-\frac{2}{3}}}}\right) \right]
\end{equation*}
as $n \rightarrow \infty$.
\end{lemma}
\begin{proof}
We first write
\begin{equation*}
\frac{\eta(2\tau)^8}{\eta(\tau)^{16}} = \frac{\eta(2 \tau)^8}{q^{\frac{2}{3}}} \frac{q^{\frac{2}{3}}}{\eta(\tau)^{16}}.
\end{equation*}
Using equation \eqref{Equation: bound on P(q)} directly we find that (with $\tau = \frac{i \varepsilon}{2 \pi}$)
\begin{equation*}
\frac{q^{\frac{2}{3}}}{\eta(\tau)^{16}} = P(q)^{16} \ll n^{-4} \exp\left[ \frac{ 4 \pi \sqrt{2n}}{3} - \frac{8 \sqrt{2n}}{\pi} \left(1 - \frac{1}{\sqrt{1+ m^{-\frac{2}{3}}}}\right) \right].
\end{equation*}
It remains to consider the behaviour of $e^{\varepsilon/12} \eta(\frac{i\varepsilon}{ \pi})$. Using the transformation formula of $\eta$ given in Lemma \ref{Lemma: transformation of eta} along with the well-known summation representation of $\eta$, we see that as $n \rightarrow \infty$ we obtain
\begin{align*}
e^{\frac{\varepsilon}{12}} \eta\left(\frac{i\varepsilon}{ \pi} \right) = e^{\frac{\varepsilon}{12}} \sqrt{\frac{ \pi}{\varepsilon}} e^{-\frac{\pi^2}{12 \varepsilon}} \sum_{j \in \mathbb{Z}} (-1)^j e^{- \frac{ \pi^2(3j^2 - j)}{\varepsilon} } \ll \sqrt{\frac{ \pi}{\varepsilon}} e^{-\frac{\pi^2}{12 \varepsilon}}.
\end{align*}
We hence have
\begin{align*}
\left| \frac{\eta\left(\frac{i \varepsilon}{ \pi}\right)}{e^{\frac{2\varepsilon}{3}}} \right|^8 \ll \left| \sqrt{\frac{ \pi}{\varepsilon}} e^{-\frac{\pi^2}{12 \varepsilon}} \right|^8 \ll \left(\frac{ \pi}{\beta}\right)^4 e^{-\frac{2\pi^2}{3 \beta}} \ll n^{2} e^{-\frac{\pi \sqrt{2n}}{3}}.
\end{align*}
Combining the two bounds yields the result.
\end{proof}
Next, we investigate the contribution of
\begin{equation*}
\left| \int_{0}^{\frac{1}{2} - a} f(z;\tau) \sin(2\pi m z) dz \right| \ll \int_{0}^{\frac{1}{2} - a} |f(z;\tau) \sin(2 \pi m z)| dz.
\end{equation*}
Then we want to bound
\begin{equation*}
\begin{split}
|f(z;\tau) \sin(2\pi m z)| = \left| \frac{ \sin(2 \pi m z) \vartheta(z;\tau)^4}{\eta(\tau)^9 \vartheta(2z;\tau)} \right|
\end{split}
\end{equation*}
away from the dominant pole. For $0 < b < \frac{1}{2}$ far from $\frac{1}{2}$ we see that we may bound the integrand in modulus by
\begin{equation*}
|f(b;\tau) \sin(2 \pi m b)| \ll |P(q)|^9 \left| q^{-\frac{3}{8}} \frac{\vartheta(b;\tau)^4}{\vartheta(2b;\tau)} \right| \ll |P(q)|^9 \sum_{n \in \mathbb{Z}} |q|^{\frac{n^2 +n}{2}} \ll |P(q)|^9 \sum_{n \in \mathbb{Z}} e^{-\beta n^2}.
\end{equation*}
As $z \rightarrow \frac{1}{2}$ we apply L'H\^{o}pital's rule to the integrand $|f(z;\tau) \sin(2 \pi m z)|$ which yields the bound
\begin{equation*}
\left| \int_{0}^{\frac{1}{2} - a} f(z;\tau) \sin(2\pi m z) dz \right| \ll \frac{\eta(2\tau)^8}{\eta(\tau)^{16}}.
\end{equation*}
Hence, away from the dominant pole in $q$, we have shown the following proposition.
\begin{proposition}\label{Proposition: f away from dominant}
For $1 \leq x \leq \frac{\pi m^{\frac{1}{3}}}{\beta}$ we have that
\begin{equation*}
\left|f_m\left(\frac{i \varepsilon}{2 \pi} \right) \right| \ll n^{-2} \exp\left[ \pi \sqrt{2n} - \frac{8 \sqrt{2n}}{\pi} \left(1 - \frac{1}{\sqrt{1+ m^{-\frac{2}{3}}}}\right) \right]
\end{equation*}
as $n \rightarrow \infty$.
\end{proposition}
\section{The Circle Method}\label{Section: Circle method}
In this section we use Wright's variant of the Circle Method to complete the proof of Theorem \ref{Theorem: main}. We start by noting that Cauchy's theorem implies that
\begin{equation*}
b(m,n) = \frac{1}{2 \pi i} \int_{C} \frac{f_m (\tau)}{q^{n+1}} dq,
\end{equation*}
where $C \coloneqq \{ q \in \mathbb{C} \mid |q| = e^{-\beta} \}$ is a circle centred at the origin of radius less than $1$, with the path taken in the counter-clockwise direction.
Making a change of variables, changing the direction of the path of the integral, and recalling that $\varepsilon = \beta(1+ixm^{-\frac{1}{3}})$ we have
\begin{equation*}
b(m,n) = \frac{\beta}{2\pi m^{\frac{1}{3}}} \int_{|x| \leq \frac{\pi m^{\frac{1}{3}}}{\beta}} f_m\left(\frac{i \varepsilon}{2 \pi}\right) e^{\varepsilon n} dx.
\end{equation*}
Splitting this integral into two pieces, we have $b(m,n) = M + E$ where
\begin{equation*}
M \coloneqq \frac{\beta}{2 \pi m^{\frac{1}{3}}} \int_{|x| \leq 1} f_m\left( \frac{i \varepsilon}{2 \pi}\right) e^{\varepsilon n} dx,
\end{equation*}
and
\begin{equation*}
E \coloneqq \frac{\beta}{2 \pi m^{\frac{1}{3}}} \int_{1 \leq |x| \leq \frac{\pi m^{\frac{1}{3}}}{\beta}} f_m\left(\frac{i \varepsilon}{2 \pi}\right) e^{\varepsilon n} dx.
\end{equation*}
Next we determine the contributions of each of the integrals $M$ and $E$, and see that $M$ contributes to the main asymptotic term, while $E$ is part of the error term.
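As an aside, the splitting can be mirrored numerically. The following sketch is a toy illustration only and is not used in any proof: the generating function \texttt{F}, the value of $m$, and the choice $\beta = \pi\sqrt{2/n}$ are placeholder assumptions standing in for $f_m$ and the quantities above. It discretizes the contour integral for a coefficient and records the portion coming from $|x| \leq 1$.
\begin{verbatim}
import numpy as np

def coefficient_via_circle(F, n, beta, m=10.0, num_points=200000):
    # b(n) = (1/(2*pi*i)) * \oint F(q) q^{-(n+1)} dq on |q| = e^{-beta},
    # parametrised by q = exp(-eps), eps = beta*(1 + i*x*m^{-1/3}),
    # with |x| <= pi*m^{1/3}/beta, as in the change of variables above.
    X = np.pi * m ** (1.0 / 3.0) / beta
    h = 2.0 * X / num_points
    x = -X + h * np.arange(num_points)             # periodic grid on the circle
    eps = beta * (1.0 + 1j * x * m ** (-1.0 / 3.0))
    integrand = F(np.exp(-eps)) * np.exp(eps * n)  # F(q) * q^{-n}
    prefac = beta * m ** (-1.0 / 3.0) / (2.0 * np.pi)
    total = prefac * h * np.sum(integrand)
    major = prefac * h * np.sum(integrand[np.abs(x) <= 1.0])
    return total.real, major.real

# toy check: F(q) = 1/(1-q) has n-th coefficient 1 for every n >= 0
n = 50
beta = np.pi * np.sqrt(2.0 / n)                    # size of beta assumed here
total, major = coefficient_via_circle(lambda q: 1.0 / (1.0 - q), n, beta)
print(total)                                       # approximately 1
\end{verbatim}
For the actual $f_m$ the analysis above shows that the $|x| \leq 1$ piece carries the main term, while the remaining range is exponentially smaller.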
\subsection{The major arc}
First we concentrate on the contribution $M$, for which we obtain the following proposition.
\begin{proposition}
We have that
\begin{equation*}
M = (-1)^{m+\frac{3}{2}} \frac{\beta^6 m}{8 \pi^5 (2n)^{\frac{1}{4} }} e^{2 \pi \sqrt{2n}} + O \left( m n^{-\frac{13}{4}} e^{2 \pi \sqrt{2n}} \right)
\end{equation*}
as $n \rightarrow \infty$.
\end{proposition}
\begin{proof}
By Theorem \ref{Theorem: behaviour of f_m at dominant pole} and making the change of variables $v = 1 +ixm^{-\frac{1}{3}}$ we obtain
\begin{equation*}
M = (-1)^{m+\frac{3}{2}} \frac{\beta^6 m}{4 \pi^4 } P_{4} + O\left( \beta^3 e^{\pi \sqrt{2n}} \right).
\end{equation*}
Now we rewrite $P_{4}$ in terms of the $I$-Bessel function using Lemma \ref{Lemma: P in terms of I Bessel}, yielding
\begin{equation*}
\begin{split}
M =(-1)^{m+\frac{3}{2}} \frac{\beta^6 m}{4 \pi^4 } I_{-5}(2 \pi \sqrt{2n}) + O\left( \beta^6 e^{\pi \sqrt{2n} \left(1+\frac{1}{1+m^{-\frac{2}{3}}}\right)} \right) + O\left( \beta^3 e^{\pi \sqrt{2n}} \right).
\end{split}
\end{equation*}
The asymptotic behaviour of the $I$-Bessel function given in Lemma \ref{Lemma: asymptotic of I Bessel} gives that
\begin{equation*}
\begin{split}
M = & (-1)^{m+\frac{3}{2}} \frac{\beta^6 m}{8 \pi^5 (2n)^{\frac{1}{4} }} e^{2 \pi \sqrt{2n}} + O \left( m n^{-\frac{15}{4}} e^{2 \pi \sqrt{2n}} \right) + O\left( \beta^6 e^{\pi \sqrt{2n} \left(1+\frac{1}{1+m^{-\frac{2}{3}}}\right)} \right) \\
& + O\left( \beta^3 e^{\pi \sqrt{2n}} \right).
\end{split}
\end{equation*}
It is clear that the first error term is the dominant one, and the result follows.
\end{proof}
\subsection{The error arc}
Now we bound $E$ as follows.
\begin{proposition}
As $n \rightarrow \infty$
\begin{equation*}
E \ll n^{-2} \exp\left[ 2\pi \sqrt{2n} - \frac{8 \sqrt{2n}}{\pi} \left(1 - \frac{1}{\sqrt{1+ m^{-\frac{2}{3}}}}\right) \right].
\end{equation*}
\end{proposition}
\begin{proof}
By Proposition \ref{Proposition: f away from dominant} we see that the main term in the error arc is given by the residue. Hence we may bound
\begin{equation*}
\begin{split}
E & \ll \int_{1 \leq x \leq \frac{\pi m^{\frac{1}{3}}}{\beta} } n^{-2} \exp\left[ \pi \sqrt{2n} - \frac{8 \sqrt{2n}}{\pi} \left(1 - \frac{1}{\sqrt{1+ m^{-\frac{2}{3}}}}\right) \right] e^{\varepsilon n} dx\\
& \ll n^{-2} \exp\left[ 2\pi \sqrt{2n} - \frac{8 \sqrt{2n}}{\pi} \left(1 - \frac{1}{\sqrt{1+ m^{-\frac{2}{3}}}}\right) \right].
\end{split}
\end{equation*}
Noting that this is exponentially smaller than $M$ finishes the proof of Theorem \ref{Theorem: main}.
\end{proof}
\section{Open questions}
We end by commenting on some questions related to the results presented above.
\begin{enumerate}
\item Here we discuss the asymptotic profile of the coefficients $b(m,n)$ for $|m| \leq \frac{1}{6 \beta} \log(n)$. We are also interested in the profile when $m$ is larger than this bound, and so in the future it would be instructive to investigate the asymptotic profile of $b(m,n)$ for large $|m|$. For example, similar results in this direction for the crank of a partition are given in \cite{parry2017dyson}.
\item In the present paper, we provide a framework for investigating the profile of eta-theta quotients. In particular, we deal with the case of a function with a single simple pole on the path of integration. Future research is planned in order to extend this framework to a family of meromorphic eta-theta quotients with a finite number of (not necessarily simple) poles on the path of integration. This should include similar eta-theta quotients that appear in other physical partition functions.
\item In showing Theorem \ref{Theorem: main} we see that the main asymptotic term arises from the pole at $z = 1/2$, and in turn from the residue term $\frac{\eta(2\tau)^8}{\eta(\tau)^{16}}$; is there a physical interpretation for the fact that these terms give the largest contribution to the asymptotic behaviour of $b(m,n)$?
\end{enumerate}
\end{document}
\begin{document}
\title{Secure Bilevel Asynchronous Vertical Federated Learning with Backward Updating}
\begin{abstract}
Vertical federated learning (VFL) attracts increasing attention due to the emerging demands of multi-party collaborative modeling and concerns about privacy leakage. In real VFL applications, usually only one or partial parties hold labels, which makes it challenging for all parties to collaboratively learn the model without privacy leakage. Meanwhile, most existing VFL algorithms rely on synchronous computation, which leads to
inefficiency in their real-world applications. To address these challenging problems, we propose a novel {\bf VF}L framework integrated with new {\bf b}ackward updating mechanism and {\bf b}ilevel asynchronous parallel architecture (VF{${\textbf{B}}^2$}), under which three new algorithms, including VF{${\textbf{B}}^2$}-SGD, -SVRG, and -SAGA, are proposed. We derive the theoretical results of the convergence rates of these three algorithms under both strongly convex and nonconvex conditions. We also prove the security of VF{${\textbf{B}}^2$} under semi-honest threat models. Extensive experiments on benchmark datasets demonstrate that our algorithms are efficient, scalable and lossless.
\end{abstract}
\section{Introduction}
Federated learning \cite{mcmahan2016communication,smith2017federated,kairouz2019advances} has emerged as a paradigm for collaborative modeling with privacy-preserving. A line of recent works \cite{mcmahan2016communication,smith2017federated} focus on horizontal federated learning, where each party has a subset of samples with complete features. There are also some works \cite{gascon2016secure,yang2019federated,dang2020large} studying vertical federated learning (VFL), where each party holds a disjoint subset of features for all samples. In this paper, we focus on VFL, which has attracted much attention from academia and industry due to its wide applications to emerging multi-party collaborative modeling with privacy-preserving.
Currently, there are two mainstream methods for VFL, including homomorphic encryption (HE) based methods and exchanging the raw computational results (ERCR) based methods. The HE based methods \cite{hardy2017private,cheng2019secureboost} leverage HE techniques to encrypt the raw data and then use the encrypted data (ciphertext) for training model with privacy-preserving. However, there are two major drawbacks of HE based methods. First, the complexity of homomorphic mathematical operation on ciphertext field is very high,
thus HE is extremely time consuming for modeling \cite{liu2015encrypted,liu2019federated}. Second, approximation is required for HE to support operations of non-linear functions, such as Sigmoid and Logarithmic functions, which inevitably causes loss of the accuracy for various machine learning models using non-linear functions \cite{kim2018secure,yang2019quasi}. Thus, the inefficiency and inaccuracy of HE based methods dramatically limit their wide applications to realistic VFL tasks.
ERCR based methods \cite{zhang2018feature,hu2019fdml,gu2020Privacy} leverage labels and the raw intermediate computational results transmitted from the other parties to compute stochastic gradients, and thus use distributed stochastic gradient descent (SGD) methods to train VFL models efficiently. Although ERCR based methods circumvent the aforementioned drawbacks of HE based methods, existing ERCR based methods are designed under the assumption that all parties have labels, which is not usually the case in real-world VFL tasks. In realistic VFL applications, usually only one or partial parties (denoted as active parties) have the labels, and the other parties (denoted as passive parties) can only provide extra feature data but do not have labels. When these ERCR based methods are applied to the real situation with both active and passive parties, \textbf{the algorithms cannot even guarantee convergence} because only active parties can compute the gradient of the loss function based on labels while the passive parties cannot, \emph{i.e.}, part of the model parameters are not optimized during the training process. Thus, the crux is to design a proper algorithm for solving real-world VFL tasks where only one or partial parties hold labels.
Moreover, algorithms using synchronous computation \cite{gong2016private,zhang2018feature} are inefficient when applied to real-world VFL tasks, especially when computational resources in the VFL system are unbalanced. Therefore, it is desirable to design efficient asynchronous algorithms for real-world VFL tasks.
Although there have been several works studying asynchronous VFL algorithms
\cite{hu2019fdml,gu2020Privacy}, it is still an open problem to design asynchronous algorithms for solving real-world VFL tasks with only one or partial parties holding labels.
In this paper, we address these challenging problems by proposing a novel framework (VF{${\textbf{B}}^2$}) integrated with the novel backward updating mechanism (BUM) and bilevel asynchronous parallel architecture (BAPA). Specifically, the BUM
enables all parties, rather than only active parties, to collaboratively update the model securely and also makes the final model lossless; the BAPA is designed for efficient asynchronous backward updating.
Considering the advantages of SGD-type algorithms in optimizing machine learning models, we thus propose three new SGD-type algorithms, \emph{i.e.}, VF{${\textbf{B}}^2$}-SGD, -SVRG and -SAGA, under that framework.
\begin{figure*}
\caption{(a): System structure of VF{${\textbf{B}}^2$}. (b): Illustration of the backward updating mechanism and the bilevel asynchronous parallel architecture.}
\label{struc}
\end{figure*}
We summarize the contributions of this paper as follows.
\begin{itemize}
\item
We are the first to propose the novel backward updating mechanism for ERCR based VFL algorithms, which enables all parties, rather than only parties holding labels, to collaboratively learn the model with privacy-preserving and without hampering the accuracy of final model.
\item
We design a bilevel asynchronous parallel architecture that enables all parties to asynchronously update the model through backward updating, which is efficient and scalable.
\item
We propose three new algorithms for VFL, including VF{${\textbf{B}}^2$}-SGD, -SVRG, and -SAGA under VF{${\textbf{B}}^2$}. Moreover, we theoretically prove their convergence rates for both strongly convex and nonconvex problems.
\end{itemize}
\noindent\textbf{Notations.}
$\widehat{w}$ denotes the inconsistent read of $w$.
$\bar{w}$ denotes the copy of $w$ used by collaborators to compute the local stochastic gradient of the loss function, which may be stale due to communication delay.
$\psi(t)$ is the corresponding party performing the $t$-th global iteration.
Given a finite set $S$, $|S|$ denotes its cardinality.
\section{Problem Formulation}
Given a training set $\{x_i,y_i\}_{i=1}^n$, where $y_i \in \{-1, +1\}$ for binary classification task or $y_i \in \mathbb{R}$ for regression problem and $x_i \in \mathbb{R}^d $, we consider the model in a linear form of $w^{\top} x$, where $w \in \mathbb{R}^d $ corresponds to the model parameters. For VFL, $x_i$ is vertically distributed among $q\geq2$ parties, \emph{i.e.}, $x_i=[(x_i)_{\mathcal{G}_1}; \cdots; (x_i)_{\mathcal{G}_q}]$, where $(x_i)_{\mathcal{G}_\ell} \in \mathbb{R}^{d_\ell}$ is stored on the $\ell$-th party and $\sum_{\ell=1}^{q}d_{\ell} = d$. Similarly, there is $w=[w_{\mathcal{G}_1}; \cdots; w_{\mathcal{G}_q}]$. Particularly, we focus on the following regularized empirical risk minimization problem.
\begin{equation}\label{P}
\min_{w\in \mathbb{R}^{d}} f(w) := \frac{1}{n} \sum_{i=1}^{n} \underbrace{\mathcal{L}\left(w^{\top} x_{i}, y_{i}\right)+\lambda \sum_{\ell=1}^{q} g(w_{\mathcal{G}_\ell})}_{f_{i}(w)}, \tag{P}
\end{equation}
where $w^{\top}x_i =\sum_{\ell=1}^{q}{w}_{\mathcal{G}_{\ell }}^{\top}\left(x_{i}\right)_{\mathcal{G}_{\ell}}$, $\mathcal{L}$ denotes the loss function, $\sum_{\ell=1}^{q} g(w_{\mathcal{G}_\ell})$ is the regularization term, and $f_i: \mathbb{R}^d \to \mathbb{R}$ is smooth and possibly nonconvex. Examples of problem~\ref{P} include models for binary classification tasks \cite{conroy2012fast,wang2017stochastic} and models for regression tasks \cite{shen2013novel,wang2019spiderboost}.
In this paper, we introduce two types of parties: {\bf{active party}} and {\bf{passive party}}, where the former denotes data provider holding labels while the latter does not. Particularly, in our problem setting, there are $m$ ($1\leq m \leq q$) active parties. Each active party can play the role of dominator in model updating by actively launching updates.
Any party, active or passive, that passively launches updates plays the role of collaborator.
To guarantee the model security, only active parties know the form of the loss function.
Moreover, we assume that the labels may eventually be shared with all parties. Note that this does not conflict with our setting that only active parties hold the labels before training.
The problem studied in this paper is stated as follows:\\
\noindent{\bf{Given}}: Vertically partitioned data $\{x_{\mathcal{G}_\ell}\}_{\ell =1}^{q}$ stored in $q$ parties and the labels only held by active parties. \\
\noindent{\bf{Learn}}: A machine learning model {\bf M} collaboratively learned by both active and passive parties without leaking privacy.\\
\noindent{\bf{Lossless Constraint}}: The accuracy of {\bf M} must be comparable to that of model {\bf M$'$} learned under non-federated learning.
\section{VF${{\text{B}}}^2$ Framework}
In this section, we propose the novel VF${{\text{B}}}^2$ framework. VF${{\text{B}}}^2$ is composed of three components, and its system structure is illustrated in Fig.~\ref{struca}. The details of these components are presented in the following.
The key to designing a proper algorithm for solving real-world VFL tasks with both active and passive parties is to let the passive parties utilize the label information for model training. However, this is challenging to achieve because directly using the labels held by active parties leads to privacy leakage of the labels. To address this challenging problem, we carefully design the BUM.\\
{\noindent\bf{Backward Updating Mechanism:}}
The key idea of BUM is to make passive parties indirectly use labels to compute stochastic gradients without directly accessing the raw label data. Specifically, the BUM embeds label $y_i$ into an intermediate value $\vartheta: = \frac{\partial \mathcal{L}\left({w}^{\top} x_{i}, y_{i}\right)}{\partial\left({w}^{\top} x_{i}\right)}$. Then $\vartheta$ and $i$ are distributed backward to the other parties. Consequently, the passive parties can also compute the stochastic gradient and update the model by using the received $\vartheta$ and $i$ (please refer to Algorithms~\ref{AFSGD-A} and \ref{AFSGD-P} for details). Fig.~\ref{strucb} depicts the case where $\vartheta$ is distributed from party $1$ to the remaining parties.
In this case, all parties, rather than only active parties, can collaboratively learn the model without privacy leakage.
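As a concrete illustration (a minimal sketch only, not the full Algorithms~\ref{AFSGD-A} and \ref{AFSGD-P}; the logistic loss, the $\ell_2$ regularizer, and all variable names are assumptions made for the example), the dominator forms $\vartheta$ from the aggregated score and every party turns the pair $(\vartheta, i)$ into a local gradient step without seeing $y_i$:
\begin{verbatim}
import numpy as np

def theta_logistic(score, y):
    # vartheta = dL/d(w^T x) for L(t, y) = log(1 + exp(-y*t)); only the dominator runs this
    return -y / (1.0 + np.exp(y * score))

def collaborative_step(w_block, x_block_i, theta, lam, gamma):
    # the gradient of L arrives through theta; the gradient of g is computed locally
    grad = theta * x_block_i + lam * w_block       # assumes g(w) = 0.5 * ||w||^2
    return w_block - gamma * grad

rng = np.random.default_rng(0)
blocks = [rng.normal(size=2) for _ in range(3)]    # w_{G_1}, w_{G_2}, w_{G_3}
x_i = [rng.normal(size=2) for _ in range(3)]       # (x_i)_{G_1}, ..., (x_i)_{G_3}
y_i = 1.0
score = sum(w @ xb for w, xb in zip(blocks, x_i))  # w^T x_i, securely aggregated in reality
theta = theta_logistic(score, y_i)                 # computed by the dominator only
blocks = [collaborative_step(w, xb, theta, lam=1e-4, gamma=0.1)
          for w, xb in zip(blocks, x_i)]           # every party updates its own block
\end{verbatim}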
\begin{algorithm}[!t]
\caption{Safe algorithm of obtaining $w^Tx_i$.}\label{safer_tree}
\begin{algorithmic}[1]
\REQUIRE {$\{w_{\mathcal{G}_{\ell'}}\}_{{\ell '}=1}^{q}$ and $ \{{(x_i)}_{\mathcal{G}_{\ell '}}\}_{{\ell '}=1}^{q}$ allocating at each party, index $i$.} \\
{ \bf{Do this in parallel}}
\FOR {$\ell '=1, \cdots, q$}
\STATE Generate a random number $\delta_{\ell '}$ and calculate $w_{\mathcal{G}_{\ell '}}^{\top} {(x_i)}_{\mathcal{G}_{\ell '}}+ \delta_{\ell '}$,
\ENDFOR
\STATE Obtain $\xi_1 = \sum_{\ell '=1}^{q} (w_{\mathcal{G}_{\ell '}}^{\top} {(x_i)}_{\mathcal{G}_{\ell '}} +\delta_{\ell '})$ through tree structure $T_1$.
\STATE Obtain $\xi_2 = \sum_{\ell '=1}^{q} \delta_{\ell '} $ through a totally different tree structure $T_2\neq T_1$.
\ENSURE {${w}^{\top}x_i =\xi_1-\xi_2$}
\end{algorithmic}
\end{algorithm}
For VFL algorithms with BUM, dominated updates in different active parties are performed in distributed-memory parallel, while collaborative updates within a party are performed in shared-memory parallel. This difference in parallelism leads to the challenge of developing a new parallel architecture rather than directly adopting an existing asynchronous parallel architecture for VFL. To tackle this challenge, we carefully design a novel BAPA.\\
{\bf{Bilevel Asynchronous Parallel Architecture:}}
The BAPA includes two levels of parallel architectures, where the upper level denotes the inter-party parallelism and the lower one is the intra-party parallelism. More specifically, the inter-party parallelism denotes distributed-memory parallelism between active parties, which enables all active parties to asynchronously launch dominated updates; the intra-party one denotes the shared-memory parallelism of collaborative updates within each party, which enables multiple threads within a specific party
to asynchronously perform the collaborative updates. Fig.~\ref{strucb} illustrates the BAPA with $m$ active parties.
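To make the two levels concrete, the following toy simulation is an assumption-laden sketch: Python threads stand in for both separate machines and intra-party threads, the loss is logistic, and all names are hypothetical. Two active parties launch dominated updates while every party consumes $(\vartheta, i)$ messages asynchronously; the unsynchronized reads of the blocks mimic the inconsistent read $\widehat{w}$ from the notations.
\begin{verbatim}
import threading, queue
import numpy as np

q_parties, d_l, n = 3, 2, 8
rng = np.random.default_rng(0)
X = [rng.normal(size=(n, d_l)) for _ in range(q_parties)]   # vertically split features
y = rng.choice([-1.0, 1.0], size=n)
W = [np.zeros(d_l) for _ in range(q_parties)]
inbox = [queue.Queue() for _ in range(q_parties)]           # messages to collaborators

def collaborator(l):
    while True:
        msg = inbox[l].get()
        if msg is None:
            break
        theta, i = msg
        W[l] -= 0.1 * (theta * X[l][i] + 1e-4 * W[l])        # collaborative update

def dominator(seed, steps=20):
    local_rng = np.random.default_rng(seed)
    for _ in range(steps):
        i = int(local_rng.integers(n))
        score = sum(W[l] @ X[l][i] for l in range(q_parties))  # secure aggregation in reality
        theta = -y[i] / (1.0 + np.exp(y[i] * score))           # logistic loss derivative
        for l in range(q_parties):
            inbox[l].put((theta, i))                           # backward updating

workers = [threading.Thread(target=collaborator, args=(l,)) for l in range(q_parties)]
dominators = [threading.Thread(target=dominator, args=(a,)) for a in range(2)]  # m = 2
for t in workers + dominators: t.start()
for t in dominators: t.join()
for box in inbox: box.put(None)
for t in workers: t.join()
\end{verbatim}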
To utilize feature data provided by other parties, a party needs to obtain $w^Tx_i=\sum_{\ell=1}^{q} w_{\mathcal{G}_{\ell}}^{\top} {(x_i)}_{\mathcal{G}_{\ell}}$. Many recent works achieved this by aggregating the local intermediate computational results securely \cite{hu2019fdml,gu2020federated}. In this paper, we use the efficient tree-structured communication scheme \cite{zhang2018feature} for secure aggregation, whose security was proved in \cite{gu2020federated}. \\
{\noindent\bf{Secure Aggregation Strategy:}} The details are summarized in Algorithm~\ref{safer_tree}. Specifically, at step~2, $w_{\mathcal{G}_\ell}^{\top} {(x_i)}_{\mathcal{G}_\ell}$ is computed locally on the $\ell$-th party to prevent the direct leakage of $w_{\mathcal{G}_\ell}$ and ${(x_i)}_{\mathcal{G}_\ell}$. Especially, a random number $\delta_{\ell}$ is added to $w_{\mathcal{G}_{\ell }}^{\top} {(x_i)}_{\mathcal{G}_{\ell }}$ to mask its value, which enhances security during the aggregation process. At steps~4 and 5, $\xi_1$ and $\xi_2$ are aggregated through tree structures $T_1$ and $T_2$, respectively. Note that $T_2$ is totally different from $T_1$, which prevents the random values from being removed under threat model 1 (defined in Section \ref{securitysec}).
Finally, the value of $w^{\top} x_i=\sum_{\ell=1}^{q} w_{\mathcal{G}_{\ell }}^{\top} {(x_i)}_{\mathcal{G}_{\ell }}$ is recovered by removing the term $\sum_{\ell =1}^{q} \delta_{\ell }$ from $\sum_{\ell =1}^{q} (w_{\mathcal{G}_{\ell }}^{\top} {(x_i)}_{\mathcal{G}_{\ell }} +\delta_{\ell})$ at the output step. Using such an aggregation strategy, ${(x_i)}_{\mathcal{G}_{\ell }}$ and $w_{\mathcal{G}_{\ell }}$ are prevented from leaking during the aggregation.
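The masking can be pictured with a short sketch (flat Python sums stand in for the two tree reductions $T_1$ and $T_2$ of Algorithm~\ref{safer_tree}; in the real protocol the two partial sums travel along significantly different trees so that no single party sees an unmasked term):
\begin{verbatim}
import numpy as np

def secure_inner_product(w_blocks, x_blocks, rng):
    # each party l computes w_{G_l}^T (x_i)_{G_l} + delta_l locally
    deltas = [rng.normal() for _ in w_blocks]
    masked = [w @ x + d for w, x, d in zip(w_blocks, x_blocks, deltas)]
    xi1 = sum(masked)   # aggregated along tree T_1
    xi2 = sum(deltas)   # aggregated along a different tree T_2
    return xi1 - xi2    # equals w^T x_i

rng = np.random.default_rng(1)
w_blocks = [rng.normal(size=3) for _ in range(4)]
x_blocks = [rng.normal(size=3) for _ in range(4)]
exact = sum(w @ x for w, x in zip(w_blocks, x_blocks))
print(np.isclose(secure_inner_product(w_blocks, x_blocks, rng), exact))   # True
\end{verbatim}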
\section{Secure Bilevel Asynchronous VFL Algorithms with Backward Updating}
\begin{algorithm}[!t]
\caption{VF{${\textbf{B}}^2$}-SGD for active party $\ell$ to actively launch dominated updates.}\label{AFSGD-A}
\begin{algorithmic}[1]
\REQUIRE {Local data $\{{(x_i)}_{\mathcal{G}_{\ell}},y_i\}_{i=1}^{n}$ stored on the $\ell$-th party, learning rate $\gamma$}.
\STATE Initialize the necessary parameters.\\
{ \bf{Keep doing in parallel (distributed-memory parallel for multiple active parties)}}
\STATE \quad Pick up an index $i$ randomly from $\{1,...,n\}$.
\STATE \quad Compute $\widehat{w}^{\top} x_{i}=\sum_{\ell^{\prime}=1}^{q}\widehat{w}_{\mathcal{G}_{\ell '}}^{\top}\left(x_{i}\right)_{\mathcal{G}_{\ell^{\prime}}}$ based on Algorithm~\ref{safer_tree}.
\STATE \quad Compute $\vartheta = \frac{\partial \mathcal{L}\left(\widehat{w}^{\top} x_{i}, y_{i}\right)}{\partial\left(\widehat{w}^{\top} x_{i}\right)}$.
\STATE \quad Send $\vartheta$ and index $i$ to collaborators.
\STATE \quad Compute $\widetilde{v}^{\ell}=\nabla_{\mathcal{G}_{\ell}} f_{i}(\widehat{w})$.
\STATE \quad Update $w_{\mathcal{G}_{\ell}} \leftarrow w_{\mathcal{G}_{\ell}}-\gamma \widetilde{v}^{\ell}$. \\
{ \bf{End parallel }}
\end{algorithmic}
\end{algorithm}
SGD \cite{bottou2010large} is a popular method for learning machine learning (ML) models. However, it has a poor convergence rate due to the intrinsic variance of the stochastic gradient. Thus, many popular variance reduction techniques have been proposed, including SVRG, SAGA, and SPIDER \cite{johnson2013accelerating,defazio2014saga,wang2019spiderboost} and their applications to other problems \cite{huang2019faster,huang2020accelerated,zhang2020faster,dang2020large,yang2020learning,yang2020adversarial,li2020towards,wei2019adversarial}. In this section we propose three SGD-type algorithms, based on SGD, SVRG and SAGA,
which are among the most popular stochastic methods owing to their appealing performance in practice. We summarize the detailed steps of VF$\bf B^2$-SGD in Algorithms \ref{AFSGD-A} and \ref{AFSGD-P}. For VF$\bf B^2$-SVRG and -SAGA, one just needs to replace the update rule with the corresponding one.
As shown in Algorithm~\ref{AFSGD-A}, at each dominated update, the dominator (an active party) calculates $\vartheta$ and then distributes $\vartheta$ together with $i$ to the collaborators (the remaining $q-1$ parties). As shown in Algorithm~\ref{AFSGD-P}, once party $\ell$ has received $\vartheta$ and $i$, it launches a new collaborative update asynchronously. The dominator computes the local stochastic gradient as $\nabla_{\mathcal{G}_{\ell}} f_{i}(\widehat{w}) = \nabla_{\mathcal{G}_\ell} \mathcal{L}(\widehat{w})+ \lambda \nabla g(\widehat{w}_{\mathcal{G}_{\ell }})$, while the collaborator uses the received $\vartheta$ to compute $\nabla_{\mathcal{G}_\ell} \mathcal{L}$ and its local $\widehat{w}$ to compute $\nabla_{\mathcal{G}_{\ell }} g$, as shown at step 3 in Algorithm~\ref{AFSGD-P}. Note that active parties also need to perform Algorithm~\ref{AFSGD-P} to collaborate with other dominators, ensuring that the model parameters of all parties are updated.
\begin{algorithm}[!t]
\caption{VF{${\textbf{B}}^2$}-SGD for the $\ell$-th party to passively launch collaborative updates.}\label{AFSGD-P}
\begin{algorithmic}[1]
\REQUIRE {Local data $\{{(x_i)}_{\mathcal{G}_{\ell}},y_i\}_{i=1}^{n}$ stored on the $\ell$-th party, learning rate $\gamma$}.
\STATE Initialize the necessary parameters (for passive parties).\\
{ \bf{Keep doing in parallel (shared-memory parallel for multiple threads)}}
\STATE \quad Receive $\vartheta$ and the index $i$ from the dominator.
\STATE \quad Compute $\widetilde{v}^{\ell } = \nabla_{\mathcal{G}_{\ell}} \mathcal{L}(\bar{w})+\lambda \nabla_{\mathcal{G}_{\ell }} g(\widehat{w}) = \vartheta \cdot (x_i)_{\mathcal{G}_{\ell }} +$ \\
\quad $\lambda \nabla g(\widehat{w}_{\mathcal{G}_{\ell }})$.
\STATE \quad Update $w_{\mathcal{G}_{\ell}} \leftarrow w_{\mathcal{G}_{\ell }}-\gamma \widetilde{v}^{\ell}$.
\STATE{ \bf{End parallel}}
\end{algorithmic}
\end{algorithm}
\section{Theoretical Analysis}
In this section, we provide the convergence analyses. Please see the arXiv version for more details. We first present preliminaries for strongly convex and nonconvex problems.
\begin{assum}\label{assum1}
For $f_i(w)$ in problem \ref{P}, we assume the following conditions hold:
\begin{enumerate}
\item {\bf{Lipschitz Gradient:}} For each function $f_i$, $i=1,\ldots,n$, there exists $L>0$ such that for all $w,w'\in \mathbb{R}^d$,
\begin{equation}
\|\nabla f_i (w) - \nabla f_i (w')\| \le L\|w-w'\|.
\end{equation}
\item {\bf{Block-Coordinate Lipschitz Gradient:}} For $i=1,\ldots,n$, there exists an $L_\ell>0$ for the $\ell$-th block $\mathcal{G}_\ell$, where $\ell=1,\cdots,q$ such that
\begin{equation}
\|\nabla_{\mathcal{G}_\ell} f_i (w+U_\ell\Delta_\ell) - \nabla_{\mathcal{G}_\ell} f_i (w)\| \le L_\ell\|\Delta_\ell\|,
\end{equation}
where $\Delta_\ell \in \mathbb{R}^{d_\ell}$, $U_\ell \in \mathbb{R}^{d\times d_\ell}$ and $[U_1, \cdots, U_q] = I_d$.
\item {\bf{Bounded Block-Coordinate Gradient:}} There exists a constant $G$ such that for $f_i,\ i=1,\cdots,n$ and block $\mathcal{G}_\ell$, $\ell =1,\cdots,q$, it holds that $\|\nabla_{\mathcal{G}_\ell} f_i(w)\|^2\leq G $.
\end{enumerate}
\end{assum}
\begin{assum}\label{assum2}
The regularization term $g$ is $L_g$-smooth, which means that there exists an $L_g>0$ for $\ell = 1,\dots,q$ such that $\forall w_{\mathcal{G}_\ell}, w_{\mathcal{G}_\ell}' \in \mathbb{R}^{d_\ell}$ there is
\begin{equation}
\|\nabla g(w_{\mathcal{G}_\ell}) - \nabla g(w_{\mathcal{G}_\ell}')\| \le L_g\|w_{\mathcal{G}_\ell}-w_{\mathcal{G}_\ell}'\|.
\end{equation}
\end{assum}
Assumption \ref{assum2} imposes smoothness on $g$, which is necessary for the convergence analyses: a specific collaborator uses the received $\widehat{w}$ (denoted as $\bar{w}$) to compute $\nabla_{\mathcal{G}_{\ell }} \mathcal{L}$ but its local $\widehat{w}$ to compute $\nabla_{\mathcal{G}_{\ell }} g=\nabla g(w_{\mathcal{G}_\ell})$, which makes it necessary to track the behavior of $g$ individually. Similar to previous research works \cite{lian2015asynchronous,huo2017asynchronous,leblond2017asaga}, we introduce the bounded delay as follows.
\begin{assum}\label{assum4}{\bf{Bounded Delay:}}
Time delays of inconsistent reading and communication between dominator and its collaborators are upper bounded by $\tau_1$ and $\tau_2$, respectively.
\end{assum}
Given $\widehat{w}$ as the inconsistent read of $w$, which is used to compute the stochastic gradient in dominated updates, following the analysis in \cite{gu2020Privacy}, we have
\begin{equation}\label{Dt1}
\widehat{w}_t-w_t = \gamma \sum_{u\in D(t)}U_{\psi(u)}\widetilde{v}_u^{\psi(u)},
\end{equation}
where $D(t)=\{t-1,\cdots,t-\tau_0\}$ is a subset of non-overlapped previous iterations with $\tau_0\leq \tau_1$. Let $\bar{w}$ be the parameter used to compute $\nabla_{\mathcal{G}_{\ell }} \mathcal{L}$ in collaborative updates, which is a stale state of $\widehat{w}$ due to the communication delay between the specific dominator and its corresponding collaborators. Then, following the analyses in \cite{huo2017asynchronous}, there is
\begin{equation}\label{Dt2}
\bar{w}_t = \widehat{w}_{t-\tau_0} = \widehat{w}_t + \gamma \sum_{t'\in D^\prime(t)}U_{\psi(t')}\widetilde{v}_{t'}^{\psi(t')},
\end{equation}
where $D'(t)=\{t-1,\cdots,t-\tau_0\}$ is a subset of previous iterations performed during the communication and $\tau_0\leq \tau_2$.
\subsection{Convergence Analysis for Strongly Convex Problem}
\begin{assum}\label{assumc1}
Each function $f_i$, $i=1,\ldots,n$, is $\mu$-strongly convex, i.e., $\forall \ w,\ w'\in \mathbb{R}^d$ there exists a $\mu>0$ such that
\begin{equation}
f_i(w)\geq f_i(w') + \langle \nabla f_i(w'), w- w' \rangle + \frac{\mu}{2}\|w-w'\|^2.
\end{equation}
\end{assum}
For the strongly convex problem, we introduce the notation $K(t)$, which denotes a minimum set of successive iterations fully visiting all coordinates starting from global iteration number $t$. Note that this is necessary for the asynchronous convergence analyses of the global model. Moreover, we assume that the size of $K(t)$ is upper bounded by $\eta_1$, \emph{i.e.}, $|K(t)|\leq \eta_1$. Based on $K(t)$, we introduce the epoch number $v(t)$ as follows.
\begin{definition}\label{definc2}
Let $P(t)$ be a partition of $\{0,1,\cdots, t-\sigma'\}$, where $\sigma'\geq0$. For any $\kappa\in P(t)$ there exists $t'\leq t$ such that $K(t')=\kappa$, and there exists $\kappa_1 \in P(t)$ such that $K(0)=\kappa_1$. The epoch number for the $t$-th global iteration, i.e., $v(t)$, is defined as the maximum cardinality of $P(t)$.
\end{definition}
Given the definition of epoch number $v(t)$, we have the following theoretical results for $\mu$-strongly convex problem.
\begin{theorem}\label{thm-sgdconvex}
Under Assumptions~\ref{assum1}-\ref{assum4} and \ref{assumc1}, to achieve the accuracy $\epsilon$ of problem~\ref{P} for VF{${\textbf{B}}^2$}-SGD, i.e., $\mathbb{E}(f(w_t)-f(w^*))\leq \epsilon$, let $\gamma\leq \frac{\epsilon\mu^{1/3}}{(G{96L_*^2})^{1/3}}$, if $\tau\leq {\text {min}}\{\epsilon^{-4/3}, \frac{(GL_*^2)^{2/3}}{\epsilon^2\mu^{2/3}}\}$
, the epoch number $v(t)$ should satisfy
$
v(t) {\geq} \frac{44(GL^2_*)^{1/3}}{ \mu^{4/3}\epsilon} \log \left(\frac{2(f(w_0)-f(w^*))}{\epsilon}\right)
$
, where $L_{*}=\text{max}\{L, \{L_{\ell}\}_{\ell=1}^{q}, L_g\}$, $\tau={\text{max}}\{\tau_1^2,\tau_2^2,\eta_1^2\}$, $w^0$ and $w^*$ denote the initial point and optimal point, respectively.
\end{theorem}
\begin{theorem}\label{thm-svrgconvex}
Under Assumptions~\ref{assum1}-\ref{assum4} and \ref{assumc1}, to achieve the accuracy $\epsilon$ of problem~\ref{P} for VF{${\textbf{B}}^2$}-SVRG, let $C=(L_*^2 \gamma+L_*)\frac{\gamma^2}{2}$ and $\rho=\frac{\gamma\mu}{2}- \frac{16L_*^2\eta_1C}{\mu}$, we can carefully choose $\gamma$ such that
\begin{eqnarray}
\nonumber
&& 1) \ 1-2L_*^2\gamma^2\tau>0;
\ \ 2)\ \rho>0;
\ \ 3)\ \frac{8L_*^2\tau^{1/2}C}{\rho\mu} \leq 0.05;\\
&& 4)\ L_{*}^{2} \gamma^{2}\tau^{3/2}(28 C + 5 {\gamma}) \frac{2\lambda_{\gamma}G}{\rho} \leq \frac{\epsilon}{8},
\end{eqnarray}
where $\lambda_{\gamma}=\frac{18}{1-2L_{*}^2\gamma^2\tau}$, the inner epoch number $v(t)$ should satisfy $v(t)\geq \frac{{\text {log}} 0.25}{{\text {log}}(1-\rho)}$ and the outer loop number $S$ should satisfy $S\geq \frac{1}{{\text {log}}\frac{4}{3}}{{\text {log}}\frac{2f(w_0)-f(w^*)}{\epsilon}}$.
\end{theorem}
\begin{theorem}\label{thm-sagaconvex}
Under Assumptions~\ref{assum1}-\ref{assum4} and \ref{assumc1}, to achieve the accuracy $\epsilon$ of problem~\ref{P} for VF{${\textbf{B}}^2$}-SAGA, let
$c_0=\left(2 { \gamma^3 \tau^{3/2}} + ({L_*^2 \gamma^3 \tau} + {L_*\gamma^2}) 180 \gamma^2\tau^{3/2} + 8 \gamma^{2} \tau\right)\frac{18GL_{*}^2}{1 - 72L_{*}^2\gamma^2\tau }$,
$c_1=2L_*^2\tau({L_*^2 \gamma^3 \tau}+ {L_*\gamma^2})$,
$c_2=4 ({L_*^2 \gamma^3 \tau}+ {L_*\gamma^2})\frac{L_*^{2} \tau}{n} $, and $\rho\in (1-\frac{1}{n},1)$, we can choose $\gamma$ such that
\begin{eqnarray}
&&1) \ 1-72L_*^2\gamma^2\tau>0;\ 2)\ 0<1-\frac{\gamma \mu}{4}<1;
\nonumber \\
&&3)\ \frac{4 c_{0}}{\gamma \mu(1-\rho)\left(\frac{\gamma \mu^{2}}{4}-2 c_{1}-c_{2}\right)} \leq \frac{\epsilon}{2};
\nonumber \\
&&4)\ -\frac{\gamma \mu^{2}}{4}+2 c_{1}+c_{2}\left(1+(1-\frac{1-\frac{1}{n}}{\rho})^{-1}\right) \leq 0;
\nonumber \\
&&5)\ -\frac{\gamma \mu^{2}}{4}+c_{2}+c_{1}\left(2+(1-\frac{1-\frac{1}{n}}{\rho})^{-1}\right) \leq 0,
\end{eqnarray}
the epoch number $v(t)$ should satisfy $v(t) \geq \frac{1}{\log \frac{1}{\rho}}\log \frac{2\left(2 \rho-1+\frac{\gamma \mu}{4}\right) \left(f(w_0)-f(w^*)\right)}{\epsilon\left(\rho-1+\frac{\gamma \mu}{4}\right)\left(\frac{\gamma \mu^{2}}{4}-2 c_{1}-c_{2}\right)}$.
\end{theorem}
\begin{remark}
For strongly convex problems, given the assumptions and parameters in corresponding theorems, the convergence rate of VF{${\textbf{B}}^2$}-SGD is $\mathcal{O} (\frac{1}{{\epsilon}}\text{log}(\frac{1}{\epsilon}))$, and those of VF{${\textbf{B}}^2$}-SVRG and VF{${\textbf{B}}^2$}-SAGA are $\mathcal{O} (\text{log}(\frac{1}{\epsilon}))$.
\end{remark}
\subsection{Convergence Analysis for Nonconvex Problem}
\begin{assum}\label{assumnc1}
Nonconvex function $f(w)$ is bounded below,
\begin{equation}\label{inf}
f^*:=\inf_{w\in \mathbb{R}^d} f(w) > -\infty.
\end{equation}
\end{assum}
Assumption \ref{assumnc1} guarantees the feasibility of nonconvex problem (P).
For the nonconvex problem, we introduce the notation $K'(t)$ that denotes a set of $q$ iterations fully visiting all coordinates, \emph{i.e.}, $K'(t) = \{\{t, t+\bar{t}_1, \cdots, t+\bar{t}_{q-1}\}: \psi(\{t, t+\bar{t}_1,\cdots, t+\bar{t}_{q-1}\}) = \{1,\cdots,q\}\}$, where the $t$-th global iteration denotes a dominated update. These iterations are performed respectively on a dominator and $q-1$ different collaborators receiving $\vartheta$ calculated at the $t$-th global iteration. Moreover, we assume that $K'(t)$ can be completed in $\eta_2$ global iterations, \emph{i.e.}, for $\forall t' \in \mathcal{A}(t)$, there is $\eta_2 \geq {\text{max}}\{u|u\in K'(t')\}-t'$. Note that, different from $K(t)$, there is $|K'(t)|=q$ and the definition of $K'(t)$ does not require ``successive iterations'' due to the difference in analysis techniques between strongly convex and nonconvex problems. Based on $K'(t)$, we introduce the epoch number $v'(t)$ as follows.
\begin{definition}\label{definnc2}
$\mathcal{A}(t)$ denotes a set of global iterations, where for $\forall$ $t' \in \mathcal{A}(t)$ there is the $t'$-th global iteration denoting a dominated update and $\cup_{\forall t' \in \mathcal{A}(t)} K'(t')=\{0,1,\cdots,t\}$. The epoch number $v'(t)$ is defined as $|\mathcal{A}(t)|$.
\end{definition}
Given the definition of the epoch number $v'(t)$, we have the following theoretical results for the nonconvex problem.
\begin{theorem}\label{thm-sgdnonconvex}
Under Assumptions~\ref{assum1}-\ref{assum4} and \ref{assumnc1}, to achieve the $\epsilon$-first-order stationary point of problem~\ref{P}, i.e. $\mathbb{E}\|\nabla f(w)\| \le \epsilon$ for stochastic variable $w$, for VF{${\textbf{B}}^2$}-SGD, let
$\gamma = \frac{ \epsilon}{{L_{*}qG}}$, if $\tau\leq\frac{512qG}{\epsilon^2}$, the total epoch number $T$ should satisfy
\begin{equation}
T \geq {\frac{ {\mathbb{E}\left[ f (w^0) - f^* \right]L_{*}qG }}{\epsilon^2}},
\end{equation}
where $L_{*}=\text{max}\{L, \{L_{\ell}\}_{\ell=1}^{q}, L_g\}$, $\tau={\text{max}}\{\tau_1^2,\tau_2^2,\eta_2^2\}$, $f(w^0)$ is the initial function value and $f^*$ is defined in Eq.~\ref{inf}.
\end{theorem}
\begin{theorem}\label{thm-svrgnonconvex}
Under Assumptions~\ref{assum1}-\ref{assum4} and \ref{assumnc1},
to solve problem~\ref{P} with VF{${\textbf{B}}^2$}-SVRG, let $\gamma = \frac{m_0}{L_{*}n^\alpha}$, where $0<m_0<\frac{1}{8}$, $0<\alpha \leq 1$, if epoch number $N$ in an outer loop satisfies $ N \leq \lfloor \frac{n^{{\alpha}}}{2m_0} \rfloor$, and $\tau < \text{min} \{\frac{n^{2\alpha}}{20m_0^2},\frac{1-8m_0}{40m_0^2} \} $, there is
{\begin{equation}
\small{\frac{1}{T}\sum\limits_{s=1}^{S}\sum\limits_{t=0}^{N-1}\mathbb{E} ||\nabla f(w^s_{t_0})||^2 \leq \frac{L_{*}n^{\alpha}\mathbb{E}\left[ f( w_{0}) - f( w^{*}) \right] }{T \sigma }},
\end{equation}
}
where $T$ is the total number of epochs, $t_0$ is the start iteration of epoch $t$, and $\sigma$ is a small value independent of $n$.
\end{theorem}
\begin{theorem}\label{thm-saganonconvex}
Under Assumptions~\ref{assum1}-\ref{assum4} and \ref{assumnc1},
to solve problem~\ref{P} with VF{${\textbf{B}}^2$}-SAGA, let
$\gamma = \frac{m_0}{L_{*}n^\alpha}$, where $0<m_0<\frac{1}{20}$, $0<\alpha \leq 1$, if total epoch number $T$ satisfies $T \leq \lfloor \frac{n^{{\alpha}}}{4m_0} \rfloor$
and $\tau < \text{min} \{\frac{n^{2\alpha}}{180m_0^2},\frac{1-20m_0}{40m_0^2} \}$, there is
\begin{equation}
\frac{1}{T}\sum\limits_{t=0}^{T-1}\mathbb{E} ||\nabla f(w_{t_0})||^2 \leq \frac{L_{*}n^{\alpha}\mathbb{E}\left[ f( w_{0}) - f( w^{*}) \right] }{T \sigma }.
\end{equation}
\end{theorem}
\begin{remark}
For nonconvex problems, given conditions in the theorems, the convergence rate of VF{${\textbf{B}}^2$}-SGD is $\mathcal{O} (1/{\sqrt{T}})$, and those of VF{${\textbf{B}}^2$}-SVRG and VF{${\textbf{B}}^2$}-SAGA are $\mathcal{O} (1/{T})$.
\end{remark}
\section{Security Analysis}\label{securitysec}
We discuss the data security and model security of VF{${\textbf{B}}^2$} under two semi-honest threat models commonly used in security analysis \cite{cheng2019secureboost,xu2019hybridalpha,gu2020federated}. Specifically, these two threat models have different abilities: threat model 2 allows collusion between parties while threat model 1 does not.
\begin{figure}
\caption{SGD-based}
\caption{SVRG-based}
\caption{SAGA-based}
\caption{ $q$-parties speedup scalability with $m=2$ on $D_4$.}
\label{Exp-sca}
\end{figure}
\begin{itemize}
\item {\bf{Honest-but-curious}} (threat model 1): All workers will follow the algorithm to perform the correct computations. However, they may use their own retained records of the intermediate computation results to infer other workers' data and models.
\item {\bf{Honest-but-colluding}} (threat model 2): All workers will follow the algorithm to perform the correct computations. However, some workers may collude to infer other workers' data and models by sharing their retained records of the intermediate computation results.
\end{itemize}
Similar to \cite{gu2020federated}, we prove the security of VF{${\textbf{B}}^2$} by analyzing and proving its ability to prevent inference attack defined as follows.
\begin{definition}[Inference attack]\label{definatt1}
An inference attack on the $\ell$-th party is to infer $(x_i)_{\mathcal{G}_\ell}$ (or $w_{\mathcal{G}_\ell}$) belonging to other parties or $y_i$ held by active parties without directly accessing them.
\end{definition}
\begin{lemma}\label{infinite}
Given an equation $o_i = w_{\mathcal{G}_{\ell }}^{\top} {(x_i)}_{\mathcal{G}_{\ell }}$ or $o_i = \frac{\partial \mathcal{L}\left(\widehat{w}^{\top} x_{i}, y_{i}\right)}{\partial\left(\widehat{w}^{\top} x_{i}\right)}$ with only $o_i$ being known, there are infinitely many different solutions to this equation.
\end{lemma}
The proof of Lemma~\ref{infinite} is given in the arXiv version. Based on Lemma~\ref{infinite}, we obtain the following theorem.
\begin{theorem}\label{security}
Under two semi-honest threat models, VF{${\textbf{B}}^2$} can prevent the inference attack.
\end{theorem}
\noindent{\bf Feature and model security:}
During the aggregation, the value of $o_i = w_{\mathcal{G}_{\ell }}^{\top} {(x_i)}_{\mathcal{G}_{\ell }}$ is masked by $\delta_{\ell }$ and only the value of $w_{\mathcal{G}_{\ell }}^{\top} {(x_i)}_{\mathcal{G}_{\ell }}+ \delta_{\ell }$ is transmitted. Under threat model 1, one cannot even access the true value of $o_i$, let alone use the relation $o_i = w_{\mathcal{G}_{\ell }}^{\top} {(x_i)}_{\mathcal{G}_{\ell}}$ to infer $w_{\mathcal{G}_{\ell }}^{\top}$ and ${(x_i)}_{\mathcal{G}_{\ell}}$. Under threat model 2, the random value $\delta_{\ell }$ is at risk of being removed from the term $w_{\mathcal{G}_{\ell }}^{\top} {(x_i)}_{\mathcal{G}_{\ell }}+ \delta_{\ell }$ through collusion with other parties. Applying Lemma~\ref{infinite} to this circumstance, we have that even if the random value is removed it is still impossible to exactly infer $w_{\mathcal{G}_{\ell }}^{\top}$ and ${(x_i)}_{\mathcal{G}_{\ell }}$. Thus, the aggregation process can prevent inference attacks under the two semi-honest threat models.\\
{\bf Label security}: When analyzing the security of the labels, we do not consider collusion between active and passive parties, which would make preventing labels from leaking meaningless. In the backward updating process, if a passive party $\ell$ wants to infer $y_i$ through the received $\vartheta$, it must solve the equation
$\vartheta = \frac{\partial \mathcal{L}\left(\widehat{w}^{\top} x_{i}, y_{i}\right)}{\partial\left(\widehat{w}^{\top} x_{i}\right)}$. However, only $\vartheta$ is known to party $\ell$. Thus, following Lemma~\ref{infinite}, it is impossible to exactly infer the labels. Moreover, collusion between passive parties poses no threat to the security of the labels. Therefore, the backward updating can prevent inference attacks under the two semi-honest threat models.
From the above analyses, we conclude that feature security, label security and model security are all guaranteed in VFB$^{2}$.
\section{Experiments}\label{secexp}
In this section, extensive experiments are conducted to demonstrate the efficiency, scalability and losslessness of our algorithms. More experiments are presented in the arXiv version.\\
\noindent {\bf Experiment Settings:} All experiments are implemented on a machine with four sockets, and each socket has 12 cores. To simulate the environment with multiple machines (or parties), we arrange an extra thread for each party to schedule its $k$ threads and support communication with (threads of) the other parties. We use MPI to implement the communication scheme. The data are partitioned vertically and randomly into $q$ non-overlapped parts with nearly equal numbers of features. The number of threads within each party, \emph{i.e.} \ $k$, is set as $m$. We use the given training set or randomly selected 80\% of the samples as the training data, and the given testing set or the rest as the testing data. An optimal learning rate $\gamma$ is chosen from $\{5e^{-1},1e^{-1},5e^{-2},1e^{-2},\cdots\}$ with regularization coefficient $\lambda=1e^{-4}$ for all experiments.
\begin{table}[!t]
\centering
\begin{tabular}{@{}ccccc@{}}
\toprule
\multirow{2}{*}{}
& \multicolumn{2}{c}{Financial} & \multicolumn{2}{c}{Large-Scale} \\ \cmidrule(l){2-3} \cmidrule(l){4-5}
& $D_1$ & $D_2$ & $D_3$ & $D_4$ \\
\midrule
\#Samples & 24,000 & 96,257 & 17,996 & 175,000 \\
\#Features & 90 & 92 & 1,355,191 & 16,609,143 \\ \bottomrule
\end{tabular}
\caption{Dataset Descriptions.}
\label{dataset}
\end{table}
\begin{figure*}
\caption{Data: $D_1$}
\caption{Data: $D_2$}
\caption{Data: $D_3$}
\caption{Data: $D_4$}
\caption{Results for solving $\mu$-strongly convex VFL models (Problem \ref{P1}).}
\label{Exp-con}
\end{figure*}
\begin{figure*}
\caption{Data: $D_1$}
\caption{Data: $D_2$}
\caption{Data: $D_3$}
\caption{Data: $D_4$}
\caption{Results for solving nonconvex VFL models (Problem \ref{P2}).}
\label{Exp-ncon}
\end{figure*}
\noindent {\bf Datasets:} We use four classification datasets summarized in Table~\ref{dataset} for evaluation. Especially, $D_1$ (UCICreditCard) and $D_2$ (GiveMeSomeCredit) are real financial datasets from the Kaggle website\footnote{\url{https://www.kaggle.com/datasets}}, which can be used to demonstrate the ability to address real-world tasks; $D_3$ (news20) and $D_4$ (webspam) are the large-scale ones from the LIBSVM \cite{chang2011libsvm} website\footnote{\url{https://www.csie.ntu.edu.tw/cjlin/libsvmtools/datasets/}}. Note that we apply one-hot encoding to the categorical features of $D_1$ and $D_2$; thus the numbers of features become 90 and 92, respectively. \\
\noindent {\bf Problems:} We consider $\ell_2$-norm regularized logistic regression problem for $\mu$-strong convex case
\begin{equation}\label{P1}
\min_{w \in \mathbb{R}^d} f(w):=\frac{1}{n} \sum_{i=1}^{n} {\text{log}}(1+e^{-y_iw^{\top} x_i}) + \frac{\lambda}{2} \|w\|^2,
\end{equation}
and the nonconvex logistic regression problem
\begin{equation}\label{P2}
\min_{w \in \mathbb{R}^d} f(w):=\frac{1}{n} \sum_{i=1}^{n} {\text{log}}(1+e^{-y_iw^{\top} x_i}) + \frac{\lambda}{2} \sum_{i=1}^{d} \frac{w_i^2}{1+w_i^2}.
\end{equation}
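For the two objectives above, the intermediate value used in the backward updating mechanism and the regularizer gradient take the explicit forms (a direct computation recorded here for convenience)
\begin{equation*}
\vartheta = \frac{\partial}{\partial(\widehat{w}^{\top}x_i)}\,{\text{log}}\bigl(1+e^{-y_i\widehat{w}^{\top}x_i}\bigr) = \frac{-y_i}{1+e^{\,y_i\widehat{w}^{\top}x_i}},
\qquad
\frac{\partial}{\partial w_i}\,\frac{\lambda}{2}\,\frac{w_i^2}{1+w_i^2} = \frac{\lambda w_i}{(1+w_i^2)^2},
\end{equation*}
so a passive party only ever receives the scalar $\vartheta$ and never $y_i$ itself.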
\subsection{Evaluations of Asynchronous Efficiency and Scalability}
To demonstrate the asynchronous efficiency, we introduce the synchronous counterparts of our algorithms (\emph{i.e.}, synchronous {VF}L algorithms with BUM, denoted as {VF\bf{B}}) for comparison. When implementing the synchronous algorithms, there is a synthetic straggler party which may be 30\% to 50\% slower than the fastest party, to simulate the real application scenario with unbalanced computational resources.\\
{\noindent {\bf Asynchronous Efficiency:}} In these experiments, we set $q=8$, $m=3$ and fix $\gamma$ for algorithms of the same SGD type but with different parallel fashions.
As shown in Figs.~\ref{Exp-con} and \ref{Exp-ncon}, the loss v.s. run
time curves demonstrate that our algorithms consistently outperform their synchronous counterparts regarding the efficiency.
\begin{table*}[!t]
\centering
\begin{tabular}{@{}cccccccc@{}}
\toprule
&Algorithm& $D_1$ & $D_2$ & $D_3$ & $D_4$ \\ \midrule
\multirow{3}{*}{Problem (\ref{P1})}
& NonF & 81.96\%$\pm$0.25\% & 93.56\%$\pm$0.19\% & 98.29\%$\pm$0.21\% & 92.17\%$\pm$0.12\% \\
& AFSVRG-VP & 79.35\%$\pm$0.19\% & 93.35\%$\pm$0.18\% & 97.24\%$\pm$0.11\% & 89.17\%$\pm$0.10\% \\
&{\bf{ Ours}} & 81.96\%$\pm$0.22\% & 93.56\%$\pm$0.20\% & 98.29\%$\pm$0.20\% & 92.17\%$\pm$0.13\% \\
\midrule
\multirow{3}{*}{Problem (\ref{P2})}
& NonF & 82.03\%$\pm$0.32\% & 93.56\%$\pm$0.25\% & 98.45\%$\pm$0.29\% & 92.71\%$\pm$0.24\% \\
& AFSVRG-VP & 79.36\%$\pm$0.24\% & 93.35\%$\pm$0.22\% & 97.59\%$\pm$0.13\% & 89.98\%$\pm$0.14\% \\
& {\bf{ Ours}} & 82.03\%$\pm$0.34\% & 93.56\%$\pm$0.24\% & 98.45\%$\pm$0.33\% & 92.71\%$\pm$0.27\% \\
\bottomrule
\end{tabular}
\caption{Accuracy of different algorithms to evaluate the losslessness of our algorithms (10 trials).}
\label{exp-lossless}
\end{table*}
Moreover, from the perspective of loss v.s. epoch number, we observe that algorithms based on SVRG and SAGA have better convergence rates than SGD-based algorithms, which is consistent with the theoretical results.\\
{\noindent {\bf Asynchronous Scalability:}} We also consider the asynchronous speedup scalability in terms of the number of total parties $q$. Given a fixed $m$, $q$-parties speedup is defined as
\begin{equation}
\text{$q$-parties speedup} =\frac{\text{Run time of using 1 party}}{\text{Run time of using $q$ parties}},
\end{equation}
where run time is defined as the time spent on reaching a certain precision of sub-optimality, \emph{i.e.}, $1e^{-3}$ for $D_4$. We run this experiment for {Problem (\ref{P2})}; the results are shown in Fig.~\ref{Exp-sca}. As depicted in Fig.~\ref{Exp-sca}, our asynchronous algorithms have much better $q$-parties speedup scalability than the synchronous ones and can achieve near-linear speedup.
\subsection{Evaluation of Losslessness}
To demonstrate the losslessness of our algorithms, we compare VF${ {\textbf{B}}}^2$-SVRG with its non-federated (NonF) counterpart (all data are integrated together for modeling) and an ERCR based algorithm without BUM, \emph{i.e.}, AFSVRG-VP proposed in \cite{gu2020Privacy}. Especially, AFSVRG-VP also uses a distributed SGD method but cannot optimize the parameters corresponding to passive parties due to the lack of labels. When implementing AFSVRG-VP, we assume that only half of the parties have labels, \emph{i.e.}, parameters corresponding to the features held by the other parties are not optimized. Each comparison is repeated 10 times with $m=3$, $q=8$, and the same stopping criterion, \emph{e.g.}, $1e^{-5}$ for $D_1$. As shown in Table~\ref{exp-lossless}, the accuracies of our algorithms are the same as those of the NonF algorithms and are much better than those of AFSVRG-VP, which is consistent with our claims.
\section{Conclusion}
In this paper, we proposed a novel backward updating mechanism for the real VFL system where only one or partial parties have labels for training models. Our new algorithms enable all parties, rather than only active parties, to collaboratively update the model and also guarantee the algorithm convergence, which was not held in other recently proposed ERCR based VFL methods under the real-world setting. Moreover, we proposed a bilevel asynchronous parallel architecture to make ERCR based algorithms with backward updating more efficient in real-world tasks. Three practical SGD-type of algorithms were also proposed with theoretical guarantee.
\onecolumn
\appendix
\section*{\LARGE{Supplementary Materials}}
We present the related supplements in the following sections.
\section{Explanation of the Bilevel Asynchronous Parallel Architecture }
When $m=1$, we just need to set the number of threads within each party to 1; then the Bilevel Asynchronous Parallel Architecture (BAPA) reduces to a parallel architecture with multiple parties, where the updates on passive parties rely on the $\vartheta$ received from the only active party. In this case, the BAPA behaves similarly to (but not identically to) the server-worker distributed-memory architecture in \cite{huo2017asynchronous}, since there is a communication delay between the active and passive parties. The difference is that in our BAPA with $m=1$ a worker (\emph{i.e.}, a passive party $\ell$ in our BAPA) passively sends the local $w_{\mathcal{G}_{\ell}}^{\top} {(x_i)}_{\mathcal{G}_{\ell}}$ to the other parties when $w^Tx_i$ is required, instead of actively sending the local $w_{\mathcal{G}_{\ell}}^{\top} {(x_i)}_{\mathcal{G}_{\ell}}$ to the only server (\emph{i.e.}, the active party in our BAPA). When $m=q$, all parties hold labels and the BAPA reduces to the general shared-memory parallel architecture from the perspective of analysis.
\section{Supplements Related to Tree-Structured Communication}
\subsection{The definition and illustration of totally different tree structures}
First, we present the definition of significantly different tree structures mentioned at step 5 in Algorithm \ref{safer_tree}.
\begin{definition}[Two significantly different tree structures\cite{gu2020federated}]\label{definatt3}
Two tree structures $T_1$ and $T_2$ on all parties $\{1,\cdots,q\}$ are significantly different if there do not exist a subtree $\widehat{T}_1$ of $T_1$ and a subtree $\widehat{T}_2$ of $T_2$, whose sizes are larger than 1 and smaller than those of $T_1$ and $T_2$, respectively, such that leaf($\widehat{T}_1$) = leaf($\widehat{T}_2$).
\end{definition}
Then we present an illustration of two totally different tree structures in Fig.~\ref{tree_struc}.
\begin{figure}
\caption{Tree structure $T_1$}
\caption{Tree structure $T_2$}
\caption{Illustration of tree-structured communication based on two totally different tree structures $T_1$ and $T_2$.}
\label{tree_struc}
\end{figure}
As depicted in Fig.~\ref{tree_struc} (a), party $1$ aggregates values from parties $1$ and $2$; party $3$ aggregates values from parties $3$ and $4$; and then party $1$, \emph{i.e.}, the aggregator, aggregates these two aggregated values from parties $1$ and $3$. In contrast, as depicted in Fig.~\ref{tree_struc} (b), party $1$ aggregates values from parties $1$ and $3$; party $2$ aggregates values from parties $2$ and $4$; and then the aggregated values are aggregated from parties $1$ and $2$ to party $1$, \emph{i.e.}, the aggregator. From the aggregation process described above, it is easy to conclude that aggregation through such significantly different tree structures can prevent the leakage of the random value $\delta_{{\ell}}$ when there is no collusion between parties.
\subsection{An example showing collusion between parties}
Then we present an example to show that collusion between parties can remove the random value $\delta_{\ell}$ added to $w_{\mathcal{G}_\ell}^{\top} {(x_i)}_{\mathcal{G}_\ell}$. Assume that $\{w_{\mathcal{G}_{\ell'}}^{\top} {(x_i)}_{\mathcal{G}_{\ell '}} + \delta_{\ell'}\}_{{\ell '}=1}^{q}$ are aggregated through tree structure $T_1$ and $\{ \delta_{\ell'}\}_{{\ell '}=1}^{q}$ are aggregated through tree structure $T_2$. In this case, party $3$ knows the value of $w_{\mathcal{G}_{\ell=4}}^{\top} {(x_i)}_{\mathcal{G}_{\ell=4}} + \delta_{\ell=4}$ and party $2$ knows the value of $\delta_{\ell=4}$. Then if there is collusion between parties 2 and 3, $\delta_{\ell=4}$ added to party $4$ can be removed from $w_{\mathcal{G}_{\ell=4}}^{\top} {(x_i)}_{\mathcal{G}_{\ell=4}} + \delta_{\ell=4}$.
\subsection{Proof of Lemma \ref{infinite}}
\begin{proof}
First, we consider the equation $o_i = w_{\mathcal{G}_{\ell }}^{\top} {(x_i)}_{\mathcal{G}_{\ell }}$ with two cases, including $d_{\ell}\geq 2$ and $d_{\ell}=1$. For $\forall d_{\ell}\geq 2$, given an arbitrary non-identity orthogonal matrix $U\in \mathbb{R}^{d_\ell\times d_\ell}$, we have
\begin{equation}\label{proofinfinite1}
(w_{\mathcal{G}_{\ell }}^{\top}U^{\top})(U {(x_i)}_{\mathcal{G}_{\ell }}) = w_{\mathcal{G}_{\ell }}^{\top}(U^{\top}U) {(x_i)}_{\mathcal{G}_{\ell }} = w_{\mathcal{G}_{\ell }}^{\top} {(x_i)}_{\mathcal{G}_{\ell }}=o_i
\end{equation}
From Eq.~\ref{proofinfinite1}, we have that, given an equation $o_i = w_{\mathcal{G}_{\ell }}^{\top} {(x_i)}_{\mathcal{G}_{\ell }}$ with only $o_i$ being known, the solutions corresponding to $w_{\mathcal{G}_{\ell '}}$ and ${(x_i)}_{\mathcal{G}_{\ell '}}$ can be represented as $(w_{\mathcal{G}_{\ell '}}^{\top}U^{\top})$ and $(U {(x_i)}_{\mathcal{G}_{\ell '}})$, respectively. Since $U$ can be an arbitrary non-identity orthogonal matrix, the solutions are infinite. If $d_\ell=1$, given an arbitrary real number $u\neq1$, we have
\begin{equation}\label{proofinfinite2}
(w_{\mathcal{G}_{\ell }}^{\top}u)(\frac{1}{u} {(x_i)}_{\mathcal{G}_{\ell }}) = w_{\mathcal{G}_{\ell }}^{\top}(u\frac{1}{u}) {(x_i)}_{\mathcal{G}_{\ell }} = w_{\mathcal{G}_{\ell }}^{\top} {(x_i)}_{\mathcal{G}_{\ell }}=o_i
\end{equation}
Similar to the above analysis, we have that the solutions of the equation $o_i = w_{\mathcal{G}_{\ell }}^{\top} {(x_i)}_{\mathcal{G}_{\ell }}$ are infinite when $d_\ell=1$. As for $o_i = \frac{\partial \mathcal{L}\left({w}^{\top} x_{i}, y_{i}\right)}{\partial\left({w}^{\top} x_{i}\right)}$, both ${w}^{\top} x_{i}$ and the loss function are unknown, so it is impossible to exactly infer $y_{i}$. This completes the proof.
\end{proof}
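The construction in the proof can also be checked numerically (a small sanity check only; the orthogonal matrix is sampled at random and all names are hypothetical):
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(2)
d = 5
w = rng.normal(size=d)
x = rng.normal(size=d)
U, _ = np.linalg.qr(rng.normal(size=(d, d)))   # a random orthogonal matrix
o = w @ x
o_alt = (U @ w) @ (U @ x)                      # the pair (U w, U x) yields the same o_i
print(np.isclose(o, o_alt))                    # True: o_i alone does not identify (w, x_i)
\end{verbatim}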
\section{Detailed Algorithmic Steps of VF{${\textbf{B}}^2$}-SVRG and -SAGA}
In the following, we present the detailed algorithmic steps of VF{${\textbf{B}}^2$}-SVRG and -SAGA.
\subsection{VF{${\textbf{B}}^2$}-SVRG}
The proposed VF${\textbf{B}}^2$-SVRG, which has an improved convergence rate over VF${\textbf{B}}^2$-SGD, is shown in Algorithms~\ref{afsvrg-active} and \ref{afsvrg-passive}. Different from VF${\textbf{B}}^2$-SGD, which directly uses the stochastic gradient for updating, VF${\textbf{B}}^2$-SVRG adopts the variance reduction technique to control the intrinsic variance of the stochastic gradient. Algorithm~\ref{afsvrg-active} thus computes $\widetilde{v}^{\ell}: = \nabla_{\mathcal{G}_{\ell}} f_{i}(\widehat{w})-\nabla_{\mathcal{G}_{\ell}} f_{i}\left(w^{s}\right)+\nabla_{\mathcal{G}_{\ell}} f\left(w^{s}\right)$, while for Algorithm~\ref{afsvrg-passive} there is $\widetilde{v}^{\ell}=\vartheta_1 \cdot (x_i)_{\mathcal{G}_{\ell}} + \nabla g(\widehat{w}_{\mathcal{G}_{\ell }}) - \left(\vartheta_{0,i} \cdot (x_i)_{\mathcal{G}_{\ell}}
+ \nabla g(w_{\mathcal{G}_{\ell}}^{s})\right)
+\nabla_{\mathcal{G}_{\ell}} f\left(w^{s}\right)$, where $\nabla_{\mathcal{G}_{\ell}} f_{i}\left(w^{s}\right)$ is computed as $\left(\vartheta_{0,i} \cdot (x_i)_{\mathcal{G}_{\ell }}+\nabla_{\mathcal{G}_\ell}g(w_{\mathcal{G}_{\ell}}^{s})\right)$.
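In code form, a collaborator's variance-reduced direction can be sketched as follows (a simplified illustration only; the $\ell_2$ regularizer and all names are assumptions, and the snapshot bookkeeping of Algorithms~\ref{afsvrg-active} and \ref{afsvrg-passive} is omitted):
\begin{verbatim}
import numpy as np

def svrg_direction(theta1, theta0_i, x_block_i, w_block, w_block_snap,
                   full_grad_block, lam):
    # theta1: derivative at the current (possibly delayed) iterate, sent by the dominator
    # theta0_i: derivative at the snapshot w^s for sample i, distributed once per outer loop
    grad_now = theta1 * x_block_i + lam * w_block        # assumes g(w) = 0.5 * ||w||^2
    grad_snap = theta0_i * x_block_i + lam * w_block_snap
    return grad_now - grad_snap + full_grad_block        # variance-reduced direction

xb = np.array([0.3, -1.2])
wb = np.array([0.1, 0.4]); wsnap = wb.copy()
step = svrg_direction(-0.3, -0.3, xb, wb, wsnap, np.zeros(2), lam=1e-4)
print(step)   # zero direction at the snapshot when the full gradient vanishes
\end{verbatim}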
\begin{algorithm}[h]
\caption{VF{${\textbf{B}}^2$}-SVRG for active party $\ell$ to actively launch dominated update}\label{afsvrg-active}
\begin{algorithmic}[1]
\REQUIRE {Local data $\{{(x_i)}_{\mathcal{G}_{\ell}},y_i\}_{i=1}^{n}$ stored on the $\ell$-th party, learning rate $\gamma$}.
\STATE Initialize $w_{\mathcal{G}_{\ell}} \in \mathbb{R}^{d_{\ell}}$.\\
\FOR {$s=0, 1, \ldots, S-1$}
\STATE Compute $\left(w^{s}\right)^{\top} x_{i}$ for $i=1,\cdots,n$ based on Algorithm~\ref{safer_tree}.
\STATE Compute $\vartheta_{0,i} = \frac{\partial \mathcal{L}\left(({w}^s)^{\top} x_{i}, y_{i}\right)}{\partial\left(({w}^s)^{\top} x_{i}\right)}$ for $i=1, \cdots, n$ and the full local gradient $\nabla_{\mathcal{G}_{\ell}} f\left(w^{s}\right)=\frac{1}{n} \sum_{i=1}^{n} \nabla_{\mathcal{G}_{\ell}} f_{i}\left( {w}^{s}\right)$, and then distribute all $\vartheta_{0,i}$ to the remaining parties.
\STATE $w_{\mathcal{G}_{\ell}}=w_{\mathcal{G}_{\ell}}^{s}$.\\
{ \bf{Keep doing in parallel (distributed-memory parallel for multiple active parties)}}
\STATE \ \ Pick an index $i$ randomly from $\{1,...,n\}$.
\STATE \ \ Compute $\widehat{w}^{\top} x_{i}$ based on tree-structured communication.
\STATE \ \ Compute $\vartheta_1 = \frac{\partial \mathcal{L}\left(\widehat{w}^{\top} x_{i}, y_{i}\right)}{\partial\left(\widehat{w}^{\top} x_{i}\right)}$.
\STATE \ \ Send $\vartheta_1$ and index $i$ to the remaining parties.
\STATE \ \ Compute $\widetilde{v}^{\ell}=\nabla_{\mathcal{G}_{\ell}} f_{i}(\widehat{w})-\nabla_{\mathcal{G}_{\ell}} f_{i}\left(w^{s}\right)+\nabla_{\mathcal{G}_{\ell}} f\left(w^{s}\right)$.
\STATE \ \ Update $w_{\mathcal{G}_{\ell}} \leftarrow w_{\mathcal{G}_{\ell}}-\gamma \widetilde{v}^{\ell}$.\\
\textbf{End parallel}
\STATE $w_{\mathcal{G}_{\ell}}^{s+1}=w_{\mathcal{G}_{\ell}}$.
\ENDFOR
\end{algorithmic}
\end{algorithm}
\begin{algorithm}[h]
\caption{VF{${\textbf{B}}^2$}-SVRG for the $\ell$-th party to passively launch collaborative updates.}\label{afsvrg-passive}
\begin{algorithmic}[1]
\REQUIRE {Local data $D^{\ell}$ stored on the $\ell$-th party, learning rate $\gamma$}
\STATE Initialize $w_{\mathcal{G}_{\ell}} \in \mathbb{R}^{d_{\ell}}$ (only performed on passive parties).
\FOR {$s=0, 1, \ldots, S-1$}
\STATE Receive all $\vartheta_{0,i}$ from the dominator and use them to compute the full local gradient $\nabla_{\mathcal{G}_{\ell}} f\left(w^{s}\right)=\frac{1}{n} \sum_{i=1}^{n} \nabla_{\mathcal{G}_{\ell}} f_{i}\left( {w}^{s}\right) = \frac{1}{n} \sum_{i=1}^{n} (\vartheta_{0,i} \cdot (x_i)_{\mathcal{G}_{\ell}}
+ \nabla g({w}^s_{\mathcal{G}_{\ell }}))$.
\STATE $w_{\mathcal{G}_{\ell}}=w_{\mathcal{G}_{\ell}}^{s}$.
\STATE { \bf{Keep doing in parallel (shared-memory parallel for multiple threads)}}
\STATE \quad Receive $\vartheta_1$ and index $i$ from the dominator.
\STATE \quad Compute $\widetilde{v}^{\ell}=\vartheta_1 \cdot (x_i)_{\mathcal{G}_{\ell}}
+ \nabla g((\widehat{w})_{\mathcal{G}_{\ell }})
-\left(\vartheta_{0,i} \cdot (x_i)_{\mathcal{G}_{\ell}}
+ \nabla g(w_{\mathcal{G}_{\ell}}^{s})\right)
+\nabla_{\mathcal{G}_{\ell}} f\left(w^{s}\right)$.
\STATE \quad Update $w_{\mathcal{G}_{\ell}} \leftarrow w_{\mathcal{G}_{\ell}}-\gamma \widetilde{v}^{\ell}$.
\STATE {\bf{End Parallel}}
\ENDFOR
\end{algorithmic}
\end{algorithm}
\subsection{VF{${\textbf{B}}^2$}-SAGA}
VF${\textbf{B}}^2$-SAGA, which enjoys the same convergence rate as VF${\textbf{B}}^2$-SVRG, is shown in Algorithms~\ref{afsaga-active} and \ref{afsaga-passive}. Unlike VF${\textbf{B}}^2$-SVRG, which uses ${w}^s$ as the reference point, VF${\textbf{B}}^2$-SAGA uses the average of the historical gradients stored in a table. In Algorithm~\ref{afsaga-active}, we have $\widetilde{v}^{\ell}=\nabla_{\mathcal{G}_{\ell}} f_{i}(\widehat{w})-\widetilde{\alpha}_{i}^{\ell} + \frac{1}{n} \sum_{j=1}^{n} \widetilde{\alpha}_{j}^{\ell}$, while in Algorithm~\ref{afsaga-passive}, we have $\widetilde{v}^{\ell} = \vartheta \cdot (x_i)_{\mathcal{G}_{\ell }}
+ \nabla g((\widehat{w})_{\mathcal{G}_{\ell }})
- \widetilde{\alpha}_{i}^{\ell}
+\frac{1}{n} \sum_{j=1}^{n} \widetilde{\alpha}_{j}^{\ell}$.
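As a rough illustration of the gradient table used above (a sketch with assumed variable names, not the actual implementation), one party could maintain the SAGA-style block update as follows.
\begin{verbatim}
import numpy as np

# Sketch of the SAGA-style block update kept by one party: alpha[i] stores the
# last block gradient seen for sample i, and each update combines the fresh
# block gradient with the stored entry and the running table average.
n, d_block, gamma = 5, 3, 0.1
rng = np.random.default_rng(0)
alpha = np.zeros((n, d_block))      # table of historical block gradients
alpha_avg = alpha.mean(axis=0)      # (1/n) * sum_j alpha_j
w_block = np.zeros(d_block)

def saga_step(i, fresh_block_grad):
    """One update; fresh_block_grad plays the role of
    theta * (x_i) on this block plus the local regularizer gradient."""
    global w_block, alpha_avg
    v = fresh_block_grad - alpha[i] + alpha_avg        # variance-reduced direction
    w_block -= gamma * v                               # parameter step
    alpha_avg += (fresh_block_grad - alpha[i]) / n     # keep the average in sync
    alpha[i] = fresh_block_grad                        # overwrite the table entry

saga_step(2, rng.normal(size=d_block))
\end{verbatim}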
\begin{algorithm}[h]
\caption{VF${\textbf{B}}^2$-SAGA for active party $\ell$ to actively launch dominated update}\label{afsaga-active}
\begin{algorithmic}[1]
\REQUIRE {Local data $\{{(x_i)}_{\mathcal{G}_{\ell}},y_i\}_{i=1}^{n}$ stored on the $\ell$-th party, learning rate $\gamma$}.
\STATE Initialize $w_{\mathcal{G}_{\ell}} \in \mathbb{R}^{d_{\ell}}$.\\
\STATE Compute the local gradient $\widehat{\alpha}_{i}^{\ell}=\nabla_{\mathcal{G}_{\ell}} f_{i}(\widehat{w})$ for all $i \in\{1, \ldots, n\}$ and $\ell = 1,\cdots,q$ through tree-structured communication
(this is performed only at the first global iteration).\\
{ \bf{Keep doing in parallel (distributed-memory parallel for multiple active parties)}}
\STATE \quad Pick an index $i$ randomly from $\{1,...,n\}$.
\STATE \quad Compute $\widehat{w}^{\top} x_{i}=\sum_{\ell^{\prime}=1}^{q}(\widehat{w})_{\mathcal{G}_{\ell^\prime}}^{\top} \left(x_{i}\right)_{\mathcal{G}_{\ell^{\prime}}}$
based on tree-structured communication.
\STATE \quad Compute $\vartheta = \frac{\partial \mathcal{L}\left(\widehat{w}^{\top} x_{i}, y_{i}\right)}{\partial\left(\widehat{w}^{\top} x_{i}\right)}$.
\STATE \quad Send $\vartheta$ and index $i$ to the collaborators.
\STATE \quad Compute $\widetilde{v}^{\ell}=\nabla_{\mathcal{G}_{\ell}} f_{i}(\widehat{w})-\widetilde{\alpha}_{i}^{\ell}+\frac{1}{n} \sum_{j=1}^{n} \widetilde{\alpha}_{j}^{\ell}$.
\STATE \quad Update $w_{\mathcal{G}_{\ell}} \leftarrow w_{\mathcal{G}_{\ell}}-\gamma \widetilde{v}^{\ell}$.
\STATE \quad Update $\widetilde{\alpha}_{i}^{\ell} \leftarrow \nabla_{\mathcal{G}_{\ell}} f_{i}(\widehat{w})$.\\
\textbf{End parallel}
\ENSURE {$w_{\mathcal{G}_\ell}$}
\end{algorithmic}
\end{algorithm}
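The inner product $\widehat{w}^{\top} x_{i}=\sum_{\ell^{\prime}=1}^{q}(\widehat{w})_{\mathcal{G}_{\ell^\prime}}^{\top} \left(x_{i}\right)_{\mathcal{G}_{\ell^{\prime}}}$ used in the algorithms above decomposes across parties. The following sketch (ours; it aggregates the partial products naively rather than via the tree-structured scheme of Algorithm~\ref{safer_tree}) only illustrates this decomposition.
\begin{verbatim}
import numpy as np

# Each party holds one feature block of x_i and the matching block of w_hat;
# the full inner product is the sum of the per-party partial inner products.
rng = np.random.default_rng(0)
blocks = [slice(0, 2), slice(2, 5), slice(5, 6)]   # q = 3 parties
x_i = rng.normal(size=6)
w_hat = rng.normal(size=6)
partials = [w_hat[b] @ x_i[b] for b in blocks]     # computed locally per party
print(sum(partials), w_hat @ x_i)                  # identical values
\end{verbatim}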
\begin{algorithm}[h]
\caption{VF{${\textbf{B}}^2$}-SAGA for the $\ell$-th party to passively launch collaborative updates.}\label{afsaga-passive}
\begin{algorithmic}[1]
\STATE { \bf{Keep doing in parallel (shared-memory parallel for multiple threads)}}
\STATE \quad Receive $\vartheta$ and index $i$ from the dominator.
\STATE \quad Compute $\widetilde{v}^{\ell}=\vartheta \cdot (x_i)_{\mathcal{G}_{\ell }}
+ \nabla_{\mathcal{G}_\ell}g((\widehat{w})_{\mathcal{G}_{\ell }})
- \widetilde{\alpha}_{i}^{\ell}
+\frac{1}{n} \sum_{j=1}^{n} \widetilde{\alpha}_{j}^{\ell}$.
\STATE \quad Update $w_{\mathcal{G}_{\ell}} \leftarrow w_{\mathcal{G}_{\ell}}-\gamma \widetilde{v}^{\ell}$.
\STATE \quad Update $\widetilde{\alpha}_{i}^{\ell} \leftarrow \vartheta \cdot (x_i)_{\mathcal{G}_{\ell }}
+ \nabla_{\mathcal{G}_\ell}g((\widehat{w})_{\mathcal{G}_{\ell }})$.
\STATE {\bf{End parallel}}
\end{algorithmic}
\end{algorithm}
\section{Additional Experiments on Regression Task}
These experiments are conducted on two datasets for the regression task: $D_5$ (E2006-tfidf) and $D_6$ (YearPredictionMSD) from the LIBSVM repository \cite{chang2011libsvm}. $D_5$ has $16,087$ training samples and $150,306$ features. $D_6$ has $463,715$ training samples and $90$ features. Moreover, we apply the min-max normalization technique to the target variable $y$ of $D_6$.\\
{\noindent {\bf Problems:}
We consider the $\ell_2$-norm regularized regression problem for the $\mu$-strongly convex case
\begin{align}\label{P3}
\min_{w \in \mathbb{R}^d} f(w):=\frac{1}{n} \sum_{i=1}^{n} ({w^{\top} x_i}-y_i)^2 + \frac{\lambda}{2} \|w\|^2,
\end{align}
and the robust linear regression problem for the nonconvex case
\begin{align}\label{P4}
\min_{w \in \mathbb{R}^d} f(w):=\frac{1}{n} \sum_{i=1}^{n} \mathcal{L}(y_i -\left\langle {x_i}, {w} \right\rangle),
\end{align}
where $\mathcal{L} (x) :=\log (\frac{x^2}{2} + 1)$. \\
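To make Problems (\ref{P3}) and (\ref{P4}) concrete, the following small sketch (illustration only; the function names and the random toy data are ours) evaluates the two objectives.
\begin{verbatim}
import numpy as np

def ridge_objective(w, X, y, lam):
    """l2-regularized least squares, Problem (P3)."""
    residual = X @ w - y
    return np.mean(residual ** 2) + 0.5 * lam * np.dot(w, w)

def robust_objective(w, X, y):
    """Robust (nonconvex) linear regression, Problem (P4), L(x)=log(x^2/2+1)."""
    residual = y - X @ w
    return np.mean(np.log(residual ** 2 / 2.0 + 1.0))

rng = np.random.default_rng(0)
X, y = rng.normal(size=(8, 4)), rng.normal(size=8)
w0 = np.zeros(4)
print(ridge_objective(w0, X, y, lam=1e-3), robust_objective(w0, X, y))
\end{verbatim}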
{\noindent {\bf Asynchronous efficiency:}}
In these experiments, we set $q=12$ and $m=2$, and fix $\gamma$ for algorithms of the same SGD type but with different parallel fashions.
As shown in Fig.~\ref{Exp-regression}, the loss vs. running time curves demonstrate that our algorithms consistently outperform their synchronous counterparts in terms of efficiency. \\
\begin{figure}
\caption{$D_5$ for Problem (\ref{P3})}
\caption{$D_5$ for Problem (\ref{P4})}
\caption{$D_6$ for Problem (\ref{P3})}
\caption{$D_6$ for Problem (\ref{P4})}
\caption{Results for solving regression tasks, where the number of epochs (points) denotes how many passes over the dataset the algorithm makes.}
\label{Exp-regression}
\end{figure}
\noindent{\bf{Evaluation of the losslessness:}}
To demonstrate that our algorithms are lossless, we compare them with the corresponding non-federated (NonF) algorithms, \emph{i.e.}, algorithms where all data are integrated together for modeling. For datasets without testing data, we split the dataset into $5$ parts and use one of them for testing. Moreover, we use the root mean square error (RMSE) metric for evaluation:
\begin{align}\label{rmse}
RMSE=\sqrt{\frac{1}{n}\sum_{i=1}^{n}(\hat{y}_i-y_i)^2},
\end{align}
where $\hat{y}_i$ denotes the predicted value and $y_i$ is the true value. As shown in Table~\ref{exp-lossless1}, the results of our algorithms are the same as those of the NonF algorithms and are much better than those of AFSVRG-VP, which is consistent with our claims.
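For completeness, the RMSE in Eq.~(\ref{rmse}) can be computed as in the following tiny sketch (illustration only).
\begin{verbatim}
import numpy as np

def rmse(y_pred, y_true):
    """Root mean square error, as in Eq. (rmse)."""
    return float(np.sqrt(np.mean((y_pred - y_true) ** 2)))

print(rmse(np.array([0.1, 0.9, 0.4]), np.array([0.0, 1.0, 0.5])))  # 0.1
\end{verbatim}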
\begin{table}[!t]
\centering
\begin{tabular}{@{}ccccc@{}}
\toprule
& Algorithm & $D_5$(RMSE) & $D_6$(RMSE) \\ \midrule
\multirow{3}{*}{Problem (\ref{P3})}
& NonF & 0.389$\pm$0.012 & 0.069$\pm$0.004 \\
&{{ AFSVRG-VP}} & 0.417$\pm$0.010 & 0.084$\pm$0.003 \\
&{\bf{ Ours}} & 0.389$\pm$0.013 & 0.069$\pm$0.005 \\
\midrule
\multirow{3}{*}{Problem (\ref{P4})}
& NonF & 0.382$\pm$0.014 & 0.068$\pm$0.004 \\
& {{ AFSVRG-VP}} & 0.415$\pm$0.009 & 0.084$\pm$0.004 \\
&{\bf{ Ours}} & 0.382$\pm$0.013 & 0.068$\pm$0.005 \\
\bottomrule
\end{tabular}
\caption{Evaluation of the losslessness for regression task (10 trials).}
\label{exp-lossless1}
\end{table}
\subsection{Asynchronous scalability in terms of $q$}
We present a clearer illustration of the asynchronous scalability in terms of $q$ in Fig.~\ref{Exp-sca-1}.
\begin{figure}
\caption{SGD-based}
\caption{SVRG-based}
\caption{SAGA-based}
\caption{ $q$-parties speedup scalability with $m=2$ on $D_4$.}
\label{Exp-sca-1}
\end{figure}
\iffalse
\subsection{Extra Discussion of the Asynchronous Scalability in Terms of $m$}
We also want to explore the scalability in terms of $m$. The ideal case for our BAPA is that each party has enough threads to collaborate with the parallel dominated updates; in other words, the computational resource of each party is sufficient. Otherwise, there is an undesired case in which a dominated update is not responded to by a specific party because that party has no free threads. Thus, when demonstrating the scalability, we assume that the threads of each party are sufficient, and the unbalanced computational resources of the parties are reflected by the unbalanced computation capabilities of their threads. In this case, given fixed $q$ and $k$, when $m$ increases there is a potential communication jam that hampers the scalability. In our experiments, we consider the asynchronous scalability concerning $m$ as follows.
Given a fixed $q$ and $k$, $m$-active parties asynchronous scalability is defined as
\[\text{$m$-active asynchronous scalability} =\text{Run time of 1 active party}\,/\,\text{Run time of $m$ active parties},\]
where $m$-active parties means that there are $m<q$ active parties among the $q$ parties. In our experiments, $q=6$ and $k=4$. In particular, the ideal case is the one where increasing $m$ does not hamper the efficiency, \emph{i.e.}, when $m$ increases the running time of $m$ active parties is the same as that of $m=1$. As shown in Fig.~\ref{Exp-sca1}, under the conditions of our experiments, the asynchronous scalability in terms of $m$ is near ideal.
\begin{figure}
\caption{SGD-based}
\caption{SVRG-based}
\caption{SAGA-based}
\caption{ $m$-active parties speedup scalability with $q=6$ on $D_4$.}
\label{Exp-sca1}
\end{figure}
\fi
\section{Preliminaries for Convergence Analysis (corresponding to line 254 in the manuscript)}
In this section, we present some preliminaries which are helpful for readers to understand the analysis.
\\
\noindent{\bf{Globally labeling the iterates:}} As shown in the algorithms, we do not globally label the iterates from different parties. However, how to define the global iteration counter $t$ that labels an iterate $w_t$ matters in the convergence analysis. In this paper, we adopt the ``after read'' labeling strategy \cite{leblond2017asaga}, where the global iteration counter is incremented once a dominator finishes computing $\widehat{w}_t^\top x_i$ or once a collaborator finishes reading the local parameters $(\widehat{w}_t)_{\mathcal{G}_{\psi(t)}}$ (this reading operation is performed after having received information from a specific dominator, \emph{e.g.,} step~3 in Algorithm~\ref{AFSGD-P}). It means that $\widehat{w}_t$ on a specific dominator is the $(t+1)$-th fully completed computation of $\widehat{w}^\top x_i$, and $(\widehat{w}_t)_{\mathcal{G}_{\psi(t)}}$ on a collaborative party is the $(t+1)$-th fully completed read of $({w}_t)_{\mathcal{G}_{\psi(t)}}$. Importantly, such a labeling strategy guarantees that $i_t$ and $\widehat{w}_t$ are independent \cite{leblond2017asaga}, which simplifies the convergence analyses, especially for VF{${\textbf{B}}^2$}-SAGA.
\\
\noindent{\bf{Global updating rule:}} Here we introduce the global updating rule
\begin{equation}\label{global-up1}
w_{t+1} = w_t -\gamma \textbf{U}_{\psi(t)}\widetilde{v}_t^{\psi(t)},
\end{equation}
where $\widetilde{v}_t^{\psi(t)}$ has a different definition for each type of role (dominator or collaborator). Although the definitions of $\widetilde{v}_t^{\psi(t)}$ differ across roles, we build a uniform analysis for them.
\\
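The role of the block selection operator $\textbf{U}_{\psi(t)}$ can be illustrated by the following small NumPy sketch (our own illustration, with hypothetical block boundaries): only the block owned by the updating party changes at global iteration $t$.
\begin{verbatim}
import numpy as np

# Illustration of the global update w_{t+1} = w_t - gamma * U_{psi(t)} v_t:
# U_{psi(t)} embeds a block-wise direction into the full parameter vector.
d, gamma = 6, 0.1
blocks = {0: slice(0, 3), 1: slice(3, 6)}   # feature blocks G_0 and G_1
w = np.zeros(d)

def global_update(w, party, v_block):
    w_next = w.copy()
    w_next[blocks[party]] -= gamma * v_block
    return w_next

w = global_update(w, party=1, v_block=np.array([1.0, -2.0, 0.5]))
print(w)   # only coordinates 3..5 move
\end{verbatim}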
\noindent{ \bf{Relationship between $w_t$ and $\widehat{w}_t$:}} For dominators, $\widehat{w}^{\top} x_{i}=\sum_{\ell^{\prime}=1}^{q}(\widehat{w})_{\mathcal{G}_{\ell '}}^{\top}\left(x_{i}\right)_{\mathcal{G}_{\ell^{\prime}}}$ is obtained based on Algorithm~\ref{safer_tree} in an asynchronous parallel fashion, where $\widehat{w}$ denotes $w$ inconsistently read from different data parties. This means that a block $(\widehat{w}_t)_{\mathcal{G}_{\ell '}}$ (where $\ell ' \neq \ell$) may be inconsistent with $({w}_t)_{\mathcal{G}_{\ell '}}$, i.e., some blocks of $\widehat{w}_t$ are the same as those of $w_t$ (e.g., $({w_t})_{\mathcal{G}_{\ell '}}=(\widehat{w}_t)_{\mathcal{G}_{\ell '}}$), while others are different. Thus we introduce a set $D(t)$ in Eq.~\ref{Dt1}, and the upper bound on its size is introduced in Assumption~\ref{assum4}.
\\
\noindent{ \bf{Relationship between $\bar{w}_t$ and $\widehat{w}_t$:}} A collaborative party uses the $\vartheta$ received from the dominator to compute $\nabla_{\mathcal{G}_{\psi(t)}} \mathcal{L}$, and we denote $\vartheta \cdot (x_i)_{\mathcal{G}_{\psi(t)}}$ at global iteration $t$ by $\nabla_{\mathcal{G}_{\psi(t)}} \mathcal{L}(\bar{w}_t)$. Since there is a communication delay between the dominator and the collaborators, $\bar{w}_t$ may be an old $\widehat{w}_u$ ($u\leq t$). To describe the relation between $\bar{w}_t$ and $\widehat{w}_u$, we introduce a set $D^\prime(t)$ in Eq.~\ref{Dt2} (when $u = t$, $D^\prime(t)$ is the empty set). Meanwhile, we introduce an upper bound on the communication delay in Assumption~\ref{assum4}.
\\
{\noindent{\bf Introduction of $\widetilde{v}_t$ and $\widehat{v}_t$}:} In Algorithms~\ref{AFSGD-A} and \ref{AFSGD-P}, for a dominator we have $\widetilde{v}_t=\widehat{v}_t$, while for a collaborator we have $\widetilde{v}_t^{\psi(t)}=\vartheta \cdot (x_i)_{\mathcal{G}_\ell} + \nabla_{\mathcal{G}_\ell}g(\widehat{w})$, which can be rewritten as $\widetilde{v}_t^{\psi(t)}=\bar{v}_t^{\psi(t)}+ \nabla_{\mathcal{G}_\ell}g(\widehat{w})-\nabla_{\mathcal{G}_\ell}g(\bar{w})$.
\section{Convergence Analyses for Strongly Convex problems}
\subsection{Convergence Analysis of Theorem~\ref{thm-sgdconvex}}
\begin{lemma}\label{lem-csgd-1}
For VF{${\textbf{B}}^2$}-SGD and all $t$, we have
\begin{equation}\label{lemequ-csgd-1}
\mathbb{E} ||\widetilde{v}_{t}^{\psi(t)}||^2 \leq \frac{2G}{1-\lambda_{1}}
\end{equation}
where $\lambda_{1}=2L_*^2\gamma^2\tau$.
\end{lemma}
\begin{proof}[\textbf{Proof of Lemma \ref{lem-csgd-1}:}] If the $t$-th global iteration is a collaborative update, we have
\begin{eqnarray}\label{csgd-0}
\mathbb{E} ||\widetilde{v}_{t}^{\psi(t)}||^2 &=& \mathbb{E} || \vartheta \cdot\left(x_{i}\right)_{\mathcal{G}_{\psi(t)}}
+ \nabla_{\mathcal{G}_{\psi(t)}} g((\widehat{w}_t)_{\mathcal{G}_{\psi(t)}} )||^2 \nonumber \\
&=& \mathbb{E} ||\vartheta \cdot\left(x_{i}\right)_{\mathcal{G}_{\psi(t)}}
+ \nabla_{\mathcal{G}_{\psi(t)}} g((\bar{w}_{t})_{\mathcal{G}_{\psi(t)}})
- \nabla_{\mathcal{G}_{\psi(t)}} g((\bar{w}_{t})_{\mathcal{G}_{\psi(t)}})
+ \nabla_{\mathcal{G}_{\psi(t)}} g((\widehat{w}_t)_{\mathcal{G}_{\psi(t)}}) ||^2 \nonumber \\
&\stackrel{(a)}{\leq}& 2 \mathbb{E} || \bar{v}_{t}^{\psi(t)}\|^2
+ 2 \mathbb{E}\|\nabla_{\mathcal{G}_{\psi(t)}} g((\bar{w}_{t})_{\mathcal{G}_{\psi(t)}})
- \nabla_{\mathcal{G}_{\psi(t)}} g((\widehat{w}_t)_{\mathcal{G}_{\psi(t)}})||^2
\nonumber \\
&\stackrel{(b)}{\leq}& 2 \mathbb{E} || \bar{v}_{t}^{\psi(t)}\|^2
+ 2{L_{g}^2} \mathbb{E}\|(\bar{w}_{t})_{\mathcal{G}_{\psi(t)}}
- (\widehat{w}_t)_{\mathcal{G}_{\psi(t)}}||^2
\nonumber \\
&\stackrel{(c)}{=}& 2 \mathbb{E} || \bar{v}_{t}^{\psi(t)}\|^2
+ 2{L_{g}^2}\gamma^2 \mathbb{E}\|\sum_{t^\prime\in D'(t), \psi(t^\prime)=\psi(t)} \widetilde{v}_{t^\prime}^{\psi(t^\prime)}||^2
\nonumber \\
&\stackrel{(d)}{\leq}& 2 \mathbb{E} || \bar{v}_{t}^{\psi(t)}\|^2
+ 2{L_{g}^2}\gamma^2 \tau_2\sum_{t^\prime\in D'(t)} \mathbb{E}\|\widetilde{v}_{t^\prime}^{\psi(t^\prime)}||^2
\nonumber \\
&\stackrel{(e)}{\leq}& 2 \mathbb{E} || \bar{v}_{t}^{\psi(t)}\|^2
+ 2{L_{*}^2}\gamma^2 \tau_2 \sum_{t^\prime\in D'(t)}\mathbb{E}\|\widetilde{v}_{t^\prime}^{\psi(t^\prime)}||^2
\nonumber \\
&\stackrel{(f)}{\leq}& 2 G
+ 2{L_{*}^2}\gamma^2 \tau_2 \sum_{t^\prime\in D'(t)}\mathbb{E}\|\widetilde{v}_{t^\prime}^{\psi(t^\prime)}||^2
\end{eqnarray}
where (a) follows from $\|a+b\|^2\leq 2\|a\|^2 + 2\|b\|^2$, (b) follows from Assumption~\ref{assum2}, (c) follows from Eq.~\ref{Dt2}, (d) follows from Assumption~\ref{assum4} and $\|\sum_{i=1}^{n}a_i\|^2 \leq n \sum_{i=1}^{n} \|a_i\|^2$, (e) follows from the definition of $L_{*}$, and (f) follows from the definition of $\bar{v}_t$ and Assumption~\ref{assum1}.
If the $t$-th global iteration is a dominated update, we have
\begin{align}\label{csgd-1}
\mathbb{E} ||\widetilde{v}_{t}^{\psi(t)}||^2 = \mathbb{E} ||\widehat{v}_{t}^{\psi(t)}||^2 \leq G
\end{align}
Then, for all $t$, according to Eqs.~\ref{csgd-0} and \ref{csgd-1}, we have
\begin{align}\label{csgd-2}
\mathbb{E} ||\widetilde{v}_{0}^{\psi(0)}||^2 \stackrel{(a)}{\leq}& G \leq 2G
\nonumber \\
\mathbb{E} ||\widetilde{v}_{1}^{\psi(1)}||^2 \leq&
2G + 2{L_{*}^2}\gamma^2 \tau_2 \sum_{t^\prime\in D'(1)}\mathbb{E}\|\widetilde{v}_{t^\prime}^{\psi(t^\prime)}||^2
\stackrel{(b)}{\leq}
2G + 2{L_{*}^2}\gamma^2 \tau_2^2 \mathbb{E} ||\widetilde{v}_{0}^{\psi(0)}||^2
\stackrel{(c)}{=}
2G\frac{1-k^{ 1 +1}}{1-k}
\nonumber \\
...
\nonumber \\
\mathbb{E} ||\widetilde{v}_{t}^{\psi(t)}||^2 \leq &
2G + 2{L_{*}^2}\gamma^2 \tau_2 \sum_{t^\prime\in D'(t)}\mathbb{E}\|\widetilde{v}_{t^\prime}^{\psi(t^\prime)}||^2
\leq
2G + 2{L_{*}^2}\gamma^2 \tau_2^2 (2G\frac{1-k^{t}}{1-k})
\stackrel{(d)}{=}
2G\frac{1-k^{t+1}}{1-k}
\end{align}
where (a) follows from the fact that the $0$-th global iteration must be a dominated update, (b) follows from the fact that $t' \leq t$ for all $t' \in D'(t)$, (c) follows from the definition $k:=2{L_{*}^2}\gamma^2 \tau_2^2$, and (d) follows from the geometric series sum formula. According to Eq.~\ref{csgd-2}, it holds for all $t$ that
\begin{align}\label{csgd-3}
\mathbb{E} ||\widetilde{v}_{t}^{\psi(t)}||^2 \leq
2G\frac{1-k^{t+1}}{1-k} \leq \frac{2G}{1-k}\leq \frac{2G}{1-2{L_{*}^2}\gamma^2 \tau}
\end{align}
where the last inequality follows from the definition of $\tau$.
This completes the proof.
\end{proof}
\begin{lemma}\label{lem-csgd-2}
For all $t$, we have
\begin{equation}\label{1}
\mathbb{E} \| {v}_{t}^{\psi(t)} - \widetilde{v}_{t}^{\psi(t)} \|^2 \leq2{ L_{{*}}^2 \gamma^2 \tau_1} \sum_{t' \in D(t)} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2
+ 8{ L_{*}^2 \gamma^2 \tau_2 \sum_{t' \in D^\prime(t)}} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2
\end{equation}
\end{lemma}
\begin{proof}[\textbf{Proof of Lemma \ref{lem-csgd-2}:}]
First, we give the bound of $ \mathbb{E} \| \widehat{v}_{t}^{\psi(t)} - \widetilde{v}_{t}^{\psi(t)} \|^2$ as follows.
\begin{align}\label{csgd-4}
\mathbb{E} \| \widehat{v}_{t}^{\psi(t)} - \widetilde{v}_{t}^{\psi(t)} \|^2
& \stackrel{ (a) }{\leq} \mathbb{E} \| \nabla_{\mathcal{G}_{\psi(t)}} f_{i_t}(\bar{w}_t) - \nabla_{\mathcal{G}_{\psi(t)}} f_{i_t}(\widehat{w}_t) + \nabla_{\mathcal{G}_{\psi(t)}} g((\widehat{w}_t)_{\mathcal{G}_{\psi(t)}}) - \nabla_{\mathcal{G}_{\psi(t)}} g((\bar{w}_t)_{\mathcal{G}_{\psi(t)}})\|^2
\nonumber \\
& \stackrel{ (b) }{\leq} 2\mathbb{E} \| \nabla_{\mathcal{G}_{\psi(t)}} f_{i_t} (\bar{w}_t) - \nabla_{\mathcal{G}_{\psi(t)}} f_{i_t} (\widehat{w}_t) \|^2
+ 2 \mathbb{E} \| \nabla_{\mathcal{G}_{\psi(t)}} g ((\bar{w}_t)_{\mathcal{G}_{\psi(t)}})
- \nabla_{\mathcal{G}_{\psi(t)}} g ((\widehat{w}_t)_{\mathcal{G}_{\psi(t)}}) \|^2
\nonumber \\
& \stackrel{ (c) }{\leq} 2{L^2} \mathbb{E} \| \bar{w}_t - \widehat{w}_t \|^2
+ 2{L_{g}^2} \mathbb{E} \| (\bar{w}_t)_{\mathcal{G}_{\psi(t)}} - (\widehat{w}_t)_{\mathcal{G}_{\psi(t)}} \|^2
\nonumber \\
& \stackrel{ (d) }{=} 2{ L^2 \gamma^2} \mathbb{E} \| \sum_{t' \in D'(t)} \textbf{U}_{\psi(t')} \widetilde{v}^{\psi(t')}_{t'} \|^2 + 2{ L_{g}^2 \gamma^2 } \mathbb{E} \| \sum_{t' \in D^\prime(t), \psi(t^\prime)=\psi(t)} \textbf{U}_{\psi(t')} \widetilde{v}^{\psi(t')}_{t'} \|^2
\nonumber \\
& \stackrel{ (e) }{\leq} 2{ L^2 \gamma^2 \tau_2} \sum_{t' \in D'(t)} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2 + 2{ L_{g}^2 \gamma^2 \tau_2 \sum_{t' \in D^\prime(t)}} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2
\nonumber \\
& \stackrel{ (f) }{\leq} 4{ L_{{*}}^2 \gamma^2 \tau_2} \sum_{t' \in D'(t)} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2
\end{align}
where (a) follows from the definition of $\bar{v}_{t}^{\psi(t)}$ and the definitions of $\widetilde{v}_{t}^{\psi(t)}$ for different types of the $t$-th global iteration (i.e., dominated or collaborative), (b) follows from $\|a+b\|^2 \leq 2\|a\|^2 + 2\|b\|^2$, (c) follows from Assumptions~\ref{assum1} and \ref{assum2}, (d) follows from Eq.~\ref{Dt1}, (e) follows from Assumption~\ref{assum4} and $\| \sum_{i=1}^{n} a_i \|^2 \leq n\sum_{i=1}^{n} \|a_i\|^2$, (f) follows from the definition of $L_{*}$.
Then we consider the bound of $\mathbb{E} \| v_{t}^{\psi(t)} - \widetilde{v}_{t}^{\psi(t)} \|^2$:
\begin{align}\label{csgd-5}
\mathbb{E} \| v_{t}^{\psi(t)} - \widetilde{v}_{t}^{\psi(t)} \|^2 & = \mathbb{E} \| v_{t}^{\psi(t)} - \widehat{v}_{t}^{\psi(t)} + \widehat{v}_{t}^{\psi(t)} - \widetilde{v}_{t}^{\psi(t)} \|^2
\nonumber \\
& \stackrel{ (a) }{\leq} 2\mathbb{E} \| v_{t}^{\psi(t)} - \widehat{v}_{t}^{\psi(t)}\|^2+ 2 \mathbb{E} \| \widehat{v}_{t}^{\psi(t)} - \widetilde{v}_{t}^{\psi(t)} \|^2
\nonumber \\
& \leq 2\mathbb{E} \| \nabla_{\mathcal{G}_{\psi(t)}} f_{i_t} ({w}_t) - \nabla_{\mathcal{G}_{\psi(t)}} f_{i_t} (\widehat{w}_t) \|^2
+ 2 \mathbb{E} \| \widehat{v}_{t}^{\psi(t)} - \widetilde{v}_{t}^{\psi(t)} \|^2
\nonumber \\
& \stackrel{ (b) }{\leq} 2{L^2} \mathbb{E} \| {w}_t - \widehat{w}_t \|^2
+ 2 \mathbb{E} \| \widehat{v}_{t}^{\psi(t)} - \widetilde{v}_{t}^{\psi(t)} \|^2
\nonumber \\
& \stackrel{ (c) }{=} 2{ L^2 \gamma^2} \mathbb{E} \| \sum_{t' \in D(t)} \textbf{U}_{\psi(t')} \widetilde{v}^{\psi(t')}_{t'} \|^2
+2 \mathbb{E} \| \widehat{v}_{t}^{\psi(t)} - \widetilde{v}_{t}^{\psi(t)} \|^2
\nonumber \\
& \stackrel{ (d) }{\leq} 2{ L^2 \gamma^2 \tau_1} \sum_{t' \in D(t)} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2
+ 2 \mathbb{E} \| \widehat{v}_{t}^{\psi(t)} - \widetilde{v}_{t}^{\psi(t)} \|^2
\nonumber \\
& \stackrel{ (e) }{\leq} 2{ L_{{*}}^2 \gamma^2 \tau_1} \sum_{t' \in D(t)} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2
+ 8{ L_{*}^2 \gamma^2 \tau_2 \sum_{t' \in D^\prime(t)}} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2
\end{align}
where (a) follows from $\|a+b\|^2 \leq 2\|a\|^2 + 2\|b\|^2$, (b) follows from Assumption~\ref{assum1}, (c) follows from Eq.~\ref{Dt1}, (d) follows from Assumption~\ref{assum4} and $\| \sum_{i=1}^{n} a_i \|^2 \leq n\sum_{i=1}^{n} \|a_i\|^2$, and (e) follows from the definition of $L_{*}$ and Eq.~\ref{csgd-4}. This completes the proof.
\end{proof}
\begin{lemma}\label{lem-csgd-3}
For VF{${\textbf{B}}^2$}-SGD, we have
\begin{equation*}\label{lemeq-csgd3}
\sum_{u\in K(t)}\mathbb{E} \|\nabla_{\mathcal{G}_{\psi(u)}} f({w}_{u})\|^2
\geq
\frac{1}{2} \sum_{u\in K(t)} \mathbb{E}\| \nabla_{\mathcal{G}_{\psi(u)}} f({w}_{t})\|^2
- L^2 \gamma^2 \eta_1 \sum_{u\in K(t)} \sum_{u' \in \{t,\cdots,u\}} \mathbb{E} \|\widetilde{v}_{u'}^{\psi(u')}\|^2
\end{equation*}
\end{lemma}
\begin{proof}[\textbf{Proof of Lemma \ref{lem-csgd-3}:}]
For any $u \in K(t)$, we have
\begin{align}\label{csgd-6}
\mathbb{E} \|\nabla_{\mathcal{G}_{\psi(u)}} f(w_{t})\|^{2}
& = \mathbb{E} \| \nabla_{\mathcal{G}_{\psi(u)}} f({w}_{t})
- \nabla_{\mathcal{G}_{\psi(u)}} f({w}_{u})
+ \nabla_{\mathcal{G}_{\psi(u)}} f({w}_{u})\|^2
\nonumber \\
& \stackrel{(a)}{\leq} 2\mathbb{E} \| \nabla_{\mathcal{G}_{\psi(u)}} f({w}_{t})
- \nabla_{\mathcal{G}_{\psi(u)}} f({w}_{u})\|^2
+ 2 \mathbb{E} \|\nabla_{\mathcal{G}_{\psi(u)}} f({w}_{u})\|^2
\nonumber \\
& \leq 2\mathbb{E} \| \nabla f({w}_{t}) - \nabla f({w}_{u})\|^2
+ 2 \mathbb{E} \|\nabla_{\mathcal{G}_{\psi(u)}} f({w}_{u})\|^2
\nonumber \\
& \stackrel{(b)}{\leq} 2 L^2 \mathbb{E} \| {w}_{t} - {w}_{u}\|^2
+ 2 \mathbb{E} \|\nabla_{\mathcal{G}_{\psi(u)}} f({w}_{u})\|^2
\nonumber \\
& \stackrel{(c)}{=} 2 L^2 \gamma^2 \mathbb{E} \|\sum_{u' \in \{t,...,u\}}\textbf{U}_{\psi(u')}\widetilde{v}_{u'}^{\psi(u')}\|^2
+ 2 \mathbb{E} \|\nabla_{\mathcal{G}_{\psi(u)}} f({w}_{u})\|^2
\nonumber \\
& \stackrel{(d)}{\leq} 2 L^2 \gamma^2 \eta_1 \sum_{u' \in \{t,...,u\}} \mathbb{E} \|\widetilde{v}_{u'}^{\psi(u')}\|^2
+ 2 \mathbb{E} \|\nabla_{\mathcal{G}_{\psi(u)}} f({w}_{u})\|^2
\end{align}
where (a) follows from $\|a+b\|^2 \leq 2\|a\|^2 + 2\|b\|^2$, (b) follows from Assumption~\ref{assum1}, (c) follows from Eq.~\ref{global-up1}, and (d) follows from the bound on $|K(t)|$ and $\| \sum_{i=1}^{n} a_i \|^2 \leq n\sum_{i=1}^{n} \|a_i\|^2$. According to Eq.~\ref{csgd-6}, we have
\begin{align}\label{csgd-7}
\mathbb{E} \|\nabla_{\mathcal{G}_{\psi(u)}} f({w}_{u})\|^2
\geq \frac{1}{2} \mathbb{E}\| \nabla_{\mathcal{G}_{\psi(u)}} f({w}_{t})\|^2 - L^2 \gamma^2 \eta_1 \sum_{u' \in \{t,\cdots,u\}} \mathbb{E} \|\widetilde{v}_{u'}^{\psi(u')}\|^2
\end{align}
Summing the above inequality over all $u \in K(t)$ yields the conclusion. This completes the proof.
\end{proof}
\begin{proof}[\textbf{Proof of Theorem \ref{thm-sgdconvex}:}]
For all $u \in K(t)$, we have
\begin{eqnarray}\label{csgd-8}
&& \mathbb{E} f (w_{u+1})
\\ \nonumber &\stackrel{ (a) }{\leq}& \mathbb{E} \left ( f (w_{u}) + \langle \nabla f(w_{u}), w_{u+1}-w_{u} \rangle + \frac{L}{2} \|w_{u+1}-w_{u} \|^2 \right )
\\ \nonumber &=& \mathbb{E} \left ( f (w_{u}) - \gamma \langle \nabla f(w_{u}),
\widetilde{v}^{\psi(u)}_{u} \rangle + \frac{L\gamma^2}{2} \| \widetilde{v}^{\psi(u)}_{u} \|^2 \right )
\\ \nonumber &{=}& \mathbb{E} \left ( f (w_{u}) - \gamma \langle \nabla f(w_{u}), \widetilde{v}^{\psi(u)}_{u} + {v}^{\psi(u)}_{u}- {v}^{\psi(u)}_{u} \rangle + \frac{L \gamma^2}{2} \| \widetilde{v}^{\psi(u)}_{u} \|^2 \right )
\\ \nonumber &\stackrel{(b)}{=}& \mathbb{E} f (w_{u}) - \gamma \mathbb{E} \langle \nabla f(w_{u}), \nabla_{\mathcal{G}_{\psi(u)}} f ({w}_{u}) \rangle + \frac{L \gamma^2}{2} \mathbb{E} \| \widetilde{v}^{\psi(u)}_{u} \|^2 + \gamma \mathbb{E} \langle \nabla f(w_{u}), {v}^{\psi(u)}_{u} - \widetilde{v}^{\psi(u)}_{u} \rangle
\\ \nonumber &\stackrel{ (c) }{\leq}& \mathbb{E} f (w_{u}) - \gamma \mathbb{E} \| \nabla_{\mathcal{G}_{\psi(u)}} f ({w}_{u}) \|^2 + \frac{\gamma}{2} \mathbb{E} \| \nabla_{\mathcal{G}_{\psi(u)}} f ({w}_{u}) \|^2 + \frac{L \gamma^2}{2} \mathbb{E} \| \widetilde{v}^{\psi(u)}_{u} \|^2 + \frac{\gamma}{2} \mathbb{E} \| \widetilde{v}^{\psi(u)}_{u} - {v}^{\psi(u)}_{u} \|^2
\\ &\stackrel{(d)}{\leq}& \mathbb{E} f (w_{u}) - \frac{\gamma}{2} \mathbb{E} \| \nabla_{\mathcal{G}_{\psi(u)}} f ({w}_{u}) \|^2 + \frac{L_* \gamma^2}{2} \mathbb{E} \| \widetilde{v}^{\psi(u)}_{u} \|^2
\nonumber \\ &&
+ { L_{{*}}^2 \gamma^3 \tau_1} \sum_{t' \in D(t)} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2
+ 4{ L_{*}^2 \gamma^3 \tau_2 \sum_{t' \in D^\prime(t)}} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2 \nonumber
\end{eqnarray}
where (a) follows from Assumption~\ref{assum2}, (b) follows from the fact that $ {v}^{\psi(u)}_u = \nabla_{\mathcal{G}_{\psi(u)}} f_{i_u} ({w}_u)$ for a specific party, (c) follows from $\langle a,b \rangle\leq \frac{1}{2}(\|a\|^2+\|b\|^2)$, and (d) follows from Lemma~\ref{lem-csgd-2} and the definition of $L_*$.
Summing Eq.~(\ref{csgd-8}) over all $ u \in K(t) $, we obtain
\begin{eqnarray}\label{csgd-9}
&& \mathbb{E} \left[f (w_{t+|K(t)|}) - f (w_{t}) \right]
\\ \nonumber
&\leq&
-\frac{\gamma}{2}\sum_{u \in K(t)} \mathbb{E} \| \nabla_{\mathcal{G}_{\psi(u)}} f ({w}_{u}) \|^2
+ \frac{L_* \gamma^2}{2}\sum_{u \in K(t)} \mathbb{E} \| \widetilde{v}^{\psi(u)}_{ u }\|^2
\nonumber \\
&+& ( L_{{*}}^2 \gamma^3 \tau_1 + 4L_{*}^2 \gamma^3 \tau_2 ) \sum_{u \in K(t)} \sum_{u' \in D^\prime(u)} \mathbb{E} \| \widetilde{v}^{\psi(u')}_{u'} \|^2
\nonumber \\
&\stackrel{(a)}{\leq}&
-\frac{\gamma}{2}\left( \frac{1}{2} \sum_{u\in K(t)} \mathbb{E}\| \nabla_{\mathcal{G}_{\psi(u)}} f({w}_{t})\|^2
- L^2 \gamma^2 \eta_1 \sum_{u\in K(t)} \sum_{u' \in \{t,\cdots,u\}} \mathbb{E} \|\widetilde{v}_{u'}^{\psi(u')}\|^2 \right)
\nonumber \\&&
+ \frac{L_* \gamma^2}{2}\sum_{u \in K(t)} \mathbb{E} \|\widetilde{v}^{\psi(u)}_{ u }\|^2
+ ( L_{{*}}^2 \gamma^3 \tau_1 + 4L_{*}^2 \gamma^3 \tau_2 ) \sum_{u \in K(t)} \sum_{u' \in D^\prime(u)} \mathbb{E} \| \widetilde{v}^{\psi(u')}_{u'} \|^2
\nonumber \\&=&
-\frac{\gamma}{4} \sum_{u\in K(t)} \mathbb{E}\| \nabla_{\mathcal{G}_{\psi(u)}} f({w}_{t})\|^2
+\frac{L^2 \gamma^3 \eta_1}{2} \sum_{u \in K(t)}\sum_{u' \in \{t,...,u\}} \mathbb{E} \|\widetilde{v}_{u'}^{\psi(u')}\|^2
\nonumber \\ &&
+ \frac{L_* \gamma^2}{2}\sum_{u \in K(t)} \mathbb{E} \|\widetilde{v}^{\psi(u)}_{ u }\|^2
+ ( L_{{*}}^2 \gamma^3 \tau_1 + 4L_{*}^2 \gamma^3 \tau_2 ) \sum_{u \in K(t)} \sum_{u' \in D^\prime(u)} \mathbb{E} \| \widetilde{v}^{\psi(u')}_{u'} \|^2
\nonumber \\ &\stackrel{(b)}{\leq}&
-\frac{\gamma\mu}{2}(f(w_{t}) - f(w^*)) + \underbrace{(\frac{L_*^2 \gamma^3 \tau^{3/2}}{2}
+ \frac{L_*\gamma^2\tau^{1/2}}{2} + 5 { L_{*}^2 \gamma^3 \tau^{3/2}} )\frac{2G}{1-2L_*^2\gamma^2\tau}}_{C} \nonumber
\end{eqnarray}
where (a) follows from Lemma~\ref{lem-csgd-3}, (b) follows from Assumption~\ref{assumc1}. According to Eq.~\ref{csgd-9}, we have
\begin{align}\label{csgd-10}
\mathbb{E} \left[f (w_{t + |K(t)|}) - f(w^{*}) \right]\leq (1-\frac{\gamma\mu}{2})(f(w_{t}) - f(w^*)) + C
\end{align}
Assuming that $\cup_{\kappa \in P(t)}K(\kappa)=\{0,1, \ldots, t\}$ and applying Eq.~\ref{csgd-10}, we have
\begin{align}\label{csgd--11}
&\mathbb{E} \left[f (w_{t}) - f (w^*) \right]
\\
&\leq (1-\frac{\gamma\mu}{2})^{v(t)}(f(w_{0}) - f(w^*)) + C\sum_{i=0}^{v(t)}(1-\frac{\gamma\mu}{2})^{i}
\nonumber \\ & \leq
(1-\frac{\gamma\mu}{2})^{v(t)}(f(w_{0}) - f(w^*)) + C\frac{2(1-(1-\frac{\gamma\mu}{2})^{v(t)})}{\gamma \mu}
\nonumber \\ &\stackrel{(a)}{\leq}
(1-\frac{\gamma\mu}{2})^{v(t)}(f(w_{0}) - f(w^*)) + (\frac{L_*^2 \gamma^3 \tau^{3/2}}{2}
+ \frac{L_*\gamma^2\tau^{1/2}}{2} + 5 { L_{*}^2 \gamma^3 \tau^{3/2}} )\frac{2G}{1-2L_*^2\gamma^2\tau}\frac{2}{\gamma \mu} \nonumber,
\end{align}
where (a) follows from the definition of $C$. To obtain an $\epsilon$-accurate solution, one can choose a suitable $\gamma$ such that
\begin{align}\label{csgd-91-1}
1-2L_*^2\gamma^2\tau & >0
\end{align}
\begin{align}\label{csgd-91-2}
(1-\frac{\gamma\mu}{2})^{v(t)} \left(f(w_{0}) - f(w^*)\right) & \leq \frac{\epsilon}{2}
\end{align}
\begin{align}\label{csgd-91-3}
(\frac{L_*^2 \gamma^3 \tau^{3/2}}{2}
+ \frac{L_*\gamma^2\tau^{1/2}}{2} + 5 { L_{*}^2 \gamma^3 \tau^{3/2}} )\frac{2G}{1-2L_*^2\gamma^2\tau}\frac{2}{\gamma \mu} & \leq \frac{\epsilon}{2}.
\end{align}
According to Eq.~\ref{csgd-91-1}, we have $\gamma^2<\frac{1}{2L_*^2\tau}$, which implies that $ \frac{L_*^2 \gamma^3 \tau^{3/2}}{2}
+ \frac{L_*\gamma^2\tau^{1/2}}{2} + 5 { L_{*}^2 \gamma^3 \tau^{3/2}} \leq 6 L_*^2\gamma^3\tau^{3/2}$ (here we assume that $L_*$ can be chosen to be $\geq 1$, which is reasonable from the definition of $L_*$). Thus, we can rewrite Eq.~\ref{csgd-91-3} as
\begin{align}\label{1}
6L_*^2\gamma^3\tau^{3/2}\frac{4G}{\mu(1-2L_*^2\gamma^2\tau)} & \leq \frac{\epsilon}{2}.
\end{align}
which implies that if $\tau$ is upper bounded, \emph{i.e.}, $\tau\leq \min\{\epsilon^{-4/3}, \frac{(GL_*^2)^{2/3}}{\epsilon^2\mu^{2/3}}\}$, we can carefully choose $\gamma\leq \frac{\epsilon\mu^{1/3}}{(96GL_*^2)^{1/3}}$
such that Eq.~\ref{csgd-91-3} holds. According to Eq.~\ref{csgd-91-2}, we have
\begin{align}\label{1}
\log (\frac{2(f(w_0)-f(w^*))}{\epsilon}) \leq v(t) \log(\frac{1}{1-\frac{\gamma \mu}{2}})
\end{align}
Because $\log(\frac{1}{x}) \geq 1-x$ for $0< x \leq 1 $, we have
\begin{align}\label{11}
v(t) \geq \frac{2}{\gamma \mu} \log (\frac{2(f(w_0)-f(w^*))}{\epsilon})
\stackrel{(a)}{\geq} \frac{44(GL^2_*)^{1/3}}{ \mu^{4/3}\epsilon} \log (\frac{2(f(w_0)-f(w^*))}{\epsilon})
\end{align}
This completes the proof.
\end{proof}
\subsection{Proof of Theorem~\ref{thm-svrgconvex}}
\begin{lemma}\label{lem-csvrg-1}
For VF{${\textbf{B}}^2$}-SVRG and any $t$, let $u\in K(t)$; then we have
\begin{eqnarray}\label{lemeq-csvrg-1}
\mathbb{E} \left \| \widetilde{v}^{\psi(u) }_u \right \|^2
\leq \frac{18G}{1-2 L_*^2 \gamma^2\tau}
\end{eqnarray}
\end{lemma}
\begin{proof}[\textbf{Proof of Lemma~\ref{lem-csvrg-1}:}]
First, we prove the relation between $\mathbb{E}\|\widetilde{v}_u^{\psi(u)}\|^2$ and $\mathbb{E}\|\widehat{v}_u^{\psi(u)}\|^2$.
\begin{eqnarray}\label{svrg_1}
\mathbb{E} ||\widetilde{v}_{u}^{\psi(u)}||^2 &=& \mathbb{E} || \widetilde{v}^{\psi(u)}_u - \widehat{v}^{\psi(u)}_u +\widehat{v}^{\psi(u)}_u||^2 \nonumber \\
&\stackrel{(a)}{\leq}& 2 \mathbb{E} ||\widetilde{v}^{\psi(u)}_u - \widehat{v}^{\psi(u)}_u ||^2 + 2 \mathbb{E} ||\widehat{v}^{\psi(u)}_u||^2
\end{eqnarray}
where (a) follows from $\|a+b\|^2\leq 2\|a\|^2+ 2\|b\|^2$. The upper bound on $\mathbb{E} \left \| \widetilde{v}^{\psi(u)}_u - \widehat{v}^{\psi(u)}_u \right \|^2$ can be obtained as follows.
\begin{eqnarray}\label{svrg_2}
\mathbb{E} \left \| \widetilde{v}^{\psi(u)}_u - \widehat{v}^{\psi(u)}_u \right \|^2
&=&\mathbb{E} \left \| \left(\nabla_{\mathcal{G}_\ell} f_i (\widetilde{w}_{u}^s) - \nabla_{\mathcal{G}_\ell} f_i (\widehat{w}_{u}^s)\right) \right \|^2 \nonumber
\\ \nonumber &\stackrel{(a)}{\leq}& {L^2} \mathbb{E} \left \| \widetilde{w}_{{u}}^s - \widehat{w}_{u}^s \right \|^2
\\ \nonumber &\stackrel{(b)}{=}& {L^2 \gamma^2} \mathbb{E} \left \| \sum_{u' \in D(u)} \textbf{U}_{\psi(u')} \widetilde{v}^{\psi(u')}_{u'} \right \|^2
\\ \nonumber &\stackrel{(c)}{\leq} & {\tau_2 L^2 \gamma^2} \mathbb{E} \sum_{u' \in D(u)} \left \| \widetilde{v}^{\psi(u')}_{u'} \right \|^2
\end{eqnarray}
where (a) follows from Assumption~2, (b) follows from Eq.~\ref{Dt1}, and (c) follows from Assumption~\ref{assum4}. Combining Eqs.~(\ref{svrg_1}) and (\ref{svrg_2}), we have
\begin{eqnarray}\label{svrg_3}
&& \mathbb{E} \left \| \widetilde{v}^{\psi(u)}_u \right \|^2
\\ \nonumber &\leq & 2 \mathbb{E} \left \| \widetilde{v}^{\psi(u)}_u - \widehat{v}^{\psi(u)}_u \right \|^2
+ 2 \mathbb{E} \left \| \widehat{v}^{\psi(u)}_u \right \|^2
\\ \nonumber &\leq & 2 \tau_2 L^2 \gamma^2 \mathbb{E} \sum_{u' \in D(u)} \left \| \widetilde{v}^{\psi(u')}_{u'} \right \|^2
+ 2 \mathbb{E} \left \| \widehat{v}^{\psi(u)}_u \right \|^2
\end{eqnarray}
Then, following the analysis of Lemma~\ref{lem-csgd-1}, we have
\begin{eqnarray}\label{svrg_3}
\mathbb{E} \left \| \widetilde{v}^{\psi(u)}_u \right \|^2
\leq \frac{18G}{1-2 L_*^2 \gamma^2\tau_2^2}
\end{eqnarray}
This completes the proof.
\end{proof}
\begin{lemma}\label{lem-csvrg-2}
Given the conditions in Theorem~\ref{thm-svrgconvex} and letting $u\in K(t)$, we have:
\begin{align}\label{svrg-lemeq-1}
&\mathbb{E}\|\widetilde{v}_{u}^{{\psi(u)}} \|^2 \leq
\frac{16 L^{2}}{\mu} \mathbb{E}\left(f\left(w_{t}^{s}\right)-f\left(w^{*}\right)\right)+\frac{8 L^{2}}{\mu} \mathbb{E}\left(f\left(w^{s}\right)-f\left(w^{*}\right)\right)+ 8 L^{2} \gamma^{2} \eta_{1} \sum_{u' \in\{t, \ldots, u\}} \mathbb{E}\left\|\widetilde{v}_{u'}^{\psi(u')}\right\|^{2}
\nonumber \\
& + 4{ L_{{*}}^2 \gamma^2 \tau_1} \sum_{u' \in D(u)} \mathbb{E} \| \widetilde{v}^{\psi(u')}_{u'} \|^2
+ 16 { L_{*}^2 \gamma^2 \tau_2 \sum_{u' \in D^\prime(u)}} \mathbb{E} \| \widetilde{v}^{\psi(u')}_{u'} \|^2
\end{align}
\end{lemma}
\begin{proof}[\textbf{Proof of Lemma~\ref{lem-csvrg-2}:}]
Define $\mathbb{E}\|v_{u}^{{\psi(u)}}\|^{2}=\mathbb{E}\left\|\nabla_{\mathcal{G}_{\psi(u)}} f_{i}\left(w_{u}^{s}\right)-\nabla_{\mathcal{G}_{\psi(u)}} f_{i}\left(w^{s}\right)+\nabla_{\mathcal{G}_{\psi(u)}} f\left(w^{s}\right)\right\|^{2}$. Then we have $\mathbb{E}\|\widetilde{v}_{u}^{{\psi(u)}}\|^2 = \mathbb{E}\|\widetilde{v}_{u}^{{\psi(u)}} - {v}_{u}^{{\psi(u)}} + {v}_{u}^{{\psi(u)}}\|^2 \leq 2\mathbb{E}\|\widetilde{v}_{u}^{{\psi(u)}} - {v}_{u}^{{\psi(u)}} \|^2 + 2\mathbb{E}\|{v}_{u}^{{\psi(u)}}\|^2 $. First, we give the upper bound on $\mathbb{E}\|{v}_{u}^{{\psi(u)}}\|^2$ as follows.
\begin{align}\label{svrg-8}
&\mathbb{E}\|v_{u}^{{\psi(u)}}\|^{2}
\nonumber\\
&=\mathbb{E}\left\|\nabla_{\mathcal{G}_{\psi(u)}} f_{i}\left(w_{u}^{s}\right)-\nabla_{\mathcal{G}_{\psi(u)}} f_{i}\left(w^{s}\right)+\nabla_{\mathcal{G}_{\psi(u)}} f\left(w^{s}\right)\right\|^{2}
\nonumber\\&
=\mathbb{E}\left\|\nabla_{\mathcal{G}_{\psi(u)}} f_{i}\left(w_{u}^{s}\right)-\nabla_{\mathcal{G}_{\psi(u)}} f_{i}\left(w^{*}\right)-\nabla_{\mathcal{G}_{\psi(u)}} f_{i}\left(w^{s}\right)+\nabla_{\mathcal{G}_{\psi(u)}} f_{i}\left(w^{*}\right)+\nabla_{\mathcal{G}_{\psi(u)}} f\left(w^{s}\right)\right\|^{2}
\nonumber\\&
\stackrel{(a)}{\leq} 2 \mathbb{E}\left\|\nabla_{\mathcal{G}_{\psi(u)}} f_{i}\left(w_{u}^{s}\right)-\nabla_{\mathcal{G}_{\psi(u)}} f_{i}\left(w^{*}\right)\right\|^{2}+2 \mathbb{E}\left\|\nabla_{\mathcal{G}_{\psi(u)}} f_{i}\left(w^{s}\right)-\nabla_{\mathcal{G}_{\psi(u)}} f_{i}\left(w^{*}\right)-\nabla_{\mathcal{G}_{\psi(u)}} f\left(w^{s}\right)+\nabla_{\mathcal{G}_{\psi(u)}} f\left(w^{*}\right)\right\|^{2}
\nonumber\\&
\leq 2 \mathbb{E}\left\|\nabla_{\mathcal{G}_{\psi(u)}} f_{i}\left(w_{u}^{s}\right)-\nabla_{\mathcal{G}_{\psi(u)}} f_{i}\left(w^{*}\right)\right\|^{2}+2 \mathbb{E}\left\|\nabla_{\mathcal{G}_{\psi(u)}} f_{i}\left(w^{s}\right)-\nabla_{\mathcal{G}_{\psi(u)}} f_{i}\left(w^{*}\right)\right\|^{2}
\nonumber\\&
\stackrel{(b)}{\leq} 2 L^{2} \mathbb{E}\left\|w_{u}^{s}-w^{*}\right\|^{2}+2 L^{2} \mathbb{E}\left\|w^{s}-w^{*}\right\|^{2}
\nonumber\\&
=2 L^{2} \mathbb{E}\left\|w_{u}^{s}-w_{t}^{s}+w_{t}^{s}-w^{*}\right\|^{2}+2 L^{2} \mathbb{E}\left\|w^{s}-w^{*}\right\|^{2} \nonumber\\&
\stackrel{(c)}{\leq} 4 L^{2} \mathbb{E}\left\|w_{u}^{s}-w_{t}^{s}\right\|^{2}+4 L^{2} \mathbb{E}\left\|w_{t}^{s}-w^{*}\right\|^{2}+2 L^{2} \mathbb{E}\left\|w^{s}-w^{*}\right\|^{2} \nonumber\\&
\stackrel{(d)}{=} \quad 4 L^{2} \gamma^{2} \mathbb{E}\left\|\sum_{u' \in\{t, \ldots, u\}} \mathbf{U}_{\psi(u)} \widetilde{v}_{u'}^{\psi(u')}\right\|^{2}+4 L^{2} \mathbb{E}\left\|w_{t}^{s}-w^{*}\right\|^{2}+2 L^{2} \mathbb{E}\left\|w^{s}-w^{*}\right\|^{2} \nonumber\\&
\stackrel{(e)}{\leq} \frac{8 L^{2}}{\mu} \mathbb{E}\left(f\left(w_{t}^{s}\right)-f\left(w^{*}\right)\right)+\frac{4 L^{2}}{\mu} \mathbb{E}\left(f\left(w^{s}\right)-f\left(w^{*}\right)\right)+4 L^{2} \gamma^{2} \eta_{1} \sum_{u' \in\{t, \ldots, u\}} \mathbb{E}\left\|\widetilde{v}_{u'}^{\psi(u')}\right\|^{2},
\end{align}
where (a) and (c) follow from $\|a+b\|^2 \leq 2\|a\|^2 + 2\|b\|^2$, (b) follows from Assumption~\ref{assum1}, (d) follows from Eq.~\ref{global-up1}, and (e) follows from Assumption~\ref{assumc1}.
Next we give the upper bound of $\mathbb{E}\|\widetilde{v}_{u}^{{\psi(u)}} - {v}_{u}^{{\psi(u)}} \|^2$. Following the proof of Lemma~\ref{lem-csgd-2}, we have
\begin{align}\label{111}
\mathbb{E}\|\widetilde{v}_{u}^{{\psi(u)}} - {v}_{u}^{{\psi(u)}} \|^2 \leq 2{ L_{{*}}^2 \gamma^2 \tau_1} \sum_{t' \in D(t)} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2
+ 8 { L_{*}^2 \gamma^2 \tau_2 \sum_{t' \in D^\prime(t)}} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2
\end{align}
Combining the above two inequalities, we have
\begin{align}\label{112}
&\mathbb{E}\|\widetilde{v}_{u}^{{\psi(u)}} \|^2 \nonumber \\
&\leq
\frac{16 L^{2}}{\mu} \mathbb{E}\left(f\left(w_{t}^{s}\right)-f\left(w^{*}\right)\right)+\frac{8 L^{2}}{\mu} \mathbb{E}\left(f\left(w^{s}\right)-f\left(w^{*}\right)\right)+ 8 L^{2} \gamma^{2} \eta_{1} \sum_{u' \in\{t, \ldots, u\}} \mathbb{E}\left\|\widetilde{v}_{u'}^{\psi(u')}\right\|^{2}
\nonumber \\
& + 4{ L_{{*}}^2 \gamma^2 \tau_1} \sum_{u' \in D(u)} \mathbb{E} \| \widetilde{v}^{\psi(u')}_{u'} \|^2
+ 16 { L_{*}^2 \gamma^2 \tau_2 \sum_{u' \in D^\prime(u)}} \mathbb{E} \| \widetilde{v}^{\psi(u')}_{u'} \|^2
\end{align}
This completes the proof.
\end{proof}
\begin{proof}[\textbf{Proof of Theorem~\ref{thm-svrgconvex}:}]
Similar to Eq.~\ref{csgd-8}, for $u \in K(t)$ in the $s$-th outer loop, we have
\begin{eqnarray}\label{csvrg-5}
&& \mathbb{E} f (w_{u+1}^{s})
\\ \nonumber &\stackrel{ (a) }{\leq}& \mathbb{E} \left ( f (w_{u}^{s}) + \langle \nabla f(w_{u}^{s}), w_{u+1}^{s}-w_{u}^{s} \rangle + \frac{L}{2} \|w_{u+1}^{s}-w_{u}^{s} \|^2 \right )
\\ \nonumber &=& \mathbb{E} \left ( f (w_{u}^{s}) - \gamma \langle \nabla f(w_{u}^{s}),
\widetilde{v}^{\psi(u)}_{u} \rangle + \frac{L\gamma^2}{2} \| \widetilde{v}^{\psi(u)}_{u} \|^2 \right )
\\ \nonumber &{=}& \mathbb{E} \left ( f (w_{u}^{s}) - \gamma \langle \nabla f(w_{u}^{s}), \widetilde{v}^{\psi(u)}_{u} + {v}^{\psi(u)}_{u}- {v}^{\psi(u)}_{u} \rangle + \frac{L \gamma^2}{2} \| \widetilde{v}^{\psi(u)}_{u} \|^2 \right )
\\ \nonumber &\stackrel{(b)}{=}& \mathbb{E} f (w_{u}^{s}) - \gamma \mathbb{E} \langle \nabla f(w_{u}^{s}), \nabla_{\mathcal{G}_{\psi(u)}} f ({w}_{u}^{s}) \rangle + \frac{L \gamma^2}{2} \mathbb{E} \| \widetilde{v}^{\psi(u)}_{u} \|^2 + \gamma \mathbb{E} \langle \nabla f(w_{u}^{s}), {v}^{\psi(u)}_{u} - \widetilde{v}^{\psi(u)}_{u} \rangle
\\ \nonumber &\stackrel{ (c) }{\leq}& \mathbb{E} f (w_{u}^{s}) - \gamma \mathbb{E} \| \nabla_{\mathcal{G}_{\psi(u)}} f ({w}_{u}^{s}) \|^2 + \frac{\gamma}{2} \mathbb{E} \| \nabla_{\mathcal{G}_{\psi(u)}} f ({w}_{u}^{s}) \|^2 + \frac{L \gamma^2}{2} \mathbb{E} \| \widetilde{v}^{\psi(u)}_{u} \|^2 + \frac{\gamma}{2} \mathbb{E} \| \widetilde{v}^{\psi(u)}_{u} - {v}^{\psi(u)}_{u} \|^2
\\ &\stackrel{(d)}{\leq}& \mathbb{E} f (w_{u}^{s}) - \frac{\gamma}{2} \mathbb{E} \| \nabla_{\mathcal{G}_{\psi(u)}} f ({w}_{u}^{s}) \|^2 + \frac{L_* \gamma^2}{2} \mathbb{E} \| \widetilde{v}^{\psi(u)}_{u} \|^2
+ ( L_{{*}}^2 \gamma^2 \tau_1 + 4 L_{*}^2 \gamma^2 \tau_2) \sum_{t' \in D^\prime(t)} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2
\end{eqnarray}
Summing Eq.~(\ref{csvrg-5}) over all $ u \in K(t) $, we obtain
\begin{eqnarray}\label{svrg-6}
&& \mathbb{E} \left[f (w_{t+|K(t)|}^{s}) - f (w_{t}^{s}) \right]
\\ \nonumber
&\leq&
-\frac{\gamma}{2}\sum_{u \in K(t)} \mathbb{E} \| \nabla_{\mathcal{G}_{\psi(u)}} f ({w}_{u}^{s}) \|^2
+ \frac{L_* \gamma^2}{2}\sum_{u \in K(t)} \mathbb{E} \| \widetilde{v}^{\psi(u)}_{ u }\|^2
+ ( L_{{*}}^2 \gamma^2 \tau_1 + 4 L_{*}^2 \gamma^2 \tau_2) \sum_{t' \in D^\prime(t)} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2
\\ \nonumber
&\stackrel{(a)}{\leq}&
-\frac{\gamma}{2}\left( \frac{1}{2} \sum_{u\in K(t)} \mathbb{E}\| \nabla_{\mathcal{G}_{\psi(u)}} f({w}_{t}^{s})\|^2
- L^2 \gamma^2 \eta_1 \sum_{u\in K(t)} \sum_{u' \in \{t,\cdots,u\}} \mathbb{E} \|\widetilde{v}_{u'}^{\psi(u')}\|^2 \right)
\\ \nonumber &&
+ \frac{L_* \gamma^2}{2}\sum_{u \in K(t)} \mathbb{E} \|\widetilde{v}^{\psi(u)}_{ u }\|^2
+ ( L_{{*}}^2 \gamma^2 \tau_1 + 4 L_{*}^2 \gamma^2 \tau_2) \sum_{u \in K(t)} \sum_{u' \in D^\prime(u)} \mathbb{E} \| \widetilde{v}^{\psi(u')}_{u'} \|^2
\\ \nonumber &=&
-\frac{\gamma}{4} \sum_{u\in K(t)} \mathbb{E}\| \nabla_{\mathcal{G}_{\psi(u)}} f({w}_{t}^{s})\|^2
+\frac{L^2 \gamma^3 \eta_1}{2} \sum_{u \in K(t)}\sum_{u' \in \{t,...,u\}} \mathbb{E} \|\widetilde{v}_{u'}^{\psi(u')}\|^2
\\ \nonumber &&
+ \frac{L_* \gamma^2}{2}\sum_{u \in K(t)} \mathbb{E} \|\widetilde{v}^{\psi(u)}_{ u }\|^2
+ ( L_{{*}}^2 \gamma^2 \tau_1 + 4 L_{*}^2 \gamma^2 \tau_2) \sum_{u \in K(t)} \sum_{u' \in D^\prime(u)} \mathbb{E} \| \widetilde{v}^{\psi(u')}_{u'} \|^2
\\ \nonumber &\stackrel{(b)}{\leq}&
-\frac{\gamma\mu}{2}(f(w_{t}^{s}) - f(w^*))
+ ( L_{{*}}^2 \gamma^2 \tau_1 + 4 L_{*}^2 \gamma^2 \tau_2) \sum_{u \in K(t)} \sum_{u' \in D^\prime(u)} \mathbb{E} \| \widetilde{v}^{\psi(u')}_{u'} \|^2
\\ \nonumber
&& + \underbrace{(\frac{L_*^2 \gamma^3 \tau}{2} + \frac{L_*\gamma^2}{2})}_{C}\sum_{u\in K(t)}
\biggl( \frac{16 L^{2}}{\mu} \mathbb{E}\left(f\left(w_{t}^{s}\right)-f\left(w^{*}\right)\right)
+\frac{8 L^{2}}{\mu} \mathbb{E}\left(f\left(w^{s}\right)-f\left(w^{*}\right)\right)
\nonumber \\
& & + 8 L^{2} \gamma^{2} \eta_{1} \sum_{u' \in\{t, \ldots, u\}} \mathbb{E}\left\|\widetilde{v}_{u'}^{\psi(u')}\right\|^{2}
+ 4{ L_{{*}}^2 \gamma^2 \tau_1} \sum_{u' \in D(u)} \mathbb{E} \| \widetilde{v}^{\psi(u')}_{u'} \|^2
+ 16 { L_{*}^2 \gamma^2 \tau_2 \sum_{u' \in D^\prime(u)}} \mathbb{E} \| \widetilde{v}^{\psi(u')}_{u'} \|^2 \biggr) \nonumber
\end{eqnarray}
where (a) follows from Lemma~\ref{lem-csgd-3} and (b) follows from Lemma~\ref{lem-csvrg-2}.
Letting $e_t^s = \mathbb{E}(f(w_t^s)-f(w^*))$ and $e^s = \mathbb{E}(f(w^s)-f(w^*))$, we have
\begin{align}\label{svrg--1}
& e_{t + |K(t)|}^s
\nonumber \\ &
\leq(1-\frac{\gamma\mu}{2}+ \frac{16L^2\eta_1C}{\mu})e_t^s
+ \frac{8L^2\eta_1C}{\mu} e^s
+ 8 C L^{2} \gamma^{2} \eta_{1}\sum_{u\in K(t)} \sum_{u' \in\{t, \ldots, u\}} \mathbb{E}\left\|\widetilde{v}_{u'}^{\psi(u')}\right\|^{2}
\nonumber \\
& +(5 { L_{*}^2 \gamma^3 \tau^{1/2}} + 4C{ L_{{*}}^2}\gamma^2\tau_1 + 16C { L_{*}^2} \gamma^2 \tau_2 ) \sum_{u\in K(t)} \sum_{u' \in D^\prime(u)} \mathbb{E} \| \widetilde{v}^{\psi(u')}_{u'} \|^2
\nonumber \\
& \leq(1-\frac{\gamma\mu}{2}+ \frac{16L^2\eta_1C}{\mu})e_t^s
+ \frac{8L^2\eta_1C}{\mu} e^s
+(28 C L^{2} \gamma^{2}\tau^{3/2}+ 5 { L_{*}^2 \gamma^3 \tau^{3/2}} ) \frac{18G}{1 - 2 L_{*}^2\gamma^2\tau}
\end{align}
We carefully choose $\gamma$ such that $\frac{\gamma\mu}{2}- \frac{16L^2\eta_1C}{\mu}\stackrel{\text{def}}{=}\rho >0$. Assuming that $\cup_{\kappa \in P(t)}K(\kappa)=\{0,1, \ldots, t\}$ and applying the above inequality recursively, we have
\begin{align}\label{svrg-111}
& e_t^s \nonumber \\
& \leq (1-\rho)^{v(t)}e^s + \left( \frac{8L^2\eta_1C}{\mu} e^s
+(28 C L^{2} \gamma^{2}\tau^{3/2}+ 5 { L_{*}^2 \gamma^3 \tau^{3/2}} ) \frac{18G}{1 - 2 L_{*}^2\gamma^2\tau}\right)\sum_{i=0}^{v(t)}(1-\rho)^i
\nonumber \\
& \leq (1-\rho)^{v(t)}e^s + \left( \frac{8L^2\eta_1C}{\mu} e^s
+(28 C L^{2} \gamma^{2}\tau^{3/2}+ 5 { L_{*}^2 \gamma^3 \tau^{3/2}} ) \frac{18G}{1 - 2 L_{*}^2\gamma^2\tau}\right)\frac{1}{\rho}
\nonumber \\
&\leq \left((1-\rho)^{v(t)} + \frac{8L^2\eta_1C}{\rho\mu}\right)e^s
+ (28 C L^{2} \gamma^{2}\tau^{3/2}+ 5 { L_{*}^2 \gamma^3 \tau^{3/2}} ) \frac{18G}{\rho(1 - 2 L_{*}^2\gamma^2\tau)}
\end{align}
Thus, to achieve accuracy $\epsilon$ for VF{${\textbf{B}}^2$}-SVRG, i.e., $\mathbb{E} [f\left(w_{S}\right)-f\left(w^{*}\right)]\leq \epsilon$, we can carefully choose $\gamma$ such that
\begin{align}\label{saga-111}
\frac{8L^2\eta_1C}{\rho\mu}& \leq 0.05
\nonumber \\
(28 C L^{2} \gamma^{2}\tau^{3/2}+ 5 { L_{*}^2 \gamma^3 \tau^{3/2}} ) \frac{18G}{\rho(1 - 2 L_{*}^2\gamma^2\tau)} & \leq \frac{\epsilon}{8}
\end{align}
Then, letting $(1-\rho)^{v(t)}\leq 0.25$, i.e., $v(t)\geq \frac{\log 0.25}{\log(1-\rho)}$, we have
\[e^{s+1} \leq 0.75 e^s + \frac{\epsilon}{8}\]
Recursively applying the above inequality, we have
\[e^S \leq (0.75)^Se^0 + \frac{\epsilon}{2}\]
Finally, the number of outer loops $S$ should satisfy $S\geq \frac{\log\frac{2e^0}{\epsilon}}{\log\frac{4}{3}}$, and the epoch number $v(t)$ in an outer loop should satisfy $v(t)\geq \frac{\log 0.25}{\log(1-\rho)}$. This completes the proof.
\end{proof}
\subsection{Proof of Theorem~\ref{thm-sagaconvex}}
First, we introduce the following notation. $\phi(t)$ denotes the corresponding local time counter on party $\psi(t)$. Given a local time counter $u$ and the $\ell$-th party, $\xi(u,\ell)$ denotes the corresponding global time counter satisfying both $\phi \left(\xi(u,\ell)\right)=u$ and $\psi\left(\xi(u,\ell)\right) = \ell$.
\begin{lemma} \label{lem-csaga-1} For VF{${\textbf{B}}^2$}-SAGA, we have that
\begin{align}\label{csaga-1}
&\mathbb{E} \| \hat{\alpha}_{i_t}^{t,\psi(t)} - \nabla_{\mathcal{G}_{\psi(t)}} f_{i_t}(w^{*}) \|^2
\nonumber \\
& \leq \frac{1}{n} \sum_{t'=1}^{\phi(t)-1} \sum_{i=1}^n \left ( \frac{1}{n} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \mathbb{E} \left \| \nabla_{\mathcal{G}_{\psi(t)}} f_i(\hat{w}_{{\xi(t',\psi(t))}}) - \nabla_{\mathcal{G}_{\psi(t)}} f_i(w^{*}) \right \|^2 \right )
\nonumber \\
& + \frac{1}{n} \sum_{i=1}^n \left ( 1 -\frac{1}{n} \right )^{\phi(t)-1} \mathbb{E} \left \| \nabla_{\mathcal{G}_{\psi(t)}} f_i(\hat{w}_{{0}}) - \nabla_{\mathcal{G}_{\psi(t)}} f_i(w^{*}) \right \|^2
\\
& \mathbb{E} \left \| {\alpha}_{i_t}^{t,\psi(t)} - \nabla_{\mathcal{G}_{\psi(t)}} f_{i_t}(w^{*}) \right \|^2
\nonumber \\
& \leq \frac{1}{n} \sum_{t'=1}^{\phi(t)-1} \sum_{i=1}^n \left ( \frac{1}{n} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \mathbb{E} \left \| \nabla_{\mathcal{G}_{\psi(t)}} f_i({w}_{{\xi(t',\psi(t))}}) - \nabla_{\mathcal{G}_{\psi(t)}} f_i(w^{*}) \right \|^2 \right )
\nonumber \\
& + \frac{1}{n} \sum_{i=1}^n \left ( 1 -\frac{1}{n} \right )^{\phi(t)-1} \mathbb{E} \left \| \nabla_{\mathcal{G}_{\psi(t)}} f_i({w}_{{0}}) - \nabla_{\mathcal{G}_{\psi(t)}} f_i(w^{*}) \right \|^2
\\
&\mathbb{E}\left \| {\alpha}_{i_t}^{t,\psi(t)} - \widehat{\alpha}_{i_t}^{t,\psi(t)} \right \|^2
\leq \frac{\tau_1 L^2 \gamma^2}{n} \sum_{t'=1}^{\phi(t)-1} \sum_{{u} \in D(\xi(t',\psi(t)))} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \mathbb{E} \left \| \widetilde{v}^{\psi({u})}_{{u}} \right \|^2
\\
&\mathbb{E}\left \| \widetilde{\alpha}_{i_t}^{t,\psi(t)} - \widehat{\alpha}_{i_t}^{t,\psi(t)} \right \|^2
\leq \frac{4\tau_2 L_*^2 \gamma^2}{n} \sum_{t'=1}^{\phi(t)-1} \sum_{{u} \in D'(\xi(t',\psi(t)))} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \mathbb{E} \left \| \widetilde{v}^{\psi({u})}_{{u}} \right \|^2
\end{align}
\end{lemma}
\begin{proof}[\textbf{Proof of Lemma~\ref{lem-csaga-1}:}] Firstly, we have that
\begin{eqnarray}\label{csaga-2}
&& \mathbb{E} \left \| \hat{\alpha}_{i_t}^{t,\psi(t)} - \nabla_{\mathcal{G}_{\psi(t)}} f_{i_t}(w^{*}) \right \|^2
= \frac{1}{n} \sum_{i=1}^n \mathbb{E} \left \| \hat{\alpha}_{i}^{t,\psi(t)} - \nabla_{\mathcal{G}_{\psi(t)}} f_{i}(w^{*}) \right \|^2
\\ & = & \nonumber \frac{1}{n} \sum_{i=1}^n \mathbb{E} \sum_{t'=0}^{\phi(t) -1} \mathbf{1}_{ \{ \textbf{u}_{i}^u =t' \}} \left \| \nabla_{\mathcal{G}_\ell} f_i(\hat{w}_{\xi(t',\psi(t))}) - \nabla_{\mathcal{G}_\ell} f_{i}(w^{*}) \right \|^2
\\ & = & \nonumber \frac{1}{n} \sum_{t'=0}^{\phi(t)-1} \sum_{i=1}^n \mathbb{E} \mathbf{1}_{ \{ \textbf{u}_{i}^u =t' \}} \left \| \nabla_{\mathcal{G}_\ell} f_i(\hat{w}_{\xi(t',\psi(t))}) - \nabla_{\mathcal{G}_\ell} f_{i}(w^{*}) \right \|^2
\end{eqnarray}
where $\textbf{u}_{i}^u$ denotes the last iterate at which $\widehat{\alpha}_i^{t,\psi(t)}$ was updated. Note that we do not distinguish between $\psi(t)$ and $\psi(t')$ because they correspond to the same party. We consider the two cases $t'>0$ and $t'=0$ as follows.
For $t'>0$, we have that
\begin{eqnarray}\label{csaga-3}
&& \mathbb{E} \left ( \mathbf{1}_{ \{ \textbf{u}_{i}^u =t' \}} \left \| \nabla_{\mathcal{G}_\ell} f_{i}(\hat{w}_{{\xi(t',\psi(t))}}) - \nabla_{\mathcal{G}_\ell} f_{i}(w^{*}) \right \|^2 \right )
\\ & \stackrel{ (a) }{\leq} & \nonumber
\mathbb{E} \left ( \mathbf{1}_{ \{ i_{t'} = i \}} \mathbf{1}_{ \{ i_v \neq i, \forall v \ s.t. \ t'+ 1 \leq v \leq \phi(t) -1 \}} \left \| \nabla_{\mathcal{G}_\ell} f_{i}(\hat{w}_{{\xi(t',\psi(t))}}) - \nabla_{\mathcal{G}_\ell} f_{i}(w^{*}) \right \|^2 \right )
\\ & \stackrel{ (b) }{\leq} & \nonumber
P{ \{ i_{t'} = i \}} P { \{ i_v \neq i, \forall v
\ s.t. \ t'+ 1 \leq v \leq \phi(t) -1 \}} \mathbb{E} \left \| \nabla_{\mathcal{G}_\ell} f_{i}(\hat{w}_{{\xi(t',\psi(t))}}) - \nabla_{\mathcal{G}_\ell} f_{i}(w^{*}) \right \|^2
\\ & \stackrel{ (c) }{\leq} & \nonumber \frac{1}{n} \left ( 1 -\frac{1}{n} \right )^{\phi(t) -1-t' } \mathbb{E} \left \| \nabla_{\mathcal{G}_\ell} f_{i}(\hat{w}_{{\xi(t',\psi(t))}}) - \nabla_{\mathcal{G}_\ell} f_{i}(w^{*}) \right \|^2
\end{eqnarray}
where the inequality (a) uses the fact that $i_{t'}$ and $i_v$ are independent for $v \neq t'$, and the inequality (b) uses the facts that $P{ \{ i_{t'} = i \}} = \frac{1}{n}$ and $P { \{ i_v \neq i\} } =1-\frac{1}{n}$.
For $t'=0$, we have that
\begin{eqnarray}\label{csaga-4}
&& \mathbb{E} \left ( \mathbf{1}_{ \{ \textbf{u}_{i}^u =0 \}}\left \| \nabla_{\mathcal{G}_{\psi(t)}} f_{i}(\hat{w}_{0}) - \nabla_{\mathcal{G}_{\psi(t)}} f_{i}(w^{*}) \right \|^2 \right )
\\ & \leq & \nonumber
\mathbb{E} \left ( \mathbf{1}_{ \{ i_v \neq i, \forall v \
s.t. \ 0 \leq v \leq \phi(t)-1 \}} \left \| \nabla_{\mathcal{G}_{\psi(t)}} f_{i}(\hat{w}_{{0}}) - \nabla_{\mathcal{G}_{\psi(t)}} f_{i}(w^{*}) \right \|^2 \right )
\\ & \leq & \nonumber
P { \{ i_v \neq i, \forall v \
s.t. \ 0 \leq v \leq \phi(t-\tau_3)-1 \}} \mathbb{E} \left \| \nabla_{\mathcal{G}_{\psi(t)}} f_{i}(\hat{w}_{0}) - \nabla_{\mathcal{G}_{\psi(t)}} f_{i}(w^{*}) \right \|^2
\\ & \leq & \nonumber \left ( 1 -\frac{1}{n} \right )^{\phi(t)} \mathbb{E} \left \| \nabla_{\mathcal{G}_{\psi(t)}} f_{i}(\hat{w}_{{0}}) - \nabla_{\mathcal{G}_{\psi(t)}} f_{i}(w^{*}) \right \|^2
\end{eqnarray}
Substituting Eqs.~\ref{csaga-4} and \ref{csaga-3} into Eq.~\ref{csaga-2}, we have
\begin{eqnarray}\label{csaga-5}
&& \mathbb{E} \left \| \hat{\alpha}_{i_t}^{t,\psi(t)} - \nabla_{\mathcal{G}_{\psi(t)}} f_{i_t}(w^{*}) \right \|^2\nonumber
\\ & = & \nonumber
\frac{1}{n} \sum_{t'=0}^{\phi(t)-1}\sum_{i=1}^n \mathbb{E} \mathbf{1}_{ \{ \textbf{u}_{i}^u =t' \}} \left \| \nabla_{\mathcal{G}_{\psi(t)}} f_i(\hat{w}_{{\xi(t',\psi(t))}}) - \nabla_{\mathcal{G}_{\psi(t)}} f_i(w^{*}) \right \|^2
\\ & \stackrel{ (a) }{\leq} & \nonumber \frac{1}{n} \sum_{t'=1}^{\phi(t)-1} \sum_{i=1}^n \left ( \frac{1}{n} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \mathbb{E} \left \| \nabla_{\mathcal{G}_{\psi(t)}} f_i(\hat{w}_{{\xi(t',\psi(t))}}) - \nabla_{\mathcal{G}_{\psi(t)}} f_i(w^{*}) \right \|^2 \right )
\\ & & \nonumber + \frac{1}{n} \sum_{i=1}^n \left ( 1 -\frac{1}{n} \right )^{\phi(t)-1} \mathbb{E} \left \| \nabla_{\mathcal{G}_{\psi(t)}} f_i(\hat{w}_{{0}}) - \nabla_{\mathcal{G}_{\psi(t)}} f_i(w^{*}) \right \|^2
\end{eqnarray}
Combining the fact that $w$ represents the consistent read (and thus $\tau_3=0$) with Eq.~(\ref{csaga-5}), we have
\begin{eqnarray}\label{csaga-6}
&& \mathbb{E} \left \| {\alpha}_{i_t}^{t,\psi(t)} - \nabla_{\mathcal{G}_{\psi(t)}} f_{i_t}(w^{*}) \right \|^2
\\ & \leq & \nonumber \frac{1}{n} \sum_{t'=1}^{\phi(t)-1} \sum_{i=1}^n \left ( \frac{1}{n} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \mathbb{E} \left \| \nabla_{\mathcal{G}_{\psi(t)}} f_i({w}_{{\xi(t',\psi(t))}}) - \nabla_{\mathcal{G}_{\psi(t)}} f_i(w^{*}) \right \|^2 \right )
\\ & & \nonumber + \frac{1}{n} \sum_{i=1}^n \left ( 1 -\frac{1}{n} \right )^{\phi(t)-1} \mathbb{E} \left \| \nabla_{\mathcal{G}_{\psi(t)}} f_i({w}_{{0}}) - \nabla_{\mathcal{G}_{\psi(t)}} f_i(w^{*}) \right \|^2
\\ & \leq & \nonumber \frac{L^2}{n} \sum_{t'=1}^{\phi(t)-1} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \sigma ({w}_{{\xi(t',\psi(t))}}) + L^{2}\left ( 1 -\frac{1}{n} \right )^{\phi(t)} \sigma ({w}_{0}),
\end{eqnarray}
where $\sigma\left(w_{u}\right)=\mathbb{E}\left\|w_{u}-w^{*}\right\|^{2}$.
Similarly, we have that
\begin{eqnarray}\label{cSAGA-7}
&& \mathbb{E}\left \| {\alpha}_{i_t}^{t,\psi(t)} - \widehat{\alpha}_{i_t}^{t,\psi(t)} \right \|^2
= \frac{1}{n} \sum_{i=1}^n \mathbb{E} \left \| {\alpha}_{i}^{t,\psi(t)} - \widehat{\alpha}_{i}^{t,\psi(t)} \right \|^2
\\ \nonumber & = & \frac{1}{n} \sum_{t'=0}^{\phi(t)-1} \sum_{i=1}^n \mathbb{E} \mathbf{1}_{ \{ \textbf{u}_{i}^u =t'\} } \left \| {\alpha}_{i}^{t',\psi(t')} - \widehat{\alpha}_{i}^{t',\psi(t')} \right \|^2
\\ & \stackrel{ (a) }{\leq} & \nonumber \frac{1}{n}\sum_{i=1}^n \sum_{t'=1}^{\phi(t)-1} \left ( \frac{1}{n} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \mathbb{E} \left \| {\alpha}_{i}^{t',\psi(t')} - \widehat{\alpha}_{i}^{t',\psi(t')} \right \|^2 \right )
\\ & & \nonumber + \frac{1}{n}\sum_{i=1}^n \left ( 1 -\frac{1}{n} \right )^{\phi(t)-1} \mathbb{E} \left \| {\alpha}_{i}^{0,\ell} - \widehat{\alpha}_{i}^{0,\ell} \right \|^2
\\ & \stackrel{ (b) }{=} & \nonumber \frac{1}{n} \sum_{t'=1}^{\phi(t)-1} \sum_{i=1}^n \left ( \frac{1}{n} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \mathbb{E} \left \| {\alpha}_{i}^{t',\psi(t')} - \widehat{\alpha}_{i}^{t',\psi(t)} \right \|^2 \right )
\\ & = & \nonumber
\frac{1}{n} \sum_{t'=1}^{\phi(t)-1} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \mathbb{E} \left \| \nabla_{\mathcal{G}_\ell} f_i(\widehat{w}_{\xi(t',\psi(t))}) - \nabla_{\mathcal{G}_\ell} f_i(w_{\xi(t',\psi(t))}) \right \|^2
\\ & \stackrel{ (c) }{\leq} & \nonumber
\frac{L^2}{n} \sum_{t'=1}^{\phi(t)-1} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \mathbb{E} \left \| \widehat{w}_{\xi(t',\psi(t))} - w_{\xi(t',\psi(t))} \right \|^2
\\ & = & \nonumber \frac{L^2 \gamma^2}{n}
\sum_{t'=1}^{\phi(t)-1} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \mathbb{E} \left \| \sum_{{u} \in D(\xi(t',\psi(t)))} \textbf{U}_{\psi({u})} \widetilde{v}^{\psi({u})}_{{u}} \right \|^2
\\ & \stackrel{ (d) }{\leq} & \nonumber
\frac{\tau_1 L^2 \gamma^2}{n} \sum_{t'=1}^{\phi(t)-1} \sum_{{u} \in D(\xi(t',\psi(t)))} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \mathbb{E} \left \| \widetilde{v}^{\psi({u})}_{{u}} \right \|^2
\end{eqnarray}
where the inequality (a) can be obtained similarly to (\ref{csaga-5}) (note that $\widehat{\alpha}_i^{t',\psi(t')}$ is an inconsistent read of ${\alpha}_i^{t',\psi(t')}$, whose time interval can overlap with that of ${\alpha}_i^{t',\psi(t')}$), the equality (b) uses the fact that ${\alpha}_{i}^{0,\ell} = \widehat{\alpha}_{i}^{0,\ell}$, the inequality (c) uses Assumption~3, and the inequality (d) uses Assumption~\ref{assum4}. Moreover, we have
\begin{eqnarray}\label{saga-7}
&& \mathbb{E}\left \| \widetilde{\alpha}_{i_t}^{t,\psi(t)} - \widehat{\alpha}_{i_t}^{t,\psi(t)} \right \|^2
= \frac{1}{n} \sum_{i=1}^n \mathbb{E} \left \| \widetilde{\alpha}_{i}^{t,\psi(t)}
- \widehat{\alpha}_{i}^{t,\psi(t)} \right \|^2
\\ \nonumber & = & \frac{1}{n} \sum_{t'=0}^{\phi(t)-1} \sum_{i=1}^n \mathbb{E} \mathbf{1}_{ \{ \textbf{u}_{i}^u =t'\} } \left \| \widetilde{\alpha}_{i}^{t',\psi(t')}
- \widehat{\alpha}_{i}^{t',\psi(t)} \right \|^2
\\ & \leq& \nonumber
\frac{1}{n}\sum_{i=1}^n \sum_{t'=1}^{\phi(t)-1} \left ( \frac{1}{n} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \mathbb{E} \left \| \widetilde{\alpha}_{i}^{t',\psi(t)}
- \widehat{\alpha}_{i}^{t',\psi(t)} \right \|^2 \right )
+ \frac{1}{n}\sum_{i=1}^n \left ( 1 -\frac{1}{n} \right )^{\phi(t)-1} \mathbb{E} \left \| \widetilde{\alpha}_{i}^{0,\ell}
- \widehat{\alpha}_{i}^{0,\ell} \right \|^2
\\ & = & \nonumber
\frac{1}{n} \sum_{t'=1}^{\phi(t)-1} \sum_{i=1}^n \left ( \frac{1}{n} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \mathbb{E} \left \| \widetilde{\alpha}_{i}^{t',\psi(t)}
- \widehat{\alpha}_{i}^{t',\psi(t)} \right \|^2 \right )
\\ & \stackrel{(a)}{\leq} & \nonumber
\frac{1}{n} \sum_{t'=1}^{\phi(t)-1} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \biggl(2 \mathbb{E} \| \nabla_{\mathcal{G}_\ell} f_i(\widehat{w}_{\xi(t',\psi(t))})
- \nabla_{\mathcal{G}_\ell} f_i(\bar{w}_{\xi(t',\psi(t))}) \|^2
\\
& & + \nonumber 2\mathbb{E} \|\nabla_{\mathcal{G}_\ell} g(\widehat{w}_{\xi(t',\psi(t))}) - \nabla_{\mathcal{G}_\ell} g(\bar{w}_{\xi(t',\psi(t))})\|^2 \biggr)
\\ & \stackrel{(b)}{\leq} & \nonumber
\frac{1}{n} \sum_{t'=1}^{\phi(t)-1} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \biggl(2 L^2\mathbb{E} \| \widehat{w}_{\xi(t',\psi(t))}
- \bar{w}_{\xi(t',\psi(t))} \|^2
+ 2 L_g^2\mathbb{E} \| \widehat{w}_{\xi(t',\psi(t))} - \bar{w}_{\xi(t',\psi(t))}\|^2 \biggr)
\\ & \stackrel{ (c) }{\leq} & \nonumber
\frac{4L_*^2}{n} \sum_{t'=1}^{\phi(t)-1} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \mathbb{E} \left \| \widehat{w}_{\xi(t',\psi(t))} - \bar{w}_{\xi(t',\psi(t))} \right \|^2
\\ & = & \nonumber
\frac{4L_*^2 \gamma^2}{n} \sum_{t'=1}^{\phi(t)-1} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \mathbb{E} \left \| \sum_{{u} \in D'(\xi(t',\psi(t)))} \textbf{U}_{\psi({u})} \widetilde{v}^{\psi({u})}_{{u}} \right \|^2
\\ & \stackrel{ (d) }{\leq} & \nonumber
\frac{4\tau_2 L_*^2 \gamma^2}{n} \sum_{t'=1}^{\phi(t)-1} \sum_{{u} \in D'(\xi(t',\psi(t)))} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \mathbb{E} \left \| \widetilde{v}^{\psi({u})}_{{u}} \right \|^2
\end{eqnarray}
where (a)-(d) can be obtained from the analysis of Lemma~\ref{lem-csgd-2}. This completes the proof.
\end{proof}
\begin{lemma}\label{lem-csaga-3}
Given a global iteration number $u$, let $\{\bar{u}_{0}, \bar{u}_{1}, \ldots, \bar{u}_{v(u)-1}\}$ be all the starting iteration numbers for the global time counters from $0$ to $u$. Then, for VF{${\textbf{B}}^2$}-SAGA, we have that
\begin{align}\label{saga-lem2}
\mathbb{E}\|v_u^{\psi(u)}\|^2 \leq 4 \frac{L^{2} \eta_{1}}{n} \sum_{k^{\prime}=1}^{v(u)}\left(1-\frac{1}{n}\right)^{v(u)-k^{\prime}} \sigma\left(w_{\bar{u}_{k^{\prime}}}\right)
+ 2 L^{2}\left(1-\frac{1}{n}\right)^{v(u)} \sigma\left(w_{0}\right)+4 L^{2} \sigma\left(w_{\varphi(u)}\right)+8 L^{2} \gamma^{2} \eta_{1}^{2} \lambda_{\gamma} G
\end{align}
\end{lemma}
\begin{proof}[\bf{Proof of Lemma~\ref{lem-csaga-3}}]
We have that
\begin{align}\label{saga-111}
& \mathbb{E}\|v_u^{\psi(u)}\|^2
\nonumber \\
& = \mathbb{E}\left\|\nabla_{\mathcal{G}_{\psi(u)}} f_{i_{u}} (w_{u}) - \alpha_{i_{u}}^{u,\ell} + \frac{1}{n} \sum_{i=1}^n \alpha_{i}^{u,\ell}\right\|^2
\nonumber \\
& = \mathbb{E}\left\|\nabla_{\mathcal{G}_{\psi(u)}} f_{i_{u}}\left(w_{u}\right)-\nabla_{\mathcal{G}_{\psi(u)}} f_{i_{u}}\left(w^{*}\right)-\alpha_{i_{u}}^{u, \ell}+\nabla_{\mathcal{G}_{\psi(u)}} f_{i_{u}}\left(w^{*}\right)+\frac{1}{n} \sum_{i=1}^{n} \alpha_{i}^{u, \ell}-\nabla_{\mathcal{G}_{\psi(u)}} f\left(w^{*}\right)+\nabla_{\mathcal{G}_{\psi(u)}} f\left(w^{*}\right)\right\|^{2}
\nonumber \\
& \stackrel{(a)}{\leq}
2 \mathbb{E}\left\|\nabla_{\mathcal{G}_{\psi(u)}} f_{i_{u}}\left(w^{*}\right)-\alpha_{i_{u}}^{u, \ell}+\frac{1}{n} \sum_{i=1}^{n} \alpha_{i}^{u, \ell}-\nabla_{\mathcal{G}_{\psi(u)}} f\left(w^{*}\right)\right\|^{2}+2 \mathbb{E}\left\|\nabla_{\mathcal{G}_{\psi(u)}} f_{i_{u}}\left(w_{u}\right)-\nabla_{\mathcal{G}_{\psi(u)}} f_{i_{u}}\left(w^{*}\right)\right\|^{2}
\nonumber \\
&\stackrel{(b)}{\leq}
2 \mathbb{E}\left\|\alpha_{i_{u}}^{u, \ell}-\nabla_{\mathcal{G}_{\psi(u)}} f_{i_{u}}\left(w^{*}\right)\right\|^{2}+2 \mathbb{E}\left\|\nabla_{\mathcal{G}_{\psi(u)}} f_{i_{u}}\left(w_{u}\right)-\nabla_{\mathcal{G}_{\psi(u)}} f_{i_{u}}\left(w^{*}\right)\right\|^{2}
\nonumber \\
& \stackrel{(c)}{\leq}
2 \frac{L^{2}}{n} \sum_{u^{\prime}=1}^{\phi(u)-1}\left(1-\frac{1}{n}\right)^{\phi(u)-u^{\prime}-1} \mathbb{E}\left\|w_{\xi\left(u^{\prime}, \ell\right)}-w^{*}\right\|^{2}+2 L^{2}\left(1-\frac{1}{n}\right)^{\phi(u)} \sigma\left(w_{0}\right)
\nonumber \\
& + 2L^2\mathbb{E}\|w_u-w^*\|^2
\nonumber \\
& =
2 \frac{L^{2}}{n} \sum_{u^{\prime}=1}^{\phi(u)-1}\left(1-\frac{1}{n}\right)^{\phi(u)-u^{\prime}-1} \mathbb{E}\left\|w_{\xi\left(u^{\prime}, \ell\right)}-w_{\varphi\left(\xi\left(u^{\prime}, \ell\right)\right)}+w_{\varphi\left(\xi\left(u^{\prime}, \ell\right)\right)}-w^{*}\right\|^{2}
\nonumber \\ &
+2 L^{2}\left(1-\frac{1}{n}\right)^{\phi(u)} \sigma\left(w_{0}\right)+2 L^{2} \mathbb{E}\left\|w_{u}-w_{\varphi(u)}+w_{\varphi(u)}-w^{*}\right\|^{2}
\nonumber \\ &
\stackrel{(d)}{\leq}
2 \frac{L^{2}}{n} \sum_{u^{\prime}=1}^{\phi(u)-1}\left(1-\frac{1}{n}\right)^{\phi(u)-u^{\prime}-1} \mathbb{E}\left(2\left\|w_{\xi\left(u^{\prime}, \ell\right)}-w_{\varphi\left(\xi\left(u^{\prime}, \ell\right)\right)}\right\|^{2}+2\left\|w_{\varphi\left(\xi\left(u^{\prime}, \ell\right)\right)}-w^{*}\right\|^{2}\right)
\nonumber \\ &
+ 2 L^{2}\left(1-\frac{1}{n}\right)^{v(u)} \sigma\left(w_{0}\right)+ 4 L^{2} \mathbb{E}\left\|w_{\varphi(u)}-w^{*}\right\|^{2}+4 L^{2} \gamma^{2} \mathbb{E}\left\|\sum_{v \in\{\varphi(u), \ldots, u\}} \mathbf{U}_{\psi(v)} \widehat{v}_{v}^{\psi(v)}\right\|^{2}
\nonumber \\ & \stackrel{(e)}{\leq}
2 \frac{L^{2}}{n} \sum_{u^{\prime}=1}^{\phi(u)-1}\left(1-\frac{1}{n}\right)^{\phi(u)-u^{\prime}-1} \mathbb{E}\left(2 \eta_{1} \gamma^{2} \sum_{v \in\left\{\varphi\left(\xi\left(u^{\prime}, \ell\right)\right), \ldots, \xi\left(u^{\prime}, \ell\right)\right\}}\left\|\widehat{v}_{v}^{\psi(v)}\right\|^{2}+2\left\|w_{\varphi\left(\xi\left(u^{\prime}, \ell\right)\right)}-w^{*}\right\|^{2}\right)
\nonumber \\ & +
2 L^{2}\left(1-\frac{1}{n}\right)^{v(u)} \sigma\left(w_{0}\right)+4 L^{2} \mathbb{E}\left\|w_{\varphi(u)}-w^{*}\right\|^{2}+4 L^{2} \gamma^{2} \eta_{1} \sum_{v \in\{\varphi(u), \ldots, u\}} \mathbb{E}\left\|\widehat{v}_{v}^{\psi(v)}\right\|^{2}
\nonumber \\ & \stackrel{(f)}{\leq}
2 \frac{L^{2}}{n} \sum_{u^{\prime}=1}^{\phi(u)-1}\left(1-\frac{1}{n}\right)^{\phi(u)-u^{\prime}-1} \mathbb{E}\left(2 \eta_{1}^{2} \gamma^{2} \lambda_{\gamma}G
+2\left\|w_{\varphi\left(\xi\left(u^{\prime}, \ell\right)\right)}-w^{*}\right\|^{2}\right)
\nonumber \\ &
+2 L^{2}\left(1-\frac{1}{n}\right)^{v(u)} \sigma\left(w_{0}\right)+4 L^{2} \mathbb{E}\left\|w_{\varphi(u)}-w^{*}\right\|^{2}+4 L^{2} \gamma^{2} \eta_{1}^{2} \lambda_{\gamma}G
\nonumber \\ & \stackrel{(g)}{\leq}
4 \frac{L^{2}}{n} \sum_{u^{\prime}=1}^{\phi(u)-1}\left(1-\frac{1}{n}\right)^{\phi(u)-u^{\prime}-1} \mathbb{E}\left\|w_{\varphi\left(\xi\left(u^{\prime}, \ell\right)\right)}-w^{*}\right\|^{2}
\nonumber \\ &
+2 L^{2}\left(1-\frac{1}{n}\right)^{v(u)} \sigma\left(w_{0}\right)+4 L^{2} \mathbb{E}\left\|w_{\varphi(u)}-w^{*}\right\|^{2}+8 L^{2} \gamma^{2} \eta_{1}^{2} \lambda_{\gamma}G
\nonumber \\ & \stackrel{(h)}{\leq}
4 \frac{L^{2} \eta_{1}}{n} \sum_{k^{\prime}=1}^{v(u)}\left(1-\frac{1}{n}\right)^{v(u)-k^{\prime}} \sigma\left(w_{\bar{u}_{k^{\prime}}}\right)
+ 2 L^{2}\left(1-\frac{1}{n}\right)^{v(u)} \sigma\left(w_{0}\right)+4 L^{2} \sigma\left(w_{\varphi(u)}\right)+8 L^{2} \gamma^{2} \eta_{1}^{2} \lambda_{\gamma}G
\end{align}
where (a) and (d) use $\|\sum_{i=1}^{n}a_i\|^2\leq n \sum_{i=1}^{n}\|a_i\|^2$, (b) follows from $\mathbb{E}\|x-\mathbb{E} x\|^{2} \leq \mathbb{E}\|x\|^{2}$, (c) uses Lemma~\ref{lem-csaga-1}, (e) follows from the bound of $|K(t)|$, (f) follows from Assumption~\ref{assum1}, and (g) uses the fact that $\sum_{u^{\prime}=1}^{\phi(u)-1}\left(1-\frac{1}{n}\right)^{\phi(u)-u^{\prime}-1}\leq n$.
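For completeness, the geometric-series fact used in (g) can be verified directly:
\[
\sum_{u^{\prime}=1}^{\phi(u)-1}\left(1-\frac{1}{n}\right)^{\phi(u)-u^{\prime}-1}
=\sum_{j=0}^{\phi(u)-2}\left(1-\frac{1}{n}\right)^{j}
\leq \sum_{j=0}^{\infty}\left(1-\frac{1}{n}\right)^{j}
= n.
\]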
\end{proof}
\begin{lemma}\label{lem-csaga-2}
For all $\psi(t)$, we have
\begin{equation}\label{csaga-lem2}
\mathbb{E} ||\widetilde{v}_{t}^{\psi(t)}||^2 \leq \lambda_{\gamma} G
\end{equation}
where $\lambda_{\gamma}= \frac{18}{1 - 72L_{*}^2\gamma^2\tau }$.
\end{lemma}
\begin{proof}[\bf{Proof of Lemma~\ref{lem-csaga-2}}]
We first give an upper bound on $\mathbb{E} \| \widetilde{v}^{ {\psi(t)} }_t - \widehat{v}^{{\psi(t)}}_t \|^2$ as follows.
We have that
\begin{eqnarray}\label{csaga-8}
&& \mathbb{E} \left \| \widetilde{v}^{ {\psi(t)} }_t - \widehat{v}^{{\psi(t)}}_t \right \|^2
\\ & = & \nonumber
\mathbb{E} \left \| \left(\nabla_{\mathcal{G}_{\psi(t)}}\mathcal{L}(\bar{w})
+ \nabla_{\mathcal{G}_{\psi(t)}} g((\widehat{w}_t)_{\mathcal{G}_{\psi(t)}}) \right)
- \nabla_{\mathcal{G}_{\psi(t)}} f(\widehat{w}_t)
- \widehat{\alpha}_{i_t}^{t,\psi(t)} + \widetilde{\alpha}_{i_t}^{t,\psi(t)}
+ \frac{1}{n} \sum_{i=1}^n \widehat{\alpha}_{i}^{t,\psi(t)} - \frac{1}{n} \sum_{i=1}^n \widetilde{\alpha}_{i}^{t,\psi(t)} \right \|^2
\\ & \stackrel{ (a) }{\leq} & \nonumber
3 \mathbb{E} Q_1
+ 3\mathbb{E} \underbrace{\left \| \widetilde{\alpha}_{i_t}^{t,\psi(t)}
- \widehat{\alpha}_{i_t}^{t,\psi(t)} \right \|^2 }_{Q_2}
+ 3\mathbb{E} \underbrace{\left \| \frac{1}{n} \sum_{i=1}^n \widetilde{\alpha}_{i}^{t,\psi(t)} - \frac{1}{n} \sum_{i=1}^n \widehat{\alpha}_{i}^{t,\psi(t)} \right \|^2}_{Q_3}
\end{eqnarray}
where $Q_1 = \left \| \left(\nabla_{\mathcal{G}_{\psi(t)}}\mathcal{L}(\bar{w})
+ \nabla_{\mathcal{G}_{\psi(t)}} g((\widehat{w}_t)_{\mathcal{G}_{\psi(t)}}) \right)
- \nabla_{\mathcal{G}_{\psi(t)}} f(\widehat{w}_t) \right\|^2$ and inequality (a) uses $\| \sum_{i=1}^n a_i \|^2 \leq n \sum_{i=1}^n \| a_i \|^2 $.
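Since the decomposition above contains three terms, this inequality is applied with $n=3$, i.e., $\|a_1+a_2+a_3\|^2\leq 3\left(\|a_1\|^2+\|a_2\|^2+\|a_3\|^2\right)$, which is where the factor $3$ in front of $Q_1$, $Q_2$ and $Q_3$ comes from.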
We will give the upper bounds for the expectations of $Q_1$, $Q_2$ and $Q_3$ respectively.
\begin{eqnarray}\label{csaga-9}
\nonumber \mathbb{E} Q_1 &=& \mathbb{E} \left \| \left(\nabla_{\mathcal{G}_{\psi(t)}}\mathcal{L}(\bar{w})
+ \nabla_{\mathcal{G}_{\psi(t)}} g((\widehat{w}_t)_{\mathcal{G}_{\psi(t)}}) \right)
- \nabla_{\mathcal{G}_{\psi(t)}} f(\widehat{w}_t) \right\|^2
\\&\stackrel{(a)}{\leq}&
\mathbb{E} ||\nabla_{\mathcal{G}_{\psi(t)}} f(\bar{w}_t) - \nabla_{\mathcal{G}_{\psi(t)}} f(\widehat{w}_t) + \nabla_{\mathcal{G}_{\psi(t)}} g((\widehat{w}_t)_{\mathcal{G}_{\psi(t)}}) - \nabla_{\mathcal{G}_{\psi(t)}} g((\bar{w}_t)_{\mathcal{G}_{\psi(t)}})||^2
\nonumber \\
&\stackrel{(b)}{\leq}& 4{ L_{{*}}^2 \gamma^2 \tau_2} \sum_{t' \in D'(t)} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2
\end{eqnarray}
The above inequality can be obtained by following the proof of Lemma~\ref{lem-csgd-2}.
\begin{eqnarray}\label{csaga-10}
\mathbb{E} Q_2 &=& \mathbb{E}\left \| \widetilde {\alpha}_{i_t}^{t,\psi(t)} - \widehat{\alpha}_{i_t}^{t,\psi(t)} \right \|^2
\\ \nonumber &\leq & \frac{4\tau_2 L_*^2 \gamma^2}{n} \sum_{t'=1}^{\phi(t)-1} \sum_{{u} \in D'(\xi(t',\psi(t)))} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \mathbb{E} \left \| \widetilde{v}^{\psi({u})}_{{u}} \right \|^2
\end{eqnarray}
where the inequality uses Lemma \ref{lem-csaga-1}.
\begin{eqnarray}\label{cSAGA-12}
&& \mathbb{E} Q_3 = \mathbb{E} \left \| \frac{1}{n} \sum_{i=1}^n \widetilde{\alpha}_{i}^{t,\psi(t)} - \frac{1}{n} \sum_{i=1}^n \widehat{\alpha}_{i}^{t,\psi(t)} \right \|^2
\\ & \leq & \nonumber
\frac{1}{n} \sum_{i=1}^n \mathbb{E} \left \| \widetilde{\alpha}_{i}^{t,\psi(t)} - \widehat{\alpha}_{i}^{t,\psi(t)} \right \|^2
\\ & \leq &
\frac{4\tau_2 L_*^2 \gamma^2}{n} \sum_{t'=1}^{\phi(t)-1} \sum_{{u} \in D'(\xi(t',\psi(t)))} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \mathbb{E} \left \| \widetilde{v}^{\psi({u})}_{{u}} \right \|^2 \nonumber
\end{eqnarray}
where the first inequality uses $\| \sum_{i=1}^n a_i \|^2 \leq n \sum_{i=1}^n \| a_i \|^2 $ and the second inequality uses Lemma~\ref{lem-csaga-1}. Combining Eqs.~(\ref{csaga-9}), (\ref{csaga-10}), and (\ref{cSAGA-12}), one can obtain:
\begin{eqnarray}\label{cSAGA-13}
&& \mathbb{E} \left \| \widetilde{v}_{t}^{\psi(t)} - \widehat{v}_t^{\psi(t)} \right \|^2
\\ & \leq & \nonumber 3 \mathbb{E} {Q_1} + 3 \mathbb{E} {Q_2} + 3\mathbb{E} {Q_3}
\\ & \leq & \nonumber {12 L_*^2\gamma^2\tau_2 } \sum_{t' \in D'(t)} \mathbb{E} ||\widetilde{v}^{\psi(t')}_{t'}||^2
+ \frac{24\tau_2 L_*^2 \gamma^2}{n} \sum_{t'=1}^{\phi(t)-1} \sum_{u \in D'(\xi(t',\psi(t)))} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \mathbb{E} \left \| \widetilde{v}^{\psi({u})}_{{u}} \right \|^2
\end{eqnarray}
Combining $\mathbb{E}\| \widetilde{v}_{t}^{\psi(t)}\|^2 \leq 2\mathbb{E} \left \| \widetilde{v}_{t}^{\psi(t)} - \widehat{v}_t^{\psi(t)} \right \|^2 + 2\mathbb{E} \left \|\widehat{v}_t^{\psi(t)} \right \|^2$ with Eq.~(\ref{cSAGA-13}) and following the analysis of Lemma~\ref{lem-csvrg-1}, we have
\begin{equation}\label{csaga-13}
\mathbb{E} ||\widetilde{v}_{t}^{\psi(t)}||^2 \leq \frac{18G}{1 - 72L_{*}^2\gamma^2\tau },
\end{equation}
This completes the proof.
\end{proof}
Moreover,
define ${v}^{{\psi(t)}}_t= \nabla_{\mathcal{G}_{\psi(t)}} f_{i_t} (w_t) - \alpha_{i_t}^{t,\psi(t)} + \frac{1}{n} \sum_{i=1}^n \alpha_{i}^{t,\psi(t)}$.
We then give an upper bound on $\mathbb{E} \left \| \widehat{v}^{ {\psi(t)} }_t - {v}^{{\psi(t)}}_t \right \|^2$ as follows.
We have that
\begin{eqnarray}\label{csaga-14}
&& \mathbb{E} \left \| \widehat{v}_{t}^{\psi(t)} - v_{t}^{\psi(t)} \right \|^2
\\ & = & \nonumber \mathbb{E} \left \| \nabla_{\mathcal{G}_{\psi(t)}} f_{i_t} (\widehat{w}_{t})- \widehat{\alpha}_{i_t}^{t,\psi(t)} + \frac{1}{n} \sum_{i=1}^n \widehat{\alpha}_{i}^{t,\psi(t)} - \nabla_{\mathcal{G}_{\psi(t)}} f_{i_t} ({w}_{t}) + \alpha_{i_t}^{t,\psi(t)} - \frac{1}{n} \sum_{i=1}^n \alpha_{i}^{t,\psi(t)} \right \|^2
\\ & \stackrel{ (a) }{\leq} & \nonumber 3 \mathbb{E} \underbrace{\left \| \nabla_{\mathcal{G}_{\psi(t)}} f_{i_t} (\widehat{w}_{t})- \nabla_{\mathcal{G}_{\psi(t)}} f_{i_t} ({w}_{t}) \right \|^2 }_{Q_4}
+ 3\mathbb{E} \underbrace{\left \| {\alpha}_{i_t}^{t,\psi(t)} - \widehat{\alpha}_{i_t}^{t,\psi(t)} \right \|^2 }_{Q_5}
+ 3\mathbb{E} \underbrace{\left \| \frac{1}{n} \sum_{i=1}^n \alpha_{i}^{t,\psi(t)} - \frac{1}{n} \sum_{i=1}^n \widehat{\alpha}_{i}^{t,\psi(t)} \right \|^2}_{Q_6}
\end{eqnarray}
where the inequality (a) uses $\| \sum_{i=1}^n a_i \|^2 \leq n \sum_{i=1}^n \| a_i \|^2 $.
We will give the upper bounds for the expectations of $Q_4$, $Q_5$ and $Q_6$ respectively.
\begin{eqnarray}\label{csaga-15}
\nonumber \mathbb{E} Q_4 &=& \mathbb{E} \left \| \nabla_{\mathcal{G}_{{\psi(t)}}} f_{i_t} (\widehat{w}_{t})- \nabla_{\mathcal{G}_{{\psi(t)}}} f_{i_t} ({w}_{t}) \right \|^2 \\
&\stackrel{(a)}{\leq}& { L_{{\psi(t)}}^2}
\mathbb{E} ||{w}_{t} - \widehat{w}_{t}||^2
\nonumber \\
&=& {L_{{\psi(t)}}^2\gamma^2} \mathbb{E} ||\sum_{t' \in D(t)} \textbf{U}_{\psi(t')} \widetilde{v}^{\psi(t')}_{t'} ||^2
\nonumber \\
&\stackrel{(b)}{\leq}& { \tau_1 L_*^2\gamma^2 } \sum_{t' \in D(t)} \mathbb{E} \left[ ||\widetilde{v}^{\psi(t')}_{t'}||^2 \right] \nonumber
\end{eqnarray}
where (a) uses Assumption~\ref{assum2}, and (b) uses Assumption~\ref{assum4} and $\| \sum_{i=1}^n a_i \|^2 \leq n \sum_{i=1}^n \| a_i \|^2 $. Similarly to the analyses of $Q_2$ and $Q_3$, we have
\begin{eqnarray}\label{csaga-16}
\mathbb{E} Q_5 = \mathbb{E}\left \| {\alpha}_{i_t}^{t,\psi(t)} - \widehat{\alpha}_{i_t}^{t,\psi(t)} \right \|^2
\leq \frac{\tau_1 L^2 \gamma^2}{n} \sum_{t'=1}^{\phi(t)-1} \sum_{\widetilde{u} \in D(\xi(t',\psi(t')))} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \mathbb{E} \left \| \widetilde{v}^{\psi(\widetilde{u})}_{\widetilde{u}} \right \|^2
\end{eqnarray}
where the inequality uses Lemma \ref{lem-csaga-1}.
\begin{eqnarray}\label{csaga-17}
&&\mathbb{E} Q_6 = \mathbb{E} \left \| \frac{1}{n} \sum_{i=1}^n {\alpha}_{i}^{t,\psi(t)} - \frac{1}{n} \sum_{i=1}^n \widehat{\alpha}_{i}^{t,\psi(t)} \right \|^2
\nonumber \\
&\leq& \frac{\tau_1 L^2 \gamma^2}{n} \sum_{t'=1}^{\phi(t)-1} \sum_{\widetilde{u} \in D(\xi(t',\psi(t')))} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \mathbb{E} \left \| \widetilde{v}^{\psi(\widetilde{u})}_{\widetilde{u}} \right \|^2 \nonumber
\end{eqnarray}
Based on the above formulations, we have
\begin{align}\label{csaga-18}
\mathbb{E} \left \| \widetilde{v}_{t}^{\psi(t)} \right \|^2 & = \mathbb{E} \left \| \widetilde{v}_{t}^{\psi(t)} -{v}_{t}^{\psi(t)} + {v}_t^{\psi(t)} \right \|^2
\leq 2\mathbb{E} \| \widetilde{v}_{t}^{\psi(t)} -{v}_{t}^{\psi(t)} \|^2+ 2\mathbb{E} \|{v}_t^{\psi(t)} \|^2
\nonumber \\
& \leq 2\left( 2\mathbb{E} \| \widetilde{v}_{t}^{\psi(t)} -\widehat{v}_{t}^{\psi(t)}\|^2 + 2 \mathbb{E} \|\widehat{v}_{t}^{\psi(t)} - {v}_t^{\psi(t)} \|^2\right) + 2\mathbb{E} \|{v}_t^{\psi(t)} \|^2
\nonumber \\
& \leq \frac{360L_{*}^2\gamma^2\tau G}{1 - 72L_{*}^2\gamma^2\tau } + 2\mathbb{E} \|{v}_t^{\psi(t)} \|^2
\end{align}
\begin{proof}[\bf{Proof of Theorem \ref{thm-sagaconvex}}]
First, we upper bound $\mathbb{E}\left[f(w_{t+|K(t)|}) - f(w_{t})\right]$ for $t =0,\cdots,S-1$:
\begin{eqnarray}\label{csaga-19}
&& \mathbb{E} \left[f (w_{t+|K(t)|}) - f (w_{t}) \right]
\\ \nonumber
&\leq&
-\frac{\gamma}{2}\sum_{u \in K(t)} \mathbb{E} \| \nabla_{\mathcal{G}_{\psi(u)}} f ({w}_{u}) \|^2
+ \frac{L_* \gamma^2}{2}\sum_{u \in K(t)} \mathbb{E} \| \widetilde{v}^{\psi(u)}_{ u }\|^2
\nonumber \\
&+& (\frac{\tau_1 L^2 \gamma^3}{n} + \frac{4\tau_2 L^2 \gamma^3}{n})\sum_{u\in K(t)}\sum_{t'=1}^{\phi(u)-1} \sum_{{u'} \in D'(\xi(t',\psi(t)))} \left ( 1 -\frac{1}{n} \right )^{\phi(u)-t'-1} \mathbb{E} \left \| \widetilde{v}^{\psi({u'})}_{{u'}} \right \|^2
\\ \nonumber
&\stackrel{(a)}{\leq}&
-\frac{\gamma}{2}\left( \frac{1}{2} \sum_{u\in K(t)} \mathbb{E}\| \nabla_{\mathcal{G}_{\psi(u)}} f({w}_{t})\|^2
- L^2 \gamma^2 \eta_1 \sum_{u\in K(t)} \sum_{u' \in \{t,\cdots,u\}} \mathbb{E} \|\widetilde{v}_{u'}^{\psi(u')}\|^2 \right)
\\ \nonumber &&
+ \frac{L_* \gamma^2}{2}\sum_{u \in K(t)} \mathbb{E} \|\widetilde{v}^{\psi(u)}_{ u }\|^2
+ \frac{5\tau^{1/2} L^2 \gamma^3}{n} \sum_{u \in K(t)} \sum_{t'=1}^{\phi(u)-1} \sum_{{u'} \in D'(\xi(t',\psi(t)))} \left ( 1 -\frac{1}{n} \right )^{\phi(u)-t'-1} \mathbb{E} \left \| \widetilde{v}^{\psi({u'})}_{{u'}} \right \|^2
\\ \nonumber &=&
-\frac{\gamma}{4} \sum_{u\in K(t)} \mathbb{E}\| \nabla_{\mathcal{G}_{\psi(u)}} f({w}_{t})\|^2
+\frac{L^2 \gamma^3 \eta_1}{2} \sum_{u \in K(t)}\sum_{u' \in \{t,...,u\}} \mathbb{E} \|\widetilde{v}_{u'}^{\psi(u')}\|^2
\\ \nonumber &&
+ \frac{L_* \gamma^2}{2}\sum_{u \in K(t)} \mathbb{E} \|\widetilde{v}^{\psi(u)}_{ u }\|^2
+ \frac{5\tau^{1/2} L^2 \gamma^3}{n}\sum_{u \in K(t)}\sum_{t'=1}^{\phi(u)-1} \sum_{{u'} \in D'(\xi(t',\psi(t)))} \left ( 1 -\frac{1}{n} \right )^{\phi(u)-t'-1} \mathbb{E} \left \| \widetilde{v}^{\psi({u'})}_{{u'}} \right \|^2
\\ \nonumber &\stackrel{(b)}{\leq}&
-\frac{\gamma}{4} \mathbb{E}\| \nabla f({w}_{t})\|^2
+ {(\frac{L_*^2 \gamma^3 \tau}{2} + \frac{L_*\gamma^2}{2})}\sum_{u\in K(t)}
\mathbb{E} \| \widetilde{v}^{\psi(u)}_{u} \|^2
\\ \nonumber &+&
\frac{5\tau^{1/2}L^2 \gamma^3}{n}\sum_{u \in K(t)}\sum_{t'=1}^{\phi(u)-1} \sum_{{u'} \in D'(\xi(t',\psi(t)))} \left ( 1 -\frac{1}{n} \right )^{\phi(u)-t'-1} \mathbb{E} \left \| \widetilde{v}^{\psi({u'})}_{{u'}} \right \|^2
\\ \nonumber &\stackrel{(c)}{\leq}&
-\frac{\gamma}{4} \mathbb{E}\| \nabla f({w}_{t})\|^2
+ 5 { L_{*}^2 \gamma^3 \tau^{1/2}} \sum_{u \in K(t)} \sum_{u' \in D^\prime(u)} \mathbb{E} \| \widetilde{v}^{\psi(u')}_{u'} \|^2
\nonumber \\
&& + (\frac{L_*^2 \gamma^3 \tau}{2} + \frac{L_*\gamma^2}{2})\sum_{u\in K(t)}\left(180L_{*}^2\gamma^2\tau \cdot \frac{18G}{1 - 72L_{*}^2\gamma^2\tau } + 2\mathbb{E} \|{v}_t^{\psi(t)} \|^2\right)
\\ \nonumber &\stackrel{(d)}{\leq}&
-\frac{\gamma}{4} \mathbb{E}\| \nabla f({w}_{t})\|^2
+ \left(2 { L_{*}^2 \gamma^3 \tau^{1/2}\eta_1\tau_2} + (\frac{L_*^2 \gamma^3 \tau}{2} + \frac{L_*\gamma^2}{2})\eta_1 \cdot 180 L_{*}^2\gamma^2\tau\right)\frac{18G}{1 - 72L_{*}^2\gamma^2\tau }
\nonumber \\
&& + (\frac{L_*^2 \gamma^3 \tau}{2} + \frac{L_*\gamma^2}{2})\sum_{u\in K(t)}
2\biggl( 4 \frac{L^{2} \eta_{1}}{n} \sum_{k^{\prime}=1}^{v(u)}\left(1-\frac{1}{n}\right)^{v(u)-k^{\prime}} \sigma\left(w_{\bar{u}_{k^{\prime}}}\right)
\nonumber \\
&& + 2 L^{2}\left(1-\frac{1}{n}\right)^{v(u)} \sigma\left(w_{0}\right)
+4 L^{2} \sigma\left(w_{\varphi(u)}\right)
+ 8 L^{2} \gamma^{2} \eta_{1}^{2} \lambda_{\gamma}G\biggr)
\\ \nonumber &\stackrel{(e)}{\leq}&
-\frac{\gamma\mu}{4}e(w_t) - \frac{\gamma\mu}{4}\sigma(w_t)
+ ({L_*^2 \gamma^3 \tau}+ {L_*\gamma^2})
\eta_1\left( 2 L^{2}\left(1-\frac{1}{n}\right)^{v(u)} \sigma\left(w_{0}\right)
+4 L^{2} \sigma\left(w_{t}\right)\right)
\nonumber \\
&&
+ \left(2 { L_{*}^2 \gamma^3 \tau^{1/2}\eta_1\tau_2} + ({L_*^2 \gamma^3 \tau} + {L_*\gamma^2})\eta_1 \cdot 180 L_{*}^2\gamma^2\tau + 8 L^{2} \gamma^{2} \eta_{1}^{2}\right)\frac{18G}{1 - 72L_{*}^2\gamma^2\tau }
\nonumber \\
&& + 4 ({L_*^2 \gamma^3 \tau}+ {L_*\gamma^2})
\frac{L^{2} \eta_{1}^2}{n} \sum_{k^{\prime}=1}^{v(u)}\left(1-\frac{1}{n}\right)^{v(u)-k^{\prime}} \sigma\left(w_{\bar{u}_{k^{\prime}}}\right)
\end{eqnarray}
where (a) follows from Eq.~(\ref{csvrg-5}), (b) uses Lemma~\ref{lem-csaga-3}, (c) follows from Eq.~(\ref{csaga-18}), (d) follows from Lemma~\ref{lem-csaga-3}, and (e) follows from Assumption~\ref{assumc1}. Thus, we have
\begin{align}\label{csaga-20}
& e(w_{t+|K(t)|})
\nonumber \\
& \leq (1-\frac{\gamma\mu}{4})e(w_t) + \underbrace{2L^2\eta_1({L_*^2 \gamma^3 \tau}+ {L_*\gamma^2})}_{c_1} \left( \left(1-\frac{1}{n}\right)^{v(u)} \sigma\left(w_{0}\right)
+ 2\sigma\left(w_{t}\right)\right)
\nonumber \\ &
+ \underbrace{ \left(2 { L_{*}^2 \gamma^3 \tau^{1/2}\eta_1\tau_2} + ({L_*^2 \gamma^3 \tau} + {L_*\gamma^2})\eta_1 \cdot 180 L_{*}^2\gamma^2\tau + 8 L^{2} \gamma^{2} \eta_{1}^{2}\right)\frac{18G}{1 - 72L_{*}^2\gamma^2\tau }}_{c_0}
\nonumber \\ &
+ \underbrace{ 4 ({L_*^2 \gamma^3 \tau}+ {L_*\gamma^2})
\frac{L^{2} \eta_{1}^2}{n} }_{c_2} \sum_{k^{\prime}=1}^{v(u)}\left(1-\frac{1}{n}\right)^{v(u)-k^{\prime}} \sigma\left(w_{\bar{u}_{k^{\prime}}}\right) - \frac{\gamma\mu^2}{4}\sigma(w_t)
\nonumber \\ &
= (1-\frac{\gamma\mu}{4})e(w_t)
+ \left(- \frac{\gamma\mu^2}{4}+ 2c_1 + c_2\right)\sigma(w_t)
+ c_1 (1-\frac{1}{n})^{v(t)}\sigma(w_0)
\nonumber \\
& + c_2\sum_{k^{\prime}=1}^{v(u)}\left(1-\frac{1}{n}\right)^{v(u)-k^{\prime}} \sigma\left(w_{\bar{u}_{k^{\prime}}}\right) + c_0
\end{align}
where $\left\{\bar{u}_{0}, \bar{u}_{1}, \ldots, \bar{u}_{v(u)-1}\right\}$ are all the starting time counters for the global time counters from $0$ to $u$.
We define the Lyapunov function as $\mathcal{L}_{t}=\sum_{k=0}^{v(t)} \rho^{v(t)-k} e\left(w_{\bar{u}_{k}}\right)$, where $\rho \in\left(1-\frac{1}{n}, 1\right)$. Then we have that
\begin{align}\label{csaga-21}
& \mathcal{L}_{t+|K(t)|} \\
& =
\rho^{v(t)+1}e(w_0) + \sum_{k=0}^{v(t)}\rho^{v(t)-k}e(w_{\bar{u}_{k+1}})
\nonumber \\
&\stackrel{(a)}{\leq}
\rho^{v(t)+1}e(w_0) + \sum_{k=0}^{v(t)}\rho^{v(t)-k}\biggl[
(1-\frac{\gamma\mu}{4})e(w_{\bar{u}_{k}})
+ \left(- \frac{\gamma\mu^2}{4}+ 2c_1 + c_2\right)\sigma(w_{\bar{u}_{k}})
\nonumber \\
&+ c_1 (1-\frac{1}{n})^{v(t)}\sigma(w_0) + c_2\sum_{k^{\prime}=1}^{v(u)}\left(1-\frac{1}{n}\right)^{v(u)-k^{\prime}} \sigma\left(w_{\bar{u}_{k^{\prime}}}\right) + c_0\biggr]
\nonumber \\
&=
\rho^{v(t)+1}e(w_0) + (1-\frac{\gamma\mu}{4})\mathcal{L}_t + \sum_{k=0}^{v(t)}\rho^{v(t)-k}\biggl[
\left(- \frac{\gamma\mu^2}{4}+ 2c_1 + c_2\right)\sigma(w_{\bar{u}_{k}})
\\
&+ c_1 (1-\frac{1}{n})^{v(t)}\sigma(w_0) + c_2\sum_{k^{\prime}=1}^{v(u)}\left(1-\frac{1}{n}\right)^{v(u)-k^{\prime}} \sigma\left(w_{\bar{u}_{k^{\prime}}}\right) \biggr] + \sum_{k=0}^{v(t)}\rho^{v(t)-k}c_0
\nonumber \\
&\stackrel{(b)}{\leq}
\rho^{v(t)+1}e(w_0) + (1-\frac{\gamma\mu}{4})\mathcal{L}_t + \left(- \frac{\gamma\mu^2}{4}+ 2c_1 + c_2\right)\sigma(w_{\bar{u}_{k}}) + \frac{c_0}{1-\rho}
\nonumber \\
&\stackrel{(c)}{\leq}
\rho^{v(t)+1}e(w_0) + (1-\frac{\gamma\mu}{4})\mathcal{L}_t - \left(\frac{\gamma\mu^2}{4}- 2c_1 -c_2\right)\frac{2}{L}e(w_{\bar{u}_{k}}) + \frac{c_0}{1-\rho} \nonumber
\end{align}
where (a) follows from Eq.~(\ref{csaga-20}), (b) holds by appropriately choosing $\gamma$ such that the terms related to $\sigma\left(w_{\bar{u}_{k}}\right)$ $(k=0, \cdots, v(t)-1)$ are negative, which is possible because the signs of the lowest-order (in $\gamma$) coefficients of $\sigma\left(w_{\bar{u}_{k}}\right)$ $(k=0, \cdots, v(t)-1)$ are negative, and (c) uses $e(w)\leq\frac{L}{2}\sigma(w)$ (a consequence of the $L$-smoothness of the objective). In the following we give a detailed analysis of how to choose a suitable $\gamma$ such that the terms related to $\sigma\left(w_{\bar{u}_{k}}\right)$ $(k=0, \cdots, v(t)-1)$ are negative. We first consider $k=0$. Let $C(\sigma(w_0))$ denote the coefficient of $\sigma(w_0)$ in Eq.~(\ref{csaga-21}); then we have that
\begin{align}\label{csaga-22}
& C(\sigma(w_0))
\nonumber \\
& = \rho^{v(t)}\left(-\frac{\gamma \mu^{2}}{4}+2 c_{1}+c_{2}\right)+c_{1} \sum_{k=0}^{v(t)} \rho^{v(t)-k}\left(1-\frac{1}{n}\right)^{k}
\nonumber \\
& = \rho^{v(t)}\left(-\frac{\gamma \mu^{2}}{4}+2 c_{1}+c_{2}+c_{1} \sum_{k=0}^{v(t)}\left(\frac{1-\frac{1}{n}}{\rho}\right)^{k}\right)
\nonumber \\
& \leq \rho^{v(t)}\left(-\frac{\gamma \mu^{2}}{4}+2 c_{1}+c_{2}+c_{1} \frac{1}{1-\frac{1-\frac{1}{n}}{\rho}}\right)
\nonumber \\
& = \rho^{v(t)}\left(-\frac{\gamma \mu^{2}}{4}+c_{2}+c_{1} \left(2 + \frac{1}{1-\frac{1-\frac{1}{n}}{\rho}}\right)\right)
\end{align}
Based on Eq.~\ref{csaga-22}, we can carefully choose $\gamma$ such that $-\frac{\gamma \mu^{2}}{4}+c_{2}+c_{1} \left(2 + \frac{1}{1-\frac{1-\frac{1}{n}}{\rho}}\right)\leq0$.
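Such a choice of $\gamma$ is always possible: by their definitions in Eq.~(\ref{csaga-20}), $c_{1}=2L^2\eta_1\left(L_*^2\gamma^3\tau+L_*\gamma^2\right)$ and $c_{2}=4\left(L_*^2\gamma^3\tau+L_*\gamma^2\right)\frac{L^{2}\eta_{1}^{2}}{n}$ are of order $\gamma^2$, so the left-hand side above has the form
\[
-\frac{\mu^{2}}{4}\gamma + A\gamma^{2} + B\gamma^{3}
\]
for constants $A,B>0$ that collect the Lipschitz and delay factors, and it is nonpositive once $\gamma$ is sufficiently small.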
Let $C(\sigma(w_{\bar{u}_k}))$ denote the coefficient of $\sigma(w_{\bar{u}_k})$ $(k=1,\cdots, v(t)-1)$ in the big square brackets of Eq.~(\ref{csaga-21}); then we have that
\begin{align}\label{csaga-23}
& C(\sigma(w_{\bar{u}_{k}}))
\nonumber \\
& = \rho^{v(t)-k}\left(-\frac{\gamma \mu^{2}}{4}+2 c_{1}+c_{2}\right)
+c_{2} \sum_{i=k+1}^{v(t)-1} \rho^{v(t)-i}\left(1-\frac{1}{n}\right)^{i-k}
\nonumber \\
& = \rho^{v(t)-k}\left(-\frac{\gamma \mu^{2}}{4}+2 c_{1}+c_{2}
+c_{2} \sum_{i=k+1}^{v(t)-1} \rho^{k-i}\left(1-\frac{1}{n}\right)^{i-k}\right)
\nonumber \\
& \leq \rho^{v(t)-k}\left(-\frac{\gamma \mu^{2}}{4}+2 c_{1}+c_{2}
+c_{2} \sum_{i=k+1}^{v(t)-1} \left(\frac{1-\frac{1}{n}}{\rho}\right)^{i-k}\right)
\nonumber \\
& = \rho^{v(t)-k}\left(-\frac{\gamma \mu^{2}}{4} + 2c_{1} + c_2\left(1 + \frac{1}{1-\frac{1-\frac{1}{n}}{\rho}}\right)\right)
\end{align}
Based on Eq.~\ref{csaga-23}, we can carefully choose $\gamma$ such that $-\frac{\gamma \mu^{2}}{4} + 2c_{1} + c_2\left(1 + \frac{1}{1-\frac{1-\frac{1}{n}}{\rho}}\right)\leq 0$.
Thus, based on Eq.~\ref{csaga-21}, we have that
\begin{align}\label{csaga-24}
& \left(\frac{\gamma \mu^{2}}{4}-2 c_{1}-c_{2}\right) \frac{2}{L} e\left(w_{\bar{u}_{k}}\right)
\nonumber \\
& \leq \left(\frac{\gamma \mu^{2}}{4}-2 c_{1}-c_{2}\right) \frac{2}{L} e\left(w_{\bar{u}_{k}}\right)+\mathcal{L}_{t+|K(t)|}
\nonumber \\
&\stackrel{(a)}{\leq} \rho^{v(t)+1} e\left(w_{0}\right)+\left(1-\frac{\gamma \mu}{4}\right) \mathcal{L}_{t}+\frac{c_{0}}{1-\rho}
\nonumber \\
&\stackrel{(b)}{\leq}
\left(1-\frac{\gamma \mu}{4}\right)^{v(t)+1} \mathcal{L}_{0}+\rho^{v(t)+1} e\left(w_{0}\right) \sum_{k=0}^{v(t)+1}\left(\frac{1-\frac{\gamma \mu}{4}}{\rho}\right)^{k}+\frac{c_{0}}{1-\rho} \sum_{k=0}^{v(t)}\left(1-\frac{\gamma \mu}{4}\right)^{k}
\nonumber \\
&\leq \left(1-\frac{\gamma \mu}{4}\right)^{v(t)+1} e\left(w_{0}\right)+\rho^{v(t)+1} e\left(w_{0}\right) \frac{1}{1-\frac{1-\frac{\gamma \mu}{4}}{\rho}}+\frac{c_{0}}{1-\rho} \frac{4}{\gamma \mu}
\nonumber\\
&\stackrel{(c)}{\leq}
\frac{2 \rho-1+\frac{\gamma \mu}{4}}{\rho-1+\frac{\gamma \mu}{4}} \rho^{v(t)+1} e\left(w_{0}\right)+\frac{c_{0}}{1-\rho} \frac{4}{\gamma \mu}
\end{align}
where (a) follows from Eq.~(\ref{csaga-21}), (b) holds by applying Eq.~(\ref{csaga-21}) recursively, and (c) uses the fact that $1-\frac{\gamma\mu}{4}\leq \rho$.
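For completeness, step (c) uses $1-\frac{\gamma\mu}{4}\leq\rho$ in two places: to bound $\left(1-\frac{\gamma\mu}{4}\right)^{v(t)+1}\leq\rho^{v(t)+1}$, and through the elementary identity
\[
1+\frac{1}{1-\frac{1-\frac{\gamma\mu}{4}}{\rho}}
=1+\frac{\rho}{\rho-1+\frac{\gamma\mu}{4}}
=\frac{2\rho-1+\frac{\gamma\mu}{4}}{\rho-1+\frac{\gamma\mu}{4}},
\]
which collects the two $e(w_0)$ terms into the stated coefficient.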
According to Eq. \ref{csaga-24}, we have that
\[e\left(w_{\bar{u}_{k}}\right) \leq \frac{2 \rho-1+\frac{\gamma \mu}{4}}{\left(\rho-1+\frac{\gamma \mu}{4}\right)\left(\frac{\gamma \mu^{2}}{4}-2 c_{1}-c_{2}\right)} \rho^{v(t)+1} e\left(w_{0}\right)+\frac{4 c_{0}}{\gamma \mu(1-\rho)\left(\frac{\gamma \mu^{2}}{4}-2 c_{1}-c_{2}\right)}\]
Thus, to obtain accuracy $\epsilon$ for Problem~\ref{P} with VF{${\textbf{B}}^2$}-SAGA, we can carefully choose $\gamma$ such that
\begin{align}\label{csaga-25}
1-72L_{*}^2\gamma^2\tau>0& \\
\frac{4 c_{0}}{\gamma \mu(1-\rho)\left(\frac{\gamma \mu^{2}}{4}-2 c_{1}-c_{2}\right)} \leq \frac{\epsilon}{2} & \\
0<1-\frac{\gamma \mu}{4}<1 &\\
-\frac{\gamma \mu^{2}}{4}+2 c_{1}+c_{2}\left(1+\frac{1}{1-\frac{1-\frac{1}{n}}{\rho}}\right) \leq 0 &\\
-\frac{\gamma \mu^{2}}{4}+c_{2}+c_{1}\left(2+\frac{1}{1-\frac{1-\frac{1}{n}}{\rho}}\right) \leq 0&
\end{align}
and let $\frac{2 \rho-1+\frac{\gamma \mu}{4}}{\left(\rho-1+\frac{\gamma \mu}{4}\right)\left(\frac{\gamma \mu^{2}}{4}-2 c_{1}-c_{2}\right)} \rho^{v(t)+1} e\left(w_{0}\right) \leq \frac{\epsilon}{2}$, we have that
\begin{align}\label{csaga-26}
v(t) \geq \frac{\log \frac{2\left(2 \rho-1+\frac{\gamma \mu}{4}\right) e\left(w_{0}\right)}{\epsilon\left(\rho-1+\frac{\gamma \mu}{4}\right)\left(\frac{\gamma \mu^{2}}{4}-2 c_{1}-c_{2}\right)}}{\log \frac{1}{\rho}}
\end{align}
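For completeness, this lower bound follows by taking logarithms: since $0<\rho<1$ we have $\rho^{v(t)+1}\leq\rho^{v(t)}$, so the condition above is implied by
\[
\rho^{v(t)}\,\frac{2\rho-1+\frac{\gamma\mu}{4}}{\left(\rho-1+\frac{\gamma\mu}{4}\right)\left(\frac{\gamma\mu^{2}}{4}-2c_{1}-c_{2}\right)}\,e(w_{0})\leq\frac{\epsilon}{2},
\]
and taking logarithms and dividing by $\log\frac{1}{\rho}>0$ gives the stated bound on $v(t)$.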
This completes the proof.
\end{proof}
\section{Convergence Analyses of Nonconvex Problems}
\subsection{Proof of Theorem~\ref{thm-sgdnonconvex}}
\begin{lemma}\label{lem-ncsgd-1}
For all $t$ (whether the $t$-th global iteration is a dominated or a collaborative update), we have
\begin{equation}\label{lemeq-ncsgd-1}
\sum_{t=0}^{S-1} \mathbb{E} ||\widetilde{v}_{t}^{\psi(t)}||^2 \leq \frac{4}{1-\lambda_{1}}\sum_{t=0}^{S-1}\mathbb{E} || \widehat{v}_{t}^{\psi(t)}\|^2,
\end{equation}
where $S$ denotes the total number of iterations, $\lambda_{1}=6L_*^2\gamma^2\tau$.
\end{lemma}
\begin{proof}[\textbf{Proof of Lemma \ref{lem-ncsgd-1}:}] First, when the $t$-th global iteration corresponds to a collaborative update, we have
\begin{eqnarray}\label{sgd-1}
\mathbb{E} ||\widetilde{v}_{t}^{\psi(t)}||^2 &=& \mathbb{E} || \vartheta \cdot\left(x_{i}\right)_{\mathcal{G}_{\psi(t)}}
+ \nabla_{\mathcal{G}_{\psi(t)}} g((\widehat{w}_t)_{\mathcal{G}_{\psi(t)}} )||^2 \nonumber \\
&=& \mathbb{E} ||\vartheta \cdot\left(x_{i}\right)_{\mathcal{G}_{\psi(t)}}
+ \nabla_{\mathcal{G}_{\psi(t)}} g((\bar{w}_{t})_{\mathcal{G}_{\psi(t)}})
- \nabla_{\mathcal{G}_{\psi(t)}} g((\bar{w}_{t})_{\mathcal{G}_{\psi(t)}})
+ \nabla_{\mathcal{G}_{\psi(t)}} g((\widehat{w}_t)_{\mathcal{G}_{\psi(t)}}) ||^2 \nonumber \\
&\stackrel{(a)}{\leq}& 2 \mathbb{E} || \bar{v}_{t}^{\psi(t)}\|^2
+ 2 \mathbb{E}\|\nabla_{\mathcal{G}_{\psi(t)}} g((\bar{w}_{t})_{\mathcal{G}_{\psi(t)}})
- \nabla_{\mathcal{G}_{\psi(t)}} g((\widehat{w}_t)_{\mathcal{G}_{\psi(t)}})||^2
\nonumber \\
&\stackrel{(b)}{\leq}& 2 \mathbb{E} || \bar{v}_{t}^{\psi(t)}\|^2
+ 2{L_{g}^2} \mathbb{E}\|(\bar{w}_{t})_{\mathcal{G}_{\psi(t)}}
- (\widehat{w}_t)_{\mathcal{G}_{\psi(t)}}||^2
\nonumber \\
&\stackrel{(c)}{=}& 2 \mathbb{E} || \bar{v}_{t}^{\psi(t)}\|^2
+ 2{L_{g}^2}\gamma^2 \mathbb{E}\|\sum_{t^\prime\in D'(t), \psi(t^\prime)=\psi(t)} \widetilde{v}_{t^\prime}^{\psi(t^\prime)}||^2
\nonumber \\
&\stackrel{(d)}{\leq}& 2 \mathbb{E} || \bar{v}_{t}^{\psi(t)}\|^2
+ 2{L_{g}^2}\gamma^2 \tau_2\sum_{t^\prime\in D'(t)} \mathbb{E}\|\widetilde{v}_{t^\prime}^{\psi(t^\prime)}||^2
\nonumber \\
&\stackrel{(e)}{\leq}& 2 \mathbb{E} || \bar{v}_{t}^{\psi(t)}\|^2
+ 2{L_{*}^2}\gamma^2 \tau_2 \sum_{t^\prime\in D'(t)}\mathbb{E}\|\widetilde{v}_{t^\prime}^{\psi(t^\prime)}||^2
\end{eqnarray}
where (a) follows from $\|a+b\|^2\leq 2\|a\|^2 + 2\|b\|^2$, (b) follows from Assumption~\ref{assum2}, (c) follows from Eq.~(\ref{Dt2}), (d) follows from Assumption~\ref{assum4} and $\|\sum_{i=1}^{n}a_i\|^2 \leq n \sum_{i=1}^{n} \|a_i\|^2$, and (e) follows from the definition of $L_{*}$. Then we bound $\mathbb{E} \| \bar{v}_{t}^{\psi(t)}\|^2$ as follows
\begin{align}\label{sgd-13}
\mathbb{E} || \bar{v}_{t}^{\psi(t)}\|^2
& = \mathbb{E} || \bar{v}_{t}^{\psi(t)} - \widehat{v}_{t}^{\psi(t)}
+ \widehat{v}_{t}^{\psi(t)}\|^2
\nonumber \\
& \stackrel{(a)}{\leq} 2 \mathbb{E}\|\nabla_{\mathcal{G}_{\psi(t)}} f_{i_t}(\bar{w}_{t})
- \nabla_{\mathcal{G}_{\psi(t)}} f_{i_t}(\widehat{w}_t)\|^2
+ 2 \mathbb{E} \|\widehat{v}_{t}^{\psi(t)}\|^2
\nonumber \\
& \stackrel{(b)}{\leq} 2{L_{\psi(t)}^2} \mathbb{E}\|\bar{w}_{t}
- \widehat{w}_t||^2
+ 2 \mathbb{E} \|\widehat{v}_{t}^{\psi(t)}\|^2
\nonumber \\
& \stackrel{(c)}{\leq} 2{L_{\psi(t)}^2}\gamma^2 \mathbb{E}\|\sum_{t^\prime\in D'(t)} \widetilde{v}_{t^\prime}^{\psi(t^\prime)}||^2
+ 2\mathbb{E} \|\widehat{v}_{t}^{\psi(t)}\|^2
\nonumber \\
& \stackrel{(d)}{\leq} 2{L_{*}^2}\gamma^2 \tau_2 \sum_{t^\prime\in D'(t)}\mathbb{E}\|\widetilde{v}_{t^\prime}^{\psi(t^\prime)}||^2
+ 2\mathbb{E} \|\widehat{v}_{t}^{\psi(t)}\|^2
\end{align}
where (a) follows from $\|a+b\|^2\leq 2\|a\|^2 + 2\|b\|^2$, (b) follows from Assumption~\ref{assum2}, (c) follows from Eq.~(\ref{Dt2}), and (d) follows from the definition of $L_*$, Assumption~\ref{assum4}, and $ \|\sum_{i=1}^{n}a_i\|^2 \leq n \sum_{i=1}^{n} \|a_i\|^2$. Combining Eqs.~(\ref{sgd-1}) and (\ref{sgd-13}), we have
\begin{align}\label{sgd-14}
\mathbb{E} ||\widetilde{v}_{t}^{\psi(t)}||^2 & \leq 4 \mathbb{E} || \widehat{v}_{t}^{\psi(t)}\|^2
+ 6{L_{*}^2}\gamma^2 \tau_2 \sum_{t^\prime\in D'(t)}\mathbb{E}\|\widetilde{v}_{t^\prime}^{\psi(t^\prime)}||^2
\end{align}
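Explicitly, substituting Eq.~(\ref{sgd-13}) into Eq.~(\ref{sgd-1}) gives
\[
\mathbb{E}\|\widetilde{v}_{t}^{\psi(t)}\|^2
\leq 2\left(2L_{*}^2\gamma^2\tau_2\sum_{t^\prime\in D'(t)}\mathbb{E}\|\widetilde{v}_{t^\prime}^{\psi(t^\prime)}\|^2+2\mathbb{E}\|\widehat{v}_{t}^{\psi(t)}\|^2\right)
+2L_{*}^2\gamma^2\tau_2\sum_{t^\prime\in D'(t)}\mathbb{E}\|\widetilde{v}_{t^\prime}^{\psi(t^\prime)}\|^2,
\]
which is exactly Eq.~(\ref{sgd-14}).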
Summing Eq.~(\ref{sgd-14}) over all iterations (assuming the total number of iterations is $S$), we have
\begin{eqnarray}\label{sgd-2}
\sum_{t=0}^{S-1}\mathbb{E} ||\widetilde{v}_{t}^{\psi(t)}||^2 & \leq & 4 \sum_{t=0}^{S-1} \mathbb{E} || \widehat{v}_{t}^{\psi(t)}\|^2
+ 6{L_{*}^2}\gamma^2 \tau_2 \sum_{t=0}^{S-1} \sum_{t^\prime\in D'(t)}\mathbb{E}\|\widetilde{v}_{t^\prime}^{\psi(t^\prime)}||^2
\nonumber \\
&\stackrel{(a)}{\leq}& 4 \sum_{t=0}^{S-1} \mathbb{E} || \widehat{v}_{t}^{\psi(t)}\|^2
+ 6{L_{*}^2}\gamma^2 \tau \sum_{t=0}^{S-1} \mathbb{E}\|\widetilde{v}_{t}^{\psi(t)}||^2,
\end{eqnarray}
where (a) follows from Assumption~\ref{assum4}. When the $t$-th global iteration corresponds to a dominated update, it is obvious that
\begin{equation}\label{sgd-3}
\sum_{t=0}^{S-1} \mathbb{E} \| \widetilde{v}^{\psi(t)}_{t} \|^2 = \sum_{t=0}^{S-1} \mathbb{E} \| \widehat{v}^{\psi(t)}_{t} \|^2 < 4 \sum_{t=0}^{S-1} \mathbb{E} || \widehat{v}_{t}^{\psi(t)}\|^2
+ 6{L_{*}^2}\gamma^2 \tau \sum_{t=0}^{S-1} \mathbb{E}\|\widetilde{v}_{t}^{\psi(t)}||^2
\end{equation}
Combining Eqs.~(\ref{sgd-2}) and (\ref{sgd-3}), we have
\begin{equation}\label{sgd-4}
\sum_{t=0}^{S-1} \mathbb{E} \| \widetilde{v}^{\psi(t)}_{t} \|^2 < 4 \sum_{t=0}^{S-1} \mathbb{E} || \widehat{v}_{t}^{\psi(t)}\|^2
+ 6{L_{*}^2}\gamma^2 \tau \sum_{t=0}^{S-1} \mathbb{E}\|\widetilde{v}_{t}^{\psi(t)}||^2
\end{equation}
whether the $t$-th global iteration corresponds to a collaborative or a dominated update. Moving the last term of Eq.~(\ref{sgd-4}) to the left-hand side, this implies that, if $1-\lambda_1 > 0$, we have
\begin{equation}\label{sgd-5}
\sum_{t=0}^{S-1} \mathbb{E} \| \widetilde{v}^{\psi(t)}_{t} \|^2 < \frac{4}{1-6{L_{*}^2}\gamma^2 \tau} \sum_{t=0}^{S-1} \mathbb{E} || \widehat{v}_{t}^{\psi(t)}\|^2 = \frac{4}{1-\lambda_1} \sum_{t=0}^{S-1} \mathbb{E} || \widehat{v}_{t}^{\psi(t)}\|^2
\end{equation}
where $\lambda_1 = 6{L_{*}^2}\gamma^2 \tau$. This completes the proof.
\end{proof}
\begin{lemma}\label{lem-ncsgd-2}
For all $t$ (whether the $t$-th global iteration is a dominated or a collaborative update), we have
\begin{equation}\label{lemeq-ncsgd-2}
\mathbb{E} \| v_{t}^{\psi(t)} - \widetilde{v}_{t}^{\psi(t)} \|^2 \leq 2{ L_{{*}}^2 \gamma^2 \tau_1} \sum_{t' \in D(t)} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2
+ 8 { L_{*}^2 \gamma^2 \tau_2 \sum_{t' \in D^\prime(t)}} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2.
\end{equation}
\end{lemma}
\begin{proof}[\textbf{Proof of Lemma \ref{lem-ncsgd-2}:}]
First, we bound $\mathbb{E} \| \widehat{v}_{t}^{\psi(t)} - \widetilde{v}_{t}^{\psi(t)} \|^2$ as follows
\begin{align}\label{2-1}
\mathbb{E} \| \widehat{v}_{t}^{\psi(t)} - \widetilde{v}_{t}^{\psi(t)} \|^2
& \stackrel{ (a) }{\leq} \mathbb{E} \| \nabla_{\mathcal{G}_{\psi(t)}} f(\bar{w}_t) - \nabla_{\mathcal{G}_{\psi(t)}} f(\widehat{w}_t) + \nabla_{\mathcal{G}_{\psi(t)}} g((\widehat{w}_t)_{\mathcal{G}_{\psi(t)}}) - \nabla_{\mathcal{G}_{\psi(t)}} g((\bar{w}_t)_{\mathcal{G}_{\psi(t)}})\|^2
\nonumber \\
& \stackrel{ (c) }{\leq} 2\mathbb{E} \| \nabla_{\mathcal{G}_{\psi(t)}} f_{i_t} (\bar{w}_t) - \nabla_{\mathcal{G}_{\psi(t)}} f_{i_t} (\widehat{w}_t) \|^2
+ 2 \mathbb{E} \| \nabla_{\mathcal{G}_{\psi(t)}} g ((\bar{w}_t)_{\mathcal{G}_\psi(t)})
- \nabla_{\mathcal{G}_{\psi(t)}} g ((\widehat{w}_t))_{\mathcal{G}_\psi(t)} \|^2
\nonumber \\
& \stackrel{ (d) }{\leq} 2{L^2} \mathbb{E} \| \bar{w}_t - \widehat{w}_t \|^2
+ 2{L_{g}^2} \mathbb{E} \| (\bar{w}_t)_{\mathcal{G}_\psi(t)} - (\widehat{w}_t)_{\mathcal{G}_\psi(t)} \|^2
\nonumber \\
& \stackrel{ (e) }{=} 2{ L^2 \gamma^2} \mathbb{E} \| \sum_{t' \in D'(t)} \textbf{U}_{\psi(t')} \widetilde{v}^{\psi(t')}_{t'} \|^2 + 2{ L_{g}^2 \gamma^2 } \mathbb{E} \| \sum_{t' \in D^\prime(t), \psi(t^\prime)=\psi(t)} \textbf{U}_{\psi(t')} \widetilde{v}^{\psi(t')}_{t'} \|^2
\nonumber \\ & \stackrel{ (f) }{\leq} 2{ L^2 \gamma^2 \tau_2} \sum_{t' \in D'(t)} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2 + 2{ L_{g}^2 \gamma^2 \tau_2 \sum_{t' \in D^\prime(t)}} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2
\nonumber \\ & \stackrel{ (g) }{\leq} 4{ L_{{*}}^2 \gamma^2 \tau_2} \sum_{t' \in D'(t)} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2
\end{align}
where (a) follows from the definitions of $\widehat{v}_{t}^{\psi(t)}$ and $\widetilde{v}_{t}^{\psi(t)}$ in the different types of updates (dominated or collaborative), (c) follows from $\|a+b\|^2 \leq 2\|a\|^2 + 2\|b\|^2$, (d) follows from Assumption~\ref{assum2}, (e) follows from Eqs.~(\ref{Dt1}) and (\ref{Dt2}), (f) follows from Assumption~\ref{assum4} and $\| \sum_{i=1}^{n} a_i \|^2 \leq n\sum_{i=1}^{n} \|a_i\|^2$, and (g) follows from the definition of $L_{*}$. Then we consider the bound
\begin{align}\label{2}
\mathbb{E} \| v_{t}^{\psi(t)} - \widetilde{v}_{t}^{\psi(t)} \|^2 & = \mathbb{E} \| v_{t}^{\psi(t)} - \widehat{v}_{t}^{\psi(t)} + \widehat{v}_{t}^{\psi(t)} - \widetilde{v}_{t}^{\psi(t)} \|^2
\nonumber \\
& \stackrel{ (a) }{\leq} 2\mathbb{E} \| v_{t}^{\psi(t)} - \widehat{v}_{t}^{\psi(t)}\|^2+ 2 \mathbb{E} \| \widehat{v}_{t}^{\psi(t)} - \widetilde{v}_{t}^{\psi(t)} \|^2
\nonumber \\
& \leq 2\mathbb{E} \| \nabla_{\mathcal{G}_{\psi(t)}} f_{i_t} ({w}_t) - \nabla_{\mathcal{G}_{\psi(t)}} f_{i_t} (\widehat{w}_t) \|^2
+ 2 \mathbb{E} \| \widehat{v}_{t}^{\psi(t)} - \widetilde{v}_{t}^{\psi(t)} \|^2
\nonumber \\
& \stackrel{ (b) }{\leq} 2{L^2} \mathbb{E} \| {w}_t - \widehat{w}_t \|^2
+ 2 \mathbb{E} \| \widehat{v}_{t}^{\psi(t)} - \widetilde{v}_{t}^{\psi(t)} \|^2
\nonumber \\
& \stackrel{ (c) }{=} 2{ L^2 \gamma^2} \mathbb{E} \| \sum_{t' \in D(t)} \textbf{U}_{\psi(t')} \widetilde{v}^{\psi(t')}_{t'} \|^2
+2 \mathbb{E} \| \widehat{v}_{t}^{\psi(t)} - \widetilde{v}_{t}^{\psi(t)} \|^2
\nonumber \\
& \stackrel{ (d) }{\leq} 2{ L^2 \gamma^2 \tau_1} \sum_{t' \in D(t)} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2
+ 2 \mathbb{E} \| \widehat{v}_{t}^{\psi(t)} - \widetilde{v}_{t}^{\psi(t)} \|^2
\nonumber \\
& \stackrel{ (e) }{\leq} 2{ L_{{*}}^2 \gamma^2 \tau_1} \sum_{t' \in D(t)} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2
+ 8{ L_{*}^2 \gamma^2 \tau_2 \sum_{t' \in D^\prime(t)}} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2
\end{align}
where (a) follows from $\|a+b\|^2 \leq 2\|a\|^2 + 2\|b\|^2$, (b) follows from Assumption~\ref{assum2}, (c) follows from Eqs.~(\ref{Dt1}) and (\ref{Dt2}), (d) follows from Assumption~\ref{assum4} and $\| \sum_{i=1}^{n} a_i \|^2 \leq n\sum_{i=1}^{n} \|a_i\|^2$, and (e) follows from the definition of $L_{*}$ and Eq.~(\ref{2-1}). This completes the proof.
\end{proof}
\begin{lemma}\label{lem-ncsgd-3}
Assuming $S=qc$, where $c>0$ is an integer, we have
\begin{align}\label{lemeq-ncsgd-3}
\sum_{t \in \mathcal{A}(S)}\mathbb{E} \| \nabla f({w}_{t})\|^2
\leq 2 L_*^2 \gamma^2 \eta_2^2 \sum_{u=0}^{S-1} \mathbb{E} \|\widetilde{v}_{u}^{\psi(u)}\|^2 + 2 \sum_{u=0}^{S-1} \mathbb{E} \|\nabla_{\mathcal{G}_{\psi(u)}} f({w}_{u})\|^2
\end{align}
\end{lemma}
\begin{proof}[\textbf{Proof of Lemma \ref{lem-ncsgd-3}:}]
Let $t$ denote a global iteration number. If the $t$-th global iteration is a dominated one, then for any $t' \in K'(t)$ we have
\begin{align}\label{17}
\mathbb{E} \| \nabla_{\mathcal{G}_{\psi(t')}} f({w}_{t})\|^2
& = \mathbb{E} \| \nabla_{\mathcal{G}_{\psi(t')}} f({w}_{t}) - \nabla_{\mathcal{G}_{\psi(t')}} f({w}_{t'}) + \nabla_{\mathcal{G}_{\psi(t')}} f({w}_{t'})\|^2
\nonumber \\
& \stackrel{(a)}{\leq} 2\mathbb{E} \| \nabla_{\mathcal{G}_{\psi(t')}} f({w}_{t}) - \nabla_{\mathcal{G}_{\psi(t')}} f({w}_{t'})\|^2
+ 2 \mathbb{E} \|\nabla_{\mathcal{G}_{\psi(t')}} f({w}_{t'})\|^2
\nonumber \\
& \leq 2\mathbb{E} \| \nabla f({w}_{t}) - \nabla f({w}_{t'})\|^2
+ 2 \mathbb{E} \|\nabla_{\mathcal{G}_{\psi(t')}} f({w}_{t'})\|^2
\nonumber \\
& \stackrel{(b)}{\leq} 2 L^2 \mathbb{E} \| {w}_{t} - {w}_{t'}\|^2
+ 2 \mathbb{E} \|\nabla_{\mathcal{G}_{\psi(t')}} f({w}_{t'})\|^2
\nonumber \\
& \stackrel{(c)}{=} 2 L^2 \gamma^2 \mathbb{E} \|\sum_{u \in \{t,...,t'\}}\textbf{U}_{\psi(u)}\widetilde{v}_{u}^{\psi(u)}\|^2
+ 2 \mathbb{E} \|\nabla_{\mathcal{G}_{\psi(t')}} f({w}_{t'})\|^2
\nonumber \\
& \stackrel{(d)}{\leq} 2 L^2 \gamma^2 \eta_2 \sum_{u \in \{t,...,t'\}} \mathbb{E} \|\widetilde{v}_{u}^{\psi(u)}\|^2
+ 2 \mathbb{E} \|\nabla_{\mathcal{G}_{\psi(t')}} f({w}_{t'})\|^2
\end{align}
where (a) follows from $\|a+b\|^2 \leq 2\|a\|^2 + 2\|b\|^2$, (b) follows from Assumption~\ref{assum2}, (c) follows from Eq.~(\ref{Dt1}), and (d) follows from the bound of $|K'(t)|$ and $\| \sum_{i=1}^{n} a_i \|^2 \leq n\sum_{i=1}^{n} \|a_i\|^2$. Summing the above over $t' \in K'(t)$ and all $t \in \mathcal{A}(S)$, we have
\begin{align}\label{189}
\sum_{t \in \mathcal{A}(S)}\sum_{t' \in K'(t)}\mathbb{E} \| \nabla_{\mathcal{G}_{\psi(t')}} f({w}_{t})\|^2
&\leq 2 L^2 \gamma^2 \eta_2 \sum_{t \in \mathcal{A}(S)} \sum_{t' \in K'(t)} \sum_{u \in \{t,...,t'\}} \mathbb{E} \|\widetilde{v}_{u}^{\psi(u)}\|^2
\nonumber \\
& + 2 \sum_{t \in \mathcal{A}(S)} \sum_{t' \in K'(t)} \mathbb{E} \|\nabla_{\mathcal{G}_{\psi(t')}} f({w}_{t'})\|^2
\nonumber \\
& \stackrel{(a)}{\leq} 2 L_*^2 \gamma^2 \eta_2^2 \sum_{t=0}^{S-1} \mathbb{E} \|\widetilde{v}_{t}^{\psi(t)}\|^2 + 2 \sum_{t=0}^{S-1} \mathbb{E} \|\nabla_{\mathcal{G}_{\psi(t)}} f({w}_{t})\|^2
\end{align}
where (a) follows from the bound of $|K'(t)|$ and the definition of $\mathcal{A}(S)$. For all $u \in \mathcal{A}(S)$, we have
\begin{align}\label{111}
\mathbb{E} \|\nabla f({w}_{u})\|^2& = \mathbb{E} \|\sum_{u'\in K'(u)}\nabla_{\mathcal{G}_{\psi(u')}} f({w}_{u})\|^2
\stackrel{(a)} {=} \sum_{u'\in K'(u)} \mathbb{E} \|\nabla_{\mathcal{G}_{\psi(u')}} f({w}_{u})\|^2
\end{align}
where (a) follows from the orthogonality of the coordinate blocks (the blocks $\{\mathcal{G}_{\psi(u')}\}_{u'\in K'(u)}$ are disjoint, so the cross terms vanish). Combining Eqs.~(\ref{189}) and (\ref{111}), we have
\begin{align}\label{188}
\sum_{u \in \mathcal{A}(S)}\mathbb{E} \| \nabla f({w}_{u})\|^2
{\leq} 2 L_*^2 \gamma^2 \eta_2^2 \sum_{t=0}^{S-1} \mathbb{E} \|\widetilde{v}_{t}^{\psi(t)}\|^2 + 2 \sum_{t=0}^{S-1} \mathbb{E} \|\nabla_{\mathcal{G}_{\psi(t)}} f({w}_{t})\|^2
\end{align}
This completes the proof.
\end{proof}
\begin{proof}[\textbf{Proof of Theorem \ref{thm-sgdnonconvex}:}]
For any global iteration $t$, we have that
\begin{eqnarray}\label{ncsgd-5}
&& \mathbb{E} f (w_{t+1})
\\ \nonumber &\stackrel{ (a) }{\leq}& \mathbb{E} \left ( f (w_{t}) + \langle \nabla f(w_{t}), w_{t+1}-w_{t} \rangle + \frac{L}{2} \|w_{t+1}-w_{t} \|^2 \right )
\\ \nonumber &=& \mathbb{E} \left ( f (w_{t}) - \gamma \langle \nabla f(w_{t}), \widetilde{v}^{\psi(t)}_t \rangle + \frac{L\gamma^2}{2} \| \widetilde{v}^{\psi(t)}_t \|^2 \right )
\\ \nonumber &{=}& \mathbb{E} \left ( f (w_{t}) - \gamma \langle \nabla f(w_{t}), \widetilde{v}^{\psi(t)}_t + {v}^{\psi(t)}_t- {v}^{\psi(t)}_t \rangle + \frac{L \gamma^2}{2} \| \widetilde{v}^{\psi(t)}_t \|^2 \right )
\\ \nonumber &\stackrel{(b)}{=}& \mathbb{E} f (w_{t}) - \gamma \mathbb{E} \langle \nabla f(w_{t}), \nabla_{\mathcal{G}_{\psi(t)}} f ({w}_t) \rangle + \frac{L \gamma^2}{2} \mathbb{E} \| \widetilde{v}^{\psi(t)}_t \|^2 + \gamma \mathbb{E} \langle \nabla f(w_{t}), {v}^{\psi(t)}_t - \widetilde{v}^{\psi(t)}_t \rangle
\\ \nonumber &\stackrel{ (c) }{\leq}& \mathbb{E} f (w_{t}) - \gamma \mathbb{E} \| \nabla_{\mathcal{G}_{\psi(t)}} f ({w}_t) \|^2 + \frac{\gamma}{2} \mathbb{E} \| \nabla_{\mathcal{G}_{\psi(t)}} f ({w}_t) \|^2 + \frac{L \gamma^2}{2} \mathbb{E} \| \widetilde{v}^{\psi(t)}_t \|^2 + \frac{\gamma}{2} \mathbb{E} \| \widetilde{v}^{\psi(t)}_t - {v}^{\psi(t)}_t \|^2
\\ &\stackrel{(d)}{\leq}& \mathbb{E} f (w_{t}) - \frac{\gamma}{2} \mathbb{E} \| \nabla_{\mathcal{G}_{\psi(t)}} f ({w}_t) \|^2 + \frac{L_* \gamma^2}{2} \mathbb{E} \| \widetilde{v}^{\psi(t)}_t \|^2 \nonumber \\
& &+ { L_{*}^2 \gamma^3 \tau_1} \sum_{t' \in D(t)} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2
+ 4{ L_{*}^2 \gamma^3} \tau_2 \sum_{t' \in D^\prime(t)} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2 \nonumber
\end{eqnarray}
where (a) follows from Assumption~\ref{assum1}, (b) follows from the fact that ${v}^{\psi(t)}_t = \nabla_{\mathcal{G}_{\psi(t)}} f_{i_t} ({w}_t)$ for a specific party and $\mathbb{E}_{i_t}[\nabla_{\mathcal{G}_{\psi(t)}} f_{i_t} ({w}_t)]=\nabla_{\mathcal{G}_{\psi(t)}} f({w}_t)$, (c) follows from $\langle a,b \rangle\leq \frac{1}{2}(\|a\|^2+\|b\|^2)$, and (d) follows from Lemma~\ref{lem-ncsgd-2} and the definition of $L_*$.
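For completeness, the inner-product step hidden in (c) reads as follows: since $v_{t}^{\psi(t)}-\widetilde{v}_{t}^{\psi(t)}$ is supported on the coordinate block $\mathcal{G}_{\psi(t)}$,
\[
\gamma\,\mathbb{E}\langle\nabla f(w_{t}),\,v_{t}^{\psi(t)}-\widetilde{v}_{t}^{\psi(t)}\rangle
=\gamma\,\mathbb{E}\langle\nabla_{\mathcal{G}_{\psi(t)}}f(w_{t}),\,v_{t}^{\psi(t)}-\widetilde{v}_{t}^{\psi(t)}\rangle
\leq\frac{\gamma}{2}\,\mathbb{E}\|\nabla_{\mathcal{G}_{\psi(t)}}f(w_{t})\|^2
+\frac{\gamma}{2}\,\mathbb{E}\|\widetilde{v}_{t}^{\psi(t)}-v_{t}^{\psi(t)}\|^2.
\]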
Summing Eq.~(\ref{ncsgd-5}) over all $ 0\leq t \leq S-1 $, we obtain
\begin{eqnarray}\label{ncsgd-6}
&& \frac{\gamma}{2} \sum_{t=0}^{S-1} \mathbb{E} \| \nabla_{\mathcal{G}_{\psi(t)}} f ({w}_t) \|^2
\\ \nonumber &\leq& \sum_{t=0}^{S-1}\mathbb{E} \left[f (w_{t}) - f (w_{t+1}) \right] + {L_{*}^2 \gamma^3}\tau_1 \sum_{t=0}^{S-1} \sum_{t' \in D(t)} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2
\\ \nonumber &&
+ 4 { L_{*}^2 \gamma^3} \tau_2 \sum_{t=0}^{S-1} \sum_{t' \in D^\prime(t)} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2
+ \frac{L_* \gamma^2}{2} \sum_{t=0}^{S-1} \mathbb{E} \| \widetilde{v}^{\psi(t)}_t \|^2
\end{eqnarray}
Note that $\nabla_{\mathcal{G}_{\psi(t)}} f ({w}_t)$ is the gradient of coordinate $\psi(t)$, while to obtain the global convergence rate it is necessary to focus on the gradient of all coordinates \emph{i.e.,} $\nabla f ({w}_t)$.
Combining Eq.~\ref{ncsgd-6} with Lemma~\ref{lem-ncsgd-3}, there is
\begin{align}\label{18}
\sum_{u \in \mathcal{A}(S)}\mathbb{E} \| \nabla f({w}_{u})\|^2
&\stackrel{(a)}{\leq} \sum_{t=0}^{S-1} \frac{4\mathbb{E} \left[ f (w_{t}) - f (w_{t+1}) \right]}{\gamma}
+ {2L_* \gamma} \sum_{t=0}^{S-1} \mathbb{E} \| \widetilde{v}^{\psi(t)}_t \|^2
\nonumber \\
& + 2 L_*^2 \gamma^2 (2\tau_1^2 + 8\tau_2^2+\eta_2^2) \sum_{t=0}^{S-1} \mathbb{E} \| \widetilde{v}^{\psi(t)}_{t} \|^2
\nonumber \\
& \stackrel{(b)}{\leq} \frac{4\mathbb{E} \left[ f (w^0) - f (w^*) \right]}{\gamma}
+ (22 L_*^2 \gamma^2 \tau + 2L_*\gamma) \sum_{t=0}^{S-1} \mathbb{E} \| \widetilde{v}^{\psi(t)}_{t} \|^2
\nonumber \\
& \stackrel{(c)}{\leq} \frac{4\mathbb{E} \left[ f (w^0) - f (w^*) \right]}{\gamma}
+ \sum_{t=0}^{S-1} (22 L_*^2 \gamma^2 \tau + 2L_*\gamma) \frac{4}{1-6L_*^2\gamma^2\tau} {G}
\end{align}
where (a) follows from Assumption~\ref{assum4}, (b) follows from the definition of $\tau$, and (c) follows from Lemma~\ref{lem-ncsgd-1} together with the boundedness of $\mathbb{E}\|\widehat{v}_{t}^{\psi(t)}\|^2$ by $G$. This implies that
\begin{align}\label{sgd-8}
\frac{1}{S} \sum_{u \in \mathcal{A}(S)}\mathbb{E} \| \nabla f({w}_{u})\|^2
\leq \frac{ 4 \mathbb{E}\left[ f (w^0) - f (w^*) \right] }{S\gamma} + (22 L_*^2 \gamma^2 \tau + 2L_*\gamma) \frac{4}{1-6L_*^2\gamma^2\tau} {G}
\end{align}
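In particular, the bound on $\sum_{t=0}^{S-1}\mathbb{E}\|\widetilde{v}_{t}^{\psi(t)}\|^2$ invoked at step (c) of Eq.~(\ref{18}) takes the form
\[
\sum_{t=0}^{S-1}\mathbb{E}\|\widetilde{v}_{t}^{\psi(t)}\|^2
\leq\frac{4}{1-6L_{*}^2\gamma^2\tau}\sum_{t=0}^{S-1}\mathbb{E}\|\widehat{v}_{t}^{\psi(t)}\|^2
\leq\frac{4SG}{1-6L_{*}^2\gamma^2\tau},
\]
where the first inequality is Lemma~\ref{lem-ncsgd-1} and the second uses the boundedness of $\mathbb{E}\|\widehat{v}_{t}^{\psi(t)}\|^2$ by $G$.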
Note that for $S=qc$, where $c$ is an integer, we have $|\mathcal{A}(S)| = \frac{S}{q}$, and then
\begin{align}\label{sgd-113}
\frac{1}{|\mathcal{A}(S)|} \sum_{u \in \mathcal{A}(S)}\mathbb{E} \| \nabla f({w}_{u})\|^2
\leq \frac{4\mathbb{E}\left[ f (w^0) - f (w^*) \right] }{|\mathcal{A}(S)|\gamma}
+ (22 L_*^2 \gamma^2 \tau + 2L_*\gamma) \frac{4}{1-6L_*^2\gamma^2\tau} {G_1},
\end{align}
where $G_1=qG$.
To obtain the $\epsilon$-first-order stationary solution one can choose suitable $\gamma$, such that
\begin{align}\label{ncsgd-91-1}
1-6L_*^2\gamma^2\tau & >0
\end{align}
\begin{align}\label{ncsgd-91-2}
\frac{4\mathbb{E}\left[ f (w^0) - f (w^*) \right] }{|\mathcal{A}(S)|\gamma} & \leq \frac{\epsilon}{2}
\end{align}
\begin{align}\label{ncsgd-91-3}
(22 L_*^2 \gamma^2 \tau + 2L_*\gamma) \frac{4}{1-6L_*^2\gamma^2\tau }{G_1} & \leq \frac{\epsilon}{2}
\end{align}
which implies that if $\tau$ is upper bounded, i.e., $\tau \leq \frac{512qG}{3\epsilon^2}$ (one can obtain this by combining Eqs.~(\ref{ncsgd-91-1}) and (\ref{ncsgd-91-3}), assuming Eq.~(\ref{ncsgd-91-2}) holds), we can carefully choose the stepsize as
\[ \gamma = \frac{\epsilon}{32L_*G}\]
and if the total number of epochs (i.e., $v'(S)$) of the $S$ global iterations, denoted by $T$, satisfies
\begin{equation}\label{sgd-10}
T\geq \frac{256L_*qG\mathbb{E}(f(w_0)-f(w^*))}{\epsilon^2}
\end{equation}
the $\epsilon$-first-order stationary solution is obtained:
\begin{align}\label{sgd-9}
\frac{1}{T} \sum_{t=0}^{T-1}\mathbb{E} \| \nabla f({w}_{t})\|^2 \leq \epsilon
\end{align}
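As a quick check of this stepsize choice, condition (\ref{ncsgd-91-2}) can be rewritten as $|\mathcal{A}(S)|\geq \frac{8\,\mathbb{E}\left[f(w^0)-f(w^*)\right]}{\gamma\epsilon}$; substituting $\gamma=\frac{\epsilon}{32L_*G}$ gives
\[
|\mathcal{A}(S)| \geq \frac{256\,L_{*}G\,\mathbb{E}\left[f(w^0)-f(w^*)\right]}{\epsilon^{2}}.
\]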
This completes the proof.
\end{proof}
\subsection{Proof of Theorem~\ref{thm-svrgnonconvex}}
\begin{lemma}\label{lem-ncsvrg-1}
For each outer loop $s = 1,\cdots,S$, we define $\mathcal{A'}(s)$ as the set of all epochs during this outer loop. Then we have
\begin{equation}\label{ncsvrg-lem1}
\sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)} \mathbb{E} ||\widetilde{v}_{t}^{\psi(t)}||^2 \leq \lambda_{\gamma} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)} \mathbb{E} || {v}_{t}^{\psi(t)}\|^2,
\end{equation}
where $\lambda_{\gamma}=\frac{2}{1 - 20 L_{*}^2\gamma^2\tau} > 0$.
\end{lemma}
\begin{proof}[\textbf{Proof of Lemma~\ref{lem-ncsvrg-1}:}]
First, we have
\begin{eqnarray}\label{ncsvrg-1}
\mathbb{E} ||\widetilde{v}_{t}^{{\psi(t)}}||^2
&=& \mathbb{E} || \widetilde{v}^{{\psi(t)}}_t - {v}^{{\psi(t)}}_t + {v}^{{\psi(t)}}_t||^2 \\
&\stackrel{(a)}{\leq}& 2 \mathbb{E} ||\widetilde{v}^{{\psi(t)}}_t - {v}^{{\psi(t)}}_t ||^2
+ 2 \mathbb{E} ||{v}^{{\psi(t)}}_t||^2
\nonumber \\
&\stackrel{(b)}{\leq}&
2\left( 2{ L_{{*}}^2 \gamma^2 \tau_1} \sum_{t' \in D(t)} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2
+ 8 { L_{*}^2 \gamma^2 \tau_2 \sum_{t' \in D^\prime(t)}} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2 \right) + 2 \mathbb{E} ||{v}^{{\psi(t)}}_t||^2 \nonumber
\end{eqnarray}
where (a) follows from $\|a+b\|^2\leq 2\|a\|^2+ 2\|b\|^2$, (b) follows from Lemma~\ref{lem-csgd-2}.
Summing Eq.~(\ref{ncsvrg-1}) over an outer loop $s$, we have
\begin{equation}\label{ncsvrg-5}
\sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)}\mathbb{E} ||\widetilde{v}_{t}^{\psi(t)}||^2 \stackrel{(a)}
{\leq}
\frac{2}{1-20L_*^2\gamma^2\tau} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)} \mathbb{E} ||{v}^{{\psi(t)}}_t ||^2
\end{equation}
where (a) uses Assumption~\ref{assum4} and the definition of $\tau$.
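More explicitly, step (a) is obtained by moving the $\widetilde{v}$ terms to the left-hand side: summing Eq.~(\ref{ncsvrg-1}) and controlling the delayed index sets via Assumption~\ref{assum4} gives an inequality of the form
\[
X\leq 20L_{*}^2\gamma^2\tau\,X+2Y,
\qquad
X=\sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)}\mathbb{E}\|\widetilde{v}_{t}^{\psi(t)}\|^2,
\quad
Y=\sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)}\mathbb{E}\|{v}_{t}^{\psi(t)}\|^2,
\]
and rearranging yields $X\leq\frac{2}{1-20L_{*}^2\gamma^2\tau}\,Y$ whenever $1-20L_{*}^2\gamma^2\tau>0$.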
Thus, if $\lambda_{\gamma}=\frac{2}{1 - 20 L_{*}^2\gamma^2\tau} > 0$, then $\mathbb{E}||\widetilde{v}_t^{{\psi(t)}}||^2$ is upper bounded:
\begin{eqnarray}\label{ncsvrg-6}
\sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)} \mathbb{E} ||\widetilde{v}^{{\psi(t)}}_t||^2
&\leq& \lambda_{\gamma} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)}\mathbb{E} ||{v}^{{\psi(t)}}_t ||^2
\end{eqnarray}
This completes the proof.
\end{proof}
\begin{proof}[\textbf{Proof of Theorem~\ref{thm-svrgnonconvex}:}]
Similarly to the proof of Theorem~\ref{thm-sgdnonconvex}, we first apply Lemma~\ref{lem-csgd-3} to an epoch (or an outer loop) $s$, which gives
\begin{align}\label{ncsvrg-7}
\sum\limits_{u\in \mathcal{A'}(s)} \mathbb{E} \| \nabla f({w}_{u})\|^2
\stackrel{(a)}{\leq}
2 L_*^2 \gamma^2 \tau_1^2 \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)} \mathbb{E} \|\widetilde{v}_{t}^{\psi(t)}\|^2
+ 2 \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)} \mathbb{E} \|\nabla_{\mathcal{G}_{\psi(t)}} f({w}_{t})\|^2
\end{align}
Summing Eq.~\ref{ncsvrg-7} over outer loops $1, \cdots, S$ we have
\begin{align}\label{ncsvrg-8}
\sum_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)} \mathbb{E} \| \nabla_{\mathcal{G}_{\psi(u_t)}} f({w}_{u_0}^s)\|^2
{\leq}
& 2 L_*^2 \gamma^2 \tau_1^2 \sum_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)} \mathbb{E} \|\widetilde{v}_{u_t}^{\psi(u_t)}\|^2
\nonumber \\
& + 2 \sum_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)} \mathbb{E} \|\nabla_{\mathcal{G}_{\psi(u_t)}} f({w}_{u_t}^s)\|^2
\end{align}
Then we bound the R.H.S. as follows.
First, we consider the bound of $\mathbb{E} \| {v}^{{\psi(t)}}_t \|^2$, and define
\begin{equation}\label{ncsvrg-9}
\zeta_{t}^{s}=\nabla_{\mathcal{G}_{\psi(t)}} f_{i_t}\left({w}_{t}^{s}\right)-\nabla_{\mathcal{G}_{\psi(t)}} f_{i_t}\left({w}^s\right)
\end{equation}
where $w_t^s$ denotes $w_t$ at outer loop $s$. From the definition of $v_t^{\psi(t)}$ one can get:
\begin{equation}\label{ncsvrg-10}
\begin{array}{l}{\mathbb{E}\left\|v_{t}^{{\psi(t)}}\right\|^{2}
=\mathbb{E}\left\|\zeta_{t}^{s}+\nabla_{\mathcal{G}_{\psi(t)}} f\left({w}^s\right)\right\|^{2}} \\
{=\mathbb{E}\left\|\zeta_{t}^{s}+\nabla_{\mathcal{G}_{\psi(t)}} f\left({w}^s\right)-\nabla_{\mathcal{G}_{\psi(t)}} f\left(w_{t}^{s}\right)+\nabla_{\mathcal{G}_{\psi(t)}} f\left(w_{t}^{s}\right)\right\|^{2} } \\
{\stackrel{(a)}{\leq} 2 \mathbb{E}\left\|\nabla_{\mathcal{G}_{\psi(t)}} f\left(w_{t}^{s}\right)\right\|^{2}+2 \mathbb{E}\left\|\zeta_{t}^{s}-\mathbb{E}\zeta_{t}^{s}\right\|^{2}} \\ {=2 \mathbb{E}\left\|\nabla_{\mathcal{G}_{\psi(t)}} f\left(w_{t}^{s}\right)\right\|^{2}+2 \mathbb{E}\left\|\left(\nabla_{\mathcal{G}_{\psi(t)}} f_{i_t}\left(w_{t}^{s}\right)-\nabla_{\mathcal{G}_{\psi(t)}} f_{i_t}\left({w}^s\right)-\mathbb{E}\zeta_{t}^{s}\right)\right\|^{2}}\end{array}
\end{equation}
where (a) follows from $\|a+b\|^2\leq 2\|a\|^2+ 2\|b\|^2$ and the fact that $\mathbb{E}\left[\zeta_{t}^{s}\right]=\nabla_{\mathcal{G}_{\psi(t)}} f\left(w_{t}^{s}\right)- \nabla_{\mathcal{G}_{\psi(t)}} f\left({w}^s\right)$. From the above equality, we have
\begin{equation}\label{ncsvrg-11}
\begin{array}{l}{\mathbb{E}\left\|v_{t}^{{\psi(t)}}\right\|^{2}} \\ {\stackrel{(a)}{\leq} 2 \mathbb{E}\left\|\nabla_{\mathcal{G}_{\psi(t)}} f\left(w_{t}^{s}\right)\right\|^{2}+{2} \mathbb{E}\left\|\nabla_{\mathcal{G}_{\psi(t)}} f_{i_t}\left(w_{t}^{s}\right)-\nabla_{\mathcal{G}_{\psi(t)}} f_{i_t}\left({w}^{s}\right)\right\|^{2}} \\ {\stackrel{(b)}{\leq}2 \mathbb{E}\left\|\nabla_{\mathcal{G}_{\psi(t)}} f\left(w_{t}^{s}\right)\right\|^{2}+{2 L_*^{2}} \mathbb{E}\left\|w_{t}^{s}-{w}^{s}\right\|^{2}}\end{array}
\end{equation}
where (a) follows from the fact that $\mathbb{E}\|\zeta-\mathbb{E}[\zeta]\|^2\leq \mathbb{E}\|\zeta\|^2$, and (b) follows from Assumption~\ref{assum2}. We define $\widetilde{\nabla}_{\mathcal{G}_{\psi(t)}}^s = \nabla_{\mathcal{G}_{\psi(t)}} \mathcal{L} (\bar{w}_{t}^s) + \nabla_{\mathcal{G}_{\psi(t)}} g ((\widehat{w}_t^s)_{\mathcal{G}_{\psi(t)}})
= \vartheta_1 \cdot (x_i)_{\mathcal{G}_{\psi(t)}}
+ \nabla_{\mathcal{G}_{\psi(t)}} g ((\widehat{w}_t^s)_{\mathcal{G}_{\psi(t)}})$ when the $t$-th global iteration is a collaborative update, while $\widetilde{\nabla}_{\mathcal{G}_{\psi(t)}}^s = \nabla_{\mathcal{G}_{\psi(t)} } f(\widehat{w}_t^s)$ when it is a dominated update.
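For completeness, the variance bound used at (a) follows from
\[
\mathbb{E}\|\zeta-\mathbb{E}[\zeta]\|^2=\mathbb{E}\|\zeta\|^2-\|\mathbb{E}[\zeta]\|^2\leq\mathbb{E}\|\zeta\|^2.
\]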
Then we derive the upper bound of $\mathbb{E} ||w_{t+1}^{s} - {w}^s ||^2 $
\begin{align}\label{ncsvrg-12}
\mathbb{E} ||w^s_{t+1} - w^s||^2 &
= \mathbb{E} ||w^s_{t+1}- w^s_t + w^s_t - w^s||^2
\nonumber \\
&= \mathbb{E} ||w^s_{t+1}-w^s_t ||^2 + \mathbb{E}||w^s_t - w^s||^2 + 2\mathbb{E}\left<w^s_{t+1}- w^s_t, w^s_t-w^s\right>
\nonumber \\
&= \gamma^2 \mathbb{E} ||\widetilde{v}^{{\psi(t)}}_t ||^2 + \mathbb{E} ||w^s_t - w^s||^2
- 2\gamma \mathbb{E}\left< \widetilde{\nabla}_{\mathcal{G}_{\psi(t)}}^s , w^s_t - w^s\right>
\nonumber \\
&\stackrel{(a)}{\leq} \gamma^2\mathbb{E} ||\widetilde{v}^{{\psi(t)}}_t||^2
+ \mathbb{E} ||w^s_t - w^s||^2 + 2\gamma \mathbb{E}\left[ \frac{1}{2\beta_t}||\widetilde{\nabla}_{\mathcal{G}_{\psi(t)}}^s ||^2 + \frac{\beta_t}{2}||w^s_t - w^s||^2 \right]
\nonumber \\
&= \gamma^2\mathbb{E} ||\widetilde{v}^{{\psi(t)}}_t||^2
+ \frac{\gamma}{\beta_t} \mathbb{E} ||\widetilde{\nabla}_{\mathcal{G}_{\psi(t)}}^s ||^2 + (1+ \gamma \beta_t) \mathbb{E} ||w^s_t - w^s||^2
\end{align}
where (a) follows from Young's inequality, $\langle a,b\rangle \leq \frac{1}{2\beta_t}\|a\|^2 + \frac{\beta_t}{2}\|b\|^2$ for any $\beta_t>0$. For any $t\in K'(u)$ with $u \in \mathcal{A'}(s)$, we have
\begin{align}\label{ncsvrg-13}
\mathbb{E} f(w^s_{t+1})
& \stackrel{(a)}{\leq}\mathbb{E}\left[ f(w^s_t) + \left< \nabla f(w^s_t) , w^s_{t+1}-w^s_t \right> + \frac{L}{2} || w^s_{t+1}- w^s_t||^2 \right]
\nonumber \\
&= \mathbb{E} f(w^s_t) - \gamma\mathbb{E}\left< \nabla f(w^s_t), \widetilde{\nabla}_{\mathcal{G}_{\psi(t)}}^s \right> + \frac{\gamma^2 L}{2} \mathbb{E} ||\widetilde{v}^{{\psi(t)}}_t||^2
\nonumber \\
&\stackrel{(b)}{=} \mathbb{E} f(w^s_t) - \frac{\gamma}{2} \mathbb{E}\biggl[ ||\nabla_{\mathcal{G}_{\psi(t)}} f(w^s_t)||^2 + || \widetilde{\nabla}_{\mathcal{G}_{\psi(t)}}^s ||^2 \nonumber \\
&- ||\nabla_{\mathcal{G}_{\psi(t)}} f(w^s_t) - \widetilde{\nabla}_{\mathcal{G}_{\psi(t)}}^s ||^2 \biggr] + \frac{\gamma^2 L_{*}}{2} \mathbb{E} ||\widetilde{v}^{{\psi(t)}}_t||^2
\end{align}
where (a) follows from Assumption~\ref{assum1}, and (b) follows from the identity $2\left<a,b\right>=\|a\|^2+\|b\|^2-\|a-b\|^2$. Next, we give the upper bound of the term $\mathbb{E}||\nabla_{\mathcal{G}_{\psi(t)}} f(w^s_t) - \widetilde{\nabla}_{\mathcal{G}_{\psi(t)}}^s ||^2$:
\begin{eqnarray}\label{ncsvrg-14}
\mathbb{E}||\nabla_{\mathcal{G}_{\psi(t)}} f(w^s_t) - \widetilde{\nabla}_{\mathcal{G}_{\psi(t)}}^s ||^2 \leq 2{L_{{*}}^2 \gamma^2 \tau_1} \sum_{u' \in D(t)} \mathbb{E} \| \widetilde{v}^{\psi(u')}_{u'} \|^2
+ 8{ L_{{*}}^2 \gamma^2 \tau_2} \sum_{u' \in D'(t)} \mathbb{E} \| \widetilde{v}^{\psi(u')}_{u'} \|^2.
\end{eqnarray}
The above result can be obtained by applying Lemma~\ref{lem-csgd-2} with $v_{t}^{\psi(t)}$ and $\widetilde{v}_{t}^{\psi(t)}$ as defined in the SVRG-based algorithm. From (\ref{ncsvrg-13}) and (\ref{ncsvrg-14}), it is easy to derive the following inequality:
\begin{eqnarray}\label{ncsvrg-15}
\mathbb{E} f(w_{t+1}^{s}) &\leq& \mathbb{E}f(w^s_t) - \frac{\gamma}{2} \mathbb{E}
||\nabla_{\mathcal{G}_{\psi(t)}} f(w^s_t)||^2 - \frac{\gamma}{2} \mathbb{E} || \widetilde{\nabla}_{\mathcal{G}_{\psi(t)}}^s ||^2 + \frac{\gamma^2L}{2} \mathbb{E} || \widetilde{v}^{{\psi(t)}}_t||^2
\nonumber \\
&+& {{L_{*}^2\gamma^3 }} \left(\tau_1 \sum_{t' \in D(t)} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2
+ 4\tau_2 \sum_{t' \in D^\prime(t)} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2 \right)
\end{eqnarray}
Similar to many convergence analyses of nonconvex optimization, we define the Lyapunov function as
\begin{eqnarray}\label{ncsvrg-16}
R_t^{s} = \mathbb{E}\left[ f(w^s_t) + c_t ||w^s_t - w^s||^2 \right],
\end{eqnarray}
then there is
\begin{align}\label{ncsvrg-17}
R_{t+1}^{s}
&= \mathbb{E}\left[ f(w_{t+1}^{s}) + c_{t+1} ||w^s_{t+1}- w^s ||^2 \right] \nonumber \\
&\stackrel{(a)}{\leq} \mathbb{E}f(w^s_t) - \frac{\gamma}{2} \mathbb{E}
||\nabla_{\mathcal{G}_{\psi(t)}} f(w^s_t)||^2 - \frac{\gamma}{2} \mathbb{E} || \widetilde{\nabla}_{\mathcal{G}_{\psi(t)}}^s ||^2 + \frac{\gamma^2L}{2} \mathbb{E} || \widetilde{v}^{{\psi(t)}}_t||^2
\nonumber \\
&+ {{L_{*}^2\gamma^3 }} \left( \tau_1 \sum_{t' \in D(t)} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2 + 4 \tau_2 \sum_{t' \in D^\prime(t)} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2 \right)
\nonumber \\
&+ c_{t+1} \left[ \gamma^2\mathbb{E} ||\widetilde{v}^{{\psi(t)}}_t||^2 + \frac{\gamma}{\beta_t} \mathbb{E} ||\widetilde{\nabla}_{\mathcal{G}_{\psi(t)}}^s ||^2 + (1+ \gamma \beta_t) \mathbb{E} ||w^s_t - w^s||^2 \right]
\nonumber \\
&\stackrel{(b)}{\leq} \mathbb{E} f(w^s_t) - \frac{\gamma}{2 } \mathbb{E} ||\nabla_{\mathcal{G}_{\psi(t)}} f(w^s_t)||^2 - (\frac{\gamma}{2 } - \frac{c_{t+1}\gamma}{ \beta_t})\mathbb{E} || \widetilde{\nabla}_{\mathcal{G}_{\psi(t)}}^s||^2
\nonumber \\
& + {{L_*^2\gamma^3\tau_1 }} \sum_{t' \in D(t)} \mathbb{E} ||\widetilde{v}^{\psi(t')}_{t'}||^2 + (\frac{\gamma^2L}{2 }
+ c_{t+1}\gamma^2) \mathbb{E} ||\widetilde{v}^{{\psi(t)}}_t||^2
\nonumber \\
&+ 4 L_*^2\gamma^3\tau_2 \sum_{t' \in D^\prime(t)} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2 + c_{t+1}(1+ \gamma\beta_t) \mathbb{E} ||w^s_t - w^s||^2 \nonumber \\
\end{align}
where (a) follows from Eqs.~(\ref{ncsvrg-12}) and (\ref{ncsvrg-15}). Summing this over an outer loop $s$, one obtains:
\begin{align}\label{ncsvrg-18}
& \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)}R_{u_t+1}^{s} \\
&\leq
\sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)}\left( \mathbb{E} f(w^s_{u_t})
- \frac{\gamma}{2 } \mathbb{E} ||\nabla_{\mathcal{G}_{\psi({u_t})}} f(w^s_{u_t})||^2
- (\frac{\gamma}{2 } - \frac{c_{{u_t}+1}\gamma}{ \beta_{u_t}})\mathbb{E} || \widetilde{\nabla}_{\mathcal{G}_{\psi({u_t})}}^s||^2\right)
\nonumber \\
& \quad + \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)} {{L_*^2\gamma^3\tau_1 }} \sum_{t' \in D({u_t})} \mathbb{E} ||\widetilde{v}^{\psi(t')}_{t'}||^2
+ \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)}(\frac{\gamma^2L}{2 } + c_{{u_t}+1}\gamma^2) \mathbb{E} ||\widetilde{v}^{{\psi({u_t})}}_{u_t}||^2
\nonumber \\
& \quad + \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)}4 L_*^2\gamma^3\tau_2 \sum_{t' \in D^\prime({u_t})} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2
+ \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{{t}\in K'(u)}c_{{u_t}+1}(1+ \gamma\beta_{u_t}) \mathbb{E} ||w^s_{u_t} - w^s||^2 \nonumber
\end{align}
Summing the above inequality over all outer loops $s=1,\cdots,S$ and reorganizing, we have
\begin{align}\label{ncsvrg-19}
&\sum\limits_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)}R_{{{u_t}}+1}^{s}
\\
&\leq \sum\limits_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)}
\left( \mathbb{E} f(w^s_{{u_t}})
- \frac{\gamma}{2 } \mathbb{E} ||\nabla_{\mathcal{G}_{\psi({{u_t}})}} f(w^s_{{u_t}})||^2
+ c_{{{u_t}}+1}(1+ \gamma\beta_{{u_t}}) \mathbb{E} ||w^s_{{u_t}} - w^s||^2 \right)
\nonumber \\
& + \sum\limits_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)}\left( {{L_*^2\gamma^3\tau_1 }} \sum_{t' \in D({u_t})} \mathbb{E} ||\widetilde{v}^{\psi(t')}_{t'}||^2
+ (\frac{\gamma^2L}{2 } + c_{{{u_t}}+1}\gamma^2) \mathbb{E} ||\widetilde{v}^{{\psi({u_t})}}_{{u_t}}||^2\right)
\nonumber \\
& - \sum\limits_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)} (\frac{\gamma}{2 }
- \frac{c_{{{u_t}}+1}\gamma}{ \beta_{{u_t}}})\mathbb{E} || \widetilde{\nabla}_{\mathcal{G}_{\psi({{u_t}})}}^s ||^2
+ \sum\limits_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)}4L_*^2\gamma^3\tau_2 \sum_{t' \in D^\prime({{u_t}})} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2
\nonumber \\
& \stackrel{(a)}{\leq} \sum\limits_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)} \left( \mathbb{E} f(w^s_{{u_t}})
- \frac{\gamma}{2 } \mathbb{E} ||\nabla_{\mathcal{G}_{\psi({{u_t}})}} f(w^s_{{u_t}})||^2
+ c_{{{u_t}}+1}(1+ \gamma\beta_{{u_t}}) \mathbb{E} ||w^s_{{u_t}} - w^s||^2 \right)
\nonumber \\
& + \sum\limits_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)}\left( {{L_*^2\gamma^3(\tau_1^2 + 4\tau_2^2) }} \mathbb{E} ||\widetilde{v}^{\psi({u_t})}_{{u_t}}||^2 + (\frac{\gamma^2L_{*}}{2 }
+ c_{{u_t}+1}\gamma^2) \mathbb{E} ||\widetilde{v}^{\psi({u_t})}_{{u_t}}||^2\right)
\nonumber \\
&\stackrel{(b)}{\leq}\sum\limits_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)}\left( \mathbb{E} f(w^s_{{u_t}}) - \frac{\gamma}{2 } \mathbb{E} ||\nabla_{\mathcal{G}_{\psi({{u_t}})}} f(w^s_{{u_t}})||^2 + c_{{{u_t}}+1}(1+ \gamma\beta_{{u_t}}) \mathbb{E} ||w^s_{{u_t}} - w^s||^2 \right)
\nonumber \\
& + \lambda_{\gamma}\sum\limits_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)}\left( {{5L_*^2\gamma^3 }} \tau + \frac{\gamma^2L_{*}}{2 } + c_{{{u_t}}+1}\gamma^2 \right)\mathbb{E} ||{v}^{\psi({{u_t}})}_{{u_t}} ||^2
\end{align}
where (a) follows from Assumption~\ref{assum4} and the assumption $\frac{1}{2} \geq \frac{c_{{{u_t}}+1}}{\beta_{{u_t}}}$, and (b) follows from Lemma~\ref{lem-ncsvrg-1}. Denoting $ {{10L_*^2\gamma^3 }} \tau + {\gamma^2L_{*}} + 2c_{{{u_t}}+1}\gamma^2$ by $\lambda_{{u_t}}$, we have
\begin{align}\label{ncsvrg-20}
&\sum\limits_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)}
R_{{{u_t}}+1}^s \\
&\stackrel{(a)}{\leq}
\sum\limits_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)}
\left( \mathbb{E} f(w^s_{{u_t}}) - \frac{\gamma}{2 } \mathbb{E} ||\nabla_{\mathcal{G}_{\psi({{u_t}})}} f(w^s_{{u_t}})||^2 + c_{{{u_t}}+1}(1+ \gamma\beta_{{u_t}}) \mathbb{E} ||w^s_{{u_t}} - w^s||^2 \right)
\nonumber \\
& + \lambda_{\gamma}\sum\limits_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)}
\lambda_{{u_t}}
\left( \mathbb{E}\left\|\nabla_{\mathcal{G}_{\psi({{u_t}})}} f\left(w_{{{u_t}}}^{s}\right)\right\|^{2}+{ L_{*}^{2}} \mathbb{E}\left\|w_{{{u_t}}}^{s}-{w}^{s}\right\|^{2} \right) \nonumber \\
&{=}\sum\limits_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)}
\left( \mathbb{E} f(w^s_{{u_t}})+ (c_{{{u_t}}+1}(1+ \gamma\beta_{{u_t}})+\lambda_{\gamma}\lambda_{{{u_t}}}L_{*}^2 ) \mathbb{E} ||w^s_{{u_t}} - w^s||^2 \right) \nonumber \\
& - \sum\limits_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)}
(\frac{\gamma}{2 } -\lambda_{\gamma}\lambda_{u_t})\mathbb{E} ||\nabla_{\mathcal{G}_{\psi({{u_t}})}} f(w^s_{{u_t}})||^2 \nonumber
\end{align}
where (a) follows from Eq.~\ref{ncsvrg-11} and the definitions of $\lambda_{u_t}$ and $L_*$. Then we return to Eq.~\ref{ncsvrg-8}:
\begin{align}\label{ncsvrg-133}
&\sum_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)} \mathbb{E} \| \nabla_{\mathcal{G}_{\psi(u_t)}} f({w}_{u_0}^s)\|^2 \\
&\stackrel{(a)}{\leq} 2 \lambda_{\gamma}L_*^2 \gamma^2 \tau_1^2 \sum\limits_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)} \mathbb{E} \|{v}_{{u_t}}^{\psi({u_t})}\|^2
+ 2 \sum\limits_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)} \mathbb{E} \|\nabla_{\mathcal{G}_{\psi({u_t})}} f({w}_{{u_t}})\|^2
\nonumber \\
& \stackrel{(b)}{\leq} 4 \lambda_{\gamma}L_*^4 \gamma^2 \tau \sum\limits_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)} \mathbb{E}\left\|w_{{u_t}}^{s}-{w}^{s}\right\|^{2}
\nonumber \\
& + (2 + 4 \lambda_{\gamma}L_*^2 \gamma^2 \tau) \sum\limits_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)} \mathbb{E} \|\nabla_{\mathcal{G}_{\psi({u_t})}} f({w}_{{u_t}}^s)\|^2
\end{align}
where (a) follows from Lemma \ref{lem-ncsvrg-1}, (b) follows from Eq.~\ref{ncsvrg-11} and $u_0$ denotes the start iteration during epoch $u$. This implies that
\begin{align}\label{ncsvrg-22}
&\frac{\frac{\gamma}{2 } -\lambda_{\gamma}\lambda_{u_t}}{2 + 4 \lambda_{\gamma}L_*^4 \gamma^2 \tau}
\sum_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)} \mathbb{E} \| \nabla_{\mathcal{G}_{\psi(u_t)}} f({w}_{u_0}^s)\|^2
\nonumber \\
& \stackrel{(a)}{\leq} L_*^2 (\frac{\gamma}{2 } -\lambda_{\gamma}\lambda_{u_t} )
\sum\limits_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)} \mathbb{E}\left\|w_{{u_t}}^{s}-{w}^{s}\right\|^{2}
+ (\frac{\gamma}{2 } -\lambda_{\gamma}\lambda_{u_t}) \sum\limits_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)} \mathbb{E} \|\nabla_{\mathcal{G}_{\psi({u_t})}} f({w}_{{u_t}}^s)\|^2
\end{align}
where (a) follows from the definition of $L_*$. Combining Eq.~\ref{ncsvrg-22} with \ref{ncsvrg-20} we have
\begin{align}\label{ncsvrg-23}
& \frac{\frac{\gamma}{2 } -\lambda_{\gamma}\lambda_{u_t}}{2 + 4 \lambda_{\gamma}L_*^4 \gamma^2 \tau}
\sum_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)} \mathbb{E} \| \nabla_{\mathcal{G}_{\psi(u_t)}} f({w}_{u_0}^s)\|^2
\nonumber \\
& \stackrel{(a)}{\leq} L_*^2 (\frac{\gamma}{2 } -\lambda_{\gamma}\lambda_{u_t} ) \sum\limits_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)} \mathbb{E}\left\|w_{{u_t}}^{s}-{w}^{s}\right\|^{2}
+ \sum\limits_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)} R_{{{u_t}}+1}^s
\nonumber \\ &
+ \sum\limits_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)} \left(\mathbb{E} f(w^s_{{u_t}})
+ (c_{{{u_t}}+1}(1+ \gamma\beta_{u_t})+\lambda_{\gamma}\lambda_{{u_t}}L_{*}^2 ) \mathbb{E} ||w^s_{{u_t}} - w^s||^2 \right)
\nonumber \\
& = \sum\limits_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)} R_{{{u_t}}+1}^s
+ \sum\limits_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)}
\left(\mathbb{E} f(w^s_{{u_t}}) + (c_{{{u_t}}+1}(1+ \gamma\beta_{{u_t}})+\frac{\gamma}{2}L_{*}^2 )
\mathbb{E} ||w^s_{{u_t}} - w^s||^2\right)
\end{align}
Rearranging Eq.~(\ref{ncsvrg-23}), we have
\begin{align}\label{ncsvrg-24}
\sum\limits_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)}R_{{{u_t}}+1}^s
&\leq
\sum\limits_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)} \left(\mathbb{E} f(w^s_{{u_t}}) + (c_{{{u_t}}+1}(1+ \gamma\beta_{{u_t}})+\frac{\gamma}{2}L_{*}^2 ) \mathbb{E} ||w^s_{{u_t}} - w^s||^2 \right)
\nonumber \\
& - \frac{\frac{\gamma}{2 } -\lambda_{\gamma}\lambda_{u_t}}{2 + 4 \lambda_{\gamma}L_*^4 \gamma^2 \tau}\sum\limits_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \mathbb{E} \| \nabla f({w}_{u})\|^2
\nonumber \\
&= \sum\limits_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)}R_{{{u_t}}}^s
- \sum_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)} \Gamma_{u_t} \mathbb{E} \| \nabla_{\mathcal{G}_{\psi(u_t)}} f({w}_{u_0}^s)\|^2
\end{align}
where
\begin{align}\label{ncsvrg-25}
c_{{u_t}} & = c_{{u_t}+1}(1+ \gamma \beta_{u_t}) + \frac{\gamma}{2}L_*^2
\end{align}
and
\begin{eqnarray}\label{ncsvrg-26}
\Gamma_{{u_t}} = \frac{ \frac{\gamma}{2} - \frac{2}{1 - 20L_{*}^2\gamma^2\tau}( 10L_{*}^2 \gamma^3 \tau + {\gamma^2L_{*}} + 2c_{{u_t}+1}\gamma^2)} {2 + 4 \lambda_{\gamma}L_*^4 \gamma^2 \tau}
\end{eqnarray}
Denote the subscript of the last iteration in the $s$-th outer loop by $\bar{s}$ and set $c_{\bar{s}}=0$; further set \[{w}^{s+1} = w^{s}_{{ {u_t}=\bar{s}}}\] so that
\[R_{{ {u_t}=\bar{s}}}^{s} = \mathbb{E} f(w_{{ {u_t}=\bar{s}}}^{s})= \mathbb{E} f({w}^{s+1}).\]
Applying these to Eq.~(\ref{ncsvrg-20}), we obtain
\begin{eqnarray}\label{ncsvrg-27}
\sum_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)} \mathbb{E} \| \nabla_{\mathcal{G}_{\psi(u_t)}} f({w}_{u_0}^s)\|^2
\leq \frac{\mathbb{E}\left[ f( w^{s}) - f(w^{s+1}) \right] }{ \Gamma_*}
\end{eqnarray}
where $\Gamma_* = \min_{u_t} \{\Gamma_{u_t}\}$ and $u_0$ denotes the start iteration of epoch $u$. Using the update rule of VF{${\textbf{B}}^2$}-SVRG,
summing over all outer loops, and letting $w_0$ denote the initial point and $w^*$ an optimal solution, we have the final inequality:
\begin{eqnarray}\label{ncsvrg-28}
\sum_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \sum\limits_{t\in K'(u)} \mathbb{E} \| \nabla_{\mathcal{G}_{\psi(u_t)}} f({w}_{u_0}^s)\|^2
\leq \frac{\mathbb{E}\left[ f( w_{0}) - f(w^{*}) \right] }{\Gamma_*}
\end{eqnarray}
Since $\sum\limits_{t\in K'(u)} \mathbb{E} \| \nabla_{\mathcal{G}_{\psi(u_t)}} f({w}_{u_0}^s)\|^2 = \mathbb{E} \| \nabla f({w}_{u_0}^s)\|^2 $, we have
\begin{eqnarray}\label{ncsvrg-28}
\frac{1}{T}\sum_{s=1}^{S} \sum\limits_{u\in \mathcal{A'}(s)} \mathbb{E} \| \nabla f({w}_{u_0}^s)\|^2
\leq \frac{\mathbb{E}\left[ f( w_{0}) - f(w^{*}) \right] }{T \Gamma_*}
\end{eqnarray}
where $T$ denotes the total number of epochs and $u_0$ denotes the start iteration of epoch $u$.
To prove Theorem~\ref{thm-svrgnonconvex}, set $\{c_{u_t}^s\}_{u_t=\bar{s}} = 0$, $ \gamma = \frac{m_0}{L_{*}n^\alpha}$ and $\beta_t = \beta = {2L_{*}}$, where $0<m_0<1$ and $0<\alpha<1$. Then
\begin{eqnarray}\label{ncsvrg-29}
\theta &= & {\gamma \beta} = \frac{2m_0}{n^{{\alpha}}}
\end{eqnarray}
From the recurrence formula of $c_t$, we have:
\begin{eqnarray}\label{ncsvrg-30}
c_0& =& \frac{\gamma L_*^2}{2} \frac{(1+\theta)^{{N} } - 1}{\theta} \nonumber \\
&=& \frac{m_0L_*^2}{2L_*n^\alpha} \frac{n^\alpha}{2m_0} \left( (1+\theta)^{{N}} -1 \right) \nonumber \\
&\stackrel{(a)}{\leq}& \frac{L_*}{4} \left( (1+\theta)^\frac{1}{\theta} -1 \right) \nonumber \\
&\stackrel{(b)}{\leq} & \frac{L_*}{4}(e-1)
\end{eqnarray}
where (a) follows from ${{N}} \leq \lfloor \frac{n^{{\alpha}}}{2m_0} \rfloor$, and (b) follows from the fact that $(1+\frac{1}{l})^l$ is increasing for $l>0$ with $\lim\limits_{l\rightarrow \infty}(1 + \frac{1}{l})^l = e$. Since $e-1<2$, we obtain $c_0 \leq \frac{L_*}{2}$, which satisfies $c_t \leq \frac{\beta}{2}=L_*$ (used in (\ref{ncsvrg-19})). Moreover, $c_t$ is decreasing with respect to $t$, so $c_0$ is an upper bound for all $c_t$.
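For completeness, we sketch (our own unrolling, under the boundary condition that $c$ vanishes at the last inner iteration, i.e. $c_{N}=0$) how the closed form for $c_0$ in the first line of (\ref{ncsvrg-30}) follows from the recurrence (\ref{ncsvrg-25}). With $\beta_t\equiv\beta$ the recurrence reads $c_t=(1+\theta)c_{t+1}+\frac{\gamma}{2}L_*^2$, so unrolling from $c_N=0$ gives
\begin{equation*}
c_0=\frac{\gamma L_*^2}{2}\sum_{j=0}^{N-1}(1+\theta)^{j}=\frac{\gamma L_*^2}{2}\cdot\frac{(1+\theta)^{N}-1}{\theta},
\end{equation*}
which is exactly the expression used above.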
\begin{eqnarray}
\Gamma_* &=& \min_t \Gamma_t \nonumber \\
&\stackrel{(a)}{\geq} & \frac{ \frac{\gamma}{2} - \frac{2}{1 - 20L_{*}^2\gamma^2\tau}( 10L_{*}^2 \gamma^3 \tau + {\gamma^2L_{*}} + 2c_{0}\gamma^2)} {2 + 4 \lambda_{\gamma}L_*^4 \gamma^2 \tau}
\nonumber \\
& =&\frac{ \frac{\gamma}{2} - \frac{2n^{2\alpha}}{n^{2\alpha} - 20m_0^2\tau}
( \frac{10m_0^2\tau}{n^{2\alpha}} + \frac{2m_0}{n^\alpha})\gamma}
{2 + \frac{8L_*^2m_0^2\tau}{n^{2\alpha}-20m_0^2\tau}}
\nonumber \\
&\stackrel{(b)}{\geq}& \frac{\left( \frac{1}{2}-(20m_0^2\tau + 4m_0)\right)\gamma}{2 + {8L_*^2m_0^2\tau}}
\nonumber \\
&\stackrel{(c)}{\geq}& \frac{\sigma }{L_{*}n^{\alpha}}
\end{eqnarray}
where (a) follows from $c_0=\max_t\{c_t\}$, (b) follows from $n^{{\alpha}} \leq n^{2\alpha} - 20m_0^2 \tau$ (we assume $n^{\alpha} \geq \frac{1 + \sqrt{1+80m_0^2\tau}}{2}$, which is easy to satisfy when $n$ is large) and $n^\alpha > 1$,
and (c) holds provided $\frac{1}{2} > 20m_0^2\tau + 4m_0$, where $\sigma$ is a small constant independent of $n$.
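As a purely illustrative sanity check (the value $m_0=0.05$ is our own choice and is not prescribed by the theorem), condition (c) can be rewritten as
\begin{equation*}
\frac{1}{2}>20m_0^2\tau+4m_0\quad\Longleftrightarrow\quad \tau<\frac{1-8m_0}{40m_0^2}\;\stackrel{m_0=0.05}{=}\;\frac{0.6}{0.1}=6,
\end{equation*}
which is precisely the second term of the $\min$ in the summary below.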
In summary, if $\tau < \min \{\frac{n^{2\alpha}}{20m_0^2},\frac{1-8m_0}{40m_0^2} \} $ (the first condition ensures $\lambda_\gamma >0$), where $1-8m_0>0$, and the inner-loop length $N$ satisfies $N \leq \lfloor \frac{n^{{\alpha}}}{2m_0} \rfloor$, we have the conclusion:
\begin{eqnarray} \label{ncsvrg-final}
\frac{1}{T}\sum\limits_{s=1}^{S}\sum\limits_{t=0}^{N-1}\mathbb{E} ||\nabla f(w^s_{t})||^2 \leq \frac{L_{*}n^{\alpha}\mathbb{E}\left[ f( w_{0}) - f( w^{*}) \right] }{T \sigma }
\end{eqnarray}
where $T$ denotes the total number of epochs.
Requiring the right-hand side of (\ref{ncsvrg-final}) to be at most $\epsilon$, one obtains
\begin{eqnarray} \label{ncsvrg-final2}
T\geq \frac{L_{*}n^{\alpha}\mathbb{E}\left[ f( w_{0}) - f( w^{*}) \right] }{\epsilon \sigma }
\end{eqnarray}
This completes the proof.
\end{proof}
\subsection{Proof of Theorem~\ref{thm-saganonconvex}}
\begin{lemma}\label{lem-ncsaga-2}
For all $\psi (t)$, we have
\begin{equation}\label{lemeq-ncsaga-2}
\sum\limits_{t=0}^{S-1} \mathbb{E} ||\widetilde{v}_{t}^{\psi(t)}||^2 \leq \lambda_{\gamma} \sum\limits_{t=0}^{S-1} \mathbb{E} \| {v}_{t}^{\psi(t)}\|^2,
\end{equation}
where $\lambda_\gamma = \frac{2}{1 - 180L_{*}^2\gamma^2\tau }> 0$.
\end{lemma}
\begin{proof}[\bf{Proof of Lemma~\ref{lem-ncsaga-2}}]
First,
we give the upper bound to $\mathbb{E} \| \widetilde{v}^{ {\psi(u)} }_t - \widehat{v}^{{\psi(u)}}_t \|^2$ as follows.
We have that
\begin{eqnarray}\label{ncsaga-8}
&& \mathbb{E} \left \| \widetilde{v}^{ {\psi(u)} }_t - \widehat{v}^{{\psi(u)}}_t \right \|^2
\\ & = & \nonumber
\mathbb{E} \left \| \left(\nabla_{\mathcal{G}_{\psi(t)}}\mathcal{L}(\bar{w})
+ \nabla_{\mathcal{G}_{\psi(t)}} g((\widehat{w}_t)_{\mathcal{G}_{\psi(t)}}) \right)
- \nabla_{\mathcal{G}_{\psi(t)}} f(\widehat{w}_t)
- \widehat{\alpha}_{i}^{\psi(u)} + \widetilde{\alpha}_{i}^{\psi(u)}
+ \frac{1}{n} \sum_{i=1}^n \widehat{\alpha}_{i}^{\psi(u)} - \frac{1}{n} \sum_{i=1}^n \widetilde{\alpha}_{i}^{\psi(u)} \right \|^2
\\ & \stackrel{ (a) }{\leq} & \nonumber
3 \mathbb{E} Q_1
+ 3\mathbb{E} \underbrace{\left \| \widetilde{\alpha}_{i_t}^{t,\psi(t)}
- \widehat{\alpha}_{i_t}^{t,\psi(t)} \right \|^2 }_{Q_2}
+ 3\mathbb{E} \underbrace{\left \| \frac{1}{n} \sum_{i=1}^n \widetilde{\alpha}_{i}^{t,\psi(t)} - \frac{1}{n} \sum_{i=1}^n \widehat{\alpha}_{i}^{t,\psi(t)} \right \|^2}_{Q_3}
\end{eqnarray}
where $Q_1 = \left \| \left(\nabla_{\mathcal{G}_{\psi(t)}}\mathcal{L}(\bar{w})
+ \nabla_{\mathcal{G}_{\psi(t)}} g((\widehat{w}_t)_{\mathcal{G}_{\psi(t)}}) \right)
- \nabla_{\mathcal{G}_{\psi(t)}} f(\widehat{w}_t) \right\|^2$ and inequality (a) uses $\| \sum_{i=1}^n a_i \|^2 \leq n \sum_{i=1}^n \| a_i \|^2 $.
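For clarity (our own remark), inequality (a) is applied here with $n=3$ to the three grouped differences displayed above, i.e. it reduces to the elementary bound
\begin{equation*}
\|a_1+a_2+a_3\|^2\leq 3\left(\|a_1\|^2+\|a_2\|^2+\|a_3\|^2\right).
\end{equation*}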
We will give the upper bounds for the expectations of $Q_1$, $Q_2$ and $Q_3$ respectively.
\begin{eqnarray}\label{ncsaga-9}
\nonumber \mathbb{E} Q_1 &=& \mathbb{E} \left \| \left(\nabla_{\mathcal{G}_{\psi(t)}}\mathcal{L}(\bar{w})
+ \nabla_{\mathcal{G}_{\psi(t)}} g((\widehat{w}_t)_{\mathcal{G}_{\psi(t)}}) \right)
- \nabla_{\mathcal{G}_{\psi(t)}} f(\widehat{w}_t) \right\|^2
\\&\stackrel{(a)}{\leq}&
\mathbb{E} ||\nabla_{\mathcal{G}_{\psi(t)}} f(\bar{w}_t) - \nabla_{\mathcal{G}_{\psi(t)}} f(\widehat{w}_t) + \nabla_{\mathcal{G}_{\psi(t)}} g((\widehat{w}_t)_{\mathcal{G}_{\psi(t)}}) - \nabla_{\mathcal{G}_{\psi(t)}} g((\bar{w}_t)_{\mathcal{G}_{\psi(t)}})||^2
\nonumber \\
&\stackrel{(b)}{\leq}& 4{ L_{{*}}^2 \gamma^2 \tau_2} \sum_{t' \in D'(t)} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2
\end{eqnarray}
The above inequality can be obtained by following the proof of Lemma~\ref{lem-csgd-2}.
\begin{eqnarray}\label{ncsaga-10}
\mathbb{E} Q_2 &=& \mathbb{E}\left \| \widetilde {\alpha}_{i_t}^{t,\psi(t)} - \widehat{\alpha}_{i_t}^{t,\psi(t)} \right \|^2
\\ \nonumber &\leq & \frac{4\tau_2 L_*^2 \gamma^2}{n} \sum_{t'=1}^{\phi(t)-1} \sum_{{u} \in D'(\xi(t',\psi(t)))} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \mathbb{E} \left \| \widetilde{v}^{\psi({u})}_{{u}} \right \|^2
\end{eqnarray}
where the inequality uses Lemma \ref{lem-csaga-1}.
\begin{eqnarray}\label{ncsaga-11}
&& \mathbb{E} Q_3 = \mathbb{E} \left \| \frac{1}{n} \sum_{i=1}^n \widetilde{\alpha}_{i}^{t,\psi(t)} - \frac{1}{n} \sum_{i=1}^n \widehat{\alpha}_{i}^{t,\psi(t)} \right \|^2
\\ & \leq & \nonumber
\frac{1}{n} \sum_{i=1}^n \mathbb{E} \left \| \widetilde{\alpha}_{i}^{t,\psi(t)} - \widehat{\alpha}_{i}^{t,\psi(t)} \right \|^2
\\ & \leq &
\frac{4\tau_2 L_*^2 \gamma^2}{n} \sum_{t'=1}^{\phi(t)-1} \sum_{{u} \in D'(\xi(t',\psi(t)))} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \mathbb{E} \left \| \widetilde{v}^{\psi({u})}_{{u}} \right \|^2 \nonumber
\end{eqnarray}
where the first inequality uses $\| \sum_{i=1}^n a_i \|^2 \leq n \sum_{i=1}^n \| a_i \|^2 $, the second inequality uses Lemma \ref{lem-csaga-1}. Combining \ref{ncsaga-9}, \ref{ncsaga-10}, and \ref{ncsaga-11}, one can obtain:
\begin{eqnarray}\label{SAGA-13}
&& \mathbb{E} \left \| \widetilde{v}_{t}^{\psi(t)} - \widehat{v}_t^{\psi(t)} \right \|^2
\\ & \leq & \nonumber 3 \mathbb{E} {Q_1} + 3 \mathbb{E} {Q_2} + 3\mathbb{E} {Q_3}
\\ & \leq & \nonumber {12 L_*^2\gamma^2\tau_2 } \sum_{t' \in D'(t)} \mathbb{E} \|\widetilde{v}^{\psi(t')}_{t'}\|^2
+ \frac{24\tau_2 L_*^2 \gamma^2}{n} \sum_{t'=1}^{\phi(t)-1} \sum_{u \in D'(\xi(t',\psi(t)))} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \mathbb{E} \left \| \widetilde{v}^{\psi({u})}_{{u}} \right \|^2
\end{eqnarray}
Summing the above inequality over all iterations, we have
\begin{eqnarray}\label{SAGA-14}
&& \sum_{t=0}^{S-1} \mathbb{E} \left \| \widetilde{v}_{t}^{\psi(t)} - \widehat{v}_t^{\psi(t)} \right \|^2
\\ & \leq & \nonumber
{12 L_*^2\gamma^2\tau_2^2 } \sum_{t=0}^{S-1} \mathbb{E} ||\widetilde{v}^{\psi(t)}_{t}||^2
+ \frac{24\tau_2^2 L_*^2 \gamma^2}{n} \sum_{t=0}^{S-1} \sum_{t'=1}^{\phi(t)-1} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \mathbb{E} \left \| \widetilde{v}^{\psi(t')}_{t'} \right \|^2
\\ & \leq & \nonumber
{36 L_*^2\gamma^2\tau_2^2 } \sum_{t=0}^{S-1} \mathbb{E} ||\widetilde{v}^{\psi(t)}_{t}||^2
\end{eqnarray}
Define ${v}^{{\psi(t)}}_t= \nabla_{\mathcal{G}_{\psi(t)}} f_{i_t} (w_{t}) - \alpha_{i_t}^{{\psi(t)}} + \frac{1}{n} \sum_{i=1}^n \alpha_i^{{\psi(t)}}$.
And then, we give the upper bound to $\mathbb{E} \left \| \widehat{v}^{ {\psi(t)} }_t - {v}^{{\psi(t)}}_t \right \|^2$ as follows.
We have that
\begin{eqnarray}\label{ncsaga-15}
&& \mathbb{E} \left \| \widehat{v}_{t}^{\psi(t)} - v_{t}^{\psi(t)} \right \|^2
\\ & = & \nonumber \mathbb{E} \left \| \nabla_{\mathcal{G}_{\psi(t)}} f_{i_t} (\widehat{w}_{t})- \widehat{\alpha}_{i_t}^{t,\psi(t)} + \frac{1}{n} \sum_{i=1}^n \widehat{\alpha}_{i}^{t,\psi(t)} - \nabla_{\mathcal{G}_{\psi(t)}} f_{i_t} (w_{t}) + \alpha_{i_t}^{t,\psi(t)} - \frac{1}{n} \sum_{i=1}^n \alpha_{i}^{t,\psi(t)} \right \|^2
\\ & \stackrel{ (a) }{\leq} & \nonumber 3 \mathbb{E} \underbrace{\left \| \nabla_{\mathcal{G}_{\psi(t)}} f_{i_t} (\widehat{w}_{t})- \nabla_{\mathcal{G}_{\psi(t)}} f_{i_t} ({w}_{t}) \right \|^2 }_{Q_4}
+ 3\mathbb{E} \underbrace{\left \| {\alpha}_{i_t}^{t,\psi(t)} - \widehat{\alpha}_{i_t}^{t,\psi(t)} \right \|^2 }_{Q_5}
+ 3\mathbb{E} \underbrace{\left \| \frac{1}{n} \sum_{i=1}^n \alpha_{i}^{t,\psi(t)} - \frac{1}{n} \sum_{i=1}^n \widehat{\alpha}_{i}^{t,\psi(t)} \right \|^2}_{Q_6}
\end{eqnarray}
where the inequality (a) uses $\| \sum_{i=1}^n a_i \|^2 \leq n \sum_{i=1}^n \| a_i \|^2 $.
We will give the upper bounds for the expectations of $Q_4$, $Q_5$ and $Q_6$ respectively.
\begin{eqnarray}\label{ncsaga-16}
\mathbb{E} Q_4 &=& \mathbb{E} \left \| \nabla_{\mathcal{G}_{{\psi(t)}}} f_{i_t} (\widehat{w}_{t})- \nabla_{\mathcal{G}_{{\psi(t)}}} f_{i_t} ({w}_{t}) \right \|^2 \\
&\stackrel{(a)}{\leq}& { L_{{\psi(t)}}^2}
\mathbb{E} \left[ ||w_{t} - \widehat{w}_{t}||^2 \right]
\nonumber \\
&=& {L_{{\psi(t)}}^2\gamma^2} \mathbb{E} \left[ ||\sum_{t' \in D(u,i_t)} \textbf{U}_{\psi(t')} \widetilde{v}^{\psi(t')}_{t'} ||^2 \right]
\nonumber \\
&\stackrel{(b)}{\leq}& { \tau_1 L_*^2\gamma^2 } \sum_{t' \in D(u,i_t)} \mathbb{E} \left[ ||\widetilde{v}^{\psi(t')}_{t'}||^2 \right] \nonumber
\end{eqnarray}
where (a) uses Assumption~\ref{assum2}, (b) uses $\| \sum_{i=1}^n a_i \|^2 \leq n \sum_{i=1}^n \| a_i \|^2 $. Similar to the analyses of $Q_2$ and $Q_3$, we have
\begin{eqnarray}\label{ncsaga-17}
\mathbb{E} Q_5 = \mathbb{E}\left \| {\alpha}_{i_t}^{t,\psi(t)} - \widehat{\alpha}_{i_t}^{t,\psi(t)} \right \|^2
\leq \frac{\tau_1 L^2 \gamma^2}{n} \sum_{t'=1}^{\phi(t)-1} \sum_{\widetilde{u} \in D(\xi(t',\psi(t')))} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \mathbb{E} \left \| \widetilde{v}^{\psi(\widetilde{u})}_{\widetilde{u}} \right \|^2
\end{eqnarray}
where the inequality uses Lemma \ref{lem-csaga-1}.
\begin{eqnarray}\label{ncsaga-18}
&&\mathbb{E} Q_6 = \mathbb{E} \left \| \frac{1}{n} \sum_{i=1}^n {\alpha}_{i}^{t,\psi(t)} - \frac{1}{n} \sum_{i=1}^n \widehat{\alpha}_{i}^{t,\psi(t)} \right \|^2
\nonumber \\
&\leq& \frac{\tau_1 L^2 \gamma^2}{n} \sum_{t'=1}^{\phi(t)-1} \sum_{\widetilde{u} \in D(\xi(t',\psi(t')))} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \mathbb{E} \left \| \widetilde{v}^{\psi(\widetilde{u})}_{\widetilde{u}} \right \|^2 \nonumber
\end{eqnarray}
Summing the above inequality for $t=0,\cdots,S-1$ and following the analysis of Eq.~(\ref{ncsaga-15}), one obtains
\begin{eqnarray}\label{ncsaga-19}
&& \sum_{t=0}^{S-1} \mathbb{E} \left \| \widehat{v}_{t}^{\psi(t)} - {v}_t^{\psi(t)} \right \|^2
\\ & \leq & \nonumber
{3 L_*^2\gamma^2\tau_1^2 } \sum_{t=0}^{S-1} \mathbb{E} \|\widetilde{v}^{\psi(t)}_{t}\|^2
+ \frac{6\tau_1 L^2 \gamma^2}{n} \sum_{t=0}^{S-1} \sum_{t'=1}^{\phi(t)-1} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \mathbb{E} \left \| \widetilde{v}^{\psi(t')}_{t'} \right \|^2
\\ & \leq & \nonumber {9 L_*^2\gamma^2\tau_1^2 } \sum_{t=0}^{S-1} \mathbb{E} \|\widetilde{v}^{\psi(t)}_{t}\|^2
\end{eqnarray}
Based on the above formulations, we have
\begin{align}\label{ncsaga-20}
\sum_{t=0}^{S-1} \mathbb{E} \left \| \widetilde{v}_{t}^{\psi(t)} - {v}_t^{\psi(t)} \right \|^2 & = \sum_{t=0}^{S-1} \mathbb{E} \left \| \widetilde{v}_{t}^{\psi(t)} -\widehat{v}_{t}^{\psi(t)} + \widehat{v}_{t}^{\psi(t)} - {v}_t^{\psi(t)} \right \|^2
\nonumber \\
&\leq \sum_{t=0}^{S-1} \left( 2\mathbb{E} \| \widetilde{v}_{t}^{\psi(t)} -\widehat{v}_{t}^{\psi(t)}\|^2 + 2 \mathbb{E} \|\widehat{v}_{t}^{\psi(t)} - {v}_t^{\psi(t)} \|^2\right)
\nonumber \\
&\stackrel{(a)}{\leq} {18 L_*^2\gamma^2\tau_1^2 } \sum_{t=0}^{S-1} \mathbb{E} ||\widetilde{v}^{\psi(t)}_{t}||^2 + {72 L_*^2\gamma^2\tau_2^2 } \sum_{t=0}^{S-1} \mathbb{E} ||\widetilde{v}^{\psi(t)}_{t}||^2
\end{align}
then we have
\begin{align}\label{ncsaga-21}
\sum_{t=0}^{S-1} \mathbb{E} \left \| \widetilde{v}_{t}^{\psi(t)} \right \|^2 & = \sum_{t=0}^{S-1} \mathbb{E} \left \| \widetilde{v}_{t}^{\psi(t)} -{v}_{t}^{\psi(t)} + {v}_t^{\psi(t)} \right \|^2
\leq \sum_{t=0}^{S-1} \left( 2\mathbb{E} \| \widetilde{v}_{t}^{\psi(t)} -{v}_{t}^{\psi(t)} \|^2+ 2\mathbb{E} \|{v}_t^{\psi(t)} \|^2 \right)
\nonumber \\
& \leq { L_*^2\gamma^2(36\tau_1^2 + 144\tau_2^2) } \sum_{t=0}^{S-1} \mathbb{E} \|\widetilde{v}^{\psi(t)}_{t}\|^2 + 2\sum_{t=0}^{S-1}\mathbb{E}\|{v}_t^{\psi(t)}\|^2
\end{align}
which implies that, with $\lambda_\gamma = \frac{2}{1 - 180L_{*}^2\gamma^2\tau }> 0$, we have
\begin{equation}\label{ncsaga-22}
\sum_{t=0}^{S-1} \mathbb{E} \|\widetilde{v}_{t}^{{\psi(t)}}\|^2 \leq \lambda_{\gamma}\sum_{t=0}^{S-1} \mathbb{E} \| {v}_{t}^{{\psi(t)}}\|^2.
\end{equation}
This completes the proof.
\end{proof}
Similar to the proof of Theorem~\ref{thm-sgdnonconvex}, we first apply Lemma~\ref{lem-csgd-3} to all $S$ iterations and obtain
\begin{align}\label{ncsaga-23}
\sum_{t \in \mathcal{A}(S)}\mathbb{E} \| \nabla f({w}_{t})\|^2
\stackrel{(a)}{\leq} 2 L_*^2 \gamma^2 \eta_2^2 \sum_{t=0}^{S-1} \mathbb{E} \|\widetilde{v}_{t}^{\psi(t)}\|^2 + 2 \sum_{t=0}^{S-1} \mathbb{E} \|\nabla_{\mathcal{G}_{\psi(t)}} f({w}_{t})\|^2
\end{align}
Then, we give the upper bound of $\mathbb{E} \| {v}^{{\psi(t)}}_{t} \|^2$ as follows. We define
\begin{equation}\label{ncsaga-24}
\zeta_{t}^{{\psi(t)}}=\nabla_{\mathcal{G}_{{\psi(t)}}}f_{i_t}(w_{t})-{\alpha}_{i_t}^{t,\psi(t)}
\end{equation}
and use the definition of $v_t^{\psi(t)}$ to get
\begin{equation}\label{ncsaga-25}
\begin{array}{l}{\mathbb{E}\left\|v_{t}^{{\psi(t)}}\right\|^{2}=\mathbb{E}\left\|\zeta_{t}^{{\psi(t)}}+ \frac{1}{n}\sum_{i=1}^{n}\alpha_i^{t,\psi(t)}\right\|^{2}} \\ {=\mathbb{E}\left\|\zeta_{t}^{{\psi(t)}}+\frac{1}{n}\sum_{i=1}^{n}\alpha_i^{t,\psi(t)}-\nabla_{\mathcal{G}_{{\psi(t)}}}f({w_{t}})+ \nabla_{\mathcal{G}_{{\psi(t)}}}f({w_{t}})\right\|^{2} }
\\ { \stackrel{(a)}{\leq}2 \mathbb{E}\left\|\nabla_{\mathcal{G}_{{\psi(t)}}}f({w_{t}})\right\|^{2}+2 \mathbb{E}\left\|\zeta_{t}^{{\psi(t)}}-\mathbb{E}\left[\zeta_{t}^{{\psi(t)}}\right]\right\|^{2}} \\ { \stackrel{(b)}{\leq} 2 \mathbb{E}\left\|\nabla_{\mathcal{G}_{{\psi(t)}}}f({w_{t}})\right\|^{2}+2 \mathbb{E}\left\|\zeta_{t}^{{\psi(t)}}\right\|^{2}}\end{array}
\end{equation}
where (a) follows from $\|a+b\|^2 \leq 2\|a\|^2+ 2\|b\|^2$ and $\mathbb{E}\left[\zeta_{t}^{{\psi(t)}}\right]=\nabla_{\mathcal{G}_{{\psi(t)}}} f\left(w_{t}\right)- \frac{1}{n}\sum_{i=1}^{n}\alpha_i^{t,\psi(t)}$, and (b) follows from $\mathbb{E}\left\|\zeta_{t}^{{\psi(t)}}-\mathbb{E}\left[\zeta_{t}^{{\psi(t)}}\right]\right\|^{2}\leq \mathbb{E} \|\zeta_t^{{\psi(t)}}\|^2$. Then we have
\begin{align}\label{ncsaga-26}
&{\mathbb{E}\left\|v_{t}^{{\psi(t)}}\right\|^{2}}
\nonumber \\
&\leq 2 \mathbb{E}\left\|\nabla_{\mathcal{G}_{{\psi(t)}}} f\left(w_{t}\right)\right\|^{2}+{2} \mathbb{E}\left\|{\alpha}_{i_t}^{t,\psi(t)}-\nabla_{\mathcal{G}_{{\psi(t)}}}f_{i_t}({w_t})\right\|^{2}
\nonumber \\
&\stackrel{(a)}{\leq} 2\mathbb{E}\left\|\nabla_{\mathcal{G}_{{\psi(t)}}} f\left(w_{t}\right)\right\|^{2}+ 2\left ( 1 -\frac{1}{n} \right )^{\phi(t)-1} \mathbb{E} \left \| \nabla_{\mathcal{G}_{\psi(t)}} f_i({w}_{{0}}) - \nabla_{\mathcal{G}_{\psi(t)}} f_i(w_{t}) \right \|^2
\nonumber\\ &\quad + 2\frac{1}{n} \sum_{t'=1}^{\phi(t)-1} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \mathbb{E} \left \| \nabla_{\mathcal{G}_{\psi(t)}} f_i({w}_{{\xi(t',\psi(t))}}) - \nabla_{\mathcal{G}_{\psi(t)}} f_i(w_{t}) \right \|^2 \nonumber \\
&\stackrel{(b)}{\leq} 2\mathbb{E}\left\|\nabla_{\mathcal{G}_{{\psi(t)}}} f\left(w_{t}\right)\right\|^{2}
+ 2L^2\left ( 1 -\frac{1}{n} \right )^{\phi(t)-1} \mathbb{E} \left \|{w}_{{0}} - w_{t} \right \|^2
\nonumber\\
&\quad + 2L^2\frac{1}{n} \sum_{t'=1}^{\phi(t)-1} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \mathbb{E} \left \| {w}_{{\xi(t',\psi(t))}} - w_{t} \right \|^2 \nonumber \\
&\stackrel{(c)}{\leq} 2\mathbb{E}\left\|\nabla_{\mathcal{G}_{{\psi(t)}}} f\left(w_{t}\right)\right\|^{2}
+ 2L^2\left ( 1 -\frac{1}{n} \right )^{\phi(t)-1} \mathbb{E} \left \|{w}_{{0}} - w_{t} \right \|^2
\nonumber\\
&\quad + 2L^2\frac{1}{n} \sum_{t'=1}^{\phi(t)-1} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \left(2\mathbb{E} \| {w}_{{\xi(t',\psi(t))}} - w_0\|^2 + 2\mathbb{E}\| w_0- w_{t} \|^2\right)
\nonumber \\
& \stackrel{(d)}{\leq}2\mathbb{E}\left\|\nabla_{\mathcal{G}_{{\psi(t)}}} f\left(w_{t}\right)\right\|^{2}
+ 6L^2\mathbb{E}\| w_0- w_{t} \|^2
+ 4L^2\frac{1}{n} \sum_{t'=1}^{\phi(t)-1} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \mathbb{E} \| {w}_{{\xi(t',\psi(t))}} - w_0\|^2
\end{align}
where (a) follows from Lemma \ref{lem-csaga-1}, (b) follows from Assumption~\ref{assum2}, (c) follows from $\|a+b\|^2\leq 2\|a\|^2 + 2\|b\|^2$, and (d) follows from $\sum_{t'=1}^{\phi(t)-1} \frac{1}{n}\left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1}<1$ and $\left ( 1 -\frac{1}{n} \right )^{\phi(t)-1}<1$.
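For completeness, the geometric-sum bound invoked in (d) (and again in (\ref{ncsaga-27}) below) can be checked directly; this elementary computation is ours:
\begin{equation*}
\sum_{t'=1}^{\phi(t)-1}\frac{1}{n}\left(1-\frac{1}{n}\right)^{\phi(t)-t'-1}
=\frac{1}{n}\sum_{j=0}^{\phi(t)-2}\left(1-\frac{1}{n}\right)^{j}
=1-\left(1-\frac{1}{n}\right)^{\phi(t)-1}<1.
\end{equation*}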
Moreover, there is
\begin{align}\label{ncsaga-27}
\sum_{t=0}^{S-1}\frac{1}{n} \sum_{t'=1}^{\phi(t)-1} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \mathbb{E} \| {w}_{{\xi(t',\psi(t))}} - w_0\|^2 & \leq \sum_{t=0}^{S-1} \mathbb{E} \| {w}_{t} - w_0\|^2
\end{align}
which follows from $\frac{1}{n} \sum_{t'=1}^{\phi(t)-1} \left ( 1 -\frac{1}{n} \right )^{\phi(t)-t'-1} \leq 1$. As for $\mathbb{E}\| w_0- w_{t} \|^2 $, we have
\begin{align}\label{ncsaga-28}
\mathbb{E}\| w_0- w_{t+1} \|^2 & = \mathbb{E}\| w_0- w_t + w_t -w_{t+1} \|^2
\nonumber \\
& = \mathbb{E}\| w_0- w_t\|^2 + \mathbb{E}\|w_t -w_{t+1} \|^2 +2\mathbb{E}\left<w_0- w_t ,w_t -w_{t+1}\right>
\nonumber \\
& = \mathbb{E}\| w_0- w_t\|^2 + \mathbb{E}\|w_t -w_{t+1} \|^2 +2\gamma\mathbb{E}\left<w_0- w_t ,\widetilde{\nabla}_{\mathcal{G}_{{\psi(t)}}}\right>
\nonumber \\
& \leq \mathbb{E}\| w_0- w_t\|^2 + \mathbb{E}\|w_t -w_{t+1} \|^2 +2\gamma(\frac{1}{2\beta_t}\mathbb{E}\|\widetilde{\nabla}_{\mathcal{G}_{{\psi(t)}}}\|^2 + \frac{\beta_t}{2}\mathbb{E}\| w_0- w_t\|^2)
\nonumber \\
& =(1 + \gamma \beta_t) \mathbb{E}\| w_0- w_t\|^2 + \gamma^2\mathbb{E}\|\widetilde{v}^{{\psi(t)}}_t \|^2 + \frac{\gamma}{\beta_t}\mathbb{E}\|\widetilde{\nabla}_{\mathcal{G}_{{\psi(t)}}}\|^2
\end{align}
\begin{proof}[\bf{Proof of Theorem \ref{thm-saganonconvex}}]
First, we upper bound $\mathbb{E}f(w_{t+1}) $ for $t =0,\cdots,S-1$:
\begin{align}\label{ncsaga-29}
\mathbb{E} f(w_{t+1})
& \stackrel{(a)}{\leq}\mathbb{E}\left[ f(w_t) + \left< \nabla f(w_t) , w_{t+1}-w_t \right> + \frac{L}{2} || w_{t+1}- w_t||^2 \right]
\nonumber \\
&= \mathbb{E} f(w_t) - \gamma\mathbb{E}\left< \nabla f(w_t), \widetilde{\nabla}_{\mathcal{G}_{{\psi(t)}},i_t} \right> + \frac{\gamma^2 L}{2} \mathbb{E} ||\widetilde{v}^{{{\psi(t)}}}_t||^2
\nonumber \\
&\stackrel{(b)}{=} \mathbb{E} f(w_t) - \frac{\gamma}{2} \mathbb{E}\biggl[ ||\nabla_{\mathcal{G}_{{\psi(t)}}} f(w_t)||^2 + || \widetilde{\nabla}_{\mathcal{G}_{{\psi(t)}}} ||^2
\nonumber \\
&- ||\nabla_{\mathcal{G}_{{\psi(t)}}} f(w_t) - \widetilde{\nabla}_{\mathcal{G}_{{\psi(t)}}} ||^2 \biggr] + \frac{\gamma^2 L_{*}}{2} \mathbb{E} ||\widetilde{v}^{{{\psi(t)}}}_t||^2
\end{align}
where (a) follows from Assumption~\ref{assum1}, and (b) follows from the identity $2\left<a,b\right>=\|a\|^2+\|b\|^2-\|a-b\|^2$.
Next, we give the upper bound of the term $\mathbb{E}||\nabla_{\mathcal{G}_{{\psi(t)}}} f(w_t) - \widetilde{\nabla}_{\mathcal{G}_{{\psi(t)}}} ||^2$:
\begin{eqnarray}\label{ncsaga-30}
\mathbb{E}||\nabla_{\mathcal{G}_{\psi(t)}} f(w_t) - \widetilde{\nabla}_{\mathcal{G}_{\psi(t)}}^s ||^2 \leq 2{L_{{*}}^2 \gamma^2 \tau_1} \sum_{u' \in D(t)} \mathbb{E} \| \widetilde{v}^{\psi(u')}_{u'} \|^2
+ 8{ L_{{*}}^2 \gamma^2 \tau_2} \sum_{u' \in D'(t)} \mathbb{E} \| \widetilde{v}^{\psi(u')}_{u'} \|^2.
\end{eqnarray}
The above result can be obtained by following the analysis of Lemma~\ref{lem-csgd-2}. From Eqs.~(\ref{ncsaga-29}) and (\ref{ncsaga-30}), it is easy to derive the following inequality:
\begin{eqnarray}\label{ncsaga-31}
\mathbb{E} f(w_{t+1}) &\leq& \mathbb{E}f(w_t) - \frac{\gamma}{2} \mathbb{E}
||\nabla_{\mathcal{G}_{{\psi(t)}}} f(w_t)||^2 - \frac{\gamma}{2} \mathbb{E} || \widetilde{\nabla}_{\mathcal{G}_{{\psi(t)}}} ||^2 + \frac{\gamma^2L}{2} \mathbb{E} || \widetilde{v}^{{{\psi(t)}}}_t||^2 \nonumber \\
&+& {{L_{*}^2\gamma^3 }} \left(\tau_1 \sum_{t' \in D(t)} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2 + 4\tau_2 \sum_{t' \in D^\prime(t)} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2 \right)
\end{eqnarray}
Here, we define a Lyapunov function:
\begin{eqnarray}\label{ncsaga-32}
R_t = \mathbb{E} f(w_{t}) + c_t \mathbb{E} \|w_0-w_t\|^2 \,.
\end{eqnarray}
From the definition of the Lyapunov function (\ref{ncsaga-32}) and Eq.~(\ref{ncsaga-31}), we have:
\begin{align}\label{ncsaga-33}
R_{t+1}
=& \mathbb{E}\left[ f(w_{t+1}) + c_{t+1}\mathbb{E} \|w_0-w_{t+1}\|^2 \right]
\nonumber \\
\leq& \mathbb{E}f(w_t) - \frac{\gamma}{2} \mathbb{E}
||\nabla_{\mathcal{G}_{{\psi(t)}}} f(w_t)||^2 - \frac{\gamma}{2} \mathbb{E} || \widetilde{\nabla}_{\mathcal{G}_{{\psi(t)}},i_t}^s ||^2 + \frac{\gamma^2L}{2} \mathbb{E} || \widetilde{v}^{{{\psi(t)}}}_t||^2
\nonumber \\
&+ {{L_{*}^2\gamma^3 }} \left(\tau_1 \sum_{t' \in D(t)} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2 + 4 \tau_2 \sum_{t' \in D^\prime(t)} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2 \right)+ c_{t+1} \mathbb{E} \|w_0-w_{t+1}\|^2
\nonumber \\
\stackrel{(a)}{\leq}& \mathbb{E}f(w_t) - \frac{\gamma}{2} \mathbb{E}
||\nabla_{\mathcal{G}_{{\psi(t)}}} f(w_t)||^2 - \frac{\gamma}{2} \mathbb{E} || \widetilde{\nabla}_{\mathcal{G}_{{\psi(t)}}} ||^2 + \frac{\gamma^2L}{2} \mathbb{E} || \widetilde{v}^{{{\psi(t)}}}_t||^2
\nonumber \\
&+ {{L_{*}^2\gamma^3 }} \left(\tau_1 \sum_{t' \in D(t)} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2 + 4\tau_2 \sum_{t' \in D^\prime(t)} \mathbb{E} \| \widetilde{v}^{\psi(t')}_{t'} \|^2 \right)
\nonumber \\
&+ c_{t+1}\left((1+{\gamma\beta_t})\mathbb{E} \|w_0-w_{t}\|^2
+\frac{\gamma }{\beta_t}\mathbb{E} \|\widetilde{\nabla}_{\mathcal{G}_{{\psi(t)}}}\|^2 + \gamma^2\mathbb{E} \|\widetilde{v}^{{\psi(t)}}_t \|^2\right)
\end{align}
where (a) follows from Eq.~\ref{ncsaga-28}.
Summing the above inequality over all iterations, we have:
\begin{align}\label{ncsaga-34}
\sum\limits_{t=0}^{S-1}R_{{{t}}+1}
&= \sum\limits_{t=0}^{S-1}\left( \mathbb{E} f(w_{{t}}) - \frac{\gamma}{2 } \mathbb{E} ||\nabla_{\mathcal{G}_{\psi({t})}} f(w_{{t}})||^2
- (\frac{\gamma}{2 }-\frac{\gamma c_{{t}+1} }{\beta_{t}})\mathbb{E} || \widetilde{\nabla}_{\mathcal{G}_{{\psi(t)}}}||^2 \right)
\nonumber \\
& \quad + \sum\limits_{t=0}^{S-1}\biggl( {{L_{*}^2\gamma^3 (\tau_1^2 +4 \tau_2^2)}} \mathbb{E} \| \widetilde{v}^{\psi(t)}_{t} \|^2
+ \frac{\gamma^2L}{2} \mathbb{E} || \widetilde{v}^{{{\psi(t)}}}_t||^2
\nonumber\\
&\quad + c_{{t}+1}\gamma^2\mathbb{E}\|\widetilde{v}^{{\psi(t)}}_{t} \|^2
+ c_{{t}+1}(1+{\gamma\beta_{t}})\mathbb{E} \|w_0-w_{t}\|^2\biggr)
\nonumber\\
&\stackrel{(a)}{\leq} \sum\limits_{t=0}^{S-1}\left( \mathbb{E} f(w_{{t}}) - \frac{\gamma}{2 } \mathbb{E} ||\nabla_{\mathcal{G}_{\psi({t})}} f(w_{{t}})||^2
- (\frac{\gamma}{2 }-\frac{\gamma c_{{t}+1} }{\beta_{t}})\mathbb{E} || \widetilde{\nabla}_{\mathcal{G}_{{\psi(t)}}}||^2 \right)
\nonumber \\
& \quad + \sum\limits_{t=0}^{S-1}\biggl( \left( {5{L_{*}^2\gamma^3 \tau + \frac{\gamma^2L}{2} + c_{{t}+1}\gamma^2 }}\right) \mathbb{E} \| \widetilde{v}^{\psi(t)}_{t} \|^2
+ c_{{t}+1}(1+{\gamma\beta_{t}})\mathbb{E} \|w_0-w_{t}\|^2\biggr)
\nonumber\\
&\stackrel{(b)}{\leq} \sum\limits_{t=0}^{S-1}\left( \mathbb{E} f(w_{{t}})
- \frac{\gamma}{2 } \mathbb{E} ||\nabla_{\mathcal{G}_{\psi({t})}} f(w_{{t}})||^2
- (\frac{\gamma}{2 }-\frac{\gamma c_{{t}+1} }{\beta_{t}})\mathbb{E} || \widetilde{\nabla}_{\mathcal{G}_{{\psi(t)}}}||^2 \right)
\nonumber \\
&\quad + \sum\limits_{t=0}^{S-1}\lambda_{\gamma}\lambda_{{t}} \left( \mathbb{E}\left\|\nabla_{\mathcal{G}_{\psi(t)}} f\left(w_{{t}}\right)\right\|^{2}
+ 5L_{*}^2 \|w_0-w_{t}\|^2\right)+ \sum\limits_{t=0}^{S-1}c_{{t}+1}(1+{\gamma\beta_{t}})\mathbb{E} \|w_0-w_{t}\|^2
\nonumber\\
&= \sum\limits_{t=0}^{S-1} \left( \mathbb{E} f(w_{{t}}) +\left(c_{{t}+1} (1+{\gamma\beta})
+ 5L_{*}^2\lambda_{\gamma}\lambda_{{t}}\right) \|w_0-w_{t}\|^2 \right)
\nonumber \\
& \quad - \sum\limits_{t=0}^{S-1} (\frac{\gamma}{2}-\lambda_{\gamma}\lambda_{{t}}) \mathbb{E}\left\|\nabla_{\mathcal{G}_{\psi({t})}} f\left(w_{{t}}\right)\right\|^{2}
- \sum\limits_{t=0}^{S-1} (\frac{\gamma}{2 }-\frac{\gamma c_{{t}+1} }{\beta_{t}})\mathbb{E} || \widetilde{\nabla}_{\mathcal{G}_{{\psi(t)}}}||^2
\nonumber\\
&\stackrel{(c)}{\leq} \sum\limits_{t=0}^{S-1} \left( \mathbb{E} f(w_{{t}}) +\left(c_{{t}+1} (1+{\gamma\beta})+ 5L_{*}^2\lambda_{\gamma}\lambda_{{t}}\right)\|w_0-w_{t}\|^2 \right)
- \sum\limits_{t=0}^{S-1} (\frac{\gamma}{2}-\lambda_{\gamma}\lambda_{{t}}) \mathbb{E}\left\|\nabla_{\mathcal{G}_{\psi({t})}} f\left(w_{{t}}\right)\right\|^{2}
\end{align}
where (a) follows from the definition of $\tau$, (b) uses Eqs.~(\ref{ncsaga-26}) and (\ref{ncsaga-27}), the quantity $\lambda_{t} ={ 10 L_{*}^2 \gamma^3} \tau + {\gamma^2L} + 2c_{{t}+1}\gamma^2 $, and the definition of $L_{*}$, and (c) follows from assuming $\frac{\gamma}{2 }-\frac{\gamma c_{t+1} }{\beta_{t}}>0$.
Similar to the proof of Theorem~\ref{thm-svrgnonconvex}, we have
\begin{align}\label{ncsaga-35}
&\sum_{u \in \mathcal{A}(S)}\sum\limits_{t\in K'(u)} \mathbb{E} \| \nabla_{\mathcal{G}_{\psi(t)}} f({w}_{{u_0}})\|^2
\\
&\stackrel{(a)}{\leq} 2 \lambda_{\gamma}L_*^2 \gamma^2 \tau_1^2 \sum_{u \in \mathcal{A}(S)}\sum\limits_{t\in K'(u)} \mathbb{E} \|{v}_{t}^{\psi(t)}\|^2
+ 2 \sum_{u \in \mathcal{A}(S)}\sum\limits_{t\in K'(u)} \mathbb{E} \|\nabla_{\mathcal{G}_{\psi(t)}} f({w}_{t})\|^2
\nonumber \\
& \stackrel{(b)}{\leq} 20 \lambda_{\gamma}L_*^4 \gamma^2 \tau \sum_{u \in \mathcal{A}(S)}\sum\limits_{t\in K'(u)} \mathbb{E}\|w_0-w_{t}\|^2
+ (2 + 4 \lambda_{\gamma}L_*^2 \gamma^2 \tau) \sum_{u \in \mathcal{A}(S)}\sum\limits_{t\in K'(u)} \mathbb{E} \|\nabla_{\mathcal{G}_{\psi(t)}} f({w}_{t})\|^2 \nonumber
\end{align}
where ${u_0}$ denotes the start global iteration of epoch $u$, (a) follows from Eq. \ref{ncsaga-23}, (b) follows from Eqs.~\ref{ncsaga-26} and \ref{ncsaga-27}. This implies that
\begin{align}\label{ncsaga-36}
&\frac{\frac{\gamma}{2 } -\lambda_{\gamma}\lambda_{t}}{2 + 4 \lambda_{\gamma}L_*^4 \gamma^2 \tau}\sum_{u \in \mathcal{A}(S)}\sum\limits_{t\in K'(u)} \mathbb{E} \| \nabla_{\mathcal{G}_{\psi(t)}} f({w}_{{u_0}})\|^2
\\
& \stackrel{(a)}{\leq} 5L_*^2 (\frac{\gamma}{2 } -\lambda_{\gamma}\lambda_{t} ) \sum_{u \in \mathcal{A}(S)}\sum\limits_{t\in K'(u)} \mathbb{E}\|w_0-w_{t}\|^2
+ (\frac{\gamma}{2 } -\lambda_{\gamma}\lambda_{t}) \sum_{u \in \mathcal{A}(S)}\sum\limits_{t\in K'(u)} \mathbb{E} \|\nabla_{\mathcal{G}_{\psi(t)}} f({w}_{t})\|^2 \nonumber
\end{align}
where (a) follows from the definition of $L_*$. Combining Eq.~\ref{ncsaga-36} with \ref{ncsaga-34} we have
\begin{align}\label{saga-30}
& \frac{\frac{\gamma}{2 } -\lambda_{\gamma}\lambda_{t}}{2 + 4 \lambda_{\gamma}L_*^4 \gamma^2 \tau} \sum_{u \in \mathcal{A}(S)}\sum\limits_{t\in K'(u)} \mathbb{E} \| \nabla_{\mathcal{G}_{\psi(t)}} f({w}_{{u_0}})\|^2
\nonumber \\
& \stackrel{(a)}{\leq} 5L_*^2 (\frac{\gamma}{2 } -\lambda_{\gamma}\lambda_{t} ) \sum_{u \in \mathcal{A}(S)}\sum\limits_{t\in K'(u)} \mathbb{E} \|w_0-w_{t}\|^2
+ \sum_{u \in \mathcal{A}(S)}\sum\limits_{t\in K'(u)} R_{{t}+1}
\nonumber \\
& + \sum_{u \in \mathcal{A}(S)}\sum\limits_{t\in K'(u)} \left( \mathbb{E} f(w_{{t}}) +\left(c_{{t}+1} (1+{\gamma\beta})
+ 5L_{*}^2\lambda_{\gamma}\lambda_{{t}}\right)\mathbb{E} \|w_0-w_{t}\|^2 \right)
\nonumber \\
& = \sum_{u \in \mathcal{A}(S)}\sum\limits_{t\in K'(u)} R_{{t}+1}
+ \sum_{u \in \mathcal{A}(S)}\sum\limits_{t\in K'(u)} \left( \mathbb{E} f(w_{{t}}) +\left(c_{{t}+1}(1+{\gamma\beta})
+ \frac{5}{2}{\gamma L_*^2}\right)\mathbb{E} \|w_0-w_{t}\|^2 \right)
\end{align}
Rearranging Eq.~(\ref{saga-30}), we have
\begin{align}\label{saga-31}
\sum_{u \in \mathcal{A}(S)}\sum\limits_{t\in K'(u)} R_{{t}+1}
&\leq
\sum_{u \in \mathcal{A}(S)}\sum\limits_{t\in K'(u)} \left( \mathbb{E} f(w_{{t}}) +\left(c_{{t}+1} (1+{\gamma\beta})
+ \frac{5}{2}{\gamma L_*^2}\right)\mathbb{E} \|w_0-w_{t}\|^2 \right)
\nonumber \\
& - \frac{\frac{\gamma}{2 } -\lambda_{\gamma}\lambda_{t}}{2 + 4 \lambda_{\gamma}L_*^4 \gamma^2 \tau}\sum_{u \in \mathcal{A}(S)}\sum\limits_{t\in K'(u)} \mathbb{E} \| \nabla_{\mathcal{G}_{\psi(t)}} f({w}_{{u_0}})\|^2
\nonumber \\
&=\sum_{u \in \mathcal{A}(S)}\sum\limits_{t\in K'(u)} R_{{t}}
- \sum_{u \in \mathcal{A}(S)}\sum\limits_{t\in K'(u)} \Gamma_t \mathbb{E} \| \nabla_{\mathcal{G}_{\psi(t)}} f({w}_{{u_0}})\|^2
\end{align}
where
\begin{align}\label{saga-32}
c_{t} & = c_{{t}+1}\left(1 + \gamma \beta_{t}\right)
+ \frac{5}{2}{\gamma L_*^2}
\end{align}
and
\begin{eqnarray}\label{saga-33}
\Gamma_{t} = \frac{ \frac{\gamma}{2} - \frac{2}{1 - 180L_{*}^2\gamma^2\tau}( 10L_{*}^2 \gamma^3 \tau + {\gamma^2L_{*}} + 2c_{t+1}\gamma^2)} {2 + 4 \lambda_{\gamma}L_*^4 \gamma^2 \tau}
\end{eqnarray}
Let $\bar{S}$ be the subscript of the final global iteration and set $\{ c_{t}\}_{t=\bar{S}} = 0$; with $w_0$ the initial point and $w^*$ an optimal solution, we have
\begin{eqnarray}\label{SAGA-34}
\frac{1}{S}\sum_{u \in \mathcal{A}(S)}\sum\limits_{t\in K'(u)} \mathbb{E} \| \nabla_{\mathcal{G}_{\psi(t)}} f({w}_{{u_0}})\|^2
\leq \frac{\mathbb{E}\left[ f( w_0) - f(w^{*}) \right] }{S \Gamma_*}
\end{eqnarray}
where $u_0$ denotes the start global iteration of epoch $u$ and $\Gamma_* = \min_t \{\Gamma_t\}$.
To prove Theorem~\ref{thm-saganonconvex}, set $\{c_{t}\}_{t=S-1} = 0$,
$ \gamma = \frac{m_0}{L_{*}n^\alpha}$ and $\beta_t = \beta = {4L_{*}}$, where $0<m_0<1$ and $0<\alpha<1$. Then
\begin{align}\label{saga-35}
\theta = \gamma \beta_{t}= \frac{4m_0}{n^{{\alpha}}}
\end{align}
Then following the analysis of Eq. \ref{ncsvrg-30}, we have that the total epoch number $T$ should satisfy $T\leq \lfloor \frac{n^{{\alpha}}}{4m_0} \rfloor$.
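Concretely, mirroring the unrolling used for (\ref{ncsvrg-30}) but with the SAGA recurrence (\ref{saga-32}), boundary condition $c_T=0$, and $\theta=\frac{4m_0}{n^\alpha}$ (this computation is ours), we obtain
\begin{equation*}
c_0=\frac{5\gamma L_*^2}{2}\cdot\frac{(1+\theta)^{T}-1}{\theta}
=\frac{5L_*}{8}\left((1+\theta)^{T}-1\right)\leq\frac{5L_*}{8}(e-1)<\frac{5L_*}{4}\leq 2L_*,
\end{equation*}
so that $\gamma^2L_*+2c_0\gamma^2\leq 5L_*\gamma^2=\frac{5m_0}{n^\alpha}\gamma$, which is the bound used in the estimate of $\Gamma_*$ below.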
\begin{eqnarray}
\Gamma_* &=& \min_t \Gamma_t \nonumber \\
&\stackrel{(a)}{\geq} & \frac{ \frac{\gamma}{2} - \frac{2}{1 - 180L_{*}^2\gamma^2\tau}( 10L_{*}^2 \gamma^3 \tau + {\gamma^2L_{*}} + 2c_{0}\gamma^2)} {2 + 4 \lambda_{\gamma}L_*^4 \gamma^2 \tau}
\nonumber \\
& =&\frac{ \frac{\gamma}{2} - \frac{2n^{2\alpha}}{n^{2\alpha} - 180m_0^2\tau}
( \frac{10m_0^2\tau}{n^{2\alpha}} + \frac{5m_0}{n^\alpha})\gamma}
{2 + \frac{8L_*^2m_0^2\tau}{n^{2\alpha}-180m_0^2\tau}}
\nonumber \\
&\stackrel{(b)}{\geq}& \frac{\left( \frac{1}{2}-(20m_0^2\tau + 10m_0)\right)\gamma}{2 + {8L_*^2m_0^2\tau}}
\nonumber \\
&\stackrel{(c)}{\geq}& \frac{\sigma }{L_{*}n^{\alpha}}
\end{eqnarray}
where (a) follows from $c_0=\max_t\{c_t\}$, (b) follows from $n^{{\alpha}} \leq n^{2\alpha} - 180m_0^2 \tau$ (which is satisfied when $n^{\alpha} \geq \frac{1 + \sqrt{1+720m_0^2\tau}}{2}$, and this is easy to satisfy when $n$ is large) and $n^\alpha > 1$, and (c) holds provided $\frac{1}{2} > 20m_0^2\tau + 10m_0$, where $\sigma$ is a small constant independent of $n$.
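As an illustrative numerical check (the value $m_0=0.03$ is our own choice, not part of the theorem), condition (c) reads
\begin{equation*}
20m_0^2\tau+10m_0<\frac{1}{2}\quad\Longleftrightarrow\quad \tau<\frac{1-20m_0}{40m_0^2}\;\stackrel{m_0=0.03}{=}\;\frac{0.4}{0.036}\approx 11.1,
\end{equation*}
so moderate delays $\tau$ remain admissible for small $m_0$.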
Based on the above analyses, we have the conclusion:
\begin{eqnarray} \label{ncsaga-final}
\frac{1}{T}\sum\limits_{t=0}^{T-1}\mathbb{E} ||\nabla f(w_{t_0})||^2 \leq \frac{L_{*}n^{\alpha}\mathbb{E}\left[ f( w_{0}) - f( w^{*}) \right] }{T \sigma }
\end{eqnarray}
where $T$ denotes the total number of epochs and $t_0$ is the start iteration of epoch $t$.
This completes the proof.
\end{proof}
\end{document}
\begin{document}
\title[Braid representations from quantum groups]
{Braid representations from quantum groups of exceptional Lie type}
\author{Eric C. Rowell}
\address{Department of Mathematics,
Texas A\&M University, College Station, TX 77843, USA}
\email{[email protected]}
\thanks{The author is partially supported by NSA grant H98230-08-1-0020. Thanks go to Hans Wenzl and Matt Papanikolas for useful discussions.
}
\begin{abstract}
We study the problem of determining if the braid group representations obtained from quantum groups of types $E, F$ and $G$ at roots of unity have infinite image or not. In particular we show that when the fusion categories associated with these quantum groups are not weakly integral, the braid group images are infinite. This provides further evidence for a recent conjecture that weak integrality is necessary and sufficient for the braid group representations associated with any braided fusion category to have finite image.
\end{abstract}
\maketitle
\section{Introduction}
The $n$-strand braid group $\mathcal{B}_n$ is defined abstractly by generators $\sigma_1,\sigma_2,\ldots,\sigma_{n-1}$
satisfying relations:
\begin{enumerate}
\item[(R1)] $\sigma_i\sigma_{i+1}\sigma_i=\sigma_{i+1}\sigma_i\sigma_{i+1}$, $1\leq i\leq n-2$
\item[(R2)] $\sigma_{j}\sigma_i=\sigma_{i}\sigma_j$, $|i-j|>1$.
\end{enumerate}
Given an object $X$ in a braided fusion category $\mathcal{C}$ the braiding $c_{X,X}\in\mathrm{End}(X^{\otimes 2})$ allows one to construct a family of braid group representations via the homomorphism $\mathbb C\mathcal{B}_n\rightarrow \mathrm{End}(X^{\otimes n})$ defined on the braid group generators $\sigma_i$ by
$$\sigma_i\rightarrow Id_X^{\otimes i-1}\otimes c_{X,X}\otimes Id_X^{\otimes n-i-1}.$$ For any simple object $Y\subset X^{\otimes n}$ the simple $\mathrm{End}(X^{\otimes n})$-modules $\Hom(Y,X^{\otimes n})$ become $\mathcal{B}_n$ representations (although not necessarily irreducible).
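For context (our own remark, standard in the theory of braided categories): relation (R1) is satisfied by these operators because the braiding obeys the Yang--Baxter equation on $X^{\otimes 3}$,
\begin{equation*}
(c_{X,X}\otimes \mathrm{id}_X)(\mathrm{id}_X\otimes c_{X,X})(c_{X,X}\otimes \mathrm{id}_X)
=(\mathrm{id}_X\otimes c_{X,X})(c_{X,X}\otimes \mathrm{id}_X)(\mathrm{id}_X\otimes c_{X,X}),
\end{equation*}
while relation (R2) holds since the two operators act on disjoint tensor factors.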
In this paper we consider the problem of determining when the images of these representations are finite groups.
We will say a category $\mathcal{C}$ has \textit{property $\mathbf{F}$} if, for all objects $X$ and all $n$, the corresponding braid group representations factor over finite groups. For example, the representation category $\mathrm{Rep}(D^\omega G)$ of the twisted double of a finite group $G$ always has property $\mathbf{F}$ \cite{ERW}. Various cases related to Hecke- and BMW-algebras have been studied in the literature, see \cite{FRW,FLW,jones86,jonescmp,LRW}. Recent interest in this question has come from the study of topological quantum computation, see \cite{FLW} for particulars.
For any object $X$ in a fusion category one defines the \emph{FP-dimension} ${\rm FPdim}(X)$ (see \cite{ENO}) to be the largest (necessarily real) eigenvalue of the matrix representing $X$ in the left-regular representation of the Grothendieck semiring. That is, ${\rm FPdim}(X)$ is the largest eigenvalue of the matrix whose $(i,j)$th entry is the multiplicity of the simple object $X_j$ in $X\otimes X_i$. A fusion category is said to be \emph{integral} if ${\rm FPdim}(X)\in\mathbb N$ for every object, and \emph{weakly integral} if ${\rm FPdim}(X_i)^2\in\mathbb N$ for any simple object $X_i$, or, equivalently, if $\sum_i ({\rm FPdim}(X_i))^2\in\mathbb N$ where the sum is over (representatives of isomorphism classes of) simple objects.
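As a standard illustration of these notions (included for the reader's convenience; it is not used later): the Fibonacci category has simple objects $\mathbf{1}$ and $\tau$ with $\tau\otimes\tau\cong\mathbf{1}\oplus\tau$, so that
\begin{equation*}
{\rm FPdim}(\tau)=\frac{1+\sqrt{5}}{2},\qquad {\rm FPdim}(\tau)^2=\frac{3+\sqrt{5}}{2}\notin\mathbb N,
\end{equation*}
and this category is therefore neither integral nor weakly integral.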
Empirical evidence (from the papers mentioned above, for example) partially motivates (see also \cite[Section 6]{RSW} and \cite{NR}):
\begin{conj}\label{propfconj}
A braided fusion category $\mathcal{C}$ has property $\mathbf{F}$ if, and only if, the Frobenius-Perron dimension ${\rm FPdim}(\mathcal{C})$ of $\mathcal{C}$ is an integer, (\textit{i.e.} $\mathcal{C}$ is \emph{weakly integral}).
\end{conj}
For example this conjecture predicts that the braid group images arising from any finite dimensional semisimple quasi-triangular quasi-Hopf algebra must be finite groups.
The goal of this paper is to verify one direction of this conjecture for a large class of braided fusion categories--namely, we will show that if $\mathcal{C}$ is a non-weakly-integral braided fusion category associated to a quantum group of exceptional Lie type at a root of unity then the braid group images are infinite. For classical Lie types $A,B,C$ and $D$ results of this type have already appeared in \cite{FLW} and \cite{LRW}.
The body of the paper is organized into four sections. In Section \ref{2:qgcats} we outline the necessary background for the problem. Section \ref{3:wiclass} classifies the categories $\mathcal{C}$ associated with exceptional-type quantum groups which are weakly integral and in Section \ref{4:noF} we show that property $\mathbf{F}$ fails in the non-weakly-integral cases. We make some final remarks about the problem in Section \ref{5:conclusions}.
\section{Preliminaries}\label{2:qgcats}
Associated to any semisimple finite dimensional Lie algebra $\mathfrak{g}$ and a complex number $q$ such that $q^2$ is a primitive $\ell$th root
of unity is a ribbon fusion category $\mathcal{C}(\mathfrak{g},q,\ell)$. The construction is essentially due to Andersen \cite{andersen} and his collaborators. We refer the
reader to the survey paper \cite{Survey} and the texts \cite{BK} and \cite{Tur} for a more complete treatment.
We shall denote $q$-numbers in the standard way, i.e. $[n]:=\frac{q^n-q^{-n}}{q-q^{-1}}$. Let $\Phi_+$ denote the
positive roots for the system of type $\mathfrak{g}$ equipped with the form $\langle-,-\rangle$ normalized so that $\langle \alpha,\alpha\rangle=2$ for short roots, and $\check{\Phi}_+$ denote the positive coroots. The dominant Weyl alcove will be denoted $P_+$ and we will denote the fundamental weights by $\lambda_i$. Define $m$ to be the ratio of the squared-lengths of a long root and a short root so that $m=1$ for Lie type $E$, $m=2$ for Lie type $F_4$ and $m=3$ for Lie type $G_2$. Let $\vartheta_0$ be the highest root and $\vartheta_1$ be the highest short root. The simple objects in $\mathcal{C}(\mathfrak{g},q,\ell)$ are labeled by:
$$C_\ell(\mathfrak{g}):=\begin{cases}\{\lambda\in P_+: \langle\lambda+\rho,\vartheta_0\rangle<\ell\} &
\text{if $m\mid \ell$}\\
\{\lambda\in P_+: \langle\lambda+\rho,\vartheta_1\rangle<\ell\} & \text{if $m\nmid
\ell$}\end{cases}$$
In this notation the $FP$-dimension for the simple object in $\mathcal{C}(\mathfrak{g},q,\ell)$ labelled by highest weight $\lambda$ is obtained by setting
$q=e^{\pi i/\ell}$ in:
$$
{\rm FPdim}(V_{\lambda}):=\begin{cases} \prod_{\alpha\in\Phi_+}\frac{[\langle \lambda+\rho,\alpha\rangle]}{[\langle \rho,\alpha\rangle]}, & m\mid\ell\\
\prod_{\check{\alpha}\in\check{\Phi}_+}\frac{[\langle \lambda+\rho,\check{\alpha}\rangle]}{[\langle \rho,\check{\alpha}\rangle]}, & m\nmid\ell\end{cases}
$$
The first case is well-known, see \cite{wenzlcstar} for a proof of positivity. The latter case is more recent, a proof is found in \cite{RJPAA}.
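For the reader's convenience we record (our own remark) that at the specialization $q=e^{\pi i/\ell}$ each $q$-number is a ratio of sines,
\begin{equation*}
[n]=\frac{q^{n}-q^{-n}}{q-q^{-1}}=\frac{\sin(n\pi/\ell)}{\sin(\pi/\ell)},
\end{equation*}
so the $FP$-dimensions above are finite products of such ratios.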
To deduce the infinitude of the braid group images we will employ the main result in \cite{RT} reproduced here for the reader's convenience. Let $\varphi:\mathcal{B}_3\rightarrow\GL(V)$ be a $d$-dimensional irreducible representation with $2\leq d\leq 5$ and set $A=\varphi(\sigma_1)$ and $B=\varphi(\sigma_2)$. Let $G$ denote the image $\varphi(\mathcal{B}_3)=\langle A,B\rangle$ i.e. the group generated by $A$ and $B$ and
define $S:=\mathrm{Spec}(A)=\mathrm{Spec}(B)$.
\begin{defn}
A linear group $\Gamma\subset\GL(V)$ is \emph{imprimitive} if $V$ is irreducible
and can be expressed as a direct sum of
subspaces $V_i$ which $\Gamma$ permutes nontrivially. Otherwise, we say that $\Gamma$ is \emph{primitive}.
\end{defn}
\begin{thm}[\cite{RT}]\label{rttheorem} Let $\varphi$, $G$, $A$ and $S$ be defined as
in the previous paragraph.
Let $S=\{\lambda_1,\ldots,\lambda_d\}$, and define the projective order of $A$ by
$$po(A):=\min\{t: (\lambda_1)^t=(\lambda_2)^t=\cdots=(\lambda_d)^t\}.$$ We use the convention
that each successive statement excludes the hypotheses of all
of the preceding cases.
\begin{enumerate}
\item[(a)] Suppose some $\lambda_i$ is not a root of unity, or $\lambda_i=\lambda_j$ for some $i\neq j$. Then $G$ is \textbf{infinite}.
\item[(b)] Suppose $po(A)\leq 5$. Then $G$ is \textbf{finite}.
\item[(c)] Suppose $G$ is imprimitive. Then $S$ is of the form:
\begin{enumerate}
\item[(i)] $\{\pm\chi,\alpha\}$ or $\chi\{1,\omega,\omega^2\}\cup\{\alpha\}$
with $\omega$ a primitive 3rd root of unity and $G$ is \textbf{finite} or
\item[(ii)] $\{\pm r,\pm s\}$. In this case if $u=r/s$
is a root of unity of order $o(u)\in \{7,8,9\}\cup[11,\infty)$ then $G$ is \textbf{infinite}, if $o(u)=6$, $G$ is \textbf{finite} and if $o(u)=5$ or $10$
one cannot decide $|G|$ without further information.
\end{enumerate}
\item[(d)] Suppose $G$ is primitive. Then:
\begin{enumerate}
\item[(i)] If $d=2$ then $G$ is \textbf{infinite}.
\item[(ii)] If $d=3$ and $po(A)\geq 8$ then $G$ is \textbf{infinite}. If $po(A)=7$, and $\frac{1}{\lambda_1}S$ is Galois conjugate to $\{1,e^{2\pi\im/7},e^{2k\pi\im/7}\}$ with $k$ even, $G$ is \textbf{infinite}, whereas if
$k$ is odd, $G$ is \textbf{finite}.
\item[(iii)] If $d=4$ and $po(A)\not\in\{6,\ldots,10,12,15,20,24\}$ then $G$ is \textbf{infinite}.
\item[(iv)] If $d=5$ and $po(A)\in\{7,8\}\cup[13,\infty)$ then $G$ is \textbf{infinite}.
\end{enumerate}
\end{enumerate}
\end{thm}
Notice that by (a) if $A$ is not diagonalizable then it has infinite order, so that in this case ``projective order" is a misnomer. Since we will be using this theorem to show the image of $\mathcal{B}_3$ is infinite, this should not cause any confusion.
In many cases irreducibility may be deduced from:
\begin{lem}[{\cite[Lemma 3.2]{TW}}]\label{twlemma} Suppose $Z$ is a simple self-dual object such that $Z^{\otimes 2}$ is isomorphic to $\bigoplus_{i=1}^d Y_i$ where the $Y_i$ are distinct simple objects and such that $\sigma_1$ acts on each $Y_i$ by distinct scalars. Then $\mathcal{B}_3$ acts irreducibly on the $d$-dimensional vector space $\Hom(Z,Z^{\otimes 3})$.
\end{lem}
In order to use these results we need the eigenvalues of the image of $\sigma_1$ at our disposal, from which the projective order of the image of $\sigma_1$ can be computed. These can be deduced from the quantum group via Reshetikhin's formula (see \cite[Corollary 2.22(3)]{LeducRam}):
Suppose that $V$ is an irreducible highest weight representation of $U_q\mathfrak{g}$ labeled by ${\lambda}$ and $W$ is a subrepresentation of $V\otimes V$ labeled by $\mu$. Assume further that $V^{\otimes 2}$ is multiplicity free.
\begin{equation}\label{eigs}
(c_{V,V})_{|_W}=\pm f(\lambda) q^{\langle\mu,\mu+2\rho\rangle/2}\mathbf{1}_W
\end{equation}
where $f(\lambda)$ is an overall scale factor that depends only on $\lambda$ and the sign is $+1$ if $W$ appears in the symmetrization of $V\otimes V$ and $-1$ if $W$ appears in the antisymmetrization of $V\otimes V$. Since the ribbon structure of $\mathcal{C}(\mathfrak{g},q,\ell)$ is inherited from that of $Rep(U_q\mathfrak{g})$, we may use eqn. (\ref{eigs}) to compute the eigenvalues of the action of the generators of $\mathcal{B}_3$ on $\operatorname{End}(V^{\otimes 3})$. The quantity $\theta_\mu:=q^{\langle\mu+2\rho,\mu\rangle}$ is the \emph{twist} corresponding to the object $W$. It follows from the axioms of a ribbon category that the scalar by which $(c_{V,V})^2$ acts on $\Hom(W,V^{\otimes 2})$ is $\theta_W/(\theta_V)^2$ for $W$ a simple subobject of $V\otimes V$ (with $V$ simple and $V\otimes V$ multiplicity free). For a general ribbon category the objects carry no additional structure, so determining the signs of the eigenvalues of $c_{V,V}$ is a bit more delicate.
\section{Weak integrality}\label{3:wiclass}
In this section we classify the pairs $(\mathfrak{g},\ell)$ with $\mathfrak{g}$ of exceptional Lie type such that $\mathcal{C}(\mathfrak{g},q,\ell)$ is weakly integral. The classification is summarized in Table \ref{weak}.
One important fact is the following:
\begin{prop}(see \cite{ENO})
If $\mathcal{C}$ is a weakly integral fusion category then $\mathcal{C}_{ad}$ is an integral fusion category.
\end{prop}
Here $\mathcal{C}_{ad}$ is the \emph{adjoint subcategory} generated by simple subobjects of $X\otimes X^*$ for each $X$. Thus to show that $\mathcal{C}$ is not weakly integral it is enough to find an object in $\mathcal{C}_{ad}$ that has non-integer $FP$-dimension.
\begin{prop} The ribbon category $\mathcal{C}(\mathfrak{g},q,\ell)$ is weakly integral with rank at least $2$ if and only if $(\mathfrak{g},\ell)$ is in Table \ref{weak}.
\end{prop}
\begin{table}\caption{Non-trivial Weakly Integral $\mathcal{C}(\mathfrak{g},q,\ell)$}\label{weak}
\begin{tabular}{*{3}{|c}|}
\hline
$\mathfrak{g}$ & $\ell$ & Notes\\
\hline\hline
$\mathfrak{e}_6$& $13$ & pointed, rank $3$
\\ \hline $\mathfrak{e}_7$& $19$ & pointed, rank $2$
\\ \hline $\mathfrak{e}_8$& $32$ & rank $3$
\\ \hline $\mathfrak{f}_4$& none &
\\ \hline $\mathfrak{g}_2$& $8$ & pointed, rank $2$
\\
\hline
\end{tabular}
\end{table}
\begin{proof} A direct computation shows that the categories listed are weakly integral.
To verify that no other weakly integral cases exist is tedious but straightforward. We will first illustrate how this is done by giving complete details for type $\mathfrak{g}_2$, and then describe how to carry out the computation for other types.
First suppose that $3\mid\ell$. $\mathcal{C}:=\mathcal{C}(\mathfrak{g}_2,q,\ell)$ is trivial (rank $1$) for $\ell=12$ so we assume $\ell\geq 15$. Let $\lambda_1$ be the highest weight corresponding to the $7$-dimensional representation of $\mathfrak{g}_2$, and denote by $X$ the corresponding simple object in $\mathcal{C}$. Note that $X$ is self-dual and $X\in\mathcal{C}_{ad}$ since $X$ is a subobject of $X\otimes X$. Now ${\rm FPdim}(X)=\frac{[2][7][12]}{[4][6]}$, so if ${\rm FPdim}(X)=k\in\mathbb Z$ then the primitive $2\ell$th root of unity $q$ satisfies the polynomial in $\mathbb Z[q]$:
$$q^{20}+q^{18}+q^{12}+(1-k)q^{10}+q^8+q^2+1.$$
Thus the minimal polynomial of $q$ must divide the above polynomial, so in particular $\phi(2\ell)\leq 20$ where $\phi$ is Euler's totient function. This implies that $\ell\in\{15,18,\ldots,33\}$. Computing ${\rm FPdim}(X)$ for these 7 values of $\ell$ we find no integers, hence $\mathcal{C}$ is not weakly integral.
Now suppose that $3\nmid\ell$. Here $\mathcal{C}$ is trivial for $\ell=7$ and pointed (integral) for $\ell=8$ so we assume $\ell\geq 10$. From ${\rm FPdim}(X)=[7]$ one finds that if ${\rm FPdim}(X)=k\in\mathbb Z$ then $\phi(2\ell)\leq 12$, as $q$ must satisfy a degree $12$ polynomial. From this we reduce the problem to checking that ${\rm FPdim}(X)$ is non-integral for $\ell\in\{10,11,13,14\}$, which is easily done. Thus $\mathcal{C}$ is not weakly integral except in the case $\ell=8$ found in Table \ref{weak}.
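The non-integrality checks in the last two paragraphs are elementary to carry out numerically. The following sketch is an illustration only (it assumes the balanced convention $[n]=\sin(n\pi/\ell)/\sin(\pi/\ell)$ at $q=e^{\pi i/\ell}$, and the function names are ours); it prints ${\rm FPdim}(X)$ for the finitely many remaining values of $\ell$, from which non-integrality can be read off:
\begin{verbatim}
import math

def qint(n, ell):
    # balanced quantum integer [n] evaluated at q = exp(i*pi/ell)
    return math.sin(n * math.pi / ell) / math.sin(math.pi / ell)

def fpdim_g2(ell):
    # FPdim of the simple object X labelled by lambda_1 in C(g_2, q, ell)
    if ell % 3 == 0:
        return (qint(2, ell) * qint(7, ell) * qint(12, ell)
                / (qint(4, ell) * qint(6, ell)))
    return qint(7, ell)

for ell in [15, 18, 21, 24, 27, 30, 33] + [10, 11, 13, 14]:
    print(ell, fpdim_g2(ell))
\end{verbatim}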
The other exceptional types can be done in a similar fashion: find a convenient object $X\in\mathcal{C}(\mathfrak{g},q,\ell)_{ad}$, assume that ${\rm FPdim}(X)=k\in\mathbb Z$ to obtain an upper bound on $\ell$ by bounding $\phi(2\ell)$ as above, and then check that ${\rm FPdim}(X)$ is non-integral for the (finitely many) remaining values of $\ell$. Table \ref{nonint} gives the necessary data for all exceptional Lie types. The second column of Table \ref{nonint} gives a pair of weights $(\nu,\mu)$ such that $V_{\nu}\subset V_{\mu}\otimes V^*_{\mu}$, that is, so that $V_{\nu}\in\mathcal{C}(\mathfrak{g},q,\ell)_{ad}$. The corresponding ${\rm FPdim}(V_{\nu})$ is given in the third column, and the upper bound on $\ell$ induced by the assumption ${\rm FPdim}(V_\nu)\in\mathbb Z$ is found in the last column.
\end{proof}
\begin{table}\caption{Objects of Non-integral $FP$-dimension}\label{nonint}
\begin{tabular}{*{4}{|c}|}
\hline
$\mathfrak{g}$ & $(\nu,\mu)$: $V_{\nu}\subset V_{\mu}\otimes V^*_{\mu}$ & ${\rm FPdim}(V_\nu)$ & maximum $\ell$\\
\hline\hline
$\mathfrak{e}_6$& $(\lambda_2,\lambda_1)$ & $\frac{[8][9][13]}{[4][3]}$ & $75$
\\ \hline $\mathfrak{e}_7$& $(\lambda_1,\lambda_7)$ & $\frac{[12][14][19]}{[4][6]}$ & $120$
\\ \hline $\mathfrak{e}_8$& $(\lambda_8,\lambda_8)$& $\frac{[20][24][31]}{[6][10]}$ & $210$
\\ \hline $\mathfrak{f}_4$, $\ell$ even & $(\lambda_1,\lambda_1)$ & $\frac{[3][8][13][18]}{[4][6][9]}$ & $66$
\\ \hline $\mathfrak{f}_4$, $\ell$ odd & $(\lambda_1,\lambda_1)$ & $\frac{[13][8]}{[4]}$ & $51$
\\ \hline $\mathfrak{g}_2$, $3\mid\ell$ & $(\lambda_1,\lambda_1)$ & $\frac{[2][7][12]}{[4][6]}$ & $33$
\\ \hline $\mathfrak{g}_2$, $3\nmid\ell$ & $(\lambda_1,\lambda_1)$ & $[7]$ & $14$
\\
\hline
\end{tabular}
\end{table}
\section{Failure of property $\mathbf{F}$}\label{4:noF}
In this section we demonstrate that property $\mathbf{F}$ fails for each pair $(\mathfrak{g},\ell)$ such that $\mathcal{C}(\mathfrak{g},q,\ell)$ is not weakly integral (see Table \ref{weak}).
\subsection{Lie type $G_2$}
\begin{thm}
The non-trivial categories $\mathcal{C}(\mathfrak{g}_2,q,\ell)$ do not have property $\mathbf{F}$ unless $\ell=8$.
\end{thm}
\begin{proof}
We first remark that it is enough to consider only the specific choice $q=e^{\pi i/\ell}$ as Galois conjugation
does not affect the question of finiteness of the image of $\mathcal{B}_3$: the relations in a finite group presentation induce polynomial equations in the entries with integer coefficients. By non-trivial we mean that the rank is at least $2$.
Let $V$ be the simple object labeled by $\lambda_1=\varepsilon_1-\varepsilon_3$, i.e.\ the highest weight of the $7$-dimensional fundamental representation of $\mathfrak{g}_2$. There are two cases to consider: $3\mid\ell$ and $3\nmid\ell$.
We first consider the cases where $3\mid \ell$. Here $\ell=12$ corresponds to the trivial rank $1$ category and $\ell=15$ has rank $2$. The latter case was considered in \cite{RSW}, where it is called the ``Fibonacci category'' and can be identified with a subcategory of $\mathcal{C}(\mathfrak{sl}_2,q,5)$. In particular it is known to have infinite braid group image (going back to \cite{jones86}). We claim that, provided $\ell\geq 18$, the rank of $\mathcal{C}(\mathfrak{g}_2,q,\ell)$ is at least $4$ and $\Hom(V,V^{\otimes 3})$ is a $4$-dimensional irreducible representation of $\mathcal{B}_3$. Note that $V^{\otimes 2}\cong \mathbf{1}\oplus V\oplus V_{\lambda_2}\oplus V_{2\lambda_1}$, and that each of these summands is in $C_\ell(\mathfrak{g}_2)$. One computes the eigenvalues of the image of $\sigma_1$ acting on $\Hom(V,V^{\otimes 3})$ using Reshetikhin's formula (\ref{eigs}): $\{1,-q^6,-q^{12},q^{14}\}$ (note that $\bigwedge^2 V\cong V\oplus V_{\lambda_2}$, which accounts for the signs). It is convenient to rescale these to $S:=\{q^{-12},-q^{-6},-1,q^2\}$, which of course does not affect the projective image. These eigenvalues are distinct for $q=e^{\pi i/\ell}$ with $\ell\geq 18$, and so it follows from Lemma \ref{twlemma} that $\mathcal{B}_3$ acts irreducibly on $\Hom(V,V^{\otimes 3})$; the projective order of the image of $\sigma_1$ is $2\ell$ if $\ell$ is odd and $\ell$ if $\ell$ is even. Next we claim that the image of $\mathcal{B}_3$ is primitive: by Theorem \ref{rttheorem}(c) it is enough to check that $S$ is not of the form $\{\pm r,\pm s\}$ or $\chi\{1,\omega,\omega^2\}\cup\{\alpha\}$
with $\omega$ a 3rd root of unity. Since $q=e^{\pi i/\ell}$ with $\ell\geq 18$ we find that $1\not\in S$, so $S\not=\{\pm r,\pm s\}$. For $\ell\geq 18$ none of $-1/q^2,q^{-12}/q^2$ or $-q^{-6}/q^2$ is equal to $1$, so we need only check that $\{-1,q^{-12},-q^{-6}\}\not=\chi\{1,\omega,\omega^2\}$ for any $\chi$. Assuming the contrary we must have $q^{-18}=1$, but this implies $q^2$ is a $9$th root of unity, which contradicts $\ell\geq 18$. Now it follows from Theorem \ref{rttheorem}(a) and (d)(iii) that the image of $\mathcal{B}_3$ is infinite unless $\ell=24$. For the case $\ell=24$ one must work a little harder: in this case there are, up to equivalence, two irreducible $4$-dimensional representations of $\mathcal{B}_3$ with these eigenvalues, explicitly described in \cite[Prop. 2.6]{TW}. For example, for one of the two choices the image of $\sigma_1$ is
$$A:=\begin{pmatrix} {q}^{-12}&{\frac {{q}^{8}+ {q}^{4}+1}{{q}^{6}}}&-{\frac {{q}^{8}+{q}^{4}+1}{{q}^{14}}}&-1
\\0&{q}^{2}&-{\frac {{q}^{4}-1}{{q}^{10}}}&-1\\0&0&-{q}^{-6}&-1
\\0&0&0&-1\end{pmatrix}$$ while
the image of $\sigma_2$ is:
$$B:=\begin{pmatrix}-1&0&0&0\\{q}^{-6}&-{q}^{-6}&0&0\\ {q}^{6}
&- \left( {q}^{4}+1 \right) {q}^{2}&
{q}^{2}&0\\-1 &{
\frac {{q}^{8} +{q}^{4}+1}{{q}^{8}}}&-{\frac {{q}^{8}+{q}^{4}+1}{{q}^{12}}}&{q}^{-12}\end{pmatrix}$$
Substituting $q=e^{\pi i/24}$ one finds that the matrix $C:=AB^{-1}$ has infinite order. Indeed by \cite[Lemma 5.1]{RT} one need only verify that $C^j$ is not proportional to $I$ for $1\leq j\leq 24$.
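For completeness, that verification is easy to reproduce numerically. The following sketch is an illustration only, not part of the formal argument (it uses numpy and the matrices $A$ and $B$ displayed above with $q=e^{\pi i/24}$); it checks whether any power $C^j$ with $1\leq j\leq 24$ is a scalar matrix:
\begin{verbatim}
import numpy as np

q = np.exp(1j * np.pi / 24)
p = q**8 + q**4 + 1
A = np.array([[q**-12, p/q**6, -p/q**14, -1],
              [0, q**2, -(q**4 - 1)/q**10, -1],
              [0, 0, -q**-6, -1],
              [0, 0, 0, -1]])
B = np.array([[-1, 0, 0, 0],
              [q**-6, -q**-6, 0, 0],
              [q**6, -(q**4 + 1)*q**2, q**2, 0],
              [-1, p/q**8, -p/q**12, q**-12]])
C = A @ np.linalg.inv(B)
for j in range(1, 25):
    Cj = np.linalg.matrix_power(C, j)
    top = Cj.flat[np.argmax(np.abs(Cj))]   # largest entry; nonzero since C is invertible
    if np.allclose(Cj / top, np.eye(4), atol=1e-9):
        print("C^%d is proportional to the identity" % j)
        break
else:
    print("no power C^j (1 <= j <= 24) is scalar")
\end{verbatim}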
Next we consider the case $3\nmid \ell$. In this case $\mathcal{C}(\mathfrak{g}_2,q,\ell)$ is trivial for $\ell=7$ and is pointed of rank $2$ for $\ell=8$. For $\ell\geq 10$ we again find that $\Hom(V,V^{\otimes 3})$ is a $4$-dimensional irreducible $\mathcal{B}_3$-representation, as we have the same decomposition of $V^{\otimes 2}$ and the same eigenvalues as above. The case $\ell=10$ (rank $4$) has been studied: $\mathcal{C}(\mathfrak{g}_2,q,10)$ has the Fibonacci category as a (modular) subcategory (see \cite[Theorem 3.4]{RJPAA}) and hence has infinite braid group image. For $\ell\geq 11$, Theorem \ref{rttheorem}(d) is again sufficient to conclude that the image of $\mathcal{B}_3$ is infinite except for the case $\ell=20$. We may use the same matrices $A,B$ and $C$ described above and explicitly check that $C$ has infinite order for $q=e^{\pi i/20}$.
\end{proof}
\subsection{Lie type $F_4$}
\begin{thm}
The non-trivial categories $\mathcal{C}(\mathfrak{f}_4,q,\ell)$ do not have property $\mathbf{F}$.
\end{thm}
\begin{proof}
Now let $\mathfrak{g}=\mathfrak{f}_4$, and $V$ be the simple object in $\mathcal{C}(\mathfrak{f}_4,q,\ell)$ analogous to the ($26$-dimensional) vector representation of $\mathfrak{f}_4$ labeled by $\lambda_1$. Again there are two cases: $2\mid\ell$ and $2\nmid\ell$.
First, suppose $\ell$ is even. Then if $22\leq\ell$, $\operatorname{dim}\Hom(V,V^{\otimes 3})=5$ and the eigenvalues of the image of $\sigma_1$ are (up to an overall scale factor): $\{q^{-24},q^{-12},q^2,-1,-q^{-6}\}$. Notice that these eigenvalues are distinct unless $\ell=24$, so that $\Hom(V,V^{\otimes 3})$ is an irreducible $\mathcal{B}_3$-representation if $\ell\not=24$. Moreover, the image is primitive (see Theorem \ref{rttheorem}(c)), so Theorem \ref{rttheorem}(d)(iv) implies that the image of $\mathcal{B}_3$ is infinite for $\ell=22$ or $26\leq\ell$, as the projective order of the image of $\sigma_1$ is $\ell$ in these cases. In the case $\ell=24$ we have repeated eigenvalues, hence either the representation is reducible or it has infinite image by Theorem \ref{rttheorem}(a). Thus we must consider the $\ell=24$ case separately. In this case $\mathcal{C}(\mathfrak{f}_4,q,24)$ has rank $9$: the simple objects are labeled by $$\{0,\lambda_1,2\lambda_1,3\lambda_1,\lambda_2,\lambda_3,\lambda_4,\lambda_1+\lambda_2,\lambda_1+\lambda_4\}.$$ If we let $U$ be the simple object labeled by $\lambda_4$, we compute that $U^{\otimes 2}$ decomposes as $\mathbf{1}\oplus V_{2\lambda_1}\oplus U\oplus V_{\lambda_3}$. To see this one computes the second tensor power of the fundamental $52$-dimensional representation with highest weight $\lambda_4$ and then discards the object labeled by $2\lambda_4$, as this weight lies on the upper hyperplane of the Weyl alcove for $\ell=24$; that is, $\langle 2\lambda_4+\rho,\vartheta_0\rangle=24$. One then computes the eigenvalues of the action of $\sigma_1$ on $\Hom(U,U^{\otimes 3})$ to be $\{1,q^{26},-q^{18},-q^{36}\}$. For $q=e^{\pi i/24}$ these are distinct, so that $\Hom(U,U^{\otimes 3})$ is an irreducible $4$-dimensional $\mathcal{B}_3$-representation. However, the projective order of the image of $\sigma_1$ is $24$, so we must resort to explicit computations as in the $\mathfrak{g}_2$ situation above. We again find that the image of $\sigma_1\sigma_2^{-1}$ has infinite order.
Next assume that $\ell$ is odd. We have $\operatorname{dim}\Hom(V,V^{\otimes 3})=5$ when $15\leq\ell$, and the projective order of the image of $\sigma_1$ is $2\ell$, so the image of $\mathcal{B}_3$ is again always infinite by Theorem \ref{rttheorem}(d).
\end{proof}
\subsection{Lie types $E_6,E_7$ and $E_8$}
\begin{thm}
The non-trivial categories $\mathcal{C}(\mathfrak{e}_N,q,\ell)$ do not have property $\mathbf{F}$ unless $(N,\ell)\in\{(6,13),(7,19),(8,32)\}$.
\end{thm}
\begin{proof}
The braid group representations for $U_q\mathfrak{e}_N$ with $q$ generic have been studied at length in \cite{wenzlEN}, and the results there can be used to give a uniform proof of the infinitude of the braid group image for $N=6,7$ and $8$. In this case we take $V$ to be the simple object analogous to the vector representation of $\mathfrak{e}_N$. The highest weight of $V$ corresponds to the node in the Dynkin diagram of $E_N$ furthest from the triple point (for $N=6$ this is ambiguous but we may pick either as they correspond to dual objects). We will call this highest weight ${\lambda}_1$ (although this does not coincide with \cite{Bou} in some cases).
The $\mathcal{B}_3$ representation space we consider is $\Hom(V_{{\lambda}_1+{\lambda}_N},V^{\otimes 3})$. Note that $V_{{\lambda}_1+{\lambda}_N}$ appears in what Wenzl calls $V^{\otimes 3}_{new}$ in \cite{wenzlEN}.
This space is $3$-dimensional provided $\lambda_1+\lambda_N\in C_\ell(\mathfrak{e}_N)$. For $N=6$ this is satisfied for $\ell\geq 14$, for $N=7$ we need $\ell\geq 21$ and for $N=8$ we must have $\ell\geq 34$. Let us say that $\ell$ is in the \textit{stable range} if $\ell$ is large enough to ensure $\lambda_1+\lambda_N\in C_\ell(\mathfrak{e}_N)$. We will consider the cases corresponding to the pairs $(N,\ell)\in\{ (7,20),(8,33)\}$ separately. Provided $\ell$ is in the stable range, the braiding eigenvalues on $H:=\Hom(V_{\lambda_1+\lambda_N},V^{\otimes 3})$ are (up to an overall scale factor) $\{q,-q^{-1},q^{3-2N}\}$ (see \cite{wenzlEN}) where $N=6,7$ or $8$. In fact, it is observed in \cite[Remark 5.10]{wenzlEN} that this $3$-dimensional representation $H$ is equivalent to the $3$-dimensional representation of $\mathcal{B}_3$ obtained from BMW-algebras (see \cite{wenzlbcd,tubawenzlbcd}) specialized at $r=q^{2N-3}$. The irreducibility of this representation at roots of unity is analyzed in \cite{wenzlbcd}. In particular \cite[Theorem 6.4(a)]{wenzlbcd} implies that as long as $2N-3<\ell-2$ the corresponding $3$-dimensional $\mathcal{B}_3$ representation is irreducible. This is clearly satisfied for $\ell$ in the stable range. An alternative approach to showing that $H$ is irreducible is to follow the proof in \cite[Section 4]{wenzlEN}; it is a matter of checking that the argument there remains valid in the root of unity case provided $\ell$ is in the stable range.
Having shown that $H$ is irreducible, it is now a routine application of Theorem \ref{rttheorem}(c)(i) and (d)(ii) to see that the image of $\mathcal{B}_3$ on $H$ is primitive and hence infinite, as the projective order of the image of $\sigma_1$ is at least $8$. This proves the statement for all but the two cases $(N,\ell)\in\{(7,20),(8,33)\}$.
One finds that $\mathcal{C}(\mathfrak{e}_7,q,20)$ has rank $6$ and is a product of two well-known categories: the Fibonacci category (rank $2$) and the Ising category (rank $3$). The former has infinite braid group image (although the latter does not!) so we may deduce the infinitude of the braid group image for $(N,\ell)=(7,20)$.
The rank $5$ category $\mathcal{C}(\mathfrak{e}_8,q,33)$ is conjugate to $\mathcal{C}(\mathfrak{f}_4,q,22)$ and can also be realized as a modular subcategory of $\mathcal{C}(\mathfrak{sl}_2,q,11)$. We have already seen that $\mathcal{C}(\mathfrak{f}_4,q,22)$ has infinite braid group image, so we have shown that the case $(N,\ell)=(8,33)$ has infinite braid group image, finishing the proof.
\iffalse
First we must establish that $H:=\Hom(V_{{\lambda}_1+{\lambda}_N},V^{\otimes 3})$ is irreducible as a $\mathcal{B}_3$-representation for $q=e^{\pi i/\mathfrak{e}ll}$ with $\mathfrak{e}ll$ in the stable range. For $q$ generic (i.e. not a root of unity) it is immediate from \cite[Theorem 4.9]{wenzlEN} that $H$ is an irreducible $\mathcal{B}_3$ representation as the braid group image generates $\mathcal{E}nd(V^{\otimes 3}_{new})$ of which $H$ is a simple module (since $N\mathfrak{g}eq 6$ the ``quasi-Pfaffian'' is not needed). To see that $H$ is irreducible for $\mathfrak{e}ll$ in the stable range we verify that Wenzl's proof, applied to $H$, remains valid. Firstly, observe that the Littelman path formalism is unchanged for objects in $\mathbb CC(\mathfrak{e}_N,q,\mathfrak{e}ll)$ provided all paths are contained in the Weyl alcove (the path closure of $C_\mathfrak{e}ll(\mathfrak{e}_N)$). The path space we are interested in is $\mathscr{P}({\lambda}_1,{\lambda}_1+{\lambda}_N)$ of length 2 paths from ${\lambda}_1$ to ${\lambda}_1+{\lambda}_N$. Requiring $\mathfrak{e}ll$ is in the stable range guarantees that these (three) paths are indeed in the Weyl alcove. Next, we must compute the quantities $f_{\mu}:={\lambda}ngle \mu+2\rho,\mu\rangle$ (called $c_\mu$ in \cite{wenzlEN}) and
$$e(\mu):=f_{\mu}-(f_{{\lambda}_1}+f_{{\lambda}_1+{\lambda}_N})/2+\mathfrak{f}rac{1}{9-N}$$
for each $V_\mu$ appearing in $V^{\otimes 2}_{new}$. In \cite[Section 4]{wenzlEN} the matrix entries for the action of $\mathcal{B}_n$ with $n<N$ are explicitly determined (see \cite[Prop. 5.3]{wenzlEN} for the statement of this fact).
First consider $\mathbb CC(\mathfrak{e}_6,q,\mathfrak{e}ll)$ with $q=e^{\pi i/\mathfrak{e}ll}$ with $\mathfrak{e}ll\mathfrak{g}eq 14$. If we let $U:=V_{{\lambda}_1}$ we find that
$$U^{\otimes 2}\cong U^*\opluslus V_{2{\lambda}_1}\opluslus V_{{\lambda}_3}$$
where $U^*=V_{{\lambda}_6}$. We cannot apply Lemma \ref{twlemma} to this situation because $U$ is not self-dual. However the $E_N$ series has been explicitly investigated in \cite{WtypeE,Wtens} and we can conclude from \cite[Prop. 3.6]{wtens} that $\mathcal{B}_3$ acts irreducibly on $\Hom(V_{{\lambda}_1+{\lambda}_6},U^{\otimes 3})$ provided $\mathfrak{e}ll\mathfrak{g}eq 14$. The corresponding eigenvalues are (up to an overall scale factor): $S=\{q,1/q^9,-1/q\}$ (see \cite[Table 6.1]{LRW}) (which are clearly distinct for $\mathfrak{e}ll\mathfrak{g}eq 14$). To verify that the image is a primitive group we need only check that $\{\pm \chi\}\not\subset S$. One finds that the projective order of the image of $\sigma_1$ is $\mathfrak{e}ll$ if $\mathfrak{e}ll$ is even and $2\mathfrak{e}ll$ if $\mathfrak{e}ll$ is odd. Since $\mathfrak{e}ll\mathfrak{g}eq 14$ it follows from Theorem \ref{rttheorem}(d)(ii) that the image of $\mathcal{B}_3$ is infinite.
For $\mathfrak{e}_7$ we first consider $Y:=V_{{\lambda}_7}$ with $\mathfrak{e}ll\mathfrak{g}eq 20$. We compute:
$$Y^{\otimes 2}\cong V_{2{\lambda}_7}\opluslus V_{{\lambda}_1}\opluslus V_{{\lambda}_6}\opluslus \mathbf{1},$$
with corresponding braiding eigenvalues: $S=\{q^{30},q^{18},-q^{28},-1\}$. These eigenvalues are distinct for $\mathfrak{e}ll\mathfrak{g}eq 20$ except for $\mathfrak{e}ll=30$, so Lemma \cite{twlemma} implies that $\mathcal{B}_3$ acts irreducibly on the $4$-dimensional representation $\Hom(Y,Y^{\otimes 3})$ if $\mathfrak{e}ll\mathfrak{g}eq 20$ and $\mathfrak{e}ll\not=30$. The projective order of the image of $\sigma_1$ is $\mathfrak{e}ll$ if $\mathfrak{e}ll$ is even and $2\mathfrak{e}ll$ if $\mathfrak{e}ll$ is odd. One verifies that the form of $S$ is not as in Theorem \ref{rttheorem}(c) so the image of $\mathcal{B}_3$ is primitive, and infinite by Theorem \ref{rttheorem}(d)(iii) unless $\mathfrak{e}ll=20,24$ (or $\mathfrak{e}ll=30$).
For the cases $\mathfrak{e}ll=20,24,30$ and $\mathfrak{e}ll=24$ we consider instead the object $Z:=V_{{\lambda}_1}$. The tensor square decomposition
$$Z^{\otimes 2}\cong Z\opluslus V_{{\lambda}_3}\opluslus V_{2{\lambda}_1}\opluslus V_{{\lambda}_6}\opluslus \mathbf{1}$$
is valid provided $\mathfrak{e}ll\mathfrak{g}eq 22$, and the corresponding braiding eigenvalues are: $S=\{-q^{18},-q^{36},q^{38},q^{28},1\}$. In the cases $\mathfrak{e}ll=24,30$ these eigenvalues are distinct so it follows from Theorem \ref{rttheorem}(d)(iv) and Lemma \ref{twlemma} that the braid group image is infinite (NEED to check projective order!). For $\mathfrak{e}ll=20$ the above decomposition of $Z^{\otimes 2}$ must be truncated: in this case the weight ${\lambda}_3$ is on the upper hyperplane of the fundamental alcove so it must be discarded. Moreover, the weight $2{\lambda}_1$ is outside the fundamental alcove and so $V_{2{\lambda}_1}$ ``cancels" with either $Z:=V_{{\lambda}_1}$ or $V_{{\lambda}_6}$. In either case the (irreducible) $\mathcal{B}_3$ representation on $\Hom(Z,Z^{\otimes 3})$ is 2-dimensional and the image of $\sigma_1$ has projective order at least $10$. This is enough to conclude that the braid group image is infinite by Theorem \ref{rttheorem}(d)(i). In fact one finds that $\mathbb CC(\mathfrak{e}_7,q,20)$ has rank $6$ and is a product of two well-known categories: the Fibonacci category (rank $2$) and the Ising category (rank $3$). The former has infinite braid group image (although the latter does not!) so we may deduce the infinitude of the braid group image for $\mathfrak{e}ll=20$.
Finally we consider the object $T:=V_{{\lambda}_8}$ in $\mathbb CC(\mathfrak{e}_8,q,\mathfrak{e}ll)$ with $\mathfrak{e}ll\mathfrak{g}eq 33$. The decomposition:
$$T^{\otimes 2}\cong T\opluslus \mathbf{1}\opluslus V_{{\lambda}_1}\opluslus V_{2{\lambda}_8}\opluslus V_{{\lambda}_7}$$ is valid for $\mathfrak{e}ll\mathfrak{g}eq 34$ while for $\mathfrak{e}ll=33$ we must discard $V_{2{\lambda}_8}$ as $2{\lambda}_8$ lies on the upper hyperplane of the Weyl alcove for $\mathfrak{e}ll=33$. We compute the corresponding braiding eigenvalues to be: $S=\{-q^{30},1,q^{48},q^{62},-q^{60}\}$
with $q^{62}$ discarded for the $\mathfrak{e}ll=33$ case. One sees that for $\mathfrak{e}ll\mathfrak{g}eq 33$ these eigenvalues are distinct except if $\mathfrak{e}ll=60$. So provided $\mathfrak{e}ll\not\in\{33,60\}$ the action of $\mathcal{B}_3$ on the $5$-dimensional representation $\Hom(T,T^{\otimes 3})$ is irreducible and it follows from Theorem \ref{rttheorem}(d)(iv) that the image is infinite, as the projective order of the image of $\sigma_1$ is $\mathfrak{e}ll$.
For $\mathfrak{e}ll=33$ the four remaining eigenvalues are easily seen to fail the conditions of imprimitivity and the projective order of the image of $\sigma_1$ is $22$ so by Theorem \ref{rttheorem}(d)(iii) the braid group image is infinite in this case as well. For the $\mathfrak{e}ll=60$ case we must again appeal to the results in \cite{WtypeE,Wtens} on tensor categories of Lie type $E_N$. The key result is that $\Hom(V_\mu,T^{\otimes 3})$ is an irreducible $\mathcal{B}_3$ representation for generic $q$. For any $V_\mu$ with the property that the action of $\sigma_1$ on $\Hom(V_\mu,T^{\otimes 3})$ has distinct eigenvalues the argument in \cite{Wtens} goes through without change. In the case of $\mu={\lambda}_7$ and $\mathfrak{e}ll=60$ we have such a situation: $\Hom(V_{{\lambda}_7},T^{\otimes 3})$ is $4$-dimensional with braiding eigenvalues $$\{-q^{30},q^{48},q^{62},-q^{60}\}=\{-i,e^{4\pi i/5},e^{-29\pi i/30},1\}.$$ Thus the corresponding $\mathcal{B}_3$-representation has infinite (primitive) image for $\mathfrak{e}ll=60$ as well.
\fi
\mathfrak{e}nd{proof}
\section{Conclusions}\label{5:conclusions}
With these results the verification of the ``non-weakly-integral implies no property $\mathbf{F}$'' direction of Conjecture \ref{propfconj} for quantum group categories is essentially complete. Although a classification of weakly integral categories of classical Lie type has not appeared (to our knowledge), it is essentially known to experts. In type $A$ it is known that $\mathcal{C}(\mathfrak{sl}_N,q,\ell)$ is weakly integral for $\ell\in\{N,N+1,4,6\}$ and for type $C$ we have $\mathcal{C}(\mathfrak{sp}_4,q,10)$ weakly integral. There are two infinite families of weakly integral categories coming from types $B$ and $D$: $\mathcal{C}(\mathfrak{so}_N,q,2N)$ with $N$ odd and $\mathcal{C}(\mathfrak{so}_M,q,M)$ with $M$ even (see \cite{NR}). In addition $\mathcal{C}(\mathfrak{so}_N,q,2N-2)$ with $N$ odd and $\mathcal{C}(\mathfrak{so}_M,q,M-1)$ with $M$ even are weakly integral but are always rank $3$ and $4$ respectively and only give rise to finitely many inequivalent categories. This is expected to be a complete list (except possibly for some low-rank coincidences). The results of \cite{jones86,FLW,LRW} show that in all but these cases property $\mathbf{F}$ fails for $\mathcal{C}(\mathfrak{g},q,\ell)$ of classical Lie type.
We conclude with two ``plausibility arguments'' for the general conjecture.
Firstly, if $G$ is a primitive linear group of degree $m$ then the projective order of any element of $G$ is bounded by a ($G$-independent) function of $m$; see \cite[Corollary 4.3]{LRW} for a result of this type. Now it is known that for a (unitary) modular category, the ${\rm FPdim}(X_j)$ lie in the cyclotomic field $\mathbb Q(\theta_1,\ldots,\theta_k)$ generated by the twists (see \cite{NS}). So if ${\rm FPdim}(X_j)$ is far from being integral, that is, if $[\mathbb Q({\rm FPdim}(X_j)):\mathbb Q]$ is large, then the order of some $\theta_i$ is large. Since the eigenvalues of the image of $\sigma_i$ are square-roots of products of twists, it follows that the projective order of the image of $\sigma_1$ in some $\mathcal{B}_3$ representation is large. On the other hand the degrees of the irreducible representations of $\mathcal{B}_3$ associated to an object $X$ are bounded by ${\rm FPdim}(X)^2$ (the maximum number of simple subobjects of $X^{\otimes 2}$) and are typically much smaller. Thus it is reasonable to expect that non-weakly-integral categories give rise to infinite braid group images.
The main result of \cite{ERW} is that group-theoretical braided fusion categories have property $\mathbf{F}$. Here $\mathcal{C}$ is \emph{group-theoretical} if the Drinfeld center of $\mathcal{C}$ is equivalent to $\operatorname{Rep}(D^\omega G)$ for some finite group $G$ and cocycle $\omega$. A group-theoretical fusion category is always integral, but not conversely. This notion has been generalized to weakly group-theoretical fusion categories (see \cite{ENO2}). Roughly speaking, weakly group-theoretical fusion categories are those that can be defined in terms of finite group data.
Weakly group-theoretical fusion categories are always weakly integral, and no counterexample to the converse is currently known. If indeed weak integrality and weak group-theoreticity are equivalent notions, one need only show that weakly group-theoretical braided fusion categories have property $\mathbf{F}$ to prove the other direction of Conjecture \ref{propfconj}.
\begin{thebibliography}{9999}
\bibitem{andersen} H.\ H.\ Andersen, \emph{Tensor products of quantized
tilting modules}, Comm. Math. Phys. \textbf{149} (1991), 149-159.
\bibitem{BK} B.\ Bakalov and A.\ Kirillov, Jr., Lectures on
Tensor Categories and Modular Functors, University Lecture
Series, vol.\ {\bf 21}, Amer.\ Math.\ Soc., 2001.
\bibitem{Bou} N.\ Bourbaki, \emph{Groupes et alg\`ebres de Lie}, Chap. 4--6 (1968), Hermann, Paris.
\bibitem{ENO} P.\ Etingof, D.\ Nikshych, and V.\ Ostrik, \emph{On fusion categories}, Ann. of Math. (2) \textbf{162} (2005), no. 2, 581-642.
\bibitem{ENO2} P.~Etingof, D.~Nikshych, V.~Ostrik,
\textit{Weakly group-theoretical and solvable fusion categories},
arXiv:0809.3031.
\bibitem{ERW} P.~Etingof, E.\ C.\ Rowell, S.~Witherspoon,
\textit{Braid group representations from quantum doubles of finite groups},
Pacific J. Math. \textbf{234} (2008), no. 1, 33–41.
\bibitem{FRW} J.\ M.\ Franko, E.\ C.\ Rowell and Z.\ Wang, \emph{Extraspecial 2-groups and images of braid group representations}, J. Knot Theory Ramifications \textbf{15} (2006), no. 4, 413--427.
\bibitem{FLW} M.\ H.\ Freedman, M.\ J.\ Larsen and Z.\ Wang,
\emph{The two-eigenvalue problem and density of Jones representation of braid groups},
Comm.\ Math.\ Phys.\ \textbf{228} (2002), 177-199.
\bibitem{jones86} V.\ F.\ R.\ Jones, \emph{Braid groups, Hecke algebras and type ${\rm II}\sb 1$ factors} in
Geometric methods in
operator algebras (Kyoto, 1983), 242--273, Pitman Res. Notes Math. Ser., 123, Longman Sci. Tech.,
Harlow, 1986.
\bibitem{jonescmp} V.\ F.\ R.\ Jones, \textit{On a certain value of the Kauffman polynomial.}
Comm. Math. Phys. \textbf{125} (1989), no. 3, 459--467.
\bibitem{LRW} M.\ J.\ Larsen, E.\ C.\ Rowell and Z.\ Wang, \emph{The $N$-eigenvalue problem
and two applications}, Int. Math. Res. Not. \textbf{2005}, no. 64, 3987--4018.
\bibitem{LeducRam} R.\ Leduc and A.\ Ram, \emph{A ribbon Hopf algebra
approach to the irreducible representations of centralizer
algebras: The Brauer, Birman-Wenzl and type $A$ Iwahori-Hecke
algebras}, Adv. Math. \textbf{125} (1997), 1-94.
\bibitem{NR} D.\ Naidu and E.\ C.\ Rowell, \emph{A finiteness property for braided fusion categories}, to appear in Algebr. Represent. Theory.
\bibitem{NS} S.-H.\ Ng and P.\ Schauenburg, \emph{Congruence subgroups and generalized Frobenius-Schur indicators}, preprint, arXiv:0806.2493.
\bibitem{Survey} E.\ C.\ Rowell, \emph{From quantum groups to unitary modular tensor categories},
in Contemp.\ Math.\ \textbf{413} (2006), 215--230.
\bibitem{RJPAA} E.\ C.\ Rowell, \textit{Unitarizability of premodular categories}. J. Pure Appl. Algebra, \textbf{212} (2008) no. 8, 1878--1887.
\bibitem{RSW} E.\ Rowell; R.\ Stong; Z.\ Wang, \textit{On classification of modular tensor categories}, Comm. Math. Phys. \textbf{292} (2009) no. 2, 343-389.
\bibitem{RT} E.\ C.\ Rowell and I.\ Tuba, \emph{Finite linear quotients of $\mathcal{B}_3$ of low dimension}, to appear in J. Knot Theory Ramifications, arXiv:0806.0168.
\bibitem{TW} I. Tuba and H. Wenzl,
\emph{Representations of the braid groups $B_3$ and of
$SL(2,\mathbb Z)$}, Pacific J. Math. \textbf{197} (2001), no. 2, 491-510.
\bibitem{tubawenzlbcd} I.\ Tuba and H.\ Wenzl, \emph{On braided tensor categories of type $BCD$},
J.\ Reine Angew.\ Math.\ \textbf{581} (2005), 31-69.
\bibitem{Tur} V. G. Turaev, Quantum Invariants of Knots and 3-Manifolds.
de Gruyter Studies in Mathematics, 18. Walter de Gruyter \& Co., Berlin, 1994.
\bibitem{wenzlbcd} H.\ Wenzl, \emph{Quantum groups and subfactors of
Lie type B, C and D}, Comm. Math. Phys. \textbf{133} (1990)
383-433.
\bibitem{wenzlcstar} H.\ Wenzl, \emph{$C^*$ tensor categories from quantum groups},
J. Amer. Math. Soc. \textbf{11} (1998), 261-282.
\bibitem{wenzlEN} H.\ Wenzl, \emph{On tensor categories of type $E_N$, $N\not=9$}, Adv. Math. \textbf{177} (2003), 66-104.
\end{thebibliography}
\end{document}
\begin{document}
\begin{frontmatter}
\title{{\textbf{Distributivity between extended nullnorms and uninorms on fuzzy truth values}}
\thanksref{thk}}
\thanks[thk]{This work is supported by National Natural Science Foundation of China (No. 11171242).}
\author{Zhi-qiang Liu},
\author{Xue-ping Wang\corauthref{cor}}\qquad
\corauth[cor]{Corresponding author. Fax: +86 28 84761393.}
\ead{[email protected]}
\address{School of Mathematical Sciences, Sichuan Normal University, Chengdu, Sichuan 610066, P.R. China}
\begin{abstract}
This paper mainly investigates the distributive laws between extended nullnorms and uninorms on fuzzy truth values under the condition that the nullnorm is conditionally distributive over the uninorm. It presents the distributive laws between the extended nullnorm and t-conorm, and the left and right distributive laws between the extended generalized nullnorm and uninorm, where a generalized nullnorm is an operator from the class of aggregation operators with absorbing element that generalizes a nullnorm.
\end{abstract}
\begin{keyword} Fuzzy truth values; Extended nullnorm; Extended uninorm; Distributive law
\end{keyword}
\end{frontmatter}
\section{Introduction}
The concept of a type-2 fuzzy set was introduced by Zadeh in 1975 \cite{Zadeh1975} as an extension of type-1 fuzzy sets, and it has been heavily
investigated both as a mathematical object and for use in applications \cite{Walker2005,Zadeh1975}. The algebra of truth values for fuzzy sets of type-2 consists of all mappings from the unit interval into itself and their operations which are convolutions of operations on the unit interval \cite{Walker2005}.
The algebra theory was studied extensively by Harding, C. and E. Walker \cite{Harding2016}, and C. and E. Walker \cite{Walker2005,WALKER2006,Walker2009}.
The theory of aggregation of real numbers plays an important role in many different theoretical and practical fields, e.g., decision making theory, fuzzy set theory, integration theory, etc. Aggregation operators for real numbers have been extended to ones for type-2 fuzzy sets. For example,
Gera and Dombi \cite{Gera20081} proposed computationally simple, pointwise formulas for extended t-norms and t-conorms on fuzzy truth values;
Tak\'{a}\u{c} \cite{Takac2014} investigated extended aggregation operations on the algebra of convex normal fuzzy truth values with their left and right parts;
Torres-Blanc, Cubillo, and Hern\'{a}ndez \cite{Torres2017} applied Zadeh's extension principle to extend the aggregation operations of type-1 to the case of type-2 fuzzy sets. In particular, the distributive laws between those convolution operations on fuzzy truth values have become an interesting and natural research area, so they are discussed in many articles.
For instance, Harding, C. and E. Walker \cite{Harding2016} and C. and E. Walker \cite{Walker2005,Walker2009} discussed the distributive laws between extended minimums and maximums, and extended maximums and minimums, respectively, the distributive laws between extended t-norms and maximums, and the distributive laws between extended t-conorms and minimums.
Hu and Kwong \cite{HuBQ2014} also presented the distributive laws between extended t-norms and maximums, and the distributive laws between extended t-conorms and minimums. Xie \cite{Xie2018} extended type-1 proper nullnorms and proper uninorms to fuzzy truth values and studied the distributive laws between the extended uninorms and minimums, and the distributive laws between the extended uninorms and maximums.
Recently, Liu and Wang \cite{Liu2019} discussed distributivity between extended t-norms and t-conorms on fuzzy truth values under the condition that the t-norm is conditionally distributive over the t-conorm or the t-conorm is conditionally distributive over the t-norm. It is well known that uninorms \cite{Yager1996} and nullnorms \cite{Calvo2001} are aggregation operations with neutral elements and absorbing elements on $[0,1]$, respectively. They are generalizations of t-norms and t-conorms as well. However, the distributive laws between the extended nullnorms and uninorms on fuzzy truth values have not been discussed until now, so this paper will investigate these problems based on the results on conditional distributivity of nullnorms over uninorms in \cite{Dragan2013,Dragan2015,LiG2015}.
This paper is organized as follows. In Section 2 we recall some necessary definitions and previous results.
In Section 3 we investigate the distributive laws between extended nullnorms and uninorms on fuzzy truth values under the condition that the nullnorm is conditionally distributive over the uninorm. In Section 4 we study distributivity between extended continuous operators with absorbing element and extended uninorms.
A conclusion is given in Section 5.
\section{Previous Results}
In this section, we recall some basic concepts and terminologies used throughout the paper.
\begin{definition}[\cite{Klement2000}]
A t-norm (resp. t-conorm) is a binary operation $T:[0,1]^{2}\rightarrow[0,1]$ (resp. $S:[0,1]^{2}\rightarrow[0,1]$) that is commutative, associative, non-decreasing in each variable, and has a neutral element $1$ (resp. $0$).
\end{definition}
\begin{definition}[\cite{Klement2000}]
\quad
(i) A t-norm $T$ is said to be strict, if $T$ is continuous and strictly monotone.
(ii) A t-norm $T$ is said to be nilpotent, if $T$ is continuous and if each $x\in(0,1)$ is a nilpotent element of $T$.
\end{definition}
The basic continuous t-norms are minimum, $T_{M}(x,y)=\min(x,y)$, the product, $T_{P}(x,y)=xy$, and the {\L}ukasiewicz t-norm, $T_{L}(x,y)=\max(x+y-1,0)$. Dually, the basic continuous t-conorms are maximum, $S_{M}(x,y)=\max(x,y)$, the probabilistic sum, $S_{P}(x,y)=x+y-xy$, and the {\L}ukasiewicz t-conorm, $S_{L}(x,y)=\min(x+y,1)$.
\begin{definition}[\cite{Klement2000}]
A binary function $U : [0,1]^{2}\rightarrow[0,1]$ is called a uninorm if it is commutative, associative, non-decreasing in each place and there exists
some element $e\in[0,1]$ such that $U(x,e)=x$ for all $x\in[0,1]$ where $e$ is called a neutral element of $U$.
\end{definition}
One can see that a uninorm $U$ is a t-norm if $e=1$, and a t-conorm if $e=0$. A uninorm $U$ is called proper if its neutral element $e\in(0,1)$. It is clear that $U(0,1)\in\{0,1\}$ (see \cite{Fodor1997}). $U$ is said to be conjunctive if $U(1,0)=0$, and be disjunctive if $U(1,0)=1$.
With any uninorm $U$ with neutral element $e\in(0,1)$, we can associate two binary operations $T_{U}$ and $S_{U}: [0,1]^{2}\rightarrow [0,1]$ defined by
\begin{eqnarray*}
T_{U}(x,y)=\frac{U(ex,ey)}{e} \mbox{ \ \ \ and \ \ \ }
S_{U}(x,y)=\frac{U(e+(1-e)x,e+(1-e)y)-e}{1-e},
\end{eqnarray*}
respectively. It is easy to see that $T_{U}$ is a t-norm and that $S_{U}$ is a t-conorm where $T_{U}$ is called an underlying t-norm, and $S_{U}$ is called an underlying t-conorm.
Let us denote the remaining part of the unit square by $E$, i.e., $E=[0,1]^{2}\setminus([0,e]^{2}\cup[e,1]^{2})$. On the set $E$, any uninorm $U$ is bounded by the minimum and maximum of its arguments, i.e., for any $(x,y)\in E$, $$\min(x,y)\leqslant U(x,y)\leqslant \max(x,y).$$
The most studied classes of uninorms are:
$\bullet$ Idempotent uninorms in $\mathcal{U}_{id}$\cite{Baets,LiG2015}, those that satisfy $U(x,x)=x$ for all $x\in[0,1]$.
$\bullet$ Uninorms in $\mathcal{U}_{\min}$ (resp. $\mathcal{U}_{\max}$)\cite{Fodor1997,LiG2015}, those given by minimum (resp. maximum) in $E$.
$\bullet$ Uninorms in $\mathcal{CU}$\cite{HuSK2001,LiG2015}, those that are continuous in the open square $(0,1)^{2}$.
$\bullet$ Uninorms in $\mathcal{WCU}$\cite{Calvo20015,Saminger2007,LiG201501,LiG2015}, those that are with a continuous underlying t-norm and t-conorm.
\begin{definition}[\cite{Calvo2001,Klement2000}]\label{defi2.4}
A nullnorm is a binary operation $F : [0,1]^{2}\rightarrow[0,1]$, which is commutative, associative, non-decreasing in each variable and there exists an
element $k\in[0,1]$ such that $F(0,x)=x$ for all $x\in [0, k]$ and $F(1,x)=x$ for all $x\in [k,1]$.
\end{definition}
Clearly, a nullnorm $F$ is a t-norm if $k=0$, and a t-conorm if $k=1$. If $k\in(0,1)$, then a nullnorm $F$ is called proper. It is immediately clear that every nullnorm $F$ satisfies $F(k,x)=k$ for all $x\in[0,1]$, i.e., $k$ is an absorbing element of $F$.
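To fix ideas, a nullnorm with absorbing element $k$ can be assembled from a t-conorm rescaled to $[0,k]^{2}$ and a t-norm rescaled to $[k,1]^{2}$, with the constant value $k$ on the mixed region (this constant is forced by monotonicity together with the absorbing element). The following sketch is an illustration only (the function name and default arguments are ours); taking $S=\max$ and $T=\min$ gives the idempotent nullnorm $\mathrm{med}(x,y,k)$:
\begin{verbatim}
def nullnorm(x, y, k, S=max, T=min):
    # t-conorm S rescaled to [0,k]^2, t-norm T rescaled to [k,1]^2,
    # and the absorbing value k on the mixed region
    if x <= k and y <= k:
        return k * S(x / k, y / k) if k > 0 else 0.0
    if x >= k and y >= k:
        return k + (1 - k) * T((x - k) / (1 - k), (y - k) / (1 - k)) if k < 1 else 1.0
    return k

# S = max and T = min yield med(x, y, k):
print(nullnorm(0.2, 0.3, 0.4), nullnorm(0.7, 0.9, 0.4), nullnorm(0.2, 0.9, 0.4))
\end{verbatim}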
\begin{definition}[\cite{Walker2005}]
Fuzzy truth values are mappings of $[0,1]$ into itself. The set of fuzzy truth values is denoted by $\mathcal{F}=\{f \mid f: [0,1]\rightarrow [0,1]\}$.
\end{definition}
According to Zadeh's extension principle, a two-place function $G: [0,1]^{2}\rightarrow[0,1]$ can be extended to $\odot_{G}:\mathcal{F}^{2}\rightarrow\mathcal{F}$ by the convolution of $G$ with respect to $\wedge$ and $\vee$. Let $f,g\in\mathcal{F}$. Then $(f\odot_{G} g)(z)=\bigvee\limits_{z=G(x,y)}(f(x)\wedge g(y))$. Here, $\odot_{G}$ is called the extended $G$.
If $G$ is a nullnorm $F$ or a uninorm $U$, then we have its extended nullnorm or uninorm defined by
\begin{eqnarray}\label{eqn41}
(f\odot_{F} g)(z)=\bigvee_{F(x,y)=z}(f(x)\wedge g(y))
\end{eqnarray}
and
\begin{eqnarray}\label{eqn42}
(f\odot_{U} g)(z)=\bigvee_{U(x,y)=z}(f(x)\wedge g(y)),
\end{eqnarray}
respectively (see \cite{Xie2018}). In particular, if $G$ is the t-norm $T_{M}=\min$ or t-conorm $S_{M}=\max$, then we use $\sqcap$ and $\sqcup$ instead of $\odot_{F}$ and $\odot_{U}$, respectively (see \cite{Walker2005}), i.e.,
\begin{eqnarray}\label{eq413}
(f\sqcap g)(z)=\bigvee\limits_{x\wedge y=z}(f(x)\wedge g(y)),
\end{eqnarray}
\begin{eqnarray}\label{eq414}
(f\sqcup g)(z)=\bigvee\limits_{x\vee y=z}(f(x)\wedge g(y)).
\end{eqnarray}
\begin{definition}[\cite{Walker2005}]\label{conves1}
An element $f\in \mathcal{F}$ is said to be convex if for all $x,y,z\in[0,1]$ for which $x\leqslant y\leqslant z$, we have $f(y)\geqslant f(x)\wedge f(z)$.
\end{definition}
\section{Distributive laws between the extended nullnorms and uninorms}
In this section, the distributive laws between the extended nullnorms and uninorms on fuzzy truth values are discussed.
\begin{theorem}\label{theor41}
Let $F:[0,1]^{2}\rightarrow[0,1]$ be a continuous non-decreasing operator. If $f\in \mathcal{F}$ is convex, then the following statements hold for all $g,h\in\mathcal{F}$.
\begin{enumerate}
\item [(i)] $f\odot_{F}(g\sqcap h)=(f\odot_{F}g)\sqcap(f\odot_{F}h)$;
\item [(ii)] $f\odot_{F}(g\sqcup h)=(f\odot_{F}g)\sqcup(f\odot_{F}h)$.
\end{enumerate}
\end{theorem}
\begin{proof}
We only provide the proof of statement (i), the statement of (ii) being analogous.
According to formulas (\ref{eqn41}) and (\ref{eq413}), for all $z\in[0,1]$, we have
\begin{eqnarray*}
\left(f\odot_{F}(g\sqcap h)\right)(z)=\bigvee\limits_{F(y,u\wedge v)=z}f(y)\wedge g(u)\wedge h(v)
\end{eqnarray*}
and
\begin{eqnarray*}
\left((f\odot_{F}g)\sqcap(f\odot_{F}h)\right)(z)&=&\bigvee\limits_{F(p,q)\wedge F(s,t)=z}f(p)\wedge g(q)\wedge f(s)\wedge h(t).
\end{eqnarray*}
On one hand, due to $F(y,u\wedge v)=F(y,u)\wedge F(y,v)$, it holds that
\begin{eqnarray}\label{eqna40}
\left(f\odot_{F}(g\sqcap h)\right)(z)\leqslant\left((f\odot_{F}g)\sqcap(f\odot_{F}h)\right)(z).
\end{eqnarray}
On the other hand, suppose that $z=F(p,q)\wedge F(s,t)$. It is easy to see that if there exists $y\in[0,1]$ such that both of the following hold, then the reverse inequality of (\ref{eqna40}) holds.
\begin{eqnarray}\label{eqna41}
z=F(y,q\wedge t)
\end{eqnarray}
and
\begin{eqnarray}\label{eqna42}
f(y)\wedge g(q)\wedge h(t)\geqslant f(p)\wedge g(q)\wedge f(s)\wedge h(t).
\end{eqnarray}
Next, we shall prove formulas (\ref{eqna41}) and (\ref{eqna42}). From $z=F(p,q)\wedge F(s,t)$, we distinguish three cases.
(i) If $F(p,q)=F(s,t)=z$, then let $y=p\wedge s$. Thus $F(y,q\wedge t)=F(y,q)\wedge F(y,t)=z$, and $f(y)=f(p)$ or $f(y)=f(s)$. Therefore, formulas (\ref{eqna41}) and (\ref{eqna42}) hold.
(ii) If $F(p,q)>z$ and $F(s,t)=z$, then we have the following two subcases.
\begin{enumerate}
\item []
\begin{enumerate}
\item [{\rm $\bullet$}] If $F(s,q)\geqslant z$, then $F(s,q)\wedge F(s,t)=F(s,t)=z$, and put $y=s$. Then both (\ref{eqna41}) and (\ref{eqna42}) hold.
\item [{\rm $\bullet$}] If $F(s,q)<z$, then $F(s,q)<F(s,t)$ means $q<t$. Moreover, $F(p,q)>z>F(s,q)$ implies $s<p$. Because $F$ is continuous, there exists a $y$ with $s<y<p$ such that $F(y,q)=z$, and $f(y)\geqslant f(p)\wedge f(s)$ since $f$ is convex. Hence both (\ref{eqna41}) and (\ref{eqna42}) hold.
\end{enumerate}
\end{enumerate}
(iii) If $F(p,q)=z$ and $F(s,t)>z$, then, similarly to (ii), we obtain (\ref{eqna41}) and (\ref{eqna42}).
In each of Cases (i), (ii) and (iii), both (\ref{eqna41}) and (\ref{eqna42}) hold. Therefore,
$$\left(f\odot_{F}(g\sqcap h)\right)(z)\geqslant\left((f\odot_{F}g)\sqcap(f\odot_{F}h)\right)(z).$$
This completes the proof.
\end{proof}
Note that Theorem \ref{theor41} generalizes the sufficiency of Proposition 3.9 in \cite{HuBQ2014} (see also the conclusions 1 and 4 of Theorem 5.5.3 in \cite{Harding2016}).
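As a rough numerical sanity check of Theorem \ref{theor41} (an illustration only, not part of the proof: it discretizes $[0,1]$ by a grid that the chosen $F$ maps into itself, here the {\L}ukasiewicz t-conorm $S_{L}$, and all helper names are ours), one can compare both sides of statement (i) for a convex $f$ and arbitrary $g,h$:
\begin{verbatim}
import numpy as np

n = 11
grid = np.linspace(0.0, 1.0, n)

def extend(G, f, g):
    # (f o_G g)(z) = sup over G(x,y)=z of min(f(x), g(y)), restricted to the grid
    out = np.zeros(n)
    for i, x in enumerate(grid):
        for j, y in enumerate(grid):
            k = int(round(G(x, y) * (n - 1)))
            out[k] = max(out[k], min(f[i], g[j]))
    return out

F = lambda x, y: min(x + y, 1.0)        # continuous non-decreasing (Lukasiewicz t-conorm)
MIN = lambda x, y: min(x, y)

f = np.exp(-10 * (grid - 0.4) ** 2)     # convex (unimodal) fuzzy truth value
g, h = np.random.rand(n), np.random.rand(n)

lhs = extend(F, f, extend(MIN, g, h))
rhs = extend(MIN, extend(F, f, g), extend(F, f, h))
print(np.allclose(lhs, rhs))            # expected to print True for convex f
\end{verbatim}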
\begin{lemma}[\cite{Martín2003}]\label{le3.1}
Consider $e\in(0,1)$. The following statements are equivalent:
\begin{enumerate}
\item [(i)] $U$ is an idempotent uninorm with neutral element $e$.
\item [(ii)] There exists a non-increasing function $g:[0,1]\rightarrow[0,1]$, symmetric with respect to the main diagonal, with $g(e)=e$, such that, for all $(x,y)\in E$
\begin{eqnarray}
U(x,y)=\left\{\begin{array}{ll}
\min(x,y), & {\mbox{\scriptsize\normalsize if }y<g(x) \mbox{ or }y=g(x) \mbox{ and }x<g(g(x)),}\\
\max(x,y), & {\mbox{\scriptsize\normalsize if }y>g(x) \mbox{ or }y=g(x) \mbox{ and }x>g(g(x)),}\\
x \mbox{ or } y, & {\mbox{\scriptsize\normalsize if }y=g(x) \mbox{ and }x=g(g(x))}\\
\end{array}
\right.
\end{eqnarray}
being commutative on the set of points $(x,y)$ such that $y=g(x)$ with $x=g(g(x))$.
\end{enumerate}
\end{lemma}
\begin{remark}\label{remark4}
The first uninorms, which were constructed by Yager and Rybalov\cite{Yager1996}, are idempotent uninorms from classes $\mathcal{U}_{\min}$ and $\mathcal{U}_{\max}$ of the following form:
\begin{eqnarray*}
\underline{U}(x,y)=\left\{\begin{array}{ll}
\max(x,y) , & {\mbox{\scriptsize\normalsize if }(x,y)\in[e,1]^{2},}\\
\min(x,y), & {\mbox{\scriptsize\normalsize otherwise}}
\end{array}
\right.
\end{eqnarray*}
and
\begin{eqnarray*}
\overline{U}(x,y)=\left\{\begin{array}{ll}
\min(x,y) , & {\mbox{\scriptsize\normalsize if }(x,y)\in[0,e]^{2},}\\
\max(x,y), & {\mbox{\scriptsize\normalsize otherwise. }}
\end{array}
\right.
\end{eqnarray*}
\end{remark}
These uninorms are the only explicit examples of idempotent uninorms.
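For illustration only (the function names are ours), these two uninorms are immediate to implement:
\begin{verbatim}
def u_lower(x, y, e):
    # underline-U: max on [e,1]^2, min elsewhere
    return max(x, y) if x >= e and y >= e else min(x, y)

def u_upper(x, y, e):
    # overline-U: min on [0,e]^2, max elsewhere
    return min(x, y) if x <= e and y <= e else max(x, y)

e = 0.5
print(u_lower(0.7, 0.9, e), u_lower(0.2, 0.9, e))   # 0.9 0.2
print(u_upper(0.2, 0.3, e), u_upper(0.2, 0.9, e))   # 0.2 0.9
\end{verbatim}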
From Theorem \ref{theor41} and Lemma \ref{le3.1}, we immediately have the following result.
\begin{theorem}
Let $F$ be a continuous nullnorm and $U$ an idempotent uninorm. If $f\in \mathcal{F}$ is convex, then the following holds for all $g,h\in\mathcal{F}$.
$$f\odot_{F}(g\odot_{U} h)=(f\odot_{F}g)\odot_{U}(f\odot_{F}h).$$
\end{theorem}
\begin{definition}
Let $F:[0,1]^{2}\rightarrow[0,1]$ be a continuous non-decreasing operator and $U$ be a uninorm.
\begin{enumerate}
\item [{\rm $\bullet$}] $F$ is conditionally distributive over $U$ from the left (CDl) if $F(x,U(y,z))=U(F(x,y),F(x,z))$ for all $x,y,z\in[0,1]$ whenever $U(y,z)<1$.
\item [{\rm $\bullet$}] $F$ is conditionally distributive over $U$ from the right (CDr) if $F(U(x,y),z)=U(F(x,z),F(y,z))$ for all $x,y,z\in[0,1]$ whenever $U(x,y)<1$.
\end{enumerate}
\end{definition}
Of course, for a commutative operator $F$, (CDl) and (CDr) coincide and are denoted by (CD).
In the sequel, we study the distributive laws between the extended nullnorm and uninorm on fuzzy truth values under the condition that the nullnorm is conditionally distributive over the uninorm. First, we need the following lemma.
\begin{lemma}[\cite{LiG2015}]\label{theor44}
A continuous nullnorm $F$ with an absorbing element $k\in(0,1)$ and a disjunctive uninorm $U\in\mathcal{WCU}$ with neutral element $e\in(0,1)$ satisfy (CD) if and only if one of the following cases is fulfilled:
\begin{enumerate}
\item []
\begin{enumerate}
\item [{\rm (i)}]
$e<k$ and $F,U$ are given as in \cite{Mas2002} (Proposition 4.2), i.e.,
\begin{eqnarray}\label{eqn45}
U(x,y)=\left\{\begin{array}{ll}
\min(x,y) , & {\mbox{\scriptsize\normalsize if }(x,y)\in[0,e]^{2},}\\
\max(x,y), & {\mbox{\scriptsize\normalsize otherwise }}
\end{array}
\right.
\end{eqnarray}
and
\begin{eqnarray}\label{eqn46}
F(x,y)=\left\{\begin{array}{ll}
eS_{1}(\frac{x}{e},\frac{y}{e}) , & {\mbox{\scriptsize\normalsize if }(x,y)\in[0,e]^{2},}\\
e+(k-e)S_{2}\left(\frac{x-e}{k-e},\frac{y-e}{k-e}\right), & {\mbox{\scriptsize\normalsize if }(x,y)\in[e,k]^{2},}\\
k+(1-k)T\left(\frac{x-k}{1-k},\frac{y-k}{1-k}\right), & {\mbox{\scriptsize\normalsize if }(x,y)\in[k,1]^{2},}\\
\max(x,y), & {\mbox{\scriptsize\normalsize if }\min(x,y)\leqslant e\leqslant \max(x,y)\leqslant k,}\\
k, & {\mbox{\scriptsize\normalsize otherwise, }}
\end{array}
\right.
\end{eqnarray}
where $S_{1}$ and $S_{2}$ are continuous t-conorms and $T$ is a continuous t-norm.
\end{enumerate}
\begin{enumerate}
\item [{\rm (ii)}]
$e<k$ and $F,U$ are given as in \cite{Dragan2013} (Theorem 16), i.e., there is $a\in[k,1)$ such that $F$ and $U$ are given by
\begin{eqnarray}\label{eqn47}
U(x,y)=\left\{\begin{array}{ll}
\min(x,y) , & {\mbox{\scriptsize\normalsize if }(x,y)\in[0,e]^{2},}\\
a+(1-a)S\left(\frac{x-a}{1-a},\frac{y-a}{1-a}\right), & {\mbox{\scriptsize\normalsize if }(x,y)\in[a,1]^{2},}\\
\max(x,y), & {\mbox{\scriptsize\normalsize otherwise }}
\end{array}
\right.
\end{eqnarray}
and
\begin{eqnarray}\label{eqn48}
F(x,y)=\left\{\begin{array}{ll}
eS_{1}(\frac{x}{e},\frac{y}{e}) , & {\mbox{\scriptsize\normalsize if }(x,y)\in[0,e]^{2},}\\
e+(k-e)S_{2}\left(\frac{x-e}{k-e},\frac{y-e}{k-e}\right), & {\mbox{\scriptsize\normalsize if }(x,y)\in[e,k]^{2},}\\
k+(a-k)T_{1}\left(\frac{x-k}{a-k},\frac{y-k}{a-k}\right), & {\mbox{\scriptsize\normalsize if }(x,y)\in[k,a]^{2},}\\
a+(1-a)T\left(\frac{x-a}{1-a},\frac{y-a}{1-a}\right), & {\mbox{\scriptsize\normalsize if }(x,y)\in[a,1]^{2},}\\
\max(x,y), & {\mbox{\scriptsize\normalsize if } \min(x,y)\leqslant e\leqslant \max(x,y)\leqslant k,}\\
\min(x,y), & {\mbox{\scriptsize\normalsize if } k\leqslant\min(x,y)\leqslant a\leqslant\max(x,y),}\\
k, & {\mbox{\scriptsize\normalsize otherwise, }}
\end{array}
\right.
\end{eqnarray}
where $S_{1}$ and $S_{2}$ are continuous t-conorms, $T_{1}$ is a continuous t-norm and $S$ is a nilpotent t-conorm such that the additive generator $s$ of $S$ satisfying $s(1)=1$ is also a multiplicative generator of the strict t-norm $T$.
\end{enumerate}
\begin{enumerate}
\item [{\rm (iii)}]
$e>k$ and
\begin{eqnarray}\label{eqn49}
U(x,y)=\left\{\begin{array}{ll}
\max(x,y) , & {\mbox{\scriptsize\normalsize if }(x,y)\in[e,1]^{2},}\\
1, & {\mbox{\scriptsize\normalsize if }x=1 \mbox{ or } y=1,}\\
\min(x,y), & {\mbox{\scriptsize\normalsize otherwise }}
\end{array}
\right.
\end{eqnarray}
and
\begin{eqnarray}\label{eqn410}
F(x,y)=\left\{\begin{array}{ll}
kS_{1}(\frac{x}{k},\frac{y}{k}) , & {\mbox{\scriptsize\normalsize if }(x,y)\in[0,k]^{2},}\\
k+(e-k)T_{1}\left(\frac{x-k}{e-k},\frac{y-k}{e-k}\right), & {\mbox{\scriptsize\normalsize if }(x,y)\in[k,e]^{2},}\\
e+(1-e)T_{2}\left(\frac{x-e}{1-e},\frac{y-e}{1-e}\right), & {\mbox{\scriptsize\normalsize if }(x,y)\in[e,1]^{2},}\\
k, & {\mbox{\scriptsize\normalsize if }\min(x,y)\leqslant k\leqslant \max(x,y),}\\
\min(x,y), & {\mbox{\scriptsize\normalsize otherwise, }}
\end{array}
\right.
\end{eqnarray}
where $S_{1}$ is a continuous t-conorm and $T_{1}$ and $T_{2}$ are continuous t-norms.
\end{enumerate}
\end{enumerate}
\end{lemma}
Then we have the following theorem.
\begin{theorem}\label{5theo3.4}
Let $F$ be a continuous nullnorm with an absorbing element $k\in(0,1)$ and $U\in\mathcal{WCU}$ a disjunctive uninorm with neutral element $e\in(0,1)$ satisfying (CD). If $f\in \mathcal{F}$ is convex, then the following holds for all $g,h\in\mathcal{F}$.
$$f\odot_{F}(g\odot_{U} h)=(f\odot_{F}g)\odot_{U}(f\odot_{F}h).$$
\end{theorem}
\begin{proof}
First, from formulas (\ref{eqn41}) and (\ref{eqn42}), we have that
\begin{eqnarray}\label{eqn411}
\left(f\odot_{F}(g\odot_{U} h)\right)(z)=\bigvee\limits_{F(y,U(u,v))=z}f(y)\wedge g(u)\wedge h(v)
\end{eqnarray}
and
\begin{eqnarray}\label{eqn412}
\left((f\odot_{F}g)\odot_{U}(f\odot_{F}h)\right)(z)&=&\bigvee\limits_{U(F(p,q),F(s,t))=z}f(p)\wedge g(q)\wedge f(s)\wedge h(t).
\end{eqnarray}
Now, suppose that $z\in [0,1)$. Then from $z=F(y,U(u,v))\in [0,1)$, we have $U(u,v)<1$ for any $u,v\in [0,1]$. Thus $F(y,U(u,v))=U(F(y,u),F(y,v))\in [0,1)$ since $F$ and $U$ satisfy (CD).
Next, we divide our proof into three cases as follows from Lemma \ref{theor44}.
(i) $e<k$ and $U,F$ are given as Eqs. (\ref{eqn45}) and (\ref{eqn46}), respectively. From Theorem \ref{theor41} (i) and (ii), it is obvious that $\left(f\odot_{F}(g\odot_{U} h)\right)(z)=\left((f\odot_{F}g)\odot_{U}(f\odot_{F}h)\right)(z)$ for $z\in[0,1)$.
(ii) $e<k$ and $U,F$ are given as Eqs. (\ref{eqn47}) and (\ref{eqn48}), respectively. In the following, we shall prove $\left(f\odot_{F}(g\odot_{U} h)\right)(z)=\left((f\odot_{F}g)\odot_{U}(f\odot_{F}h)\right)(z)$ for $z\in[0,1)$. This will be done by checking the subsequent four cases.
Case (a). If $z=F(y,U(u,v))\in[0,e]$, then $U(u,v)=\min(u,v)$. Therefore, $\left(f\odot_{F}(g\odot_{U} h)\right)(z)=\left((f\odot_{F}g)\odot_{U}(f\odot_{F}h)\right)(z)$ from Theorem \ref{theor41} (i).
Case (b). If $z=F(y,U(u,v))\in[e,k]$, then we distinguish three subcases.
\begin{enumerate}
\item []
\begin{enumerate}
\item [{\rm $\bullet$}] If $y\in[e,k]$ and $U(u,v)\in[e,k]$, then $U(u,v)=\max(u,v)$. Therefore, $\left(f\odot_{F}(g\odot_{U} h)\right)(z)=\left((f\odot_{F}g)\odot_{U}(f\odot_{F}h)\right)(z)$ from Theorem \ref{theor41} (ii).
\item [{\rm $\bullet$}] If $y\in[e,k]$ and $U(u,v)\in[0,e]$, then $\min(y,U(u,v))\leqslant e\leqslant \max(y,U(u,v))\leqslant k$, which implies $F(y,U(u,v))=\max(y,U(u,v))$, and $U(u,v)=\min(u,v)$. Therefore, $\left(f\odot_{F}(g\odot_{U} h)\right)(z)=\left((f\odot_{F}g)\odot_{U}(f\odot_{F}h)\right)(z)$ from Theorem \ref{theor41} (i).
\item [{\rm $\bullet$}] If $y\in[0,e]$ and $U(u,v)\in[e,k]$, then $\min(y,U(u,v))\leqslant e\leqslant \max(y,U(u,v))\leqslant k$, which means that $F(y,U(u,v))=\max(y,U(u,v))$, and $U(u,v)=\max(u,v)$. Therefore, $\left(f\odot_{F}(g\odot_{U} h)\right)(z)=\left((f\odot_{F}g)\odot_{U}(f\odot_{F}h)\right)(z)$ from Theorem \ref{theor41} (ii).
\end{enumerate}
\end{enumerate}
Case (c). If $z=F(y,U(u,v))\in[k,a]$, then we distinguish three subcases.
\begin{enumerate}
\item []
\begin{enumerate}
\item [{\rm $\bullet$}] If $y\in[k,a]$ and $U(u,v)\in[k,a]$, then $U(u,v)=\max(u,v)$. Therefore, $\left(f\odot_{F}(g\odot_{U} h)\right)(z)=\left((f\odot_{F}g)\odot_{U}(f\odot_{F}h)\right)(z)$ from Theorem \ref{theor41} (ii).
\item [{\rm $\bullet$}] If $y\in[k,a]$ and $U(u,v)\in(a,1)$, then $k\leqslant\min(y,U(u,v))\leqslant a\leqslant\max(y,U(u,v))$, which implies $F(y,U(u,v))=\min(y,U(u,v))$. Since $U(u,v)\in(a,1)$, we have either $F=\min$ and $U=\max$ or $F=\min$ and $U(u,v)=a+(1-a)S\left(\frac{u-a}{1-a},\frac{v-a}{1-a}\right)$ with $u,v\in(a,1)$.
\begin{enumerate}
\item [{\rm ($\ast$)}] If $F=\min$ and $U=\max$, then $$\left(f\odot_{F}(g\odot_{U} h)\right)(z)=\left((f\odot_{F}g)\odot_{U}(f\odot_{F}h)\right)(z)$$ from Theorem \ref{theor41} (ii).
\item [{\rm ($\ast\ast$)}] If $F=\min$, and $U(u,v)=a+(1-a)S\left(\frac{u-a}{1-a},\frac{v-a}{1-a}\right)$ with $u,v\in(a,1)$, then $z=y=F(y,U(u,v))=U(F(y,u),F(y,v))=U(y,y)$, i.e., $y=U(y,y)$ since $y< U(u,v)$, which means that $y$ is an idempotent element of $U$, contrary to $U(u,v)= a+(1-a)S\left(\frac{u-a}{1-a},\frac{v-a}{1-a}\right)$ with $u,v\in(a,1)$. Therefore, this subcase is not possible.
\end{enumerate}
\item [{\rm $\bullet$}] If $y\in[a,1]$ and $U(u,v)\in[k,a]$, then $k\leqslant\min(y,U(u,v))\leqslant a\leqslant\max(y,U(u,v))$, it follows that $U(u,v)=\max(u,v)$ and $F(y,U(u,v))=\min(y,U(u,v))$. Therefore, $\left(f\odot_{F}(g\odot_{U} h)\right)(z)=\left((f\odot_{F}g)\odot_{U}(f\odot_{F}h)\right)(z)$ from Theorem \ref{theor41} (ii).
\end{enumerate}
\end{enumerate}
Case (d). If $z=F(y,U(u,v))\in[a,1)$, then $y\in[a,1]$ and $U(u,v)\in[a,1)$, and we distinguish two subcases.
\begin{enumerate}
\item []
\begin{enumerate}
\item [{\rm $\bullet$}] If $F(y,U(u,v))=a+(1-a)T\left(\frac{y-a}{1-a},\frac{U(u,v)-a}{1-a}\right)$ and $U=\max$, then $\left(f\odot_{F}(g\odot_{U} h)\right)(z)=\left((f\odot_{F}g)\odot_{U}(f\odot_{F}h)\right)(z)$ by Theorem \ref{theor41} (ii).
\item [{\rm $\bullet$}] If $F(y,U(u,v))=a+(1-a)T\left(\frac{y-a}{1-a},\frac{U(u,v)-a}{1-a}\right)$ and $U(u,v)=a+(1-a)S\left(\frac{u-a}{1-a},\frac{v-a}{1-a}\right)$, where $T$ is a strict t-norm and $S$ is a nilpotent t-conorm, then, due to $F(y,U(u,v))=U(F(y,u),F(y,v))$ and formulas (\ref{eqn411}) and (\ref{eqn412}), it holds that
\begin{eqnarray}\label{eqn413}
\left(f\odot_{F}(g\odot_{U} h)\right)(z)\leqslant\left((f\odot_{F}g)\odot_{U}(f\odot_{F}h)\right)(z).
\end{eqnarray}
Therefore, we just need to prove $\left(f\odot_{F}(g\odot_{U} h)\right)(z)\geqslant\left((f\odot_{F}g)\odot_{U}(f\odot_{F}h)\right)(z)$.
We first prove the following statement.
{\textbf A}. For any $p,q, s, t, y'\in [a,1]$, $U(F(p,q),F(s,t))=z$ and $U(F(y',q),F(y',t))=z$ imply $f(y')\wedge g(q)\wedge h(t)\geqslant f(p)\wedge g(q)\wedge f(s)\wedge h(t)$ whenever $U(q,t)<1$.
Since $U(F(p,q),F(s,t))=z$ and $U(F(y',q),F(y',t))=z$, we have $p\leqslant y'\leqslant s$ or $s\leqslant y'\leqslant p$. Otherwise, $y'<p\wedge s$ or $y'>p\vee s$. Say, $y'<p\wedge s$. Then
$z=U(F(p,q),F(s,t))\geqslant U(F(p\wedge s,q),F(p\wedge s,t))=F(p\wedge s,U(q,t))>F(y',U(q,t))=U(F(y',q),F(y',t))=z$ since $U(q,t)<1$ and $F$ is a strict t-norm on $[a,1]$, a contradiction. Consequently, $p\leqslant y'\leqslant s$ or $s\leqslant y'\leqslant p$. Therefore, $f(y')\geqslant f(p)\wedge f(s)$ since $f$ is convex, which means that $f(y')\wedge g(q)\wedge h(t)\geqslant f(p)\wedge g(q)\wedge f(s)\wedge h(t)$. This completes the proof of {\textbf A}.
Then, using {\textbf A}, we have that
\begin{eqnarray*}
\left((f\odot_{F} g)\odot_{U} (f\odot_{F} h)\right)(z)&=&\bigvee\limits_{U(F(p,q),F(s,t))=z}f(p)\wedge g(q)\wedge f(s)\wedge h(t)\\
&\leqslant&\bigvee\limits_{U(F(y',q),F(y',t))=z}f(y')\wedge g(q)\wedge h(t)\\
&=&\bigvee\limits_{F(y',U(q,t))=z}f(y')\wedge g(q)\wedge h(t)\\
&=&\bigvee\limits_{F(y,U(u,v))=z}f(y)\wedge g(u)\wedge h(v)\\
&=&\left(f\odot_{F}(g\odot_{U}h)\right)(z),
\end{eqnarray*}
i.e., $\left(f\odot_{F}(g\odot_{U} h)\right)(z)\geqslant\left((f\odot_{F} g)\odot_{U} (f\odot_{F} h)\right)(z)$.
\end{enumerate}
\end{enumerate}
(iii) $e>k$ and $U,F$ are given as Eqs. (\ref{eqn49}) and (\ref{eqn410}), respectively. By Theorem \ref{theor41} (i) and (ii), it is obvious that $\left(f\odot_{F}(g\odot_{U} h)\right)(z)=\left((f\odot_{F}g)\odot_{U}(f\odot_{F}h)\right)(z)$ for $z\in[0,1)$.
Cases (i), (ii) and (iii) yield that
$$\left(f\odot_{F}(g\odot_{U} h)\right)(z)=\left((f\odot_{F} g)\odot_{U} (f\odot_{F} h)\right)(z) \mbox{ for any }z\in[0,1).$$
Next, we shall prove $\left(f\odot_{F}(g\odot_{U} h)\right)(1)=\left((f\odot_{F} g)\odot_{U} (f\odot_{F} h)\right)(1)$.
Indeed, if $F(y,U(u,v))=1$ and $U(F(p,q),F(s,t))=1$, then $y=1$ and $u\vee v=1$, while $p=q=1$ or $s=t=1$. Thus from formulas (\ref{eqn411}) and (\ref{eqn412}),
\begin{eqnarray*}
\left(f\odot_{F}(g\odot_{U} h)\right)(1)&=&\left(\bigvee\limits_{u\in[0,1]}f(1)\wedge g(u)\wedge h(1)\right)\vee\left(\bigvee\limits_{v\in[0,1]}f(1)\wedge g(1)\wedge h(v)\right)
\end{eqnarray*}
and
\begin{eqnarray*}
\left((f\odot_{F}g)\odot_{U}(f\odot_{F}h)\right)(1)&=&\left(\bigvee\limits_{p,q\in[0,1]}f(p)\wedge g(q)\wedge f(1)\wedge h(1)\right)\\
&&\vee\left(\bigvee\limits_{s,t\in[0,1]}f(1)\wedge g(1)\wedge f(s)\wedge h(t)\right).
\end{eqnarray*}
Obviously, $\left(f\odot_{F}(g\odot_{U} h)\right)(1)\geqslant\left((f\odot_{F}g)\odot_{U}(f\odot_{F}h)\right)(1)$ since
$$\bigvee\limits_{u\in[0,1]}f(1)\wedge g(u)\wedge h(1)\geqslant\bigvee\limits_{p,q\in[0,1]}f(p)\wedge g(q)\wedge f(1)\wedge h(1)$$ and
$$\bigvee\limits_{v\in[0,1]}f(1)\wedge g(1)\wedge h(v)\geqslant\bigvee\limits_{s,t\in[0,1]}f(1)\wedge g(1)\wedge f(s)\wedge h(t).$$
On the other hand, from $y=1$ and $u\vee v=1$, together with $p=q=1$ or $s=t=1$, we also have $F(y,U(u,v))=1=U(F(y,u),F(y,v))$. Thus
\begin{eqnarray*}
\left(f\odot_{F}(g\odot_{U} h)\right)(1)&=&\bigvee\limits_{F(y,U(u,v))=1}f(y)\wedge g(u)\wedge h(v)\\
&=&\bigvee\limits_{U(F(y,u),F(y,v))=1}f(y)\wedge g(u)\wedge f(y)\wedge h(v) \\
&\leqslant &\bigvee\limits_{U(F(p,q),F(s,t))=1}f(p)\wedge g(q)\wedge f(s)\wedge h(t)\\
&=&\left((f\odot_{F}g)\odot_{U}(f\odot_{F}h)\right)(1).
\end{eqnarray*}
Consequently, $\left(f\odot_{F}(g\odot_{U} h)\right)(1)=\left((f\odot_{F} g)\odot_{U} (f\odot_{F} h)\right)(1)$.
In summary, $$(f\odot_{F}(g\odot_{U} h))(z)=((f\odot_{F}g)\odot_{U}(f\odot_{F}h))(z) \mbox{ for all }z\in[0,1].$$
\end{proof}
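The distributivity in Theorem \ref{5theo3.4} can also be illustrated numerically. The following Python sketch (an illustration added here, not part of the original development) discretizes $[0,1]$, approximates the sup-min convolutions of formulas (\ref{eqn411}) and (\ref{eqn412}) on the grid, and compares both sides for the structure of case (iii) of Lemma \ref{theor44}; the particular choices $k=0.3$, $e=0.6$, $S_{1}$ the {\L}ukasiewicz t-conorm and $T_{1}=T_{2}$ the product t-norm are illustrative assumptions only, and the grid snapping makes the check approximate rather than exact.
\begin{verbatim}
# Illustrative grid check of Theorem 5theo3.4 (case (iii) of Lemma theor44).
# Hypothetical parameters: k = 0.3, e = 0.6, S1 = Lukasiewicz t-conorm,
# T1 = T2 = product t-norm.  Grid snapping makes the test approximate.
import numpy as np

k, e = 0.3, 0.6
grid = np.linspace(0.0, 1.0, 41)

def U(x, y):                       # uninorm of Eq. (eqn49)
    if x == 1.0 or y == 1.0:
        return 1.0
    if x >= e and y >= e:
        return max(x, y)
    return min(x, y)

def F(x, y):                       # nullnorm of Eq. (eqn410)
    if x <= k and y <= k:
        return k * min(1.0, x / k + y / k)          # k * S1(x/k, y/k)
    if k <= x <= e and k <= y <= e:
        return k + (x - k) * (y - k) / (e - k)      # T1 = product
    if x >= e and y >= e:
        return e + (x - e) * (y - e) / (1.0 - e)    # T2 = product
    if min(x, y) <= k <= max(x, y):
        return k
    return min(x, y)

def extend(op, f, g):
    """Sup-min extension of a binary operation to fuzzy truth values."""
    out = np.zeros_like(grid)
    for i, x in enumerate(grid):
        for j, y in enumerate(grid):
            idx = np.abs(grid - op(x, y)).argmin()  # snap op(x,y) to grid
            out[idx] = max(out[idx], min(f[i], g[j]))
    return out

f = 1.0 - np.abs(grid - 0.5)       # convex fuzzy truth value
g, h = grid ** 2, np.sqrt(grid)
lhs = extend(F, f, extend(U, g, h))
rhs = extend(U, extend(F, f, g), extend(F, f, h))
print("max grid deviation:", np.abs(lhs - rhs).max())
\end{verbatim}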
For a conjunctive uninorm $U\in\mathcal{WCU}$ with neutral element $e\in(0,1)$, we first have the following lemma.
\begin{lemma}[\cite{Dragan2015,LiG2015}]\label{theor414}
A continuous nullnorm $F$ with an absorbing element $k\in(0,1)$ and a conjunctive uninorm $U\in\mathcal{WCU}$ with neutral element $e\in(0,1)$ satisfy (CD) if and only if one of the following cases is fulfilled:
\begin{enumerate}
\item []
\begin{enumerate}
\item [{\rm (i)}]
$e>k$ and $F,U$ are given as in \cite{Mas2002} (Proposition 4.3), i.e.,
\begin{eqnarray}\label{eqn415}
U(x,y)=\left\{\begin{array}{ll}
\max(x,y) , & {\mbox{\scriptsize\normalsize if }(x,y)\in[e,1]^{2},}\\
\min(x,y), & {\mbox{\scriptsize\normalsize otherwise }}
\end{array}
\right.
\end{eqnarray}
and
\begin{eqnarray}\label{eqn416}
F(x,y)=\left\{\begin{array}{ll}
kS_{1}(\frac{x}{k},\frac{y}{k}) , & {\mbox{\scriptsize\normalsize if }(x,y)\in[0,k]^{2},}\\
k+(e-k)T_{1}\left(\frac{x-k}{e-k},\frac{y-k}{e-k}\right), & {\mbox{\scriptsize\normalsize if }(x,y)\in[k,e]^{2},}\\
e+(1-e)T_{2}\left(\frac{x-e}{1-e},\frac{y-e}{1-e}\right), & {\mbox{\scriptsize\normalsize if }(x,y)\in[e,1]^{2},}\\
k, & {\mbox{\scriptsize\normalsize if }\min(x,y)\leqslant k\leqslant \max(x,y),}\\
\min(x,y), & {\mbox{\scriptsize\normalsize otherwise, }}
\end{array}
\right.
\end{eqnarray}
where $S_{1}$ is a continuous t-conorm and $T_{1}$ and $T_{2}$ are continuous t-norms.
\end{enumerate}
\begin{enumerate}
\item [{\rm (ii)}]
$e>k$ and $F, U$ are given as in \cite{Dragan2013} (Theorem 17), i.e., there is $a\in[e,1)$ such that $F$ and $U$ are given by
\begin{eqnarray}\label{eqn419}
U(x,y)=\left\{\begin{array}{ll}
\min(x,y) , & {\mbox{\scriptsize\normalsize if }(x,y)\in[0,e]\times[0,1]\cup[0,1]\times[0,e],}\\
a+(1-a)S\left(\frac{x-a}{1-a},\frac{y-a}{1-a}\right), & {\mbox{\scriptsize\normalsize if }(x,y)\in[a,1]^{2},}\\
\max(x,y), & {\mbox{\scriptsize\normalsize otherwise}}
\end{array}
\right.
\end{eqnarray}
and
\begin{eqnarray}\label{eqn420}
F(x,y)=\left\{\begin{array}{ll}
kS_{1}(\frac{x}{k},\frac{y}{k}) , & {\mbox{\scriptsize\normalsize if }(x,y)\in[0,k]^{2},}\\
k+(e-k)T_{1}\left(\frac{x-k}{e-k},\frac{y-k}{e-k}\right), & {\mbox{\scriptsize\normalsize if }(x,y)\in[k,e]^{2},}\\
e+(a-e)T_{2}\left(\frac{x-e}{a-e},\frac{y-e}{a-e}\right), & {\mbox{\scriptsize\normalsize if }(x,y)\in[e,a]^{2},}\\
a+(1-a)T\left(\frac{x-a}{1-a},\frac{y-a}{1-a}\right), & {\mbox{\scriptsize\normalsize if }(x,y)\in[a,1]^{2},}\\
k, & {\mbox{\scriptsize\normalsize if }\min(x,y)\leqslant k\leqslant \max(x,y),}\\
\min(x,y), & {\mbox{\scriptsize\normalsize otherwise, }}
\end{array}
\right.
\end{eqnarray}
where $S_{1}$ is a continuous t-conorm and $T_{1}$ and $T_{2}$ are continuous t-norms. Moreover, $S$ is a nilpotent t-conorm such that the additive generator $s$ of $S$ satisfying $s(1)=1$ is also a multiplicative generator of the strict t-norm $T$.
\end{enumerate}
\item [{\rm (iii)}]
$e<k$ and
\begin{eqnarray}\label{eqn421}
U(x,y)=\left\{\begin{array}{ll}
\min(x,y) , & {\mbox{\scriptsize\normalsize if }(x,y)\in[0,e]^{2},}\\
\max(x,y), & {\mbox{\scriptsize\normalsize if }(x,y)\in[e,1]^{2},}\\
1, & {\mbox{\scriptsize\normalsize if }x=1,y\neq0 \mbox{ or } x\neq0, y=1,}\\
\min(x,y), & {\mbox{\scriptsize\normalsize otherwise }}
\end{array}
\right.
\end{eqnarray}
and
\begin{eqnarray}\label{eqn422}
F(x,y)=\left\{\begin{array}{ll}
eS_{1}\left(\frac{x}{e},\frac{y}{e}\right) , & {\mbox{\scriptsize\normalsize if }(x,y)\in[0,e]^{2},}\\
e+(k-e)S_{2}\left(\frac{x-e}{k-e},\frac{y-e}{k-e}\right), & {\mbox{\scriptsize\normalsize if }(x,y)\in[e,k]^{2},}\\
k+(1-k)T\left(\frac{x-k}{1-k},\frac{y-k}{1-k}\right), & {\mbox{\scriptsize\normalsize if }(x,y)\in[k,1]^{2},}\\
\max(x,y), & {\mbox{\scriptsize\normalsize if }\min(x,y)\leqslant e\leqslant \max(x,y)\leqslant k,}\\
k, & {\mbox{\scriptsize\normalsize otherwise, }}
\end{array}
\right.
\end{eqnarray}
where $S_{1}$ and $S_{2}$ are continuous t-conorms and $T$ is a continuous t-norm.
\end{enumerate}
\end{lemma}
Then, based on Lemma \ref{theor414} and by a proof completely similar to that of Theorem \ref{5theo3.4}, we obtain the following theorem.
\begin{theorem}
Let $F$ be a continuous nullnorm with an absorbing element $k\in(0,1)$ and $U\in\mathcal{WCU}$ a conjunctive uninorm with neutral element $e\in(0,1)$ satisfying (CD). If $f\in \mathcal{F}$ is convex, then the following holds for all $g,h\in\mathcal{F}$.
$$f\odot_{F}(g\odot_{U} h)=(f\odot_{F}g)\odot_{U}(f\odot_{F}h).$$
\end{theorem}
\section{Distributivity of extended continuous operators and uninorms}
A form of the relaxed nullnorm, obtained by omitting commutativity and associativity from Definition \ref{defi2.4}, was introduced in \cite{Drewniak2008}. The set of all operators of this type is denoted by $Z_{k}$, where $k$ is an absorbing element of such operators.
In order to investigate the distributive laws between the extended continuous operators and uninorms, we first need the following three lemmas.
\begin{lemma}[\cite{Dragan2013}]\label{lem43}
A continuous operator $F\in Z_{k}$ and a continuous t-conorm $S$ satisfy (CDl) if and only if exactly one of the following cases is fulfilled:
(i) $S=S_{M}$,
(ii) there is an $a\in[k,1)$ such that $S,F$ are given by
\begin{eqnarray}\label{eqnarr42}
S(x,y)=\left\{\begin{array}{ll}
a+(1-a)S_{L}\left(\frac{x-a}{1-a},\frac{y-a}{1-a}\right), & {\mbox{\scriptsize\normalsize if }(x,y)\in[a,1]^{2},}\\
\max(x,y), & {\mbox{\scriptsize\normalsize otherwise}}
\end{array}
\right.
\end{eqnarray}
and
\begin{eqnarray}\label{eqnarr402}
F=\left\{\begin{array}{ll}
A, & {\mbox{\scriptsize\normalsize on }[0,k]^{2},}\\
B, & {\mbox{\scriptsize\normalsize on }[k,1]^{2},}\\
k, & {\mbox{\scriptsize\normalsize otherwise, }}
\end{array}
\right.
\end{eqnarray}
where $A:[0,k]^{2}\rightarrow[0,k]$ is a continuous increasing operator with neutral element $0$, $B:[k,1]^{2}\rightarrow[k,1]$ is a continuous increasing operator with neutral element $1$ such that $B(x,y)\in[a,1]$ for all $x,y\in[a,1]$ and $B=T_{P}$ on $[a,1]^{2}$.
\end{lemma}
\begin{lemma}[\cite{Dragan2013}]\label{lem44}
A continuous operator $F\in Z_{k}$ and a uninorm $U\in U_{\max}\cap\mathcal{WCU}$ with neutral element $e$ satisfy (CDl) if and only if $e<k$ and exactly one of the following cases is fulfilled:
(i) $F$ and $U$ are given as in \cite{Drewniak2008} (Theorem 16), i.e., $U=\overline{U}$ and
\begin{eqnarray}\label{eqn427}
F=\left\{\begin{array}{ll}
A_{1}, & {\mbox{\scriptsize\normalsize on }[0,e]^{2},}\\
A_{2}, & {\mbox{\scriptsize\normalsize on }[e,k]^{2},}\\
A_{3}, & {\mbox{\scriptsize\normalsize on }[0,e]\times[e,k],}\\
\max, & {\mbox{\scriptsize\normalsize on }[e,k]\times[0,e],}\\
B, & {\mbox{\scriptsize\normalsize on }[k,1]^{2},}\\
k, & {\mbox{\scriptsize\normalsize otherwise, }}
\end{array}
\right.
\end{eqnarray}
where $0$ is the neutral element of the operator $A_{1}$ and a left-side neutral element of $A_{3}$, $1$ is the neutral element of $B$, $e$ is a right-side neutral element of $A_{2}$, and $A_{1}$, $A_{2}$, $A_{3}$, $B$ are continuous increasing operators.
(ii) there is an $a\in[k,1)$ such that $U$ is given by (\ref{eqn47}) and $F$ is given by (\ref{eqn427}) such that $B(x,y)\in[a,1]$ for all $x,y\in[a,1]$ and $B=T_{P}$ on $[a,1]^{2}$.
\end{lemma}
\begin{lemma}[\cite{Dragan2013}]\label{lem45}
A continuous operator $F\in Z_{k}$ and a uninorm $U\in U_{\min}\cap\mathcal{WCU}$ with neutral element $e\in(0,1)$ satisfy (CDl) if and only if $k<e$ and exactly one of the following cases is fulfilled:
(i) $F$ and $U$ are given as in \cite{Drewniak2008} (Theorem 18), i.e., $U=\underline{U}$ and
\begin{eqnarray}\label{eqn428}
F=\left\{\begin{array}{ll}
A, & {\mbox{\scriptsize\normalsize on }(x,y)\in[0,k]^{2},}\\
B_{1}, & {\mbox{\scriptsize\normalsize on }(x,y)\in[k,e]^{2},}\\
B_{3}, & {\mbox{\scriptsize\normalsize on }(x,y)\in[e,1]\times[k,e],}\\
\min, & {\mbox{\scriptsize\normalsize on }(x,y)\in[k,e]\times[e,1],}\\
B_{2}, & {\mbox{\scriptsize\normalsize on }(x,y)\in[e,1]^{2},}\\
k, & {\mbox{\scriptsize\normalsize otherwise, }}
\end{array}
\right.
\end{eqnarray}
where $0$ is the neutral element of $A$, $1$ is the neutral element of $B_{2}$ and a left-side neutral element of $B_{3}$, $e$ is a right-side neutral element of $B_{1}$, and $B_{1}$, $B_{2}$, $B_{3}$, $A$ are continuous increasing operators.
(ii) there is an $a\in[e,1)$ such that $U$ is given by (\ref{eqn419}) and $F$ is given by (\ref{eqn428}) such that $B_{2}(x,y)\in[a,1]$ for all $x,y\in[a,1]$ and $B_{2}=T_{P}$ on $[a,1]^{2}$.
\end{lemma}
Consider the distributivity between $\odot_{F}$ and $\odot_{S}$ (where $\odot_{S}$ is an extended t-conorm) on fuzzy truth values. Then we have the following result.
\begin{theorem}\label{the41}
Let $F\in Z_{k}$ be a continuous operator and $S$ a continuous t-conorm satisfying (CDl). If $f\in\mathcal{F}$ is convex, then the following holds for all $g,h\in\mathcal{F}$.
$$f\odot_{F}(g\odot_{S} h)=(f\odot_{F}g)\odot_{S}(f\odot_{F}h).$$
\end{theorem}
\begin{proof}First, from formulas (\ref{eqn41}) and (\ref{eqn42}), we have that
\begin{eqnarray}\label{eqn401}
\left(f\odot_{F}(g\odot_{S} h)\right)(z)=\bigvee\limits_{F(y,S(u,v))=z}f(y)\wedge g(u)\wedge h(v)
\end{eqnarray}
and
\begin{eqnarray}\label{eqn402}
\left((f\odot_{F}g)\odot_{S}(f\odot_{F}h)\right)(z)&=&\bigvee\limits_{S(F(p,q),F(s,t))=z}f(p)\wedge g(q)\wedge f(s)\wedge h(t).
\end{eqnarray}
Now, suppose that $z\in [0,1)$. Then from $z=F(y,S(u,v))\in [0,1)$, we have $S(u,v)<1$ for any $u,v\in [0,1]$. Thus $F(y,S(u,v))=S(F(y,u),F(y,v))\in [0,1)$ since $F$ and $S$ satisfy (CDl).
Next, we divide our proof into two cases as follows from Lemma \ref{lem43}.
(i) If $S=S_{M}$, then $\left(f\odot_{F}(g\odot_{S} h)\right)(z)=\left((f\odot_{F} g)\odot_{S} (f\odot_{F} h)\right)(z)$ for $z\in [0,1)$ by Theorem \ref{theor41} (ii).
(ii) If there is an $a\in [k,1)$ such that $S$ and $F$ are given by formulas (\ref{eqnarr42}) and (\ref{eqnarr402}), respectively, then we distinguish three subcases.
Case (a). If $z=F(y,S(u,v))\in[0,k]$, then $F=A$ and $S=S_{M}$. Consequently, $\left(f\odot_{F}(g\odot_{S} h)\right)(z)=\left((f\odot_{F} g)\odot_{S} (f\odot_{F} h)\right)(z)$ by Theorem \ref{theor41} (ii).
Case (b). If $z=F(y,S(u,v))\in[k,1)$, then we have that either $z=F(y,S(u,v))\in[k,a]$ or $z=F(y,S(u,v))\in[a,1)$.
\begin{enumerate}
\item []
\begin{enumerate}
\item [{\rm $\bullet$}] If $z=F(y,S(u,v))\in[k,a]$, then $F=B$ and $S=S_{M}$. Consequently, $\left(f\odot_{F}(g\odot_{S} h)\right)(z)=\left((f\odot_{F} g)\odot_{S} (f\odot_{F} h)\right)(z)$ by Theorem \ref{theor41} (ii).
\item [{\rm $\bullet$}] If $z=F(y,S(u,v))\in[a,1)$, then $F=B=T_{P}$ and $S(u,v)=a+(1-a)S_{L}\left(\frac{u-a}{1-a},\frac{v-a}{1-a}\right)$ with $u,v\in[a,1]$. Due to $F(y,S(u,v))=S(F(y,u),F(y,v))$, from formulas (\ref{eqn401}) and (\ref{eqn402}), it holds that
\begin{eqnarray}\label{eqn403}
\left(f\odot_{F}(g\odot_{S} h)\right)(z)\leqslant\left((f\odot_{F}g)\odot_{S}(f\odot_{F}h)\right)(z).
\end{eqnarray}
Therefore, we just need to prove $\left(f\odot_{F}(g\odot_{S} h)\right)(z)\geqslant\left((f\odot_{F}g)\odot_{S}(f\odot_{F}h)\right)(z)$. We first prove the following statement whose proof is completely similar to {\textbf A}.
{\textbf B}. For any $p,q, s, t, y'\in [a,1)$, $S(F(p,q),F(s,t))=z$ and $S(F(y',q),F(y',t))=z$ imply $f(y')\wedge g(q)\wedge h(t)\geqslant f(p)\wedge g(q)\wedge f(s)\wedge h(t)$ whenever $S(q,t)<1$.
Then, using {\textbf B}, we have $\left(f\odot_{F}(g\odot_{S} h)\right)(z)\geqslant\left((f\odot_{F} g)\odot_{S} (f\odot_{F} h)\right)(z)$, which together with formula (\ref{eqn403}) yields that $$\left(f\odot_{F}(g\odot_{S} h)\right)(z)=\left((f\odot_{F} g)\odot_{S} (f\odot_{F} h)\right)(z)\mbox{ for }z\in [a,1).$$
\end{enumerate}
\end{enumerate}
Case (c). If $z=F(y,S(u,v))=k$, then we have either $y=k$ and $S(u,v)\in[0,1)$ or $y\in[0,1]$ and $S(u,v)=k$ since $k$ is an absorbing element of $F$. \begin{enumerate}
\item []
\begin{enumerate}
\item [{\rm$\bullet$}] If $y=k$ and $S(u,v)\in[0,1)$, then $S=S_{M}$ since $S(F(y,u),F(y,v))=F(y,S(u,v))=k$ and $k\leqslant a$. Therefore, $\left(f\odot_{F}(g\odot_{S} h)\right)(z)=\left((f\odot_{F} g)\odot_{S} (f\odot_{F} h)\right)(z)$ by Theorem \ref{theor41} (ii).
\item [{\rm$\bullet$}] If $y\in[0,1]$ and $S(u,v)=k$, then $S=S_{M}$ since $S(u,v)=k$ and $k\leqslant a$. Therefore, $\left(f\odot_{F}(g\odot_{S} h)\right)(z)=\left((f\odot_{F} g)\odot_{S} (f\odot_{F} h)\right)(z)$ by Theorem \ref{theor41} (ii).
\end{enumerate}
\end{enumerate}
Cases (a), (b) and (c) yield that $\left(f\odot_{F}(g\odot_{S} h)\right)(z)=\left((f\odot_{F} g)\odot_{S} (f\odot_{F} h)\right)(z)$ for any $z\in[0,1)$.
Therefore, with Cases (i) and (ii), we have that
$$\left(f\odot_{F}(g\odot_{S} h)\right)(z)=\left((f\odot_{F} g)\odot_{S} (f\odot_{F} h)\right)(z) \mbox{ for any } z\in[0,1).$$
If $z=1$, then, in a similar way to the proof of Theorem \ref{5theo3.4}, we can get $\left(f\odot_{F}(g\odot_{S} h)\right)(1)=\left((f\odot_{F} g)\odot_{S} (f\odot_{F} h)\right)(1)$.
In summary, $$\left(f\odot_{F}(g\odot_{S} h)\right)(z)=\left((f\odot_{F} g)\odot_{S} (f\odot_{F} h)\right)(z) \mbox{ for any } z\in[0,1].$$
\end{proof}
Moreover, by using Lemmas \ref{lem44} and \ref{lem45}, respectively, the following two distributive laws can be derived in complete analogy to the proof of Theorem \ref{the41}.
\begin{theorem}\label{th3.7}
Let $F\in Z_{k}$ be a continuous operator and $U\in U_{\max}\cap\mathcal{WCU}$ a uninorm with neutral element $e$ satisfying (CDl). If $f\in\mathcal{F}$ is convex, then the following holds for all $g,h\in\mathcal{F}$.
$$f\odot_{F}(g\odot_{U} h)=(f\odot_{F}g)\odot_{U}(f\odot_{F}h).$$
\end{theorem}
\begin{theorem}\label{th3.8}
Let $F\in Z_{k}$ be a continuous operator and $U\in U_{\min}\cap\mathcal{WCU}$ a uninorm with neutral element $e$ satisfying (CDl). If $f\in\mathcal{F}$ is convex, then the following holds for all $g,h\in\mathcal{F}$.
$$f\odot_{F}(g\odot_{U} h)=(f\odot_{F}g)\odot_{U}(f\odot_{F}h).$$
\end{theorem}
\begin{remark}\cite{Dragan2013}\label{remar3.2}
The three lemmas for (CDr) are analogous to those presented for (CDl) and are therefore omitted.
\end{remark}
Furthermore, from Remark \ref{remar3.2}, and similarly to Theorems \ref{the41}, \ref{th3.7} and \ref{th3.8}, the following three distributive laws hold.
\begin{theorem}
Let $F\in Z_{k}$ be a continuous operator and $S$ a continuous t-conorm satisfying (CDr). If $h\in\mathcal{F}$ is convex, then the following holds for all $f,g\in\mathcal{F}$.
$$(f\odot_{F}g)\odot_{S} h=(f\odot_{F}h)\odot_{S}(g\odot_{F}h).$$
\end{theorem}
\begin{theorem}
Let $F\in Z_{k}$ be a continuous operator and $U\in U_{\max}\cap\mathcal{WCU}$ a uninorm with neutral element $e$ satisfying (CDr). If $h\in\mathcal{F}$ is convex, then the following holds for all $f,g\in\mathcal{F}$.
$$(f\odot_{F}g)\odot_{U} h=(f\odot_{F}h)\odot_{U}(g\odot_{F}h).$$
\end{theorem}
\begin{theorem}
Let $F\in Z_{k}$ be a continuous operator and $U\in U_{\min}\cap\mathcal{WCU}$ a uninorm with neutral element $e$ satisfying (CDr). If $h\in\mathcal{F}$ is convex, then the following holds for all $f,g\in\mathcal{F}$.
$$(f\odot_{F}g)\odot_{U} h=(f\odot_{F}h)\odot_{U}(g\odot_{F}h).$$
\end{theorem}
\section{Conclusions}
The main contributions are the distributive laws between the extended nullnorms and uninorms on fuzzy truth values, under the condition that the nullnorm is conditionally distributive over the uninorm, as well as the left and right distributive laws between the extended generalized nullnorms and uninorms. The results in this paper generalize the corresponding ones in \cite{Harding2016,Xie2018,Walker2005}.
\end{document}
\begin{document}
\newcommand{\ket}[1]{\mathop{\left|#1\right>}\nolimits}
\newcommand{\bra}[1]{\mathop{\left<#1\right|}\nolimits}
\newcommand{\Tr}[1]{\mathop{{\mathrm{Tr}}_{#1}}}
\newcommand{\braket}[2]{\langle #1 | #2 \rangle}
\newcommand{\ketbra}[2]{| #1\rangle\!\langle #2 |}
\newcommand{\ahead}[2]{\genfrac{}{}{0pt}{}{#1}{#2}}
\title{Continuous variable private quantum channel}
\author{Kamil Br\'adler}
\affiliation{Department of Chemical Physics and Optics, Charles
University, Ke~Karlovu 3, 121~16 Prague~2, Czech~Republic}
\email{[email protected]}
\affiliation{Department of Optics, Palack\'y University,
17.~listopadu 50, 772\,00 Olomouc, Czech~Republic}
\date{\today}
\begin{abstract}
In this paper we introduce the concept of a private quantum channel
within the continuous-variable framework (CVPQC) and investigate
its properties. In terms of the CVPQC we naturally define a
``maximally'' mixed state in phase space, give its explicit
construction, and show that for an increasing number of encryption
operations (which sets the length of the key shared between Alice
and Bob) the encrypted state comes arbitrarily close to the
maximally mixed state in the sense of the Hilbert-Schmidt distance.
We provide the exact solution for the distance dependence and also
give a rough estimate of the necessary number of bits of the shared
secret key (i.e.\ how many classical resources are needed for an
approximate encryption of a generally unknown continuous-variable
state). The definition of the CVPQC is analyzed from the point of
view of the Holevo bound, which determines an upper bound on the
information about an incoming state that an eavesdropper is able to
obtain from an optimal measurement.
\end{abstract}
\pacs{03.67.-a, 03.67.Dd, 03.67.Hk}
\keywords{Private quantum channel; Quantum state randomization;
Continuous variables; Quantum cryptography}
\maketitle
\section{Introduction}
The task of quantum state
encryption~\cite{quant_ver_pad,PQC,encryption} (quantum Vernam
cipher, quantum one-time pad) is defined as follows. Suppose
that there are two communicating parties, Alice and Bob, and Alice
wants to send an arbitrary unknown quantum state to Bob (the state
need not be known to either Alice or Bob). They are connected
through a quantum channel which is accessible to Eve's
manipulations. To avoid any possible information leakage from the
state to Eve (via some kind of generalized measurement or state
estimation), Alice and Bob share a secret and random string of bits
(a classical key) by which Alice chooses a unitary operation
from a given (and publicly known) set. The transformed state is
sent to Bob via the quantum channel, and he applies the corresponding
operation to decrypt the state. For an external observer (e.g.\ Eve)
the state on the channel is a mixture of all possible transformations
that Alice can create, because Eve does not know the secret string.
If, moreover, the mixture is independent of the state to be
encrypted, then Eve has no chance to deduce any information about the
state. We say that the state was perfectly encrypted. After the
formalization of this procedure we obtain the definition of the {\it
private quantum channel -- PQC}~\cite{PQC}. As the mixture it is
advantageous to choose the maximally mixed state -- the normalized
identity density matrix.
It is natural to ask how many operations are needed for the encryption
of a $k$-qubit input state. Based on entropy arguments it was
shown~\cite{encryption}, and later generalized~\cite{PQC} to a PQC
with ancilla, that $2k$ classical bits are sufficient (i.e.\
$2^{2k}$ operations). Generally, for perfect encryption of a
$d$-dimensional quantum state, $d^{2}$~unitary operations are
needed. Thus, the participants' secret and random key
must have a length of at least $2\log d$ bits~\footnote{Throughout this paper,
all logarithms have base two.}. By weakening the security
definition, an approximate notion of secrecy was introduced
in~\cite{approx_encryption}, defining the {\em approximate private
quantum channel -- aPQC}. It was then shown that asymptotically
just $d\log d$ operations are needed for approximate quantum state
encryption. Further progress in this research topic was achieved
in~\cite{approx_encryption1}, where a polynomial algorithm for
constructing a set of encryption/decryption operations suitable
for an aPQC was presented.
In this paper we investigate the possibility of quantum state
encryption for continuous variables (CV)~\cite{CV}. By CV we
understand two conjugate observables, such as the position and
momentum of a particle. In particular, we concentrate on coherent
states, whose Wigner function has the form of a normalized
Gaussian distribution. Gaussian states are the most important
class of CV states used in quantum communication and
computation~\cite{CV_prehled}. Most of the intriguing processes
and algorithms discovered for discrete $d$-level quantum systems
have also been generalized to~CV. Among others, let us mention CV
quantum state teleportation~\cite{CV_tele1,CV_tele2}, CV quantum
state cloning~\cite{CV_clone} and quantum computation with
CV~\cite{CV_comp}. Importantly, great progress has been made in
quantum key distribution (QKD) based on CV, where the theoretical
groundwork was
laid~\cite{CV_crypto1,CV_crypto2,CV_crypto3,CV_crypto4,CV_crypto5}
and experiments were
performed~\cite{CV_crypto_exp1,CV_crypto_exp2}.
After a brief introduction to the distances used in
quantum information theory in Sec.~\ref{sec_intro}, the main part
follows in Sec.~\ref{sec_main} and Sec.~\ref{sec_threats}. In the
former we present the notion of CV quantum state encryption, and in the
latter we define a continuous-variable private quantum channel
(CVPQC). This, foremost, consists of defining the ``maximally''
mixed state within the context of continuous variables and
estimating the length of the secret key shared between Alice and Bob
for a given level of secrecy (by secrecy we mean the HS~distance
between the ``maximally'' mixed state and the investigated state). In
Sec.~\ref{sec_threats} we touch on the question of the
``maximality'' of the mixed state (from now on, we omit the
quotation marks) in the context of bosonic
channels~\cite{bos_chan1,bos_chan2} and their generalized
lossy~\cite{bos_chan_lossy} and Gaussian
relatives~\cite{bos_gauss_chan_mem}. We also discuss
important differences between discrete and CV encryption from the
viewpoint of eavesdropping, followed by the calculation of the
Holevo bound limiting the information accessible to Eve from the
encrypted channel. The necessary Appendices come at the end of the
paper. In Appendix~\ref{app_exact_value} we give a derivation of
the exact formula for the HS~distance. The purpose of
Appendix~\ref{app_guess} is to derive the mentioned estimate of
the HS~distance.
\section{Measures of quantum states closeness}
\label{sec_intro}
Quantum states can be distinguished in the sense of their mutual
distance. The distance is usually induced by a norm defined on the
space of quantum states. This is the case for the Schatten $p$-norm
\begin{equation}\label{schatten}
\|A\|_p
=\left(
\Tr{}\left(\left|A\right|^p\right)
\right)^\frac{1}{ p}
=\left(
\Tr{}\left(A^p\right)
\right)^\frac{1}{ p},
\end{equation}
where $|A|=\sqrt{A^\dagger A}$ and the last equality is valid for
$A=A^\dagger$. If $A=\varrho_1-\varrho_2$, the Hermiticity of $A$
is preserved, and for $p=2$ we get the Hilbert-Schmidt~(HS)
distance
\begin{equation}\label{HSdist}
D_{HS}(\varrho_1,\varrho_2)
=\|\varrho_1-\varrho_2\|_2=\sqrt{\Tr{}\left((\varrho_1-\varrho_2)^2\right)}.
\end{equation}
On the other hand, there is a whole family of distances based on
Uhlmann's fidelity~\cite{uhlmann}
\begin{equation}\label{fid_uhl}
F(\rho_{1},\rho_{2})=\Tr{}\left(\left(\sqrt{\rho_{1}}\rho_{2}
\sqrt{\rho_{1}}\right)^\frac{1}{2}\right).
\end{equation}
One of them is Bures distance~\cite{bures}
\begin{equation}\label{buresdist}
D_B(\varrho_1,\varrho_2)
=\left(2-2\Tr{}\sqrt{\sqrt{\varrho_1}\varrho_2\sqrt{\varrho_1}}\right)^\frac{1}{2}
\end{equation}
which coincides with the HS~distance if $\varrho_1,\varrho_2$ are
pure states.
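For concreteness, both distances are straightforward to evaluate numerically for finite-dimensional density matrices. The following short Python sketch (an illustration added here, not part of the original text; it relies on {\tt numpy} and {\tt scipy}, and the two example qubit states are arbitrary) implements Eqs.~(\ref{HSdist}), (\ref{fid_uhl}) and (\ref{buresdist}).
\begin{verbatim}
# Hilbert-Schmidt distance (HSdist), Uhlmann fidelity (fid_uhl) and
# Bures distance (buresdist) for finite-dimensional density matrices.
import numpy as np
from scipy.linalg import sqrtm

def hs_distance(rho1, rho2):
    d = rho1 - rho2
    return np.sqrt(np.real(np.trace(d @ d)))       # d is Hermitian

def fidelity(rho1, rho2):
    s = sqrtm(rho1)
    return np.real(np.trace(sqrtm(s @ rho2 @ s)))

def bures_distance(rho1, rho2):
    return np.sqrt(max(0.0, 2.0 - 2.0 * fidelity(rho1, rho2)))

# Example: two full-rank qubit states.
rho1 = np.array([[0.8, 0.1], [0.1, 0.2]], dtype=complex)
rho2 = np.array([[0.5, 0.0], [0.0, 0.5]], dtype=complex)
print("D_HS =", hs_distance(rho1, rho2))
print("D_B  =", bures_distance(rho1, rho2))
\end{verbatim}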
There is a certain ambiguity as to which distance is more suitable
for a given task in QIT in which a general problem of quantum state
distinguishability or closeness has to be resolved. Both distances
have many useful properties and are the subject of detailed
investigation~\cite{SommZyc}. For example, the output ``quality'' of a
quantum state in the problem of the universal quantum-copying machine
was first analyzed with the help of the HS~distance~\cite{UQCM_HS}
and later revisited from the viewpoint of the Bures and trace
distances (the Schatten $p$-norm~(\ref{schatten}) for
$p=1$)~\cite{UQCM_Bures}. Another problem, among others, in which
the closeness of two quantum states plays a significant role is
quantum state estimation~\cite{quantstateest}. Here the closeness
of the estimated states is often measured by the
HS~distance~\cite{quantstateest_HS}.
The motivation for using the HS~distance in our calculation is
two-fold. First, the security criterion for approximate quantum
state encryption~\cite{approx_encryption} is based on the operator
distance induced by the operator norm ($p\to\infty$
in~(\ref{schatten})), while some other results therein are
calculated for the trace distance, which is in some sense weaker
than the operator distance (the reason being computational
difficulty). Second, for our purpose we need to calculate the distance
between two infinite-dimensional mixed states, which is difficult
in the case of all fidelity-based distances. However, we suppose
that the HS~distance is a good choice for coherent states and
provides an adequate measure of the closeness of two states.
\section{CV state encryption and its analysis}
\label{sec_main}
Let us define the task of CV state encryption. As in the case of
discrete-variable quantum state encryption, Alice and Bob are
interconnected via a quantum channel which is fully accessible to
Eve's manipulations. Both legitimate participants share a secret string
of random bits (the key), whose sufficient length is, among other
things, a subject of this paper. The key indexes several unitary
operations which Alice/Bob chooses to encrypt/decrypt single-mode
coherent states. The purpose of the encryption is to secure these
states against the leakage of any information about them to an
eavesdropper (Eve). The way to achieve this task is to ``randomize''
every coherent state so that it is as close as possible to a maximally
mixed state (maximally in the sense specified below). The
randomization is performed with several publicly known unitary
operations (meaning that the set from which the participants
choose is known, but the particular sequence of operations from the
set is given by the key, which is kept secret). So, suppose that
Alice generates or receives an arbitrary and generally unknown
single-mode coherent state. The only public knowledge about the
state is that it appears somewhere inside the circle of radius $b$
in phase space with a given probability distribution. Here we
suppose that states occur with the same probability for all $r\leq
b$ and with zero probability elsewhere.
Suppose for a while that Alice encrypts a vacuum state. This
situation will be generalized immediately, with the help of the
properties of the HS~distance, to an unknown coherent state within the
circle of radius $b$ as stated in the previous paragraph. The
first problem we have to tackle is the definition of a maximally
(or completely) mixed state. Here the situation differs from
that of a generic $d$-dimensional discrete Hilbert space, where the
normalized unit matrix is considered to be the maximally mixed state.
This is inappropriate in phase space; nevertheless, we may take
inspiration from the way the discrete maximally mixed state is
generated. In fact, we get the maximally mixed state (in the case of
qubits $\varrho_V=\varrho_{r\vartheta\varphi}$) by integrating
over all density matrices populating the Bloch sphere,
$\openone\propto\int\varrho_V{\rm d}V$ (irrespective of the fact
that a finite number of unitary operations suffices for this task,
as the theory of PQC teaches). Similarly, as a maximally mixed
state~\footnote{In fact, Eq.~(\ref{integral_0b}) belongs to the
class of bosonic channels discussed later
in~section~\ref{sec_threats}. We will address the problem of the
``measure of maximality'' of the mixed state in the context of the
calculated Holevo bound on the channel.} we can naturally choose
an integral performed over all possible single-mode states within
the circle of radius $r\leq b$
\begin{align}\label{integral_0b}
\openone_b
& =\frac{1}{ C}\int{\rm d}^2\alpha\ketbra{\alpha}{\alpha}
=\frac{1}{ C}\sum_{m,n=0}^\infty\ketbra{m}{n}\int_0^{2\pi}
{\rm d}\vartheta\int_0^{b}{\rm d}r
\ e^{-r^2}\frac{r^{m+n+1}}{\sqrt{m!n!}}e^{i(m-n)\vartheta}
=\frac{2\pi}{ C}\sum_{n=0}^\infty\frac{\ketbra{n}{n}}{ n!}
\int_0^{b}{\rm d}r\ e^{-r^2}r^{2n+1}\nonumber\\
& =\frac{\pi}{ C}\sum_{n=0}^\infty\frac{\ketbra{n}{n}}{ n!}
\int_0^{b^2}{\rm d}x\ e^{-x}x^n
=\frac{\pi}{ C}\exp\left(-b^2\right)\sum_{n=0}^\infty
\left(\exp\left(b^2\right)-\sum_{k=0}^n\frac{b^{2k}}{ k!}\right)\ketbra{n}{n}.
\end{align}
Here $\ket{\alpha}=D(\alpha)\ket{0}
=\exp(-|\alpha|^2/2)\sum_{n=0}^\infty\frac{\alpha^n}{\sqrt{n!}}\ket{n}$
is a coherent state represented as a displaced vacuum via the
displacement operator $D(\alpha)$ (from now on it will not be
mentioned explicitly that $|\alpha|\leq b$ for all displacement
operators used in this paper and for given $b$) and $C=\pi b^2$ is
a normalization constant. Note that the
calculation~(\ref{integral_0b}) for $b\to\infty$ without the
normalization is nothing other than the well-known resolution of unity,
which shows that the coherent states span the whole Hilbert space.
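Since~(\ref{integral_0b}) is diagonal in the Fock basis, it is easy to evaluate numerically. The following minimal Python sketch (added for illustration; the truncation level $n_{max}$ and the value $b=2$ are arbitrary choices) computes the diagonal elements $\bra{n}\openone_b\ket{n}$ from the last expression in~(\ref{integral_0b}) and checks that the truncated trace approaches unity.
\begin{verbatim}
# Diagonal Fock-basis elements of the "maximally" mixed state (integral_0b).
import numpy as np
from math import factorial, exp

def one_b_diag(b, n_max):
    """<n|1_b|n> for n = 0..n_max, from the closed form in (integral_0b)."""
    partial, out = 0.0, np.empty(n_max + 1)
    for n in range(n_max + 1):
        partial += b ** (2 * n) / factorial(n)     # sum_{k<=n} b^{2k}/k!
        out[n] = (1.0 - exp(-b ** 2) * partial) / b ** 2
    return out

b = 2.0
diag = one_b_diag(b, n_max=60)
print("truncated trace (should be close to 1):", diag.sum())
\end{verbatim}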
Having defined the maximally mixed state, let us investigate which
operations Alice uses for encryption. The resulting transformation should
be as close as possible to the maximally mixed state in the HS~distance
sense, and the closeness will depend on the number of operations used.
Beforehand, we note how to facilitate the forthcoming
tedious calculations on a sample example. Suppose that Alice has,
e.g., four encryption operations, which displace the vacuum state
to the same distance $r_4$ from the origin but under four
different angles (for symmetry reasons these angles are
spaced by multiples of $\frac{\pi}{2}$ in this case). Alice chooses these
operations with the same probability. Now, if we write down the
overall mixture of the states, it can be shown that there exists
a computationally advantageous ``conformation'' in which the mixture
reads
\begin{equation}\label{mix_on_circle__example}
\varrho_4=\frac{1}{4}\sum_{q=1}^4\ketbra{\alpha_{4q}}{\alpha_{4q}}
=\exp(-r_4^2)\sum_{m,n=0}^\infty\frac{r_4^{m+n}}{\sqrt{m!n!}}
\ketbra{m}{n}\,\delta_m^{m'},
\end{equation}
where $m'=m+4l$ with $l=0,1,2,\dots$, and $\delta_m^{m'}=1$ whenever
the column index $n$ differs from the occupation number $m$ by a
multiple of four (i.e.\ $m\equiv n\pmod 4$), else $\delta_m^{m'}=0$. Informally,
(\ref{mix_on_circle__example}) is always a real density matrix
with off-diagonal non-zero ``stripes'' separated from the main diagonal
and from a neighbouring stripe by three zero off-diagonals.
$\varrho_4$ acquires the relatively simple
form~(\ref{mix_on_circle__example}) if
$\alpha_{4q}=r_4e^{i\vartheta_{4q}}$ for
$\vartheta_{4q}=\frac{\pi}{4}+(q-1)\frac{\pi}{2}$. This can be
generalized to an arbitrary number of states dispersed on a circle
with a given radius $r_p$
\begin{equation}\label{mix_on_circle__general}
\varrho_p=\frac{1}{ p}\sum_{q=1}^p\ketbra{\alpha_{pq}}{\alpha_{pq}}
=\exp(-r_p^2)\sum_{m,n=0}^\infty\frac{r_p^{m+n}}{\sqrt{m!n!}}
\ketbra{m}{n}\,\delta_m^{m'},
\end{equation}
where $p\in{\mathbb Z}^+$, $m'=m+pl$ and $\delta_m^{m'}$ is defined as
before. Favourable
``$p$-conformations''~(\ref{mix_on_circle__general}) occur when
$\alpha_{pq}=r_pe^{i\vartheta_{pq}}$ for
$\vartheta_{pq}=\frac{\pi}{ p}(2q-1)$. Now, we may proceed to the
mixture characterizing all of Alice's encryption operations. She
chooses $N\geq1$ and defines $r_p=pb/N$ for $p=1,\dots,N$. Then
\begin{equation}\label{mix_on_circle}
\Phi_N=\frac{1}{M}\sum_{p=1}^Np\varrho_p
=\frac{1}{
M}\sum_{p=1}^N\sum_{q=1}^pD(\alpha_{pq})\ketbra{0}{0}D^\dagger(\alpha_{pq})
\end{equation}
with the normalization $M=\frac{N(N+1)}{2}$. To sum up the protocol,
Alice and Bob share a secret and random key which indexes their
operations. Alice thus equiprobably chooses from the set of $M$
displacement operators
$D(\alpha_{pq})=\exp(\alpha_{pq}a^\dagger-\alpha_{pq}^*a)$, where
just one operator creates a coherent state on the circle of radius
$r_1$, two operators generate two states on the circle of radius
$r_2$, and so forth up to $N$. The mixtures of the states on the
particular circles are of the favourable form~(\ref{mix_on_circle__general}) and
the whole state is~(\ref{mix_on_circle}). The rest of the protocol
is the same as in discrete state encryption. Alice sends the
encrypted state through a quantum channel to Bob, who applies the
inverse of Alice's operation to decrypt the state.
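The mixture~(\ref{mix_on_circle}) and its distance from~(\ref{integral_0b}) can be checked directly in a truncated Fock basis. The Python sketch below (an illustration under the assumptions of an arbitrary truncation $n_{max}=60$ and the example values $b=2$, $N=8$) builds $\Phi_N$ from the coherent states $\ket{\alpha_{pq}}$ and compares $D_{HS}(\openone_b,\Phi_N)$ with the rough guess $1/(N+1)$ obtained below in~(\ref{HSdist_guess}).
\begin{verbatim}
# Build Phi_N of Eq. (mix_on_circle) in a truncated Fock basis and compare
# D_HS(1_b, Phi_N) with the rough guess 1/(N+1) of Eq. (HSdist_guess).
import numpy as np
from math import factorial, exp, pi

def one_b_diag(b, n_max):                  # diagonal of Eq. (integral_0b)
    partial, out = 0.0, np.empty(n_max + 1)
    for n in range(n_max + 1):
        partial += b ** (2 * n) / factorial(n)
        out[n] = (1.0 - exp(-b ** 2) * partial) / b ** 2
    return out

def coherent(alpha, n_max):                # Fock amplitudes <n|alpha>
    n = np.arange(n_max + 1)
    norms = np.sqrt(np.array([float(factorial(int(j))) for j in n]))
    return np.exp(-abs(alpha) ** 2 / 2) * alpha ** n / norms

def phi_N(b, N, n_max):                    # mixture of Eq. (mix_on_circle)
    M = N * (N + 1) // 2
    rho = np.zeros((n_max + 1, n_max + 1), dtype=complex)
    for p in range(1, N + 1):
        r_p = p * b / N
        for q in range(1, p + 1):
            alpha = r_p * np.exp(1j * pi * (2 * q - 1) / p)
            v = coherent(alpha, n_max)
            rho += np.outer(v, v.conj()) / M
    return rho

b, N, n_max = 2.0, 8, 60
one_b = np.diag(one_b_diag(b, n_max)).astype(complex)
diff = one_b - phi_N(b, N, n_max)
d_hs = np.sqrt(np.real(np.trace(diff @ diff.conj().T)))
print("D_HS =", d_hs, "   guess 1/(N+1) =", 1.0 / (N + 1))
\end{verbatim}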
The keystone of quantum state encryption, both discrete and CV, is
the fact that an unknown and arbitrary state can be encrypted. If
the state were known we would not need this procedure at all, because
Alice could just send the information about the preparation of the
state to Bob; it would then suffice to encrypt this classical
information with the Vernam cipher. Also, our definition of the CVPQC
must be independent of the state which is to be encrypted. To
ensure this, we will find the unitary invariance of the
HS~distance, $D_{HS}(\varrho,\sigma)=D_{HS}(U\varrho
U^\dagger,U\sigma U^\dagger)$ for an arbitrary unitary matrix $U$,
useful. This invariance is, however, a necessary but not a sufficient
condition for our purpose. The second important ingredient is the
advantageous algebraic properties of displacement operators. Let us
demonstrate this on Alice's encryption algorithm, which stays the
same as before. If she gets an arbitrary coherent state
$\ket{\beta}$ ($|\beta|\leq b$), she randomly chooses one of the
$M$~displacement operators $D(\alpha_{pq})$ (as the key shared
with Bob dictates). Then, even though two displacement operators
generally do not commute, we may write a general encryption TPCP
(trace-preserving completely positive) map
\begin{equation}\label{map_Alice}
\mathcal{E}_N(\ket{\beta})=\frac{1}{
M}\sum_{p=1}^N\sum_{q=1}^pD(\alpha_{pq})D(\beta)\ketbra{0}{0}D^\dagger(\beta)D^\dagger(\alpha_{pq})
=\frac{1}{
M}\sum_{p=1}^N\sum_{q=1}^pD(\beta)D(\alpha_{pq})\ketbra{0}{0}D^\dagger(\alpha_{pq})D^\dagger(\beta)
=D(\beta)\Phi_ND^\dagger(\beta).
\end{equation}
It is obvious that generally
$\mathcal{E}_N(\ket{\beta})\not=\Phi_N$ (and similarly $\int
D(\alpha)\ketbra{\beta}{\beta}D^\dagger(\alpha){\rm
d}^2\alpha\propto
D(\beta)\openone_bD^\dagger(\beta)=\openone_b^{\beta}\not
=\openone_b$) but their HS~distances are equivalent, i.e.
$D_{HS}(\openone_b^{\beta},\mathcal{E}_N(\ket{\beta}))=D_{HS}(\openone_b,\Phi_N)$.
Thus, we are henceforth entitled to make all calculations of the
HS~distance between $\openone_b^{\beta}$ and
$\mathcal{E}_N(\ket{\beta})$ with explicit
forms~(\ref{integral_0b}) and~(\ref{mix_on_circle}). After some
calculations we will see (Appendix~\ref{app_guess}) that
\begin{equation}\label{HSdist_guess}
D_{HS}^2\left(\openone_b^{\beta},\mathcal{E}_N(\ket{\beta})\right)
\approx\left(\frac{1}{
N+1}\right)^2+\mathcal{O}\left(N^{-4}\right),
\end{equation}
which holds for all input coherent states $\ket{\beta}$.
The guess~(\ref{HSdist_guess}) is far from optimal (e.g.\ it is
independent of $b$) and is not even an inequality. But this
does not matter, because the exact form can be derived (for details
see Appendix~\ref{app_exact_value}). Its only problem is its relative
complexity, so we cannot easily deduce from it the number of operations for
a given level of secrecy. Nevertheless, (\ref{HSdist_guess})
asymptotically approaches the exact expression (as is shown in
Appendix~\ref{app_guess}). Notice that in spite of the derivation
of~(\ref{HSdist_guess}) (or, later, the analytical
expression~(\ref{HSdist_explicit})) we still cannot reasonably
define a CVPQC. We will do so in section~\ref{sec_threats} after
presenting further assumptions regarding eavesdropping on our
private quantum channel.
Taking into account the described unitary invariance of the HS~distance,
we write down the lhs of~(\ref{HSdist_guess}) as
\begin{equation}\label{HSdist_into_traces}
D_{HS}^2(\openone_b,\Phi_N)
=\Tr{}\left((\openone_b-\Phi_N)^2\right)
=\Tr{}\left(\openone_b^2\right)-2\Tr{}\left(\openone_b\Phi_N\right)
+\Tr{}\left(\Phi_N^2\right).
\end{equation}
After some calculations (see the aforementioned
Appendix~\ref{app_exact_value}) we get a somewhat lengthy
expression
\begin{align}\label{HSdist_explicit}
D_{HS}^2(\openone_b,\Phi_N)
& =\frac{1}{ b^2\exp(2b^2)}
\left(
\exp(2b^2)-I_0\left(2b^2\right)-I_1\left(2b^2\right)
\right)
-\frac{4\exp(-b^2)}{ b^2N(N+1)}
\sum_{p=1}^N\frac{p}{\exp\left(r_p^2\right)}
\sum_{k=1}^\infty\left(\frac{b}{ r_p}\right)^kI_k(2r_pb)\nonumber\\
& +\left(\frac{2}{ N(N+1)}\right)^2
\left[\sum_{p=1}^N\frac{p^2}{\exp\left(2r_p^2\right)}
\left(
I_0\left(2r_p^2\right)+2\sum_{k=1}^\infty I_{pk}\left(2r_p^2\right)
\right)
\right.\nonumber\\
& +\sum_{\ahead{p_1,p_2=1}{p_1\not=p_2}}^N\frac{p_1p_2}{\exp\left(r_{p_1}^2+r_{p_2}^2\right)}
\left.
\left(
I_0(2R_{12})+2\sum_{k=1}^\infty I_{p_1p_2k}(2R_{12})
\right)
\right],
\end{align}
where $r_p=pb/N,r_{p_l}=p_lb/N,R_{12}=p_1p_2\left(b/N\right)^2$
and $I_n(x)$ is a modified Bessel function of the first kind of
order $n$
\begin{equation}\label{bessfce}
I_n(x)=\sum_{s=0}^\infty{\left(\frac{x}{2}\right)^{n+2s}}\frac{1}{(n+s)!s!}.
\end{equation}
However, as we declared, for a rough estimate of the number of
operations for a given secrecy we will
employ~(\ref{HSdist_guess}). Using
$M=\frac{N(N+1)}{2}\approx\frac{(N+1)^2}{2}$ (and then again
$M\approx\frac{N^2}{2}$) we find that the dependence of the number
of bits on the HS~distance~(\ref{HSdist_explicit}) is $n=\log
M=-2(1+\log D_{HS})+\log \left(1+\sqrt{1+CD_{HS}^2}\right)
\approx-1-2\log D_{HS}$, because the guess is particularly precise
for $D_{HS}\ll1$. Here $C$ is a constant (polynomial) bounding $N^{-4}$
in~(\ref{HSdist_guess}) from above. From numerical simulations
based on~(\ref{HSdist_explicit}) for different $b$ and $M$ we see
that the guess is accurate and for higher $M$ it even serves as a
relatively good upper bound.
\subsection*{Simplified encryption}
Now, let us see what happens if we simplify our
encryption protocol. The arrangement is the same, i.e.\ Alice's
task is to encipher single-mode coherent states and her only
knowledge is that the states lie somewhere inside the circle of
radius $r\leq b$. However, Alice's technology is limited and we
replace the technologically demanding displacements by
simple phase shifts given by the well-known time evolution
$\ket{\alpha(t)}=\exp(-iHt/\hbar)\ket{\alpha}$ with the
Hamiltonian $H=\hbar\omega(n+\frac{1}{2})$. The resulting state is
$\ket{\alpha(t)}=\exp(-i\omega t/2)\ket{\alpha\exp(-i\omega t)}$,
so the state undergoes a rotation by $\Theta=\omega t$
regardless of its distance (which is preserved) from the phase-space
origin. Note that the unitary operator $U=\exp(-iHt/\hbar)$ has
the form
\begin{equation}\label{unitary_evolution}
U=\exp\left(-\frac{i\omega t}{2}\right)\sum_{n=0}^\infty\exp(-i\omega t n)
\ketbra{n}{n}.
\end{equation}
Next, suppose that Alice is equipped with $p$ encryption
operations by which she can rotate the state by $q$~multiples of
$2\pi/p$, i.e.\ $\Theta_q=q2\pi/p$ with $q=1,\dots,p$. Then, for
someone without any information about the angle of rotation chosen
by Alice (e.g.\ Eve), the state leaving Alice has the form
\begin{equation}\label{ro_p_tilde}
\tilde\varrho_p=\frac{1}{p}\sum_{q=1}^{p}U_q\ketbra{\alpha}{\alpha}U^{-1}_q
=\frac{1}{p}\sum_{q=1}^{p}\left(\ketbra{\alpha}{\alpha}\right)_q
=\frac{1}{p}\sum_{q=1}^{p}\ketbra{\alpha e^{-i\frac{2\pi q}{p}}}{\alpha e^{-i\frac{2\pi q}{p}}}
\end{equation}
where $U_q$ is~(\ref{unitary_evolution}) with $\Theta_q=\omega
t_q=q2\pi/p$. Because Alice does not know where exactly the state
$\ket{\alpha}$ is located, we cannot write down an explicit form of
$\tilde\varrho_p$. For arbitrary $p$, however, it can
be transformed into our favourite
state~(\ref{mix_on_circle__general}), again with the help of a
unitary operation of the form~(\ref{unitary_evolution}),
\begin{equation}\label{ro_p}
\varrho_p(r_p)=U\tilde\varrho_pU^{-1}
=\exp(-r_p^2)\sum_{m,n=0}^\infty\frac{r_p^{m+n}}{\sqrt{m!n!}}
\ketbra{m}{n}\,\delta_m^{m'},
\end{equation}
where $m'=m+pl$ with $l=0,1,2,\dots$ as before. It remains to show that
$\openone_b=U\openone_bU^{-1}$. This can be readily seen from the
fact that~(\ref{unitary_evolution}) is diagonal, and it is vital.
Now we can easily calculate the HS~distance between $\varrho_p(r_p)$
and $\openone_b$ and, owing to the unitary invariance of the
HS~distance, the result will be valid for an arbitrary input
coherent state (this trick is akin to the one used
in~(\ref{map_Alice})). After the calculation we get the same
result as in~(\ref{HSdist_explicit}) but for fixed $p$ and without
the summation over $p_1,p_2$
\begin{align}\label{HSdist_simple_explicit}
D_{HS}^2(\openone_b,\varrho_p)
& =\frac{1}{ b^2\exp(2b^2)}
\left(
\exp(2b^2)-I_0\left(2b^2\right)-I_1\left(2b^2\right)
\right)
-\frac{2}{\exp\left(b^2\right)b^2}
\frac{1}{\exp\left(r_p^2\right)}
\sum_{k=1}^\infty\left(\frac{b}{ r_p}\right)^kI_k(2r_pb)\nonumber\\
& +\frac{1}{\exp\left(2r_p^2\right)}
\left(
I_0\left(2r_p^2\right)+2\sum_{k=1}^\infty I_{pk}\left(2r_p^2\right)
\right).
\end{align}
\begin{figure}
\caption{(A)~The HS~distance~(\ref{HSdist_simple_explicit}) between $\openone_b$ and $\varrho_p$ for $b=2$; (B)~the numerically calculated radius $r_{min}$ minimizing the HS~distance for $b\in(0,7]$. (The original figure is not available in this source.)}
\label{figs_saturated_min}
\end{figure}
The behaviour of this quantity is completely different from that
of~(\ref{HSdist_explicit}) describing the original case, as can be
seen in Fig.~\ref{figs_saturated_min}~(A). As an example we
put~$b=2$, and we see that there exists $r_{min}<b$ for which the
HS~distance between~(\ref{integral_0b}) and~(\ref{ro_p}) reaches
its minimum for sufficiently high~$p$. In order to find the radius
minimizing the HS~distance of these density matrices for
various~$b$ (we put $p\to\infty$, and thus $2\sum_{k=1}^\infty
I_{pk}\left(2r_p^2\right)$ vanishes -- we seek, say, a
saturated minimum which is independent of the number of operations
on a given circle) we can calculate
\begin{equation}\label{min_D_HS}
\frac{\partial\left(D_{HS}^2(\openone_b,\varrho_p)\right)}{\partial r}
\stackrel{p\to\infty}{=}\frac{4}{\exp\left(2r_{min}^2\right)}
\left(
r_{min}I_1\left(2r_{min}^2\right)-r_{min}I_0\left(2r_{min}^2\right)
+\frac{\exp\left(2r_{min}^2\right)}{b\exp\left(b^2\right)}I_1\left(2r_{min}b\right)
\right)=0.
\end{equation}
The solution is the root of the expression in parentheses.
Unfortunately, an analytical solution was not found, so a numerical
calculation of $r_{min}$ for several $b\in\left(0,7\right]$
is depicted in Fig.~\ref{figs_saturated_min}~(B).
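The root of~(\ref{min_D_HS}) is also easy to obtain with standard numerical tools. The following Python sketch (an illustration; the bracketing interval is chosen for the example $b=2$ and is an assumption, not part of the original analysis) finds $r_{min}$ from the bracketed expression in~(\ref{min_D_HS}) using the modified Bessel functions from {\tt scipy}.
\begin{verbatim}
# Solve the bracketed expression of Eq. (min_D_HS) for r_min (example b = 2).
import numpy as np
from scipy.special import iv             # modified Bessel function I_n(x)
from scipy.optimize import brentq

def bracket(r, b):
    return (r * iv(1, 2 * r ** 2) - r * iv(0, 2 * r ** 2)
            + np.exp(2 * r ** 2 - b ** 2) / b * iv(1, 2 * r * b))

b = 2.0
r_min = brentq(lambda r: bracket(r, b), 1e-6, b)   # sign change on (0, b)
print("r_min for b = 2:", r_min)
\end{verbatim}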
What are the physical consequences of this whole simplification?
From Fig.~\ref{figs_saturated_min}~(A) we see that the maximally
mixed state $\openone_b$ can, in the distance sense, be substituted
by a mixture of coherent states on a circle (with radius
$r_{min}$, where the deviation from $\openone_b$ is smallest), and
the mixing requires relatively few encryption operations (the
distance saturates already for low $p$). This holds not only for $b=2$,
which is the case shown in the figure. So, we can now make the process of
encryption/decryption technologically easier. We equip Alice/Bob
with the mentioned phase shifts leading to~(\ref{ro_p}) and just one
displacement operator $D(r_{min})$. Every incoming coherent state
is first displaced and then encrypted by choosing a phase shift
(indexed by the secret key). The role of
Fig.~\ref{figs_saturated_min}~(B) is to provide the proper choice of
$r_{min}$ for a given~$b$. Finally, Alice examines
Fig.~\ref{figs_saturated_min}~(A) to select a sufficient
number~$p$ of phase shifts for which the minimum
of the HS~distance~(\ref{HSdist_simple_explicit}) stays essentially
the same.
The important point is that the incoming coherent states come to Alice
randomly and equiprobably for a given $b$, so after this simplified
encryption (and for many encryption instances) they are as close
as possible to the fully encrypted states they would become if
processed using~(\ref{mix_on_circle}), i.e.\ in the case of fully
technologically equipped Alice and Bob. The overall
advantage is the use of just one technologically demanding
operation, which does not need to be tuned for a given $b$ (it stays
fixed in the encryption protocol described at the beginning of
section~\ref{sec_main}).
\section{Eavesdropping on encrypted CV states}
\label{sec_threats}
Quantum channels are TPCP maps on quantum states. Following the
classical channel coding theorem~\cite{shannon48} one may ask how
much information a quantum channel is able to convey. This
question naturally leads to the definition of the quantum channel
capacity (for a nice survey of the various quantum channel capacities
see Ref.~\cite{shor_capa}) as the maximum of the accessible
information over the probability distributions of the ensemble of
input states $\{p_i,\sigma_i\}$ entering the channel (after
classical-quantum coding onto the input quantum states,
$x_i\rightleftharpoons\sigma_i$, the ensemble of classical messages
is indicated by the variable $X=\{p(x_i),x_i\}$ where
$p(x_i)\equiv p_i$). The accessible information $I_{acc}$ itself
is the maximization over all measurements of the mutual information
$I(X;Y)$, with the variable $Y=\{p\,(y_j),y_j\}$ giving the
probability $p\,(y_j)$ of the result~$y_j$ (an output alphabet) of
a measurement on the channel output. Sometimes it might be
difficult to calculate the accessible information needed to determine the
channel capacity. Then we can estimate the accessible information
from above by the Holevo bound
\begin{equation}\label{holevo_bound}
\chi\left(\{p_i,\sigma_i\}\right)
=S\biggl(\sum_ip_i\sigma_i\biggr)-\sum_ip_iS(\sigma_i),
\end{equation}
for which it was proved~\cite{holevo_bound} that $\chi\geq I_{acc}$
($S(\varrho)$ is the von~Neumann entropy), with equality reached if all
$\sigma_i$ commute.
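For a finite ensemble, the bound~(\ref{holevo_bound}) is a simple function of the eigenvalues of the involved density matrices. The following minimal Python sketch (added for illustration; the two pure qubit states are an arbitrary example) evaluates $\chi$ with logarithms taken to base two, as everywhere in this paper.
\begin{verbatim}
# Holevo bound (holevo_bound) for a finite ensemble {p_i, sigma_i}.
import numpy as np

def von_neumann_entropy(rho):
    evals = np.linalg.eigvalsh(rho)
    evals = evals[evals > 1e-12]
    return float(-(evals * np.log2(evals)).sum())

def holevo(probs, states):
    avg = sum(p * s for p, s in zip(probs, states))
    return von_neumann_entropy(avg) - sum(
        p * von_neumann_entropy(s) for p, s in zip(probs, states))

# Example: two non-orthogonal pure qubit states with equal priors.
psi0 = np.array([1.0, 0.0], dtype=complex)
psi1 = np.array([1.0, 1.0], dtype=complex) / np.sqrt(2)
states = [np.outer(v, v.conj()) for v in (psi0, psi1)]
print("chi =", holevo([0.5, 0.5], states), "bit")
\end{verbatim}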
In our case we try to get as close as possible to our maximally
mixed state~(\ref{integral_0b}) with the TPCP
map~(\ref{mix_on_circle}) (or, generally, to~$\openone_b^\beta$
with~(\ref{map_Alice})), both of which belong to the class of bosonic
channels~\cite{bos_chan1,bos_chan2}. However, our task is quite
different from reaching the highest quantum channel capacity (which
would mean ``tuning'' the $p_i$). Now the variable $X$ is fixed. For
our purpose the Holevo bound can tell us the upper bound
on the information that a third party (Eve) is able to learn from her
very best (that is, optimal) measurement on the quantum channel.
The question now is for which states we should calculate the Holevo
bound in order to find Eve's maximum attainable information on the {\em
incoming} states. Unlike the discrete case (perfect and
approximate encryption), the condition of perfect or acceptable
closeness of an encrypted state to the maximally mixed state is
not sufficient in this case. Let us demonstrate the reason for this
difference on the Bloch sphere (i.e.\ perfect qubit encryption). If
Alice encrypts an unknown qubit (with a PQC composed of, e.g., the
four Pauli matrices) she gets a maximally mixed qubit state
(the normalized unit matrix). This means that Eve cannot construct any
measurement giving her information about the state. Moreover, even if
she had a priori information about the state (in the sense that Alice
gets and encrypts many copies of the same unknown state) she
would not be able to use any method of unknown quantum state
reconstruction~\cite{quantstateest}. She would not get any clue
where to find the state, because all copies are transformed (encrypted) to
the maximally mixed state dwelling in the center of the sphere.
Unfortunately, this is not our case. Here, if Alice encrypts many
identical coherent states (albeit unknown ones), Eve could in principle
reconstruct in which part of the phase space the encryption had
been carried out. Because we further suppose that the encryption
operations are publicly known (naturally, not the secret key
sequence itself), Eve could then be able to deduce the original
state from many variant ciphers of the same state. So we will
suppose that the incoming states do not exhibit such statistics and,
moreover, that they are distributed equiprobably and randomly in the
whole region under consideration (i.e.\ within the circle of radius
$r\leq b$)~\footnote{To avoid misunderstanding we distinguish in
this chapter between distribution and statistics. As usual,
distribution means a probability of occurrence for the incoming states
or, possibly, for the encryption operations. By contrast, statistics
means correlations or, more precisely, relationships between incoming states
that may be publicly known, e.g.\ the information that several incoming
states will be the same. This has no influence on the
distribution.}. A less restrictive requirement would be a
distribution independent of the phase and changing only with the
distance from the origin (rotationally invariant), which will be
discussed at the end of this chapter.
Based on these thoughts, we may finally define the CVPQC. We call the
object
$\{\mathcal{B},P_{\mathcal{B}},\mathcal{E}_N(\ket{\beta}),\openone_b^{\beta}\}$
a CVPQC, where $\mathcal{B}$ is the set of all coherent states
$\ket{\beta}$ ($|\beta|\leq b$) with distribution
$P_{\mathcal{B}}$ around the origin of phase space,
$\mathcal{E}_N(\ket{\beta})$ is the TPCP~map defined
in~(\ref{map_Alice}) and $\openone_b^{\beta}$ is the maximally mixed
state centered around an input state $\ket{\beta}$
(the displaced~(\ref{integral_0b})). Let us note that the definition of
the CVPQC is valid for all mixed coherent states of the general form
$\varrho_\beta=\sum_ip_i(\ketbra{\beta}{\beta})_i$ with
$\sum_ip_i=1$ and $|\beta_i|\leq b$ for all $i$, because of the
convexity of the HS~norm. The situation is similar to that
in~\cite{approx_encryption} for the operator norm.
Considering the assumptions from the previous paragraph, let us
focus on the problem of security on the channel. For our purpose
we employ an integral version of~(\ref{holevo_bound}). First note
that we are now interested in the limiting case when Alice uses
infinitely many encryption operations~\footnote{Of course, in
reality for every $\ket{\beta}$ Alice produces a ``finite''
mixture $\mathcal{E}_N(\ket{\beta})$ from~(\ref{map_Alice}) which
should be as close as possible to
$\openone_b^\beta=D(\beta)\openone_bD^\dagger(\beta)$. For an
examination of how close these states are to each other and its
connection to the number of bits of the key we have derived
expression~(\ref{HSdist_guess}).}. More importantly, it remains to
answer correctly the question posed above, that is, what we should
actually calculate as the Holevo bound. We know that Eve will measure on
the encrypted channel to get any information on the incoming states
(the states before the encryption). To learn something about the position
of a coherent state before encryption, Eve must try to distinguish
among objects which preserve some information about the input states'
location in phase space. In other words, Eve has to choose a
correct ``encoding'' $\sigma_i$ from Eq.~(\ref{holevo_bound}), but
now for a continuous index. Due to the encryption
operation~(\ref{map_Alice}) used (which in the limit $N\to\infty$
transforms to $\openone_b^\beta$, as is proved
in~Appendix~\ref{app_exact_value}), the state $\openone_b^\beta$ is
the object which preserves the information on the placement of an
incoming state~$\ket{\beta}$, because it is centered around it. Then
a continuous version of~(\ref{holevo_bound}) reads
\begin{equation}\label{holevo_bound_int1}
\chi\bigl(\{P(\beta),\openone_b^\beta\}\bigr)
=S\bigl(\tilde\Lambda_{2b}\bigr)
-\int{\rm
d^2}\beta\,P(\beta)S\bigl(\openone_b^\beta\bigr),
\end{equation}
where $\tilde\Lambda_{2b}$ is a total state leaving Alice's
apparatus (normalized Eq.~(\ref{Lambda})), $P(\beta)$ is an input
distribution from the CVPQC definition and
$\openone_b^\beta=D(\beta)\openone_bD^\dagger(\beta)$. Since
$S(D(\beta)\openone_bD^\dagger(\beta))=S(\openone_b)$ for all
$\beta$ it immediately follows from~(\ref{holevo_bound_int1})
\begin{equation}\label{holevo_bound_int2}
\chi\left(\{P(\beta),\openone_b^\beta\}\right)
=S\bigl(\tilde\Lambda_{2b}\bigr)-S\bigl(\openone_b\bigr).
\end{equation}
\begin{figure}
\caption{\label{fig_holevo_bound}The Holevo bound~(\ref{holevo_bound_int2}).}
\end{figure}
To calculate the Holevo bound~(\ref{holevo_bound_int2}) it remains
to find the total state after the encryption procedure, provided
that Eve knows neither which particular coherent
state~$\ket{\beta}$ comes to Alice nor the key used for
encryption. Since the incoming distribution
$P(\beta)\equiv P(x,\vartheta)=1$ (there is no need to normalize it here)
is publicly known, the total {\em incoming} state, averaged over this
distribution, is again $\openone_b$. The state
{\em leaving} Alice then has the form
\begin{align}\label{Lambda}
\Lambda_{2b}
& = \int{\rm d^2}\alpha D(\alpha)
\left(\int{\rm d^2}\beta\,\ketbra{\beta}{\beta}\right)D^\dagger(\alpha)
=\int_{0}^{b}\int_{0}^{2\pi}{\rm d^2}\beta
\left(
\int_{0}^{b}\int_{0}^{2\pi}{\rm d^2}\alpha\,
D(\alpha)\ketbra{\beta}{\beta}D^\dagger(\alpha)
\right)\nonumber\\
& =\int_{0}^{b}x{\rm d}x\int_{0}^{b}y{\rm d}y\int_{0}^{2\pi}{\rm
d}\varphi
\left(
\int_{0}^{2\pi}{\rm d}\vartheta\,\ketbra{\beta+\alpha}{\beta+\alpha}
\right)
=2\pi\sum_{n=0}^{\infty}\frac{\ketbra{n}{n}}{n!}
\int_{0}^{b}\int_{0}^{b}\int_{0}^{2\pi}{\rm d}x{\rm d}y{\rm d}\varphi
\ e^{-R^2}R^{2n+1}xy,
\end{align}
where $R^2=x^2+y^2-2xy\cos\varphi\leq4b^2$, ${\rm d^2}\alpha=y{\rm
d}y{\rm d}\varphi$, ${\rm d^2}\beta=x{\rm d}x{\rm d}\vartheta$ and
$D(\alpha)$ are the encryption displacements (again uniformly
distributed, as the CVPQC definition dictates, and without
normalization). In short, by changing the order of integration
we see that $\Lambda_{2b}$ is diagonal. Concretely, the term in
the parentheses in the reordered integral (second row
in~(\ref{Lambda})) is an ordinary coherent state
$\ket{\beta+\alpha}$ ($R$ is its distance from the origin, and the
abscissae formed by the phase-space points $(\alpha,\beta)$ and
$(\beta,0)$ enclose the relative angle $\varphi$) which is integrated
over the phase. This gives a diagonal matrix, and the subsequent
integrations only change the weights on the diagonal. Note that
$\Lambda_{2b}$ ``covers'' the area of radius~$2b$ in phase space.
Unfortunately, it is difficult to obtain an analytical solution
of~(\ref{Lambda}). Nevertheless, based on~(\ref{Lambda}) we may
generally write (the tilde indicates normalization)
$\tilde\Lambda_{2b}=\sum_n\lambda^{(b)}_n\ketbra{n}{n}$, where the
$\lambda^{(b)}_n$ have to be determined numerically.
Inserting $\tilde\Lambda_{2b}=\sum_n\lambda^{(b)}_n\ketbra{n}{n}$
and~(\ref{integral_0b}) into Eq.~(\ref{holevo_bound_int2}) we find
the desired Holevo bound, which is depicted in
Fig.~\ref{fig_holevo_bound}. It is interesting to note that the
convergence of the Holevo bound is tightly connected with the
energy constraint $\int\Tr{}\left(\varrho_\beta a^\dagger
a\right){\rm d}P_{\mathcal{B}}\leq\bar n_b$, which is automatically
satisfied through the finiteness of~$b$ in our CVPQC. For a
general distribution~$P(\beta)$ it is satisfied due to the natural
condition $\int{\rm d^2}\beta\, P(\beta)=1$. The importance of this
constraint has already been recognized for capacities of bosonic
channels~\cite{bos_chan1,bos_chan2,bos_chan_lossy,bos_gauss_chan_mem}.
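As a rough numerical illustration of this procedure, the following Python
sketch estimates the Holevo bound~(\ref{holevo_bound_int2}) by Monte Carlo:
the diagonal of $\tilde\Lambda_{2b}$ is obtained by averaging
$|\langle n|\beta+\alpha\rangle|^2$ over $\beta$ and $\alpha$ drawn uniformly
from the disk of radius $b$ (first line of~(\ref{Lambda})), the diagonal of
$\openone_b$ is taken from~(\ref{app_integral0b}), and the entropies are
measured in bits, consistent with the $2^\chi$ interpretation below. The value
of $b$, the photon-number cut-off and the sample size are illustrative choices
only.
\begin{verbatim}
import numpy as np
from scipy.stats import poisson

rng = np.random.default_rng(0)
b, n_max, samples = 1.5, 80, 50_000      # illustrative values only

def uniform_disk(radius, size):
    # points distributed uniformly over a disk (area measure x dx dtheta)
    r = radius * np.sqrt(rng.random(size))
    phi = 2 * np.pi * rng.random(size)
    return r * np.exp(1j * phi)

n = np.arange(n_max + 1)

# diagonal of the maximally mixed state 1_b, Eq. (app_integral0b):
# 1_b(n,n) = (1/b^2) * Prob[Poisson(b^2) > n]
diag_1b = (1.0 - poisson.cdf(n, b**2)) / b**2

# diagonal of the normalized total state Lambda_2b (first line of Eq. (Lambda)):
# average of |<n|beta+alpha>|^2 over uniformly distributed beta and alpha
gamma = uniform_disk(b, samples) + uniform_disk(b, samples)
diag_L = poisson.pmf(n[:, None], np.abs(gamma)**2).mean(axis=1)

def entropy_bits(p):
    p = p[p > 1e-15]
    return -(p * np.log2(p)).sum()

chi = entropy_bits(diag_L) - entropy_bits(diag_1b)   # Eq. (holevo_bound_int2)
print(f"b = {b}: estimated Holevo bound chi = {chi:.3f} bits")
\end{verbatim}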
From Fig.~\ref{fig_holevo_bound} we see that there is no chance
for Eve to find out which coherent state actually passes through the
channel. This is not so surprising considering the non-discrete
character of the input distribution $P(\beta)$. Moreover, Eve cannot
even roughly divide the relevant area so as to approximately
locate a particular encrypted state
$\openone_b^\beta$ (and from this derive the desired position
of an incoming state $\ket{\beta}$). From the informational point
of view, the Holevo bound results can be interpreted as follows:
for a given~$b$ there does not exist any optimal measurement
enabling Eve to divide the circle of radius~$2b$ into more
than~$2^{\chi}$ sections and to tell her in which one the encrypted
state occurs.
Having fixed the input distribution $P(\beta)$ of the coherent states
coming to Alice, another way to further decrease the Holevo bound
might be a different choice of the encryption distribution
in~Eq.~(\ref{Lambda}), where a uniform distribution is implicitly
used. If this distribution were symmetric (i.e. rotationally
invariant, for example a Gaussian one) then Eq.~(\ref{Lambda}) would
again be diagonal and we could easily
calculate~(\ref{holevo_bound_int2}). However, proving the optimality
of such a distribution function is a big challenge. In any case, it
means that by minimizing the Holevo
bound~(\ref{holevo_bound_int2}) within the context of the CVPQC
definition we could tackle the previously mentioned problem of the
maximality of our mixed state, by putting this more suitable
distribution into Eq.~(\ref{integral_0b}).
\section{Conclusion}
In this work we have opened the problem of continuous variable
encryption of unknown quantum states and introduced the concept of
private quantum channels (CVPQC) into this area. To start with, we
have restricted ourselves to coherent states, which belong to the
important class of states with a Gaussian distribution function.
A~particular continuous variable private quantum channel was
proposed and its properties were studied. First, this means
that we have established the notion of the maximally mixed
state with regard to its non-discrete (continuous variable)
nature. For this kind of mixture we investigated how many
encryption operations are sufficient for an incoming
coherent state to be considered secure. This quantity was determined by
calculating the Hilbert-Schmidt distance between the mixture and
the encrypted state for an arbitrary number of encryption
operations. Next, we have studied the possibility of eavesdropping
on the quantum channel. We have supposed that Eve is able to
perform an optimal measurement to get the maximum information on
the state (which is limited by the calculated Holevo bound) or is
able to use some quantum state estimation methods. The second
possibility (which requires Eve's a priori information on the
statistics of the states, in the sense that she knows that Alice
gets many copies of the same unknown coherent state to encrypt)
restricts the CVPQC definition. Finding a way to avoid this kind of
attack is the most desirable direction for further research.
Besides the above mentioned topic we have addressed many more
intriguing questions which can stimulate further research in this
area and improve the existing protocol. Among many topics let us
name the problem of a universal distribution of the incoming
states, a universal distribution of the encryption operations, or
both. This is tightly connected with the freedom in the choice of the
definition of the maximally mixed state. We have seen that there
exists a certain ambiguity in the definition of what we call here
the maximally mixed state in phase space. Its relevance is
measured by the accessible information (or, eventually, its upper limit
given by the Holevo bound) Eve can get, and the question is which
definition of the maximally mixed state is the most
appropriate for a given incoming distribution of states. Together
with these topics another generalization presents itself, namely the
possibility to encrypt other Gaussian states, especially
squeezed states.
\begin{acknowledgments}
The author is grateful to L.~Mi\v sta, R.~Filip and J.~Fiur\'a\v
sek for useful discussions in the early stages of this work, to M.~Du\v
sek for reading the manuscript and to T.~Hol\'y for granting the
computational capacity. The support from the EC project SECOQC
(IST-2002-506813) is acknowledged.
\end{acknowledgments}
\appendix
\section{}
\label{app_exact_value}
Eq.~(\ref{HSdist_into_traces}) consists of three parts. Let us
calculate them step by step. Rewriting~(\ref{integral_0b}) in
a more suitable form, we obtain the diagonal elements
\begin{equation}\label{app_integral0b}
\openone_b(n,n)=\frac{1}{b^2\exp(b^2)}
\left(\sum_{m=n+1}^\infty\frac{b^{2m}}{m!}\right)\ketbra{n}{n}.
\end{equation}
Now, we may easily calculate
\begin{align}
\Tr{}\left(\openone_b^2\right)
& = \frac{1}{b^4\exp(2b^2)}
\sum_{n=1}^\infty\left(\sum_{m=n}^\infty\frac{b^{2m}}{ m!}\right)^2
= \frac{1}{b^4\exp(2b^2)}\left(\sum_{n=1}^\infty\frac{b^{4n}}{(n!)^2}n
+ 2\sum_{k=1}^\infty\sum_{n=1}^\infty\frac{b^{4n+2k}}{ n!(n+k)!}n\right)\nonumber \\
& = \frac{1}{b^4\exp(2b^2)}\left(\sum_{n=0}^\infty\frac{b^{4(n+1)}}{ n!(n+1)!}
+ 2\sum_{k=1}^\infty\sum_{n=0}^\infty\frac{b^{4(n+1)+2k}}{n!(n+k+1)!}\right)
= \frac{1}{b^2\exp(2b^2)}\left(I_1\left(2b^2\right)
+ 2\sum_{k=1}^\infty I_{k+1}\left(2b^2\right)\right)\nonumber\\
& =
\frac{1}{b^2\exp(2b^2)}\left(\exp(2b^2)-I_0\left(2b^2\right)-I_1\left(2b^2\right)\right),
\end{align}
where $I_s$ is the modified Bessel function of the first kind of
order $s$. The last line follows from the identity
\begin{equation}\label{bess_ident}
I_0(x)+2\sum_{k=1}^\infty I_k(x)=\exp(x).
\end{equation}
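The closed-form expression for $\Tr{}\left(\openone_b^2\right)$ obtained above
can be checked numerically. The short Python sketch below compares it with a
direct truncated summation of the squared diagonal
elements~(\ref{app_integral0b}); the value of $b$ and the cut-off are
illustrative.
\begin{verbatim}
import numpy as np
from scipy.special import iv           # modified Bessel function I_s
from scipy.stats import poisson

b, n_max = 1.3, 200                    # illustrative value and cut-off
n = np.arange(n_max + 1)
# diagonal of 1_b, Eq. (app_integral0b): (1/b^2) * Prob[Poisson(b^2) > n]
diag = (1.0 - poisson.cdf(n, b**2)) / b**2
direct = np.sum(diag**2)               # Tr(1_b^2) summed directly
closed = (np.exp(2*b**2) - iv(0, 2*b**2) - iv(1, 2*b**2)) / (b**2*np.exp(2*b**2))
print(direct, closed)                  # the two numbers should agree
\end{verbatim}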
For the calculation of the cross term
$\Tr{}\left(\openone_b\Phi_N\right)$ we use~(\ref{app_integral0b})
again. For the sake of clarity, however, we will calculate only
$p\Tr{}\left(\openone_b\varrho_p\right)$. The overall trace is
given by the summation $\Tr{}\left(\openone_b\Phi_N\right)=1/M
\sum_{p=1}^Np\Tr{}\left(\openone_b\varrho_p\right)$, which follows
from~(\ref{mix_on_circle}). From~(\ref{mix_on_circle__general}) we
get a general diagonal element
\begin{equation}\label{prhop}
p\varrho_p(n,n)=p\exp\left(-r_p^2\right)\frac{r_p^{2n}}{ n!}
\ketbra{n}{n}
\end{equation}
and then
\begin{align}
p\Tr{}\left(\openone_b\varrho_p\right)
&= \frac{1}{ b^2\exp(b^2)}\frac{p}{\exp(r_p^2)}
\sum_{n=0}^\infty\sum_{m=n+1}^\infty\frac{r_p^{2n}}{ n!}\frac{b^{2m}}{ m!}
= \frac{1}{ b^2\exp(b^2)}\frac{p}{\exp(r_p^2)}
\sum_{k=1}^\infty\sum_{n=0}^\infty\frac{b^{2(n+k)}}{ n!(n+k)!}r_p^{2n}\nonumber \\
&= \frac{1}{ b^2\exp(b^2)}\frac{p}{\exp(r_p^2)}\sum_{k=1}^\infty
\left(\frac{b}{ r_p}\right)^kI_k(2r_pb).
\end{align}
Overall, we have
\begin{equation}\label{cross}
\Tr{}\left(\openone_b\Phi_N\right)
=\frac{2}{ N(N+1)}\frac{1}{ b^2\exp(b^2)}
\sum_{p=1}^Np\exp\left(-r_p^2\right)\sum_{k=1}^\infty
\left(\frac{b}{ r_p}\right)^kI_k(2r_pb).
\end{equation}
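Equation~(\ref{cross}) can be verified in the same spirit: since $\openone_b$
is diagonal, the cross term only involves the diagonal elements of $\Phi_N$
given in~(\ref{PhiNnn}) below. A minimal Python check, with illustrative $b$,
$N$ and summation cut-offs, reads
\begin{verbatim}
import numpy as np
from scipy.special import iv
from scipy.stats import poisson

b, N, n_max, k_max = 1.3, 10, 200, 60        # illustrative values
p = np.arange(1, N + 1)
r = p * b / N                                 # r_p = p*b/N
norm = 2.0 / (N * (N + 1))

# direct contraction of the diagonals of 1_b and Phi_N (Eq. (PhiNnn))
n = np.arange(n_max + 1)
diag_1b = (1.0 - poisson.cdf(n, b**2)) / b**2
diag_Phi = norm * (p * poisson.pmf(n[:, None], r**2)).sum(axis=1)
direct = np.sum(diag_1b * diag_Phi)

# closed form, Eq. (cross)
k = np.arange(1, k_max + 1)
bess = ((b / r)[:, None]**k * iv(k, 2 * b * r[:, None])).sum(axis=1)
closed = norm / (b**2 * np.exp(b**2)) * np.sum(p * np.exp(-r**2) * bess)
print(direct, closed)                         # the two numbers should agree
\end{verbatim}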
The third part can be written as
\begin{equation}\label{Phi_n^2}
\Tr{}\left(\Phi_N^2\right)=\left(\frac{2}{N(N+1)}\right)^2
\Tr{}\left(\sum_{i,j=1}^{N}p_ip_j\varrho_{p_{i}}\varrho_{p_{j}}\right)
\end{equation}
where the individual summands have the form
\begin{equation}\label{rhoirhoj}
\Tr{}\left(p_ip_j\varrho_{p_{i}}\varrho_{p_{j}}\right)
=\frac{p_ip_j}{\exp\left(r_{p_i}^2+r_{p_j}^2\right)}
\left(
I_0(2R_{ij})+2\sum_{k=1}^\infty I_{p_ip_jk}(2R_{ij})
\right),
\end{equation}
which can easily be seen if we
substitute~(\ref{mix_on_circle__general}) into a slight
generalization of~(\ref{bess_ident}),
\begin{equation}\label{bess_ident2}
I_0(2xy)+2\sum_{k=1}^\infty I_k(2xy)=\exp(x^2)\exp(y^2),
\end{equation}
where, for our purpose,
$x=r_{p_i}=p_ib/N$, $y=r_{p_j}=p_jb/N$ and $R_{ij}=p_ip_j(b/N)^2$.
\section{}
\label{app_guess}
In the limit $N\to\infty$ it can be shown that
$\lim_{N\to\infty}|\Phi_N-\openone_b|=0$. The only nonzero
elements of $\Phi_N$ lie on its diagonal and are equal to the
diagonal elements of $\openone_b$. Let us demonstrate this for the first
diagonal element ($n=0$). From~(\ref{mix_on_circle__general})
and~(\ref{mix_on_circle}) we see that
\begin{equation}\label{Phi_N00}
\Phi_N(0,0)=\frac{2}{N(N+1)}\sum_{p=1}^Np\exp(-r_p^2)\ketbra{0}{0}
=\frac{2}{N(N+1)}\sum_{p=1}^N\sum_{n=0}^\infty(-1)^n\frac{b^{2n}}{n!}
\frac{p^{2n+1}}{N^{2n}}\ketbra{0}{0}.
\end{equation}
Using the fact that
\begin{equation}\label{powersum}
\sum_{p=1}^Np^d=\frac{1}{
d+1}\sum_{p=1}^{d+1}(-1)^{d+1-p}\binom{d+1}{p}B_{d+1-p}N^p,
\end{equation}
where $B_n$ are the Bernoulli numbers, inserting the highest-order
term $\frac{N^{d+1}}{d+1}$ from~(\ref{powersum}) into~(\ref{Phi_N00})
(the lower-order terms subsequently tend to zero) and
finally letting $N\to\infty$, we get (omitting the ket-bra)
\begin{equation}\label{PhiN00_trans}
\lim_{N\to\infty}\Phi_N(0,0)=2\lim_{N\to\infty}\sum_{n=0}^\infty
(-1)^n\frac{b^{2n}}{n!}\frac{N^{2(n+1)}}{N^{2n+1}(N+1)}\frac{1}{2(n+1)}
=\sum_{n=0}^\infty(-1)^n\frac{b^{2n}}{(n+1)!}
=\frac{1-\exp(-b^2)}{b^2}\equiv\openone_b(0,0)
\end{equation}
as can be seen from~(\ref{app_integral0b}). In this way we could
continue for all diagonal elements $\Phi_N(n,n)$. Let us, however,
turn our attention elsewhere. Realizing that a general diagonal element
is of the form
\begin{equation}\label{PhiNnn}
\Phi_N(n,n)=\frac{2}{ N(N+1)}\frac{1}{n!}
\sum_{p=1}^Np\exp(-r_p^2)r_p^{2n}\ketbra{n}{n},
\end{equation}
we see that the expression for the
HS~distance~(\ref{HSdist_explicit}) can be simplified in the
following way
\begin{align}\label{HS_simple}
D_{HS}^2(\openone_b,\Phi_N)& =|\openone_b-\Phi_N|^2\\
& +
2\left(\frac{2}{N(N+1)}\right)^2
\left[\sum_{k=1}^N\frac{k^2}{\exp\left(2r_k^2\right)}
\left(
\sum_{n=1}^\infty I_{kn}\left(2r_k^2\right)
\right)
+\sum_{\ahead{k_1,k_2=1}{k_1\not=k_2}}^N\frac{k_1k_2}{\exp\left(r_{k_1}^2+r_{k_2}^2\right)}
\left(
\sum_{n=1}^\infty I_{k_1k_2n}(2R_{12})
\right)
\right].\nonumber
\end{align}
This form will help us in deriving~(\ref{HSdist_guess})
\begin{equation}\label{guess}
|\openone_b-\Phi_N|=\sum_{k=0}^\infty|\openone_b(k,k)-\Phi_N(k,k)|
\approx\sum_{k=0}^\infty\left|\openone_b(k,k)\left(1-\frac{N}{ N+1}\right)\right|
=\frac{1}{ N+1}
\end{equation}
and hence
$|\openone_b-\Phi_N|^2\approx\left(\frac{1}{N+1}\right)^2$. Here we have
again inserted the highest-order term $\frac{N^{d+1}}{d+1}$ into
$\Phi_N(k,k)$ and used the normalization condition
$\sum_{k=0}^\infty\openone_b(k,k)=1$. The second part
of~(\ref{HS_simple}) tends to zero even faster. This can be seen with
the help of~(\ref{bess_ident2}), from which it follows that
$\sum_{n=1}^\infty
I_{kn}\left(2r_k^2\right)\ll\exp\left(2r_k^2\right)$, especially
for higher $k$ (and, of course, similarly for the second summand
in the square brackets of expression~(\ref{HS_simple})). From these
considerations~(\ref{HSdist_guess}) follows and, not surprisingly, it
asymptotically approaches the exact value.
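The limiting behaviour derived in this appendix can also be observed
numerically. The following Python fragment evaluates $\Phi_N(0,0)$
from~(\ref{Phi_N00}) for increasing $N$ and compares it with
$\openone_b(0,0)=(1-\exp(-b^2))/b^2$; the value of $b$ is illustrative.
\begin{verbatim}
import numpy as np

b = 1.3                                          # illustrative value
target = (1 - np.exp(-b**2)) / b**2              # 1_b(0,0), Eq. (app_integral0b)
for N in (10, 100, 1000, 10000):
    p = np.arange(1, N + 1)
    phi00 = 2.0 / (N * (N + 1)) * np.sum(p * np.exp(-(p * b / N)**2))
    print(N, phi00, target)                      # phi00 approaches target
\end{verbatim}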
\end{document}
\begin{document}
\title{Studies in the Theory of Quantum Games}
\author{Azhar Iqbal}
\prevdegrees{B.Sc. (Hons), University of Sheffield, UK, 1995}
\department{Department of Electronics }
\thisdegree{Doctor of Philosophy}
\university{Quaid-i-Azam University, Islamabad, Pakistan}
\degreemonth{September}
\degreeyear{2004}
\date{3rd September 2004}
\chairmanname{Professor Dr. S. Azhar Abbas Rizvi}
\chairmantitle{Head of Department}
\super{Dr. Abdul Hameed Toor}
\supertitle{Associate Professor}
\maketitle
\tableofcontents
\listoffigures
\begin{abstract}
Theory of quantum games is a new area of investigation that has gone through
rapid development during the last few years. Initial motivation for playing
games, in the quantum world, comes from the possibility of re-formulating
quantum communication protocols, and algorithms, in terms of games between
quantum and classical players. This possibility led to the view that quantum
games have the potential to provide helpful insight into the working of quantum
algorithms, and even to help in finding new ones. This thesis analyzes and compares
some interesting games when played classically and quantum mechanically. A
large part of the thesis concerns investigations into a refinement of
the Nash equilibrium concept. The refinement, called an evolutionarily stable
strategy (ESS), was originally introduced in the 1970s by mathematical biologists
to model an evolving population using techniques borrowed from game theory.
Analysis is developed around a situation when quantization changes ESSs
without affecting corresponding Nash equilibria. Effects of quantization on
solution-concepts other than Nash equilibrium are presented and discussed. For
this purpose the notions of value of coalition, backwards-induction outcome,
and subgame-perfect outcome are selected. Repeated games are known to have a
different information structure from one-shot games. An investigation is
presented into a possible way in which quantization changes the outcome of a
repeated game. Lastly, two new suggestions are put forward to play quantum
versions of classical matrix games. The first one uses the association of De
Broglie waves with travelling material objects as a resource for playing a
quantum game. The second suggestion concerns an EPR type setting exploiting
directly the correlations in Bell's inequalities to play a bi-matrix game.
\end{abstract}
\vspace*{3.35in}
\begin{center}
{\Large Dedication}
\end{center}
\textit{This thesis is dedicated to my wife Ayesha and son Emmad. Both
suffered and were kept waiting for long hours during the years of research. I
greatly appreciate Ayesha's warm and continuous support and Emmad's patience,
without which the task was plainly impossible. The thesis is also dedicated to
the loving memory of my late parents.}
This thesis is based on the following publications:
\begin{itemize}
\item A. Iqbal and A. H. Toor, \textit{Evolutionarily stable strategies in
quantum games}. Physics Letters, A \textbf{280}/5-6, pp 249-256 (2001).
\item A. Iqbal and A. H. Toor, \textit{Entanglement and dynamic stability of
Nash equilibria in a symmetric quantum game}. Physics Letters, A
\textbf{286}/4, pp 245-250 (2001).
\item A. Iqbal and A. H. Toor, \textit{Quantum mechanics gives stability to a
Nash equilibrium}. Physical Review, A \textbf{65}, 022306 (2002). {\small This
article is also selected to be reproduced in the February 1, 2002 issue of the
Virtual Journal of Biological Physics Research: http://www.vjbio.org.}
\item A. Iqbal and A. H. Toor, \textit{Quantum cooperative games}. Physics
Letters, A \textbf{293}/3-4 pp 103-108 (2002).
\item A. Iqbal and A. H. Toor, \textit{Darwinism in quantum systems?} Physics
Letters, A \textbf{294}/5-6 pp 261-270 (2002).
\item A. Iqbal and A. H. Toor, \textit{Backwards-induction outcome in a
quantum game}. Physical Review, A \textbf{65}, 052328 (2002). {\small This
article is also selected to be reproduced in the May 2002 issue of the Virtual
Journal of Quantum Information: http://www.vjquantuminfo.org.}
\item A. Iqbal and A. H. Toor, \textit{Quantum repeated games}. Physics
Letters, A \textbf{300}/6, pp 537-542 (2002).
\item A. Iqbal and A. H. Toor, \textit{Stability of mixed Nash equilibria in
symmetric quantum games}. Communications in Theoretical Physics, Vol.
\textbf{42}, No. 3, pp 335-338 (2004).
\item A. Iqbal, \textit{Quantum games with a multi-slit electron diffraction
set-up.} Nuovo Cimento B, Vol. \textbf{118}, Issue 5, pp 463-468 (2003).
\item A. Iqbal, \textit{Quantum correlations and Nash equilibria of a
bi-matrix game}. Journal of Physics A: Mathematical and General \textbf{37},
L353-L359 (2004).
\end{itemize}
\pagebreak
\chapter{Introduction}
Game theory \cite{Rasmusen89} is a branch of mathematics that presents formal
analysis of the interaction among a group of rational players. The players
have choices available to them, from which they select a particular course of
action. They are supposed to behave strategically and are motivated to increase
their utilities, which depend on the collective course of action.
Modern game theory started with the work of John von Neumann and Oskar
Morgenstern \cite{Neumann} in the 1930s. During the same years von Neumann
\cite{NeumannQM} also made important contributions in quantum mechanics, a
branch of physics developed in the 1920s to understand the microworld of atoms and
molecules. However, game theory and quantum mechanics were developed as
separate fields with apparently different domains of applications.
In the early years of development the two fields did not find a common
ground, or physical situation, that could motivate an interplay between
them. More than fifty years afterwards, quantum computation
\cite{Feynman1, Deutsch,Shor,Shor1,Grover,Grover1}\ was developed in the 1980s as
a new field of research that combined elements from quantum mechanics and the
theory of computation \cite{Turing}. Computer science extensively uses the
theory of information and communication \cite{Shannon}. Quantum computation
motivated the development of quantum information \cite{Schumacher}; thus
providing an environment where the two distinct interests of von Neumann, i.e.
quantum mechanics and game theory, could be shown to have some sort of
interplay. Certain quantum communication protocols, and algorithms, were
reformulated in the language of game theory
\cite{Wiesner,Goldenberg,Vaidman,Ekert,Gisin,WernerRF}. It was not long before
the first systematic procedures \cite{MeyerDavid,Eisert} were proposed to
quantize well-known classical games \cite{Rasmusen89}.
Classical bits are the entities that are used to physically implement
classical information. Quantum information \cite{Nielsen}, on the other hand,
uses quantum bits (qubits) for its physical implementation. It is known that
the problems of classical game theory can be translated into physical set-ups
that use classical bits. This immediately motivates the question of how games
are transformed when implemented with qubits. As is the case with
quantum information, it is natural to quantize classical games when qubits,
instead of classical bits, are used in the physical set-ups to play the games.
This thesis follows a particular approach in the theory of quantum games. The
thesis builds upon convincing proposed procedures for quantizing
well-known games from classical game theory. A large part of this thesis
concerns studying the concept of an Evolutionarily Stable Strategy (ESS)
\cite{Smith} from mathematical population biology within the context of
quantum games. The thesis argues that importing a population setting into
quantum games is not unusual, though it may give such an impression. It is
observed that even John Nash \cite{JohnNash,JohnNash1} had a population
setting in his mind when he introduced his solution concept of the Nash
equilibrium for non-cooperative games. The study of evolutionary stability in
quantum games is presented with the view that importing the concept of an ESS,
and its associated population setting, into quantum games is natural to the same
extent as studying the Nash equilibrium in quantum games.
Game theory \cite{Rasmusen89} also offers solution concepts that are relevant
to certain types of games. The notions of value of coalition,
backwards-induction outcome and subgame-perfect outcome present a few
examples. The types of games for which these concepts are appropriate are
known to be the cooperative, the sequential (with moves made in order) and
repeated (with moves made simultaneously in one stage) games, respectively. To
show how quantization affects solutions of these games, the relevant solution
concepts are investigated in relation to quantization of these games. This
study shows that quantum versions of these games may have outcomes that are
often extraordinary and sometimes may even be counter-intuitive, from the
point of view of classical game theory.
Motivated by our preferred approach towards quantum games, i.e. to rely on
proposed convincing procedures to quantize known games from the classical game
theory, two new suggestions are put forward about quantum versions of
two-player two-strategy games. The first suggestion presents a set-up that
uses the association of De Broglie waves with travelling material objects to
play a quantum version of a two-player two-strategy game. The second
suggestion uses an EPR type setting in which spatially separated players make
measurements along chosen directions to play a game.
The concluding chapter collects together main results obtained in the thesis.
\chapter{Elements of game theory}
\section{Introduction}
Many decision making problems in sociology, politics and economics deal with
situations in which the results depend not only on the action of one
individual but also on the actions of others. Game theory is a branch of
mathematics which is used in modelling situations in which many individuals
with conflicting interests interact, such that the results depend on the
actions of all the participants. It is considered a formal way to analyze
interaction among a group of individuals who behave rationally and
strategically. The participants in a game strive to maximize their (expected)
utilities by choosing particular courses of action. Because the actions of the
others matter, a player's final utility depends on the profile of courses of
action chosen by all the individuals. A game deals with the following concepts:
\begin{itemize}
\item \emph{Players}. These are the individuals who compete in the game. A
player can be an individual or a set of individuals.
\item A \emph{move}\textit{\ }will be a player's action.
\item A player's (pure) \emph{strategy}\textit{\ }will be a rule (or
function) that associates a player's move with the information available to
her at the time when she decides which move to choose.
\item A player's \emph{mixed strategy} is a probability measure on the
player's space of pure strategies.
\item \emph{Payoffs} are real numbers representing the players' utilities.
\end{itemize}
Although first attempts to analyze such problems are apparently rather old
\cite{Cournot}, modern game theory started with the work of John von Neumann
and Oskar Morgenstern who wrote the book \textit{Theory of Games and Economic
Behaviour }\cite{Neumann}. Game theory is now widely used in research in
diverse areas ranging from economics, social science, to evolutionary biology
and population dynamics.
\section{Representations of games}
There are different ways to represent a strategic interaction between players.
In game theory \cite{Rasmusen89} two representations are well known:
\subsection{Normal form}
A \textit{normal} (strategic) \textit{form} of a game consists of:
\begin{enumerate}
\item A finite set of $N$ agents or players
\item Strategy sets $S_{1},S_{2},...S_{N}$ for the $N$ players
\item Payoff functions $P_{i}$, $i=1,2,...N$, are mappings from the set
$S_{1}\times S_{2}\times...\times S_{N}$ to the set of real numbers
$\mathbf{R}$.
\end{enumerate}
The set $S_{1}\times S_{2}\times...\times S_{N}$ is called the strategy space
$S$. A member $s\in S$ is known as a strategy profile with $s=(s_{1}
,s_{2},...s_{N})$ and $s_{i}\in S_{i}$.
\subsection{Extensive form}
The \textit{extensive form} of a game is a complete description of:
\begin{enumerate}
\item the set of players
\item who moves when and what their choices are
\item the players' payoffs as a function of the choices that are made
\item what players know when they move
\end{enumerate}
The extensive form of a game, as opposed to the normal (or strategic) form,
provides a more appropriate framework for the analysis of strategic
interactions that involve sequential moves. It gives a richer specification of
a strategic interaction by specifying who moves when, doing what and with what
information. The easiest way to represent an extensive form game is to use a
\textit{game tree}, which is a multi-person generalization of a \textit{decision
tree }\cite{Rasmusen89}.
\section{Information structure in games}
The information at the disposal of a player, when she has to select a move, is
described by the \textit{information structure }in the game\textit{. }Based on
this structure games can usually be put into one of the following two
broad classes, which also form the two main branches of game theory.
\subsection{Cooperative games}
In cooperative games the players are allowed to form \textit{binding
agreements. }These are restrictions on the possible actions decided by two or
more players. To be binding, an agreement usually requires an outside
authority that can monitor the agreement at no cost and impose on violators
sanctions so severe that cheating is prevented. For players in a binding
agreement there is a strong incentive to work together to receive the largest
total payoff. The agreements may include, for example, \textit{commitments}
and \textit{threats}.
\subsection{Non-cooperative games}
In non-cooperative games the players may not form binding agreements. Neither
do the players cooperate nor do they enter into negotiation for achieving a
common course of action. However the players know how the actions, their own
and the actions of the other players, will determine the payoffs of every player.
\section{Matrix games}
One way to describe a game is to list the players participating in the game,
and to list the alternative choices or moves available to each player. In the
case of a two-player game, the moves of the first player form the rows, and
the moves of the second player the columns of a \textit{matrix}. The entries
in the matrix are two numbers representing the payoff to the first and second
player, respectively. Such a description of a game makes possible to
completely represent the players' payoffs by a matrix. In game theory these
games are recognized as \textit{matrix games. }The example below is a matrix
game between two players:
\begin{equation}
\begin{array}
[c]{c}
\text{Alice}
\end{array}
\begin{array}
[c]{c}
S_{1}\\
S_{2}\\
...\\
S_{N}
\end{array}
\overset{\overset{
\begin{array}
[c]{c}
\text{Bob}
\end{array}
}{
\begin{array}
[c]{ccccccc}
S_{1} & & S_{2} & & ... & & S_{N}
\end{array}
}}{\left(
\begin{array}
[c]{cccc}
(a_{11},b_{11}) & (a_{12},b_{12}) & ... & (a_{1N},b_{1N})\\
(a_{21},b_{21}) & (a_{22},b_{22}) & ... & (a_{2N},b_{2N})\\
... & ... & ... & ...\\
(a_{N1},b_{N1}) & (a_{N2},b_{N2}) & ... & (a_{NN},b_{NN})
\end{array}
\right) }
\end{equation}
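In a computer representation such a game is conveniently stored as a pair of
payoff arrays. The following Python fragment, with arbitrary illustrative
numbers, shows this for a two-player $2\times2$ matrix game.
\begin{verbatim}
import numpy as np

# a two-player 2x2 matrix game stored as two payoff arrays; entry [i, j] is the
# payoff when Alice plays S_{i+1} and Bob plays S_{j+1} (numbers are arbitrary)
a = np.array([[1, 4],
              [3, 2]])     # Alice's payoffs a_ij
b = np.array([[2, 1],
              [0, 3]])     # Bob's payoffs b_ij
i, j = 0, 1
print(a[i, j], b[i, j])    # payoffs for the profile (S_1, S_2)
\end{verbatim}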
\subsection{Constant-sum games}
In a \textit{constant-sum game}, the sum of all players' payoffs is the same
for any outcome. Hence, a gain for one participant is always at the expense of
another, such as in most sporting events.
\subsection{Zero-sum game}
A \textit{zero-sum game} is a special case of a constant sum game in which all
outcomes involve a sum of all players' payoffs equal to $0$. Since payoffs can
always be normalized, a constant sum game may be represented as (and is
equivalent to) a zero-sum game.
\subsection{Bi-matrix games}
A class of games that have attracted much attention because of the relative
simplicity of their mathematical analysis involves two players, Alice and Bob.
Each player has his own payoff matrix written as $a_{ij}$ and $b_{ij}$,
respectively. Games of this kind are called \textit{bi-matrix games.}
\section{Examples of matrix games}
The following examples describe some well-known matrix games.
\subsection{Prisoners' Dilemma}
The most popular bi-matrix game is the so-called \textit{Prisoners'
Dilemma} (PD) describing the following situation:
\begin{itemize}
\item Two criminals are arrested after having committed a crime together and
wait for their trial.
\item Each suspect is placed in a separate cell and offered the opportunity
to confess to the crime.
\item Each suspect may choose between two strategies namely confessing ($D$)
and not confessing ($C$), where $C$ and $D$ stand for cooperation and defection.
\item If neither suspect confesses, i.e. $(C,C),$ they go free, and split the
proceeds of their crime which we represent by $3$ units of payoff for each suspect.
\item However, if one prisoner confesses ($D$) and the other does not ($C$),
the prisoner who confesses testifies against the other in exchange for going
free and gets the entire $5$ units of payoff, while the prisoner who did not
confess goes to prison and gets nothing.
\item If both prisoners confess, i.e. ($D,D$), then both are given a reduced
term, but both are convicted, which we represent by giving each $1$ unit of
payoff: better than having the other prisoner confess, but not so good as
going free.
\end{itemize}
The game can be represented by the following matrix of payoffs:
\begin{equation}
\begin{array}
[c]{c}
\text{Alice}
\end{array}
\begin{array}
[c]{c}
C\\
D
\end{array}
\overset{\overset{
\begin{array}
[c]{c}
\text{Bob}
\end{array}
}{
\begin{array}
[c]{cc}
C & D
\end{array}
}}{\left(
\begin{array}
[c]{cc}
(3,3) & (0,5)\\
(5,0) & (1,1)
\end{array}
\right) } \label{PDmatrix1}
\end{equation}
where the first and the second entry correspond to Alice's and Bob's payoff, respectively.
For either choice of the opponent it is hence advantageous to defect ($D$). On
the other hand, if both defect ($D,D$) the payoff remains less than in the
case when both cooperate ($C,C$). This is the origin of the dilemma.
A generalized matrix for the PD is given as:
\begin{equation}
\begin{array}
[c]{c}
\text{Alice}
\end{array}
\begin{array}
[c]{c}
C\\
D
\end{array}
\overset{\overset{
\begin{array}
[c]{c}
\text{Bob}
\end{array}
}{
\begin{array}
[c]{cc}
C & D
\end{array}
}}{\left(
\begin{array}
[c]{cc}
(r,r) & (s,t)\\
(t,s) & (u,u)
\end{array}
\right) } \label{PDmatrix}
\end{equation}
where $s<u<r<t$.
\subsection{Battle of Sexes}
Battle of Sexes (BoS) is a bi-matrix game that can be described as follows:
\begin{itemize}
\item Alice and Bob agree to meet in the evening, but cannot recall if they
will be attending the opera or a boxing match.
\item Alice prefers the opera and Bob prefers the boxing match.
\item Both prefer being together to being apart.
\end{itemize}
Thus, while both parties prefer to find themselves at the same place, Alice
and Bob cannot agree which event to attend. The game has the following matrix representation:
\begin{equation}
\begin{array}
[c]{c}
\text{Alice}
\end{array}
\begin{array}
[c]{c}
S_{1}\\
S_{2}
\end{array}
\overset{\overset{
\begin{array}
[c]{c}
\text{Bob}
\end{array}
}{
\begin{array}
[c]{cc}
S_{1} & S_{2}
\end{array}
}}{\left(
\begin{array}
[c]{cc}
(\alpha,\beta) & (\gamma,\gamma)\\
(\gamma,\gamma) & (\beta,\alpha)
\end{array}
\right) } \label{BoSMatrix}
\end{equation}
where $\alpha>\beta>\gamma$.
\subsection{Matching Pennies}
Matching Pennies is a zero-sum game with two players Alice and Bob. Each shows
either heads or tails from a coin. If both are heads or both are tails then
Alice wins, otherwise Bob wins. The payoff matrix is given as
\begin{equation}
\begin{array}
[c]{c}
\text{Alice}
\end{array}
\begin{array}
[c]{c}
H\\
T
\end{array}
\overset{\overset{
\begin{array}
[c]{c}
\text{Bob}
\end{array}
}{
\begin{array}
[c]{cc}
H & T
\end{array}
}}{\left(
\begin{array}
[c]{cc}
(1,-1) & (-1,1)\\
(-1,1) & (1,-1)
\end{array}
\right) }
\end{equation}
with a winner getting a reward of $1$ against the loser getting $-1$.
\subsection{Rock-Scissors-Paper}
Two children, Alice and Bob, simultaneously make one of three symbols with
their fists - a rock, paper, or scissors (RSP). Simple rules of ``rock breaks
scissors, scissors cut paper, and paper covers rock'' dictate which symbol
beats the other. If both symbols are the same, the game is a tie:
\begin{equation}
\begin{array}
[c]{c}
\text{Alice}
\end{array}
\begin{array}
[c]{c}
R\\
S\\
P
\end{array}
\overset{\overset{
\begin{array}
[c]{c}
\text{Bob}
\end{array}
}{
\begin{array}
[c]{ccc}
R & S & P
\end{array}
}}{\left(
\begin{array}
[c]{ccc}
0 & 1 & -1\\
-1 & 0 & 1\\
1 & -1 & 0
\end{array}
\right) }
\end{equation}
\section{Solution concepts}
Solving a game means finding a set of moves for the players which represent
their rational choices. Unlike in other fields, the notion of a ``solution''
is more tenuous in game theory. In game theory a solution is generally thought
of as a systematic description\textit{ }of the outcomes that may emerge during
the play of a game.
\subsection{Rational ``solution'' of Prisoners' Dilemma}
For the bi-matrix PD it is self-evident how an intelligent individual should
behave. No matter what a suspect believes his partner is going to do, it is
always best to confess ($D$):
\begin{itemize}
\item If the partner in the other cell is not confessing ($C$), it is
possible to get $5$ instead of $3$.
\item If the partner in the other cell is confessing ($D$), it is possible to
get $1$ instead of $0$.
\end{itemize}
Yet the pursuit of individually sensible behavior results in each player
getting only $1$ unit of payoff, much less than the $3$ units each that they
would get if neither confessed ($C,C$). This conflict between the pursuit of
individual goals and the common good is at the heart of many game theoretic
problems. For PD the rational choice for both players is to defect.
\subsection{Nash equilibrium}
A \textit{Nash equilibrium} (NE), named after John Nash, is a set of
strategies, one for each player, such that no player has an incentive to
unilaterally change her action. Players are in equilibrium if a change in
strategies by any one of them would lead that player to earn less than if she
remained with her current strategy.
The implicit assumption behind the concept of a NE is that players make their
choices simultaneously and independently. This idea also assumes that each
player participating in a game behaves rationally and seeks to maximize
his/her own payoff. A strategy profile $s=(s_{1}^{\ast},s_{2}^{\ast}
,...s_{N}^{\ast})$ is a NE if no player is left with a motivation to
deviate unilaterally. Suppose $P_{i}$ is the $i$-th player's payoff; then the
following condition defines the NE:
\begin{equation}
P_{i}(s_{1}^{\ast},s_{2}^{\ast},...s_{i-1}^{\ast},s_{i}^{\ast},s_{i+1}^{\ast
}...,s_{N}^{\ast})\geq P_{i}(s_{1}^{\ast},s_{2}^{\ast},...s_{i-1}^{\ast}
,s_{i},s_{i+1}^{\ast}...,s_{N}^{\ast})
\end{equation}
When the $N$ players are playing the strategy profile $s=(s_{1}^{\ast}
,s_{2}^{\ast},...s_{N}^{\ast})$ the $i$th player's decision to play $s_{i} $
instead of $s_{i}^{\ast}$ cannot increase his/her payoff. A NE thus defines a
set of strategies that represents a \textit{best choice} for each single
player if all the other players take their best decisions too.
The well-known \emph{Nash Theorem} \cite{JohnNash} in game theory guarantees
the existence of a set of mixed strategies for finite non-cooperative games of
two or more players in which no player can improve his payoff by unilaterally
changing his/her strategy.
\subsection{Nash equilibrium in the Prisoners' Dilemma}
Let Alice play $C$ with probability $p$ and play $D$ with probability $(1-p)$.
Similarly, let Bob play $C$ with probability $q$ and play $D$ with probability
$(1-q)$. The players' payoffs for the PD matrix (\ref{PDmatrix1}) are
\begin{align}
P_{A}(p,q) & =pq(3)+p(1-q)(0)+(1-p)q(5)+(1-p)(1-q)(1)\nonumber\\
& =-p+4q-pq+1\\
P_{B}(p,q) & =pq(3)+p(1-q)(5)+(1-p)q(0)+(1-p)(1-q)(1)\nonumber\\
& =4p-q-pq+1
\end{align}
The inequalities defining the NE in PD can be written as
\begin{align}
P_{A}(p^{\ast},q^{\ast})-P_{A}(p,q^{\ast}) & =-(p^{\ast}-p)(1+q^{\ast}
)\geq0\nonumber\\
P_{B}(p^{\ast},q^{\ast})-P_{B}(p^{\ast},q) & =-(q^{\ast}-q)(1+p^{\ast})\geq0
\end{align}
which produces a unique NE in the PD: $p^{\ast}=q^{\ast}=0$. The NE
corresponds to both players playing the pure strategy $D$.
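The same conclusion can be reached by a brute-force search. The short Python
sketch below scans a grid of mixed strategies $(p,q)$, using the payoff
functions derived above, and keeps only those profiles from which neither
player can gain by a unilateral deviation; it returns the single profile
$p^{\ast}=q^{\ast}=0$.
\begin{verbatim}
import numpy as np

# mixed-strategy payoffs of the Prisoners' Dilemma, matrix (PDmatrix1);
# p and q are the probabilities of playing C for Alice and Bob
def P_A(p, q): return -p + 4*q - p*q + 1
def P_B(p, q): return 4*p - q - p*q + 1

grid = np.linspace(0, 1, 101)
def is_nash(p, q, eps=1e-9):
    no_gain_A = all(P_A(p, q) >= P_A(pp, q) - eps for pp in grid)
    no_gain_B = all(P_B(p, q) >= P_B(p, qq) - eps for qq in grid)
    return no_gain_A and no_gain_B

print([(float(p), float(q)) for p in grid for q in grid if is_nash(p, q)])
# prints [(0.0, 0.0)]: both players defect
\end{verbatim}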
\subsection{Nash equilibrium in the Battle of Sexes}
Similar to the case of PD we assume that the numbers $p,q\in\lbrack0,1]$ are
the probabilities with which Alice and Bob play the strategy $S_{1}$,
respectively. They then play $S_{2}$ with the probabilities $(1-p)$ and
$(1-q)$, respectively. Players' payoffs for the BoS matrix (\ref{BoSMatrix})
are \cite{Marinatto1}:
\begin{align}
P_{A}(p,q) & =p\left[ q(\alpha-2\gamma+\beta)+\gamma-\beta\right]
+q(\gamma-\beta)+\beta\nonumber\\
P_{B}(p,q) & =q\left[ p(\alpha-2\gamma+\beta)+\gamma-\alpha\right]
+p(\gamma-\alpha)+\alpha
\end{align}
The NE $(p^{\ast},q^{\ast})$ is then found from the inequalities:
\begin{align}
P_{A}(p^{\ast},q^{\ast})-P_{A}(p,q^{\ast}) & =(p^{\ast}-p)\left[ q^{\ast
}(\alpha+\beta-2\gamma)-\beta+\gamma\right] \geq0\nonumber\\
P_{B}(p^{\ast},q^{\ast})-P_{B}(p^{\ast},q) & =(q^{\ast}-q)\left[ p^{\ast
}(\alpha+\beta-2\gamma)-\alpha+\gamma\right] \geq0
\end{align}
Three NE arise:
\subsubsection{1. $p_{1}^{\ast}=q_{1}^{\ast}=1,$}
Both players play the pure strategy $S_{1}$. The Nash inequalities are
\begin{align}
P_{A}(1,1)-P_{A}(p,1) & =(1-p)(\alpha-\gamma)\geq0\nonumber\\
P_{B}(1,1)-P_{B}(1,q) & =(1-q)(\beta-\gamma)\geq0
\end{align}
and the payoffs they obtain are
\begin{equation}
P_{A}(1,1)=\alpha\text{ \ \ }P_{B}(1,1)=\beta
\end{equation}
\subsubsection{2. $p_{2}^{\ast}=q_{2}^{\ast}=0,$}
Both players now play the pure strategy $S_{2}$ and the Nash inequalities are
\begin{align}
P_{A}(0,0)-P_{A}(p,0) & =p(\beta-\gamma)\geq0\nonumber\\
P_{B}(0,0)-P_{B}(0,q) & =q(\alpha-\gamma)\geq0
\end{align}
The players get
\begin{equation}
P_{A}(0,0)=\beta\text{ \ \ }P_{B}(0,0)=\alpha
\end{equation}
\subsubsection{3. $p_{3}^{\ast}=\frac{\alpha-\gamma}{\alpha+\beta-2\gamma},$
$\ \ q_{3}^{\ast}=\frac{\beta-\gamma}{\alpha+\beta-2\gamma}.$}
Players play a \emph{mixed} strategy because $p_{3}^{\ast},q_{3}^{\ast}
\in(0,1)$. The players' payoffs are
\begin{equation}
P_{A}(p_{3}^{\ast},q_{3}^{\ast})=P_{B}(p_{3}^{\ast},q_{3}^{\ast})=\frac
{\alpha\beta-\gamma^{2}}{\alpha+\beta-2\gamma}
\end{equation}
Compared to the equilibria $(p_{1}^{\ast},q_{1}^{\ast})$ and $(p_{2}^{\ast
},q_{2}^{\ast})$ the players now get strictly smaller payoffs because
\begin{equation}
\gamma<P_{A(B)}(p_{3}^{\ast},q_{3}^{\ast})<\beta<\alpha
\end{equation}
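These three equilibria and their payoffs are easily reproduced numerically.
The Python fragment below evaluates the payoff functions given above for the
illustrative choice $\alpha=3$, $\beta=2$, $\gamma=1$.
\begin{verbatim}
# Battle of Sexes, matrix (BoSMatrix), with illustrative values alpha>beta>gamma
alpha, beta, gamma = 3.0, 2.0, 1.0

def P_A(p, q):
    return p*(q*(alpha + beta - 2*gamma) + gamma - beta) + q*(gamma - beta) + beta
def P_B(p, q):
    return q*(p*(alpha + beta - 2*gamma) + gamma - alpha) + p*(gamma - alpha) + alpha

p3 = (alpha - gamma) / (alpha + beta - 2*gamma)
q3 = (beta - gamma) / (alpha + beta - 2*gamma)
for (p, q) in [(1, 1), (0, 0), (p3, q3)]:
    print(p, q, P_A(p, q), P_B(p, q))
# (1,1) -> (alpha, beta); (0,0) -> (beta, alpha);
# (p3,q3) -> (alpha*beta - gamma^2)/(alpha + beta - 2*gamma) for both players
\end{verbatim}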
\section{Evolutionary game theory\label{EGT}}
Game theory suggests static `solutions' obtained by analyzing the behavior of
`rational agents'. Such models are obviously unrealistic because real life
behavior is shaped by trial and error. Real life `players' are subjected to
the pressures of adaptation and are forced to learn individually. In
situations where players do not have the capacity to learn individually,
natural selection favors better players through step-wise adaptation. John von
Neumann and Oskar Morgenstern, in their pioneering work on game theory
\cite{Neumann}, also realized the need for such a dynamic approach towards
game theory. After all, the word game itself suggests `motion' in one way or
the other.
In the 1970s Maynard Smith developed game-theoretic models of evolution in a
population which is subjected to Darwinian selection. In his book
\textit{Evolution and the Theory of Games} \cite{Smith} he diverted attention
away from the prevalent view -- treating players as rational beings -- and
presented an evolutionary approach in game theory. This approach can be seen
as a large population model of adjustment to a NE i.e. an adjustment of
population segments by evolution as opposed to learning. Maynard Smith's model
consisted of strategic interaction among the members of a population
continuing over time in which higher payoff strategies gradually displace
strategies with lower payoffs. To distinguish evolutionary from revolutionary
changes some inertia is involved, guaranteeing that aggregate behavior does
not change too abruptly.
The most important feature of evolutionary game theory is that the assumption of
rational players -- borrowed from game theory -- is no longer crucial. This
is achieved when players' payoffs are equated to success in terms of their
survival. Players in an evolutionary model are programmed to play only one
strategy. Step-wise selection assures survival of better players at the
expense of others. In other words, an initial collection of strategies play a
tournament and the average scores are recorded. Successful strategies increase
their share of the population. Changing the population mix changes the
expected payoff. Again successful strategies increase in the population, and
the expected payoff is calculated. A population equilibrium occurs when the
population shares are such that the expected payoffs for all strategies are equal.
Many successful applications of evolutionary game theory appeared in
mathematical biology \cite{Broom et al} to predict the behavior of bacteria
and insects, which can hardly be said to think at all.
Economists, too, were uncomfortable with a game theory that mostly concerned
itself with hyper-rational players who are always trying to maximize their
payoffs. Hence the population setting of game theory, invented by mathematical
biologists, was also welcomed by economists. Even John Nash himself, as it was found
later \cite{Hofbauer}, had a population setting in his mind when he introduced
his equilibrium notion. In his unpublished thesis he wrote `\textit{it is
unnecessary to assume that the participants have...... the ability to go
through any complex reasoning process. But the participants are supposed to
accumulate empirical information on the various pure strategies at their
disposal.......We assume that there is a population .......of
participants......and that there is a stable average frequency with which a
pure strategy is employed by the ``average member'' of the appropriate
population}'\cite{JohnNash1,Leonard}.
\subsection{Evolutionarily stable strategies}
Maynard Smith introduced the idea of an Evolutionarily Stable Strategy (ESS)
in a seminal paper `The logic of animal conflict' \cite{Smith Price}. In rough
terms \cite{MarkBroom3} an ESS is a strategy which, if played by almost all
the members of a population, cannot be displaced by a small invading group
that plays any alternative strategy. So that, a population playing an ESS can
withstand invasion by a small group. The concept was developed by combining
ingredients from game theory and some work on the evolution of the sex ratio
\cite{CanningsOrive}.
Maynard Smith considers a large population in which members are matched
repeatedly and randomly in pairs to play a bi-matrix game. The players are
anonymous, that is, any pair of players plays the same symmetric bi-matrix
game. Also the players are identical with respect to their set of strategies
and their payoff functions. The symmetry of a bi-matrix game means that for a
strategy set $S$ Alice's payoff when she plays $S_{1}\in S$ and Bob plays
$S_{2}\in S$ is the same as Bob's payoff when he plays $S_{1}$ and Alice plays
$S_{2}$. In game theory \cite{Rasmusen89}\ a symmetric bi-matrix game is
represented by an expression $G=(M,M^{T})$ where $M$ is the first player's
payoff matrix and $M^{T}$, its transpose, is the second players' payoff
matrix. In a symmetric pair-wise contest $P(x,y)$ gives the payoff to an
$x$-player against a $y$-player. In such a contest an exchange of strategies by
the two players also exchanges their respective payoffs. Hence, a player's payoff
is defined by his/her strategy and \emph{not} by his/her identity.
Mathematically speaking, \cite{Weibull} $x$ is an ESS when for each strategy
$y\neq x$ the inequality:
\begin{equation}
P[x,(1-\epsilon)x+\epsilon y]>P[y,(1-\epsilon)x+\epsilon y] \label{ESSDefIneq}
\end{equation}
should hold for all sufficiently small $\epsilon>0$. The left side of
(\ref{ESSDefIneq}) is the payoff to the strategy $x$ when played against the
strategy $(1-\epsilon)x+\epsilon y$ where $\epsilon\in\left[ 0,\epsilon
_{0}\right) $. If $\epsilon$ becomes greater than $\epsilon_{0}$, the
inequality (\ref{ESSDefIneq}) no longer holds and $x$ no longer remains an ESS.
The situation when $\epsilon>\epsilon_{0}$ is also known as the
\emph{invasion} by the mutant strategy. The quantity $\epsilon_{0}$ is called
the \emph{invasion barrier}.
To be precise \cite{Hofbauer}, a strategy $x$ is an ESS if:
\begin{itemize}
\item for each mutant strategy $y$ there exists a positive invasion barrier;
\item the invasion barrier is such that if the population share of
individuals playing the mutant strategy $y$ falls below it, then $x$
earns a higher expected payoff than $y$.
\end{itemize}
This condition for an ESS can be shown \cite{Smith} equivalent to the
following two requirements:
\begin{align}
1.\text{ \ \ \ }P(x,x) & >P(y,x)\nonumber\\
2.\text{ If\ }P(x,x) & =P(y,x)\ \text{then}\ P(x,y)>P(y,y) \label{DefESS}
\end{align}
An ESS, therefore, is a symmetric NE which also possesses a property of
stability against small mutations. Condition $1$ in the definition
(\ref{DefESS}) shows $(x,x)$ is a NE for the bi-matrix game $G=(M,M^{T})$ if
$x$ is an ESS. Nevertheless, the converse is not true. That is, if $(x,x)$ is
a NE then $x$ is an ESS only if $x$ satisfies condition $2$ in the definition.
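The two requirements can be checked mechanically for a given symmetric
bi-matrix game, reading them as: for every mutant $y\neq x$, either condition
$1$ holds strictly or, in the case of equality, condition $2$ holds. The Python
sketch below does this for the Prisoners' Dilemma matrix (\ref{PDmatrix1})
against a finite grid of mutant strategies and confirms that $D$ is an ESS
while $C$ is not.
\begin{verbatim}
import numpy as np

# symmetric pairwise contest: P(x, y) = x^T M y, with M the row player's payoff
# matrix of the Prisoners' Dilemma (strategies ordered as C, D)
M = np.array([[3.0, 0.0],
              [5.0, 1.0]])
def P(x, y): return x @ M @ y

def is_ess(x, mutants, eps=1e-9):
    # conditions (DefESS), checked against a finite set of mutant strategies
    for y in mutants:
        if np.allclose(x, y):
            continue
        if P(x, x) < P(y, x) - eps:
            return False                       # condition 1 violated
        if abs(P(x, x) - P(y, x)) < eps and P(x, y) <= P(y, y) + eps:
            return False                       # condition 2 violated
    return True

mutants = [np.array([t, 1 - t]) for t in np.linspace(0, 1, 51)]
print(is_ess(np.array([0.0, 1.0]), mutants))   # pure D: True
print(is_ess(np.array([1.0, 0.0]), mutants))   # pure C: False
\end{verbatim}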
In evolutionary game theory the concept of \emph{fitness} \cite{Prestwich} of
a strategy is considered crucial. Suppose $x$ and $y$ are pure strategies
played in a population setting. Their fitnesses are defined as:
\begin{align}
W(x) & =P(x,x)F_{x}+P(x,y)F_{y}\nonumber\\
W(y) & =P(y,x)F_{x}+P(y,y)F_{y} \label{fitnesses}
\end{align}
where $F_{x}$ and $F_{y}$\ are frequencies (the relative proportions) of the
pure strategies $x$ and $y$ respectively.
The concept of evolutionary stability provided much of the motivation for the
development of evolutionary game theory. Presently, the ESS concept is
considered the central model of the evolutionary dynamics of a population of
interacting individuals. It asks, and finds an answer to, a basic question:
Which states of a population -- during the course of a selection process that
favors better performing strategies -- are stable against perturbations
induced by mutations? The theory is inspired by Darwinian natural selection,
which is formulated as an algorithm called \emph{replicator dynamics}.
Iterated selection among randomly mutating replicators is the important
feature of the dynamics. The dynamics is a mathematical statement saying that
in a population the proportion of players which play better strategies
increases with time. With replicator dynamics being the underlying selection
mechanism in a population, ESSs come out \cite{TaylorJonker}\ as stable
strategies against small perturbations. In other words ESSs are \emph{rest
points} of the replicator dynamics.
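A discrete version of this selection mechanism is easily simulated. The Python
fragment below iterates the replicator step for two pure strategies with the
fitnesses of Eq.~(\ref{fitnesses}), using the Prisoners' Dilemma payoffs; the
population share of the cooperative strategy (an illustrative initial value of
$0.9$) is driven to zero, so the population ends up at the ESS $D$.
\begin{verbatim}
# discrete replicator dynamics for two pure strategies x (= C) and y (= D),
# with the fitnesses of Eq. (fitnesses) and Prisoners' Dilemma payoffs
P = {('x', 'x'): 3, ('x', 'y'): 0, ('y', 'x'): 5, ('y', 'y'): 1}
Fx = 0.9                                   # illustrative initial share of x
for _ in range(60):
    Fy = 1 - Fx
    Wx = P[('x', 'x')]*Fx + P[('x', 'y')]*Fy
    Wy = P[('y', 'x')]*Fx + P[('y', 'y')]*Fy
    Wbar = Fx*Wx + Fy*Wy                   # mean fitness of the population
    Fx = Fx * Wx / Wbar                    # share grows with relative fitness
print(Fx)                                  # ~0: the population plays y = D
\end{verbatim}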
\subsection{ESS as a refinement of Nash equilibrium}
In the history of game theory, elaborate definitions of rationality on
behalf of the players led to many refinements \cite{MyersonRB} of the NE
concept. In situations where multiple NE appear as potential solutions to a
game, a refinement is required to prefer some over the others. Refinements of
NE are popular as well as numerous in classical game theory. Speaking
historically, the set of refinements became so large that eventually almost
any NE could be justified in terms of someone or other's refinement. The
concept of an ESS is a refinement on the set of symmetric Nash equilibria
\cite{Weibull}. Apart from being a symmetric NE it has robustness against
small mutations \cite{Cressman}. For symmetric bi-matrix games this
relationship is described as \cite{Gerard van}:
\begin{equation}
\bigtriangleup^{ESS}\subset\bigtriangleup^{PE}\subset\bigtriangleup^{NE}
\end{equation}
where $\bigtriangleup^{PE}\neq\emptyset$ and $\bigtriangleup^{NE}$, $\bigtriangleup
^{PE}$, $\bigtriangleup^{ESS}$ are the sets of symmetric Nash equilibria,
symmetric proper equilibrium, and ESSs respectively.
\chapter{Review of quantum mechanics}
\textit{Quantum mechanics: Real Black Magic Calculus}
\textit{-- Albert Einstein}
\section{Introduction}
Quantum theory is the theoretical basis of modern physics that explains the
nature and behavior of matter and energy on the atomic and subatomic level.
The physical systems at these levels are known as \emph{quantum systems}. Thus
quantum mechanics is a mathematical model of the physical world that describes
the behavior of quantum systems. A physical model is characterized by how it
represents \emph{physical states}, \emph{observables}, \emph{measurements},
and \emph{dynamics} of the system under consideration. A quantum description
of a physical model is based on the following concepts:
\section{Fundamental concepts}
A \emph{state} is a complete description of a physical system. Quantum
mechanics associates a ray in \emph{Hilbert space} to the physical state of a
system. What is Hilbert space?
\begin{itemize}
\item Hilbert space is a complex linear vector space. In Dirac's ket-bra
notation states are denoted by \emph{ket vectors} $\left| \psi\right\rangle $
in Hilbert space. Any two state vectors differing only by an overall phase
factor $e^{i\theta}$ ($\theta$ real) represent the same state.
\item Corresponding to a ket vector $\left| \psi\right\rangle $ there is
another kind of state vector called \emph{bra vector}, which is denoted by
$\left\langle \psi\right| $. The \emph{inner product} of a bra $\left\langle
\psi\right| $ and ket $\left| \phi\right\rangle $ is defined as follows:
\begin{align}
\left\langle \psi\right| \left\{ \left| \phi_{1}\right\rangle +\left|
\phi_{2}\right\rangle \right\} & =\left\langle \psi\mid\phi_{1}
\right\rangle +\left\langle \psi\mid\phi_{2}\right\rangle \nonumber\\
\left\langle \psi\right| \left\{ c\left| \phi_{1}\right\rangle \right\}
& =c\left\langle \psi\mid\phi_{1}\right\rangle
\end{align}
for any $c\in\mathbf{C}$, the set of complex numbers. There is a one-to-one
correspondence between the bras and the kets. Furthermore
\begin{align}
\left\langle \psi\mid\phi\right\rangle & =\left\langle \phi\mid
\psi\right\rangle ^{\ast}\nonumber\\
\left\langle \psi\mid\psi\right\rangle & >0\text{ for }\left|
\psi\right\rangle \neq0
\end{align}
\item The state vectors in Hilbert space are normalized which means that the
inner product of a state vector with itself gives unity, i.e.,
\end{itemize}
\begin{equation}
\left\langle \psi\mid\psi\right\rangle =1
\end{equation}
\begin{itemize}
\item Operations can be performed on a ket $\left| \psi\right\rangle $ and
transform it to another ket $\left| \chi\right\rangle $. There are operations
on kets which are called \emph{linear operators}, which have the following
properties. For a linear operator $\hat{\alpha}$ we have
\end{itemize}
\begin{align}
\hat{\alpha}\left\{ \left| \psi\right\rangle +\left| \chi\right\rangle
\right\} & =\hat{\alpha}\left| \psi\right\rangle +\hat{\alpha}\left|
\chi\right\rangle \nonumber\\
\hat{\alpha}\left\{ c\left| \psi\right\rangle \right\} & =c\hat{\alpha
}\left| \psi\right\rangle
\end{align}
for any $c\in\mathbf{C}$.
\begin{itemize}
\item The sum and product of two linear operators $\hat{\alpha}$ and
$\hat{\beta}$ are defined as:
\begin{align}
\left\{ \hat{\alpha}+\hat{\beta}\right\} \left| \psi\right\rangle &
=\hat{\alpha}\left| \psi\right\rangle +\hat{\beta}\left| \psi\right\rangle
\nonumber\\
\left\{ \hat{\alpha}\hat{\beta}\right\} \left| \psi\right\rangle &
=\hat{\alpha}\left\{ \hat{\beta}\left| \psi\right\rangle \right\}
\end{align}
Generally speaking $\hat{\alpha}\hat{\beta}$ is not necessarily equal to
$\hat{\beta}\hat{\alpha}$, i.e. $\left[ \hat{\alpha},\hat{\beta}\right] \neq0$
\item The \emph{adjoint} $\hat{\alpha}^{\dagger}$ of an operator $\hat
{\alpha}$ is defined by the requirement:
\begin{equation}
\left\langle \psi\mid\hat{\alpha}\chi\right\rangle =\left\langle \hat{\alpha
}^{\dagger}\psi\mid\chi\right\rangle
\end{equation}
for all kets $\left| \psi\right\rangle $, $\left| \chi\right\rangle $ in the
Hilbert space.
\item An operator $\hat{\alpha}$ is said to be \emph{self-adjoint} or
\emph{Hermitian} if:
\begin{equation}
\hat{\alpha}^{\dagger}=\hat{\alpha}
\end{equation}
\end{itemize}
Hermitian operators are the operator counterparts of real numbers. In
quantum mechanics, the dynamical variables of physical systems are represented
by Hermitian operators. More specifically, every experimental arrangement in
quantum mechanics is associated with a set of operators describing the
dynamical variables that can be observed. These operators are usually called
\emph{observables}.
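These defining relations are easy to check numerically. The following is a
minimal sketch in Python with NumPy (the dimension, random seed and operator
are arbitrary illustrative choices) verifying the adjoint relation
$\left\langle \psi\mid\hat{\alpha}\chi\right\rangle =\left\langle \hat{\alpha
}^{\dagger}\psi\mid\chi\right\rangle $ and the Hermiticity of $\hat{\alpha
}+\hat{\alpha}^{\dagger}$:
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
dim = 4

# Random normalized kets |psi>, |chi> in a 4-dimensional Hilbert space.
psi = rng.normal(size=dim) + 1j * rng.normal(size=dim)
chi = rng.normal(size=dim) + 1j * rng.normal(size=dim)
psi /= np.linalg.norm(psi)
chi /= np.linalg.norm(chi)

# A random linear operator and its adjoint (conjugate transpose).
alpha = rng.normal(size=(dim, dim)) + 1j * rng.normal(size=(dim, dim))
alpha_dag = alpha.conj().T

# Defining property of the adjoint: <psi|alpha chi> = <alpha^dag psi|chi>.
lhs = np.vdot(psi, alpha @ chi)          # np.vdot conjugates its first argument
rhs = np.vdot(alpha_dag @ psi, chi)
print(np.allclose(lhs, rhs))             # True

# alpha + alpha^dag is always Hermitian (self-adjoint).
herm = alpha + alpha_dag
print(np.allclose(herm, herm.conj().T))  # True
\end{verbatim}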
\section{Postulates of quantum mechanics}
For an isolated quantum system, quantum theory is based on the following postulates:
\begin{itemize}
\item A ket vector $\left| \psi\right\rangle $ in Hilbert space gives a
\emph{complete description} of the state of the physical system.
\item Dynamics are specified by \emph{Hermitian operators} and time evolution
is given by \emph{Schr\"{o}dinger's equation:}
\end{itemize}
\begin{equation}
i\hbar\frac{\partial\left| \psi\right\rangle }{\partial t}=\hat{H}\left|
\psi\right\rangle \label{SchrodingerEq}
\end{equation}
where $\hat{H}$ is the \emph{Hamiltonian operator}. Schr\"{o}dinger's equation
is a \textit{deterministic equation of motion} that allows one to determine
the state vector at any time once the initial conditions are provided.
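For a time-independent Hamiltonian, Eq. (\ref{SchrodingerEq}) is solved by the
unitary propagator $\exp(-i\hat{H}t/\hbar)$. The following minimal sketch
(with $\hbar=1$ and an arbitrarily chosen two-level Hamiltonian, both
assumptions made only for illustration) integrates the equation numerically
and checks that the evolution preserves the norm of the state vector:
\begin{verbatim}
import numpy as np
from scipy.linalg import expm

hbar = 1.0                                  # natural units (assumption)
H = np.array([[1.0, 0.5],                   # an arbitrary Hermitian Hamiltonian
              [0.5, -1.0]])

psi0 = np.array([1.0, 0.0], dtype=complex)  # initial condition |psi(0)>

def evolve(psi, t):
    """Solve i hbar d|psi>/dt = H |psi| for a time-independent H."""
    U = expm(-1j * H * t / hbar)            # unitary propagator exp(-iHt/hbar)
    return U @ psi

psi_t = evolve(psi0, t=2.0)
print(np.linalg.norm(psi_t))                # 1.0: unitary evolution preserves the norm
\end{verbatim}
Determinism is manifest here: given $\left| \psi(0)\right\rangle $ and
$\hat{H}$, the state at any later time is fixed.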
Classical games can be played when players share coins. Coins are physical
systems that represent classical bits, each of which takes one of the two
possible values $\left\{ 0,1\right\} $, or simply heads and tails. A bit is
also the indivisible unit of classical information. For example, in
non-cooperative games coins are distributed among the players, who then
perform their actions on them. At the end of the game the coins are collected
by a referee who, after observing them, rewards the players. The earliest
suggestions for playing quantum games can be thought of as letting players act
on \emph{qubits}, which are a quantum generalization of classical two-level
systems like a coin.
\section{Qubits}
In two-dimensional Hilbert space an orthonormal basis can be written as
$\left\{ \left| 0\right\rangle ,\left| 1\right\rangle \right\} $. A
general qubit state is then
\begin{equation}
\left| \psi\right\rangle =a\left| 0\right\rangle +b\left| 1\right\rangle
\label{QubitState}
\end{equation}
where $a,b\in\mathbf{C}$ satisfy $\left| a\right| ^{2}+\left| b\right|
^{2}=1$. In other words, $\left| \psi\right\rangle $ is a unit vector in
two-dimensional complex vector space for which a particular basis has been
fixed. One of the simplest physical examples of a qubit is the spin $1/2$ of
an electron. The spin-up and spin-down states of an electron can be taken as
the states $\left| 0\right\rangle $, $\left| 1\right\rangle $ of a qubit.
A non-cooperative classical game can be played by coin distribution and
players' rewards are decided after observing the coins. Likewise, a
non-cooperative quantum game can be played by distributing qubits among the
players. After the players' moves the qubits are brought together for an
observation which is known as \emph{quantum measurement}.
\section{Quantum measurement}
Unlike observation of coins by a referee who organizes a classical game, the
concept of measurement of a quantum state of many qubits is subtle and lies at
the heart of quantum theory. The \emph{measurement postulate} of quantum
mechanics states \cite{Preskill}:
\begin{itemize}
\item Mutually exclusive measurement outcomes correspond to orthogonal
\emph{projection operators} $\left\{ \hat{P}_{0},\text{ }\hat{P}
_{1},...\right\} $ and the probability of a particular outcome $i$ is
$\left\langle \psi\mid\hat{P}_{i}\mid\psi\right\rangle $. If the outcome $i$
is attained the (normalized) quantum state after the measurement becomes
\end{itemize}
\begin{equation}
\frac{\hat{P}_{i}\left| \psi\right\rangle }{\sqrt{\left\langle \psi\mid
\hat{P}_{i}\mid\psi\right\rangle }}
\end{equation}
Consider a measurement made on a qubit whose state vector resides in
two-dimensional Hilbert space. A measuring device has an associated
\emph{orthonormal basis} with respect to which the quantum measurement takes
place. Measurement transforms the state of the qubit into one of the measuring
device's \emph{basis vectors}. Assume the measurement is performed on the
qubit in the state (\ref{QubitState}) and that it projects onto the basis
$\left\{ \left| 0\right\rangle ,\left| 1\right\rangle \right\} $. In this case
the measurement postulate says that the outcome $\left| 0\right\rangle $ will
occur with probability $\left| a\right| ^{2}$ and the outcome $\left|
1\right\rangle $ with probability $\left| b\right| ^{2}$.
Furthermore, measurement of a quantum state changes the state according to the
result of the measurement. That is, if the measurement of $\left|
\psi\right\rangle =a\left| 0\right\rangle +b\left| 1\right\rangle $ results
in $\left| 0\right\rangle $, then the state $\left| \psi\right\rangle $
changes to $\left| 0\right\rangle $ and a second measurement, with respect to
the same basis, will return $\left| 0\right\rangle $ with probability $1$.
Thus, unless the original state happened to be one of the basis vectors,
measurement will change that state, and it is \emph{not} possible to determine
what the original state was.
Although a qubit can be put in infinitely many superposition states, only a
single classical bit's worth of information can be extracted from it. This is
because the measurement changes the state of the qubit to one of the basis states.
Measurement made with orthogonal projection operators $\left\{ \hat{P}
_{0},\text{ }\hat{P}_{1},...\right\} $ is also called \emph{projective
measurement}.
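The measurement postulate is straightforward to mimic numerically. The sketch
below (a toy example in Python with NumPy; the amplitudes and the number of
trials are arbitrary choices) draws outcomes for the state (\ref{QubitState})
using the projectors $\hat{P}_{0}=\left| 0\right\rangle \left\langle 0\right| $
and $\hat{P}_{1}=\left| 1\right\rangle \left\langle 1\right| $ and applies the
post-measurement update:
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(1)

a, b = 0.6, 0.8j                    # amplitudes with |a|^2 + |b|^2 = 1
psi = np.array([a, b])              # |psi> = a|0> + b|1>

ket0 = np.array([1.0, 0.0])
ket1 = np.array([0.0, 1.0])
projectors = [np.outer(ket0, ket0.conj()),   # P_0 = |0><0|
              np.outer(ket1, ket1.conj())]   # P_1 = |1><1|

def measure(psi):
    """Projective measurement in the computational basis."""
    probs = [np.vdot(psi, P @ psi).real for P in projectors]  # <psi|P_i|psi>
    i = rng.choice(len(projectors), p=probs)
    post = projectors[i] @ psi / np.sqrt(probs[i])            # collapse, renormalize
    return i, post

outcomes = [measure(psi)[0] for _ in range(10000)]
print(np.mean(np.array(outcomes) == 0))      # close to |a|^2 = 0.36
\end{verbatim}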
\subsection{Positive Operator-Valued Measure}
Apart from projective measurement, quantum theory also uses another important
concept of measurement, whose implementation can be useful. It is the concept
of a positive operator-valued measure (POVM). A POVM consists of a set of
positive semi-definite Hermitian operators that add up to the identity. The
probability of a particular measurement outcome is given by the expectation
value of the POVM element corresponding to that outcome. POVMs are sometimes
also referred to as ``generalized measurements''.
Nielsen and Chuang \cite{Nielsen}\ have discussed a simple example showing the
utility of POVM formalism. Suppose Alice gives Bob a qubit prepared in one of
two states, $\left| \psi_{1}\right\rangle =\left| 0\right\rangle $ or
$\left| \psi_{2}\right\rangle =(\left| 0\right\rangle +\left|
1\right\rangle )/\sqrt{2}$. It can be shown that there is no quantum
measurement capable of distinguishing the two states with perfect reliability.
However, using a POVM Bob can perform a measurement that distinguishes the two
states some of the time, but never makes an error of misidentification.
In this connection Neumark's theorem \cite{AsherPeres} is worth mentioning: it
states that, at least in principle, any POVM can be implemented by adjoining an
ancilla\footnote{Ancilla bits are extra scratch qubits that quantum operations
often use.} in a known state, followed by a standard projective measurement in
the enlarged Hilbert space.
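To make the POVM idea concrete, here is a small numerical sketch of the
discrimination example mentioned above (the operator choices follow the
standard textbook construction and are quoted here only as an illustration).
The three elements are positive and sum to the identity; outcome $1$ can only
occur for $\left| \psi_{2}\right\rangle $, outcome $2$ only for $\left|
\psi_{1}\right\rangle $, and outcome $3$ is inconclusive:
\begin{verbatim}
import numpy as np

ket0 = np.array([1.0, 0.0])
ket1 = np.array([0.0, 1.0])
plus = (ket0 + ket1) / np.sqrt(2)        # |psi_2> = (|0> + |1>)/sqrt(2)
minus = (ket0 - ket1) / np.sqrt(2)

c = np.sqrt(2) / (1 + np.sqrt(2))
E1 = c * np.outer(ket1, ket1)            # can fire only if the state was |psi_2>
E2 = c * np.outer(minus, minus)          # can fire only if the state was |psi_1> = |0>
E3 = np.eye(2) - E1 - E2                 # inconclusive outcome

# POVM requirements: positivity and completeness.
for E in (E1, E2, E3):
    assert np.all(np.linalg.eigvalsh(E) >= -1e-12)
assert np.allclose(E1 + E2 + E3, np.eye(2))

def probs(state):
    return [np.vdot(state, E @ state).real for E in (E1, E2, E3)]

print(probs(ket0))   # outcome 1 has probability 0: |psi_1> is never misidentified
print(probs(plus))   # outcome 2 has probability 0: |psi_2> is never misidentified
\end{verbatim}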
\section{Pure and mixed states}
In quantum mechanics a \emph{pure state} is defined as a quantum state that
can be described by a ket vector:
\begin{equation}
\left| \psi\right\rangle =\underset{k}{\sum}c_{k}\left| \psi_{k}
\right\rangle
\end{equation}
Such a state evolves in time according to the time-dependent Schr\"{o}dinger
equation (\ref{SchrodingerEq}). A \emph{mixed quantum state} is a statistical
mixture of pure states. In such a state the exact quantum-mechanical state of
the system is not known and only the probability of the system being in a
certain state can be given, which is accomplished by the \emph{density matrix}.
\section{Density matrix}
A quantum game involves two or more players having access to parts of a
quantum system. These parts are usually the subsystems of a bigger quantum
system. To use the system for playing a game one must know its detailed
statistical state. Quantum mechanics uses the concept of a density matrix to
describe the statistical state of a quantum system. It is the
quantum-mechanical analogue of a phase-space density (probability distribution
of position and momentum) in classical statistical mechanics. Suppose the
quantum state of a system is expressed in terms of a denumerable orthonormal
basis $\left\{ \left| \phi_{n}\right\rangle ,\text{ }n=1,2,3...\right\} $.
The state $\left| \psi(t)\right\rangle $ of the system at time $t$ in the
basis is given as
\begin{equation}
\left| \psi(t)\right\rangle =\underset{n}{\sum}a_{n}(t)\left| \phi
_{n}\right\rangle
\end{equation}
Let $\left| \psi(t)\right\rangle $ be normalized
\begin{equation}
\left\langle \psi(t)\mid\psi(t)\right\rangle =1=\underset{n}{\sum}\underset
{m}{\sum}a_{n}(t)a_{m}^{\ast}(t)\left\langle \phi_{m}\mid\phi_{n}\right\rangle
=\underset{n}{\sum}\left| a_{n}(t)\right| ^{2}
\end{equation}
The matrix elements of a self-adjoint operator $\hat{O}$ in the basis are
\begin{equation}
\hat{O}_{mn}=\left\langle \phi_{m}\mid\hat{O}\phi_{n}\right\rangle
=\left\langle \hat{O}\phi_{m}\mid\phi_{n}\right\rangle =\left\langle \phi
_{m}\right| \hat{O}\left| \phi_{n}\right\rangle
\end{equation}
The average (expectation) value of $\hat{O}$ at time $t$ for the system in
state $\left| \psi(t)\right\rangle $ is
\begin{equation}
\left\langle \hat{O}\right\rangle =\left\langle \psi(t)\mid\hat{O}
\psi(t)\right\rangle =\underset{n}{\sum}\underset{m}{\sum}a_{m}^{\ast}
(t)a_{n}(t)\hat{O}_{mn}
\end{equation}
Consider the operator $\left| \psi(t)\right\rangle \left\langle
\psi(t)\right| $. It has matrix elements
\begin{equation}
\left\langle \phi_{m}\mid\psi(t)\right\rangle \left\langle \psi(t)\mid\phi
_{n}\right\rangle =a_{m}(t)a_{n}^{\ast}(t)
\end{equation}
The calculation of $\left\langle \hat{O}\right\rangle $ involves these matrix
elements. Hence define
\begin{equation}
\rho(t)=\left| \psi(t)\right\rangle \left\langle \psi(t)\right|
\label{DensityMatrixProj}
\end{equation}
which is known as the \emph{density matrix} of the pure state $\left|
\psi(t)\right\rangle $. It is a Hermitian operator, acting on the Hilbert
space of the system in question, with matrix elements
\begin{equation}
\rho_{mn}=\left\langle \phi_{m}\mid\rho(t)\phi_{n}\right\rangle =a_{m}
(t)a_{n}^{\ast}(t)
\end{equation}
Eq. (\ref{DensityMatrixProj}) shows that for a pure state the density matrix
is given by the projection operator of this state.
Since $\left| \psi(t)\right\rangle $ is normalized, we also have
\begin{equation}
1=\underset{n}{\sum}\left| a_{n}(t)\right| ^{2}=\underset{n}{\sum}\rho
_{nn}(t)=\text{Tr}\left[ \rho(t)\right]
\end{equation}
The expectation value of the observable $\hat{O}$ can now be re-expressed
using the density operator:
\begin{align}
\left\langle \hat{O}\right\rangle & =\underset{m}{\sum}\underset{n}{\sum
}a_{m}^{\ast}(t)a_{n}(t)\hat{O}_{mn}=\underset{m}{\sum}\underset{n}{\sum}
\rho_{nm}(t)\hat{O}_{mn}\nonumber\\
& =\underset{n}{\sum}\left[ \rho(t)\hat{O}\right] _{nn}=\text{Tr}\left[
\rho(t)\hat{O}\right]
\end{align}
For a mixed state, where a quantum system is in the state $\left| \psi
_{j}(t)\right\rangle $ with probability $p_{j}$, the density matrix is the sum
of the projectors weighted with the appropriate probabilities:
\begin{equation}
\rho(t)=\underset{j}{\sum}p_{j}\left| \psi_{j}(t)\right\rangle \left\langle
\psi_{j}(t)\right|
\end{equation}
The density matrix is a powerful tool in quantum games because a game usually
involves a multi-partite quantum system. Compared to a description of a
quantum game based on state vectors, the density matrix provides a much more compact notation.
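These relations are easy to verify numerically. The sketch below (with
arbitrarily chosen amplitudes, probabilities and observable, used purely for
illustration) builds the density matrix of a pure qubit state, checks
$\text{Tr}\left[ \rho\right] =1$ and $\left\langle \hat{O}\right\rangle
=\text{Tr}\left[ \rho\hat{O}\right] $, and then forms a mixed state as a
weighted sum of projectors:
\begin{verbatim}
import numpy as np

# A pure state |psi> = a|0> + b|1> and its density matrix rho = |psi><psi|.
psi = np.array([0.6, 0.8j])
rho = np.outer(psi, psi.conj())
print(np.trace(rho).real)                 # 1.0

# Expectation value of a Hermitian observable O in two equivalent ways.
O = np.array([[0.0, 1.0], [1.0, 0.0]])    # e.g. the Pauli x matrix
print(np.vdot(psi, O @ psi).real)         # <psi|O|psi>
print(np.trace(rho @ O).real)             # Tr[rho O], the same number

# A mixed state: |0> with probability 0.25 and |1> with probability 0.75.
kets = [np.array([1.0, 0.0]), np.array([0.0, 1.0])]
p = [0.25, 0.75]
rho_mixed = sum(pj * np.outer(k, k.conj()) for pj, k in zip(p, kets))
print(np.trace(rho_mixed).real)           # 1.0: probabilities sum to one
\end{verbatim}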
\section{Quantum Entanglement}
Some of the most interesting investigations in quantum games concern the
relationship between game-theoretic solution concepts and entanglement present
within the quantum system that players are using to play the game. The
phenomenon of \emph{entanglement} can be traced back to Einstein, Podolsky and
Rosen (EPR)'s famous paper \cite{EPR}\ of 1935. EPR argued that the
quantum-mechanical description of \emph{physical reality} cannot be considered
\emph{complete} because of its rather strange predictions about two particles
that have interacted in the past but are now separated from one another and do
not interact. Quantum mechanics predicts that the particles can be
\emph{entangled} even after separation. Entangled particles have correlated
properties and these correlations are at the heart of the EPR paradox.
Consider a system that can be divided into two subsystems. Assume $H_{A}$ and
$H_{B}$ to be the Hilbert spaces corresponding to the subsystems. Let $\left|
i\right\rangle _{A}$ $($where $i=1,2,...)$ be a complete orthonormal basis for
$H_{A}$, and $\left| j\right\rangle _{B}$ $($where $j=1,2,...)$ be a complete
orthonormal basis for $H_{B}$. In quantum mechanics the Hilbert space
$H_{A}\otimes H_{B}$ (tensor product) is associated to the two subsystems
taken together. The tensor product Hilbert space $H_{A}\otimes H_{B}$ is
spanned by the states $\left| i\right\rangle _{A}\otimes\left|
j\right\rangle _{B}$. By dropping the tensor product sign $\left|
i\right\rangle _{A}\otimes\left| j\right\rangle _{B}$ is also written as
$\left| i\right\rangle _{A}\left| j\right\rangle _{B}$. Any state of the
system $\left| \Psi\right\rangle _{AB}$ is a linear combination of the basis
states $\left| i\right\rangle _{A}\left| j\right\rangle _{B}$ i.e.
\begin{equation}
\left| \Psi\right\rangle _{AB}=\underset{i,j}{\sum c_{ij}}\left|
i\right\rangle _{A}\left| j\right\rangle _{B}
\end{equation}
where $c_{ij}$ are complex coefficients. State $\left| \Psi\right\rangle
_{AB}$ is usually taken to be normalized
\begin{equation}
\underset{i,j}{\sum\left| c_{ij}\right| ^{2}}=1
\end{equation}
A state $\left| \Psi\right\rangle _{AB}$ is a \emph{direct product} state
when it factors into a normalized state $\left| \psi^{(A)}\right\rangle
_{A}=\underset{i}{\sum}c_{i}^{(A)}\left| i\right\rangle _{A}$ in $H_{A}$ and
a normalized state $\left| \psi^{(B)}\right\rangle _{B}=\underset{j}{\sum
}c_{j}^{(B)}\left| j\right\rangle _{B}$ in $H_{B}$ i.e.
\begin{equation}
\left| \Psi\right\rangle _{AB}=\left| \psi^{(A)}\right\rangle _{A}\left|
\psi^{(B)}\right\rangle _{B}=\left( \underset{i}{\sum}c_{i}^{(A)}\left|
i\right\rangle _{A}\right) \left( \underset{j}{\sum}c_{j}^{(B)}\left|
j\right\rangle _{B}\right)
\end{equation}
Now, interestingly, there exist some states in $H_{A}\otimes H_{B}$ that can
not be written as product states. The state $\left( \left| 1\right\rangle
_{A}\left| 1\right\rangle _{B}+\left| 2\right\rangle _{A}\left|
2\right\rangle _{B}\right) \diagup\sqrt{2}$ is one example. When $\left|
\Psi\right\rangle _{AB}$ is not a product state it is called \emph{entangled
}\cite{Hoi-Kwong,Nielsen}.
Quantum games have extensively used entangled states to see the resulting
effects on the solutions of a game. However, it is considered a usual requirement
in quantum games that restricting the players to product states leads to the classical game.
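Whether a given two-qubit state factors into a product can be checked directly
from its coefficient matrix $c_{ij}$: the state is a product state exactly
when $c_{ij}$ has rank one, i.e. a single non-zero Schmidt coefficient. The
following sketch (illustrative only, using the singular value decomposition of
the $2\times2$ coefficient matrix) applies this test to a product state and to
the entangled state quoted above:
\begin{verbatim}
import numpy as np

def schmidt_coefficients(state):
    """Singular values of the 2x2 coefficient matrix c_ij of a two-qubit state."""
    c = state.reshape(2, 2)              # basis order |00>, |01>, |10>, |11>
    return np.linalg.svd(c, compute_uv=False)

def is_product(state, tol=1e-12):
    s = schmidt_coefficients(state)
    return np.sum(s > tol) == 1          # rank one  <=>  product state

product = np.kron([1.0, 0.0], [1 / np.sqrt(2), 1 / np.sqrt(2)])   # |0>(|0>+|1>)/sqrt(2)
entangled = np.array([1.0, 0.0, 0.0, 1.0]) / np.sqrt(2)           # (|00>+|11>)/sqrt(2)

print(is_product(product))       # True
print(is_product(entangled))     # False: two equal Schmidt coefficients 1/sqrt(2)
\end{verbatim}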
\chapter{Quantum games}
\section{Introduction}
It is difficult to trace back the earliest work on quantum games. Many
situations in quantum theory can be reformulated in terms of game theory.
Several works in the literature of quantum physics can be identified as having
a game-like underlying structure. For example:
\begin{itemize}
\item Wiesner's work on quantum money \cite{Wiesner}.
\item Mermin's account \cite{Mermin}\ of Greenberger, Horne, and Zeilinger
(GHZ)'s \cite{GHZ} version of the Bell's theorem \cite{Bell,AsherPeres}
without inequalities.
\item The Elitzur-Vaidman bomb detector \cite{Elitzur-Vaidman}, suggesting an
interferometer in which a photon's amplitude is split between two paths and
then recombined (interaction-free measurement).
\item Vaidman's illustration \cite{Vaidman}\ of GHZ's version of the Bell's theorem.
\item Meyer's demonstration \cite{MeyerDavid} of a quantum version of a
penny-flip game.
\item Eisert, Wilkens, and Lewenstein's \cite{Eisert} quantization of the
famous game of Prisoners' Dilemma (PD).
\end{itemize}
In general, a quantum game can be thought of as strategic manoeuvring of a
quantum system by parties who have the necessary means for such actions. Some of
its essential parts can be recognized as follows:
\begin{itemize}
\item A definition of the physical system which can be analyzed using the
tools of quantum mechanics.
\item Existence of one or more parties, usually referred to as players, who
are able to manipulate the quantum system.
\item Players' knowledge about the quantum system on which they will make
their moves or actions.
\item A definition of what constitutes a strategy for a player.
\item A definition of strategy space for the players, which is the set of all
possible actions that players can take on the quantum system.
\item A definition of the pay-off functions or utilities associated with the
players' strategies.
\end{itemize}
A two-player quantum game, for example, is a set \cite{Eisert1}:
\begin{equation}
\Gamma=(\mathcal{H},\rho,S_{A},S_{B},P_{A},P_{B})
\end{equation}
consisting of an underlying Hilbert space $\mathcal{H}$ of the physical
system, the initial state $\rho$, the sets $S_{A}$ and $S_{B}$ of allowed
quantum operations for two players, and the pay-off functions or utilities
$P_{A}$ and $P_{B}$. In most of the existing set-ups to play quantum games the
initial state $\rho$ is the state of one or more qubits. More complex quantum
systems like qutrits (three-dimensional quantum systems) or even qudits
(d-dimensional quantum systems) can also be used to play quantum games.
\section{Why games in the quantum world?}
The question why game theory can be interesting in the quantum world has been
addressed in the earliest suggestions for quantum games. Some of the stated
reasons \cite{MeyerDavid,Eisert} are:
\begin{itemize}
\item Classical game theory is based on probability to a large extent.
Generalizing it to quantum probability is fundamentally interesting.
\item Quantum algorithms may be thought of as games between classical and
quantum agents. Only a few quantum algorithms are known to date. It appears
reasonable that an analysis of quantum games may help in finding new quantum algorithms.
\item There is an intimate connection between the theory of games and theory
of quantum communication. Eavesdropping \cite{Ekert,Gisin} and optimal cloning
\cite{WernerRF} can readily be conceived as games between players.
\item Quantum mechanics may assure fairness in remote gambling
\cite{Goldenberg}.
\item If the `Selfish Gene' \cite{Dawkins} is a reality then the games of
survival are already being played at molecular level, where quantum mechanics
dictates the rules.
\end{itemize}
\section{Examples of quantum games}
As the subject of quantum games has developed during recent years, many
examples have been put forward illustrating how such games can differ
from their classical analogues. Here are some of the well-known quantum games:
\subsection{Vaidman's game}
Vaidman \cite{Vaidman} presented an example of a game for a team of three
players that can \emph{only} be won if played in the quantum world. Three
players are sent to remote locations $A,$ $B$ and $C$. At a certain time $t$
each player is asked one of the two possible questions:
\begin{enumerate}
\item What is $X$?
\item What is $Y$?
\end{enumerate}
Both of these questions have $+1$ or $-1$ as possible answers. Rules of the
game are such that:
\begin{itemize}
\item Either all players are asked the $X$ question or
\item Only one player is asked the $X$ question and the other two are asked
the $Y$ question.
\end{itemize}
The team wins if
\begin{itemize}
\item The product of their three answers is $-1$ in the case of three $X$
questions or
\item The product of three answers is $1$ in the case of one $X$ and two $Y$ questions.
\end{itemize}
What should the team do? Let $X_{A}$ be the answer of player $A$ to the $X$
question. Similarly, one can define $X_{B},X_{C}$ etc. The winning condition
can now be written as
\begin{align}
X_{A}X_{B}X_{C} & =-1\nonumber\\
X_{A}Y_{B}Y_{C} & =1\nonumber\\
Y_{A}X_{B}Y_{C} & =1\nonumber\\
Y_{A}Y_{B}X_{C} & =1 \label{Vaidmanproduct}
\end{align}
The product of all left-hand sides of Eqs. (\ref{Vaidmanproduct}) is
$X_{A}^{2}X_{B}^{2}X_{C}^{2}Y_{A}^{2}Y_{B}^{2}Y_{C}^{2}=1$, because each of
the $X$ and $Y$ takes the values $\pm1$ only. The product of the right-hand
sides is $-1$, which leads to a contradiction. Therefore, the game cannot be
won, with a success probability of $1$, by a team of classical players. Since
at most three of the four conditions in Eqs. (\ref{Vaidmanproduct}) can be
satisfied simultaneously, the probability of winning the game by classical
players cannot exceed $3/4$.
solution exists for the team. Three particles are prepared in a correlated
state (GHZ):
\begin{equation}
\left| GHZ\right\rangle =\frac{1}{\sqrt{2}}\left\{ \left| \uparrow
_{Z}\right\rangle _{A}\left| \uparrow_{Z}\right\rangle _{B}\left|
\uparrow_{Z}\right\rangle _{C}-\left| \downarrow_{Z}\right\rangle _{A}\left|
\downarrow_{Z}\right\rangle _{B}\left| \downarrow_{Z}\right\rangle
_{C}\right\}
\end{equation}
If a member \ of the team is asked the $X$ question, she measures $\hat
{\sigma}_{x}$. If she is asked the $Y$ question, she measures $\hat{\sigma
}_{y}$ instead. Quantum mechanics implies that for the GHZ state one gets \cite{Vaidman}
\begin{align}
\hat{\sigma}_{A_{x}}\hat{\sigma}_{B_{x}}\hat{\sigma}_{C_{x}} &
=-1\nonumber\\
\hat{\sigma}_{A_{x}}\hat{\sigma}_{B_{y}}\hat{\sigma}_{C_{y}} & =1\nonumber\\
\hat{\sigma}_{A_{y}}\hat{\sigma}_{B_{x}}\hat{\sigma}_{C_{y}} & =1\nonumber\\
\hat{\sigma}_{A_{y}}\hat{\sigma}_{B_{y}}\hat{\sigma}_{C_{x}} & =1
\end{align}
Vaidman's game can, therefore, be won by a group of quantum players with
$100\%$ success probability.
There remains a subtle point in Vaidman's argument. The contradiction obtained
by comparing the four equations in (\ref{Vaidmanproduct}) assumes that all four
equations hold simultaneously. In fact, the four equations represent four
mutually incompatible measurement situations.
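The quantum team's perfect success can also be checked by a direct
computation: for the GHZ state above the four operator products have the
definite values $-1,+1,+1,+1$, exactly the winning conditions
(\ref{Vaidmanproduct}). The sketch below (a plain numerical check, with the
spin states written in the $z$-basis) evaluates these expectation values:
\begin{verbatim}
import numpy as np

sx = np.array([[0, 1], [1, 0]], dtype=complex)
sy = np.array([[0, -1j], [1j, 0]])
up = np.array([1, 0], dtype=complex)
down = np.array([0, 1], dtype=complex)

def kron3(a, b, c):
    return np.kron(np.kron(a, b), c)

# |GHZ> = (|up,up,up> - |down,down,down>)/sqrt(2)
ghz = (kron3(up, up, up) - kron3(down, down, down)) / np.sqrt(2)

for ops, label in [((sx, sx, sx), "XXX"),
                   ((sx, sy, sy), "XYY"),
                   ((sy, sx, sy), "YXY"),
                   ((sy, sy, sx), "YYX")]:
    value = np.vdot(ghz, kron3(*ops) @ ghz).real
    print(label, round(value))          # XXX -> -1, the other three -> +1
\end{verbatim}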
\subsection{Meyer's PQ Penny-Flip}
Two players can play a simple game if they share a coin having two possible
states, head or tail. The first strong argument for quantum games was
presented by Meyer \cite{MeyerDavid} as a coin flip game played by two
characters, Captain Picard and Q, from the popular American science fiction
series Star Trek. In a quantum version of the game the flipping action is
performed on a ``quantum coin'', which can be thought of as an electron that,
on measurement, is found to exist either in spin-up ($H$) or in spin-down
($T$) state.
In Meyer's interesting description of a quantum game, the story starts when
starship Enterprise faces some imminent catastrophe. Q appears on the bridge
and offers to rescue the ship if Picard can beat him in a penny-flip game.
Q asks Picard to place the penny in a box, head up. Then Q, Picard, and
finally Q play their moves. Q wins if the penny is head up when the box is
opened. For the classical version of this game the payoff matrix can be
constructed as
\begin{equation}
\begin{array}
[c]{c}
\text{Picard}
\end{array}
\begin{array}
[c]{c}
N\\
F
\end{array}
\overset{\overset{
\begin{array}
[c]{c}
\text{Q}
\end{array}
}{
\begin{array}
[c]{cccc}
NN & NF & FN & FF
\end{array}
}}{\left(
\begin{array}
[c]{cccc}
-1 & 1 & 1 & -1\\
1 & -1 & -1 & 1
\end{array}
\right) } \label{PQPennyFlipMatrix}
\end{equation}
where rows and columns are Picard's and Q's pure strategies respectively. Let
$(H,T)$ be the basis of a $2$-dimensional vector space. The players' moves can
be represented by a sequence of $2\times2$ matrices. In the matrix
(\ref{PQPennyFlipMatrix}) the moves `to flip' and `not to flip' are
represented by $F$ and $N$, respectively:
\begin{equation}
F:
\begin{array}
[c]{c}
H\\
T
\end{array}
\overset{
\begin{array}
[c]{cc}
H & T
\end{array}
}{\left(
\begin{array}
[c]{cc}
0 & 1\\
1 & 0
\end{array}
\right) }\text{ \ \ \ \ \ \ \ \ \ \ }N:
\begin{array}
[c]{c}
H\\
T
\end{array}
\overset{
\begin{array}
[c]{cc}
H & T
\end{array}
}{\left(
\begin{array}
[c]{cc}
1 & 0\\
0 & 1
\end{array}
\right) }
\end{equation}
defined to act, on left multiplication, on a vector representing the state of
the coin. A general mixed strategy is described by the matrix:
\begin{equation}
\begin{array}
[c]{c}
H\\
T
\end{array}
\overset{
\begin{array}
[c]{cc}
H & T
\end{array}
}{\left(
\begin{array}
[c]{cc}
1-p & p\\
p & 1-p
\end{array}
\right) } \label{MixedStrategy}
\end{equation}
where $p\in\left[ 0,1\right] $ is the probability with which the player
flips the coin. A sequence of mixed actions puts the state of the coin into a
convex linear combination $aH+(1-a)T$ where $0\leq a\leq1$. The coin is then
in $H$ state with probability $a$. Q plays his move first, after Picard puts
the coin in the $H$ state.
Meyer then considers a quantum version of this game. Q has studied
quantum theory and implements his strategy as a sequence of unitary rather
than stochastic matrices. Such action requires a description of the state of
the coin in two-dimensional Hilbert space. Let its basis be the kets $\left|
H\right\rangle $ and $\left| T\right\rangle $, in Dirac notation. A pure
state of the coin is $a\left| H\right\rangle +b\left| T\right\rangle $ where
$a,b\in\mathbf{C}$ and $aa^{\ast}+bb^{\ast}=1$.
Given the coin is initially in the state $\left| H\right\rangle $, the
following unitary action $U(a,b)$ by $Q$ puts the coin into the state
$a\left| H\right\rangle +b\left| T\right\rangle $:
\begin{equation}
\begin{array}
[c]{c}
U(a,b)=
\end{array}
\begin{array}
[c]{c}
H\\
T
\end{array}
\overset{
\begin{array}
[c]{cc}
H & T
\end{array}
}{\left(
\begin{array}
[c]{cc}
a & b\\
b^{\ast} & -a^{\ast}
\end{array}
\right) } \label{Q's action}
\end{equation}
Using the density matrix notation, the initial state of the coin can be
written as
\begin{equation}
\rho_{0}=\left| H\right\rangle \left\langle H\right|
\end{equation}
Q's unitary action $U(a,b)$ changes the state $\rho_{0}$ to
\begin{equation}
\rho_{1}=U\rho_{0}U^{\dagger}=\left(
\begin{array}
[c]{cc}
aa^{\ast} & ab^{\ast}\\
ba^{\ast} & bb^{\ast}
\end{array}
\right)
\end{equation}
because unitary transformations act on density matrices by conjugation. Picard
is restricted to use only a classical mixed strategy (\ref{MixedStrategy}) by
flipping the coin with probability $p$. After his action the coin is in the
pure state $b\left| H\right\rangle +a\left| T\right\rangle $ with
probability $p$ and in the pure state $a\left| H\right\rangle +b\left|
T\right\rangle $ with probability $(1-p)$. Picard's action acts on the density
matrix $\rho_{1}$, not as a stochastic matrix on a probabilistic state, but as
a convex linear combination of unitary (deterministic) transformations:
\begin{align}
\rho_{2} & =pF\rho_{1}F^{\dagger}+(1-p)N\rho_{1}N^{\dagger}\nonumber\\
& =\left(
\begin{array}
[c]{cc}
pbb^{\ast}+(1-p)aa^{\ast} & pba^{\ast}+(1-p)ab^{\ast}\\
pab^{\ast}+(1-p)ba^{\ast} & paa^{\ast}+(1-p)bb^{\ast}
\end{array}
\right)
\end{align}
Interestingly, Q has at his disposal a move:
\begin{equation}
U_{1}=U(\frac{1}{\sqrt{2}},\frac{1}{\sqrt{2}})=\frac{1}{\sqrt{2}}\left(
\begin{array}
[c]{cc}
1 & 1\\
1 & -1
\end{array}
\right)
\end{equation}
that can put the coin into a simultaneous eigenstate with eigenvalue $1$ of
both $F$ and $N,$ which then becomes an invariant under any mixed strategy
$pF+(1-p)N$ of Picard. In his second action Q acts again with $U(\frac
{1}{\sqrt{2}},\frac{1}{\sqrt{2}})$ and gets back the state $\rho_{0}=\left|
H\right\rangle \left\langle H\right| $ and wins. The game can also be
understood with the following chart.
\begin{equation}
\left| H\right\rangle \text{ \ \ }\overset{
\begin{array}
[c]{c}
\text{Q}
\end{array}
}{\underset{
\begin{array}
[c]{c}
\hat{H}
\end{array}
}{\longrightarrow}}\text{ \ \ }\frac{1}{\sqrt{2}}\left( \left|
H\right\rangle +\left| T\right\rangle \right) \text{ \ \ }\overset{
\begin{array}
[c]{c}
\text{Picard}
\end{array}
}{\underset{
\begin{array}
[c]{c}
\sigma_{x}\text{ or }\hat{I}
\end{array}
}{\longrightarrow}}\text{ \ \ }\frac{1}{\sqrt{2}}\left( \left|
H\right\rangle +\left| T\right\rangle \right) \text{ \ \ }\overset{
\begin{array}
[c]{c}
\text{Q}
\end{array}
}{\underset{
\begin{array}
[c]{c}
\hat{H}
\end{array}
}{\longrightarrow}}\text{ \ \ }\left| H\right\rangle
\end{equation}
where $\hat{H}=\frac{1}{\sqrt{2}}\left(
\begin{array}
[c]{cc}
1 & 1\\
1 & -1
\end{array}
\right) $ is a Hadamard transformation and $\sigma_{x}=\left(
\begin{array}
[c]{cc}
0 & 1\\
1 & 0
\end{array}
\right) $ is the flipping operator. $\left| H\right\rangle $ is the head
state of the coin and $\hat{I}$ is the identity operator. Q plays a quantum
strategy by putting the coin into a symmetric superposition of head and tail.
Now, whether Picard flips the coin or not, it remains in the symmetric
superposition, which Q can rotate back to the head state by applying $\hat{H}$
again since $\hat{H}=\hat{H}^{-1}$.
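The working of Q's strategy is easy to reproduce with a short density-matrix
computation. The sketch below (illustrative only; the probabilities tried are
arbitrary) applies the Hadamard, then Picard's mixed flip with probability
$p$, then the Hadamard again, and confirms that the final state is $\left|
H\right\rangle \left\langle H\right| $ whatever the value of $p$:
\begin{verbatim}
import numpy as np

head = np.array([1.0, 0.0])             # |H>, heads
F = np.array([[0.0, 1.0], [1.0, 0.0]])  # flip
N = np.eye(2)                           # do not flip
Had = np.array([[1.0, 1.0], [1.0, -1.0]]) / np.sqrt(2)   # Q's unitary move

rho0 = np.outer(head, head)             # coin starts heads up

for p in (0.0, 0.3, 0.5, 1.0):
    rho1 = Had @ rho0 @ Had.T                              # Q's first move
    rho2 = p * F @ rho1 @ F.T + (1 - p) * N @ rho1 @ N.T   # Picard's mixed strategy
    rho3 = Had @ rho2 @ Had.T                              # Q's second move
    print(p, np.allclose(rho3, rho0))   # True for every p: Q always wins
\end{verbatim}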
\subsection{Eisert, Wilkens and Lewenstein's quantum Prisoners' Dilemma}
Eisert, Wilkens, and Lewenstein \cite{Eisert} gave a physical model of the PD
and suggested that the players can escape the dilemma if they both resort to
quantum strategies. Their physical model consists of
\begin{itemize}
\item A source making available two bits, one for each player.
\item Physical instruments enabling the players to manipulate, in a strategic
manner, their own bits.
\item A measurement device that determines the players' payoffs from the
final state of the two bits.
\end{itemize}
In a quantum formulation the classical strategies $C$ and $D$ are assigned two
basis vectors $\left| C\right\rangle $ and $\left| D\right\rangle $ in the
Hilbert space of a qubit. A vector in the tensor product space, which is
spanned by the classical game basis $\left| CC\right\rangle ,\left|
CD\right\rangle ,\left| DC\right\rangle $ and $\left| DD\right\rangle $,
describes the state of the game.
The game's initial state is $\left| \psi_{in}\right\rangle =\hat{J}\left|
CC\right\rangle $ where $\hat{J}$ is a unitary operator known to both players.
Alice and Bob's strategic moves are associated with unitary operators $\hat
{U}_{A}$ and $\hat{U}_{B}$ respectively, chosen from a strategic space $S$.
The players' actions are local, i.e. each player operates only on his/her own qubit. After
players' moves the state of the game changes to $(\hat{U}_{A}\otimes\hat
{U}_{B})\hat{J}\left| CC\right\rangle $. Measurements are now performed to
determine the players' payoffs. Measurement consists of applying a reverse
unitary operator $\hat{J}^{\dagger}$ followed by a pair of Stern-Gerlach type
detectors. Before detection the final state of the game is given by
\begin{equation}
\left| \psi_{f}\right\rangle =\hat{J}^{\dagger}(\hat{U}_{A}\otimes\hat{U}
_{B})\hat{J}\left| CC\right\rangle
\end{equation}
Eisert et al. \cite{Eisert} define Alice's expected payoff as
\begin{equation}
P_{A}=r\left| \left\langle CC\mid\psi_{f}\right\rangle \right| ^{2}+s\left|
\left\langle CD\mid\psi_{f}\right\rangle \right| ^{2}+t\left| \left\langle
DC\mid\psi_{f}\right\rangle \right| ^{2}+u\left| \left\langle DD\mid\psi
_{f}\right\rangle \right| ^{2} \label{PDAlice'sPayoff}
\end{equation}
where the quantities $r,s,t$ and $u$ are from the PD matrix (\ref{PDmatrix}).
Bob's payoff $P_{B}$ is obtained by interchanging $s\leftrightarrow t$ in Eq.
(\ref{PDAlice'sPayoff}). Eisert and Wilkens \cite{Eisert1} use the following
matrix representations of unitary operators of their one- and two-parameter
strategies, respectively:
\begin{align}
U(\theta) & =\left(
\begin{array}
[c]{cc}
\cos(\theta/2) & \sin(\theta/2)\\
\text{-}\sin(\theta/2) & \cos(\theta/2)
\end{array}
\right) \label{OneParameterSet}\\
U(\theta,\phi) & =\left(
\begin{array}
[c]{cc}
e^{i\phi}\cos(\theta/2) & \sin(\theta/2)\\
-\sin(\theta/2) & e^{-i\phi}\cos(\theta/2)
\end{array}
\right) \label{TwoParameterSet}
\end{align}
where $0\leq\theta\leq\pi$ and $0\leq\phi\leq\pi/2$. To ensure that the
ordinary PD is faithfully represented in its quantum version, Eisert et al.
imposed additional conditions on $\hat{J}$:
\begin{equation}
\left[ \hat{J},\hat{D}\otimes\hat{D}\right] =0,\left[ \hat{J},\hat
{D}\otimes\hat{C}\right] =0,\left[ \hat{J},\hat{C}\otimes\hat{D}\right] =0
\label{condition1}
\end{equation}
where $\hat{C}$ and $\hat{D}$ are the operators corresponding to the
strategies of cooperation and defection respectively. A unitary operator
satisfying the condition (\ref{condition1}) is
\begin{equation}
\hat{J}=\exp\left\{ i\gamma\hat{D}\otimes\hat{D}/2\right\}
\end{equation}
where $\gamma\in\left[ 0,\pi/2\right] $. The parameter $\gamma$ can be
regarded as a measure of the game's entanglement. At $\gamma=0$ the game
reduces to its classical form. For a maximally entangled game ($\gamma=\pi/2$)
the classical NE $\hat{D}\otimes\hat{D}$ is replaced by a different unique
equilibrium $\hat{Q}\otimes\hat{Q}$ with $\hat{Q}\sim\hat{U}(0,\pi/2)$. The new equilibrium is
also found to be \emph{Pareto optimal}, that is, a player cannot increase
his/her payoff by deviating from this pair of strategies without reducing the
other player's payoff. Classically ($C,C$) is Pareto optimal, but is not an
equilibrium. Eisert et al. claimed that in its quantum version the dilemma in
PD disappears from the game and quantum strategies give a superior performance
if entanglement is present.
\begin{figure}
\caption{Eisert et al.'s scheme to play a quantum game.}
\label{Fig1}
\end{figure}
In density matrix notation, the players' actions change the initial state
$\rho$ to
\begin{equation}
\hat{\sigma}=(\hat{U}_{A}\otimes\hat{U}_{B})\rho(\hat{U}_{A}\otimes\hat{U}
_{B})^{\dagger}
\end{equation}
The arbiter applies the following projection operators to $\hat{\sigma}$:
\begin{align}
\hat{\pi}_{CC} & =\left| \psi_{CC}\right\rangle \left\langle \psi
_{CC}\right| \text{, \ \ \ \ \ \ }\hat{\pi}_{CD}=\left| \psi_{CD}
\right\rangle \left\langle \psi_{CD}\right| \nonumber\\
\hat{\pi}_{DC} & =\left| \psi_{DC}\right\rangle \left\langle \psi
_{DC}\right| \text{, \ \ \ \ \ }\hat{\pi}_{DD}=\left| \psi_{DD}\right\rangle
\left\langle \psi_{DD}\right|
\end{align}
The expected payoffs are
\begin{align}
P_{A,B} & =[P_{CC}]_{A,B}\text{Tr}[\hat{\pi}_{CC}\hat{\sigma}]+[P_{CD}
]_{A,B}\text{Tr}[\hat{\pi}_{CD}\hat{\sigma}]\nonumber\\
& +[P_{DC}]_{A,B}\text{Tr}[\hat{\pi}_{DC}\hat{\sigma}]+[P_{DD}]_{A,B}
\text{Tr}[\hat{\pi}_{DD}\hat{\sigma}]
\end{align}
where, for example, $[P_{CD}]_{A}$ is Alice's classical payoff when she
plays $C$ and Bob plays $D$. For one-parameter strategies the classical pure
strategies $C$ and $D$ are realized as $\hat{U}(0)$ and $\hat{U}(\pi)$,
respectively; while for two-parameter strategies the classical pure strategies
$C$ and $D$ are realized as $\hat{U}(0,0)$ and $\hat{U}(\pi,0)$, respectively.
Fig. (\ref{Fig1}) shows Eisert et al.'s scheme to play a quantum game.
Many recent investigations \cite{FlitneyAbbott4,Flitney5,Flitney6,Flitney7}
in quantum games have been motivated by Eisert et al.'s scheme.
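The scheme of Fig. (\ref{Fig1}) is straightforward to implement numerically.
The sketch below (using the illustrative PD payoffs $r=3$, $s=0$, $t=5$, $u=1$
and maximal entanglement $\gamma=\pi/2$, both of which are assumptions made
only for this demonstration) computes Alice's payoff (\ref{PDAlice'sPayoff})
for a few strategy pairs and reproduces the classical outcomes as well as the
payoff of $3$ at $\hat{Q}\otimes\hat{Q}$:
\begin{verbatim}
import numpy as np
from scipy.linalg import expm

def U(theta, phi=0.0):
    """Eisert et al.'s two-parameter strategy set."""
    return np.array([[np.exp(1j*phi)*np.cos(theta/2), np.sin(theta/2)],
                     [-np.sin(theta/2), np.exp(-1j*phi)*np.cos(theta/2)]])

C, D, Q = U(0, 0), U(np.pi, 0), U(0, np.pi/2)

gamma = np.pi / 2                            # maximal entanglement (assumed)
J = expm(1j * gamma * np.kron(D, D) / 2)     # entangling gate J = exp(i gamma D x D / 2)
CC = np.zeros(4); CC[0] = 1.0                # |CC> in the basis CC, CD, DC, DD
r, s, t, u = 3, 0, 5, 1                      # illustrative PD payoffs

def payoff_alice(UA, UB):
    psi_f = J.conj().T @ np.kron(UA, UB) @ J @ CC
    pCC, pCD, pDC, pDD = np.abs(psi_f) ** 2
    return r*pCC + s*pCD + t*pDC + u*pDD

print(payoff_alice(D, D))   # 1: classical mutual defection
print(payoff_alice(C, C))   # 3: classical mutual cooperation
print(payoff_alice(Q, Q))   # 3: the new equilibrium reported by Eisert et al.
\end{verbatim}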
\subsection{Quantum Battle of Sexes}
Motivated by Eisert et al.'s proposal, Marinatto and Weber
\cite{Marinatto1} introduced a new scheme for quantizing bi-matrix games by
presenting a quantized version of the BoS. In this scheme a state in a
$2\otimes2$ dimensional Hilbert space is referred to as a \emph{strategy}. At
the start of the game the players are supplied with this strategy. The players
manipulate the strategy, in the next phase, by playing their \emph{tactics}.
The state is finally measured and the payoffs are rewarded depending on the
results of the measurement. A player can act only within his/her own two-dimensional
subspace. Tactics are therefore \emph{local actions} on a player's qubit. The
final measurement, made independently on each qubit, takes into consideration
the local nature of players' manipulations. It is achieved by selecting a
measurement basis that respects the division of Hilbert space into two equal parts.
Essentially, the scheme differs from the earlier proposed scheme of Eisert et
al. \cite{Eisert} by the absence of the reverse gate $\hat{J}^{\dagger}$. In
Eisert et al.'s scheme this gate makes sure that the classical game remains a
subset of its quantum version. In
Marinatto and Weber's scheme the state is measured without passing it through
the reverse gate. They showed that the classical game still remains a subset
of the quantum game if the players' tactics are limited to a probabilistic
choice between applying the identity $\hat{I}$ and the Pauli spin-flip
operator $\hat{\sigma}_{x}$. Also the classical game results when the players
are forwarded an initial strategy $\left| \psi_{in}\right\rangle =\left|
00\right\rangle $.
Suppose $\rho_{in}$ is the initial strategy, which the players Alice and Bob
receive at the start of the game. Let Alice act with the identity $\hat{I}$ on
$\rho_{in}$ with probability $p$ and with $\hat{\sigma}_{x}$ with probability
$(1-p)$. Similarly, let Bob act with the identity $\hat{I}$ with probability $q$
and with $\hat{\sigma}_{x}$ with probability $(1-q)$. After the players'
actions the state changes to
\begin{align}
\rho_{fin} & =pq\hat{I}_{A}\otimes\hat{I}_{B}\rho_{in}\hat{I}_{A}^{\dagger
}\otimes\hat{I}_{B}^{\dagger}+p(1-q)\hat{I}_{A}\otimes\hat{\sigma}_{xB}
\rho_{in}\hat{I}_{A}^{\dagger}\otimes\hat{\sigma}_{xB}^{\dagger}+\nonumber\\
& q(1-p)\hat{\sigma}_{xA}\otimes\hat{I}_{B}\rho_{in}\hat{\sigma}
_{xA}^{\dagger}\otimes\hat{I}_{B}^{\dagger}+\nonumber\\
& (1-p)(1-q)\hat{\sigma}_{xA}\otimes\hat{\sigma}_{xB}\rho_{in}\hat{\sigma
}_{xA}^{\dagger}\otimes\hat{\sigma}_{xB}^{\dagger}
\end{align}
For the bi-matrix:
\begin{equation}
\begin{array}
[c]{c}
\text{Alice}
\end{array}
\begin{array}
[c]{c}
S_{1}\\
S_{2}
\end{array}
\overset{\overset{
\begin{array}
[c]{c}
\text{Bob}
\end{array}
}{
\begin{array}
[c]{cc}
S_{1} & S_{2}
\end{array}
}}{\left(
\begin{array}
[c]{cc}
(\alpha_{A},\alpha_{B}) & (\beta_{A},\beta_{B})\\
(\gamma_{A},\gamma_{B}) & (\delta_{A},\delta_{B})
\end{array}
\right) }
\end{equation}
Marinatto and Weber defined the following payoff operators
\begin{align}
(P_{A})_{oper} & =\alpha_{A}\left| 00\right\rangle \left\langle 00\right|
+\beta_{A}\left| 01\right\rangle \left\langle 01\right| +\gamma_{A}\left|
10\right\rangle \left\langle 10\right| +\delta_{A}\left| 11\right\rangle
\left\langle 11\right| \nonumber\\
(P_{B})_{oper} & =\alpha_{B}\left| 00\right\rangle \left\langle 00\right|
+\beta_{B}\left| 01\right\rangle \left\langle 01\right| +\gamma_{B}\left|
10\right\rangle \left\langle 10\right| +\delta_{B}\left| 11\right\rangle
\left\langle 11\right|
\end{align}
where the states $\left| 0\right\rangle $ and $\left| 1\right\rangle $ are
used for the measurement basis, corresponding to the pure strategies $S_{1}$
and $S_{2}$, respectively. The payoff functions are then obtained as mean
values of these operators:
\begin{equation}
P_{A,B}=\text{Tr}\left\{ (P_{A,B})_{oper}\rho_{fin}\right\}
\end{equation}
\begin{figure}
\caption{Marinatto and Weber's scheme to play a quantum game.}
\label{Fig2}
\end{figure}
Fig. (\ref{Fig2}) sketches the idea of playing a quantum game in Marinatto and
Weber's scheme. The scheme was developed for the BoS given by the matrix
(\ref{BoSMatrix}). On receiving an initial strategy:
\begin{equation}
\left| \psi_{ini}\right\rangle =\frac{1}{\sqrt{2}}(\left| 00\right\rangle
+\left| 11\right\rangle )
\end{equation}
the players' matched tactics (both applying $\hat{I}$ or both applying
$\hat{\sigma}_{x}$) cannot change it, and the final strategy then remains
identical to the initial one. The players' expected payoffs are maximized for
the tactics $p^{\ast}=q^{\ast}=0$ or $p^{\ast}=q^{\ast}=1$, that is, both
players either apply $\hat{\sigma}_{x}$ with certainty or $\hat{I}$ with
certainty. In either case the expected payoff is $(\alpha+\beta)/2$ to each
player. Marinatto and Weber suggested that a unique solution thus exists in
the game.
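A short computation confirms these statements. The sketch below (with
illustrative BoS payoffs $\alpha=2$ and $\beta=1$ for Alice and zeros for
mismatched outcomes, since the matrix (\ref{BoSMatrix}) is given elsewhere in
the text) evaluates $P_{A}=\text{Tr}\left\{ (P_{A})_{oper}\rho_{fin}\right\} $
for the entangled initial strategy and a few tactics $(p,q)$:
\begin{verbatim}
import numpy as np

I2 = np.eye(2)
sx = np.array([[0.0, 1.0], [1.0, 0.0]])

# Entangled initial strategy (|00> + |11>)/sqrt(2) as a density matrix.
psi = np.array([1.0, 0.0, 0.0, 1.0]) / np.sqrt(2)
rho_in = np.outer(psi, psi)

# Illustrative BoS payoffs for Alice: alpha on |00>, beta on |11>, 0 otherwise.
alpha, beta = 2.0, 1.0
PA_oper = np.diag([alpha, 0.0, 0.0, beta])

def payoff_alice(p, q):
    rho_fin = (p * q * np.kron(I2, I2) @ rho_in @ np.kron(I2, I2)
               + p * (1 - q) * np.kron(I2, sx) @ rho_in @ np.kron(I2, sx)
               + (1 - p) * q * np.kron(sx, I2) @ rho_in @ np.kron(sx, I2)
               + (1 - p) * (1 - q) * np.kron(sx, sx) @ rho_in @ np.kron(sx, sx))
    return np.trace(PA_oper @ rho_fin).real

print(payoff_alice(1, 1))    # (alpha + beta)/2 = 1.5
print(payoff_alice(0, 0))    # 1.5 again
print(payoff_alice(1, 0))    # 0.0: mismatched tactics give the worst case
\end{verbatim}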
\subsection{Quantum version of the Monty Hall problem}
Monty Hall is a game in which Alice secretly selects one door out of three
behind which to place a prize. Bob then picks a door. Alice then opens a
different door, showing that the prize is not behind it. Bob now has the option
of changing to the untouched door or sticking with his current selection. In
the classical version of the game Bob's optimal strategy is to switch doors,
which doubles his chances of winning from $1/3$ to $2/3$.
Li et al. \cite{Li et al}, Flitney and Abbott \cite{FlitneyAbbott} and D'Ariano
et al. \cite{DAriano}\ have proposed various quantum versions of the Monty
Hall problem.
\subsection{Quantum market games}
During recent years Piotrowski and Sladkowski
\cite{Piotrowski1,Piotrowski2,Piotrowski3} have proposed a quantum-like
description of markets and economics. This development has its roots in
quantum game theory and is considered part of the new field of
econophysics. In econophysics \cite{Econophysics} mathematical techniques
developed by physicists are used to analyze the complex financial and economic
systems. Developments in econophysics have motivated some authors
\cite{Blankmeyer} to ask about the possibility of a meaning of Heisenberg
uncertainty principle in economics. Others have even claimed that quantum
mechanics and mathematical economics are isomorphic \cite{Lambertini}.
\subsection{Quantum Parrondo's Games}
A Parrondo's game is an interesting problem in game theory. Two games that are
losing when played individually can be combined to produce a winning game. The
game can be put into the form of a gambling game utilizing a set of biased coins.
Flitney and Abbott \cite{FlitneyAbbott1,FlitneyAbbott2} studied a quantum
version of the Parrondo's game where the rotation operators representing the
toss of a classical biased coin are replaced by general SU(2) operators to
transform the game into the quantum domain. They found that superposition of
qubits can couple the two games and produce interference leading to different
payoffs than in the classical case.
\chapter{Comments on proposed set-ups to play quantum games}
Meyer \cite{MeyerDavid} demonstrated with the example of a penny-flip game how
quantum mechanics can affect game theory. He introduced a game where a
suitable quantum strategy can beat any classical strategy. Comments and
criticism followed soon after this demonstration of the power of quantum
strategies, which are reviewed in the following.
\section{Enk's comment on Meyer's quantum Penny-Flip}
Though agreeing that Meyer reached a correct conclusion, Enk \cite{Enk}
commented that Meyer's particular example is flawed for the following reasons:
\begin{itemize}
\item Q's quantum strategy can also be implemented classically.
\item Meyer's game only shows the superiority of an extended set of
strategies over a restricted one, which is not surprising.
\item A single qubit is not truly a quantum system because its dynamics and
its response to measurements can also be described by a classical
hidden-variable model. Bell's inequalities, or the Kochen-Specker theorem, do
not exist for a two-dimensional system, thus making it possible to explicitly
construct classical models for such systems.
\end{itemize}
\subsection{Meyer's reply}
Meyer replied \cite{Meyer's Reply} and disagreed with Enk's claim that the
existence of classical models for Q's strategy necessarily prevents it from
being called quantum mechanical. He argued as follows:
\begin{itemize}
\item Enk's claim implies that P's strategy is also not classical because
quantum models exist for flipping a two-state system.
\item Though classical models do indeed exist for qubit systems, they
scale exponentially as the number of qubits increases.
\end{itemize}
Entangled qubits do not possess classical models, but entanglement itself has
been shown to be unnecessary for a quantum algorithm to outperform a classical one.
For example, Grover's algorithm \cite{Grover}, although discovered in the
context of quantum computation, can be implemented using a system allowing
superposition of states, like classical coupled simple harmonic oscillators
\cite{ApporvaPatel}. It does not seem fair to claim that such an
implementation prohibits calling it a quantum algorithm.
Related to the third point in Enk's comment, it seems relevant to mention that
recently Khrennikov \cite{Khrennikov} proved an analogue of Bell's inequality
for conditional probabilities. Interestingly the inequality can be applied not
only to pairs of correlated particles, but also to a single particle. The
inequality is violated for spin projections of the single particle. Khrennikov
concludes that a realistic pre-quantum model does not exist even for the
two-dimensional Hilbert space.
\section{Benjamin and Hayden's comment on quantization of Prisoners' Dilemma}
Eisert et al. obtained $\hat{Q}$ as the new quantum equilibrium in PD, when
both players have access to a two-parameter set (\ref{TwoParameterSet}) of
unitary $2\times2$ matrices. Benjamin and Hayden \cite{Benjamin1} observed
that when their two-parameter set is extended to all local unitary operations
(i.e. all of $SU(2)$) the strategy $\hat{Q}$ does not remain an equilibrium.
They showed that in the full space of deterministic quantum strategies there
exists no equilibrium for Eisert et al.'s quantum PD. They also observed that
Eisert's set of two-parameter quantum strategies is not closed under
composition, which is a reasonable requirement for a set of quantum strategies.
\section{Benjamin's comment on Marinatto and Weber's quantum Battle of Sexes}
In his comment Benjamin \cite{Benjamin2} made two observations about Marinatto
and Weber's quantum battle of sexes:
\begin{itemize}
\item The overall quantization scheme is fundamentally very similar to the
Eisert et al.'s previously proposed scheme \cite{Eisert}.
\item The quantum BoS does not have a unique solution. Though the dilemma may
be easier to resolve in its quantum version, the players still face it as they
do in the traditional game.
\end{itemize}
In the quantum BoS the players' expected payoffs are maximized when their
tactics consist of either both applying $\hat{I}$ with certainty ($p^{\ast
}=1,q^{\ast}=1$) or both applying $\hat{\sigma}_{x}$ with certainty ($p^{\ast
}=0,q^{\ast}=0$). Marinatto and Weber concluded that an entangled initial
strategy $(\left| 00\right\rangle +\left| 11\right\rangle )/\sqrt{2}$,
therefore, gives a unique solution in the game. Given that the players'
tactics are independent, the players are faced with a dilemma once again in
opting for ($p^{\ast}=1,q^{\ast}=1$) or ($p^{\ast}=0,q^{\ast}=0$). Mismatched
tactics, i.e. ($p^{\ast}=1,q^{\ast}=0$) or ($p^{\ast}=0,q^{\ast}=1$), both
lead to a worst-case situation.
Benjamin \cite{Benjamin2} also pointed out a \emph{difference} in terminology.
In Marinatto and Weber's set-up an initial strategy, in the form of a quantum
state, is forwarded to the players who then apply their tactics to modify the
state. In the Eisert et al.'s scheme, on the other hand, players' `moves' are
their manipulations, and their overall act of choosing what move to play is
their strategy.
Recently Nawaz and Toor \cite{NawazToor} showed that by using a more general
initial quantum state the dilemma in the classical BoS can be resolved, and a
unique solution can be found.
\subsection{Marinatto and Weber's reply}
In a reply Marinatto and Weber \cite{Marinatto's reply} defended their choice
of calling the quantum states, rather than the operators used to manipulate
them, strategies. They claimed that their choice is very natural and consistent
with the spirit of classical game theory, where at the start of a game each
player has at her disposal an ensemble of strategies.
Regarding Benjamin's claim that the dilemma persists in quantum BoS since
players cannot decide between the two options, i.e. ($p^{\ast}=0,q^{\ast}=0 $)
and ($p^{\ast}=1,q^{\ast}=1$), Marinatto and Weber replied that the second
option of doing nothing ($p^{\ast}=1,q^{\ast}=1$) amounts to the most rational
behavior of the two players. According to them no incentive exists for a
player for doing something (i.e. $p^{\ast}=0$ or $q^{\ast}=0$) because:
\begin{itemize}
\item It cannot lead to a better payoff and each player knows that.
\item It only generates the extra risk of incurring a loss.
\item It is more expensive than doing nothing, in terms of resources needed
to operate on the strategies.
\end{itemize}
\subsection{`Quantum form' of a matrix game and initial quantum states}
In Eisert et al.'s set-up when the parameter $\gamma$ of the initial quantum
state is different from zero, the players' payoffs are generally
non-classical, except for special moves available to them that can result in
the classical payoffs. Eisert et al. allowed the parameter $\gamma$ to take a
range of values and studied how it affects the equilibria of the game.
Marinatto and Weber \cite{Marinatto1} forward an initial strategy to the two
players who then apply their `tactics' on it. In their scheme the classical
game corresponds to the initial state $\left| 00\right\rangle $.
Suppose the players receive pure two-qubit states, different from $\left|
00\right\rangle $, but the measurement uses the \emph{same} payoff operators.
The payoff operators used in measurement in Marinatto and Weber's scheme
contain \emph{all} the information about what matrix game is being played.
Given the measurement apparatus remains the same, a `quantum form' of the
matrix game can be obtained by \emph{only }choosing among different initial
states. Hence, this approach translates the problem of finding a quantum form
of a matrix game to the problem of finding a pure initial state.
The approach should be seen from the view that the only restriction on a
`quantum form' of a game is that the corresponding classical game must be
reproducible as a special case. Because a product initial state results in the
classical game, the above approach respects this restriction.
The Eisert et al.'s set-up suggests studying the behavior of equilibria in
relation to the parameter $\gamma$. The above approach, on the other hand,
suggests studying the behavior of equilibria in relation to different pure
initial states.
\section{\label{EnkPikeComment}Enk and Pike's comment on quantum Prisoners' Dilemma}
More recently Enk and Pike \cite{EnkPike} have argued that the quantum
solutions of PD, found by Eisert et al. \cite{Eisert}, are neither quantum
mechanical nor do they solve the classical game. Their argument is based on
the observation that it is possible to capture the essence of quantized PD by
simply extending the payoff matrix of the classical game, by \emph{only}
including an additional purely classical move corresponding to $\hat{Q}$,
which Eisert et al. obtained as a new quantum-mechanical `solution-move' that
could remove the dilemma inherent in the game. Enk and Pike maintained that
since Eisert's quantum solution to PD can be reconstructed in a classical way,
the only defense that remains for the quantum solution is its efficiency,
which does not play a role in PD.
Enk and Pike also suggested that a quantum game that exploits non-classical
correlations in entangled states, similar to those that violate the Bell's
inequality, should be worthy of investigation. Such correlations are without a
role in Eisert et al.'s set-up, and other quantization procedures derived from
it, even though entangled states may be present. This is because the various
qubits, after their local unitary manipulations, are brought together during
the final stage of the game for the payoff-generating measurement.
\chapter{Evolutionary stability in quantum games}
\section{Introduction}
As discussed in Section (\ref{EGT}), the concept of an ESS was introduced in
classical game theory for two reasons:
\begin{enumerate}
\item \label{Reason1}Two-player games can have multiple Nash equilibria, and
the ESS concept offers a refinement notion to select among them.
\item \label{Reason2}Population biology problems can be modelled with the help
of the ESS concept.
\end{enumerate}
The reasons why (\ref{Reason1}) holds for quantum as well as
classical games are fairly obvious. In our opinion the reason
(\ref{Reason2}) also has a meaning in a quantum context. Like NE, the ESS is a
game-theoretic concept. The concept assumes a population setting which is
relevant to problems in evolutionary biology. As a game-theoretic concept, the
ESS is equally worthy of investigation as the concept of NE is in relation to
quantization of games. The view that a population setting of evolutionary
biology can not be relevant in quantum games is based on the assumption that
the participants in a quantum game \emph{must} always be rational agents. We
believe that when the rewards for players, forming a population, not only
depend on their individual moves but also on whether the game they play is
classical or quantum in nature, then the concepts fundamentally developed for
a population setting also become relevant in a quantum context. As mentioned
in Section (\ref{EGT}), John Nash himself had a population setting in his
mind when he introduced his equilibrium notion. His equilibrium notion is
the one studied in the early work on quantum games. The fact that a
population setting was behind the notion of a NE provides an almost natural
relevance of this setting for the quantum games as well. The idea of a
population of `quantum players' itself is not very much beyond imagination.
Such a population may, for example, consist of a large number of interacting
molecules where `decisions' are taken in individual quantum interactions.
These interactions can easily be imagined pair-wise and also random, which are
the fundamental assumptions behind the concept of an ESS.
In a quantum setting the players' payoffs become sensitive to quantum effects.
In which direction does evolution now drive the population of quantum players?
The direction should, of course, be decided by the nature of the quantum effects.
\section{Quantization as a refinement notion of Nash equilibrium?}
Research in quantum games \cite{Eisert,Marinatto1}\ has shown the appearance of
entirely new equilibria upon quantization of a game. The next logical question
is whether quantization can provide another refinement of the NE
concept. Such a question is relevant in a situation where an equilibrium is
retained, whether the game is played classically or quantum mechanically, but
some property of the equilibrium changes during such a switch-over. ESS, being
a refinement notion of the NE concept, is a symmetric NE with an extra
property of stability against small perturbations. We believe the question of
whether quantization can affect the stability of a symmetric NE is as
interesting as the question of how quantization leads to different equilibria.
\section{Quantization changing evolutionary stability?}
Our motivation is to ask how game-theoretic models of evolutionary dynamics in
a population shape themselves in the new setting recently provided to game
theory by quantum mechanics. This motivation is, in a sense, a portion of a
bigger question: Can quantum mechanics have a role in directing, or even
dictating, the dynamics of evolution? To study evolution in a quantum setting
we have chosen the ESS concept firstly for its beauty and simplicity.
Secondly, because ESS is a game-theoretical concept, the new developments in
quantum games themselves provide a motivation to look at the resulting effects
on such concepts. The following questions arise immediately:
\begin{itemize}
\item How are ESSs affected when a classical game, played by a population,
changes to one of its quantum forms?
\item How are pure and mixed ESSs distinguished from one another when such a
change in the form of the game takes place?
\end{itemize}
And most importantly
\begin{itemize}
\item Can evolutionary dynamics be related to quantum entanglement, and if so, how?
\end{itemize}
Imagine a population of players in which a classical strategy has established
itself as an ESS. We ask:
\begin{itemize}
\item What happens when `mutants' of ESS theory come up with quantum
strategies and try to invade the classical ESS?
\item What happens if such an invasion is successful and a new ESS is
established -- an ESS that is quantum in nature?
\item Suppose afterwards another small group of mutants appears which is
equipped with some other quantum strategy. Would it be successful now to
invade the quantum ESS?
\end{itemize}
In the following we present an analysis based on these questions considering a
population in which symmetric pair-wise contests are taking place.
In trying to extend an idea, originally proposed for problems in population
biology, to the quantum domain, we give an analysis using Eisert et al.'s
quantization of the symmetric bi-matrix game of PD.
\section{ESSs in Eisert, Wilkens and Lewenstein's scheme}
For PD Cooperation ($C$) and Defection ($D$) are the pure classical
strategies. Which strategies are likely to be stable and persistent when the
game is played by a population engaged in pair-wise contests? In each such
contest PD is played. Straightforward analysis \cite{Prestwich} shows that $D$
will be the pure classical strategy prevalent in the population and hence the
classical ESS.
Eisert et al. used the matrix (\ref{PDmatrix1}) in their quantum version of
PD. Assume a population setting where in each pair-wise encounter the players
play PD with the \emph{same} matrix. Consider the following three situations:
\begin{enumerate}
\item \label{caseA}A small group of mutants appear equipped with one-parameter
quantum strategy $\hat{U}(\theta)$ when $D$ exists as a classical ESS.
\item \label{caseB}The mutants are equipped with two-parameter quantum
strategy $\hat{U}(\theta,\phi)$ against the classical ESS.
\item \label{caseC}The mutants have successfully invaded and a two-parameter
quantum strategy $\hat{Q}\sim\hat{U}(0,\pi/2)$ has established itself as a new
quantum ESS. Again another small group of mutants appear, using some other
two-parameter quantum strategy, and try to invade the quantum ESS, that is
$\hat{Q}$.
\end{enumerate}
\subsection{Case (\ref{caseA})}
In quantum PD with the matrix (\ref{PDmatrix1}):
\begin{equation}
\left(
\begin{array}
[c]{cc}
(3,3) & (0,5)\\
(5,0) & (1,1)
\end{array}
\right) \label{PDMatrix2}
\end{equation}
the players are anonymous and one can write, for example, $P(\hat{U}
(\theta),D)$ for the payoff to a $\hat{U}(\theta)$-player against a
$D$-player. Here $\hat{U}(\theta)$ is a strategy from Eisert et al.'s
one-parameter quantum strategy set (\ref{OneParameterSet}). Players' payoffs
can be found as
\begin{align}
P(\hat{U}(\theta),D) & =\sin^{2}(\theta/2)\nonumber\\
P(\hat{U}(\theta),\hat{U}(\theta)) & =2\cos^{2}(\theta/2)+5\cos^{2}
(\theta/2)\sin^{2}(\theta/2)+1\nonumber\\
P(D,\hat{U}(\theta)) & =5\cos^{2}(\theta/2)+\sin^{2}(\theta/2)\nonumber\\
P(D,D) & =1
\end{align}
Now $P(D,D)>P(\hat{U}(\theta),D)$ for all $\theta\in\lbrack0,\pi)$. Hence the
first condition for an ESS holds and $D\sim\hat{U}(\pi)$ is an ESS. The case
$\theta=\pi$ corresponds to one-parameter mutant strategy coinciding with the
ESS, which is ruled out. If $D\sim\hat{U}(\pi)$ is played by almost all the
members of the population -- which corresponds to high frequency $F_{D}$ for
$D$ -- we then have $W(D)>W(\theta)$ for all $\theta\in\lbrack0,\pi)$. The
fitness of a one-parameter quantum strategy\footnote{In Eisert et al.'s set-up
one-parameter quantum strategies correspond to mixed (randomized) classical
strategies.}, therefore, cannot be greater than that of a classical ESS, and a
one-parameter quantum strategy cannot invade a classical ESS.
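As a quick numerical check of this argument, the following minimal Python sketch
(an illustration, not part of the analysis above) evaluates the payoffs listed
above on a grid of $\theta$ values and confirms that $P(D,D)>P(\hat{U}(\theta),D)$
throughout $[0,\pi)$:
\begin{verbatim}
import math

def P_UD(theta):          # P(U(theta), D) from the payoffs above
    return math.sin(theta / 2) ** 2

P_DD = 1.0                # P(D, D)

# sample theta on [0, pi) and confirm the first ESS condition for D
thetas = [k * math.pi / 200 for k in range(200)]
assert all(P_DD > P_UD(t) for t in thetas)
print("P(D,D) > P(U(theta),D) for all sampled theta in [0, pi)")
\end{verbatim}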
\subsection{Case (\ref{caseB})}
Let $\hat{U}(\theta,\phi)$ be a two-parameter strategy from the set
(\ref{TwoParameterSet}). The expected payoffs are
\begin{align}
P(D,D) & =1\nonumber\\
P(D,\hat{U}(\theta,\phi)) & =5\cos^{2}(\phi)\cos^{2}(\theta/2)+\sin
^{2}(\theta/2)\nonumber\\
P(\hat{U}(\theta,\phi),D) & =5\sin^{2}(\phi)\cos^{2}(\theta/2)+\sin
^{2}(\theta/2)\nonumber\\
P(\hat{U}(\theta,\phi),\hat{U}(\theta,\phi)) & =3\left| \cos(2\phi)\cos
^{2}(\theta/2)\right| ^{2}+5\cos^{2}(\theta/2)\sin^{2}(\theta/2)\left|
\sin(\phi)-\cos(\phi)\right| ^{2}+\nonumber\\
& \left| \sin(2\phi)\cos^{2}(\theta/2)+\sin^{2}(\theta/2)\right| ^{2}
\end{align}
Here $P(D,D)>P(\hat{U}(\theta,\phi),D)$ if $\phi<\arcsin(1/\sqrt{5})$ and if
$P(D,D)=P(\hat{U}(\theta,\phi),D)$ then $P(D,\hat{U}(\theta,\phi))>P(\hat
{U}(\theta,\phi),\hat{U}(\theta,\phi))$. Therefore $D$ is an ESS if
$\phi<\arcsin(1/\sqrt{5})$; otherwise the strategy $\hat{U}(\theta,\phi)$ will
be in a position to invade $D$. Alternatively, if most of the members of the
population play $D\sim\hat{U}(\pi,0)$ -- meaning high frequency $F_{D}$ for
$D$ -- then the fitness $W(D)$ will remain greater than the fitness $W[\hat
{U}(\theta,\phi)]$ if $\phi<\arcsin(1/\sqrt{5})$. For $\phi>\arcsin(1/\sqrt
{5})$ the strategy $\hat{U}(\theta,\phi)$ can invade the strategy $D$, which
is the classical ESS.
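The threshold behaviour can be illustrated with a minimal Python sketch (an
illustration only; the value $\theta=\pi/3$ is an arbitrary choice), which
evaluates $P(\hat{U}(\theta,\phi),D)$ from the payoffs above on either side of
$\phi=\arcsin(1/\sqrt{5})$:
\begin{verbatim}
import math

def P_U_vs_D(theta, phi): # P(U(theta, phi), D) from the payoffs above
    return 5 * math.sin(phi) ** 2 * math.cos(theta / 2) ** 2 \
           + math.sin(theta / 2) ** 2

phi_c = math.asin(1 / math.sqrt(5))   # invasion threshold
theta = math.pi / 3                   # any theta != pi illustrates the point

print(1.0 > P_U_vs_D(theta, phi_c - 0.05))  # True:  D resists invasion
print(1.0 > P_U_vs_D(theta, phi_c + 0.05))  # False: U(theta, phi) invades
\end{verbatim}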
In this analysis the possession of a richer strategy by the mutants leads to
invasion of $D$ when $\phi>\arcsin(1/\sqrt{5})$. Such an invasion may not seem
so unusual given that the mutants exploit richer strategies. But it leads to the
third case, in which the `quantum mutants' have successfully invaded and a
two-parameter strategy $\hat{U}$ has established itself. Can some new
mutants, coming up with $\hat{Q}\sim\hat{U}(0,\pi/2)$, now invade this `quantum ESS'?
\subsection{Case (\ref{caseC})}
Eisert et al. \cite{Eisert,Eisert1} showed that in their quantum PD the
quantum strategy $\hat{Q}$, played by both the players, is the unique NE. How do
mutants playing $\hat{Q}$ fare against $\hat{U}(\theta,\phi)$, which already
exists as an ESS? To find out, the following payoffs are obtained:
\begin{align}
P(\hat{Q},\hat{Q}) & =3\nonumber\\
P(\hat{U}(\theta,\phi),\hat{Q}) & =[3-2\cos^{2}(\phi)]\cos^{2}
(\theta/2)\nonumber\\
P(\hat{Q},\hat{U}(\theta,\phi)) & =[3-2\cos^{2}(\phi)]\cos^{2}
(\theta/2)+5\sin^{2}(\theta/2)
\end{align}
Now the inequality $P(\hat{Q},\hat{Q})>P(\hat{U}(\theta,\phi),\hat{Q})$ holds
for all $\theta\in\lbrack0,\pi]$ and $\phi\in\lbrack0,\pi/2]$ except when
$\theta=0$ and $\phi=\pi/2$, which is the case when the mutant strategy
$\hat{U}(\theta,\phi)$ is the same as $\hat{Q}$. This case is obviously ruled
out. The first condition for $\hat{Q}$ to be an ESS, therefore, holds. The
condition $P(\hat{Q},\hat{Q})=P(\hat{U}(\theta,\phi),\hat{Q})$ implies
$\theta=0$ and $\phi=\pi/2$. Again we have the situation of the mutant strategy
being the same as $\hat{Q}$, and the case is neglected. If $\hat{Q}$ is played by most of
the players, meaning high frequency $F_{\hat{Q}}$ for $\hat{Q}$, then
$W(\hat{Q})>W[\hat{U}(\theta,\phi)]$ for all $\theta\in(0,\pi]$ and $\phi
\in\lbrack0,\pi/2)$. A two parameter quantum strategy $\hat{U}(\theta,\phi)$,
therefore, cannot invade the quantum ESS (i.e. the strategy $\hat{Q}\sim
\hat{U}(0,\pi/2)$). Mutants' access to richer strategies, as happened in
case (\ref{caseB}), no longer remains an advantage, because most of the
population also has access to them. Hence $\hat{Q}$ comes out as the unique NE
and ESS of the game.
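As an illustration (not part of the original analysis), a short Python scan over
the two-parameter strategy set confirms that the strict inequality
$P(\hat{Q},\hat{Q})>P(\hat{U}(\theta,\phi),\hat{Q})$ fails only at the point
corresponding to $\hat{Q}$ itself:
\begin{verbatim}
import math

def P_U_vs_Q(theta, phi): # P(U(theta, phi), Q) from the payoffs above
    return (3 - 2 * math.cos(phi) ** 2) * math.cos(theta / 2) ** 2

violations = [(round(t, 3), round(p, 3))
              for t in [k * math.pi / 50 for k in range(51)]
              for p in [k * math.pi / 100 for k in range(51)]
              if not 3.0 > P_U_vs_Q(t, p) + 1e-12]
print(violations)         # only (theta, phi) = (0, pi/2), i.e. Q itself
\end{verbatim}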
\section{ESSs in Marinatto and Weber's scheme}
What happens to PD, from the point of view of evolutionary stability, when it
is played via Marinatto and Weber's scheme \cite{Marinatto1}? In our view this
scheme is more suitable for the consideration of evolutionary stability in the
quantum regime for the following reasons:
\begin{itemize}
\item In a symmetric bi-matrix game, played in a population setting, players
have access to two pure strategies. Players can also play a mixed strategy by
combining the pure strategies with certain probabilities. In a similar way
players in Marinatto and Weber's scheme can be said to play a mixed strategy
when they apply the two unitary operators, on the initial state, with a
probabilistic combination.
\item The definition (\ref{fitnesses}) of the fitness of a pure strategy in
evolutionary games \cite{Prestwich} can be given a straightforward extension
in Marinatto and Weber's scheme. It corresponds to a situation when, in the
quantum game, a player uses only one unitary operator out of the two.
\item The theory of ESSs, in the classical domain, deals with anonymous players
possessing a discrete number of pure strategies. Eisert's scheme involves
players possessing a continuum of pure quantum strategies. The concept of an
ESS as a stable equilibrium is confronted with problems \cite{Oechssler} when
players possess a continuum of pure strategies.
\end{itemize}
\subsection{Example of quantum Prisoners' Dilemma}
Assume the PD, defined by the matrix (\ref{PDMatrix2}), is played with
Marinatto and Weber's scheme. The initial state made available to the players is
\begin{equation}
\left| \psi_{in}\right\rangle =a\left| CC\right\rangle +b\left|
DD\right\rangle \text{, \ \ \ \ with }\left| a\right| ^{2}+\left| b\right|
^{2}=1 \label{IniStatQuantumPD}
\end{equation}
where $\left| C\right\rangle \sim\left| 0\right\rangle $ and $\left|
D\right\rangle \sim\left| 1\right\rangle $. Payoffs to Alice and Bob can be
found as
\begin{align}
P_{A}(p,q) & =3\{pq\left| a\right| ^{2}+(1-p)(1-q)\left| b\right|
^{2}\}+5\{p(1-q)\left| b\right| ^{2}+q(1-p)\left| a\right| ^{2}
\}+\nonumber\\
& \{pq\left| b\right| ^{2}+(1-p)(1-q)\left| a\right| ^{2}\}\nonumber\\
P_{B}(p,q) & =3\{pq\left| a\right| ^{2}+(1-p)(1-q)\left| b\right|
^{2}\}+5\{p(1-q)\left| a\right| ^{2}+q(1-p)\left| b\right| ^{2}
\}+\nonumber\\
& \{pq\left| b\right| ^{2}+(1-p)(1-q)\left| a\right| ^{2}\}
\end{align}
where $p$ and $q$ are the probabilities for Alice and Bob, respectively, to
act with the operator $\hat{I}$. We look for symmetric Nash equilibria from
the Nash inequalities while using only the parameter $b\in\mathbf{C}$ of the
initial state $\left| \psi_{in}\right\rangle $. For the state
(\ref{IniStatQuantumPD}) the game is reduced to the classical game when
$\left| b\right| ^{2}=0$, i.e. when it is a product state. The Nash
inequalities are then
\begin{align}
P_{A}(\overset{\star}{p},\overset{\star}{q})-P_{A}(p,\overset{\star}{q}) &
=(\overset{\star}{p}-p)\{3\left| b\right| ^{2}-(\overset{\star}{q}
+1)\}\geq0\nonumber\\
P_{B}(\overset{\star}{p},\overset{\star}{q})-P_{B}(\overset{\star}{p},q) &
=(\overset{\star}{q}-q)\{3\left| b\right| ^{2}-(\overset{\star}{p}+1)\}\geq0
\end{align}
Here the parameter $b$ decides what the Nash equilibria of the game should be.
Three symmetric equilibria arise:
\begin{align}
1.\qquad\overset{\star}{p} & =\overset{\star}{q}=0\text{ \ when \ }3\left|
b\right| ^{2}\leq1\nonumber\\
2.\qquad\overset{\star}{p} & =\overset{\star}{q}=1\text{ \ when \ }3\left|
b\right| ^{2}\geq2\nonumber\\
3.\qquad\overset{\star}{p} & =\overset{\star}{q}=3\left| b\right|
^{2}-1\text{ \ when \ }1<3\left| b\right| ^{2}<2
\end{align}
The first two equilibria are independent of the parameter $b$ while the third
depends on it. We ask which of these equilibria are evolutionarily stable,
assuming that an equilibrium exists for initial states of the form
(\ref{IniStatQuantumPD}). Because the players play a symmetric game, the
payoff to a player using $\hat{I}$ with probability $p$, when opponent uses it
with the probability $q$, can be written as
\begin{align}
P(p,q) & =3\{pq\left| a\right| ^{2}+(1-p)(1-q)\left| b\right|
^{2}\}+5\{p(1-q)\left| b\right| ^{2}+q(1-p)\left| a\right| ^{2}
\}+\nonumber\\
& \{pq\left| b\right| ^{2}+(1-p)(1-q)\left| a\right| ^{2}\}
\end{align}
which can also be identified as the payoff to the $p$-player against the
$q$-player. For the strategy pair $\overset{\star}{p}=\overset{\star}{q}=0$
one gets $P(0,0)>P(p,0)$ when $3\left| b\right| ^{2}<1$, while $P(0,0)=P(p,0)$
implies $3\left| b\right| ^{2}=1$. Also $P(q,q)=-q^{2}+\frac{5}{3}(q+1)$ and
$P(0,q)=\frac{5}{3}(q+1)$. Now $P(0,q)>P(q,q)$ when $q\neq0.$ Therefore the
pair $\overset{\star}{p}=\overset{\star}{q}=0$ is an ESS when $3\left|
b\right| ^{2}\leq1$.
For the pair $\overset{\star}{p}=\overset{\star}{q}=1$ we have $P(1,1)>P(p,1)$
for $p\neq1$ when $3\left| b\right| ^{2}>2$, and $P(1,1)=P(p,1)$ for $p\neq1$
when $3\left| b\right| ^{2}=2$. In such a case $P(q,q)=-q^{2}
+\frac{1}{3}(q+7)$ and $P(1,q)=\frac{5}{3}(2-q)$. Now $P(1,q)>P(q,q)$ because
$(1-q)^{2}>0$ for $q\neq1$. Therefore $\overset{\star}{p}=\overset{\star}
{q}=1$ is an ESS when $3\left| b\right| ^{2}\geq2 $.
For the third pair, $\overset{\star}{p}=\overset{\star}{q}=3\left| b\right|
^{2}-1$, we get $P(3\left| b\right| ^{2}-1,3\left| b\right| ^{2}
-1)=-36\left| b\right| ^{6}+36\left| b\right| ^{4}-5\left| b\right|
^{2}+6$. Also we find $P(p,3\left| b\right| ^{2}-1)=-21\left| b\right|
^{4}+21\left| b\right| ^{2}-3$. Hence, the condition $P(3\left| b\right|
^{2}-1,3\left| b\right| ^{2}-1)>P(p,3\left| b\right| ^{2}-1)$ holds and
the pair $\overset{\star}{p}=\overset{\star}{q}=3\left| b\right| ^{2}-1$ is
an ESS for $1<3\left| b\right| ^{2}<2$.
All three symmetric equilibria, definable for different ranges of $\left|
b\right| ^{2}$, are also ESSs. Each of the three sets of initial states
$\left| \psi_{in}\right\rangle $ gives a unique equilibrium which is an ESS
too. Switching from one set of initial states to another also changes the
equilibrium and the ESS accordingly.
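These statements are easy to verify numerically. The following Python sketch
(illustrative only; the sampled values of $\left| b\right|^{2}$ are arbitrary)
rebuilds the symmetric payoff $P(p,q)$ given above and checks the NE and ESS
conditions for the equilibrium appropriate to each range of $3\left| b\right|^{2}$:
\begin{verbatim}
def payoff(p, q, b2):                  # symmetric payoff P(p, q) above
    a2 = 1.0 - b2
    return (3 * (p * q * a2 + (1 - p) * (1 - q) * b2)
            + 5 * (p * (1 - q) * b2 + q * (1 - p) * a2)
            + (p * q * b2 + (1 - p) * (1 - q) * a2))

def equilibrium(b2):                   # the three cases listed above
    if 3 * b2 <= 1: return 0.0
    if 3 * b2 >= 2: return 1.0
    return 3 * b2 - 1

for b2 in (0.2, 0.5, 0.8):
    ps = equilibrium(b2)
    mutants = [k / 20 for k in range(21) if abs(k / 20 - ps) > 1e-9]
    ne = all(payoff(ps, ps, b2) >= payoff(q, ps, b2) - 1e-9 for q in mutants)
    ess = all(payoff(ps, ps, b2) > payoff(q, ps, b2) + 1e-12 or
              (abs(payoff(ps, ps, b2) - payoff(q, ps, b2)) < 1e-12 and
               payoff(ps, q, b2) > payoff(q, q, b2) + 1e-12)
              for q in mutants)
    print(b2, ps, ne, ess)             # NE and ESS hold in each case
\end{verbatim}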
A question arises here: is it possible that a particular equilibrium
switches over between `ESS' and `not ESS' when the initial state changes
between some of its possible choices? The question is relevant given the fact
that the transition between the classical and the quantum game is also achieved
by a change in the initial state: classical payoffs are obtained when the initial
state is a product state. It implies that it may be possible for a symmetric
NE to switch over between being `ESS' and being `not ESS' when a game changes
between its `classical' and `quantum' forms. This possibility makes the ESS
concept interesting also from the point of view of quantum games. Because the
quantum PD, in the form considered above, does not allow such a possibility,
asymmetric bi-matrix games are investigated next.
\subsection{ESSs in two-player two-strategy asymmetric games}
ESS for an asymmetric bi-matrix game, i.e. $G=(M,N)$ when $N\neq M^{T}$, is
defined as a strict NE \cite{Weibull}. A strategy pair $(\overset{\star}
{x},\overset{\star}{y})\in S$ is an ESS of the game $G$ if it is a strict NE:
\begin{align}
1.\qquad P_{A}(\overset{\star}{x},\overset{\star}{y}) & >P_{A}
(x,\overset{\star}{y})\text{ for all }x\neq\overset{\star}{x}\nonumber\\
2.\qquad P_{B}(\overset{\star}{x},\overset{\star}{y}) & >P_{B}
(\overset{\star}{x},y)\text{ for all }y\neq\overset{\star}{y}
\end{align}
For example, consider BoS with the matrix:
\begin{equation}
\left(
\begin{array}
[c]{cc}
(\alpha,\beta) & (\gamma,\gamma)\\
(\gamma,\gamma) & (\beta,\alpha)
\end{array}
\right) \label{AsymmetricGame}
\end{equation}
where $\alpha>\beta>\gamma$. It is an asymmetric game with three classical NE
\cite{Marinatto1}:
\begin{align}
1.\qquad\overset{\star}{p_{1}} & =\overset{\star}{q_{1}}=0\nonumber\\
2.\qquad\overset{\star}{p_{2}} & =\overset{\star}{q_{2}}=1\nonumber\\
3.\qquad\overset{\star}{p_{3}} & =\frac{\alpha-\gamma}{\alpha+\beta-2\gamma
}\qquad\overset{\star}{q_{3}}=\frac{\beta-\gamma}{\alpha+\beta-2\gamma}
\end{align}
The equilibria ($1$) and ($2$) are also ESSs but ($3$) is not, because it is
not a strict NE. The asymmetric game (\ref{AsymmetricGame}) played with an
initial state $\left| \psi_{in}\right\rangle =a\left| S_{1}S_{1}
\right\rangle +b\left| S_{2}S_{2}\right\rangle $, where $S_{1}$ and $S_{2}$
are players' pure classical strategies, has the following three Nash
equilibria \cite{Marinatto1}:
\begin{align}
1.\qquad\overset{\star}{p_{1}} & =\overset{\star}{q_{1}}=1\nonumber\\
2.\qquad\overset{\star}{p_{2}} & =\overset{\star}{q_{2}}=0\nonumber\\
3.\qquad\overset{\star}{p_{3}} & =\frac{(\alpha-\gamma)\left| a\right|
^{2}+(\beta-\gamma)\left| b\right| ^{2}}{\alpha+\beta-2\gamma}\qquad
\overset{\star}{q_{3}}=\frac{(\alpha-\gamma)\left| b\right| ^{2}
+(\beta-\gamma)\left| a\right| ^{2}}{\alpha+\beta-2\gamma}\nonumber\\
&
\end{align}
Similar to the classical case, the equilibria ($1$) and ($2$) are ESSs while
($3$) is not. The first two ESSs do not depend on the parameters $a$ and $b$ of
the initial state while the third NE does. Interestingly, playing the BoS game
with a different initial state:
\begin{equation}
\left| \psi_{in}\right\rangle =a\left| S_{1}S_{2}\right\rangle +b\left|
S_{2}S_{1}\right\rangle \label{antisymmetricState}
\end{equation}
changes the scene. The payoffs to Alice and Bob are:
\begin{align}
P_{A}(p,q) & =p\left\{ -q(\alpha+\beta-2\gamma)+\alpha\left| a\right|
^{2}+\beta\left| b\right| ^{2}-\gamma\right\} +\nonumber\\
& q\left\{ \alpha\left| b\right| ^{2}+\beta\left| a\right| ^{2}
-\gamma\right\} +\gamma\nonumber\\
P_{B}(p,q) & =q\left\{ -p(\alpha+\beta-2\gamma)+\beta\left| a\right|
^{2}+\alpha\left| b\right| ^{2}-\gamma\right\} +\nonumber\\
& p\left\{ \beta\left| b\right| ^{2}+\alpha\left| a\right| ^{2}
-\gamma\right\} +\gamma
\end{align}
and there is only one NE, i.e.
\begin{equation}
\overset{\star}{p}=\frac{\beta\left| a\right| ^{2}+\alpha\left| b\right|
^{2}-\gamma}{\alpha+\beta-2\gamma}\qquad\overset{\star}{q}=\frac
{\alpha\left| a\right| ^{2}+\beta\left| b\right| ^{2}-\gamma}{\alpha
+\beta-2\gamma}
\end{equation}
which is not an ESS. Hence no ESS exists when BoS is played with the state
(\ref{antisymmetricState}).
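The absence of a strict NE can also be seen numerically. The Python sketch below
(with assumed example values $\alpha=3$, $\beta=2$, $\gamma=1$ and
$\left| a\right|^{2}=0.7$, chosen only for illustration) rebuilds the payoffs
above, locates the unique NE and checks that it is not strict, hence not an ESS:
\begin{verbatim}
alpha, beta, gamma = 3.0, 2.0, 1.0     # assumed: alpha > beta > gamma
a2 = 0.7; b2 = 1.0 - a2                # assumed |a|^2

def payoff_A(p, q):
    return (p * (-q * (alpha + beta - 2 * gamma)
                 + alpha * a2 + beta * b2 - gamma)
            + q * (alpha * b2 + beta * a2 - gamma) + gamma)

def payoff_B(p, q):
    return (q * (-p * (alpha + beta - 2 * gamma)
                 + beta * a2 + alpha * b2 - gamma)
            + p * (beta * b2 + alpha * a2 - gamma) + gamma)

ps = (beta * a2 + alpha * b2 - gamma) / (alpha + beta - 2 * gamma)
qs = (alpha * a2 + beta * b2 - gamma) / (alpha + beta - 2 * gamma)

grid = [k / 100 for k in range(101)]
is_ne = (all(payoff_A(ps, qs) >= payoff_A(p, qs) - 1e-9 for p in grid) and
         all(payoff_B(ps, qs) >= payoff_B(ps, q) - 1e-9 for q in grid))
is_strict = all(payoff_A(ps, qs) > payoff_A(p, qs) + 1e-9
                for p in grid if abs(p - ps) > 1e-9)
print(is_ne, is_strict)                # True, False: a NE but not an ESS
\end{verbatim}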
An essential requirement on a quantum version of a game is that the
corresponding classical game must be its subset. Suppose for a quantum game,
corresponding to an asymmetric bi-matrix classical game, a particular strategy
pair $(\overset{\star}{x},\overset{\star}{y})$ is an ESS for all initial
states $\left| \psi_{in}\right\rangle $ given in some particular form. That
is, it remains an ESS for all $a$ and $b$ when $\left| \psi_{in}\right\rangle
$ is given in terms of $a$ and $b$. Because classical game is a subset of the
quantum game, the strategy pair $(\overset{\star}{x},\overset{\star}{y})$
\emph{must} then also be an ESS in the classical game. On the other hand a
strategy pair $(\overset{\star}{x},\overset{\star}{y})$ which is an ESS in a
classical game \emph{may not} remain so in its quantum version.
Quantization of an asymmetric classical game can thus make the classical ESSs
disappear, but it cannot make new ESSs appear, provided an ESS in the quantum
version remains so for every possible choice of the parameters $a$ and $b$.
However, when an ESS is defined as a strict NE existing only for the set of
initial states for which that NE exists, the statement that quantization can
only make classically available ESSs disappear may not remain valid. In such a
case quantization may give rise to new ESSs definable for certain ranges of the
parameters $a$ and $b$.
To find games with the property that a particular NE switches over between
`ESS' and `not ESS' when the initial state changes between its possible
choices, we now look at the following asymmetric quantum game:
\begin{equation}
\left(
\begin{array}
[c]{cc}
(\alpha_{1},\alpha_{2}) & (\beta_{1},\beta_{2})\\
(\gamma_{1},\gamma_{2}) & (\sigma_{1},\sigma_{2})
\end{array}
\right)
\end{equation}
where
\begin{equation}
\left(
\begin{array}
[c]{cc}
\alpha_{1} & \beta_{1}\\
\gamma_{1} & \sigma_{1}
\end{array}
\right) \neq\left(
\begin{array}
[c]{cc}
\alpha_{2} & \beta_{2}\\
\gamma_{2} & \sigma_{2}
\end{array}
\right) ^{T}
\end{equation}
For the initial state $\left| \psi_{in}\right\rangle =a\left| S_{1}
S_{1}\right\rangle +b\left| S_{2}S_{2}\right\rangle $ the players' payoffs are:
\begin{align}
P_{A,B}(p,q) & =\alpha_{1,2}\left\{ pq\left| a\right| ^{2}
+(1-p)(1-q)\left| b\right| ^{2}\right\} +\nonumber\\
& \beta_{1,2}\left\{ p(1-q)\left| a\right| ^{2}+q(1-p)\left| b\right|
^{2}\right\} +\nonumber\\
& \gamma_{1,2}\left\{ p(1-q)\left| b\right| ^{2}+q(1-p)\left| a\right|
^{2}\right\} +\nonumber\\
& \sigma_{1,2}\left\{ pq\left| b\right| ^{2}+(1-p)(1-q)\left| a\right|
^{2}\right\}
\end{align}
The NE conditions are
\begin{gather}
P_{A}(\overset{\star}{p},\overset{\star}{q})-P_{A}(p,\overset{\star}
{q})=\nonumber\\
(\overset{\star}{p}-p)\left[ \left| a\right| ^{2}(\beta_{1}-\sigma
_{1})+\left| b\right| ^{2}(\gamma_{1}-\alpha_{1})-\overset{\star}{q}\left\{
(\beta_{1}-\sigma_{1})+(\gamma_{1}-\alpha_{1})\right\} \right] \geq0\\
P_{B}(\overset{\star}{p},\overset{\star}{q})-P_{B}(\overset{\star}
{p},q)=\nonumber\\
(\overset{\star}{q}-q)\left[ \left| a\right| ^{2}(\gamma_{2}-\sigma
_{2})+\left| b\right| ^{2}(\beta_{2}-\alpha_{2})-\overset{\star}{p}\left\{
(\gamma_{2}-\sigma_{2})+(\beta_{2}-\alpha_{2})\right\} \right] \geq0
\end{gather}
Let now $\overset{\star}{p}=\overset{\star}{q}=0$ be a NE:
\begin{align}
P_{A}(0,0)-P_{A}(p,0) & =-p\left[ (\beta_{1}-\sigma_{1})+\left| b\right|
^{2}\left\{ (\gamma_{1}-\alpha_{1})-(\beta_{1}-\sigma_{1})\right\} \right]
\geq0\nonumber\\
P_{B}(0,0)-P_{B}(0,q) & =-q\left[ (\gamma_{2}-\sigma_{2})+\left| b\right|
^{2}\left\{ (\beta_{2}-\alpha_{2})-(\gamma_{2}-\sigma_{2})\right\} \right]
\geq0\nonumber\\
&
\end{align}
When the strategy pair $(0,0)$ is an ESS in the classical game (i.e.
$\left| b\right| ^{2}=0$) we should have
\begin{align}
P_{A}(0,0)-P_{A}(p,0) & =-p(\beta_{1}-\sigma_{1})>0\text{ for all }
p\neq0\nonumber\\
P_{B}(0,0)-P_{B}(0,q) & =-q(\gamma_{2}-\sigma_{2})>0\text{ for all }q\neq0
\end{align}
It implies $(\beta_{1}-\sigma_{1})<0$ and $(\gamma_{2}-\sigma_{2})<0$.
For the pair $(0,0)$ not to be an ESS for some $\left| b\right| ^{2}\neq0$,
let us take $\gamma_{1}=\alpha_{1}$ and $\beta_{2}=\alpha_{2}$. We have
\begin{align}
P_{A}(0,0)-P_{A}(p,0) & =-p(\beta_{1}-\sigma_{1})\left\{ 1-\left|
b\right| ^{2}\right\} \nonumber\\
P_{B}(0,0)-P_{B}(0,q) & =-q(\gamma_{2}-\sigma_{2})\left\{ 1-\left|
b\right| ^{2}\right\}
\end{align}
and the pair $(0,0)$ does not remain an ESS at $\left| b\right| ^{2}=1$. A
game with these properties is given by the matrix:
\begin{equation}
\left(
\begin{array}
[c]{cc}
(1,1) & (1,2)\\
(2,1) & (3,2)
\end{array}
\right)
\end{equation}
For this game the pair $(0,0)$ is an ESS when $\left| b\right| ^{2}=0$
(classical game) but it is not when, for example, $\left| b\right| ^{2}
=\frac{1}{2}$, though it remains a NE in both cases. The example shows that a
NE can be switched between `ESS' and `not ESS' by adjusting the parameters $a$
and $b$ of the initial state. In contrast to the previous case, initial
states different from the one corresponding to the classical game can also
make a strategy pair an ESS. An example of a game for which this happens is
\begin{equation}
\begin{array}
[c]{c}
S_{1}\\
S_{2}
\end{array}
\overset{
\begin{array}
[c]{cc}
S_{1} & S_{2}
\end{array}
}{\left(
\begin{array}
[c]{cc}
(2,1) & (1,0)\\
(1,0) & (1,0)
\end{array}
\right) } \label{ExampleGame1}
\end{equation}
Playing this game again via $\left| \psi_{in}\right\rangle =a\left|
S_{1}S_{1}\right\rangle +b\left| S_{2}S_{2}\right\rangle $ gives the
following payoff differences for the strategy pair $(0,0)$:
\begin{equation}
P_{A}(0,0)-P_{A}(p,0)=p\left| b\right| ^{2}\ \ \text{and}\ \ P_{B}
(0,0)-P_{B}(0,q)=q\left| b\right| ^{2}
\end{equation}
for Alice and Bob respectively. Therefore (\ref{ExampleGame1}) is an example
of a game for which the pair $(0,0)$ is not an ESS when the initial state
corresponds to the classical game. But the pair is an ESS for other initial
states for which $0<\left| b\right| ^{2}<1$.
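A few lines of Python (illustrative only; Bob's condition is analogous with
$q\left| b\right|^{2}$) make the switch-over explicit for the game
(\ref{ExampleGame1}), using the payoff difference for the pair $(0,0)$ obtained
earlier for the general asymmetric game:
\begin{verbatim}
def diff_A(p, b2):  # P_A(0,0) - P_A(p,0) for the game (2,1),(1,0);(1,0),(1,0)
    beta1, sigma1, gamma1, alpha1 = 1.0, 1.0, 1.0, 2.0
    return -p * ((beta1 - sigma1)
                 + b2 * ((gamma1 - alpha1) - (beta1 - sigma1)))

for b2 in (0.0, 0.25, 0.5, 0.75):
    strict = all(diff_A(p, b2) > 0 for p in [k / 20 for k in range(1, 21)])
    print(b2, strict)   # False for b2 = 0 (classical), True otherwise
\end{verbatim}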
\subsection{ESSs in two-player two-strategy symmetric games}
To explore a possible relation between evolutionary stability and quantization
consider the following symmetric bi-matrix game:
\begin{equation}
\begin{array}
[c]{c}
\text{Alice}
\end{array}
\begin{array}
[c]{c}
S_{1}\\
S_{2}
\end{array}
\overset{\overset{
\begin{array}
[c]{c}
\text{Bob}
\end{array}
}{
\begin{array}
[c]{cc}
S_{1} & S_{2}
\end{array}
}}{\left(
\begin{array}
[c]{cc}
(\alpha,\alpha) & (\beta,\gamma)\\
(\gamma,\beta) & (\delta,\delta)
\end{array}
\right) } \label{PayoffMatrixGen2Player}
\end{equation}
which is played by an initial state:
\begin{equation}
\left| \psi_{in}\right\rangle =a\left| S_{1}S_{1}\right\rangle +b\left|
S_{2}S_{2}\right\rangle \text{, \ \ with }\left| a\right| ^{2}+\left|
b\right| ^{2}=1 \label{IniStatGen2Player}
\end{equation}
Let Alice's strategy consist of applying the identity operator $\hat{I}$ with
probability $p$ and the operator $\hat{\sigma}_{x}$ with probability $(1-p)$
on the initial state, written $\rho_{in}$ in density matrix notation. Similarly
Bob applies the operators $\hat{I}$ and $\hat{\sigma}_{x} $ with the
probabilities $q$ and $(1-q)$ respectively. The final state is
\begin{equation}
\rho_{fin}=\underset{\hat{U}=\hat{I},\hat{\sigma}_{x}}{\sum}\Pr(\hat{U}
_{A})\Pr(\hat{U}_{B})[\hat{U}_{A}\otimes\hat{U}_{B}\rho_{in}\hat{U}
_{A}^{\dagger}\otimes\hat{U}_{B}^{\dagger}]
\end{equation}
where the unitary and Hermitian operator $\hat{U}$ is either $\hat{I}$ or
$\hat{\sigma}_{x}$. $\Pr(\hat{U}_{A})$, $\Pr(\hat{U}_{B})$ are the
probabilities, for Alice and Bob respectively, to apply the operator on the
initial state. The matrix $\rho_{fin}$ is obtained from $\rho_{in}$ by a
convex combination of players' possible quantum operations. Payoff operators
for Alice and Bob are \cite{Marinatto1}
\begin{equation}
(P_{A,B})_{oper}=\alpha,\alpha\left| S_{1}S_{1}\right\rangle \left\langle
S_{1}S_{1}\right| +\beta,\gamma\left| S_{1}S_{2}\right\rangle \left\langle
S_{1}S_{2}\right| +\gamma,\beta\left| S_{2}S_{1}\right\rangle \left\langle
S_{2}S_{1}\right| +\delta,\delta\left| S_{2}S_{2}\right\rangle \left\langle
S_{2}S_{2}\right|
\end{equation}
The payoffs are then obtained as mean values of these operators i.e.
$P_{A,B}=$Tr$\left[ (P_{A,B})_{oper}\rho_{fin}\right] $. Because the quantum
game is symmetric with the initial state (\ref{IniStatGen2Player}) and the
payoff matrix (\ref{PayoffMatrixGen2Player}), there is no need for subscripts.
We can, then, write the payoff to a $p$-player against a $q$-player as
$P(p,q)$, where the first entry is the focal player's move. When
$\overset{\star}{p}$ is a NE we find the following payoff difference:
\begin{gather}
P(\overset{\star}{p},\overset{\star}{p})-P(p,\overset{\star}{p})=(\overset
{\star}{p}-p){\LARGE [}\left| a\right| ^{2}(\beta-\delta)+\nonumber\\
\left| b\right| ^{2}(\gamma-\alpha)-\overset{\star}{p}\left\{ (\beta
-\delta)+(\gamma-\alpha)\right\} {\LARGE ]} \label{General2PlayerDifference1}
\end{gather}
Now the ESS conditions for the pure strategy $p=0$ are given as
\begin{gather}
1.\text{ \ \ \ }\left| b\right| ^{2}\left\{ (\beta-\delta)-(\gamma
-\alpha)\right\} >(\beta-\delta)\nonumber\\
2.\text{ If }\left| b\right| ^{2}\left\{ (\beta-\delta)-(\gamma
-\alpha)\right\} =(\beta-\delta)\nonumber\\
\text{then }q^{2}\left\{ (\beta-\delta)+(\gamma-\alpha)\right\} >0
\end{gather}
where $1$ is the NE condition. Similarly the ESS conditions for the pure
strategy $p=1$ are
\begin{gather}
1.\text{ \ \ \ }\left| b\right| ^{2}\left\{ (\gamma-\alpha)-(\beta
-\delta)\right\} >(\gamma-\alpha)\nonumber\\
2.\text{ If }\left| b\right| ^{2}\left\{ (\gamma-\alpha)-(\beta
-\delta)\right\} =(\gamma-\alpha)\nonumber\\
\text{then }(1-q)^{2}\left\{ (\beta-\delta)+(\gamma-\alpha)\right\} >0
\end{gather}
Because these conditions, for both the pure strategies $p=1$ and $p=0$, depend
on $\left| b\right| ^{2}$, there can be examples of two-player
symmetric games for which the evolutionary stability of pure strategies can be
changed while playing the game using an initial state of the form $\left|
\psi_{in}\right\rangle =a\left| S_{1}S_{1}\right\rangle +b\left| S_{2}
S_{2}\right\rangle $. However, for the mixed NE, given as $\overset{\star}
{p}=\frac{\left| a\right| ^{2}(\beta-\delta)+\left| b\right| ^{2}
(\gamma-\alpha)}{(\beta-\delta)+(\gamma-\alpha)}$, the corresponding payoff
difference (\ref{General2PlayerDifference1}) becomes identically zero. From
the second condition of an ESS we find for the mixed NE $\overset{\star}{p}$
the difference
\begin{align}
& P(\overset{\star}{p},q)-P(q,q)=\frac{1}{(\beta-\delta)+(\gamma-\alpha
)}\times\nonumber\\
& {\LARGE [}(\beta-\delta)-q\left\{ (\beta-\delta)+(\gamma-\alpha)\right\}
-\left| b\right| ^{2}\left\{ (\beta-\delta)-(\gamma-\alpha)\right\}
{\LARGE ]}^{2}
\end{align}
Therefore, the mixed strategy $\overset{\star}{p}$ is an ESS when $\left\{
(\beta-\delta)+(\gamma-\alpha)\right\} >0$. This condition, making the mixed
NE $\overset{\star}{p}$ an ESS, is independent \footnote{An alternative
possibility is to adjust $\left| b\right| ^{2}$=$\frac{(\beta-\delta
)-q\left\{ (\beta-\delta)+(\gamma-\alpha)\right\} }{\left\{ (\beta
-\delta)-(\gamma-\alpha)\right\} }$ which makes the difference $\left\{
P(\overset{\star}{p},q)-P(q,q)\right\} $ identically zero. The mixed strategy
$\overset{\star}{p}$ then does not remain an ESS. However such `mutant
dependent' adjustment of $\left| b\right| ^{2}$ is not reasonable because
the mutant strategy $q$ can be anything in the range $[0,1]$.} of $\left|
b\right| ^{2}$. Consequently, in this symmetric two-player quantum game, the
evolutionary stability of the mixed NE $\overset{\star}{p}$ cannot be changed
when the game is played using initial quantum states of the form
(\ref{IniStatGen2Player}).
However, evolutionary stability of pure strategies can be affected, with this
form of the initial states, for two-player symmetric games. Examples of the
games with this property are easy to find. For the class of games for which
$\gamma=\alpha$ and $(\beta-\delta)<0$, the strategies $p=0$ and $p=1$ remain
NE for all $\left| b\right| ^{2}\in\lbrack0,1]$; but the strategy $p=1$ is
not an ESS when $\left| b\right| ^{2}=0$ and the strategy $p=0$ is not an
ESS when $\left| b\right| ^{2}=1$.
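A concrete instance can be tested with the short Python sketch below (the matrix
entries $\alpha=1$, $\beta=0$, $\gamma=1$, $\delta=2$ are assumed example values
satisfying $\gamma=\alpha$ and $\beta-\delta<0$); it rebuilds the Marinatto and
Weber payoff $P(p,q)$ and checks the ESS property of $p=0$ and $p=1$ directly:
\begin{verbatim}
alpha, beta, gamma, delta = 1.0, 0.0, 1.0, 2.0   # assumed example values

def payoff(p, q, b2):        # Marinatto-Weber payoff for the matrix above
    a2 = 1.0 - b2
    return (alpha * (p * q * a2 + (1 - p) * (1 - q) * b2)
            + beta * (p * (1 - q) * a2 + q * (1 - p) * b2)
            + gamma * (p * (1 - q) * b2 + q * (1 - p) * a2)
            + delta * (p * q * b2 + (1 - p) * (1 - q) * a2))

def is_ess(ps, b2):
    mutants = [k / 50 for k in range(51) if abs(k / 50 - ps) > 1e-9]
    return all(payoff(ps, ps, b2) > payoff(q, ps, b2) + 1e-12 or
               (abs(payoff(ps, ps, b2) - payoff(q, ps, b2)) < 1e-12 and
                payoff(ps, q, b2) > payoff(q, q, b2) + 1e-12)
               for q in mutants)

for b2 in (0.0, 0.5, 1.0):
    print(b2, is_ess(0.0, b2), is_ess(1.0, b2))
# p = 1 fails to be an ESS only at b2 = 0; p = 0 fails only at b2 = 1
\end{verbatim}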
\subsubsection{An example}
Consider the symmetric bi-matrix game (\ref{PayoffMatrixGen2Player}) with the
constants $\alpha,\beta,\gamma,\delta$ satisfying the conditions:
\begin{align}
\alpha,\beta,\gamma,\delta & \geq0\nonumber\\
(\delta-\beta) & >0\nonumber\\
(\gamma-\alpha) & \geq0\nonumber\\
(\gamma-\alpha) & <(\delta-\beta) \label{GameDefinition}
\end{align}
The condition making $(p^{\star},p^{\star})$ a NE is given by
(\ref{General2PlayerDifference1}). For this game three Nash equilibria arise
i.e. two pure strategies $p^{\ast}=0$, $p^{\ast}=1$, and one mixed strategy
$p^{\ast}=\frac{(\delta-\beta)\left| a\right| ^{2}-(\gamma-\alpha)\left|
b\right| ^{2}}{(\delta-\beta)-(\gamma-\alpha)}$. These three cases are
considered below.
\paragraph{Case $p^{\star}=0$}
For the strategy $p^{\star}=0$ to be a NE one requires
\begin{equation}
P(0,0)-P(p,0)=p\left\{ (\gamma-\alpha)+(\delta-\beta)\right\} \left[ \left|
a\right| ^{2}-\frac{(\gamma-\alpha)}{(\gamma-\alpha)+(\delta-\beta)}\right]
\geq0 \label{Difference1Symmetric}
\end{equation}
and the difference $\left\{ P(0,0)-P(p,0)\right\} >0$ when $1\geq\left|
a\right| ^{2}>\frac{(\gamma-\alpha)}{(\gamma-\alpha)+(\delta-\beta)}$. In
this range of $\left| a\right| ^{2}$ the equilibrium $p^{\star}=0$ is a pure
ESS. However, when $\left| a\right| ^{2}=\frac{(\gamma-\alpha)}
{(\gamma-\alpha)+(\delta-\beta)}$ we have the difference $\left\{
P(0,0)-P(p,0)\right\} $ identically zero. The strategy $p^{\star}=0$ can be
an ESS if
\begin{align}
& P(0,p)-P(p,p)\nonumber\\
& =p\left\{ (\gamma-\alpha)+(\delta-\beta)\right\} \left\{ \left|
a\right| ^{2}-\frac{(1-p)(\gamma-\alpha)+p(\delta-\beta)}{(\gamma
-\alpha)+(\delta-\beta)}\right\} >0
\end{align}
that can be written as
\begin{equation}
P(0,p)-P(p,p)=p\left\{ (\gamma-\alpha)+(\delta-\beta)\right\} \left\{
\left| a\right| ^{2}-\digamma\right\} >0
\end{equation}
where $\frac{(\gamma-\alpha)}{(\gamma-\alpha)+(\delta-\beta)}\leq\digamma
\leq\frac{(\delta-\beta)}{(\gamma-\alpha)+(\delta-\beta)}$ when $0\leq
p\leq1.$ The strategy $p^{\star}=0$ can be an ESS only when $\left| a\right|
^{2}>\frac{(\delta-\beta)}{(\gamma-\alpha)+(\delta-\beta)}$ which is not
possible because $\left| a\right| ^{2}$ is fixed at $\frac{(\gamma-\alpha
)}{(\gamma-\alpha)+(\delta-\beta)}.$ Therefore the strategy $p^{\star}=0$ is
an ESS for $1\geq\left| a\right| ^{2}>\frac{(\gamma-\alpha)}{(\gamma
-\alpha)+(\delta-\beta)}$ and for $\left| a\right| ^{2}=\frac{(\gamma
-\alpha)}{(\gamma-\alpha)+(\delta-\beta)}$ this NE becomes unstable. The
classical game is obtained by taking $\left| a\right| ^{2}=1$ for which
$p^{\star}=0$ is an ESS or a stable NE. However this NE does not remain stable
for $\left| a\right| ^{2}=\frac{(\gamma-\alpha)}{(\gamma-\alpha
)+(\delta-\beta)}$ which corresponds to an entangled initial state; though the
NE remains intact in both forms of the game.
\paragraph{Case $p^{\star}=1$}
Similar to the last case the NE condition for the strategy $p^{\star}=1$ can
be written as
\begin{equation}
P(1,1)-P(p,1)=(1-p)\left\{ (\gamma-\alpha)+(\delta-\beta)\right\} \left[ -\left|
a\right| ^{2}+\frac{(\delta-\beta)}{(\gamma-\alpha)+(\delta-\beta)}\right]
\geq0 \label{Difference2Symmetric}
\end{equation}
Now $p^{\star}=1$ is a pure ESS for $0\leq\left| a\right| ^{2}<\frac
{(\delta-\beta)}{(\gamma-\alpha)+(\delta-\beta)}$. For $\left| a\right|
^{2}=\frac{(\delta-\beta)}{(\gamma-\alpha)+(\delta-\beta)}$ the difference
$\left\{ P(1,1)-P(p,1)\right\} $ becomes identically zero. The strategy
$p^{\star}=1$ is an ESS when
\begin{align}
& P(1,p)-P(p,p)\nonumber\\
& =(1-p)\left\{ (\gamma-\alpha)+(\delta-\beta)\right\} \left\{ -\left|
a\right| ^{2}+\frac{(1-p)(\gamma-\alpha)+p(\delta-\beta)}{(\gamma
-\alpha)+(\delta-\beta)}\right\} >0\nonumber\\
&
\end{align}
It is possible only if $\left| a\right| ^{2}<\frac{(\gamma-\alpha)}
{(\gamma-\alpha)+(\delta-\beta)}.$ Therefore the strategy $p^{\star}=1$ is a
stable NE (ESS) for $0\leq\left| a\right| ^{2}<\frac{(\delta-\beta)}
{(\gamma-\alpha)+(\delta-\beta)}.$ It is not stable classically (i.e. for
$\left| a\right| ^{2}=1$) but becomes stable for an entangled initial state.
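The thresholds found in the two cases above can be confirmed numerically. The
sketch below (with assumed example values $\alpha=1$, $\beta=0$, $\gamma=2$,
$\delta=3$, which satisfy the conditions (\ref{GameDefinition})) tests the ESS
property of the two pure strategies just above and below the critical values of
$\left| a\right|^{2}$:
\begin{verbatim}
alpha, beta, gamma, delta = 1.0, 0.0, 2.0, 3.0   # assumed example values

def payoff(p, q, a2):        # Marinatto-Weber payoff for the matrix above
    b2 = 1.0 - a2
    return (alpha * (p * q * a2 + (1 - p) * (1 - q) * b2)
            + beta * (p * (1 - q) * a2 + q * (1 - p) * b2)
            + gamma * (p * (1 - q) * b2 + q * (1 - p) * a2)
            + delta * (p * q * b2 + (1 - p) * (1 - q) * a2))

def is_ess(ps, a2):
    mutants = [k / 50 for k in range(51) if abs(k / 50 - ps) > 1e-9]
    return all(payoff(ps, ps, a2) > payoff(q, ps, a2) + 1e-12 or
               (abs(payoff(ps, ps, a2) - payoff(q, ps, a2)) < 1e-12 and
                payoff(ps, q, a2) > payoff(q, q, a2) + 1e-12)
               for q in mutants)

t0 = (gamma - alpha) / ((gamma - alpha) + (delta - beta))  # 1/4, for p* = 0
t1 = (delta - beta) / ((gamma - alpha) + (delta - beta))   # 3/4, for p* = 1
print(is_ess(0.0, t0 + 0.1), is_ess(0.0, t0 - 0.1))        # True, False
print(is_ess(1.0, t1 - 0.1), is_ess(1.0, t1 + 0.1))        # True, False
\end{verbatim}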
\paragraph{Case $p^{\star}=\frac{(\delta-\beta)\left| a\right| ^{2}
-(\gamma-\alpha)\left| b\right| ^{2}}{(\delta-\beta)-(\gamma-\alpha)}$}
In case of the mixed strategy:
\begin{equation}
p^{\star}=\frac{(\delta-\beta)\left| a\right| ^{2}-(\gamma-\alpha)\left|
b\right| ^{2}}{(\delta-\beta)-(\gamma-\alpha)} \label{MixedStrategySmmetric}
\end{equation}
the NE condition (\ref{General2PlayerDifference1}) turns into
\begin{equation}
P(p^{\star},p^{\star})-P(p,p^{\star})=0 \label{balance}
\end{equation}
The mixed strategy (\ref{MixedStrategySmmetric}) can be an ESS if
\begin{align}
& P(p^{\star},p)-P(p,p)\nonumber\\
& =(p^{\star}-p)\left[ -\left| a\right| ^{2}(\delta-\beta)+\left|
b\right| ^{2}(\gamma-\alpha)+p\left\{ (\delta-\beta)-(\gamma-\alpha
)\right\} \right] >0\nonumber\\
& \label{Difference3Symmetric}
\end{align}
for all $p\neq p^{\star}$. Now write the strategy $p$ as $p=p^{\star
}+\bigtriangleup$. For the mixed strategy (\ref{MixedStrategySmmetric}) the
payoff difference of Eq. (\ref{Difference3Symmetric}) reduces to
\begin{equation}
P(p^{\star},p)-P(p,p)=-\bigtriangleup^{2}\left\{ (\delta-\beta)-(\gamma
-\alpha)\right\}
\end{equation}
Hence, for the game defined in the conditions (\ref{GameDefinition}), the
mixed strategy $p^{\star}=\frac{(\delta-\beta)\left| a\right| ^{2}
-(\gamma-\alpha)\left| b\right| ^{2}}{(\delta-\beta)-(\gamma-\alpha)}$
cannot be an ESS, though it can be a NE of the symmetric game.
It is to be pointed out that the above considerations apply when the game is
played with the initial state (\ref{IniStatGen2Player}).
To find examples of symmetric quantum games where evolutionary stability of
the mixed strategies may also be affected by controlling the initial states,
the number of players is now increased from two to three.
\subsection{ESSs in three-player two-strategy symmetric games}
In extending the two-player scheme to a three-player case, we assume that
three players $A,B,$ and $C$ play their strategies by applying the identity
operator $\hat{I}$ with the probabilities $p,q$ and $r$ respectively on the
initial state $\left| \psi_{in}\right\rangle $. Therefore, they apply the
operator $\hat{\sigma}_{x}$ with the probabilities $(1-p),(1-q)$ and $(1-r)$
respectively. The final state becomes
\begin{equation}
\rho_{fin}=\underset{\hat{U}=\hat{I},\hat{\sigma}_{x}}{\sum}\Pr(\hat{U}
_{A})\Pr(\hat{U}_{B})\Pr(\hat{U}_{C})\left[ \hat{U}_{A}\otimes\hat{U}
_{B}\otimes\hat{U}_{C}\rho_{in}\hat{U}_{A}^{\dagger}\otimes\hat{U}
_{B}^{\dagger}\otimes\hat{U}_{C}^{\dagger}\right]
\end{equation}
where the $8$ basis vectors are $\left| S_{i}S_{j}S_{k}\right\rangle $, for
$i,j,k=1,2$. Again we use initial quantum state in the form $\left| \psi
_{in}\right\rangle =a\left| S_{1}S_{1}S_{1}\right\rangle +b\left| S_{2}
S_{2}S_{2}\right\rangle $, where $\left| a\right| ^{2}+\left| b\right|
^{2}=1$. It is a quantum state in $2\otimes2\otimes2$ dimensional Hilbert
space that can be prepared from a system of three two-state quantum systems or
qubits. Similar to the two-player case, the payoff operators for the players
$A,$ $B,$ and $C$ can be defined as
\begin{align}
& (P_{A,B,C})_{oper}=\nonumber\\
& \alpha_{1},\beta_{1},\eta_{1}\left| S_{1}S_{1}S_{1}\right\rangle
\left\langle S_{1}S_{1}S_{1}\right| +\alpha_{2},\beta_{2},\eta_{2}\left|
S_{2}S_{1}S_{1}\right\rangle \left\langle S_{2}S_{1}S_{1}\right| +\nonumber\\
& \alpha_{3},\beta_{3},\eta_{3}\left| S_{1}S_{2}S_{1}\right\rangle
\left\langle S_{1}S_{2}S_{1}\right| +\alpha_{4},\beta_{4},\eta_{4}\left|
S_{1}S_{1}S_{2}\right\rangle \left\langle S_{1}S_{1}S_{2}\right| +\nonumber\\
& \alpha_{5},\beta_{5},\eta_{5}\left| S_{1}S_{2}S_{2}\right\rangle
\left\langle S_{1}S_{2}S_{2}\right| +\alpha_{6},\beta_{6},\eta_{6}\left|
S_{2}S_{1}S_{2}\right\rangle \left\langle S_{2}S_{1}S_{2}\right| +\nonumber\\
& \alpha_{7},\beta_{7},\eta_{7}\left| S_{2}S_{2}S_{1}\right\rangle
\left\langle S_{2}S_{2}S_{1}\right| +\alpha_{8},\beta_{8},\eta_{8}\left|
S_{2}S_{2}S_{2}\right\rangle \left\langle S_{2}S_{2}S_{2}\right|
\end{align}
where $\alpha_{l},\beta_{l},\eta_{l}$ for $1\leq l\leq8$ are $24$ constants of
the matrix of this three-player game. Payoffs to the players $A,B,$ and $C$
are then obtained as mean values of these operators
\begin{equation}
P_{A,B,C}(p,q,r)=\text{Tr}\left[ (P_{A,B,C})_{oper}\rho_{fin}\right]
\end{equation}
Here, similar to the two-player case, the classical payoffs can be obtained by
making the initial quantum state unentangled, i.e. by fixing $\left| b\right|
^{2}=0$. To get a symmetric game we define $P_{A}(x,y,z)$ as the payoff to
player $A$ when players $A$, $B$, and $C$ play the strategies $x$, $y$, and
$z$ respectively. With the following relations the players' payoffs become
independent of the players' identities:
\begin{gather}
P_{A}(x,y,z)=P_{A}(x,z,y)=P_{B}(y,x,z)\nonumber\\
=P_{B}(z,x,y)=P_{C}(y,z,x)=P_{C}(z,y,x) \label{3PlayerSymmetricConditions}
\end{gather}
The players in the game then become anonymous and their payoffs depend only on
their strategies. The relations (\ref{3PlayerSymmetricConditions}) hold with
the following replacements for $\beta_{i}$ and $\eta_{i}$:
\begin{align}
\beta_{1} & \rightarrow\alpha_{1}\qquad\beta_{2}\rightarrow\alpha_{3}
\qquad\beta_{3}\rightarrow\alpha_{2}\qquad\beta_{4}\rightarrow\alpha
_{3}\nonumber\\
\beta_{5} & \rightarrow\alpha_{6}\qquad\beta_{6}\rightarrow\alpha_{5}
\qquad\beta_{7}\rightarrow\alpha_{6}\qquad\beta_{8}\rightarrow\alpha
_{8}\nonumber\\
\eta_{1} & \rightarrow\alpha_{1}\qquad\eta_{2}\rightarrow\alpha_{3}
\qquad\eta_{3}\rightarrow\alpha_{3}\qquad\eta_{4}\rightarrow\alpha
_{2}\nonumber\\
\eta_{5} & \rightarrow\alpha_{6}\qquad\eta_{6}\rightarrow\alpha_{6}
\qquad\eta_{7}\rightarrow\alpha_{5}\qquad\eta_{8}\rightarrow\alpha_{8}
\end{align}
Also, it is now necessary that
\begin{equation}
\alpha_{6}=\alpha_{7}\text{, }\alpha_{3}=\alpha_{4}
\end{equation}
A symmetric game between three players, therefore, can be defined by only six
constants of the payoff matrix. These constants can be taken as $\alpha
_{1},\alpha_{2},\alpha_{3},\alpha_{5},\alpha_{6},$ and $\alpha_{8}$. Payoff to
a $p$-player, when other two players play $q$ and $r$, can now be written as
$P(p,q,r)$. A symmetric NE $\overset{\star}{p}$ is now found from the Nash
condition $P(\overset{\star}{p},\overset{\star}{p},\overset{\star}
{p})-P(p,\overset{\star}{p},\overset{\star}{p})\geq0$ i.e.
\begin{gather}
P(\overset{\star}{p},\overset{\star}{p},\overset{\star}{p})-P(p,\overset
{\star}{p},\overset{\star}{p})=(\overset{\star}{p}-p)\text{{\LARGE [}}
\overset{\star}{p}^{2}(1-2\left| b\right| ^{2})(\sigma+\omega-2\eta
)+\nonumber\\
2\overset{\star}{p}\left\{ \left| b\right| ^{2}(\sigma+\omega-2\eta
)-\omega+\eta\right\} +\left\{ \omega-\left| b\right| ^{2}(\sigma
+\omega)\right\} \text{{\LARGE ]}}\geq0
\end{gather}
where
\begin{align}
(\alpha_{1}-\alpha_{2}) & =\sigma\nonumber\\
(\alpha_{3}-\alpha_{6}) & =\eta\nonumber\\
(\alpha_{5}-\alpha_{8}) & =\omega
\end{align}
Three possible NE are found as
\begin{equation}
\left.
\begin{array}
[c]{c}
\overset{\star}{p}_{1}=\frac{\left\{ (\omega-\eta)-\left| b\right|
^{2}(\sigma+\omega-2\eta)\right\} \pm\sqrt{\left\{ (\sigma+\omega
)^{2}-(2\eta)^{2}\right\} \left| b\right| ^{2}(1-\left| b\right|
^{2})+(\eta^{2}-\sigma\omega)}}{(1-2\left| b\right| ^{2})(\sigma
+\omega-2\eta)}\\%
\begin{array}
[c]{c}
\overset{\star}{p}_{2}=0\\
\overset{\star}{p}_{3}=1
\end{array}
\end{array}
\right\}
\end{equation}
It is observed that the mixed NE $\overset{\star}{p_{1}}$ makes the difference
$\left\{ P(\overset{\star}{p},\overset{\star}{p},\overset{\star}
{p})-P(p,\overset{\star}{p},\overset{\star}{p})\right\} $ identically zero
and two values for $\overset{\star}{p}_{1}$ can be found for a given $\left|
b\right| ^{2}$. Apart from $\overset{\star}{p}_{1}$ the other two NE (i.e.
$\overset{\star}{p}_{2}$ and $\overset{\star}{p}_{3}$) are pure strategies.
Also now $\overset{\star}{p}_{1}$ comes out as a NE without imposing further
restrictions on the matrix of the symmetric three-player game. However, the
pure strategies $\overset{\star}{p}_{2}$ and $\overset{\star}{p}_{3}$ can be
NE when further restrictions are imposed on the matrix of the game. For
example, $\overset{\star}{p}_{3}$ can be a NE provided $\sigma\geq
(\omega+\sigma)\left| b\right| ^{2}$ for all $\left| b\right| ^{2}
\in\lbrack0,1]$. Similarly $\overset{\star}{p}_{2}$ can be NE when $\omega
\leq(\omega+\sigma)\left| b\right| ^{2}$.
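The mixed equilibrium $\overset{\star}{p}_{1}$ can be checked directly against
the operator construction described above. The Python sketch below (the eight
payoff constants are assumed example values, chosen only so that the root lies
in $[0,1]$) builds the three-player payoff from the initial state
$a\left| S_{1}S_{1}S_{1}\right\rangle +b\left| S_{2}S_{2}S_{2}\right\rangle $
and verifies that the quoted root of the quadratic makes the payoff difference
vanish against every mutant strategy:
\begin{verbatim}
from itertools import product

# payoff constants alpha_1..alpha_8 indexed by the basis state (assumed
# example values; alpha_4 = alpha_3 and alpha_7 = alpha_6 for symmetry)
alpha = {(1,1,1): 3.0, (2,1,1): 1.0, (1,2,1): 1.0, (1,1,2): 1.0,
         (1,2,2): 0.0, (2,1,2): 1.0, (2,2,1): 1.0, (2,2,2): 1.0}

def flip(state, s):        # apply sigma_x (S1 <-> S2) where s_i = 1
    return tuple(3 - x if f else x for x, f in zip(state, s))

def payoff_A(p, q, r, b2): # identity applied with prob. p, q, r
    a2, total = 1.0 - b2, 0.0
    for s in product((0, 1), repeat=3):
        prob = ((p if s[0] == 0 else 1 - p) * (q if s[1] == 0 else 1 - q)
                * (r if s[2] == 0 else 1 - r))
        total += prob * (a2 * alpha[flip((1, 1, 1), s)]
                         + b2 * alpha[flip((2, 2, 2), s)])
    return total

sigma = alpha[(1,1,1)] - alpha[(2,1,1)]   # = 2
eta   = alpha[(1,2,1)] - alpha[(2,1,2)]   # = 0
omega = alpha[(1,2,2)] - alpha[(2,2,2)]   # = -1

b2 = 0.3
num  = (omega - eta) - b2 * (sigma + omega - 2 * eta)
disc = ((sigma + omega) ** 2 - (2 * eta) ** 2) * b2 * (1 - b2) \
       + (eta ** 2 - sigma * omega)
p1 = (num + disc ** 0.5) / ((1 - 2 * b2) * (sigma + omega - 2 * eta))

diffs = [payoff_A(p1, p1, p1, b2) - payoff_A(q, p1, p1, b2)
         for q in (0.0, 0.25, 0.5, 0.75, 1.0)]
print(round(p1, 4), max(abs(d) for d in diffs))  # p1 in [0,1], differences ~ 0
\end{verbatim}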
Now we address the question: how can the evolutionary stability of these three
NE be affected when the game is played via initial quantum states of the
following form?
\begin{equation}
\left| \psi_{in}\right\rangle =a\left| S_{1}S_{1}S_{1}\right\rangle
+b\left| S_{2}S_{2}S_{2}\right\rangle \label{IniStatSymm3Player}
\end{equation}
For the two-player asymmetric game of BoS we showed that out of three NE only
two can be evolutionarily stable. In classical evolutionary game theory the
concept of an ESS is well known \cite{MarkBroom1,BroomMutiplayer} to be
extendable to the multi-player case. When mutants are allowed to play only one
strategy, the definition of an ESS for the three-player case is written as \cite{MarkBroom1}
\begin{align}
1.\text{ \ \ \ }P(p,p,p) & >P(q,p,p)\nonumber\\
2.\text{ If }P(p,p,p) & =P(q,p,p)\text{ then }P(p,q,p)>P(q,q,p)
\end{align}
Here $p$ is a NE if it satisfies the condition $1$ against all $q\neq p$. For
our case the ESS conditions for the pure strategies $\overset{\star}{p}_{2}$
and $\overset{\star}{p}_{3}$ can be written as follows. For example
$\overset{\star}{p}_{2}=0$ is an ESS when
\begin{align}
1.\text{ \ \ \ }\sigma\left| b\right| ^{2} & >\omega\left| a\right|
^{2}\nonumber\\
2.\text{ If }\sigma\left| b\right| ^{2} & =\omega\left| a\right|
^{2}\text{ then }-\eta q^{2}(\left| a\right| ^{2}-\left| b\right| ^{2})>0
\label{Cond3PlayerSymme1}
\end{align}
where $1$ is NE condition for the strategy $\overset{\star}{p}_{2}=0$.
Similarly $\overset{\star}{p}_{3}=1$ is an ESS when
\begin{align}
1.\text{ \ \ \ }\sigma\left| a\right| ^{2} & >\omega\left| b\right|
^{2}\nonumber\\
2.\text{ If }\sigma\left| a\right| ^{2} & =\omega\left| b\right|
^{2}\text{ then }\eta(1-q)^{2}(\left| a\right| ^{2}-\left| b\right|
^{2})>0 \label{Conditions3PlayerSymm}
\end{align}
and both the pure strategies $\overset{\star}{p}_{2}$ and $\overset{\star}
{p}_{3}$ are ESSs when $\left| a\right| ^{2}=\left| b\right| ^{2}$. The
conditions (\ref{Conditions3PlayerSymm}) can also be written as
\begin{align}
1.\text{ \ \ \ }\sigma & >(\omega+\sigma)\left| b\right| ^{2}\nonumber\\
2.\text{ If }\sigma & =\left| b\right| ^{2}(\omega+\sigma)\text{ then
}\frac{\eta(\omega-\sigma)}{(\omega+\sigma)}>0
\end{align}
For the strategy $\overset{\star}{p}_{2}=0$ the ESS conditions
(\ref{Cond3PlayerSymme1}) reduce to
\begin{align}
1.\text{ \ \ \ \ }\omega & <(\omega+\sigma)\left| b\right| ^{2}\nonumber\\
2.\text{ If \ }\omega & =\left| b\right| ^{2}(\omega+\sigma)\text{ then
}\frac{\eta(\omega-\sigma)}{(\omega+\sigma)}>0
\end{align}
Examples of three-player symmetric games are easy to find for which a pure
strategy is a NE for the whole range $\left| b\right| ^{2}\in\lbrack0,1]$,
but it is not an ESS for some particular value of $\left| b\right| ^{2}$. An
example is the class of games for which $\sigma=0$, $\omega<0$, and
$\eta\leq0$. In this class the strategy $\overset{\star}{p}_{2}=0$ is a NE for
all $\left| b\right| ^{2}\in\lbrack0,1]$ but not an ESS when $\left|
b\right| ^{2}=1$.
Apart from the pure strategies, the mixed strategy equilibrium $\overset
{\star}{p}_{1}$ forms the most interesting case. It makes the payoff
difference $\left\{ P(\overset{\star}{p_{1}},\overset{\star}{p_{1}}
,\overset{\star}{p_{1}})-P(p,\overset{\star}{p_{1}},\overset{\star}{p_{1}
})\right\} $ identically zero for every strategy $p$. The strategy
$\overset{\star}{p_{1}}$ is an ESS when $\left\{ P(\overset{\star}{p_{1}
},q,\overset{\star}{p_{1}})-P(q,q,\overset{\star}{p_{1}})\right\} >0$ but
\begin{align}
& P(\overset{\star}{p_{1}},q,\overset{\star}{p_{1}})-P(q,q,\overset{\star
}{p_{1}})\nonumber\\
& =\pm(\overset{\star}{p_{1}}-q)^{2}\sqrt{\left\{ (\sigma+\omega)^{2}
-(2\eta)^{2}\right\} \left| b\right| ^{2}(1-\left| b\right| ^{2}
)+(\eta^{2}-\sigma\omega)} \label{3PlayerSqr}
\end{align}
Therefore, out of the two possible roots $(\overset{\star}{p_{1}})_{1}$ and
$(\overset{\star}{p_{1}})_{2}$ of the quadratic equation\footnote{These roots
make the difference $\left\{ P(\overset{\star}{p_{1}},q,\overset{\star}
{p_{1}})-P(q,q,\overset{\star}{p_{1}})\right\} $ greater than and less than
zero, respectively.}:
\begin{gather}
\overset{\star}{p_{1}}^{2}(1-2\left| b\right| ^{2})(\sigma+\omega
-2\eta)+\nonumber\\
2\overset{\star}{p_{1}}\left\{ \left| b\right| ^{2}(\sigma+\omega
-2\eta)-\omega+\eta\right\} +\left\{ \omega-\left| b\right| ^{2}
(\sigma+\omega)\right\} =0 \label{DefQuadEqForp1Star}
\end{gather}
only $(\overset{\star}{p_{1}})_{1}$ can exist as an ESS. When the square root
term in the equation (\ref{3PlayerSqr}) becomes zero we have only one mixed
NE, which is not an ESS. Hence, out of four possible NE in this three-player
game only three can be ESSs.
An interesting class of three-player games is the one for which $\eta
^{2}=\sigma\omega$. For these games the mixed NE are
\begin{equation}
\overset{\star}{p_{1}}=\frac{\left\{ (\omega-\eta)-\left| b\right| ^{2}
(\sigma+\omega-2\eta)\right\} \pm\left| a\right| \left| b\right| \left|
\sigma-\omega\right| }{(1-2\left| b\right| ^{2})(\sigma+\omega-2\eta)}
\end{equation}
and, when the game is played classically, we get only one mixed NE, which is not
an ESS. However, for all nonzero $\left| b\right| ^{2}$ we generally
obtain two NE, out of which one can be an ESS.
Similar to the two-player case, the equilibria in a three-player symmetric
game where quantization affects evolutionary stability are the ones that
survive for two initial states, one of which is unentangled and corresponds to
the classical game. Suppose $\overset{\star}{p_{1}}$ remains a NE for $\left|
b\right| ^{2}=0$ and some other non-zero $\left| b\right| ^{2}$. It is
possible when
\begin{equation}
(\sigma-\omega)(2\overset{\star}{p_{1}}-1)=0
\end{equation}
Alternatively the strategy $\overset{\star}{p}=\frac{1}{2}$ remains a NE for
all $\left| b\right| ^{2}\in\lbrack0,1]$. It reduces the defining quadratic
equation (\ref{DefQuadEqForp1Star}) for $\overset{\star}{p_{1}}$ to
\begin{equation}
\sigma+\omega+2\eta=0
\end{equation}
and makes the difference $\left\{ P(\overset{\star}{p_{1}},q,\overset{\star
}{p_{1}})-P(q,q,\overset{\star}{p_{1}})\right\} $ independent of $\left|
b\right| ^{2}$. Therefore the strategy $\overset{\star}{p}=\frac{1}{2}$, even
when it is retained as an equilibrium for all $\left| b\right| ^{2}
\in\lbrack0,1]$, cannot be an ESS in only one version (classical or quantum) of
the symmetric three-player game. For the second possibility $\sigma=\omega$ the defining
equation for $\overset{\star}{p_{1}}$ is reduced to
\begin{equation}
(1-2\left| b\right| ^{2})\left\{ \overset{\star}{p_{1}}-\frac{(\eta
-\sigma)-\sqrt{\eta^{2}-\sigma^{2}}}{2(\eta-\sigma)}\right\} \left\{
\overset{\star}{p_{1}}-\frac{(\eta-\sigma)+\sqrt{\eta^{2}-\sigma^{2}}}
{2(\eta-\sigma)}\right\} =0
\end{equation}
for which
\begin{equation}
P(\overset{\star}{p_{1}},q,\overset{\star}{p_{1}})-P(q,q,\overset{\star}
{p_{1}})=\pm2(\overset{\star}{p_{1}}-q)^{2}\left| \left| b\right|
^{2}-\frac{1}{2}\right| \sqrt{\eta^{2}-\sigma^{2}}
\end{equation}
Here the difference $\left\{ P(\overset{\star}{p_{1}},q,\overset{\star}
{p_{1}})-P(q,q,\overset{\star}{p_{1}})\right\} $ still depends on $\left|
b\right| ^{2}$ and becomes zero for $\left| b\right| ^{2}=1/2$.
Hence, for the class of games for which $\sigma=\omega$ and $\eta>\sigma$, one
of the mixed strategies $(\overset{\star}{p_{1}})_{1},(\overset{\star}{p_{1}
})_{2}$ remains a NE for all $\left| b\right| ^{2}\in\lbrack0,1]$ but not an
ESS when $\left| b\right| ^{2}=1/2$. In this class of three-player quantum
games the evolutionary stability of a mixed strategy can, therefore, be
changed while the game is played using initial quantum states in the form
(\ref{IniStatSymm3Player}).
\section{Quantum Rock-Scissors-Paper game}
Long played as a children's pastime, or as an odd-man-out selection process,
Rock-Scissors-Paper (RSP) is a game for two players, typically played using
the players' hands. The players, facing each other, tap their fists in their
open palms three times (saying Rock, Scissors, Paper) and then show one of
three possible gestures. Rock wins against Scissors (crushes it) but
loses against Paper (is wrapped by it). Scissors wins against
Paper (cuts it) but loses against Rock (is crushed by it). Paper wins
against Rock (wraps it) but loses against Scissors (is cut by it).
Like many other games, the game is also played in nature. Lizards in the Coast
Range of California play this game \cite{LizardGame} using three alternative
male strategies locked in an ecological never-ending cycle from which there
seems little escape.
\subsection{Rock-Scissors-Paper in a slightly modified version}
In a slightly modified version of the RSP game both players receive a small
premium $-\epsilon>0$ for a draw. This game can be represented by the payoff matrix:
\begin{equation}
\begin{array}
[c]{c}
R\\
S\\
P
\end{array}
\overset{
\begin{array}
[c]{ccc}
R & S & P
\end{array}
}{\left(
\begin{array}
[c]{ccc}
-\epsilon & 1 & -1\\
-1 & -\epsilon & 1\\
1 & -1 & -\epsilon
\end{array}
\right) } \label{RSPMatrix}
\end{equation}
where $-1<\epsilon<0$. The matrix of the usual game is obtained when
$\epsilon$ is zero.
\subsection{Nash equilibrium and ESS in Rock-Scissors-Paper game}
One cannot win if one's opponent knows which strategy is going to be picked.
For example, picking Rock consistently means that all the opponent needs to do
is pick Paper to win every time. Players soon find that if predicting the
opponent's strategy is not possible, the best strategy is to pick Rock,
Scissors, or Paper at random. In other words, the player selects Rock,
Scissors, or Paper with a probability of $1/3$. If the opponent's strategy is
predictable, picking a strategy at random with a probability of $1/3$ is
\emph{not} the best thing to do unless the opponent is doing the same \cite{Prestwich}.
Analysis \cite{Weibull} of the modified RSP game of matrix (\ref{RSPMatrix})
shows that its NE consists of playing each of the three pure
strategies with a fixed equilibrium probability of $1/3$. However, it is not an
ESS because $\epsilon$ is negative.
Here we want to study the effects of quantization on evolutionary stability
for the modified RSP game. The game is different from the others considered
earlier because classically each player now possesses three pure strategies
instead of two. A classical mixed NE exists which is not an ESS. Our
motivation is to explore the possibility that the classical mixed NE becomes
an ESS for some quantum form of the game.
\subsection{Quantization of Rock-Scissors-Paper game}
Using the simpler notation $R\sim1$, $S\sim2$, $P\sim3$, we quantize this game via
Marinatto and Weber's scheme \cite{Marinatto1}. We assume the two players are
in possession of three unitary and Hermitian operators $\hat{I},\hat{C}$ and
$\hat{D}$ defined as follows.
\begin{align}
\hat{I}\left| 1\right\rangle & =\left| 1\right\rangle \text{,}\qquad
\hat{C}\left| 1\right\rangle =\left| 3\right\rangle \text{,}\qquad\hat
{D}\left| 1\right\rangle =\left| 2\right\rangle \nonumber\\
\hat{I}\left| 2\right\rangle & =\left| 2\right\rangle \text{,}\qquad
\hat{C}\left| 2\right\rangle =\left| 2\right\rangle \text{,}\qquad\hat
{D}\left| 2\right\rangle =\left| 1\right\rangle \nonumber\\
\hat{I}\left| 3\right\rangle & =\left| 3\right\rangle \text{,}\qquad
\hat{C}\left| 3\right\rangle =\left| 1\right\rangle \text{,}\qquad\hat
{D}\left| 3\right\rangle =\left| 3\right\rangle
\end{align}
where $\hat{C}^{\dagger}=\hat{C}=\hat{C}^{-1}$ and $\hat{D}^{\dagger}=\hat
{D}=\hat{D}^{-1}$ and $\hat{I}$ is the identity operator.
Consider a general two-player payoff matrix when each player has three strategies:
\begin{equation}
\begin{array}
[c]{c}
1\\
2\\
3
\end{array}
\overset{
\begin{array}
[c]{ccccccc}
1 & & & 2 & & & 3
\end{array}
}{\left(
\begin{array}
[c]{ccc}
(\alpha_{11},\beta_{11}) & (\alpha_{12},\beta_{12}) & (\alpha_{13},\beta
_{13})\\
(\alpha_{21},\beta_{21}) & (\alpha_{22},\beta_{22}) & (\alpha_{23},\beta
_{23})\\
(\alpha_{31},\beta_{31}) & (\alpha_{32},\beta_{32}) & (\alpha_{33},\beta_{33})
\end{array}
\right) } \label{Gen3StrategyMatrix}
\end{equation}
where $\alpha_{ij}$ and $\beta_{ij}$ are the payoffs to Alice and Bob,
respectively, when Alice plays $i$ and Bob plays $j$ and $1\leq i,j\leq3$.
Suppose Alice and Bob apply the operators $\hat{C}$, $\hat{D}$, and $\hat{I}$
with the probabilities $p$, $p_{1}$, $(1-p-p_{1})$ and $q$, $q_{1}$,
$(1-q-q_{1})$ respectively. The initial state of the game is $\rho_{in}$.
Alice's move changes the state to
\begin{equation}
\overset{A}{\rho_{in}}=(1-p-p_{1})\hat{I}_{A}\rho_{in}\hat{I}_{A}^{\dagger
}+p\hat{C}_{A}\rho_{in}\hat{C}_{A}^{\dagger}+p_{1}\hat{D}_{A}\rho_{in}\hat
{D}_{A}^{\dagger}
\end{equation}
The final state, after Bob too has played his move, is
\begin{equation}
\overset{A,B}{\rho_{f}}=(1-q-q_{1})\hat{I}_{B}\overset{A}{\rho_{in}}\hat
{I}_{B}^{\dagger}+q\hat{C}_{B}\overset{A}{\rho_{in}}\hat{C}_{B}^{\dagger
}+q_{1}\hat{D}_{B}\overset{A}{\rho_{in}}\hat{D}_{B}^{\dagger}
\end{equation}
This state can be written as
\begin{align}
\overset{A,B}{\rho_{f}} & =(1-p-p_{1})(1-q-q_{1})\left\{ \hat{I}_{A}
\otimes\hat{I}_{B}\rho_{in}\hat{I}_{A}^{\dagger}\otimes\hat{I}_{B}^{\dagger
}\right\} +p(1-q-q_{1})\times\nonumber\\
& \left\{ \hat{C}_{A}\otimes\hat{I}_{B}\rho_{in}\hat{C}_{A}^{\dagger}
\otimes\hat{I}_{B}^{\dagger}\right\} +p_{1}(1-q-q_{1})\left\{ \hat{D}
_{A}\otimes\hat{I}_{B}\rho_{in}\hat{D}_{A}^{\dagger}\otimes\hat{I}_{B}
^{\dagger}\right\} +\nonumber\\
& (1-p-p_{1})q\left\{ \hat{I}_{A}\otimes\hat{C}_{B}\rho_{in}\hat{I}_{A}
^{\dagger}\otimes\hat{C}_{B}^{\dagger}\right\} +pq\left\{ \hat{C}_{A}
\otimes\hat{C}_{B}\rho_{in}\hat{C}_{A}^{\dagger}\otimes\hat{C}_{B}^{\dagger
}\right\} +\nonumber\\
& p_{1}q\left\{ \hat{D}_{A}\otimes\hat{C}_{B}\rho_{in}\hat{D}_{A}^{\dagger
}\otimes\hat{C}_{B}^{\dagger}\right\} +(1-p-p_{1})q_{1}\left\{
\hat{I}_{A}\otimes\hat{D}_{B}\rho_{in}\hat{I}_{A}^{\dagger}\otimes\hat{D}
_{B}^{\dagger}\right\} \nonumber\\
& +pq_{1}\left\{ \hat{C}_{A}\otimes\hat{D}_{B}\rho_{in}\hat{C}_{A}^{\dagger
}\otimes\hat{D}_{B}^{\dagger}\right\} +p_{1}q_{1}\left\{ \hat{D}_{A}
\otimes\hat{D}_{B}\rho_{in}\hat{D}_{A}^{\dagger}\otimes\hat{D}_{B}^{\dagger
}\right\}
\end{align}
The nine basis vectors of the initial quantum state with three pure classical
strategies are $\left| ij\right\rangle $ for $i,j=1,2,3$. We consider the
initial state to be a pure quantum state of two qutrits, i.e.
\begin{equation}
\left| \psi_{in}\right\rangle =\underset{i,j=1,2,3}{\sum}c_{ij}\left|
ij\right\rangle \text{, \ \ \ \ where }\underset{i,j=1,2,3}{\sum}\left|
c_{ij}\right| ^{2}=1 \label{RSPIniStat}
\end{equation}
The payoff operators for Alice and Bob are \cite{Marinatto1}
\begin{align}
(P_{A,B})_{oper} & =(\alpha,\beta)_{11}\left| 11\right\rangle \left\langle
11\right| +(\alpha,\beta)_{12}\left| 12\right\rangle \left\langle 12\right|
+(\alpha,\beta)_{13}\left| 13\right\rangle \left\langle 13\right|
+\nonumber\\
& (\alpha,\beta)_{21}\left| 21\right\rangle \left\langle 21\right|
+(\alpha,\beta)_{22}\left| 22\right\rangle \left\langle 22\right|
+(\alpha,\beta)_{23}\left| 23\right\rangle \left\langle 23\right|
+\nonumber\\
& (\alpha,\beta)_{31}\left| 31\right\rangle \left\langle 31\right|
+(\alpha,\beta)_{32}\left| 32\right\rangle \left\langle 32\right|
+(\alpha,\beta)_{33}\left| 33\right\rangle \left\langle 33\right| \nonumber\\
&
\end{align}
The players' payoffs are then
\begin{equation}
P_{A,B}=\text{Tr}[\left\{ (P_{A,B})_{oper}\right\} \overset{A,B}{\rho_{f}}]
\label{3StrategyPayoff}
\end{equation}
Payoff to Alice, for example, can be written as
\begin{equation}
P_{A}=\Phi\Omega\Upsilon^{T} \label{RSPPayoffAlice}
\end{equation}
where $T$ is for transpose, and the matrices $\Phi,$ $\Omega,$ and $\Upsilon$ are
\begin{align}
\Phi & =[
\begin{array}
[c]{ccc}
(1-p-p_{1})(1-q-q_{1}) & p(1-q-q_{1}) & p_{1}(1-q-q_{1})
\end{array}
\nonumber\\
&
\begin{array}
[c]{cccccc}
(1-p-p_{1})q & pq & p_{1}q & (1-p-p_{1})q_{1} & pq_{1} & p_{1}q_{1}
\end{array}
]\nonumber\\
\Upsilon & =[
\begin{array}
[c]{ccccccccc}
\alpha_{11} & \alpha_{12} & \alpha_{13} & \alpha_{21} & \alpha_{22} &
\alpha_{23} & \alpha_{31} & \alpha_{32} & \alpha_{33}
\end{array}
]\nonumber\\
\Omega & =\left[
\begin{array}
[c]{ccccccccc}
\left| c_{11}\right| ^{2} & \left| c_{12}\right| ^{2} & \left|
c_{13}\right| ^{2} & \left| c_{21}\right| ^{2} & \left| c_{22}\right|
^{2} & \left| c_{23}\right| ^{2} & \left| c_{31}\right| ^{2} & \left|
c_{32}\right| ^{2} & \left| c_{33}\right| ^{2}\\
\left| c_{31}\right| ^{2} & \left| c_{32}\right| ^{2} & \left|
c_{33}\right| ^{2} & \left| c_{21}\right| ^{2} & \left| c_{22}\right|
^{2} & \left| c_{23}\right| ^{2} & \left| c_{11}\right| ^{2} & \left|
c_{12}\right| ^{2} & \left| c_{13}\right| ^{2}\\
\left| c_{21}\right| ^{2} & \left| c_{22}\right| ^{2} & \left|
c_{23}\right| ^{2} & \left| c_{11}\right| ^{2} & \left| c_{12}\right|
^{2} & \left| c_{13}\right| ^{2} & \left| c_{31}\right| ^{2} & \left|
c_{32}\right| ^{2} & \left| c_{33}\right| ^{2}\\
\left| c_{13}\right| ^{2} & \left| c_{12}\right| ^{2} & \left|
c_{11}\right| ^{2} & \left| c_{23}\right| ^{2} & \left| c_{22}\right|
^{2} & \left| c_{21}\right| ^{2} & \left| c_{33}\right| ^{2} & \left|
c_{32}\right| ^{2} & \left| c_{31}\right| ^{2}\\
\left| c_{33}\right| ^{2} & \left| c_{32}\right| ^{2} & \left|
c_{31}\right| ^{2} & \left| c_{23}\right| ^{2} & \left| c_{22}\right|
^{2} & \left| c_{21}\right| ^{2} & \left| c_{13}\right| ^{2} & \left|
c_{12}\right| ^{2} & \left| c_{11}\right| ^{2}\\
\left| c_{23}\right| ^{2} & \left| c_{22}\right| ^{2} & \left|
c_{21}\right| ^{2} & \left| c_{13}\right| ^{2} & \left| c_{12}\right|
^{2} & \left| c_{11}\right| ^{2} & \left| c_{33}\right| ^{2} & \left|
c_{32}\right| ^{2} & \left| c_{31}\right| ^{2}\\
\left| c_{12}\right| ^{2} & \left| c_{11}\right| ^{2} & \left|
c_{13}\right| ^{2} & \left| c_{22}\right| ^{2} & \left| c_{21}\right|
^{2} & \left| c_{23}\right| ^{2} & \left| c_{32}\right| ^{2} & \left|
c_{31}\right| ^{2} & \left| c_{33}\right| ^{2}\\
\left| c_{32}\right| ^{2} & \left| c_{31}\right| ^{2} & \left|
c_{33}\right| ^{2} & \left| c_{22}\right| ^{2} & \left| c_{21}\right|
^{2} & \left| c_{23}\right| ^{2} & \left| c_{12}\right| ^{2} & \left|
c_{11}\right| ^{2} & \left| c_{13}\right| ^{2}\\
\left| c_{22}\right| ^{2} & \left| c_{21}\right| ^{2} & \left|
c_{23}\right| ^{2} & \left| c_{12}\right| ^{2} & \left| c_{11}\right|
^{2} & \left| c_{13}\right| ^{2} & \left| c_{32}\right| ^{2} & \left|
c_{31}\right| ^{2} & \left| c_{33}\right| ^{2}
\end{array}
\right] \nonumber\\
& \label{RSPAliceSigma}
\end{align}
The payoff (\ref{RSPPayoffAlice}) corresponds to the matrix
(\ref{Gen3StrategyMatrix}). Payoffs in the classical mixed strategy game can be
obtained from Eq. (\ref{3StrategyPayoff}) for the initial state $\left|
\psi_{in}\right\rangle =\left| 11\right\rangle $. The game is symmetric when
$\alpha_{ij}=\beta_{ji}$ in the matrix (\ref{Gen3StrategyMatrix}). The quantum
game played using the quantum state (\ref{RSPIniStat}) is symmetric when
$\left| c_{ij}\right| ^{2}=\left| c_{ji}\right| ^{2}$ for all constants
$c_{ij}$ in the state (\ref{RSPIniStat}). These two conditions together
guarantee a symmetric quantum game. The players' payoffs $P_{A}$, $P_{B}$ then
do not need a subscript and we can simply use $P(p,q)$ to denote the payoff to
the $p$-player against the $q$-player.
The question of evolutionary stability in the quantized RSP game is addressed below.
\subsection{Consideration of evolutionary stability}
Assume a strategy is defined by a pair of numbers $(p,p_{1})$ for players
playing the quantized RSP game. These numbers are the probabilities with which
the player applies the operators $\hat{C}$ and $\hat{D}$. The identity
operator $\hat{I}$ is, then, applied with probability $(1-p-p_{1})$. Similar
to the conditions $1$ and $2$ in Eq. (\ref{DefESS}), the conditions making a
strategy $(p^{\star},p_{1}^{\star})$ an ESS can be written as \cite{Smith
Price,Weibull}
\begin{align}
1\text{. \ \ \ }P\{(p^{\star},p_{1}^{\star}),(p^{\star},p_{1}^{\star})\} &
>P\{(p,p_{1}),(p^{\star},p_{1}^{\star})\}\nonumber\\
2\text{. if }P\{(p^{\star},p_{1}^{\star}),(p^{\star},p_{1}^{\star})\} &
=P\{(p,p_{1}),(p^{\star},p_{1}^{\star})\}\text{ then}\nonumber\\
P\{(p^{\star},p_{1}^{\star}),(p,p_{1})\} & >P\{(p,p_{1}),(p,p_{1})\}
\label{ESScondsRSP}
\end{align}
Suppose $(p^{\star},p_{1}^{\star})$ is a mixed NE; then
\begin{equation}
\left\{ \frac{\partial P}{\partial p}\mid_{\substack{p=q=p^{\star}
\\p_{1}=q_{1}=p_{1}^{\star} }}(p^{\star}-p)+\frac{\partial P}{\partial p_{1}
}\mid_{\substack{p=q=p^{\star} \\p_{1}=q_{1}=p_{1}^{\star} }}(p_{1}^{\star
}-p_{1})\right\} \geq0
\end{equation}
Using substitutions
\begin{equation}
\begin{array}
[c]{cc}
\left| c_{11}\right| ^{2}-\left| c_{31}\right| ^{2}=\bigtriangleup_{1}, &
\left| c_{21}\right| ^{2}-\left| c_{11}\right| ^{2}=\bigtriangleup_{1}^{
\acute{}
}\\
\left| c_{13}\right| ^{2}-\left| c_{33}\right| ^{2}=\bigtriangleup_{2}, &
\left| c_{22}\right| ^{2}-\left| c_{12}\right| ^{2}=\bigtriangleup_{2}^{
\acute{}
}\\
\left| c_{12}\right| ^{2}-\left| c_{32}\right| ^{2}=\bigtriangleup_{3}, &
\left| c_{23}\right| ^{2}-\left| c_{13}\right| ^{2}=\bigtriangleup_{3}^{
\acute{}
}
\end{array}
\end{equation}
we get
\begin{align}
\frac{\partial P}{\partial p} & \mid_{\substack{p=q=p^{\star} \\p_{1}
=q_{1}=p_{1}^{\star}}}=p^{\star}(\bigtriangleup_{1}-\bigtriangleup
_{2})\left\{ (\alpha_{11}+\alpha_{33})-(\alpha_{13}+\alpha_{31})\right\}
+\nonumber\\
& p_{1}^{\star}(\bigtriangleup_{1}-\bigtriangleup_{3})\left\{ (\alpha
_{11}+\alpha_{32})-(\alpha_{12}+\alpha_{31})\right\} -\nonumber\\
& \bigtriangleup_{1}(\alpha_{11}-\alpha_{31})-\bigtriangleup_{2}(\alpha
_{13}-\alpha_{33})-\bigtriangleup_{3}(\alpha_{12}-\alpha_{32})
\label{RSPRestPoint1}
\end{align}
\begin{align}
\frac{\partial P}{\partial p_{1}} & \mid_{\substack{p=q=p^{\star}
\\p_{1}=q_{1}=p_{1}^{\star}}}=p^{\star}(\bigtriangleup_{3}^{
\acute{}
}-\bigtriangleup_{1}^{
\acute{}
})\left\{ (\alpha_{11}+\alpha_{23})-(\alpha_{13}+\alpha_{21})\right\}
+\nonumber\\
& p_{1}^{\star}(\bigtriangleup_{2}^{
\acute{}
}-\bigtriangleup_{1}^{
\acute{}
})\left\{ (\alpha_{11}+\alpha_{22})-(\alpha_{12}+\alpha_{21})\right\}
+\nonumber\\
& \bigtriangleup_{1}^{
\acute{}
}(\alpha_{11}-\alpha_{21})+\bigtriangleup_{2}^{
\acute{}
}(\alpha_{12}-\alpha_{22})+\bigtriangleup_{3}^{
\acute{}
}(\alpha_{13}-\alpha_{23}) \label{RSPRestPoint2}
\end{align}
For the matrix (\ref{RSPMatrix}) the equations (\ref{RSPRestPoint1},
\ref{RSPRestPoint2}) can be written as
\begin{align}
\frac{\partial P}{\partial p} & \mid_{\substack{p=q=p^{\star} \\p_{1}
=q_{1}=p_{1}^{\star}}}=\bigtriangleup_{1}\left\{ -2\epsilon p^{\star
}-(3+\epsilon)p_{1}^{\star}+(1+\epsilon)\right\} +\nonumber\\
& \bigtriangleup_{2}\left\{ 2\epsilon p^{\star}+(1-\epsilon)\right\}
+\bigtriangleup_{3}\left\{ (3+\epsilon)p_{1}^{\star}-2\right\} \\
\frac{\partial P}{\partial p_{1}} & \mid_{\substack{p=q=p^{\star}
\\p_{1}=q_{1}=p_{1}^{\star}}}=\bigtriangleup_{1}^{
\acute{}
}\left\{ -p^{\star}(3-\epsilon)+2\epsilon p_{1}^{\star}+(1-\epsilon)\right\}
-\nonumber\\
& \bigtriangleup_{2}^{
\acute{}
}\left\{ 2\epsilon p_{1}^{\star}-(1+\epsilon)\right\} +\bigtriangleup_{3}^{
\acute{}
}\left\{ (3-\epsilon)p^{\star}-2\right\}
\end{align}
The payoff difference in the second condition of an ESS given in the Eq.
(\ref{ESScondsRSP}) reduces to
\begin{align}
& P\{(p^{\star},p_{1}^{\star}),(p,p_{1})\}-P\{(p,p_{1}),(p,p_{1}
)\}\nonumber\\
& =(p^{\star}-p)[-\bigtriangleup_{1}\{2\epsilon p+(3+\epsilon)p_{1}
-(1+\epsilon)\}+\nonumber\\
& \bigtriangleup_{2}\{2\epsilon p+(1-\epsilon)\}+\bigtriangleup
_{3}\{(3+\epsilon)p_{1}-2\}]+\nonumber\\
& (p_{1}^{\star}-p_{1})[-\bigtriangleup_{1}^{
\acute{}
}\{(3-\epsilon)p-2\epsilon p_{1}-(1-\epsilon)\}-\nonumber\\
& \bigtriangleup_{2}^{
\acute{}
}\{2\epsilon p_{1}-(1+\epsilon)\}+\bigtriangleup_{3}^{
\acute{}
}\{(3-\epsilon)p-2\}]
\end{align}
With the substitutions $(p^{\star}-p)=x$ and $(p_{1}^{\star}-p_{1})=y$ the
above payoff difference is
\begin{align}
& P\{(p^{\star},p_{1}^{\star}),(p,p_{1})\}-P\{(p,p_{1}),(p,p_{1}
)\}\nonumber\\
& =\bigtriangleup_{1}x\left\{ 2\epsilon x+(3+\epsilon)y\right\}
-\bigtriangleup_{2}(2\epsilon x^{2})-\bigtriangleup_{3}xy(3+\epsilon
)-\nonumber\\
& \bigtriangleup_{1}^{
\acute{}
}y\left\{ 2\epsilon y-(3-\epsilon)x\right\} +\bigtriangleup_{2}^{
\acute{}
}(2\epsilon y^{2})-\bigtriangleup_{3}^{
\acute{}
}xy(3-\epsilon) \label{2ndESSRSP}
\end{align}
provided
\begin{equation}
\frac{\partial P}{\partial p}\mid_{\substack{p=q=p^{\star} \\p_{1}=q_{1}
=p_{1}^{\star}}}=0\text{ \ \ \ \ \ \ \ \ \ \ \ \ \ }\frac{\partial P}{\partial
p_{1}}\mid_{\substack{p=q=p^{\star} \\p_{1}=q_{1}=p_{1}^{\star}}}=0
\label{CondsRSP}
\end{equation}
The conditions in Eq. (\ref{CondsRSP}) together define the mixed NE
$(p^{\star},p_{1}^{\star})$. Consider now the modified RSP game in classical
form obtained by setting $\left| c_{11}\right| ^{2}=1$. The Eqs.
(\ref{CondsRSP}) become
\begin{align}
-2\epsilon p^{\star}-(\epsilon+3)p_{1}^{\star}+(\epsilon+1) & =0\nonumber\\
(-\epsilon+3)p^{\star}-2\epsilon p_{1}^{\star}+(\epsilon-1) & =0
\end{align}
and $p^{\star}=p_{1}^{\star}=\frac{1}{3}$ is obtained as a mixed NE for the
whole range $-1<\epsilon<0$. From Eq. (\ref{2ndESSRSP}) we get
\begin{align}
& P\{(p^{\star},p_{1}^{\star}),(p,p_{1})\}-P\{(p,p_{1}),(p,p_{1}
)\}\nonumber\\
& =2\epsilon(x^{2}+y^{2}+xy)=\epsilon\left\{ (x+y)^{2}+(x^{2}+y^{2}
)\right\} \leq0 \label{differ}
\end{align}
In the classical RSP game, therefore, the mixed strategy $p^{\star}=p_{1}^{\star
}=\frac{1}{3}$ is a NE but not an ESS, because the second condition of an ESS
given in Eq. (\ref{ESScondsRSP}) does not hold.
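As a quick check, substituting $p^{\star}=p_{1}^{\star}=\frac{1}{3}$ into the two
equations above gives
\begin{align}
-\frac{2\epsilon}{3}-\frac{(\epsilon+3)}{3}+(\epsilon+1) & =\frac{1}{3}\left\{
-2\epsilon-\epsilon-3+3\epsilon+3\right\} =0\nonumber\\
\frac{(-\epsilon+3)}{3}-\frac{2\epsilon}{3}+(\epsilon-1) & =\frac{1}{3}\left\{
-\epsilon+3-2\epsilon+3\epsilon-3\right\} =0\nonumber
\end{align}
for every value of $\epsilon$, confirming that this pair satisfies the conditions
(\ref{CondsRSP}) in the classical case.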
Now define a new initial state as
\begin{equation}
\left| \psi_{in}\right\rangle =\frac{1}{2}\left\{ \left| 12\right\rangle
+\left| 21\right\rangle +\left| 13\right\rangle +\left| 31\right\rangle
\right\} \label{InistatRSP}
\end{equation}
and use it to play the game, instead of the classical game obtained from
$\left| \psi_{in}\right\rangle =\left| 11\right\rangle $. The strategy
$p^{\star}=p_{1}^{\star}=\frac{1}{3}$ still forms a mixed NE because the
conditions (\ref{CondsRSP}) hold true for it. However, for $-1<\epsilon<0$ and
$x,y\neq0$, the payoff difference of Eq. (\ref{2ndESSRSP}) now becomes
\begin{align}
& P\{(p^{\star},p_{1}^{\star}),(p,p_{1})\}-P\{(p,p_{1}),(p,p_{1}
)\}\nonumber\\
& =-\epsilon\left\{ (x+y)^{2}+(x^{2}+y^{2})\right\} >0
\end{align}
Therefore the mixed NE $p^{\star}=p_{1}^{\star}=\frac{1}{3}$, which is not an
ESS in the classical form of the RSP game, becomes an ESS when the game is
quantized and played using the initial (entangled) quantum state given by
Eq. (\ref{InistatRSP}).
Note that from Eq. (\ref{3StrategyPayoff}) the sum of the payoffs to Alice and
Bob $(P_{A}+P_{B})$ can be obtained for both the classical mixed strategy game
(i.e. when $\left| \psi_{in}\right\rangle =\left| 11\right\rangle $) and the
quantum game played using the quantum state of Eq. (\ref{InistatRSP}). For the
matrix (\ref{RSPMatrix}) we write these sums as $(P_{A}+P_{B})_{cl}$ and
$(P_{A}+P_{B})_{qu}$ for classical mixed strategy and quantum games
respectively. We obtain
\begin{equation}
(P_{A}+P_{B})_{cl}=-2\epsilon\left\{ (1-p-p_{1})(1-q-q_{1})+p_{1}
q_{1}+pq\right\}
\end{equation}
and
\begin{equation}
(P_{A}+P_{B})_{qu}=-\left\{ \frac{1}{2}(P_{A}+P_{B})_{cl}+\epsilon\right\}
\end{equation}
In case $\epsilon=0$ both the classical and quantum games are clearly zero-sum.
For the slightly modified version of the RSP game we have $-1<\epsilon<0$, and
both versions of the game become non-zero-sum.
\section{Stability of a mixed Nash equilibrium}
In a classical symmetric bi-matrix game, played in an evolutionary set-up
involving a population, all the members of the population are
indistinguishable and each individual is equally likely to face any other. In
such a set-up individuals interact only in pair-wise encounters.
Assume a finite set of pure strategies $\left\{ 1,2,...,n\right\} $ is
available to each player. In one pair-wise encounter let a player $A$ receive
a reward $a_{ij}$ by playing strategy $i$ against another player $B$ playing
strategy $j$. In the symmetric situation player $B$ then gets $a_{ji}$ as a
reward. The value $a_{ij}$ is an element of the $n\times n$ payoff matrix
$\mathbf{M}$. We assume that the players also have the option to play a mixed
strategy, that is, a player plays strategy $i$ with probability $p_{i}$ for
all $i=1,2,...,n.$ A strategy vector $\mathbf{p,}$ with components $p_{i},$
represents the mixed strategy played by the player. In standard notation the
average, or expected, payoff for player $A$ playing strategy $\mathbf{p}$
against player $B$ playing $\mathbf{q}$ is written as $P(\mathbf{p,q)}$
\cite{MarkBroom2}:
\begin{equation}
P(\mathbf{p,q)=}\sum a_{ij}p_{i}q_{j}=\mathbf{p}^{T}\mathbf{Mq}
\label{PayoffsMixed}
\end{equation}
where $T$ is for transpose.
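A minimal numerical sketch of Eq. (\ref{PayoffsMixed}), with an illustrative
(made-up) payoff matrix, is the following.
\begin{verbatim}
import numpy as np

# Expected payoff P(p,q) = p^T M q for two mixed strategies p and q.
M = np.array([[0., -1.,  1.],
              [1.,  0., -1.],
              [-1., 1.,  0.]])        # illustrative entries a_ij
p = np.array([0.5, 0.3, 0.2])         # p_i >= 0, summing to 1
q = np.array([1/3, 1/3, 1/3])
print(p @ M @ q)                      # average payoff to the p-player
\end{verbatim}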
In evolutionary game theory mixed strategies play a significant role. The
well-known \textit{Bishop-Cannings theorem} (BCT) \cite{BishopCanning}
describes an interesting property of mixed ESSs in symmetric bi-matrix games.
Introducing the concept of \textit{support }of an ESS is helpful to understand
the BCT \cite{MarkBroom2,Vickers}. Suppose a strategy vector $\mathbf{p=(}
p_{i}\mathbf{)}$ is an ESS. Its \emph{support} $S(\mathbf{p})$ is the set
$S(\mathbf{p})=\left\{ i:p_{i}>0\right\} $. Hence the support of
$\mathbf{p}$ is the set of pure strategies that can be played by a $\mathbf{p}
$-player. The BCT states that if $\mathbf{p}$ is an ESS with support $I$ and
$\mathbf{r}\neq\mathbf{p}$ is an ESS with support $J$, then $I\nsupseteq J$.
For bi-matrix games the BCT shows that \textit{no pure strategy can be
evolutionarily stable when a mixed ESS exists} \cite{MarkBroom2}. One then
naturally asks what happens to the classical pure ESSs when a switch-over to a
quantum form of a classical symmetric bi-matrix game gives evolutionary
stability to a mixed symmetric NE.
Following the approach developed for quantum RSP game, we now consider a
general form of a two-qubit initial quantum state. Our results show that for
this form of the initial quantum state, the corresponding quantum version of a
bi-matrix game can give evolutionary stability to a mixed symmetric NE when
classically it is not stable. It is interesting to observe that by ensuring
evolutionary stability to a mixed NE in a quantum form of the game, the BCT
forces out the pure ESSs present in classical form of the game.
The payoff to a player in the quantized version of the RSP game can also be
written in a form similar to (\ref{PayoffsMixed}), provided the matrix
$\mathbf{M}$ is replaced with the matrix corresponding to the quantum version
of the game.
For example, Alice's payoff, who plays the strategy $\mathbf{p}$ (where
$\mathbf{p}^{T}\mathbf{=}[(1-p-p_{1})$ \ \ \ $p_{1}$ \ \ \ $p]$) against Bob
who plays the strategy $\mathbf{q}$ (where $\mathbf{q}^{T}\mathbf{=}
[(1-q-q_{1})$ \ $\ \ q_{1}$ \ \ $\ q]$), can be written as
\begin{equation}
P_{A}(\mathbf{p,q)=p}^{T}\mathbf{\omega q}
\end{equation}
where the matrix $\mathbf{\omega}$ is given by
\begin{equation}
\mathbf{\omega=}\left(
\begin{array}
[c]{ccc}
\omega_{11} & \omega_{12} & \omega_{13}\\
\omega_{21} & \omega_{22} & \omega_{23}\\
\omega_{31} & \omega_{32} & \omega_{33}
\end{array}
\right) \label{QMatrixMixed}
\end{equation}
and the elements of $\mathbf{\omega}$ are given by the following matrix equation
\begin{align}
& \left(
\begin{array}
[c]{ccccccccc}
\omega_{11} & \omega_{12} & \omega_{13} & \omega_{21} & \omega_{22} &
\omega_{23} & \omega_{31} & \omega_{32} & \omega_{33}
\end{array}
\right) \nonumber\\
& =\left(
\begin{array}
[c]{ccccccccc}
\alpha_{11} & \alpha_{12} & \alpha_{13} & \alpha_{21} & \alpha_{22} &
\alpha_{23} & \alpha_{31} & \alpha_{32} & \alpha_{33}
\end{array}
\right) \times\nonumber\\
& \left(
\begin{array}
[c]{ccccccccc}
\left| c_{11}\right| ^{2} & \left| c_{12}\right| ^{2} & \left|
c_{13}\right| ^{2} & \left| c_{21}\right| ^{2} & \left| c_{22}\right|
^{2} & \left| c_{23}\right| ^{2} & \left| c_{31}\right| ^{2} & \left|
c_{32}\right| ^{2} & \left| c_{33}\right| ^{2}\\
\left| c_{12}\right| ^{2} & \left| c_{11}\right| ^{2} & \left|
c_{12}\right| ^{2} & \left| c_{22}\right| ^{2} & \left| c_{21}\right|
^{2} & \left| c_{22}\right| ^{2} & \left| c_{32}\right| ^{2} & \left|
c_{31}\right| ^{2} & \left| c_{32}\right| ^{2}\\
\left| c_{13}\right| ^{2} & \left| c_{13}\right| ^{2} & \left|
c_{11}\right| ^{2} & \left| c_{23}\right| ^{2} & \left| c_{23}\right|
^{2} & \left| c_{21}\right| ^{2} & \left| c_{33}\right| ^{2} & \left|
c_{33}\right| ^{2} & \left| c_{31}\right| ^{2}\\
\left| c_{21}\right| ^{2} & \left| c_{22}\right| ^{2} & \left|
c_{23}\right| ^{2} & \left| c_{11}\right| ^{2} & \left| c_{12}\right|
^{2} & \left| c_{13}\right| ^{2} & \left| c_{21}\right| ^{2} & \left|
c_{22}\right| ^{2} & \left| c_{23}\right| ^{2}\\
\left| c_{22}\right| ^{2} & \left| c_{21}\right| ^{2} & \left|
c_{22}\right| ^{2} & \left| c_{12}\right| ^{2} & \left| c_{11}\right|
^{2} & \left| c_{12}\right| ^{2} & \left| c_{22}\right| ^{2} & \left|
c_{21}\right| ^{2} & \left| c_{22}\right| ^{2}\\
\left| c_{23}\right| ^{2} & \left| c_{23}\right| ^{2} & \left|
c_{21}\right| ^{2} & \left| c_{13}\right| ^{2} & \left| c_{13}\right|
^{2} & \left| c_{11}\right| ^{2} & \left| c_{23}\right| ^{2} & \left|
c_{23}\right| ^{2} & \left| c_{21}\right| ^{2}\\
\left| c_{31}\right| ^{2} & \left| c_{32}\right| ^{2} & \left|
c_{33}\right| ^{2} & \left| c_{31}\right| ^{2} & \left| c_{32}\right|
^{2} & \left| c_{33}\right| ^{2} & \left| c_{11}\right| ^{2} & \left|
c_{12}\right| ^{2} & \left| c_{13}\right| ^{2}\\
\left| c_{32}\right| ^{2} & \left| c_{31}\right| ^{2} & \left|
c_{32}\right| ^{2} & \left| c_{32}\right| ^{2} & \left| c_{31}\right|
^{2} & \left| c_{32}\right| ^{2} & \left| c_{12}\right| ^{2} & \left|
c_{11}\right| ^{2} & \left| c_{12}\right| ^{2}\\
\left| c_{33}\right| ^{2} & \left| c_{33}\right| ^{2} & \left|
c_{31}\right| ^{2} & \left| c_{33}\right| ^{2} & \left| c_{33}\right|
^{2} & \left| c_{31}\right| ^{2} & \left| c_{13}\right| ^{2} & \left|
c_{13}\right| ^{2} & \left| c_{11}\right| ^{2}
\end{array}
\right) \nonumber\\
& \label{QMatrix1Mixed}
\end{align}
The matrix (\ref{QMatrixMixed}) reduces to its classical form
(\ref{Gen3StrategyMatrix}) by fixing $\left| c_{11}\right| ^{2}=1$.
In a symmetric game the exchange of strategies by Alice and Bob also exchanges
their respective payoffs. The quantum game corresponding to the matrix
(\ref{Gen3StrategyMatrix}), when played using the initial quantum state of Eq.
(\ref{RSPIniStat}), becomes symmetric when
\begin{equation}
\left| c_{ij}\right| ^{2}=\left| c_{ji}\right| ^{2}\text{ for }i\neq j
\label{SymCond}
\end{equation}
The two-player quantum game, with three pure strategies, gets a form similar
to a classical matrix game. The payoff matrix of the classical game, however,
is replaced with its quantum version (\ref{QMatrixMixed}). Also the matrix
(\ref{QMatrixMixed}) now involves the coefficients $c_{ij}$ of the initial
quantum state (\ref{RSPIniStat}).
To reduce the above mathematical formalism to a two-player, two-strategy quantum
game, let us fix $p_{1}=q_{1}=0$, that is, neither player uses the operator
$\hat{D}$ at all, and both apply only the operators $\hat{C}$ and $\hat{I}$ to the
initial quantum state. The payoff to the player who plays the strategy vector
$\mathbf{p}$ (where $\mathbf{p}^{T}\mathbf{=}[1-p$ \ \ $p]$) against the
player who plays the strategy vector $\mathbf{q}$ (where $\mathbf{q}
^{T}\mathbf{=}[1-q$ \ \ $q]$) can again be written as $P(\mathbf{p,q)=p}
^{T}\mathbf{\omega q}$. The matrix $\mathbf{\omega}$, however, is now reduced to the
simpler form:
\begin{equation}
\mathbf{\omega}=\left(
\begin{array}
[c]{cc}
\omega_{11} & \omega_{13}\\
\omega_{31} & \omega_{33}
\end{array}
\right)
\end{equation}
where elements of the matrix are
\begin{equation}
\left(
\begin{array}
[c]{c}
\omega_{11}\\
\omega_{13}\\
\omega_{31}\\
\omega_{33}
\end{array}
\right) =\left(
\begin{array}
[c]{cccc}
\left| c_{11}\right| ^{2} & \left| c_{13}\right| ^{2} & \left|
c_{31}\right| ^{2} & \left| c_{33}\right| ^{2}\\
\left| c_{13}\right| ^{2} & \left| c_{11}\right| ^{2} & \left|
c_{33}\right| ^{2} & \left| c_{31}\right| ^{2}\\
\left| c_{31}\right| ^{2} & \left| c_{33}\right| ^{2} & \left|
c_{11}\right| ^{2} & \left| c_{13}\right| ^{2}\\
\left| c_{33}\right| ^{2} & \left| c_{31}\right| ^{2} & \left|
c_{13}\right| ^{2} & \left| c_{11}\right| ^{2}
\end{array}
\right) \left(
\begin{array}
[c]{c}
\alpha_{11}\\
\alpha_{13}\\
\alpha_{31}\\
\alpha_{33}
\end{array}
\right) \label{TermsMixed}
\end{equation}
The game becomes a bi-matrix game played with the initial quantum state
(\ref{RSPIniStat}). The available pure strategies are $1$ and $3$ \emph{only},
and the terms with subscripts containing $2$ disappear. Take $x=1-p$ and
$y=1-q,$ so that $x$ and $y$ are the probabilities with which the players apply
the identity operator to the initial state $\left| \psi_{ini}\right\rangle $. The
strategy vectors $\mathbf{p}$ and $\mathbf{q}$ can now be represented simply by
the numbers $x$ and $y$, respectively. The payoff to an $x$-player against a $y
$-player is then obtained as
\begin{equation}
P(x\mathbf{,}y\mathbf{)=p}^{T}\mathbf{\omega q=}x\left\{ \omega_{11}
y+\omega_{13}(1-y)\right\} +(1-x)\left\{ \omega_{31}y+\omega_{33}
(1-y)\right\} .
\end{equation}
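A minimal sketch of Eqs. (\ref{TermsMixed}) and of the payoff $P(x,y)$, with
placeholder values for the $\alpha$'s and the $\left| c_{ij}\right| ^{2}$'s, is
given below; the classical limit $\left| c_{11}\right| ^{2}=1$ recovers
$\omega_{ij}=\alpha_{ij}$.
\begin{verbatim}
import numpy as np

def omega_matrix(a11, a13, a31, a33, c11, c13, c31, c33):
    """Reduced 2x2 matrix of Eq. (TermsMixed); the c's are |c_ij|^2, summing to 1."""
    w11 = c11*a11 + c13*a13 + c31*a31 + c33*a33
    w13 = c13*a11 + c11*a13 + c33*a31 + c31*a33
    w31 = c31*a11 + c33*a13 + c11*a31 + c13*a33
    w33 = c33*a11 + c31*a13 + c13*a31 + c11*a33
    return np.array([[w11, w13], [w31, w33]])

def payoff(x, y, w):
    """P(x,y): x and y are the probabilities of applying the identity operator."""
    return np.array([x, 1 - x]) @ w @ np.array([y, 1 - y])

# Classical limit |c_11|^2 = 1 with placeholder alpha values.
w = omega_matrix(a11=1., a13=0., a31=0., a33=1., c11=1., c13=0., c31=0., c33=0.)
print(w, payoff(0.5, 0.5, w))
\end{verbatim}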
Suppose $(x^{\star},x^{\star})$ is a NE, i.e.
\begin{align}
& P(x^{\star},x^{\star})-P(x,x^{\star})\nonumber\\
& =(x^{\star}-x)\left\{ x^{\star}(\omega_{11}-\omega_{13}-\omega_{31}
+\omega_{33})+(\omega_{13}-\omega_{33})\right\} \geq0
\end{align}
for all $x\in\lbrack0,1]$. The mixed strategy $x^{\star}=x_{q}^{\star}
=(\omega_{33}-\omega_{13})/(\omega_{11}-\omega_{13}-\omega_{31}+\omega_{33})$
makes the payoff difference $P(x^{\star},x^{\star})-P(x,x^{\star})$
identically zero. The subscript $q$ is for `quantum'. Let $\bigtriangleup
x=x^{\star}-x$ then
\begin{equation}
P(x_{q}^{\star},x)-P(x,x)=-(\bigtriangleup x)^{2}\left\{ \omega_{11}
-\omega_{13}-\omega_{31}+\omega_{33}\right\} \label{ESS2}
\end{equation}
Now $x_{q}^{\star}$ is an ESS if $\left\{ P(x_{q}^{\star},x)-P(x,x)\right\}
>0$ for all $x\neq x_{q}^{\star}$, which leads to the requirement
$(\omega_{11}-\omega_{31}-\omega_{13}+\omega_{33})<0.$
The classical game corresponds to $\left| c_{11}\right| ^{2}=1$, which
gives $\omega_{11}=\alpha_{11}$, $\omega_{13}=\alpha_{13},$ $\omega
_{31}=\alpha_{31}$, and $\omega_{33}=\alpha_{33}$, in accordance with Eq.
(\ref{QMatrix1Mixed})$.$ In case $(\alpha_{11}-\alpha_{13}-\alpha_{31}
+\alpha_{33})>0$, the mixed NE of the classical game, i.e. $x^{\star}
=x_{c}^{\star}=(\alpha_{33}-\alpha_{13})/(\alpha_{11}-\alpha_{13}-\alpha
_{31}+\alpha_{33})$, is not an ESS. Here the subscript $c$ is for `classical'.
Since we look for a situation where the evolutionary stability of a symmetric NE
changes, while the NE itself remains intact, in a switch-over of the game from
its classical to its quantum form, let us take
\begin{equation}
x_{c}^{\star}=x_{q}^{\star}=\frac{\alpha_{33}-\alpha_{13}}{\alpha_{11}
-\alpha_{31}-\alpha_{13}+\alpha_{33}}=\frac{\omega_{33}-\omega_{13}}
{\omega_{11}-\omega_{31}-\omega_{13}+\omega_{33}}, \label{MixedNE1}
\end{equation}
which says that the classical NE $x_{c}^{\star}$ is also a NE in the quantum form of
the game. Now from the matrix in Eq. (\ref{TermsMixed})
\begin{align}
& (\omega_{11}-\omega_{31}-\omega_{13}+\omega_{33})\nonumber\\
& =(\alpha_{11}-\alpha_{13}-\alpha_{31}+\alpha_{33})(\left| c_{11}\right|
^{2}-\left| c_{13}\right| ^{2}-\left| c_{31}\right| ^{2}+\left|
c_{33}\right| ^{2}) \label{MixedNE2}
\end{align}
and
\begin{align}
\omega_{33}-\omega_{13} & =\left| c_{11}\right| ^{2}(\alpha_{33}
-\alpha_{13})+\left| c_{13}\right| ^{2}(\alpha_{31}-\alpha_{11})+\nonumber\\
& \left| c_{31}\right| ^{2}(\alpha_{13}-\alpha_{33})+\left| c_{33}\right|
^{2}(\alpha_{11}-\alpha_{31}) \label{MixedNE3}
\end{align}
Substituting Eqs. (\ref{MixedNE2},\ref{MixedNE3}) into Eq.
(\ref{MixedNE1}) gives $\alpha_{33}-\alpha_{13}=\alpha_{11}-\alpha_{31}$, and
this leads to $x_{c}^{\star}=x_{q}^{\star}=1/2$. Therefore the mixed strategy
$x^{\star}=1/2$ remains a NE in both the classical and the quantum form of the
game. Consider this mixed NE for a classical game with $(\alpha_{11}
-\alpha_{13}-\alpha_{31}+\alpha_{33})>0$, a condition that ensures that it is
not an ESS. Eq. (\ref{MixedNE2}) shows the interesting possibility that one
can have $(\omega_{11}-\omega_{31}-\omega_{13}+\omega_{33})<0$ if
\begin{equation}
(\left| c_{11}\right| ^{2}+\left| c_{33}\right| ^{2})<(\left|
c_{13}\right| ^{2}+\left| c_{31}\right| ^{2}) \label{IniCondMixed}
\end{equation}
In other words, the evolutionary stability of a mixed strategy, which is a NE
in both the classical and the quantum version of the game, changes when the game
switches over between its two forms. To have a symmetric game also in its
quantum form we need $\left| c_{13}\right| ^{2}=\left| c_{31}\right| ^{2}
$, which reduces the inequality (\ref{IniCondMixed}) to $\frac{1}{2}(\left|
c_{11}\right| ^{2}+\left| c_{33}\right| ^{2})<\left| c_{13}\right| ^{2}$.
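As a hypothetical numerical illustration, take $\alpha_{11}=\alpha_{33}=1$ and
$\alpha_{13}=\alpha_{31}=0$, so that $\alpha_{33}-\alpha_{13}=\alpha_{11}
-\alpha_{31}$ and $(\alpha_{11}-\alpha_{13}-\alpha_{31}+\alpha_{33})=2>0$;
classically $x^{\star}=1/2$ is then a NE but not an ESS. An initial state with
$\left| c_{13}\right| ^{2}=\left| c_{31}\right| ^{2}=0.4$ and $\left|
c_{11}\right| ^{2}=\left| c_{33}\right| ^{2}=0.1$ satisfies both the symmetry
condition and the inequality (\ref{IniCondMixed}), and Eq. (\ref{MixedNE2}) then
gives $(\omega_{11}-\omega_{31}-\omega_{13}+\omega_{33})=2\left\{
(0.1+0.1)-(0.4+0.4)\right\} =-1.2<0$, so the same strategy $x^{\star}=1/2$
becomes an ESS in the quantum form of the game.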
Hence, a quantum version of a symmetric bi-matrix classical game with the matrix:
\begin{equation}
\left(
\begin{array}
[c]{cc}
(\alpha_{11},\alpha_{11}) & (\alpha_{13},\alpha_{31})\\
(\alpha_{31},\alpha_{13}) & (\alpha_{33},\alpha_{33})
\end{array}
\right)
\end{equation}
can be played if both the players have access to two unitary and Hermitian
operators and the game starts with a two-qubit quantum state of the form:
\begin{equation}
\left| \psi_{ini}\right\rangle =\underset{i,j=1,3}{\sum}c_{ij}\left|
ij\right\rangle \label{SymmInit}
\end{equation}
where $\underset{i,j=1,3}{\sum}\left| c_{ij}\right| ^{2}=1$. In case
$\alpha_{33}-\alpha_{13}=\alpha_{11}-\alpha_{31}$ the mixed strategy
$x^{\star}=1/2$ is not an ESS in the classical game if $(\alpha_{33}
-\alpha_{13})>0$. Nevertheless, the strategy $x^{\star}=1/2$ becomes an ESS
when $\left| c_{11}\right| ^{2}+\left| c_{33}\right| ^{2}<\left|
c_{13}\right| ^{2}+\left| c_{31}\right| ^{2}$. In case $(\alpha_{33}
-\alpha_{13})<0$ the strategy $x^{\star}=1/2$ is an ESS classically but does
not remain so if again $\left| c_{11}\right| ^{2}+\left| c_{33}\right|
^{2}<\left| c_{13}\right| ^{2}+\left| c_{31}\right| ^{2}$. Now suppose
$\left| c_{13}\right| ^{2}=\left| c_{31}\right| ^{2}=0$; then Eq.
(\ref{MixedNE2}) reduces to
\begin{equation}
(\omega_{11}-\omega_{13}-\omega_{31}+\omega_{33})=(\alpha_{11}-\alpha
_{13}-\alpha_{31}+\alpha_{33}) \label{StabilityMixedStrategyCondition}
\end{equation}
It is observed from Eq. (\ref{StabilityMixedStrategyCondition}) that
if a quantum game is played with the following simple form of the initial
quantum state:
\begin{equation}
\left| \psi_{ini}\right\rangle =c_{11}\left| 11\right\rangle +c_{33}\left|
33\right\rangle \label{SimpInit}
\end{equation}
then it is not possible to influence the evolutionary stability of a mixed NE.
\subsection{Discussion}
Mixed ESSs appear in many games of interest that are played in the natural
world. The examples of the RSP and Hawks and Doves games are well known from
evolutionary game theory. The Bishop-Cannings theorem of evolutionary game
theory does not permit pure ESSs when a mixed ESS exists in a population
engaged in a bi-matrix game. The possibility of changing evolutionary
stability of a pure symmetric NE has been considered earlier with normalized
states of the form $\left| \psi_{ini}\right\rangle =c_{11}\left|
1,1\right\rangle +c_{22}\left| 2,2\right\rangle $. Using such initial states,
however, \emph{cannot} change the evolutionary stability of a mixed strategy. In
this section, following the approach developed for the quantum RSP game, we
play a bi-matrix game with a general two-qubit pure initial quantum state.
Such a state allows the evolutionary stability of a mixed NE to be changed. For a
bi-matrix game a symmetric mixed NE is found that remains intact in both the
classical and quantum versions of the game. For this mixed NE conditions are
then found allowing the change of evolutionary stability with a switch-over of
the game, between its two forms, one classical and the other quantum.
\section{Equilibria of replicator dynamics in quantum games}
\subsection{Introduction}
Maynard Smith and Price \cite{Smith Price} introduced the idea of an ESS
essentially as a static concept. Nothing in the definition of an ESS
guarantees that the dynamics of evolution in small mutational steps will
necessarily converge to an ESS. In fact, directional evolution may also become
responsible for the establishment of strategies that are not evolutionarily
stable \cite{Cressman}.
What are the advantages of a dynamic approach towards the theory of ESSs?
A stated reason \cite{CressmanSchlag} is that a dynamic approach introduces
structural stability into game theory. Historically, Liapunov provided a
classic definition of the stability of equilibria for general dynamical systems.
This definition can also be adapted for the stability of a NE. A pair of
strategies $(p^{\star},q^{\star})$ is \emph{Liapunov stable} when, for every
neighborhood of radius $\epsilon>0$ around the point representing the pair
$(p^{\star},q^{\star})$, there is a smaller neighborhood of radius $\delta>0$
such that every trajectory starting inside the $\delta$-neighborhood remains
inside the $\epsilon$-neighborhood. When every trajectory starting in a small
neighborhood of radius $\sigma>0$ around the point $(p^{\star},q^{\star})$
converges to $(p^{\star},q^{\star})$, the strategy pair $(p^{\star},q^{\star})$
is an \emph{attractor}. The trajectories are defined by the dynamics underlying the game.
Taylor and Jonker \cite{TaylorJonker} introduced a dynamics into evolutionary
games with the hypothesis that the growth rate of those playing each strategy
is proportional to the advantage of that strategy. This hypothesis is now
understood as one of many different forms of replicator dynamics\textit{\ }
\cite{Cressman,Bomze}. In simple words assume that $p_{i}$ is the frequency
(i.e. relative proportion) of the individuals using strategy $i$ and
$\mathbf{p}$, where $\mathbf{p}^{T}=[p_{1},p_{2}...p_{i}...p_{n}]$ and $T$ is
the transpose, is a vector whose components are the frequencies with
$\overset{n}{\underset{i=1}{\Sigma}}p_{i}=1$. Let $P_{i}(\mathbf{p})$ be the
average payoff for using $i$ when the population is in the state $\mathbf{p}$.
Let $\bar{P}=\Sigma p_{j}P_{j}$ be the \emph{average success} in the
population. The replicator equation is, then, written as \cite{Sigmund}
\begin{equation}
\dot{p}_{i}=p_{i}(P_{i}(\mathbf{p})-\bar{P}) \label{Replicator Eq}
\end{equation}
where the dot is derivative with respect to time. Let the payoff matrix be
$A=(a_{ij})$ with $a_{ij}$ being the average payoff for strategy $i$ when the
other player uses $j$. The average payoff for the strategy $i$ in the
population (with the assumption of random encounters of the individuals) is
$(A\mathbf{p)}_{i}=a_{i1}p_{1}+...+a_{in}p_{n}$ and the Eq. (\ref{Replicator
Eq}) becomes
\begin{equation}
\dot{p}_{i}=p_{i}((A\mathbf{p)}_{i}-\mathbf{p}^{T}A\mathbf{p})
\label{Replicator Eq1}
\end{equation}
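A minimal forward-Euler sketch of Eq. (\ref{Replicator Eq1}), for an illustrative
(made-up) payoff matrix, is the following.
\begin{verbatim}
import numpy as np

A = np.array([[0., -1.,  1.],
              [1.,  0., -1.],
              [-1., 1.,  0.]])             # illustrative payoff matrix (a_ij)

def replicator_step(p, A, dt=0.01):
    fitness = A @ p                         # (Ap)_i: average payoff of strategy i
    mean = p @ fitness                      # p^T A p: average payoff in the population
    return p + dt * p * (fitness - mean)

p = np.array([0.6, 0.3, 0.1])               # initial frequencies, summing to 1
for _ in range(5000):
    p = replicator_step(p, A)
print(p)                                    # population state after many steps
\end{verbatim}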
The population state is then given as a point in the $n$-simplex $\bigtriangleup$
\cite{Zeeman}. The hypothesis of Taylor and Jonker \cite{TaylorJonker} gives a
flow on $\bigtriangleup$ whose flow lines represent the evolution of the
population. In evolutionary game theory it is agreed \cite{Weibull} that every
ESS is an attractor of the flow defined on $\bigtriangleup$ by the replicator
equation (\ref{Replicator Eq}); however, the converse does not hold: an
attractor is not necessarily an ESS.
Our motivation is to find out how the equilibria of replicator dynamics are affected
when a matrix game played by a population is quantized. The answer should, of course,
be sensitive to the quantization procedure selected for the matrix game. We find
the effects when the matrix game is quantized via Marinatto and Weber's scheme.
\subsection{Equilibria and attractors of replicator dynamics}
Early studies of the attractors of replicator dynamics by Schuster, Sigmund
and Wolff \cite{Eigen,Schuster} reported the dynamics of enzymatic actions of
chemicals in a mixture when their relative proportions could be changed. For
example, in the case of a mixture of three chemicals added in the correct order,
so that the corresponding initial conditions lie in the basin of an interior
attractor, the mixture becomes a stable cooperative mixture of all three chemicals. But
if they are added in the wrong order the initial conditions lie in another
basin and only one of the chemicals survives, with the other two excluded. Eigen
and Schuster \cite{Eigen,Schuster,Hofbauer} also studied the resulting dynamics in
the evolution of macromolecules before the advent of life.
Schuster and Sigmund \cite{Schuster1} applied the dynamics to animal behavior
in BoS and described the evolution of strategies by treating it as a dynamical
system. They wrote the replicator Eqs. (\ref{Replicator Eq1}) for the payoff matrix:
\begin{equation}
\begin{array}
[c]{c}
\text{Male's strategy}
\end{array}
\begin{array}
[c]{c}
X_{1}\\
X_{2}
\end{array}
\overset{
\begin{array}
[c]{cc}
\text{Female's} & \text{strategy}\\
Y_{1} & Y_{2}
\end{array}
}{\left(
\begin{array}
[c]{cc}
(a_{11},b_{11}) & (a_{12},b_{21})\\
(a_{21},b_{12}) & (a_{22},b_{22})
\end{array}
\right) } \label{GenMatrixReplicator}
\end{equation}
where a male can play the pure strategies $X_{1}$, $X_{2}$ and a female can play
the pure strategies $Y_{1}$, $Y_{2}$. In a population engaged in this game let the
frequencies of $X_{1}$ and $X_{2}$ be $p_{1}$ and $p_{2}$, respectively.
Similarly, let the frequencies of $Y_{1}$ and $Y_{2}$ be $q_{1}$ and $q_{2}$,
respectively. Obviously
\begin{align}
p_{1}+p_{2} & =q_{1}+q_{2}=1\nonumber\\
\text{where }p_{i} & \geq0,\text{ }q_{i}\geq0\text{, for }i=1,2
\label{CondReplicator}
\end{align}
The replicator equations (\ref{Replicator Eq1}) for the matrix
(\ref{GenMatrixReplicator}) with the conditions (\ref{CondReplicator}) are then
written as
\begin{align}
\dot{p} & =p(1-p)\left\{ q(a_{11}-a_{12}-a_{21}+a_{22})+(a_{12}
-a_{22})\right\} \nonumber\\
\dot{q} & =q(1-q)\left\{ p(b_{11}-b_{12}-b_{21}+b_{22})+(b_{12}
-b_{22})\right\} \label{ReplicatorEqs}
\end{align}
where $p_{1}=p$ and $q_{1}=q$. These are Lotka-Volterra type equations
describing the evolution of two populations identified as predator and prey
\cite{Hirsch}. Schuster and Sigmund \cite{Schuster1} simplified the problem by taking
\begin{align}
a_{11} & =b_{11}=a_{22}=b_{22}=0\nonumber\\
a_{12} & =a\text{ \ \ }a_{21}=b\text{ \ \ and }\nonumber\\
b_{12} & =c\text{ \ \ }b_{21}=d \label{SimplyReplicator}
\end{align}
which does not restrict the generality of the problem, and the replicator Eqs.
(\ref{ReplicatorEqs}) remain of the same form. Payoffs to the male $P_{M}(p,q)$ and to
the female $P_{F}(p,q)$, when the male plays $X_{1}$ with probability $p$ (he
then plays $X_{2}$ with the probability $(1-p)$) and the female plays $Y_{1}$
with the probability $q$ (she then plays $Y_{2}$ with the probability $(1-q)$)
are written as \cite{MarkBroom3}
\begin{align}
P_{M}(p,q) & =\mathbf{p}^{T}\mathbf{Mq}\nonumber\\
P_{F}(p,q) & =\mathbf{q}^{T}\mathbf{Fp} \label{PayoffsReplicator}
\end{align}
where
\begin{equation}
\mathbf{M=}\left(
\begin{array}
[c]{cc}
a_{11} & a_{12}\\
a_{21} & a_{22}
\end{array}
\right) ,\text{ \ \ and \ \ }\mathbf{F=}\left(
\begin{array}
[c]{cc}
b_{11} & b_{12}\\
b_{21} & b_{22}
\end{array}
\right)
\end{equation}
also
\begin{equation}
\mathbf{p=}\left(
\begin{array}
[c]{c}
p\\
1-p
\end{array}
\right) ,\text{ \ \ and \ \ }\mathbf{q=}\left(
\begin{array}
[c]{c}
q\\
1-q
\end{array}
\right)
\end{equation}
and $T$ is for transpose.
Now a quantum form of the matrix game (\ref{GenMatrixReplicator}) can be
played using Marinatto and Weber's scheme \cite{Marinatto1}. The players have
at their disposal an initial quantum state:
\begin{equation}
\left| \psi_{ini}\right\rangle =\underset{i,j=1,2}{\sum}c_{ij}\left|
ij\right\rangle
\end{equation}
with
\begin{equation}
\underset{i,j=1,2}{\sum}\left| c_{ij}\right| ^{2}=1 \label{NormRp}
\end{equation}
In the quantum version the male and female players apply the identity $\hat{I}$ to
$\left| \psi_{ini}\right\rangle $ with probabilities $p$ and $q$,
respectively. They also apply $\hat{\sigma}_{x}$ with probabilities $(1-p)$
and $(1-q)$, respectively. Payoffs to the players are written in a similar
form, as in the Eq. (\ref{PayoffsReplicator}):
\begin{align}
P_{M}(p,q) & =\mathbf{p}^{T}\mathbf{\omega q}\nonumber\\
P_{F}(p,q) & =\mathbf{q}^{T}\mathbf{\chi p}
\end{align}
$\mathbf{\omega}$ and $\mathbf{\chi}$ are quantum forms of the payoff matrices
$\mathbf{M}$ and $\mathbf{F}$ respectively i.e.
\begin{equation}
\mathbf{\omega=}\left(
\begin{array}
[c]{cc}
\omega_{11} & \omega_{12}\\
\omega_{21} & \omega_{22}
\end{array}
\right) \text{ \ \ and\ \ \ }\mathbf{\chi=}\left(
\begin{array}
[c]{cc}
\chi_{11} & \chi_{12}\\
\chi_{21} & \chi_{22}
\end{array}
\right)
\end{equation}
where
\begin{align}
\omega_{11} & =a_{11}\left| c_{11}\right| ^{2}+a_{12}\left|
c_{12}\right| ^{2}+a_{21}\left| c_{21}\right| ^{2}+a_{22}\left|
c_{22}\right| ^{2}\nonumber\\
\omega_{12} & =a_{11}\left| c_{12}\right| ^{2}+a_{12}\left|
c_{11}\right| ^{2}+a_{21}\left| c_{22}\right| ^{2}+a_{22}\left|
c_{21}\right| ^{2}\nonumber\\
\omega_{21} & =a_{11}\left| c_{21}\right| ^{2}+a_{12}\left|
c_{22}\right| ^{2}+a_{21}\left| c_{11}\right| ^{2}+a_{22}\left|
c_{12}\right| ^{2}\nonumber\\
\omega_{22} & =a_{11}\left| c_{22}\right| ^{2}+a_{12}\left|
c_{21}\right| ^{2}+a_{21}\left| c_{12}\right| ^{2}+a_{22}\left|
c_{11}\right| ^{2}
\end{align}
similarly
\begin{align}
\chi_{11} & =b_{11}\left| c_{11}\right| ^{2}+b_{12}\left| c_{12}\right|
^{2}+b_{21}\left| c_{21}\right| ^{2}+b_{22}\left| c_{22}\right|
^{2}\nonumber\\
\chi_{12} & =b_{11}\left| c_{12}\right| ^{2}+b_{12}\left| c_{11}\right|
^{2}+b_{21}\left| c_{22}\right| ^{2}+b_{22}\left| c_{21}\right|
^{2}\nonumber\\
\chi_{21} & =b_{11}\left| c_{21}\right| ^{2}+b_{12}\left| c_{22}\right|
^{2}+b_{21}\left| c_{11}\right| ^{2}+b_{22}\left| c_{12}\right|
^{2}\nonumber\\
\chi_{22} & =b_{11}\left| c_{22}\right| ^{2}+b_{12}\left| c_{21}\right|
^{2}+b_{21}\left| c_{12}\right| ^{2}+b_{22}\left| c_{11}\right| ^{2}
\label{termsF}
\end{align}
For the initial state $\left| \psi_{ini}\right\rangle =\left|
11\right\rangle $ the matrices $\mathbf{\omega}$ and $\mathbf{\chi}$ are the same
as $\mathbf{M}$ and $\mathbf{F}$, respectively. The classical game is,
therefore, embedded in the quantum game. Simplified matrices $\mathbf{\omega}$
and $\mathbf{\chi}$ can be obtained under the assumption of Eq.
(\ref{SimplyReplicator}), that is
\begin{align}
\omega_{11} & =a\left| c_{12}\right| ^{2}+b\left| c_{21}\right|
^{2}\text{, \ \ }\omega_{12}=a\left| c_{11}\right| ^{2}+b\left|
c_{22}\right| ^{2}\nonumber\\
\omega_{21} & =a\left| c_{22}\right| ^{2}+b\left| c_{11}\right|
^{2}\text{, \ \ }\omega_{22}=a\left| c_{21}\right| ^{2}+b\left|
c_{12}\right| ^{2}\nonumber\\
\chi_{11} & =c\left| c_{12}\right| ^{2}+d\left| c_{21}\right|
^{2}\text{, \ \ }\chi_{12}=c\left| c_{11}\right| ^{2}+d\left|
c_{22}\right| ^{2}\nonumber\\
\chi_{21} & =c\left| c_{22}\right| ^{2}+d\left| c_{11}\right|
^{2}\text{, \ \ }\chi_{22}=c\left| c_{21}\right| ^{2}+d\left|
c_{12}\right| ^{2}
\end{align}
The replicator Eqs. (\ref{ReplicatorEqs}) can now be written in the following
`quantum' form:
\begin{align}
\dot{x} & =x(1-x)[aK_{1}+bK_{2}-(a+b)(K_{1}+K_{2})y]\nonumber\\
\dot{y} & =y(1-y)[cK_{1}+dK_{2}-(c+d)(K_{1}+K_{2})x] \label{QRpEq}
\end{align}
where $K_{1}=\left| c_{11}\right| ^{2}-\left| c_{21}\right| ^{2}$ and
$K_{2}=\left| c_{22}\right| ^{2}-\left| c_{12}\right| ^{2}$. These
equations reduce to Eqs. (\ref{ReplicatorEqs}) for $\left| \psi
_{ini}\right\rangle =\left| 11\right\rangle $, i.e. $\left| c_{11}\right|
^{2}=1$. As in the classical version \cite{Schuster1}, the dynamics
(\ref{QRpEq}) has five rest or equilibrium points, namely $(0,0)$, $(0,1)$,
$(1,0)$, $(1,1)$, and an interior equilibrium point:
\begin{equation}
x=\frac{cK_{1}+dK_{2}}{(c+d)(K_{1}+K_{2})}\text{, \ \ }y=\frac{aK_{1}+bK_{2}
}{(a+b)(K_{1}+K_{2})} \label{IntEqRp}
\end{equation}
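As a numerical check of Eq. (\ref{IntEqRp}), the following sketch, with placeholder
values for $a$, $b$, $c$, $d$ and for the $\left| c_{ij}\right| ^{2}$'s, evaluates
the right-hand sides of Eqs. (\ref{QRpEq}) at the interior point and finds that
they vanish.
\begin{verbatim}
import numpy as np

a, b, c, d = 2.0, 1.0, 1.0, 3.0             # placeholder payoff constants
c11, c12, c21, c22 = 0.5, 0.2, 0.1, 0.2     # placeholder |c_ij|^2, summing to 1
K1, K2 = c11 - c21, c22 - c12

def rhs(x, y):
    """Right-hand sides of the 'quantum' replicator equations."""
    xdot = x*(1 - x)*(a*K1 + b*K2 - (a + b)*(K1 + K2)*y)
    ydot = y*(1 - y)*(c*K1 + d*K2 - (c + d)*(K1 + K2)*x)
    return xdot, ydot

x_star = (c*K1 + d*K2) / ((c + d)*(K1 + K2))
y_star = (a*K1 + b*K2) / ((a + b)*(K1 + K2))
print(x_star, y_star, rhs(x_star, y_star))  # the last pair is (0.0, 0.0)
\end{verbatim}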
For $\left| \psi_{ini}\right\rangle =\left| 11\right\rangle $ this equilibrium
point reduces to its value in the classical game \cite{Schuster1}, i.e.
\begin{equation}
x=\frac{c}{c+d}\text{, \ \ }y=\frac{a}{a+b}
\end{equation}
We use the method of linear approximation \cite{Hirsch} at the equilibrium points
to find the general character of the phase diagram of the system (\ref{QRpEq}).
Write the system (\ref{QRpEq}) as
\begin{equation}
\dot{x}=\mathbf{X(}x,y\mathbf{)}\text{, \ \ }\dot{y}=\mathbf{Y}(x,y)
\end{equation}
The matrix for linearization \cite{Hirsch} is
\begin{equation}
\left[
\begin{array}
[c]{cc}
\mathbf{X}_{x} & \mathbf{X}_{y}\\
\mathbf{Y}_{x} & \mathbf{Y}_{y}
\end{array}
\right] \label{linztn}
\end{equation}
where, for example, $\mathbf{X}_{x}$ denotes $\frac{\partial\mathbf{X}
}{\partial x}$. The matrix (\ref{linztn}) is evaluated at each equilibrium
point in turn. We now write these terms as
\begin{align}
\mathbf{X}_{x} & =(1-2x)\left\{ aK_{1}+bK_{2}-(a+b)(K_{1}+K_{2})y\right\}
\nonumber\\
\mathbf{X}_{y} & =-x(1-x)(a+b)(K_{1}+K_{2})\nonumber\\
\mathbf{Y}_{x} & =-y(1-y)(c+d)(K_{1}+K_{2})\nonumber\\
\mathbf{Y}_{y} & =(1-2y)\left\{ cK_{1}+dK_{2}-(c+d)(K_{1}+K_{2})x\right\}
\label{TermML}
\end{align}
and the characteristic equation \cite{Hirsch} at an equilibrium point is
obtained from
\begin{equation}
\left|
\begin{array}
[c]{cc}
(\mathbf{X}_{x}-\lambda) & \mathbf{X}_{y}\\
\mathbf{Y}_{x} & (\mathbf{Y}_{y}-\lambda)
\end{array}
\right| =0 \label{CharEqRp}
\end{equation}
The patterns of phase paths around equilibrium points classify the points into
a few principal cases. Suppose $\lambda_{1},\lambda_{2}$ are roots of the
characteristic Eq. (\ref{CharEqRp}). A few cases are as follows:
\begin{enumerate}
\item $\lambda_{1},\lambda_{2}$ are real, different, non-zero, and of the same
sign. If $\lambda_{1},\lambda_{2}>0$ then the equilibrium point is an
\emph{unstable node} or a repeller. If $\lambda_{1},\lambda_{2}<0$ the node is
stable or an \emph{attractor}.
\item $\lambda_{1},\lambda_{2}$ are real, different, non-zero, and of opposite
sign. The equilibrium point is a \emph{saddle point}.
\item $\lambda_{1},\lambda_{2}=\alpha\pm i\beta$, and $\beta\neq0$. The
equilibrium is a \emph{stable spiral} (attractor) if $\alpha<0$, an
\emph{unstable spiral} (repeller) if $\alpha>0$, a \emph{center} if $\alpha=0$.
\end{enumerate}
Consider an equilibrium or rest point $x=1,$ $y=0$, written simply as $(1,0)$.
At this point the characteristic Eq. (\ref{CharEqRp}) has the roots:
\begin{equation}
\lambda_{1}=-aK_{1}-bK_{2}\text{, \ \ }\lambda_{2}=-cK_{2}-dK_{1}
\label{RootsRp}
\end{equation}
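These roots follow directly from Eq. (\ref{TermML}): at the point $(1,0)$ one has
$\mathbf{X}_{y}=\mathbf{Y}_{x}=0$, so the matrix (\ref{linztn}) is diagonal there
and its eigenvalues are simply
\begin{align}
\mathbf{X}_{x} & =-(aK_{1}+bK_{2})\nonumber\\
\mathbf{Y}_{y} & =cK_{1}+dK_{2}-(c+d)(K_{1}+K_{2})=-(cK_{2}+dK_{1})\nonumber
\end{align}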
For the classical game, i.e. $\left| \psi_{ini}\right\rangle =\left|
11\right\rangle $, these roots are $\lambda_{1}=-a$, $\lambda_{2}=-d$.
Therefore in case $a,d>0$ the equilibrium point $(1,0)$ is an attractor in the
classical game.
Consider the interior equilibrium point $(x,y)$ of Eq. (\ref{IntEqRp}). The
terms of the matrix of linearization of Eq. (\ref{TermML}) are:
\begin{align}
\mathbf{X}_{x} & =0\text{, \ \ }\mathbf{Y}_{y}=0\nonumber\\
\mathbf{X}_{y} & =\frac{-(cK_{1}+dK_{2})(cK_{2}+dK_{1})(a+b)}{(c+d)^{2}
(K_{1}+K_{2})}\nonumber\\
\mathbf{Y}_{x} & =\frac{-(aK_{1}+bK_{2})(aK_{2}+bK_{1})(c+d)}{(a+b)^{2}
(K_{1}+K_{2})}
\end{align}
The roots of the characteristic Eq. (\ref{CharEqRp}) are the numbers $\pm\lambda$, where
\begin{equation}
\lambda=\sqrt{\frac{(aK_{1}+bK_{2})(aK_{2}+bK_{1})(cK_{1}+dK_{2}
)(cK_{2}+dK_{1})}{(a+b)(c+d)(K_{1}+K_{2})^{2}}}
\end{equation}
The term under the square root can be a positive or a negative real number. Therefore:
\begin{itemize}
\item A saddle (center) in classical game can be a center (saddle) in certain
quantum form of the game.
\item A saddle or a center in a classical (quantum) game cannot be an
attractor or a repeller in the quantum (classical) form of the game.
\end{itemize}
\chapter{Relevance of evolutionary stability in quantum games}
Evolutionary game theory considers attractors of a dynamics and ESSs with
reference to population models. Extending these ideas to a quantum setting
requires the assumption of a population of individuals, or entities, with access
to quantum states and quantum mechanical operators. What is the possible
relevance of such an assumption in the real world? To answer this we observe
that the concept of evolutionary stability is based on the following
assumptions, which also define its population setting:
\begin{itemize}
\item There are random and pair-wise interactions between the participating
players forming a population. These interactions can be re-expressed in
game-theoretic language by constructing symmetric bi-matrices.
\item A step-wise selection mechanism that ensures that a successful strategy
has a better chance to spread itself in the population at the expense of other strategies.
\end{itemize}
While bringing the ESS concept to quantum games, we retain the population
setting of evolutionary game theory as well as the step-wise selection
mechanism. However, the games played among the players, during pair-wise
interactions, are replaced with their quantum counterparts. The question now
naturally arises of how such a switch-over to a quantum game changes the
evolutionary outcome.
The following are some suggestions of areas where the quantization of games
may affect, and even decide, an evolutionary outcome.
\section{Quantum mechanics deciding an evolutionary outcome}
Evolutionary game theory was developed to provide game-theoretic models of
animal conflicts that occur in our macro-world. However, recent work in
biology \cite{Turner} suggests that nature also plays classical games at
micro-level. Bacterial infections by viruses are classical game-like
situations where nature prefers dominant strategies. The game-theoretical
explanation of stable states in a population of interacting individuals can be
considered a model of rationality which is physically grounded in natural selection.
A motivation to study evolutionary stability in quantum games exists because
the population setting of evolutionary game theory can also be introduced to
quantum games. It can be done on the \emph{same} grounds as in the classical
games. The notion of a Nash equilibrium, which became the topic of the
pioneering work on quantum games, was itself motivated by a population setting.
Consideration of evolutionary stability in quantum games shows how
quantization of games, played in a population, can lead to new stable states
of the population. It shows that the presence of quantum interactions, in a
population undergoing evolution, can alter its stable states resulting from
the evolutionary dynamics. When quantum effects decide the evolutionary
outcomes, the role for quantum mechanics clearly increases, from just keeping
atoms together, to deciding the outcome of an evolutionary dynamics.
\section{Development of complexity and self-organization}
This new role for quantum mechanics can be to define and maintain complexity
emerging from quantum interactions among a collection of molecules. Eigen,
Schuster, Sigmund and Wolf \cite{Eigen,Schuster} consider an example of a
mixture in which an equilibrium is achieved from competing chemical reactions.
Such an equilibrium can also be an outcome of quantum interactions occurring
at molecular level. When quantum nature of molecular interactions can decide
an equilibrium state, there is a clear possibility for the quantum mechanical
role in the models of self-organization in matter. These considerations seem
quite relevant to the evolution of macromolecules before the advent of life.
The possibility that the stability of solutions (or equilibria) can be affected by
quantum interactions provides a new approach towards understanding the rise of
complexity in groups of quantum-interacting entities.
Physicists have expressed opinions \cite{Frohlich} about the possibility of
quantum mechanics `fast tracking' a chemical soup to states that are
biological and complex, and the debate continues. We suggest that quantum game
theory also has contributions to make towards the attempts to understand the
quantum mechanical role in life, especially in evolution and in the development
of self-organization and complexity in molecular systems, and possibly in the
origin of consciousness.
Considering development of quantum mechanical models of life, in a recent
paper Flitney and Abbott\ \cite{FlitneyAbbott3}\ studied a version of John
Conway's game of Life \cite{Game of Life}\ where the normal binary values of
the cells are replaced by oscillators which can represent a superposition of
states. They showed that the original game of Life is reproduced in the
classical limit, but in general additional properties not seen in the original
game are present that display some of the effects of a quantum mechanical Life.
\section{Genetic code evolution}
Genetic code is the relationship between sequence of bases in DNA and the
sequence of amino acids in proteins. Suggestions have been made earlier about
quantum mechanical role in the genetic code. For example, supersymmetry in
particle physics, giving a unified description of fermions and bosons, have
been suggested \cite{Bashford} to provide an explanation of coding assignments
in genetic code. Recent work \cite{Knight} about evolvability of the genetic
code suggests that the code, like all other features of organisms, was shaped
by natural selection. The question about the process and evolutionary
mechanism by which the genetic code was optimized is still unanswered. Two
suggested possibilities are:
\begin{itemize}
\item A large number of codes existed out of which the adaptive one was selected.
\item Adaptive and error-minimizing constraints gave rise to an adaptive code
via code expansion and simplification.
\end{itemize}
The second possibility of code expansion from earlier simpler forms is now
thought to be supported by much empirical and genetic evidence \cite{Knight1}
and results suggest that the present genetic code was strongly influenced by
natural selection for error minimization.
Patel \cite{Patel} suggested that quantum dynamics played a role in DNA
replication and in the optimization criteria involved in genetic information
processing. He considers the criteria as a task similar to an unsorted
assembly operation, with a possible connection to Grover's database search
algorithm \cite{Grover}, given that different optimal solutions result from the
classical and quantum dynamics.
The assumption in this approach is that an adaptive code was selected out of a
large number that existed earlier. The suggestion that natural selection is
the process behind error minimization in the mechanism of adaptive code
evolution puts forward an evolutionary approach to this optimization
problem. We believe that, in the evolution and expansion of the code from its
earlier simpler forms, quantum dynamics has played a role. The mechanism
leading to this optimization will be, however, different. Our result that
stable outcomes, of an evolutionary process based on natural selection, may
also depend on the quantum nature of interactions clearly implies the
possibility that such interactions may decide the optimal outcome of evolution.
We believe that the code optimization is a problem having close similarities
to the problem of evolutionary stability. And this optimization was probably
achieved by interactions that were quantum mechanical in nature.
\section{Quantum evolutionary algorithms}
A polynomial-time algorithm that can solve an NP-complete problem is not known yet. A
viable alternative approach, shown to find acceptable solutions within a
reasonable time period, is the evolutionary search \cite{Back}. Iteration of
selection based on competition, random variation or mutation, and exploration
of the fitness landscape of possible solutions, are the basic ingredients of
many distinct paradigms of evolutionary computing \cite{Back1}. On the other
hand superposition of all possible solution states, unitary operators
exploiting interference to enhance the amplitude of the desired states, and
final measurement extracting the solution are the components of quantum
computing. These two approaches in computing are believed to represent
different philosophies \cite{Greenwood}.
Finding ESSs can also be formulated as an evolutionary algorithm. The view
that quantum mechanics has a role in the theory of ESSs suggests that the two
philosophies -- considered different so far -- may have common grounds uniting
them. It also hints at the possibility of evolutionary algorithms that utilize,
or even exploit, quantum effects. In such an evolutionary algorithm, we may
have, for example, fitness functions depending on the amount of entanglement
present. The natural question to ask is then how the population will evolve
towards an equilibrium state in relation to the amount of entanglement.
\section{Evolutionary quantum optimization and computation}
The perspective that matrix game theory provides, on what should be an outcome
of evolution, has been studied in this thesis. Another perspective is provided
by optimization models \cite{Meszena}. In evolutionary matrix games a
frequency-dependent selection takes place and all alternative strategies
become equally fit when an ESS establishes itself. On the other hand, in
optimization models the selection is frequency-independent and evolution is
imagined as a hill-climbing process. Optimal solution is obtained when fitness
is maximized. Evolutionary optimization is the basis of evolutionary and
genetic algorithms and is generally considered to be a different approach from
ESSs in matrix games. These are not, however, in direct contradiction and give
different outlooks on evolutionary process. We suggest that evolutionary
optimization is another area where a role for quantum mechanics exists and
quantum game theory provides hints to find it.
It seems appropriate to mention here the evolutionary quantum computation
(EQC) described in Ref. \cite{Goertzel}. In EQC an ensemble of quantum
subsystems is considered to change continually in such a way as to optimize some
measure of the emergent patterns between the system and its environment. It seems
reasonable that this optimization is related to an equilibrium or to some of its
properties. When quantum interactions decide the equilibria and their
stability properties, the optimization itself depends on them.
The brain has also been proposed \cite{Goertzel} as an evolutionary quantum computer.
\chapter{Cooperation in quantum games}
\section{Introduction}
In contrast to non-cooperative games, the players in cooperative games are
able to form binding agreements and may communicate with one another. The distinctive
feature of cooperative games is a strong incentive to work together to receive
the largest total payoff. These games allow players to form coalitions,
make binding agreements, pay compensations, make side payments etc. Von Neumann and
Morgenstern \cite{Neumann} in their pioneering work on the theory of games
offered models of coalition formation where the strategy of each player
consists of choosing the coalition s/he wishes to join. In coalition games,
which are part of cooperative game theory, the possibilities for the players
are described by the available resources of different groups (coalitions) of
players. Joining a group or remaining outside is part of a player's strategy
and affects his/her payoff.
Recent work in quantum games raises a natural and interesting question: what
is the possible quantum mechanical role in cooperative games, which are
considered an integral part of classical game theory? In our view it may
be quite interesting, and fruitful as well, to investigate coalitions in
quantum versions of cooperative games. Our present motivation is to
investigate what might happen to the advantage of forming a coalition in a
quantum game compared to its classical analogue. We rely on the concepts and
ideas of von Neumann's cooperative game theory \cite{Neumann} and consider a
three-player coalition game in a quantum form. We then compare it to the
classical version of the game and see how the advantage of forming a coalition
can be affected.
In classical analysis of coalition games the notion of a strategy disappears;
the main features are those of a coalition and the value or worth of the
coalition. The underlying assumption is that each coalition can guarantee its
members a certain amount called the `\emph{value of a coalition}'.
``The value of coalition measures the worth the coalition possesses and is
characterized as the payoff which the coalition can assure for itself by
selecting an appropriate strategy, whereas the `odd man' can prevent the
coalition from getting more than this amount'' \cite{Burger}. Using
this definition we study cooperative games in quantum settings to see how
advantages of making coalitions can be influenced in the new setting.
Within the framework of playing a quantum game given by Marinatto and Weber,
we find a quantum form of a symmetric cooperative game played by three
players. In the classical form of this game any two of the three players get an
advantage when they successfully form a coalition and play the same strategy.
We investigate how the advantage of forming a coalition is affected when the
game switches its form from classical to quantum.
\section{A three-player symmetric cooperative game}
\subsection{Classical form}
A classical three-person normal form game \cite{Burger} is given by:
\begin{itemize}
\item Three non-empty sets $\Sigma_{A}$, $\Sigma_{B}$, and $\Sigma_{C}$.
These are the strategy sets of the players $A$, $B$, and $C$.
\item Three real valued functions $P_{A}$, $P_{B}$, and $P_{C}$ defined on
$\Sigma_{A}\times\Sigma_{B}\times\Sigma_{C}$.
\end{itemize}
The \emph{product space} $\Sigma_{A}\times\Sigma_{B}\times\Sigma_{C}$ is the
set of all tuples $(\sigma_{A},\sigma_{B},\sigma_{C})$ with $\sigma_{A}
\in\Sigma_{A}$, $\sigma_{B}\in\Sigma_{B}$ and $\sigma_{C}\in\Sigma_{C}$. A
strategy is understood as such a tuple $(\sigma_{A},\sigma_{B},\sigma_{C})$
and $P_{A}$, $P_{B}$, $P_{C}$ are payoff functions of the three players. The
game is denoted as $\Gamma=\left\{ \Sigma_{A},\Sigma_{B},\Sigma_{C}
;P_{A},P_{B},P_{C}\right\} $. Let $\Re=\left\{ A,B,C\right\} $ be the set
of players and $\wp$ be an arbitrary subset of $\Re$. The players in $\wp$ may
form a coalition so that, for all practical purposes, the coalition $\wp$
appears as a single player. It is expected that players in $(\Re-\wp)$ will
form an opposing coalition and the game has two opposing ``coalition players''
i.e. $\wp$ and $(\Re-\wp)$.
We study a quantum version of an example of a classical three-player cooperative
game discussed in Ref. \cite{Burger}. Each of the three players $A$, $B$, and
$C$ chooses one of the two strategies $1$, $2$. If the three players choose
the same strategy there is no payoff; otherwise, the two players who have
chosen the same strategy receive one unit of money each from the `odd man.'
Payoff functions $P_{A}$, $P_{B}$ and $P_{C}$ for players $A$, $B$ and $C$,
respectively, are given as \cite{Burger}:
\begin{align}
P_{A}(1,1,1) & =P_{A}(2,2,2)=0\nonumber\\
P_{A}(1,1,2) & =P_{A}(2,2,1)=P_{A}(1,2,1)=P_{A}(2,1,2)=1\nonumber\\
P_{A}(1,2,2) & =P_{A}(2,1,1)=-2 \label{PayoffsCoop}
\end{align}
with similar expressions for $P_{B}$ and $P_{C}$. Suppose $\wp=\left\{
B,C\right\} $, hence $\Re-\wp=\left\{ A\right\} $. The coalition game
represented by $\Gamma_{\wp}$ is given by the payoff matrix \cite{Burger}:
\begin{equation}
\begin{array}
[c]{c}
\left[ 11\right] \\
\left[ 12\right] \\
\left[ 21\right] \\
\left[ 22\right]
\end{array}
\overset{
\begin{array}
[c]{cc}
\left[ 1\right] & \left[ 2\right]
\end{array}
}{\left(
\begin{array}
[c]{cc}
0 & 2\\
-1 & -1\\
-1 & -1\\
2 & 0
\end{array}
\right) }
\end{equation}
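As a quick consistency check of this matrix, its entries can be reproduced
directly from the verbal payoff rule. The following minimal Python sketch is an
illustration added here (it is not part of the original analysis) and uses the
strategy labels of Eq. (\ref{PayoffsCoop}):
\begin{verbatim}
import itertools

def payoffs(sA, sB, sC):
    """Payoffs (P_A, P_B, P_C): the two players choosing alike each
    receive one unit from the odd man; no payoff if all choose alike."""
    s = (sA, sB, sC)
    if len(set(s)) == 1:
        return (0, 0, 0)
    majority = max(set(s), key=s.count)      # strategy chosen by two players
    return tuple(1 if x == majority else -2 for x in s)

rows = [(1, 1), (1, 2), (2, 1), (2, 2)]      # coalition strategies [11]..[22]
cols = [1, 2]                                # the odd man's strategies
# Coalition payoff = P_B + P_C for each combination of moves.
matrix = [[sum(payoffs(a, b, c)[1:]) for a in cols] for (b, c) in rows]
assert matrix == [[0, 2], [-1, -1], [-1, -1], [2, 0]]
\end{verbatim}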
Here the strategies $\left[ 12\right] $ and $\left[ 21\right] $ are
dominated by $\left[ 11\right] $ and $\left[ 22\right] $. After
eliminating these dominated strategies the payoff matrix becomes
\begin{equation}
\begin{array}
[c]{c}
\left[ 11\right] \\
\left[ 22\right]
\end{array}
\overset{
\begin{array}
[c]{cc}
\left[ 1\right] & \left[ 2\right]
\end{array}
}{\left(
\begin{array}
[c]{cc}
0 & 2\\
2 & 0
\end{array}
\right) }
\end{equation}
It is seen that the mixed strategies:
\begin{align}
& \frac{1}{2}\left[ 11\right] +\frac{1}{2}\left[ 22\right] \text{,}
\label{cltCoop}\\
& \frac{1}{2}\left[ 1\right] +\frac{1}{2}\left[ 2\right] \text{.}
\label{lftCoop}
\end{align}
are optimal for $\wp$ and $(\Re-\wp)$ respectively. With these strategies a
payoff of $1$ for the coalition $\wp$ is assured against all strategies of the
opponent; hence, the value of the coalition $\upsilon(\Gamma_{\wp})$ is $1$, i.e.
$\upsilon(\left\{ B,C\right\} )=1$. Since $\Gamma$ is a zero-sum game
$\upsilon(\Gamma_{\wp})$ can also be used to find $\upsilon(\Gamma_{\Re-\wp})$
as $\upsilon(\left\{ A\right\} )=-1$. The game is symmetric and one can write
\begin{align}
\upsilon(\Gamma_{\wp}) & =1\text{, \ \ and\ \ \ }\upsilon(\Gamma_{\Re-\wp
})=-1\text{ or}\nonumber\\
\upsilon(\left\{ A\right\} ) & =\upsilon(\left\{ B\right\}
)=\upsilon(\left\{ C\right\} )=-1\nonumber\\
\upsilon(\left\{ A,B\right\} ) & =\upsilon(\left\{ B,C\right\}
)=\upsilon(\left\{ C,A\right\} )=1 \label{VcltC}
\end{align}
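The value $\upsilon(\left\{ B,C\right\} )=1$ can also be checked numerically.
The short Python sketch below is purely illustrative (it is not part of the
thesis derivation) and evaluates the reduced $2\times2$ game obtained after
eliminating the dominated strategies:
\begin{verbatim}
payoff = [[0, 2], [2, 0]]            # rows: [11], [22]; columns: [1], [2]

def coalition_payoff(l, m):
    """Coalition plays l[11] + (1-l)[22]; odd man plays m[1] + (1-m)[2]."""
    return (l*m*payoff[0][0] + l*(1 - m)*payoff[0][1]
            + (1 - l)*m*payoff[1][0] + (1 - l)*(1 - m)*payoff[1][1])

# With l = 1/2 the coalition secures 1 regardless of the opponent's m.
for m in (0.0, 0.25, 0.5, 1.0):
    assert abs(coalition_payoff(0.5, m) - 1.0) < 1e-12
\end{verbatim}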
\subsection{Quantum form}
In the quantum form of this three-player game the players -- identified as $A$,
$B$ and $C$ -- play their strategies by applying the identity operator
$\hat{I}$ with probabilities $p$, $q$, and $r$, respectively, on a three-qubit
initial quantum state. The players apply the operator $\hat{\sigma}_{x}$ with
probabilities $(1-p)$, $(1-q)$, and $(1-r)$ respectively. If $\rho_{in}$ is
the initial state, the final state, after players have played their
strategies, becomes
\begin{equation}
\rho_{fin}=\underset{\hat{U}=\hat{I},\hat{\sigma}_{x}}{\sum}\Pr(\hat{U}
_{A})\Pr(\hat{U}_{B})\Pr(\hat{U}_{C})\hat{U}_{A}\otimes\hat{U}_{B}\otimes
\hat{U}_{C}\rho_{in}\hat{U}_{A}^{\dagger}\otimes\hat{U}_{B}^{\dagger}
\otimes\hat{U}_{C}^{\dagger} \label{FinstatCoop}
\end{equation}
where the unitary and Hermitian operator $\hat{U}$ can be either $\hat{I}$ or
$\hat{\sigma}_{x}$. $\Pr(\hat{U}_{A})$, $\Pr(\hat{U}_{B})$ and $\Pr(\hat
{U}_{C})$ are the probabilities with which players $A$, $B$, and $C$ apply the
operator $\hat{U}$ on the initial state respectively. $\rho_{fin}$ corresponds
to a convex combination of players' quantum operations. Let the arbiter
prepare a three-qubit pure initial quantum state:
\begin{equation}
\left| \psi_{in}\right\rangle =\underset{i,j,k=1,2}{\sum}c_{ijk}\left|
ijk\right\rangle \text{, \ \ where \ \ }\underset{i,j,k=1,2}{\sum}\left|
c_{ijk}\right| ^{2}=1 \label{InstateCoop}
\end{equation}
where the basis vectors of the quantum state are $\left| ijk\right\rangle $
for $i,j,k=1,2$. The state (\ref{InstateCoop}) is in $2\otimes2\otimes2$
dimensional Hilbert space and corresponds to three qubits.
Assume the matrix of the three-player game is given by $24$ constants
$\alpha_{t},\beta_{t},\gamma_{t}$ with $1\leq t\leq8$. Write the payoff
operators for players $A$, $B$ and $C$ as
\begin{align}
(P_{A,B,C})_{oper} & =\alpha_{1},\beta_{1},\gamma_{1}\left|
111\right\rangle \left\langle 111\right| +\alpha_{2},\beta_{2},\gamma
_{2}\left| 211\right\rangle \left\langle 211\right| +\nonumber\\
& \alpha_{3},\beta_{3},\gamma_{3}\left| 121\right\rangle \left\langle
121\right| +\alpha_{4},\beta_{4},\gamma_{4}\left| 112\right\rangle
\left\langle 112\right| +\nonumber\\
& \alpha_{5},\beta_{5},\gamma_{5}\left| 122\right\rangle \left\langle
122\right| +\alpha_{6},\beta_{6},\gamma_{6}\left| 212\right\rangle
\left\langle 212\right| +\nonumber\\
& \alpha_{7},\beta_{7},\gamma_{7}\left| 221\right\rangle \left\langle
221\right| +\alpha_{8},\beta_{8},\gamma_{8}\left| 222\right\rangle
\left\langle 222\right| \label{PayoperCoop}
\end{align}
Payoffs to the players $A$, $B$ and $C$\ are then obtained as mean values of
these operators:
\begin{equation}
P_{A,B,C}(p,q,r)=\text{Tr}\left[ (P_{A,B,C})_{oper}\rho_{fin}\right]
\end{equation}
\begin{figure}
\caption{A three-player quantum game played with Marinatto and Weber's scheme.
Players $B$ and $C$ form a coalition.}
\label{Fig3}
\end{figure}
where the players' moves are identified by the numbers $p$, $q$ and $r$,
respectively. Fig. (\ref{Fig3}) shows the three-player quantum game. The
cooperative game of Eq. (\ref{PayoffsCoop}) with the classical payoff
functions $P_{A}$, $P_{B}$ and $P_{C}$ for players $A$, $B$ and $C$
respectively, together with the definition of payoff operators for these
players in Eq. (\ref{PayoperCoop}), imply that
\begin{equation}
\alpha_{1}=\alpha_{8}=0\text{, \ \ \ \ }\alpha_{3}=\alpha_{4}=\alpha
_{6}=\alpha_{7}=1\text{, \ \ and\ \ \ }\alpha_{2}=\alpha_{5}=-2
\end{equation}
With these constants, in the quantum version of the game, the payoff to player $A
$, for example, can be found as
\begin{equation}
P_{A}(p,q,r)=
\begin{array}
[c]{c}
(-4rq-2p+2pr+2pq+r+q)(\left| c_{111}\right| ^{2}+\left| c_{222}\right|
^{2})\\
+(-4rq+2p-2pr-2pq+3r+3q-2)(\left| c_{211}\right| ^{2}+\left| c_{122}
\right| ^{2})\\
+(4rq+2pr-2pq-3r-q+1)(\left| c_{121}\right| ^{2}+\left| c_{212}\right|
^{2})\\
+(4rq-2pr+2pq-r-3q+1)(\left| c_{112}\right| ^{2}+\left| c_{221}\right|
^{2})
\end{array}
\label{PoffCoopQ}
\end{equation}
Similarly, payoffs to players $B$ and $C$ can be obtained. Classical mixed
strategy payoffs can be recovered from Eq. (\ref{PoffCoopQ}) by taking
$\left| c_{111}\right| ^{2}=1$. The classical game is therefore embedded in
its quantum form.
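This embedding can also be verified numerically. The sketch below is an
illustrative check (not part of the original derivation): it compares Eq.
(\ref{PoffCoopQ}) at $\left| c_{111}\right| ^{2}=1$ with the classical
expected payoff computed directly from Eq. (\ref{PayoffsCoop}), where $p$, $q$,
$r$ are the probabilities that the respective qubits of the state
$\left| 111\right\rangle $ are left unflipped (i.e. of applying $\hat{I}$):
\begin{verbatim}
import itertools, random

# Classical payoffs P_A from Eq. (PayoffsCoop), strategies labelled 1 and 2.
PA = {(1,1,1): 0, (2,2,2): 0,
      (1,1,2): 1, (2,2,1): 1, (1,2,1): 1, (2,1,2): 1,
      (1,2,2): -2, (2,1,1): -2}

def classical_PA(p, q, r):
    prob = [{1: p, 2: 1-p}, {1: q, 2: 1-q}, {1: r, 2: 1-r}]
    return sum(prob[0][a]*prob[1][b]*prob[2][c]*PA[(a, b, c)]
               for a, b, c in itertools.product((1, 2), repeat=3))

def quantum_PA(p, q, r, c111=1.0, c222=0.0, c211=0.0, c122=0.0,
               c121=0.0, c212=0.0, c112=0.0, c221=0.0):
    """Eq. (PoffCoopQ); the arguments c... are squared moduli."""
    return ((-4*r*q - 2*p + 2*p*r + 2*p*q + r + q)*(c111 + c222)
            + (-4*r*q + 2*p - 2*p*r - 2*p*q + 3*r + 3*q - 2)*(c211 + c122)
            + (4*r*q + 2*p*r - 2*p*q - 3*r - q + 1)*(c121 + c212)
            + (4*r*q - 2*p*r + 2*p*q - r - 3*q + 1)*(c112 + c221))

random.seed(0)
for _ in range(100):
    p, q, r = (random.random() for _ in range(3))
    assert abs(classical_PA(p, q, r) - quantum_PA(p, q, r)) < 1e-12
\end{verbatim}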
The classical form of this game is symmetric in the sense that the payoff to a
player depends on his/her strategy and not on his/her identity. The
requirements that result in a three-player symmetric game are written as
\begin{align}
P_{A}(p,q,r) & =P_{A}(p,r,q)=P_{B}(q,p,r)=P_{B}(r,p,q)\nonumber\\
& =P_{C}(r,q,p)=P_{A}(q,r,p) \label{RqmntsCoop}
\end{align}
Now in this quantum form of the game, $P_{A}(p,q,r)$ becomes the same as
$P_{A}(p,r,q)$ when \cite{DaiChen}:
\begin{equation}
\left| c_{121}\right| ^{2}+\left| c_{212}\right| ^{2}=\left|
c_{112}\right| ^{2}+\left| c_{221}\right| ^{2} \label{Rqmnts1Coop}
\end{equation}
Similarly $P_{B}(q,p,r)=P_{B}(r,p,q)$ and $P_{C}(r,q,p)=P_{C}(q,r,p)$ when the
following conditions hold \cite{DaiChen}:
\begin{align}
\left| c_{211}\right| ^{2}+\left| c_{122}\right| ^{2} & =\left|
c_{112}\right| ^{2}+\left| c_{221}\right| ^{2}\nonumber\\
\left| c_{211}\right| ^{2}+\left| c_{122}\right| ^{2} & =\left|
c_{121}\right| ^{2}+\left| c_{212}\right| ^{2}
\label{Requrmnts2Cooperativegames}
\end{align}
Combining Eq. (\ref{Rqmnts1Coop}) and Eq. (\ref{Requrmnts2Cooperativegames}) gives
\begin{equation}
\left| c_{211}\right| ^{2}+\left| c_{122}\right| ^{2}=\left|
c_{121}\right| ^{2}+\left| c_{212}\right| ^{2}=\left| c_{112}\right|
^{2}+\left| c_{221}\right| ^{2}
\end{equation}
and then the payoff to a $p$-player remains the same when the other two players
interchange their strategies. The symmetry conditions (\ref{RqmntsCoop}) hold
if, together with Eq. (\ref{Rqmnts1Coop}), the following relations are
\emph{also} true
\begin{equation}
\begin{array}
[c]{cc}
\alpha_{1}=\beta_{1}=\gamma_{1}, & \alpha_{5}=\beta_{6}=\gamma_{7}\\
\alpha_{2}=\beta_{3}=\gamma_{4}, & \alpha_{6}=\beta_{5}=\gamma_{6}\\
\alpha_{3}=\beta_{2}=\gamma_{3}, & \alpha_{7}=\beta_{7}=\gamma_{5}\\
\alpha_{4}=\beta_{4}=\gamma_{2}, & \alpha_{8}=\beta_{8}=\gamma_{8}
\end{array}
\end{equation}
These form the extra restrictions on the constants of the payoff matrix and,
together with the conditions (\ref{Rqmnts1Coop}), give a three-player
symmetric game in a quantum form. No subscript in a payoff expression is then
needed and $P(p,q,r)$ represents the payoff to a $p$-player against two other
players playing $q$ and $r$. The payoff $P(p,q,r)$ is found as
\begin{equation}
P(p,q,r)=(\left| c_{111}\right| ^{2}+\left| c_{222}\right| ^{2}-\left|
c_{211}\right| ^{2}-\left| c_{122}\right| ^{2})(-4rq-2p+2pr+2pq+r+q)
\label{QpayoffCoop}
\end{equation}
Assume now that the pure strategies $[1]$ and $[2]$ correspond to $p=0$ and
$p=1$, respectively. The mixed strategy $n\left[ 1\right] +(1-n)\left[
2\right] $, where $0\leq n\leq1$, means that the strategy $\left[ 1\right]
$ is played with probability $n$ and $\left[ 2\right] $ with probability
$(1-n)$. Also \emph{suppose} that coalition $\wp$ plays the mixed
strategy\footnote{In a Comment on ``Quantum cooperative games'' that appeared
in Physics Letters A, Volume 328, Issues 4-5, Pages 414-415, 2 August
2004,\ Liang Dai and Qing Chen have pointed out that because the mixed
strategies $[12]$ and $[21]$ are not always dominated by $[11]$ and $[22]$ in
quantum form, there is no ground for assuming that the coalition $\wp$ always
plays the mixed strategy $l[11]+(1-l)[22]$.}:
\begin{equation}
l[11]+(1-l)[22] \label{cltQ}
\end{equation}
where the strategy $[11]$ means that both players in the coalition $\wp$ apply
the identity operator $\hat{I}$ with zero probability. Similarly the strategy
$[22]$ can be defined. The strategy in the Eq. (\ref{cltQ}) is such that the
coalition $\wp$ plays $[11]$ with probability $l$ and $[22]$ with probability
$(1-l)$. Similarly assume that the player in $(\Re-\wp)$ plays the mixed strategy:
\begin{equation}
m[1]+(1-m)[2] \label{lftQ}
\end{equation}
The payoff to the coalition $\wp$ is then obtained as
\begin{align}
P_{\wp} & =(lm)P_{\wp\lbrack111]}+l(1-m)P_{\wp\lbrack112]}+\nonumber\\
& (1-l)mP_{\wp\lbrack221]}+(1-l)(1-m)P_{\wp\lbrack222]} \label{cltPQ}
\end{align}
where $P_{\wp\lbrack111]}$ is the payoff to $\wp$ when all three players play
$p=0$ i.e. the strategy $[1]$. Similarly $P_{\wp\lbrack221]}$ is the coalition
payoff when the coalition players play $p=1$ and the player in $(\Re-\wp)$
plays $p=0$. Now from Eq. (\ref{QpayoffCoop}) we get
\begin{align}
P_{\wp\lbrack111]} & =2P(0,0,0)=0\nonumber\\
P_{\wp\lbrack112]} & =2P(0,0,1)=2(\left| c_{111}\right| ^{2}+\left|
c_{222}\right| ^{2}-\left| c_{211}\right| ^{2}-\left| c_{122}\right|
^{2})\nonumber\\
P_{\wp\lbrack221]} & =2P(1,1,0)=2(\left| c_{111}\right| ^{2}+\left|
c_{222}\right| ^{2}-\left| c_{211}\right| ^{2}-\left| c_{122}\right|
^{2})\nonumber\\
P_{\wp\lbrack222]} & =2P(1,1,1)=0
\end{align}
Also from Eq. (\ref{cltPQ}):
\begin{equation}
P_{\wp}=2(\left| c_{111}\right| ^{2}+\left| c_{222}\right| ^{2}-\left|
c_{211}\right| ^{2}-\left| c_{122}\right| ^{2})\left\{
l(1-m)+(1-l)m\right\}
\end{equation}
To find the value of coalition $\upsilon(\Gamma_{\wp})$ in the quantum game we
find $\frac{\partial P_{\wp}}{\partial m}$ and equate it to zero i.e. $P_{\wp
}$ is such a payoff to $\wp$ that the player in $(\Re-\wp)$ cannot change it
by changing his/her strategy given in Eq. (\ref{lftQ}). It gives,
interestingly, $l=\frac{1}{2}$ and the classical optimal strategy of the
coalition $\frac{1}{2}\left[ 11\right] +\frac{1}{2}\left[ 22\right] $
becomes optimal in the quantum game as well. In the quantum game the coalition
then secures the following payoff, which is also termed the value of the coalition:
\begin{equation}
\upsilon(\Gamma_{\wp})=(\left| c_{111}\right| ^{2}+\left| c_{222}\right|
^{2})-(\left| c_{211}\right| ^{2}+\left| c_{122}\right| ^{2})
\end{equation}
Similarly we get the value of coalition for $(\Re-\wp)$:
\begin{equation}
\upsilon(\Gamma_{\Re-\wp})=-\left\{ \left| c_{111}\right| ^{2}+\left|
c_{222}\right| ^{2}+\left| c_{211}\right| ^{2}+\left| c_{122}\right|
^{2}\right\}
\end{equation}
Note that these values reduce to their classical counterparts of Eq.
(\ref{VcltC}) when the initial quantum state becomes unentangled and is given
by $\left| \psi_{in}\right\rangle =\left| 111\right\rangle $. The classical form
of the coalition game is, therefore, a subset of its quantum version.
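A short numerical sketch (purely illustrative, in the notation above) confirms
that with $l=\frac{1}{2}$ the coalition payoff of Eq. (\ref{cltPQ}) is
independent of the opponent's $m$ and equals $(\left| c_{111}\right|
^{2}+\left| c_{222}\right| ^{2})-(\left| c_{211}\right| ^{2}+\left|
c_{122}\right| ^{2})$, reducing to the classical value $1$ for $\left|
c_{111}\right| ^{2}=1$:
\begin{verbatim}
def coalition_payoff(l, m, c111=1.0, c222=0.0, c211=0.0, c122=0.0):
    """Coalition payoff of Eq. (cltPQ); the c's are squared moduli."""
    delta = (c111 + c222) - (c211 + c122)
    return 2*delta*(l*(1 - m) + (1 - l)*m)

for m in (0.0, 0.25, 0.5, 1.0):
    assert abs(coalition_payoff(0.5, m) - 1.0) < 1e-12       # classical limit
    assert abs(coalition_payoff(0.5, m, 0.6, 0.1, 0.2, 0.1) - 0.4) < 1e-9
\end{verbatim}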
Suppose the arbiter now has at his disposal a quantum state:
\begin{gather}
\left| \psi_{in}\right\rangle =c_{111}\left| 111\right\rangle +c_{222}
\left| 222\right\rangle +c_{211}\left| 211\right\rangle +c_{122}\left|
122\right\rangle \nonumber\\
\text{with }(\left| c_{211}\right| ^{2}+\left| c_{122}\right|
^{2})>(\left| c_{111}\right| ^{2}+\left| c_{222}\right| ^{2})
\end{gather}
If we \emph{assume} that, with this initial state, the coalition $\wp$ still
plays the mixed strategy\footnote{Liang Dai and Qing Chen have also indicated
that when $(\left| c_{211}\right| ^{2}+\left| c_{122}\right|
^{2})>(\left| c_{111}\right| ^{2}+\left| c_{222}\right| ^{2})$ the
strategies $\frac{1}{2}[12]+\frac{1}{2}[21]$ and $\frac{1}{2}[1]+\frac{1}
{2}[2]$ are optimal.} $l[11]+(1-l)[22]$ of the classical case, then
$\upsilon(\Gamma_{\wp})$ becomes a negative quantity and $\upsilon
(\Gamma_{(\Re-\wp)})=-1$ because of the normalization given in Eq.
(\ref{InstateCoop}). Another possible case is when the arbiter has the state:
\begin{equation}
\left| \psi_{in}\right\rangle =c_{211}\left| 211\right\rangle +c_{122}
\left| 122\right\rangle
\end{equation}
at his disposal. Now both $\upsilon(\Gamma_{\wp})$ and $\upsilon
(\Gamma_{\Re-\wp})$ are $-1$, and the players are left with no motivation to
form the \emph{same} coalition as they do in the classical game.
Liang Dai and Qing Chen \cite{DaiChen} have observed \footnote{Liang Dai and
Qing Chen \cite{DaiChen} pointed out a flaw in the calculations in the Ref.
\cite{CooperativeGames} by Iqbal and Toor. We argue that even after the
detection of the indicated flaw by Liang Dai and Qing Chen \cite{DaiChen} the
main conclusion of the Ref. \cite{CooperativeGames} remains intact. It can be
seen as follows.
\par
In their comment Liang Dai and Qing Chen \cite{DaiChen} wrote ``In quantum
form, the authors (Iqbal and Toor \cite{CooperativeGames}) concluded that the
game was not zero-sum, and, in some cases, the players had no motivation to
make a coalition because the advantage was lost. In this comment we argue that
the conclusions in Ref. \cite{CooperativeGames} are incorrect and led to
invalid conclusions.'' Now we refer to the main conclusion of the Ref.
\cite{CooperativeGames}, written in its abstract, saying ``In its classical
form (of a three-player game) making a coalition gives advantage to players
and they are motivated to do so. However, in its quantum form the advantage is
lost and players are left with no motivation to make a coalition.'' We argue
that this conclusion remains intact because:
\par
\begin{enumerate}
\item Consider the quote from the page $108$ of the Ref.
\cite{CooperativeGames} ``The underlying assumption in this approach is that
because the arbiter, responsible for providing three-qubit pure quantum
initial states to be later unitarily manipulated by the players, can forward a
quantum state that corresponds to the classical game, therefore, other games
corresponding to different initial pure quantum states are quantum forms of
the classical game. This assumption makes possible to translate the problem of
finding a quantum version of the classical coalition game, having the property
that the advantage of making a coalition is lost, to finding some pure initial
quantum states. We showed that such quantum states can be found and,
therefore, there are quantum versions of the three-player coalition game where
the motivation for coalition formation is lost.''
\par
\item In view of this quote, along with Liang Dai and Qing Chen's finding that
when $\left| c_{111}\right| ^{2}+\left| c_{222}\right| ^{2}=\left|
c_{211}\right| ^{2}+\left| c_{122}\right| ^{2}$ there remains no motivation
to form a coalition, it can be observed that, even after the indicated flaw in
the calculation, the main conclusion of Ref. \cite{CooperativeGames}
remains intact. This is because the assumption made in Ref.
\cite{CooperativeGames}, which is quoted above in detail, allows one to consider
the corresponding game when $\left| c_{111}\right| ^{2}+\left|
c_{222}\right| ^{2}=\left| c_{211}\right| ^{2}+\left| c_{122}\right|
^{2}$ as a quantum form of the classical game. Liang Dai and Qing Chen's
\cite{DaiChen} main conclusion is thus the same as in Ref.
\cite{CooperativeGames}, apart from their identification of the correct
mathematical conditions that are required to find the particular quantum form
of the game in which the advantages of forming a coalition are lost.
\end{enumerate}
} that in case $\left| c_{111}\right| ^{2}+\left| c_{222}\right| ^{2}
\neq\left| c_{211}\right| ^{2}+\left| c_{122}\right| ^{2}$, the motivation
to form a coalition remains in the quantum form. In case $\left|
c_{111}\right| ^{2}+\left| c_{222}\right| ^{2}=\left| c_{211}\right|
^{2}+\left| c_{122}\right| ^{2}$, every player's payoff becomes zero
whatever strategies they adopt, and hence the motivation to form a coalition
is lost.
\section{Discussion}
There are several guises in which players can cooperate in a game.
One possibility is that they are able to communicate and, hence, able to
correlate their strategies. In certain situations players can make binding
commitments before or during the play of a game. Even in post-play
behavior, commitments can make players redistribute their final payoffs.
Two-player games differ from multi-player games in an important aspect.
In two-player games the question before the players is whether to cooperate or
not. In the multi-player case the players face a more difficult task.
Each player has to decide which coalition to join. There is also uncertainty
about the extent to which players outside his/her coalition may coordinate
their actions.
Recent developments in quantum games provide a motivation to see how forming a
coalition, and its associated advantages, can be influenced in quantum
versions of these classical cooperative games. To study this we selected an
interesting, but simple, cooperative game and a recently proposed scheme
for playing a quantum game. We allowed the players in the quantum
version of the game to form a coalition similar to the classical game.
The \emph{underlying assumption} in this approach is that, because the arbiter
(responsible for providing three-qubit pure initial quantum states to be later
manipulated by the players) can forward a quantum state that corresponds to
the classical game, other games that result from different initial
pure quantum states are quantum forms of the classical game. This assumption
reduces the problem of finding a quantum version of the classical coalition
game to finding some pure initial quantum states. It is shown that a quantum
version of the three-player coalition game can be found where the motivation
for coalition formation is lost.
\chapter{Backwards-induction outcome in quantum games}
\section{Introduction}
The notion of NE, the central solution-concept in non-cooperative game theory,
was developed by John Nash in the early 1950s. In fact Cournot (1838)
\cite{Cournot} anticipated Nash's definition of equilibrium by over a century,
but only in the context of a particular model of a market dominated
by only two producers. In economics an \textit{oligopoly} is a form of market
in which a number $n$ of producers, say, $n\geq2$, \emph{and no others},
provide the market with a certain commodity. In the special case where $n=2$
it is called a duopoly. Cournot's work \cite{Cournot} is one of the classics
of game theory and also a cornerstone of the theory of industrial organization
\cite{Tirole}.
In the Cournot model of duopoly two firms simultaneously put certain quantities of
a homogeneous product in the market. Cournot obtained an equilibrium value for
the quantities both firms will decide to put in the market. This equilibrium
value was based on a rule of behavior which says that if all the players
except one abide by it, the remaining player cannot do better than to abide by
it too. Nash gave a general concept of an equilibrium point in a
noncooperative game, but the existence of an equilibrium in the duopoly game
was known much earlier. The ``Cournot equilibrium'' refers to the NE in the
non-cooperative form of the duopoly that Cournot considered.
In an interesting later development, Stackelberg (1934) \cite{Stackelberg,
Gibbons} proposed a \textit{dynamic }model of duopoly in which -- contrary to
Cournot's assumption of simultaneous moves -- a leader (or dominant) firm
moves first and a follower (or subordinate) firm moves second. A well known
example is General Motors playing this leadership role in the early
history of the US automobile industry, when firms such as Ford and
Chrysler acted as followers. In this sequential game a ``Stackelberg
equilibrium'' is obtained using the \textit{backwards-induction outcome} of
the game. The Stackelberg equilibrium refers to the sequential nature of the game and
it is a stronger solution-concept than the NE because sequential-move games
sometimes have multiple NE, only one of which is associated with the
backwards-induction outcome of the game \cite{Gibbons}.
In this chapter we present a quantum perspective on the interesting game of
Stackelberg duopoly. We start with the same assumption that a game is decided
only by players' unitary manipulations, payoff operators, and the measuring
apparatus deciding payoffs. When these are same a different input quantum
initial state gives a different form of the same game. With this assumption we
studied evolutionary stability of a mixed NE in the RSP game. Hence, a game
obtained by using a general two-qubit pure state is a quantum form of the
classical game provided the rest of the procedures in playing the quantum game
remain same\textit{.}
We now present an analysis of the Stackelberg duopoly by raising a question:
Is it possible to find a two-qubit pure quantum state that generates the
classical Cournot equilibrium as a backwards-induction outcome of the quantum
form of Stackelberg duopoly? Why can this question be of interest? It is
interesting because, if the answer is yes, quantization can
potentially be a useful element for the follower in the leader-follower model
of the Stackelberg duopoly \cite{Gibbons}. This is due to a known result that,
in the classical setting, when the static duopoly changes to a dynamic form, the
follower becomes worse-off while the leader becomes better-off. We
find that, under certain restrictions, it is possible to find the needed
two-qubit quantum states. Hence a quantum form of a dynamic game of complete
information has an equilibrium that corresponds to the classical static form of
the same game. The leader, thus, does not become better-off in the quantum
form of the dynamic duopoly.
\section{Backwards-induction outcome}
Consider a simple three step game:
\begin{enumerate}
\item Player $1$ chooses an action $a_{1}$ from the feasible set $A_{1}$.
\item Player $2$ observes $a_{1}$ and then chooses an action $a_{2}$ from the
feasible set $A_{2}$.
\item Payoffs are $u_{1}(a_{1},a_{2})$ and $u_{2}(a_{1},a_{2})$.
\end{enumerate}
This game is an example of the dynamic games of complete and perfect
information. Key features of such games are:
\begin{enumerate}
\item The moves occur in sequence.
\item All previous moves are known before the next move is chosen, and
\item The players' payoffs are common knowledge.
\end{enumerate}
Given that the action $a_{1}$ was previously chosen, at the second stage of the
game, when player $2$ gets the move, s/he faces the problem:
\begin{equation}
\underset{a_{2}\in A_{2}}{Max}\text{ }u_{2}(a_{1},a_{2}) \label{max2}
\end{equation}
Assume that for each $a_{1}$ in $A_{1}$, player $2$'s optimization problem has
a unique solution $R_{2}(a_{1})$, which is also known as the \textit{best
response} of player $2$. Now player $1$ can also solve player $2$'s
optimization problem by anticipating player $2$'s response to each action
$a_{1}$ that player $1$ might take. So that player $1$ faces the problem:
\begin{equation}
\underset{a_{1}\in A_{1}}{Max}\text{ }u_{1}(a_{1},R_{2}(a_{1})) \label{max1}
\end{equation}
Suppose this optimization problem also has a unique solution for player $1$
and is denoted by $a_{1}^{\star}$. The solution $(a_{1}^{\star},R_{2}
(a_{1}^{\star}))$ is the backwards-induction outcome of this game.
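The two-step reasoning above can be illustrated with a toy discrete example;
the payoff numbers in the following Python sketch are hypothetical and serve
only to show the mechanics of working backwards:
\begin{verbatim}
A1 = ['L', 'R']                          # player 1's feasible actions
A2 = ['l', 'r']                          # player 2's feasible actions
u1 = {('L','l'): 2, ('L','r'): 0, ('R','l'): 1, ('R','r'): 3}
u2 = {('L','l'): 1, ('L','r'): 3, ('R','l'): 2, ('R','r'): 1}

# Step 1: player 2's best response R2(a1) to every a1.
R2 = {a1: max(A2, key=lambda a2: u2[(a1, a2)]) for a1 in A1}
# Step 2: player 1 maximizes u1(a1, R2(a1)), anticipating that response.
a1_star = max(A1, key=lambda a1: u1[(a1, R2[a1])])
print(a1_star, R2[a1_star])              # the backwards-induction outcome
\end{verbatim}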
In a simple version of Cournot's model two firms simultaneously decide the
quantities $q_{1}$ and $q_{2}$ respectively of a homogeneous product they want
to put into the market. Suppose $Q$ is the aggregate quantity, i.e. $Q=q_{1}+q_{2}$,
and let $P(Q)=a-Q$ be the \emph{market-clearing price}, which is the price at
which all products or services available in a market will find buyers. Assume
the total cost to a firm producing quantity $q_{i}$ is $cq_{i}$ i.e. there are
no \emph{fixed costs} and the \emph{marginal cost} is a constant $c$ with
$c<a$. In a two-player game theoretical model of this situation a firm's
payoff or profit can be written as \cite{Gibbons}
\begin{equation}
P_{i}(q_{i},q_{j})=q_{i}\left[ P(Q)-c\right] =q_{i}\left[ a-c-(q_{i}
+q_{j})\right] =q_{i}\left[ k-(q_{i}+q_{j})\right] \label{PayoffEq}
\end{equation}
where $k\equiv a-c$. Solving for the NE easily gives the Cournot equilibrium:
\begin{equation}
q_{1}^{\star}=q_{2}^{\star}=\frac{k}{3} \label{Ceqbrm}
\end{equation}
At this equilibrium the payoffs to both the firms from Eq. (\ref{PayoffEq}) are
\begin{equation}
P_{1}(q_{1}^{\star},q_{2}^{\star})_{Cournot}=P_{2}(q_{1}^{\star},q_{2}^{\star
})_{Cournot}=\frac{k^{2}}{9} \label{CourPayoffs}
\end{equation}
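The equilibrium quantities and payoffs above can be reproduced with a short
symbolic computation. The following sketch is an illustrative check (assuming
only the profit functions of Eq. (\ref{PayoffEq})) and uses SymPy:
\begin{verbatim}
import sympy as sp

q1, q2, k = sp.symbols('q1 q2 k', positive=True)
P1 = q1*(k - (q1 + q2))                  # firm 1's profit, Eq. (PayoffEq)
P2 = q2*(k - (q1 + q2))                  # firm 2's profit

# Simultaneous best responses: dP1/dq1 = 0 and dP2/dq2 = 0.
sol = sp.solve([sp.diff(P1, q1), sp.diff(P2, q2)], [q1, q2], dict=True)[0]
assert sp.simplify(sol[q1] - k/3) == 0
assert sp.simplify(sol[q2] - k/3) == 0
assert sp.simplify(P1.subs(sol) - k**2/9) == 0   # Eq. (CourPayoffs)
\end{verbatim}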
Consider now the classical form of the duopoly game when it becomes dynamic. In
the dynamic form of the game the payoffs to the players are given by Eq.
(\ref{PayoffEq}), as they are for Cournot's game. We find the
backwards-induction outcome in the classical and in a quantum form of
Stackelberg's duopoly. Taking advantage of the bigger picture given to this
dynamic game by the Hilbert space structure of its strategy space, we then find
two-qubit pure quantum states that give the classical Cournot equilibrium as the
backwards-induction outcome of the quantum game of Stackelberg's duopoly.
\section{Stackelberg duopoly}
\subsection{Classical form}
A leader (or dominant) firm moves first and a follower (or subordinate) firm
moves second in the Stackelberg model of duopoly \cite{Gibbons}. The sequence of
events is
\begin{enumerate}
\item Firm $A$ chooses a quantity $q_{1}\geq0$.
\item Firm $B$ observes $q_{1}$ and then chooses a quantity $q_{2}\geq0$.
\item The payoffs to firms $A$ and $B$ are given by their respective profit
functions as
\end{enumerate}
\begin{align}
P_{A}(q_{1},q_{2}) & =q_{1}\left[ k-(q_{1}+q_{2})\right] \nonumber\\
P_{B}(q_{1},q_{2}) & =q_{2}\left[ k-(q_{1}+q_{2})\right] \label{ProfFunc}
\end{align}
The backwards-induction outcome is found by first finding firm $B$'s reaction
to an arbitrary quantity by firm $A.$ Denoting this quantity as $R_{2}(q_{1})$
we find
\begin{equation}
R_{2}(q_{1})=\underset{q_{2}\geq0}{Max}\text{ }P_{B}(q_{1},q_{2}
)=\frac{k-q_{1}}{2} \label{bestRes}
\end{equation}
with $q_{1}<k$. Firm $A$ can now also solve firm $B$'s problem. Firm $A$
can anticipate that a choice of the quantity $q_{1}$ will meet a reaction
$R_{2}(q_{1})$. In the first stage of the game firm $A$ can then compute a
solution to his/her optimization problem as
\begin{equation}
\underset{q_{1}\geq0}{Max}\text{ }P_{A}\left[ q_{1},R_{2}(q_{1})\right]
=\underset{q_{1}\geq0}{Max}\text{ }\frac{q_{1}(k-q_{1})}{2}
\end{equation}
It gives
\begin{equation}
q_{1}^{\star}=\frac{k}{2}\text{ \ \ and \ \ }R_{2}(q_{1}^{\star})=\frac{k}{4}
\label{StkEq}
\end{equation}
It is the classical backwards-induction outcome of the dynamic form of the duopoly
game. At this equilibrium the payoffs to the players $A$ and $B$ are given by Eqs.
(\ref{ProfFunc}) and (\ref{StkEq}) as
\begin{equation}
P_{A}\left[ q_{1}^{\star},R_{2}(q_{1}^{\star})\right] _{Stackelberg}
=\frac{k^{2}}{8},\text{ \ \ \ \ }P_{B}\left[ q_{1}^{\star},R_{2}(q_{1}
^{\star})\right] _{Stackelberg}=\frac{k^{2}}{16} \label{StkPayffs}
\end{equation}
From Eq. (\ref{StkPayffs}) we find the ratio:
\begin{equation}
\frac{P_{A}\left[ q_{1}^{\star},R_{2}(q_{1}^{\star})\right] _{Stackelberg}
}{P_{B}\left[ q_{1}^{\star},R_{2}(q_{1}^{\star})\right] _{Stackelberg}}=2
\label{ratio}
\end{equation}
showing that in comparison with the Cournot game, the leader firm becomes
better-off and the follower firm becomes worse-off in the Stackelberg game.
This aspect also hints at an important difference between single- and multi-person
decision problems. In single-person decision theory having more information
can never make the decision maker worse-off. In game theory, however, having
more information (or, more precisely, having it made public that one has more
information) can make a player worse-off \cite{Gibbons}.
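The classical chain of reasoning above can also be reproduced symbolically.
The sketch below is illustrative only, assuming the profit functions of Eq.
(\ref{ProfFunc}), and recovers Eqs. (\ref{bestRes}), (\ref{StkEq}) and
(\ref{ratio}):
\begin{verbatim}
import sympy as sp

q1, q2, k = sp.symbols('q1 q2 k', positive=True)
PA = q1*(k - (q1 + q2))                  # leader's profit, Eq. (ProfFunc)
PB = q2*(k - (q1 + q2))                  # follower's profit

R2 = sp.solve(sp.diff(PB, q2), q2)[0]    # follower's best response: (k - q1)/2
q1_star = sp.solve(sp.diff(PA.subs(q2, R2), q1), q1)[0]
q2_star = R2.subs(q1, q1_star)

assert sp.simplify(q1_star - k/2) == 0 and sp.simplify(q2_star - k/4) == 0
ratio = PA.subs({q1: q1_star, q2: q2_star}) / PB.subs({q1: q1_star, q2: q2_star})
assert sp.simplify(ratio) == 2           # Eq. (ratio)
\end{verbatim}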
Now we look at the backwards-induction outcome from a quantum perspective. Our
motivation is an interesting aspect that the quantum form can bring into the
backwards-induction outcome: the possibility of firm $B$ not becoming
worse-off because of having extra information.
\subsection{Quantum form}
Stackelberg duopoly is a two-player sequential game. Meyer \cite{MeyerDavid}
considered a quantum form of the sequential game of PQ penny flip by unitary
operations on a single qubit. An important difference between Meyer's game and
Stackelberg duopoly is that at the second stage the player in PQ penny flip
does not know the previous move, whereas in Stackelberg duopoly he does.
We prefer Marinatto and Weber's scheme to play the sequential game of
Stackelberg duopoly for two reasons:
\begin{enumerate}
\item The classical game is obtained for a product initial state.
\item When players' actions and payoff-generating measurement are
\emph{exactly} the same, we assume other games, corresponding to every pure
two-qubit initial state, are \emph{quantum forms} of the classical game.
\end{enumerate}
As discussed earlier, the second assumption originates from the fact that the
classical game corresponds to a pure two-qubit initial product state. The
assumption reduces the problem of finding a quantum form of Stackelberg
duopoly, with the property that its equilibrium is the same as in Cournot's
duopoly, to the problem of finding conditions on the parameters of two-qubit
pure initial quantum state. If the conditions are realistic then the
corresponding quantum game gives Cournot's equilibrium as the
backwards-induction outcome.
Stackelberg duopoly is a dynamic game of complete information. Its quantum
form in Marinatto and Weber's scheme starts by preparing a pure two-qubit
initial quantum state. Suppose Alice plays first and she announces her move
immediately, so that Bob knows Alice's move before playing his move. Bob plays
his move and both players forward their qubits for measurement.
Information about the previous moves is crucial for the game considered here.
A comparison of the sequential game of Stackelberg duopoly with the
simultaneous-move game of BoS makes evident the different information structure in
these games. For example, let BoS be played sequentially, but suppose Alice does not
announce her first move to Bob before he makes his. This makes the game
sequential, but the information structure is still the same as in its static
form. Hence, a sequential BoS in the above form has the same NE as in its
static form. An unobserved-action form of a game has the same NE as its
simultaneous-move form. This observation led us to play a quantum form of
Stackelberg duopoly while keeping intact the original structure of a scheme
designed for simultaneous moves. A consideration of playing a sequential game
in a quantum way brings to mind Meyer's PQ penny-flip \cite{MeyerDavid},
where only one qubit is used. Contrary to this, in the present section we use the
two-qubit system of a simultaneous-move game, to play a sequential game.
Why use two qubits when a quantum form of this sequential game can also be
played with only one qubit, in a similar way to Meyer's PQ penny-flip? We prefer
two qubits because in this case a comparison between classical and a quantum
form of the game translates itself to comparing two games resulting from using
a product and an entangled initial quantum state. We do not rule out the
possibility that a consideration of the dynamic game using only single qubit
gives equally, or even more, interesting results. We let the classical payoffs in
Stackelberg duopoly, given by Eq. (\ref{ProfFunc}), be reproduced when the
initial state $\left| \psi_{in}\right\rangle =\left| 11\right\rangle $ is
used to play the game. The upper state of a qubit is then represented by $2$.
The state $\left| \psi_{in}\right\rangle $ in density matrix notation is
\begin{equation}
\rho_{in}=\left| 11\right\rangle \left\langle 11\right| \label{IniDenMat}
\end{equation}
Assume the players Alice and Bob apply $\hat{I}$ with probabilities $x$ and
$y$, respectively. The state (\ref{IniDenMat}) changes to
\begin{align}
\rho_{fin} & =xy\hat{I}_{A}\otimes\hat{I}_{B}\rho_{in}\hat{I}_{A}^{\dagger
}\otimes\hat{I}_{B}^{\dagger}+x(1-y)\hat{I}_{A}\otimes\hat{\sigma}_{xB}
\rho_{in}\hat{I}_{A}^{\dagger}\otimes\hat{\sigma}_{xB}^{\dagger}+\nonumber\\
& y(1-x)\hat{\sigma}_{xA}\otimes\hat{I}_{B}\rho_{in}\hat{\sigma}
_{xA}^{\dagger}\otimes\hat{I}_{B}^{\dagger}+(1-x)(1-y)\hat{\sigma}_{xA}
\otimes\hat{\sigma}_{xB}\rho_{in}\hat{\sigma}_{xA}^{\dagger}\otimes\hat
{\sigma}_{xB}^{\dagger}\nonumber\\
& \label{FinDenMat}
\end{align}
where $x,y\in\lbrack0,1]$ are identified as the players' moves. The moves by
Alice and Bob in the classical duopoly game are given by quantities $q_{1}$ and
$q_{2}$ where $q_{1},q_{2}\in\lbrack0,\infty)$. We assume that Alice and Bob
agree on a function that can \emph{uniquely} define a real positive number in
the range $(0,1]$ for every quantity $q_{1},q_{2}$ in $[0,\infty)$. A simple
such function is $1/(1+q_{i})$. Thus Alice and Bob find $x$ and $y$,
respectively, as
\begin{equation}
x=\frac{1}{1+q_{1}},\text{ \ \ \ \ \ }y=\frac{1}{1+q_{2}} \label{DefFuns}
\end{equation}
and use these real positive numbers as the probabilities with which they apply
the identity operator $\hat{I}$ on the quantum state at their disposal. With a
substitution from Eqs. (\ref{IniDenMat}, \ref{DefFuns}) the final state
(\ref{FinDenMat}) becomes
\begin{equation}
\rho_{fin}=\frac{1}{(1+q_{1})(1+q_{2})}\left[ \left| 11\right\rangle
\left\langle 11\right| +q_{1}q_{2}\left| 22\right\rangle \left\langle
22\right| +q_{1}\left| 21\right\rangle \left\langle 21\right| +q_{2}\left|
12\right\rangle \left\langle 12\right| \right]
\end{equation}
We now assume that in the measurement and payoffs-finding phase the quantities
$q_{1}$ and $q_{2}$ are also known to the referee. The referee applies the
following payoff operators on the final quantum state:
\begin{align}
(P_{A})_{oper} & =(1+q_{1})(1+q_{2})q_{1}\left[ k\left| 11\right\rangle
\left\langle 11\right| -\left| 21\right\rangle \left\langle 21\right|
-\left| 12\right\rangle \left\langle 12\right| \right] \nonumber\\
(P_{B})_{oper} & =(1+q_{1})(1+q_{2})q_{2}\left[ k\left| 11\right\rangle
\left\langle 11\right| -\left| 21\right\rangle \left\langle 21\right|
-\left| 12\right\rangle \left\langle 12\right| \right] \label{PayOpers}
\end{align}
Note that the classical payoffs of Eq. (\ref{ProfFunc}) are reproduced with
the initial state $\left| \psi_{in}\right\rangle =\left| 11\right\rangle $ as
\begin{equation}
P_{A,B}(q_{1},q_{2})=\text{Tr}\left[ (P_{A,B})_{oper}\rho_{fin}\right]
\label{PlayersPayoffs}
\end{equation}
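This reproduction of the classical profits can be checked numerically. The
sketch below is an illustration (not part of the derivation); it orders the
basis as $\left| 11\right\rangle ,\left| 12\right\rangle ,\left|
21\right\rangle ,\left| 22\right\rangle $, builds $\rho_{fin}$ from the
operator sum of Eq. (\ref{FinDenMat}) with the moves of Eq. (\ref{DefFuns}),
and evaluates the traces of Eq. (\ref{PlayersPayoffs}):
\begin{verbatim}
import numpy as np

I2 = np.eye(2)
X = np.array([[0., 1.], [1., 0.]])                 # sigma_x

def proj(i):
    """Projector |i><i| on the two-qubit basis |11>,|12>,|21>,|22>."""
    v = np.zeros(4); v[i] = 1.0
    return np.outer(v, v)

def rho_final(x, y, rho_in):
    """Operator sum of Eq. (FinDenMat): I or sigma_x applied with prob. x, y."""
    ops = [(x*y, I2, I2), (x*(1 - y), I2, X),
           ((1 - x)*y, X, I2), ((1 - x)*(1 - y), X, X)]
    return sum(w*np.kron(UA, UB) @ rho_in @ np.kron(UA, UB).T
               for w, UA, UB in ops)

q1, q2, k = 1.3, 0.7, 10.0                         # example quantities
x, y = 1/(1 + q1), 1/(1 + q2)                      # Eq. (DefFuns)
rho = rho_final(x, y, proj(0))                     # initial state |11><11|

norm = (1 + q1)*(1 + q2)
PA_op = norm*q1*(k*proj(0) - proj(2) - proj(1))    # Eq. (PayOpers)
PB_op = norm*q2*(k*proj(0) - proj(2) - proj(1))

assert np.isclose(np.trace(PA_op @ rho), q1*(k - (q1 + q2)))   # Eq. (ProfFunc)
assert np.isclose(np.trace(PB_op @ rho), q2*(k - (q1 + q2)))
\end{verbatim}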
A more general form of quantum duopoly can now be played by keeping the payoff
operators of Eq. (\ref{PayOpers}) in the referee's possession and preparing an
initial two-qubit pure state:
\begin{equation}
\left| \psi_{in}\right\rangle =\underset{i,j=1,2}{\sum}c_{ij}\left|
ij\right\rangle \text{, \ \ \ with }\underset{i,j=1,2}{\sum}\left|
c_{ij}\right| ^{2}=1 \label{BIOIniStat}
\end{equation}
Payoffs to Alice and Bob can now be obtained, in this quantum game, from Eq.
(\ref{PlayersPayoffs}), which uses the payoff operators of Eqs. (\ref{PayOpers}).
The payoffs to Alice and Bob are written as
\begin{align}
\left[ P_{A}(q_{1},q_{2})\right] _{qtm} & =\frac{(\omega_{11}+\omega
_{12}q_{2})+q_{1}(\omega_{21}+\omega_{22}q_{2})}{(1+q_{1})(1+q_{2}
)}\nonumber\\
\left[ P_{B}(q_{1},q_{2})\right] _{qtm} & =\frac{(\chi_{11}+\chi_{12}
q_{2})+q_{1}(\chi_{21}+\chi_{22}q_{2})}{(1+q_{1})(1+q_{2})} \label{QPayoffs}
\end{align}
where the subscript $qtm$ is for `quantum' and
\begin{align}
\left[
\begin{array}
[c]{c}
\omega_{11}\\
\omega_{12}\\
\omega_{21}\\
\omega_{22}
\end{array}
\right] & =\left[
\begin{array}
[c]{cccc}
\left| c_{11}\right| ^{2} & \left| c_{12}\right| ^{2} & \left|
c_{21}\right| ^{2} & \left| c_{22}\right| ^{2}\\
\left| c_{12}\right| ^{2} & \left| c_{11}\right| ^{2} & \left|
c_{22}\right| ^{2} & \left| c_{21}\right| ^{2}\\
\left| c_{21}\right| ^{2} & \left| c_{22}\right| ^{2} & \left|
c_{11}\right| ^{2} & \left| c_{12}\right| ^{2}\\
\left| c_{22}\right| ^{2} & \left| c_{21}\right| ^{2} & \left|
c_{12}\right| ^{2} & \left| c_{11}\right| ^{2}
\end{array}
\right] \left[
\begin{array}
[c]{c}
kq_{1}(1+q_{1})(1+q_{2})\\
-q_{1}(1+q_{1})(1+q_{2})\\
-q_{1}(1+q_{1})(1+q_{2})\\
0
\end{array}
\right] \nonumber\\
\left[
\begin{array}
[c]{c}
\chi_{11}\\
\chi_{12}\\
\chi_{21}\\
\chi_{22}
\end{array}
\right] & =\left[
\begin{array}
[c]{cccc}
\left| c_{11}\right| ^{2} & \left| c_{12}\right| ^{2} & \left|
c_{21}\right| ^{2} & \left| c_{22}\right| ^{2}\\
\left| c_{12}\right| ^{2} & \left| c_{11}\right| ^{2} & \left|
c_{22}\right| ^{2} & \left| c_{21}\right| ^{2}\\
\left| c_{21}\right| ^{2} & \left| c_{22}\right| ^{2} & \left|
c_{11}\right| ^{2} & \left| c_{12}\right| ^{2}\\
\left| c_{22}\right| ^{2} & \left| c_{21}\right| ^{2} & \left|
c_{12}\right| ^{2} & \left| c_{11}\right| ^{2}
\end{array}
\right] \left[
\begin{array}
[c]{c}
kq_{2}(1+q_{1})(1+q_{2})\\
-q_{2}(1+q_{1})(1+q_{2})\\
-q_{2}(1+q_{1})(1+q_{2})\\
0
\end{array}
\right] \nonumber\\
& \label{consts}
\end{align}
The classical payoffs of the duopoly game given in Eqs. (\ref{ProfFunc}) are
recovered from Eqs. (\ref{QPayoffs}) when the initial quantum state is
$\left| \psi_{in}\right\rangle =\left| 11\right\rangle $. Classical duopoly
is, therefore, a subset of its quantum version.
\begin{figure}
\caption{Playing a quantum form of Stackelberg duopoly.}
\label{Fig4}
\end{figure}
We now find the backwards-induction outcome in this quantum form of
Stackelberg duopoly. Fig. (\ref{Fig4}) shows the overall idea of playing the
game. We proceed in exactly the same way as in the classical game
except that players' payoffs are now given by Eqs. (\ref{QPayoffs}) and
\emph{not} by Eqs. (\ref{ProfFunc}). The first step in the backwards-induction
in quantum game is to find Bob's reaction to an arbitrary quantity $q_{1}$
chosen by Alice. Denoting this quantity as $\left[ R_{2}(q_{1})\right]
_{qtm}$ we find
\begin{equation}
\left[ R_{2}(q_{1})\right] _{qtm}=\underset{q_{2}\geq0}{Max}\left[
P_{B}(q_{1},q_{2})\right] _{qtm}=\frac{q_{1}\triangle_{1}+\triangle_{2}
}{-2\left\{ q_{1}\triangle_{3}+\triangle_{4}\right\} } \label{QbestRes}
\end{equation}
where
\begin{gather}
\left| c_{11}\right| ^{2}+\left| c_{22}\right| ^{2}-k\left|
c_{21}\right| ^{2}=\triangle_{1}\text{, \ \ \ \ \ \ \ \ }\left|
c_{12}\right| ^{2}+\left| c_{21}\right| ^{2}-k\left| c_{11}\right|
^{2}=\triangle_{2}\nonumber\\
\left| c_{12}\right| ^{2}+\left| c_{21}\right| ^{2}-k\left|
c_{22}\right| ^{2}=\triangle_{3}\text{, \ \ \ \ \ \ \ \ }\left|
c_{11}\right| ^{2}+\left| c_{22}\right| ^{2}-k\left| c_{12}\right|
^{2}=\triangle_{4}
\end{gather}
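As a numerical sanity check (an illustration with example numbers, not part of
the derivation), the closed form of Eq. (\ref{QbestRes}) can be compared with a
brute-force maximization of $\left[ P_{B}(q_{1},q_{2})\right] _{qtm}$ taken
from Eqs. (\ref{QPayoffs}) and (\ref{consts}), in which the common factors
$(1+q_{1})(1+q_{2})$ cancel:
\begin{verbatim}
import numpy as np

# Example squared moduli of a state close to |11>, and example k, q1.
c11, c12, c21, c22 = 0.90, 0.05, 0.03, 0.02
k, q1 = 4.0, 1.0

def PB(q2):
    """[P_B(q1, q2)]_qtm of Eq. (QPayoffs), common factors cancelled."""
    return q2*((k*c11 - c12 - c21) + q2*(k*c12 - c11 - c22)
               + q1*(k*c21 - c11 - c22) + q1*q2*(k*c22 - c12 - c21))

d1 = c11 + c22 - k*c21                   # the triangle symbols of Eq. (QbestRes)
d2 = c12 + c21 - k*c11
d3 = c12 + c21 - k*c22
d4 = c11 + c22 - k*c12
R2 = (q1*d1 + d2)/(-2*(q1*d3 + d4))      # closed-form best response

grid = np.linspace(0.0, 3*k, 120001)
assert abs(grid[np.argmax(PB(grid))] - R2) < 1e-3
print(R2)                                # about 1.889 for these numbers
\end{verbatim}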
This reaction reduces to its classical value of Eq. (\ref{bestRes}) when
$\left| c_{11}\right| ^{2}=1$. Similar to the classical game Alice can now
solve Bob's problem as well. Alice can anticipate that a choice of quantity
$q_{1}$ will meet a reaction $\left[ R_{2}(q_{1})\right] _{qtm}$. In the
first stage of the game, like its classical version, Alice can compute a
solution to her optimization problem as
\begin{equation}
\underset{q_{1}\geq0}{Max}\left[ P_{A}\left\{ q_{1},\left\{ R_{2}
(q_{1})\right\} _{qtm}\right\} \right] _{qtm}
\end{equation}
To find it Alice calculates the quantity:
\begin{align}
\frac{d\left[ P_{A}(q_{1},q_{2})\right] _{qtm}}{dq_{1}} & =\frac{(\left|
c_{11}\right| ^{2}+\left| c_{22}\right| ^{2}-\left| c_{12}\right|
^{2}-\left| c_{21}\right| ^{2})}{(1+q_{1})}\left\{ -2q_{1}^{2}
+q_{1}(k-2)+k\right\} \nonumber\\
& +(1+2q_{1})\left\{ (k-1)\left| c_{21}\right| ^{2}-\left| c_{12}\right|
^{2}\right\} +k(\left| c_{12}\right| ^{2}-\left| c_{22}\right|
^{2})\nonumber\\
& -q_{1}\frac{dq_{2}}{dq_{1}}\left\{ \triangle_{4}+q_{1}\triangle
_{3}\right\} -q_{2}\left\{ 2q_{1}\triangle_{3}+\triangle_{4}\right\}
\nonumber\\
& \label{derivative}
\end{align}
and replaces $q_{2}$ in Eq. (\ref{derivative}) with $\left[ R_{2}
(q_{1})\right] _{qtm}$ given by Eq. (\ref{QbestRes}) and then equates Eq.
(\ref{derivative}) to zero to find a $q_{1}^{\star}$ that maximizes her payoff
$\left[ P_{A}(q_{1},q_{2})\right] _{qtm}$. For a maxima she would ensure
that the second derivative of $P_{A}\left\{ q_{1},\left\{ R_{2}
(q_{1})\right\} _{qtm}\right\} $ with respect to $q_{1}$ at $q_{1}
=q_{1}^{\star}$ is a negative quantity. The quantity $q_{1}^{\star}$ together
with $\left[ R_{2}(q_{1}^{\star})\right] _{qtm}$ will form the
backwards-induction outcome of the quantum game.
An interesting situation is when the backwards-induction outcome in the quantum
version of Stackelberg duopoly becomes the same as the classical Cournot
equilibrium of duopoly. The classical situation of the leader becoming better-off,
while the follower becomes worse-off, is then avoided in the quantum form of
Stackelberg duopoly. To look for this possibility we need an initial
state $\left| \psi_{in}\right\rangle $ such that at $q_{1}^{\star}=q_{2}^{\star
}=k/3$ the following relations hold, along with the
normalization condition given in Eq. (\ref{BIOIniStat}):
\begin{gather}
\frac{d\left[ P_{A}\left\{ q_{1},\left\{ R_{2}(q_{1})\right\}
_{qtm}\right\} \right] _{qtm}}{dq_{1}}\mid_{q_{1}=q_{1}^{\star}
}=0\label{Condn1}\\
\left[ \frac{d^{2}\left[ P_{A}\left\{ q_{1},\left\{ R_{2}(q_{1})\right\}
_{qtm}\right\} \right] _{qtm}}{dq_{1}^{2}}\mid_{q_{1}=q_{1}^{\star}}\right]
<0\label{Condn2}\\
q_{2}^{\star}=\left\{ R_{2}(q_{1}^{\star})\right\} _{qtm} \label{Condn3}
\end{gather}
The conditions (\ref{Condn1}, \ref{Condn2}) simply say that the
backwards-induction outcome of the quantum game is the same as the Cournot
equilibrium of the classical game. The condition (\ref{Condn3}) says that Bob's
reaction to Alice's choice of $q_{1}^{\star}=k/3$ is $q_{2}^{\star}=k/3$. To
show that such quantum states can exist for which the conditions
(\ref{Condn1}, \ref{Condn2}, \ref{Condn3}), together with the normalization
(\ref{BIOIniStat}), hold, we give an example in which $\left|
c_{11}\right| ^{2},\left| c_{12}\right| ^{2}$ and $\left| c_{21}\right|
^{2}$ are written as functions of $k$, under our assumption that $\left|
c_{22}\right| ^{2}=0$. Though this assumption puts its own restriction on
the possible range of $k$ for which the above conditions hold for these
functions, it still shows clearly the possibility of finding the required
initial quantum states. The functions are found as
\begin{align}
\left| c_{12}(k)\right| ^{2} & =\frac{-f(k)+\sqrt{f(k)^{2}-4g(k)h(k)}
}{2g(k)}\text{ \ \ where}\nonumber\\
f(k) & =j(k)\left\{ \frac{-7}{18}k^{2}+\frac{1}{3}k+\frac{1}{2}\right\}
+\left\{ \frac{k^{2}}{9}+\frac{k}{3}+\frac{1}{2}\right\} \nonumber\\
g(k) & =j(k)^{2}\left\{ \frac{-1}{9}k^{3}+\frac{7}{18}k^{2}-\frac{1}
{2}\right\} +j(k)\left\{ \frac{2}{9}k^{3}+\frac{5}{18}k^{2}-\frac{1}
{2}k-1\right\} +\nonumber\\
& \left\{ \frac{-1}{9}k^{2}-\frac{1}{2}k-\frac{1}{2}\right\} \nonumber\\
h(k) & =\frac{-1}{6}k\text{, \ \ \ \ \ \ }j(k)=\frac{9-4k^{2}}{k^{2}-9}
\end{align}
also
\begin{align}
\left| c_{21}(k)\right| ^{2} & =j(k)\left| c_{12}(k)\right| ^{2}\\
\left| c_{11}(k)\right| ^{2} & =1-\left| c_{12}(k)\right| ^{2}-\left|
c_{21}(k)\right| ^{2}
\end{align}
Now, interestingly, within the allowed range $1.5\leq k\leq1.73205$
all of the conditions (\ref{BIOIniStat}, \ref{Condn1}, \ref{Condn2},
\ref{Condn3}) hold at $q_{1}^{\star}=q_{2}^{\star}=k/3$. Hence, in this range
of $k$, a quantum form of Stackelberg duopoly exists that gives the classical
Cournot equilibrium as the backwards-induction outcome. The restriction on the
allowed range of $k$ results from our assumption that $\left|
c_{22}(k)\right| ^{2}=0$, which is introduced to simplify the calculations.
Nevertheless it does not forbid obtaining a quantum form of Stackelberg
duopoly, without the mentioned restriction on the range of $k$, in which the
quantum game still possesses the same properties.
\section{Discussion}
What can be the possible relevance of considering a quantum form of a game that
models a competition between two firms in the macroscopic world of economics?
Quantum mechanics was developed to understand phenomena in the regime of
atomic and subatomic interactions and it is still mostly used in that domain.
What is of interest in extending a game-theoretical model of interaction
between firms towards the quantum domain? These questions naturally arise not only
with reference to the Stackelberg duopoly considered in this chapter but also in
other related works in quantum games.
We believe that, like other notions of game theory that have found some relevance in
quantum information, a consideration of backwards-induction can be of interest
for exactly the same reasons. It does not seem hard to imagine situations in
quantum information where moves occur in sequence, all previous moves are
observed before the next move is chosen, and players' payoffs from each
feasible combination of moves are common knowledge. Interesting questions then
arise about how a quantum version of dynamic game of complete information can
influence the outcome.
The duopoly game models economic competition between firms and applied
economics is the area where it is studied in detail. The fact that quantum
game theory can give entirely new views on games, which are important in
economics, is apparent in recent interesting papers by Piotrowski and
Sladkowski \cite{Piotrowski1, Piotrowski2} proposing a quantum-like
description of markets and economies where players' strategies belong to a
Hilbert space. This shows that quantum games certainly have features of interest
to applied economists. In a reciprocal spirit, we showed that games played by
firms in economic competition can give counter-intuitive solutions when played
in the quantum world.
\chapter{Quantum repeated games}
\section{Introduction}
The PD attracted early attention \cite{Eisert} in recent studies in quantum
game theory. In classical game theory \cite{Gibbons} a two-stage repeated
version of this game consists of two players playing the game twice, observing
the outcome of the first play before the second play begins. The payoffs for
the entire game are simply taken as the sum of the payoffs from the two
stages. Generally a two-stage repeated game has more complex strategic
structure than its one-stage counterpart and players' strategic choices in the
second stage are affected by the outcome of their moves in the first stage.
For the classical one-stage PD the strategy of `defection' by both the players
is a unique NE. In its two-stage version the same NE appears again at the
second stage because the first stage payoffs are added as constants to the
second stage. In fact, in all finitely repeated versions of the PD,
`defection' by both the players appears as the unique NE at every stage
\cite{Gibbons}.
Eisert et al.'s study \cite{Eisert} of the one-stage quantum PD raises a
question: what can possibly be a role for quantum mechanics when the game is
played twice? It seems that this role should be relevant to the new feature
showing itself in the game i.e. the two-stages. A role for quantum mechanics
exists if it inter-links the two stages of the game in some way of interest.
Classically both the players `defect' at each stage and strategic choices
remain the same because of uniqueness of the NE at each stage. In our search
for a quantum role we find useful the idea of \textit{subgame-perfect outcome}
(SGPO) \cite{Gibbons} in a two-stage repeated bi-matrix game in its quantum form.
For a two-stage repeated game the idea of a SGPO is the natural analog of the
backwards-induction outcome (BIO) \cite{Gibbons} from the games of complete
and perfect information. In the last chapter we considered the BIO idea in a
quantum form of duopoly game and showed how a quantum version of this game can
give an outcome corresponding to the static form of the duopoly, even when the
game is played dynamically. In the present chapter we study the natural analogue
of BIO for a two-stage repeated PD quantum game, i.e., the idea of SGPO in a
situation that can be said to lie in quantum domain. We solve the two-stage PD
quantum game in the spirit of backwards-induction from the previous chapter; but
now the first step in working backwards from the end of the game involves
solving a real game rather than solving a single-person optimization problem.
In game theory the idea of SGPO comes out as a stronger solution concept
especially when multiple NE appear in a stage. Our motivation is the
observation that a quantization scheme for the PD is known in which the NE in
a stage is not unique -- thus making relevant a consideration of the concept
of SGPO in the two-stage game played in a quantum setting. For the purpose of
completeness, we will first describe how SGPO works for the classical
two-stage PD. Afterwards, we quantize the game using a known scheme, and then,
show how a SGPO can exist that is counter-intuitive compared to the classical
SGPO for the two-stage repeated PD.
\section{Two-stage games of complete but imperfect information}
Like a dynamic game of complete and perfect information -- for example the
Stackelberg duopoly -- the play in a two-stage game of complete but imperfect
information proceeds in a sequence, with the moves in the first stage observed
before the next stage begins. The new feature is that within each stage now
there are simultaneous moves. The simultaneity of moves within each stage
means that information is imperfect in the game. A two-stage game of complete
but imperfect information consists of the steps \cite{Gibbons}:
\begin{enumerate}
\item Players $A$ and $B$ simultaneously choose actions $p$ and $q$ from
feasible sets $\mathbf{P}$ and $\mathbf{Q}$, respectively.
\item Players $A$ and $B$ observe the outcome of the first stage, $(p,q)$, and
then simultaneously choose actions $p_{1}$ and $q_{1}$ from feasible sets
$\mathbf{P}$ and $\mathbf{Q}$, respectively.
\item Payoffs are $P_{i}(p,q,p_{1},q_{1})$ for $i=A,$ $B$.
\end{enumerate}
A usual approach to solving a game from this class uses the method of
backwards-induction. In the previous chapter the first step in working backwards
involved solving a single-person optimization problem. Now the first step
involves solving a simultaneous-move game between players $A$ and $B$ in the
second stage, given the outcome from stage one. If the players $A$ and $B$
anticipate that their second-stage behavior will be given by $(p_{1}^{\star
}(p,q),q_{1}^{\star}(p,q))$, then the first-stage interaction between them
amounts to the simultaneous-move game:
\begin{enumerate}
\item Players $A$ and $B$ simultaneously choose actions $p$ and $q$ from
feasible sets $\mathbf{P}$ and $\mathbf{Q}$, respectively.
\item Payoffs are $P_{i}(p,q,p_{1}^{\star}(p,q),q_{1}^{\star}(p,q))$ for
$i=A,B$.
\end{enumerate}
When $(p^{\star},q^{\star})$ is the unique NE of this simultaneous-move game,
the set of four numbers $(p^{\star},q^{\star},p_{1}^{\star}(p,q),q_{1}^{\star
}(p,q))$ is known as the SGPO \cite{Gibbons} of this two-stage game. This
outcome is the natural analog of BIO in games of complete and perfect information.
\section{Two-stage Prisoners' Dilemma}
\subsection{Classical form}
We use a normal form of the PD given by the matrix:
\begin{equation}
\begin{array}
[c]{c}
\text{A}
\end{array}
\begin{array}
[c]{c}
C\\
D
\end{array}
\overset{\overset{
\begin{array}
[c]{c}
\text{B}
\end{array}
}{
\begin{array}
[c]{cc}
C & D
\end{array}
}}{\left(
\begin{array}
[c]{cc}
(3,3) & (0,5)\\
(5,0) & (1,1)
\end{array}
\right) } \label{Matrix1Repeated}
\end{equation}
The players play this simultaneous-move game twice. The outcome of the first
play is observed before the second stage begins. Payoff for the entire game is
simply the sum of the payoffs from the two stages. It is a two-stage game of
complete but imperfect information \cite{Gibbons}.
Assume players $A$ and $B$ play the pure strategy $C$ with probabilities $p$
and $q$, respectively, in the stage $1$. Also assume the players $A$ and $B$
play the strategy $C$ with probabilities $p_{1}$ and $q_{1}$, respectively, in
the stage $2$. Call $\left[ P_{A1}\right] _{cl}$ and $\left[ P_{B1}\right]
_{cl}$ the payoffs to players $A$ and $B$, respectively, in the stage $1$,
where the symbol $cl$ is for `classical'. These payoffs can be found from the
matrix (\ref{Matrix1Repeated}) as
\begin{equation}
\left[ P_{A1}\right] _{cl}=-pq+4q-p+1\text{, \ \ \ \ \ }\left[
P_{B1}\right] _{cl}=-pq+4p-q+1 \label{PayoffsC1}
\end{equation}
The NE conditions for this stage are
\begin{equation}
\left[ P_{A1}(p^{\star},q^{\star})-P_{A1}(p,q^{\star})\right] _{cl}
\geq0\text{, \ \ \ \ \ }\left[ P_{B1}(p^{\star},q^{\star})-P_{B1}(p^{\star
},q)\right] _{cl}\geq0 \label{NEconds1}
\end{equation}
giving $p^{\star}=q^{\star}=0$ (i.e. defection for both the players) as the
unique NE in this stage. Likewise, in the second stage the payoffs to players
$A$ and $B$ are written as $\left[ P_{A2}\right] _{cl}$ and $\left[
P_{B2}\right] _{cl}$ respectively, where
\begin{equation}
\left[ P_{A2}\right] _{cl}=-p_{1}q_{1}+4q_{1}-p_{1}+1\text{, \ \ \ \ \ }
\left[ P_{B2}\right] _{cl}=-p_{1}q_{1}+4p_{1}-q_{1}+1 \label{PayoffsC2}
\end{equation}
and once again the strategy of defection, i.e. $p_{1}^{\star}=q_{1}^{\star}
=0$, comes out as the unique NE in the second stage. To compute the SGPO of
this two-stage game, we analyze its first stage given that the second-stage
outcome is the NE of that stage, namely $p_{1}^{\star}=q_{1}^{\star}=0$. For
this NE the players' payoffs in the second stage are
\begin{equation}
\left[ P_{A2}(0,0)\right] _{cl}=1,\text{ \ \ \ \ \ }\left[ P_{B2}
(0,0)\right] _{cl}=1 \label{PDefC}
\end{equation}
The players' first-stage interaction, therefore, in the two-stage PD amounts
to a one-shot game, in which the payoff pair $(1,1)$ from the second stage is
added to their first-stage payoff pair. Write the players' payoffs in the
one-shot game as
\begin{align}
\left[ P_{A(1+2)}\right] _{cl} & =\left[ P_{A1}+P_{A2}(0,0)\right]
_{cl}=-pq+4q-p+2\nonumber\\
\left[ P_{B(1+2)}\right] _{cl} & =\left[ P_{B1}+P_{B2}(0,0)\right]
_{cl}=-pq+4p-q+2 \label{PayoffsTotal}
\end{align}
This game again has $(0,0)$ as its unique NE. Therefore, the unique SGPO of
the two-stage PD is $(0,0)$ in the first stage, followed by $(0,0)$ in the
second stage: defection in both stages is the SGPO of the two-stage classical
PD.
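As an illustrative aside (not part of the original argument), the
backwards-induction reasoning above can be checked numerically. The short
Python sketch below encodes the stage payoffs of Eq. (\ref{PayoffsC1}), adds
the second-stage continuation value $(1,1)$, and verifies on a grid that
mutual defection is an equilibrium of the resulting one-shot game while mutual
cooperation is not; the grid search is only a crude illustrative device.
\begin{verbatim}
import numpy as np

# Stage payoffs of Eq. (PayoffsC1); p, q = probabilities of cooperation.
def PA(p, q): return -p*q + 4*q - p + 1
def PB(p, q): return -p*q + 4*p - q + 1

# Second stage: defection (0,0) is the unique NE, worth 1 to each player,
# so the first stage of the two-stage game adds this continuation value.
def PA_total(p, q): return PA(p, q) + 1
def PB_total(p, q): return PB(p, q) + 1

grid = np.linspace(0, 1, 101)

def is_NE(p_star, q_star):
    # No profitable unilateral deviation on the grid.
    okA = all(PA_total(p_star, q_star) >= PA_total(p, q_star) - 1e-12 for p in grid)
    okB = all(PB_total(p_star, q_star) >= PB_total(p_star, q) - 1e-12 for q in grid)
    return okA and okB

print(is_NE(0.0, 0.0))   # True : mutual defection
print(is_NE(1.0, 1.0))   # False: cooperation is not a classical equilibrium
\end{verbatim}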
It is now shown how it becomes possible, in a quantum form of this two-stage
PD, to achieve an SGPO in which the players decide to cooperate in the first
stage while knowing that they both will defect in the second. The quantum form
of the two-stage PD is played using a system of four qubits, which the players
manipulate according to Marinatto and Weber's scheme for playing a quantum
form of a matrix game.
\subsection{Quantum form}
Marinatto and Weber's scheme can be extended to play a two-stage version of a
bi-matrix game. For example, a quantum version of the two-stage PD starts by
making available a four-qubit pure quantum state to the players. This state
can be written as
\begin{equation}
\left| \psi_{in}\right\rangle =\underset{i,j,k,l=1,2}{\sum}c_{ijkl}\left|
ijkl\right\rangle \text{ \ \ where \ \ }\underset{i,j,k,l=1,2}{\sum}\left|
c_{ijkl}\right| ^{2}=1 \label{IniStatRepeated}
\end{equation}
where $i,j,k$ and $l$ are identifying symbols for four qubits. The upper and
lower states of a qubit are $1$ and $2$ respectively; and $c_{ijkl}$ are
complex numbers. It is a quantum state in $2\otimes2\otimes2\otimes
2$-dimensional Hilbert space. We suppose the qubits $i$ and $j$ are
manipulated by the players in the first stage of the game and, similarly, the
qubits $k$ and $l$ are manipulated in the second stage. Let $\rho_{in}$ denote
the initial state (\ref{IniStatRepeated}) in the density matrix formalism.
Assume that, during their moves in the first stage of the game, players $A$
and $B$ apply the identity operator $\hat{I}$ to the initial state with
probabilities $p$ and $q$, respectively, and apply the operator
$\hat{\sigma}_{x}$ with probabilities $(1-p)$ and $(1-q)$, respectively. The
players' actions in the first stage change $\rho_{in}$ to
\begin{align}
\rho_{fin} & =pq\hat{I}_{A}\otimes\hat{I}_{B}\rho_{in}\hat{I}_{A}^{\dagger
}\otimes\hat{I}_{B}^{\dagger}+p(1-q)\hat{I}_{A}\otimes\hat{\sigma}_{xB}
\rho_{in}\hat{I}_{A}^{\dagger}\otimes\hat{\sigma}_{xB}^{\dagger}+\nonumber\\
& q(1-p)\hat{\sigma}_{xA}\otimes\hat{I}_{B}\rho_{in}\hat{\sigma}
_{xA}^{\dagger}\otimes\hat{I}_{B}^{\dagger}+(1-p)(1-q)\hat{\sigma}_{xA}
\otimes\hat{\sigma}_{xB}\rho_{in}\hat{\sigma}_{xA}^{\dagger}\otimes\hat
{\sigma}_{xB}^{\dagger}
\end{align}
The players' actions in this stage are simultaneous, and they remember their
own moves (i.e. the numbers $p$ and $q$) in the next stage. In the second
stage, players $A$ and $B$ apply the identity operator to $\rho_{fin}$ with
probabilities $p_{1}$ and $q_{1}$, respectively, and the operator
$\hat{\sigma}_{x}$ with probabilities $(1-p_{1})$ and $(1-q_{1})$,
respectively. Fig. (\ref{Fig6}) shows the overall idea of
playing the two-stage game. Players' moves in the two stages of the game are
done on two different pairs of qubits.
\begin{figure}
\caption{Playing a two-stage quantum game of Prisoners' Dilemma.}
\label{Fig6}
\end{figure}
After the moves performed in the second stage the quantum state changes to
\begin{align}
\rho_{ffin} & =p_{1}q_{1}\hat{I}_{A}\otimes\hat{I}_{B}\rho_{fin}\hat{I}
_{A}^{\dagger}\otimes\hat{I}_{B}^{\dagger}+p_{1}(1-q_{1})\hat{I}_{A}
\otimes\hat{\sigma}_{xB}\rho_{fin}\hat{I}_{A}^{\dagger}\otimes\hat{\sigma
}_{xB}^{\dagger}+\nonumber\\
& q_{1}(1-p_{1})\hat{\sigma}_{xA}\otimes\hat{I}_{B}\rho_{fin}\hat{\sigma
}_{xA}^{\dagger}\otimes\hat{I}_{B}^{\dagger}+\nonumber\\
& (1-p_{1})(1-q_{1})\hat{\sigma}_{xA}\otimes\hat{\sigma}_{xB}\rho_{fin}
\hat{\sigma}_{xA}^{\dagger}\otimes\hat{\sigma}_{xB}^{\dagger}
\end{align}
which is ready for measurement, giving the payoffs for the two stages of the
game. If the bi-matrix game (\ref{Matrix1Repeated}) is classically played at
each stage, then the referee's possession of the following four payoff
operators corresponds to a `quantum version' of the two-stage game:
\begin{align}
\left[ \left( P_{A}\right) _{oper}\right] _{1} & =\underset
{k,l=1,2}{\sum}\left\{ 3\left| 11kl\right\rangle \left\langle 11kl\right|
+5\left| 21kl\right\rangle \left\langle 21kl\right| +\left|
22kl\right\rangle \left\langle 22kl\right| \right\} \nonumber\\
\left[ \left( P_{A}\right) _{oper}\right] _{2} & =\underset
{i,j=1,2}{\sum}\left\{ 3\left| ij11\right\rangle \left\langle ij11\right|
+5\left| ij21\right\rangle \left\langle ij21\right| +\left|
ij22\right\rangle \left\langle ij22\right| \right\} \nonumber\\
\left[ \left( P_{B}\right) _{oper}\right] _{1} & =\underset
{k,l=1,2}{\sum}\left\{ 3\left| 11kl\right\rangle \left\langle 11kl\right|
+5\left| 12kl\right\rangle \left\langle 12kl\right| +\left|
22kl\right\rangle \left\langle 22kl\right| \right\} \nonumber\\
\left[ \left( P_{B}\right) _{oper}\right] _{2} & =\underset
{i,j=1,2}{\sum}\left\{ 3\left| ij11\right\rangle \left\langle ij11\right|
+5\left| ij12\right\rangle \left\langle ij12\right| +\left|
ij22\right\rangle \left\langle ij22\right| \right\}
\end{align}
The corresponding payoffs are, then, obtained as mean values of these
operators. For example, Alice's payoff in stage $1$ is
\begin{equation}
\left[ P_{A1}\right] _{qu}=\text{Tr}\left\{ \left[ \left( P_{A}\right)
_{oper}\right] _{1}\rho_{ffin}\right\}
\end{equation}
We consider a two-stage quantum PD played with a pure four-qubit initial state
of the form:
\begin{equation}
\left| \psi_{ini}\right\rangle =c_{1}\left| 1111\right\rangle +c_{2}\left|
1122\right\rangle +c_{3}\left| 2211\right\rangle +c_{4}\left|
2222\right\rangle
\end{equation}
with $\underset{t=1}{\overset{4}{\sum}}\left| c_{t}\right| ^{2}=1$. For this
state the payoffs to the players $A$ and $B$ in the two stages are found as
\begin{align}
\left[ P_{A1}\right] _{qu} & =(\left| c_{1}\right| ^{2}+\left|
c_{2}\right| ^{2})(-pq-p+4q+1)+\nonumber\\
& (\left| c_{3}\right| ^{2}+\left| c_{4}\right| ^{2}
)(-pq+2p-3q+3)\nonumber\\
\left[ P_{A2}\right] _{qu} & =(\left| c_{1}\right| ^{2}+\left|
c_{3}\right| ^{2})(-p_{1}q_{1}-p_{1}+4q_{1}+1)+\nonumber\\
& (\left| c_{2}\right| ^{2}+\left| c_{4}\right| ^{2})(-p_{1}q_{1}
+2p_{1}-3q_{1}+3)\nonumber\\
\left[ P_{B1}\right] _{qu} & =(\left| c_{1}\right| ^{2}+\left|
c_{2}\right| ^{2})(-pq-q+4p+1)+\nonumber\\
& (\left| c_{3}\right| ^{2}+\left| c_{4}\right| ^{2}
)(-pq+2q-3p+3)\nonumber\\
\left[ P_{B2}\right] _{qu} & =(\left| c_{1}\right| ^{2}+\left|
c_{3}\right| ^{2})(-p_{1}q_{1}-q_{1}+4p_{1}+1)+\nonumber\\
& (\left| c_{2}\right| ^{2}+\left| c_{4}\right| ^{2})(-p_{1}q_{1}
+2q_{1}-3p_{1}+3) \label{Payoffs12q}
\end{align}
The players' payoffs in the classical two-stage PD, given by Eqs.
(\ref{PayoffsC1}) and (\ref{PayoffsC2}), can now be recovered from Eq.
(\ref{Payoffs12q}) by taking $\left| c_{1}\right| ^{2}=1$. The classical
game is, therefore, a subset of its quantum version.
One now proceeds, in the spirit of backwards-induction, to find a NE in the
second stage of the quantum game. Suppose $(p_{1}^{\star},q_{1}^{\star})$ is a
NE in the second stage, then
\begin{equation}
\left[ P_{A2}(p_{1}^{\star},q_{1}^{\star})-P_{A2}(p_{1},q_{1}^{\star
})\right] _{qu}\geq0,\text{ \ \ \ \ \ \ }\left[ P_{B2}(p_{1}^{\star}
,q_{1}^{\star})-P_{B2}(p_{1}^{\star},q_{1})\right] _{qu}\geq0
\label{NashIneq}
\end{equation}
With the players' payoffs of the two stages given by Eq. (\ref{Payoffs12q}),
the Nash inequalities (\ref{NashIneq}) can be written as
\begin{align}
(p_{1}^{\star}-p_{1})\left\{ -q_{1}^{\star}+2(\left| c_{2}\right|
^{2}+\left| c_{4}\right| ^{2})-(\left| c_{1}\right| ^{2}+\left|
c_{3}\right| ^{2})\right\} & \geq0\nonumber\\
(q_{1}^{\star}-q_{1})\left\{ -p_{1}^{\star}+2(\left| c_{2}\right|
^{2}+\left| c_{4}\right| ^{2})-(\left| c_{1}\right| ^{2}+\left|
c_{3}\right| ^{2})\right\} & \geq0
\end{align}
and the strategy of defection by both the players, i.e. $p_{1}^{\star}
=q_{1}^{\star}=0,$ becomes a NE in the second stage of the quantum game, if
\begin{equation}
\left\{ 2(\left| c_{2}\right| ^{2}+\left| c_{4}\right| ^{2})-(\left|
c_{1}\right| ^{2}+\left| c_{3}\right| ^{2})\right\} \leq0 \label{Cond1}
\end{equation}
Similar to the classical analysis, players' payoffs can be found when both
decide to defect in the second stage:
\begin{equation}
\left[ P_{A2}(0,0)\right] _{qu}=\left[ P_{B2}(0,0)\right] _{qu}=3(\left|
c_{2}\right| ^{2}+\left| c_{4}\right| ^{2})+(\left| c_{1}\right|
^{2}+\left| c_{3}\right| ^{2}) \label{PDefQ}
\end{equation}
The classical payoffs of Eq. (\ref{PDefC}), when both players defect, are
recovered from Eq. (\ref{PDefQ}) when $\left| c_{1}\right| ^{2}=1$, i.e.
for an unentangled initial state.
As in the classical case, the players' first-stage interaction in the
two-stage quantum PD amounts to a one-shot game, in which the second-stage
payoff $3(\left| c_{2}\right| ^{2}+\left| c_{4}\right| ^{2})+(\left|
c_{1}\right| ^{2}+\left| c_{3}\right| ^{2})$ is added to their first-stage
payoffs:
\begin{align}
\left[ P_{A(1+2)}\right] _{qu} & =\left[ P_{A1}+P_{A2}(0,0)\right]
_{qu}=\left| c_{1}\right| ^{2}(-pq+4q-p+2)+\nonumber\\
& \left| c_{2}\right| ^{2}(-pq+4q-p+4)+\left| c_{3}\right| ^{2}
(-pq-3q+2p+4)+\nonumber\\
& \left| c_{4}\right| ^{2}(-pq-3q+2p+6)\nonumber\\
\left[ P_{B(1+2)}\right] _{qu} & =\left[ P_{B1}+P_{B2}(0,0)\right]
_{qu}=\left| c_{1}\right| ^{2}(-pq+4p-q+2)+\nonumber\\
& \left| c_{2}\right| ^{2}(-pq+4p-q+4)+\left| c_{3}\right| ^{2}
(-pq-3p+2q+4)+\nonumber\\
& \left| c_{4}\right| ^{2}(-pq-3p+2q+6)
\end{align}
The strategy of cooperation (that is, $p^{\star}=q^{\star}=1$) can now
be a NE of the first-stage interaction in this two-stage quantum game, if
\begin{equation}
\left\{ 2(\left| c_{1}\right| ^{2}+\left| c_{2}\right| ^{2})-(\left|
c_{3}\right| ^{2}+\left| c_{4}\right| ^{2})\right\} \leq0 \label{Cond2}
\end{equation}
The inequalities (\ref{Cond1}) and (\ref{Cond2}) are the conditions on the
initial state under which the players cooperate in their first-stage
interaction while both defect in the next stage. Using the normalization
$\underset{t=1}{\overset{4}{\sum}}\left| c_{t}\right| ^{2}=1$, these
conditions can be rewritten as
\begin{equation}
\left| c_{1}\right| ^{2}+\left| c_{2}\right| ^{2}\leq\frac{1}{3}\text{,
\ \ \ \ \ \ \ }\left| c_{2}\right| ^{2}+\left| c_{4}\right| ^{2}\leq
\frac{1}{3} \label{Conds}
\end{equation}
For example, at $\left| c_{1}\right| ^{2}=\left| c_{2}\right| ^{2}=\left|
c_{4}\right| ^{2}=\frac{1}{6}$ and $\left| c_{3}\right| ^{2}=\frac{1}{2}$
these conditions hold. For the classical game ($\left| c_{1}\right| ^{2}=1$),
on the other hand, the first of the inequalities (\ref{Conds}) fails, which
shows why classically it is not possible for the players to cooperate in the
first stage while knowing that they both will defect in the second.
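The boundary example just mentioned can be verified directly. The Python
sketch below evaluates the payoffs of Eq. (\ref{Payoffs12q}) for $\left|
c_{1}\right| ^{2}=\left| c_{2}\right| ^{2}=\left| c_{4}\right| ^{2}=1/6$ and
$\left| c_{3}\right| ^{2}=1/2$ and confirms that defection is an equilibrium
of the second stage while cooperation is an equilibrium of the first-stage
one-shot game; for this boundary choice the equilibria are weak (the deviating
player is indifferent), and the grid check is purely illustrative.
\begin{verbatim}
import numpy as np

# Squared amplitudes of the illustrative initial state from the text.
c1, c2, c3, c4 = 1/6, 1/6, 1/2, 1/6      # |c_1|^2, ..., |c_4|^2

# Payoffs of Eq. (Payoffs12q).
def PA1(p, q):   return (c1+c2)*(-p*q - p + 4*q + 1) + (c3+c4)*(-p*q + 2*p - 3*q + 3)
def PA2(p1, q1): return (c1+c3)*(-p1*q1 - p1 + 4*q1 + 1) + (c2+c4)*(-p1*q1 + 2*p1 - 3*q1 + 3)
def PB1(p, q):   return (c1+c2)*(-p*q - q + 4*p + 1) + (c3+c4)*(-p*q + 2*q - 3*p + 3)
def PB2(p1, q1): return (c1+c3)*(-p1*q1 - q1 + 4*p1 + 1) + (c2+c4)*(-p1*q1 + 2*q1 - 3*p1 + 3)

# Both conditions of (Conds).
print(c1 + c2 <= 1/3 + 1e-12, c2 + c4 <= 1/3 + 1e-12)

grid = np.linspace(0, 1, 101)
# Second stage: defection (0,0) is a NE of (PA2, PB2).
print(all(PA2(0, 0) >= PA2(p, 0) - 1e-12 for p in grid),
      all(PB2(0, 0) >= PB2(0, q) - 1e-12 for q in grid))
# First stage with the continuation value PA2(0,0) added: cooperation (1,1) is a NE.
print(all(PA1(1, 1) + PA2(0, 0) >= PA1(p, 1) + PA2(0, 0) - 1e-12 for p in grid),
      all(PB1(1, 1) + PB2(0, 0) >= PB1(1, q) + PB2(0, 0) - 1e-12 for q in grid))
\end{verbatim}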
\section{Discussion}
Essentially, the repeated games differ from one-shot games in that players'
current actions can depend on their past behavior. In a repeated bi-matrix
game the same matrix game is played repeatedly, over a number of stages that
represent the passing of time. The payoffs are accumulated over time.
Accumulation of information about the ``history'' of the game changes the
structure of the game with time. With each new stage the information at the
disposal of the players changes and, since strategies transform this
information into actions, the players' strategic choices are affected. If a
game is repeated twice, the players' moves at the second stage depend on the
outcome of the first stage. This situation becomes more and more complex as
the number of stages increases, since the players can base their decisions on
histories represented by sequences of actions and outcomes observed over
an increasing number of stages.
Recent findings in quantum game theory motivate a study of repeated games in
the new quantum setting, not least because extensive analysis of repeated
games already exists in the literature of classical game theory. In the
present chapter -- to look for a quantum role in repeated games -- we consider
a quantum form of the well-known bi-matrix game of PD.
Classical analysis of the PD has been developed in many different formats,
including its finitely and infinitely repeated versions. In the history of
quantum games the PD became the focus of an early and important study
\cite{Eisert} showing how to play a quantum form of a bi-matrix game. To play
a quantum form of the repeated PD we select Marinatto and Weber's scheme. In
this scheme a repeated game is played when the players select numbers in the
range $\left[ 0,1\right] $, giving the probabilities with which they apply the
identity operator $\hat{I}$ to a four-qubit pure initial quantum state. The
players' actions in each stage are performed on two different pairs of qubits.
The classical two-stage PD corresponds to an unentangled initial state, and
the classical SGPO consists of the players defecting in both stages. It is
shown that an SGPO in which the players cooperate in a stage is a
non-classical feature that can be made to appear in the quantum setting.
The argument presented here is based on the assumption that other games,
resulting from a play starting with a four-qubit quantum state of the form of
Eq. (\ref{IniStatRepeated}), are `quantum forms' of the classical two-stage
game. This assumption originates from the fact that the classical game
corresponds to a particular four-qubit pure quantum state which is
unentangled. The assumption makes it possible to translate the desired
appearance of cooperation in a stage into certain conditions on the parameters
of the initial state, thus giving an SGPO where the players decide to
cooperate in their first-stage interaction while they both defect in the next
stage.
One may ask what the compelling reason is for choosing a $2\otimes2\otimes
2\otimes2$-dimensional Hilbert space instead of a $2\otimes2$-dimensional one.
A $2\otimes2$-dimensional treatment of this problem, in the same quantization
scheme, involves denominator terms in the expressions for the payoff operators
when these are obtained under the condition that the classical game
corresponds to an unentangled initial state. It then leads to many `if-then'
conditions before one finally obtains the payoffs. By contrast, a treatment in
$2\otimes2\otimes2\otimes2$ dimensions is much smoother. A study of the
concept of SGPO in a two-stage repeated quantum game then also becomes a
logical extension of the backwards-induction procedure proposed in the
previous chapter.
\chapter{New proposals to play quantum games}
\section{\label{IntNewProp}Introduction}
Meyer \cite{MeyerDavid} and Du et al. \cite{Du1} have shown that entanglement
may not be essential for a quantum game. Eisert et al.'s quantum PD was the
first proposal where entanglement was used as a resource. Since then,
noticeably greater attention has been paid to exploiting entanglement for
quantum games. This is not unusual and can be traced back to the exciting and
counter-intuitive properties of this phenomenon, as well as to its recent
enthusiastic investigation in quantum information theory \cite{Nielsen}.
Local unitary manipulation of entangled qubits to play matrix games is indeed
an interesting concept that gives new dimensions to classical game theory. But
it does not forbid the use of other quantum mechanical effects to play other
`quantum forms' of matrix games -- games for which extensive analysis in the
classical domain already exists in the literature \cite{Burger,Gibbons}. A
look at Eisert et al.'s set-up \cite{Eisert,Eisert1} makes apparent some of
its similarities to the well-known Young double-slit apparatus \cite{Hecht}.
Simultaneous and local unitary manipulation of a maximally entangled two-qubit
quantum state, and its later measurement, is the essential feature of Eisert
et al.'s set-up. In Young's double-slit set-up, by comparison, coherent light
passes through two slits to form a diffraction pattern on a screen facing the
slits. The similarity between these set-ups becomes noticeable if a comparison
is made between:
\begin{itemize}
\item The properties of entanglement and coherence.
\item Players' moves in manipulations of qubits and the process of opening or
closing the slits.
\item Wavefunction-collapsing measurement and the appearance of the
diffraction pattern.
\end{itemize}
Such a comparison, in its turn, asks for a quantum feature that can be
exploited to give a new dimension to a matrix game when it is played using a
Young-type double-slit apparatus. In Eisert et al.'s set-up this quantum
feature is obviously the quantum phenomenon of entanglement. In Young's
apparatus this feature is the association of wave-like properties with
material objects like electrons, producing a diffraction pattern on a screen.
Section (\ref{diffraction}) exploits this association of waves as a resource
to play quantum versions of classical games.
Playing of a game requires resources for its physical implementation. For
example, to play a bi-matrix game the resources may consist of pairs of
two-valued `objects', like coins, distributed between the players. The players
perform their moves on the objects and later a referee decides payoffs after
observing the objects. Game theory usually links players' actions
\emph{directly} to their payoffs, without reference to the nature of the
objects on which the players have made their moves. However, the playing of
quantum games \cite{Eisert} shows that radically different `solutions' can
emerge when
the \emph{same} game is physically implemented on distributed objects which
are quantum mechanically correlated.
In Section (\ref{EnkPikeComment}) Enk and Pike's argument \cite{EnkPike} is
briefly discussed. Essentially it says that the emergence of new equilibrium
in quantum Prisoners' Dilemma can also be understood as an equilibrium in a
modified form of the game. They constructed \emph{another} matrix game, in
which players have access to three pure classical strategies instead of the
usual two, commenting that it `captures' everything quantum Prisoners' Dilemma
has to offer. Constructing an extended matrix with an extra pure classical
move, in their view, is justified because \emph{also} in quantum Prisoners'
Dilemma players can play moves which are superpositions of the two classical moves.
Truly quantum pairs of objects possess non-local correlations, and it is
impossible for a local model of a quantum game set-up to produce \emph{exactly}
the same data. How may such unusual correlations explicitly affect the
solutions of a game implemented with quantum objects? And to what extent can
the solutions of a quantum game themselves be called `truly quantum' in
nature? Section (\ref{HullSect}) tries to address these questions.
\section{Quantum games with a diffraction set-up\label{diffraction}}
Historically speaking, de Broglie's original idea \cite{Hecht,Debroglie}
-- that travelling material particles have waves associated with them -- was
undoubtedly the key concept behind the development of quantum physics in the
early part of the twentieth century. Soon afterwards, Davisson and Germer
\cite{Hecht} verified the idea in their experimental demonstration of the
diffraction of electrons by crystals. De Broglie proposed that a travelling
electron with momentum $p$ has an associated wave of wavelength $\lambda=h/p$,
where $h$ is Planck's constant. For $\lambda$ to be measurable under normal
laboratory conditions the momentum $p$ must be extremely small; since $h$ is
itself tiny, the waves associated with macroscopic objects are far too short
to detect. Our motivation is to take
this quantum feature -- associating wave-like properties to micro objects --
as a resource that can be used to play a quantum game. Such a quantum game can
be realized using an apparatus consisting of travelling electrons, multiple
slits intercepting them, and a resulting diffraction pattern. In this set-up a
player's choice of a `pure strategy' consists of opening or closing slits at
his/her disposal. Suppose the apparatus is adjusted such that when $\lambda$
approaches zero the classical game is reproduced. It can then be argued that,
because observing a value of $\lambda$ well away from zero is entirely a
quantum feature, the resulting different payoffs for the players correspond to
a quantum form of the classical game. In this set-up the players' payoffs are
to be found from the diffraction pattern formed on the screen. We show the
possibility of finding a value of $\lambda$ that makes a non-classical
equilibrium appear in the PD when the players play only pure strategies. The
classical game remains a subset of its quantum version because with $\lambda$
approaching zero the classical game is reproduced.
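For orientation (and purely as an aside), the de Broglie wavelength of an
electron is easily computed; the velocity used below is an arbitrary
illustrative value.
\begin{verbatim}
# de Broglie wavelength lambda = h / (m v) for an electron (SI units).
h = 6.626e-34          # Planck's constant (J s)
m_e = 9.109e-31        # electron mass (kg)
v = 1.0e6              # illustrative velocity (m/s)
lam = h / (m_e * v)
print(lam)             # ~7.3e-10 m, comparable to atomic spacings
\end{verbatim}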
The motivation to play a quantum form of PD, without using the phenomenon of
entanglement, also derives from Feynman's excellent exposition \cite{Feynman}
of quantum behavior of atomic objects. He describes and compares the
diffraction patterns in two similar set-ups that are imaginary but
experimentally realizable. The two set-ups consist of bullets and electrons
passing through a wall with two slits. Feynman then describes the well-known
quantum property -- associating waves to all material particles -- to
distinguish the diffraction patterns of bullets and electrons. The
disappearance of a pattern for the bullets, he explains, is due to the tiny
wavelengths of the associated waves. For such waves the pattern becomes very
fine and, with a detector of finite size, one cannot distinguish the separate
maxima and minima. We ask why not play a game, in Feynman's imaginary
experimental set-up, such that the classical game is obtained when, in
Feynman's words, bullets are fired, and a quantum game is obtained when
electrons replace the bullets.
\subsection{Playing Prisoners' Dilemma}
We select the PD to be played in a diffraction set-up. The classical PD, in
its general form, is represented by the following matrix:
\begin{equation}
\begin{array}
[c]{c}
\text{Alice}
\end{array}
\begin{array}
[c]{c}
C\\
D
\end{array}
\overset{
\begin{array}
[c]{c}
\text{Bob}
\end{array}
}{\overset{
\begin{array}
[c]{cc}
C & D
\end{array}
}{\left(
\begin{array}
[c]{cc}
(r,r) & (s,t)\\
(t,s) & (u,u)
\end{array}
\right) }} \label{DiffractionPD}
\end{equation}
where $t>r>u>s$. To embed the classical game in its quantum version, the
positive coefficients $u,r,s$ and $t$ appearing in the matrix
(\ref{DiffractionPD}) are translated into distances between the slits. Each
player is in control of two slits, and his/her strategy consists of opening
one of the slits and closing the other. For example, if Alice decides to
cooperate then she opens the slit $C$ and closes the slit $D$. Because Bob has
a similar choice, every possible pair of moves leads to the opening of two
slits and the closure of the other two, with the separation between the two
open slits depending on the moves of the players. This holds when only the
so-called `pure strategies' can be played, which in the present set-up means
opening one slit and closing the other. Now, at the final stage of the game,
the action of the arbiter -- responsible for finding the payoffs once the
players have made their moves -- consists of measuring the distance between
two peaks of the diffraction pattern. This peak-to-peak distance is known
\cite{Hecht} to be proportional to $\lambda/d$, where $d$ is the separation
between the open slits and $\lambda$ is the wavelength associated with the
bombarded material objects, like electrons. Payoffs to the players are
functions of $\lambda/d$, which explains why it is useful to translate the
coefficients of the matrix of the classical game into the separations $d$
between the slits. When bullets are fired, meaning that the particles become
heavier and the corresponding $\lambda$ is very nearly zero, the payoffs
become classical and depend only on $d$, i.e. the separation between the
slits. A payoff representation in terms of $\lambda/d$ therefore contains both
the classical and quantum aspects of the matrix game played in this set-up.
Fig. (\ref{Fig7}) sketches the diffraction set-up used to play such a quantum
game.
\begin{figure}
\caption{A multi-slit diffraction set-up to play a quantum form of Prisoners'
Dilemma. A window with four slits faces an electron source. Each player has
access to two slits. A player plays a pure strategy by opening a slit and
closing the other. Referee finds the players' payoffs by measuring the
peak-to-peak distance on the diffraction pattern formed on the screen.}
\label{Fig7}
\end{figure}
For PD the payoffs are symmetric for the players and a single equation can
describe the payoffs to both the players when their strategies are known. A
usual way to express it is to write $P(s_{1},s_{2})$ for the payoff to the
$s_{1}$-player against the $s_{2}$-player. Such a single equation
representation is usually used in evolutionary games \cite{Weibull} consisting
of symmetric bi-matrix conflicts. The $s_{1}$-player is referred to as the
`focal' player and the $s_{2}$-player as just the `other' player. The PD is
one such example for which a single payoff equation can capture the essence of
the idea of a symmetric NE. A strategy $s^{\star}$ is a symmetric NE if
\begin{equation}
P(s^{\star},s^{\star})-P(s,s^{\star})\geq0,\text{ \ \ \ for all }s
\label{NEDiffraction}
\end{equation}
saying that the focal player cannot be better off by diverging away from
$s^{\star}$. Because the set-up of Fig. (\ref{Fig7}) involves the coefficients
of the classical payoff matrix corresponding to the first player, finding a
symmetric NE from Eq. (\ref{NEDiffraction}) becomes immediately possible when
the first player is taken as focal. It also shows why writing the payoff as
$P(s_{1},s_{2})$ is relevant to the set-up of Fig. (\ref{Fig7}).
For example, classically the strategy of defection $D$ is a symmetric NE
because $P(D,D)-P(C,D)=(u-s)>0$, where the players' moves consist of only the
pure strategies.
In the set-up of Fig. (\ref{Fig7}), for every pair of pure-strategy moves the
players have the option to play, a unique separation $d$ between the open
slits is obtained, which can take one of four possible values, i.e. $u,r,s$ or
$t$. Classically $P(C,C)=r$, $P(C,D)=s$, $P(D,C)=t$ and $P(D,D)=u$. It is
observed in Fig. (\ref{Fig7}) that the classical payoff to the focal player,
against the other, can be equated to the separation $d$ between the two open
slits.
Now assume that instead of simply $P(s_{1},s_{2})=d$ the arbiter uses the
payoff equation:
\begin{equation}
P(s_{1},s_{2})=d+k(\lambda/d)
\end{equation}
where $k$ is a positive constant that can be called a \textit{scaling factor}.
$P(s_{1},s_{2})$, obviously, reduces to its classical counterpart when
$\lambda$ is very nearly zero. Suppose the strategy of cooperation $C$ is a
symmetric NE:
\begin{equation}
P(C,C)-P(D,C)=\left\{ k\lambda(1/r-1/t)-(t-r)\right\} \geq0
\end{equation}
It requires $\lambda\geq rt/k$. For electrons of mass $m$ travelling with
velocity $v$ this gives $v\leq kh/(mrt)$. Supposing $r$ and $t$ are both
non-zero, the arbiter's problem consists of finding an appropriate value of
the scaling factor $k$ that brings $v$ into a reasonable range from an
experimental point of view. When the electrons have associated wavelength
$\lambda\geq rt/k$ the strategy of cooperation becomes a symmetric NE, and
each player gets a payoff $r+k\lambda/r$. Similarly when the pure strategy of
defection is a symmetric NE in the quantum game, we have
\begin{equation}
P(D,D)-P(C,D)=\left\{ -k\lambda(1/s-1/u)+(u-s)\right\} \geq0
\end{equation}
It requires $\lambda\leq su/k$. After the scaling factor $k$ is decided the
wavelength $\lambda$ determines which pure strategy should be a symmetric NE.
Two ranges for $\lambda$ can be indicated as $\lambda\leq su/k$ and
$\lambda\geq rt/k$. Defection and cooperation are symmetric NE for these
ranges, respectively. Because the constants $t,r,u,s$ and $k$ are all
positive, the classical game ($\lambda$ nearly zero) lies in the first range
of $\lambda$. The non-classical equilibrium of cooperation shows itself in the
second range of $\lambda$.
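The two wavelength regimes can be made concrete with a small sketch. The
numerical values of $r,s,t,u$ and $k$ below are illustrative assumptions only
(they merely satisfy $t>r>u>s$ with $s\neq0$); the function simply evaluates
$P(s_{1},s_{2})=d+k\lambda/d$ at the four slit separations and tests the two
symmetric-NE conditions.
\begin{verbatim}
# Payoff P(s1, s2) = d + k*lambda/d in the diffraction set-up; the slit
# separations are the classical PD coefficients.  Values are illustrative.
r, s, t, u = 3.0, 1.0, 5.0, 2.0          # t > r > u > s
k = 1.0

def payoff(d, lam):
    return d + k * lam / d

def symmetric_NE(lam):
    """Return which pure strategies are symmetric NE at wavelength lam."""
    ne = []
    if payoff(r, lam) >= payoff(t, lam):   # P(C,C) >= P(D,C)
        ne.append('C')
    if payoff(u, lam) >= payoff(s, lam):   # P(D,D) >= P(C,D)
        ne.append('D')
    return ne

print(symmetric_NE(0.0))          # ['D'] : classical regime, lambda -> 0
print(symmetric_NE(u * s / k))    # ['D'] : still within lambda <= su/k
print(symmetric_NE(r * t / k))    # ['C'] : cooperation, lambda >= rt/k
\end{verbatim}
With these numbers the sketch reports defection as the symmetric NE for
$\lambda\leq su/k$ and cooperation for $\lambda\geq rt/k$, as derived above.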
Du et al.'s recent analysis \cite{Du} of the quantum PD, with players' access
to Eisert's two-parameter set of unitary operators, has shown an intriguing
structure in the game as a function of the amount of entanglement. The game
becomes classical when the entanglement vanishes.
In the set-up of Fig. (\ref{Fig7}) the quantity $\lambda$ behaves in a way
similar to the amount of entanglement in Du et al.'s analysis \cite{Du,Du2}.
But this set-up is devoid of the notion of entanglement and relies instead on
a different quantum aspect, one which is as much `quantum' in nature as the
phenomenon of entanglement is for qubit systems.
There is, however, a difference to be noticed between the set-up of Eisert et
al. and that of Fig. (\ref{Fig7}). Players' actions in Eisert et al.'s set-up
are quantum mechanical in nature in that they make their moves with quantum
operators. In the present set-up, by contrast, the players' actions are
entirely classical, consisting of opening or closing slits. In a sense this is
similar to the players' actions in Marinatto and Weber's scheme: there the
players possess quantum operators but apply them to an initial quantum state
with classical probabilities, so that the players' moves can be considered
classical as well. It can be said that, apart from the work of Eisert et al.,
the set-up of Fig. (\ref{Fig7}) is also motivated, to an almost equal extent,
by Marinatto and Weber's idea of playing a quantum version of a matrix game.
\section{\label{HullSect}Performing EPR type experiments to play a bi-matrix
game\label{EPR type expts}}
To address the question raised in Section (\ref{IntNewProp}), i.e. to what
extent a quantum game can be called `truly quantum', the following two
\emph{constraints} are suggested \cite{iqbalweigert}, which a quantization
scheme should respect:
(C1). In both classical and quantum version of the game the \emph{same} set of
moves should be made available to the players.
(C2). The players agree together on explicit expressions for their payoffs
which \emph{must} not be modified when introducing the quantized version of
the game.
With these constraints one can hope that only the nature of correlations,
existing between the objects the players receive, will decide whether the
resulting game is classical or quantum.
Consider a symmetric bi-matrix game between two players Alice and Bob with the
matrix representation:
\begin{equation}
\begin{array}
[c]{c}
\text{Alice}
\end{array}
\overset{
\begin{array}
[c]{c}
\text{Bob}
\end{array}
}{
\begin{array}
[c]{c}
S_{1}\\
S_{2}
\end{array}
\overset{
\begin{array}
[c]{cc}
S_{1} & S_{2}
\end{array}
}{
\begin{array}
[c]{cc}
(r,r) & (s,t)\\
(t,s) & (u,u)
\end{array}
}} \label{CorrelationMatrix}
\end{equation}
The mixed-strategy payoffs of the players can then be written as
\begin{align}
P_{A}(p,q) & =Kpq+Lp+Mq+N\nonumber\\
P_{B}(p,q) & =Kpq+Mp+Lq+N \label{CorrelClassPayoffs}
\end{align}
where the constants are $K=r-s-t+u$, $L=s-u$, $M=t-u$ and $N=u$ in terms of
the coefficients $r,s,t$ and $u$ of the bi-matrix (\ref{CorrelationMatrix}).
The NE-defining conditions are
\begin{align}
P_{A}(p^{\ast},q^{\ast})-P_{A}(p,q^{\ast}) & \geq0\nonumber\\
P_{B}(p^{\ast},q^{\ast})-P_{B}(p^{\ast},q) & \geq0 \label{CorrelClassNE}
\end{align}
For example, for the PD we may take $r=3,s=0,t=5$ and $u=1$, which reduce the
inequalities (\ref{CorrelClassNE}) to
\begin{align}
(p^{\ast}-p)(1+q^{\ast}) & \leq0\nonumber\\
(q^{\ast}-q)(1+p^{\ast}) & \leq0 \label{CorrelPDNE}
\end{align}
It produces $p^{\ast}=q^{\ast}=0$ or $(D,D)$ as the unique equilibrium.
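As a quick check (again illustrative rather than part of the analysis), the
sketch below uses the constants $K,L,M,N$ given above for $r=3$, $s=0$, $t=5$
and $u=1$ and confirms on a grid that $p^{\ast}=q^{\ast}=0$ satisfies the NE
conditions.
\begin{verbatim}
import numpy as np

# Mixed-strategy payoff P_A(p,q) = K p q + L p + M q + N for the bi-matrix
# (CorrelationMatrix); a direct expansion gives the constants below.
r, s, t, u = 3, 0, 5, 1                        # PD values used in the text
K, L, M, N = r - s - t + u, s - u, t - u, u    # K=-1, L=-1, M=4, N=1

def PA(p, q): return K*p*q + L*p + M*q + N
def PB(p, q): return K*p*q + M*p + L*q + N

# NE condition for A: (p* - p)(K q* + L) >= 0; with K = L = -1 this becomes
# (p* - p)(1 + q*) <= 0, forcing p* = 0.  Same for B by symmetry.
grid = np.linspace(0, 1, 101)
p_star = q_star = 0.0
print(all(PA(p_star, q_star) >= PA(p, q_star) for p in grid),
      all(PB(p_star, q_star) >= PB(p_star, q) for q in grid))   # True True
\end{verbatim}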
\subsection{Quantum correlation games}
The idea of a `correlation game' was introduced in Ref. \cite{iqbalweigert} to
put forward a scheme to play a quantum version of a bi-matrix game that
respects the constraints C1 and C2 of Section (\ref{EPR type expts}). Its
motivation comes from EPR type experiments performed on singlet states
involving \emph{correlations} of the measurement outcomes. In such experiments
Bell's inequalities \cite{AsherPeres} are well known to be the constraints --
derived under the \emph{principle of local causes} -- on correlations of
measurement outcomes of two-valued (dichotomic) variables. Truly quantum
correlations are non-local in character and violate the inequalities.
The two parties involved in the usual \ EPR type setting are recognized as the
players. Repeated measurements are performed on correlated pairs of objects by
the two players, each receiving one half.
Players Alice and Bob share a Cartesian coordinate system between them and
each player's move consists of deciding a direction in a given plane. For
Alice and Bob these are the $x$-$z$ and $y$-$z$ planes respectively.\ Call
$\alpha$ and $\beta$ the unit vectors representing the players' moves. Both
players have a choice between two different orientations i.e. $\alpha$ and $z$
for Alice and $\beta$ and $z$ for Bob. Each player measures the angular
momentum or spin of his/her respective half in one of two directions. Let the
vectors $\alpha$ and $\beta$ make angles $\theta_{A}$ and $\theta_{B}$,
respectively, with the $z$-axis.
To link the players' moves, represented now by angles $\theta_{A}$ and
$\theta_{B}$, to the usual probabilities $p$ and $q$ appearing in a bi-matrix
game, an invertible function $g$ is made public at the start of a game. The
$g$-function maps $[0,\pi]$ to $[0,1]$ and is used to translate the players'
moves into the probabilities $p$ and $q$.
The results of measurements performed on dichotomic variables may take only
the values $\pm1$. These are represented by $a,b$ and $c$ for the directions
$\alpha,\beta$ and the $z$-axis respectively. Correlations $\langle
ac\rangle,\langle cb\rangle$ and $\langle ab\rangle$ can then be found from
the measurement outcomes, where the two entries in a bracket represent the
players' chosen directions.
In a correlation experiment in which the $z$-axis is the common direction for
the players, the Bell's inequality\footnote{For perfectly anticorrelated pairs
the right hand side of the inequality is $1+\left\langle bc\right\rangle $.}
is written \cite{AsherPeres} as
\begin{equation}
\left| \left\langle ab\right\rangle -\left\langle ac\right\rangle \right|
\leq1-\left\langle bc\right\rangle \label{BellsInequality}
\end{equation}
The classical correlations corresponding to the above situation, when written
in terms of $\theta_{A}$ and $\theta_{B}$, are known \cite{AsherPeres} to be
invertible. This invertibility makes it possible to express $\theta_{A}$ and
$\theta_{B}$ in terms of the correlations $\langle ac\rangle$ and $\langle
cb\rangle$. The $g$-function then allows $\theta_{A}$ and $\theta_{B}$ to be
translated into $p$ and $q$, respectively. In effect the classical bi-matrix
payoffs are
\emph{re-expressed} in terms of the classical correlations $\langle ac\rangle$
and $\langle cb\rangle$.
One can now claim that the classical game is given \emph{by definition} in
terms of the correlations. The point of such a re-expression of the classical
game is that it opens the way to a `quantum' version of the game, which, of
course, arises when the correlations become quantum mechanical.
In the setting of a correlation game the players' payoffs involve only the
correlations $\langle ac\rangle$ and $\langle cb\rangle$, instead of the three
correlations $\langle ac\rangle,\langle cb\rangle$ and $\langle ab\rangle$
present in the inequality (\ref{BellsInequality}), when the $z$-axis is the
common direction between the players. As a result, Ref. \cite{iqbalweigert}
obtains `quantum' payoffs even when the correlations are local and satisfy
the inequality (\ref{BellsInequality}).
The motivation for introducing an EPR type setting for bi-matrix games is to
exploit quantum correlations to generate quantum payoffs, so that, when the
correlations are local, the classical game \emph{must} be reproduced. We show
below the possibility of such a connection through a different setting in
which the classical payoffs are \emph{always} obtained whenever the
correlations $\langle ac\rangle,\langle cb\rangle$ and $\langle ab\rangle$
satisfy Bell's inequality (\ref{BellsInequality}).
\subsection{A new approach towards defining a correlation game}
Consider an EPR type set-up to play a game between two players. The following
rules apply:
\begin{enumerate}
\item A player's move consists of defining a direction in space by
orientating a unit vector. However, this direction is not confined to only the
$x$-$z$ or $y$-$z$ planes. A player's choice of a direction can be
\emph{anywhere} in three-dimensional space. Therefore, Alice's move is to
define a unit vector $\alpha$ and, similarly, Bob's move is to define a unit
vector $\beta$.
\item The $z$-axis is shared between the players as the common direction.
\item On receiving a half of a correlated pair, a player measures its spin in
one of the two directions. For Alice these directions are $\alpha$ and $z$ and
for Bob these directions are $\beta$ and $z$.
\item Each player measures spin with \emph{equal} probability in his/her two directions.
\item Players agree together on \emph{explicit expressions} giving their
payoffs $P_{A}$ and $P_{B}$ in terms of all three correlations i.e.
\end{enumerate}
\begin{align}
P_{A} & =P_{A}(\langle ac\rangle,\langle cb\rangle,\langle ab\rangle
)\nonumber\\
P_{B} & =P_{B}(\langle ac\rangle,\langle cb\rangle,\langle ab\rangle)
\label{CorrelPayoffs}
\end{align}
A game defined by these rules eliminates the need for introducing the
$g$-functions. The rules are also consistent with the constraints C1 and C2
and the idea of a correlation game essentially retains its spirit.
\subsection{Defining correlation payoffs}
We now show a possible way to define the correlation payoffs
(\ref{CorrelPayoffs}) so that they reduce to the classical payoffs
(\ref{CorrelClassPayoffs}) whenever the correlations $\langle ab\rangle
,\langle ac\rangle$ and $\langle bc\rangle$ satisfy the inequality
(\ref{BellsInequality}).
Consider two quantities $\varepsilon$ and $\sigma$ defined as:
\begin{equation}
\varepsilon=\sqrt{3+\langle bc\rangle^{2}+2\langle ab\rangle\langle ac\rangle
},\text{ \ \ \ }\sigma=\sqrt{2(1+\langle bc\rangle)+\langle ab\rangle
^{2}+\langle ac\rangle^{2}} \label{A&B}
\end{equation}
The quantities $\varepsilon$ and $\sigma$ take only real values because
the correlations $\langle ac\rangle,\langle cb\rangle$ and $\langle ab\rangle$
are always in the interval $\left[ -1,1\right] $. Consider now the
quantities $(\varepsilon-\sigma)$ and $(\varepsilon+\sigma)$. By definition
$\varepsilon$ and $\sigma$ are non-negative, therefore, the quantity
$(\varepsilon+\sigma)$ always remains non-negative. It is observed that if
$0\leq$ $(\varepsilon-\sigma)$ then the correlations $\langle ac\rangle
,\langle cb\rangle$ and $\langle ab\rangle$ \emph{always} satisfy the
inequality (\ref{BellsInequality}). It is because if $0\leq(\varepsilon
-\sigma)$ then $0\leq(\varepsilon+\sigma)(\varepsilon-\sigma)=\varepsilon
^{2}-\sigma^{2}$. But $\varepsilon^{2}-\sigma^{2}=(1-\langle bc\rangle
)^{2}-\left| \langle ab\rangle-\langle ac\rangle\right| ^{2}$ so that
$\left| \langle ab\rangle-\langle ac\rangle\right| ^{2}\leq(1-\langle
bc\rangle)^{2}$ which results in the inequality (\ref{BellsInequality}). All
the steps in the proof can be reversed and it follows that whenever the
correlations $\langle ac\rangle,\langle cb\rangle$ and $\langle ab\rangle$
satisfy Bell's inequality, the quantity $(\varepsilon-\sigma)$ remains
non-negative.
For an input state satisfying the inequality (\ref{BellsInequality}) both the
quantities $(\varepsilon+\sigma)$ and $(\varepsilon-\sigma)$ are non-negative
and bounded above. Hence, it is possible to find two non-negative numbers
$\frac{(\varepsilon-\sigma)}{\max(\varepsilon-\sigma)}$ and $\frac
{(\varepsilon+\sigma)}{\max(\varepsilon+\sigma)}$ in the range $\left[
0,1\right] $ whenever the inequality (\ref{BellsInequality}) holds. Because
$0\leq\varepsilon,\sigma\leq\sqrt{6}$, we may take $\max(\varepsilon-\sigma
)=\sqrt{6}$ and $\max(\varepsilon+\sigma)=2\sqrt{6}$ as the normalizing
bounds. The numbers $(\varepsilon-\sigma)/\sqrt{6}$ and $(\varepsilon
+\sigma)/(2\sqrt{6})$ are then in the range $[0,1]$ when the inequality holds.
These numbers are also \emph{independent} of each other.
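These definitions are easy to explore numerically. The sketch below takes an
arbitrary set of correlation values satisfying the inequality
(\ref{BellsInequality}), computes $\varepsilon$, $\sigma$ and the numbers
$(\varepsilon-\sigma)/\sqrt{6}$ and $(\varepsilon+\sigma)/(2\sqrt{6})$, and
also checks the identity $\varepsilon^{2}-\sigma^{2}=(1-\langle
bc\rangle)^{2}-(\langle ab\rangle-\langle ac\rangle)^{2}$ used in the argument
above; the particular numerical values are illustrative only.
\begin{verbatim}
import numpy as np

def eps_sigma(ab, ac, bc):
    """epsilon and sigma of Eq. (A&B) for given correlations <ab>, <ac>, <bc>."""
    eps = np.sqrt(3 + bc**2 + 2*ab*ac)
    sig = np.sqrt(2*(1 + bc) + ab**2 + ac**2)
    return eps, sig

def bell_holds(ab, ac, bc):
    return abs(ab - ac) <= 1 - bc

# An arbitrary local set of correlations satisfying Bell's inequality.
ab, ac, bc = 0.3, -0.2, 0.1
eps, sig = eps_sigma(ab, ac, bc)
p = (eps - sig) / np.sqrt(6)
q = (eps + sig) / (2*np.sqrt(6))
print(bell_holds(ab, ac, bc), 0 <= p <= 1, 0 <= q <= 1)
# The identity eps^2 - sig^2 = (1 - <bc>)^2 - (<ab> - <ac>)^2:
print(np.isclose(eps**2 - sig**2, (1 - bc)**2 - (ab - ac)**2))
\end{verbatim}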
The above argument paves the way to associate a pair $(p,q)$ of independent
numbers to the players' moves $(\alpha,\beta)$, that is
\begin{equation}
p=p(\alpha,\beta)\text{, \ \ \ \ \ \ }q=q(\alpha,\beta) \label{ProbDirLink}
\end{equation}
where the numbers $p,q$ are in the interval $[0,1]$ when the input states do
not violate the inequality (\ref{BellsInequality}) for any direction pair
($\alpha,\beta$). The pair $(p,q)$ is related to the two directions as
\begin{equation}
\alpha=\alpha(p,q)\text{, \ \ \ \ \ \ }\beta=\beta(p,q) \label{ProbDirLink1}
\end{equation}
It can be noticed that more than one pair $(\alpha,\beta)$ of directions may
correspond to a given pair of numbers. The converse is not true for known
input states; that is, for known input states, only one pair $(p,q)$ can be
obtained from a given pair $(\alpha,\beta)$ of directions.
Players' payoffs can now be re-expressed by making the replacements:
\begin{equation}
p(\alpha,\beta)\sim(\varepsilon-\sigma)/\sqrt{6},\text{ \ \ }q(\alpha
,\beta)\sim(\varepsilon+\sigma)/2\sqrt{6} \label{Replacements}
\end{equation}
which lead to re-writing the classical payoffs (\ref{CorrelClassPayoffs}) as
\begin{align}
P_{A}(\alpha,\beta) & =Kp(\alpha,\beta)q(\alpha,\beta)+Lp(\alpha
,\beta)+Mq(\alpha,\beta)+N\nonumber\\
P_{B}(\alpha,\beta) & =Kp(\alpha,\beta)q(\alpha,\beta)+Mp(\alpha
,\beta)+Lq(\alpha,\beta)+N
\end{align}
or more explicitly:
\begin{align}
P_{A}(\alpha,\beta) & =\frac{K}{12}(\varepsilon^{2}-\sigma^{2})+\frac
{L}{\sqrt{6}}(\varepsilon-\sigma)+\frac{M}{2\sqrt{6}}(\varepsilon
+\sigma)+N\nonumber\\
P_{B}(\alpha,\beta) & =\frac{K}{12}(\varepsilon^{2}-\sigma^{2})+\frac
{M}{\sqrt{6}}(\varepsilon-\sigma)+\frac{L}{2\sqrt{6}}(\varepsilon+\sigma)+N
\label{CorrelQpayoffs}
\end{align}
This expression shows that a player's payoff now depends on the direction s/he
has chosen. The payoffs (\ref{CorrelQpayoffs}) are obtained under the
constraints C1 and C2 and are functions of all the three correlations.
The relations (\ref{ProbDirLink}) can also be imagined as follows. When Alice
decides on a direction $\alpha$ in space, it corresponds to a curve in the
$p$-$q$ plane. Similarly, Bob's decision on the direction $\beta$ defines
another curve in the $p$-$q$ plane. The relations (\ref{Replacements}) ensure
that only one pair $(p,q)$ can then be obtained as the intersection between
the two curves.
The set-up ensures that for input product states all of the players' moves
$(\alpha,\beta)$ result in correlation payoffs (\ref{CorrelQpayoffs})
identical to the classical payoffs (\ref{CorrelClassPayoffs}). For
such input states the relations (\ref{Replacements}) give the numbers $p,q$ in
the interval $[0,1]$, which can then be interpreted as probabilities. However,
for input states for which the inequality (\ref{BellsInequality}) is violated
for some directions, a pair $(p,q)$ with $p,q\in\lbrack0,1]$ \emph{cannot} be
associated with those directions. This is because for entangled states there
exist
pairs of directions for which the corresponding quantity $(\varepsilon
-\sigma)$ becomes negative. For those directions the correlation payoffs
(\ref{CorrelQpayoffs}) would generate results that can \emph{only} be
understood, within the structure of classical payoffs
(\ref{CorrelClassPayoffs}), by invoking negative probabilities.
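The breakdown described above can be exhibited concretely. For a singlet state
the standard quantum-mechanical correlation of spin measurements along unit
vectors $n_{1}$ and $n_{2}$ is $-n_{1}\cdot n_{2}$; the sketch below picks one
illustrative pair of coplanar directions for which the inequality
(\ref{BellsInequality}) is violated and shows that $(\varepsilon-\sigma)$
indeed becomes negative there.
\begin{verbatim}
import numpy as np

def singlet_corr(n1, n2):
    """Singlet-state correlation <(sigma.n1)(sigma.n2)> = -n1.n2."""
    return -np.dot(n1, n2)

def unit(theta):
    """Unit vector at angle theta from the z-axis, in the x-z plane."""
    return np.array([np.sin(theta), 0.0, np.cos(theta)])

z = np.array([0.0, 0.0, 1.0])
alpha, beta = unit(np.pi/2), unit(3*np.pi/4)    # an illustrative move pair
ab = singlet_corr(alpha, beta)
ac = singlet_corr(alpha, z)
bc = singlet_corr(beta, z)

eps = np.sqrt(3 + bc**2 + 2*ab*ac)
sig = np.sqrt(2*(1 + bc) + ab**2 + ac**2)
print(abs(ab - ac) <= 1 - bc)   # False: the inequality, as written, is violated
print(eps - sig)                # negative, so p = (eps - sig)/sqrt(6) < 0
\end{verbatim}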
\subsection{Nash equilibria of quantum correlation games}
Because the players' moves consist of defining directions in space, the Nash
inequalities are
\begin{align}
P_{A}(\alpha_{0},\beta_{0})-P_{A}(\alpha,\beta_{0}) & \geq0\nonumber\\
P_{B}(\alpha_{0},\beta_{0})-P_{B}(\alpha_{0},\beta) & \geq0
\label{NEdirections}
\end{align}
where the pair $(\alpha_{0},\beta_{0})$ corresponds to the pair $(p^{\ast
},q^{\ast})$ via the relations (\ref{Replacements}). The inequalities
(\ref{NEdirections}) are the same as the inequalities (\ref{CorrelClassNE}),
except that they are re-expressed in terms of the directions.
When the correlations in the input state correspond to an entangled state, the
payoff relations (\ref{CorrelQpayoffs}) can lead to the disappearance of the
classical equilibria. This can be seen, for example, by considering the Nash
inequalities for the Prisoners' Dilemma (\ref{CorrelPDNE}). Let the
directional pair $(\alpha_{D},\beta_{D})$ correspond to the equilibrium
$(D,D)$, that is, the inequalities (\ref{NEdirections}) are
\begin{align}
P_{A}(\alpha_{D},\beta_{D})-P_{A}(\alpha,\beta_{D}) & \geq0\nonumber\\
P_{B}(\alpha_{D},\beta_{D})-P_{B}(\alpha_{D},\beta) & \geq0 \label{NEDD}
\end{align}
Assume the players receive input states that are entangled. There will now
exist pairs of players' moves $\alpha$ and $\beta$ that would make the
quantity $(\varepsilon-\sigma)<0$. The pair $(\alpha,\beta)$ will not
correspond to a point in the $p$-$q$ plane where $p,q\in\lbrack0,1]$.
It can also be noticed that for entangled input states the directional pair
$(\alpha_{D},\beta_{D})$ does not remain a NE. This is because the pair
$(\alpha_{D},\beta_{D})$ is a NE \emph{only} if every directional pair
$(\alpha,\beta)$ the players may choose corresponds to a point in the
$p$-$q$ plane where $p,q\in\lbrack0,1]$.
For entangled input states, however, there exist pairs of players' moves
$(\alpha,\beta)$ that do not correspond to points in the $p$-$q$ plane with
$p,q\in\lbrack0,1]$. Hence, the directional pair $(\alpha_{D},\beta_{D})$ does
not remain a NE in the quantum game. Interestingly, the disappearance of the
classical equilibrium now becomes directly \textit{linked} with the violation
of the inequality (\ref{BellsInequality}) by the correlations in the input states.
\subsection{Quantum game as another classical game?}
Coming back to the questions raised in the Section (\ref{IntNewProp}), we now
try to construct a classical bi-matrix game corresponding to a quantum game
resulting from the payoff relations (\ref{CorrelQpayoffs}). The classical game
is assumed to have the \emph{same} general structure of players' payoffs as
given in Eqs. (\ref{CorrelClassPayoffs}). This assumption derives from the
hope that the quantum game, corresponding to correlations in the input states
that violate the inequality (\ref{BellsInequality}), is also equivalent to
another symmetric bi-matrix game. It is shown below that such a construction
fails.
Suppose the correlations in the input states violate the inequality
(\ref{BellsInequality}). A pair of directions ($\alpha,\beta$) can now be
found for which Bell's inequality is violated. When Alice's move is to select
the direction $\alpha$, her payoff, given by Eqs. (\ref{CorrelQpayoffs}), is
\begin{equation}
P_{A}(\alpha,\beta)=K^{\prime}pq+L^{\prime}p+Mq+N \label{Alice'sNewPayoff}
\end{equation}
where $K^{\prime}=-K$ and $L^{\prime}=-L$ and $p,q\in\lbrack0,1]$. Assuming
that the constants $K^{\prime},L^{\prime},M,$ and $N$ define a `new' symmetric
bi-matrix game, Bob's payoff would be
\begin{equation}
P_{B}(p,q)=K^{\prime}pq+Mp+L^{\prime}q+N \label{Bob'sNewPayoff1}
\end{equation}
But in fact (\ref{Bob'sNewPayoff1}) is not what is obtained as Bob's payoff in
our quantum game when he chooses the direction $\beta$. Bob's payoff is, in
fact,
given as
\begin{equation}
P_{B}(p,q)=K^{\prime}pq+M^{\prime}p+Lq+N \label{Bob'sNewPayoff2}
\end{equation}
which may not necessarily coincide with the payoff given in the Eq.
(\ref{Bob'sNewPayoff1}). Hence, the game resulting from the presence of
quantum correlations in the input states \emph{cannot} be simply explained as
another classical symmetric bi-matrix game: a game obtained by defining new
coefficients of the matrix involved. Players' payoffs in the quantum game are
found to reside outside the structure of payoffs in a classical symmetric
bi-matrix game. The payoffs can be explained within this structure \emph{only}
by invoking negative probabilities.
An asymmetric bi-matrix game can, of course, be constructed having solutions
identical to those of the quantum game. In fact, for \emph{any} quantum game a
classical model can \emph{always} be constructed that summarizes the complete
situation and has solutions identical to the quantum ones, as far as the
players' payoffs are concerned. It would be a model that relates players'
moves to their payoffs in accordance with the usual approach in game theory.
But constructing such a model is not an answer to our original question: how
are the solutions of a game affected by the presence of quantum correlations
between the physical objects used to implement the game? This is because the
question can then simply be rephrased as: what if the modified classical game
is played with physical objects having quantum correlations?
\subsection{Discussion}
The idea of a correlation game is about re-expression of payoffs of a
classical bi-matrix game in terms of correlations of measurement outcomes made
on pairs of disintegrating particles. The measurement outcomes are dichotomic
variables and their correlations are obtained by averaging over a large number
of pairs. Bell's inequalities represent constraints on these correlations
obtained under the principle of local causes. A re-expression of the classical
payoffs of a bi-matrix game in terms of correlations opens the way to
explicitly see the effects of quantum correlations on the solutions of the game.
We have proposed a new setting where two players play a bi-matrix game by
repeatedly performing measurements on correlated pairs of objects. The setting
is motivated by EPR type experiments performed on singlet states. On receiving
a half of a pair, a player makes a measurement of its spin in one of the two
directions available to him/her. The measurements are performed with
\emph{equal probability} in the two directions. Both players share a common
direction and defining the \emph{other} direction is a player's \emph{move}.
We show how within this set-up a correlation version of a symmetric bi-matrix
game can be defined. The correlation game shows some interesting properties.
For example the correlation game reduces to the corresponding classical game
when the correlations in the input states are local and do not violate the
Bell's inequality (\ref{BellsInequality}). However, when the inequality is
violated, the stronger correlations generate results that can be understood,
within the structure of classical payoffs in a symmetric bi-matrix game,
\emph{only} by invoking negative probabilities. It is shown that a classical
NE is affected when the game is played with input states having quantum
correlations. The proposed set-up also provides a new perspective on the
possibility of reformulating the Bell's inequalities in terms of a bi-matrix
game played between two spatially-separated players.
\chapter{Conclusions}
To conclude, a summary of the results developed in this thesis is presented
below. Results 1 -- 8 refer to Eisert et al.'s and Marinatto and Weber's
schemes for the quantization of matrix games.
\begin{enumerate}
\item In a population engaged in the symmetric bi-matrix classical game of
Prisoners' Dilemma, an invasion of a classical ESS is possible by mutants
exploiting Eisert's two-parameter set of quantum strategies. We presented an
example of an \emph{asymmetric} quantum game between two players in which a
strategy pair can be an ESS for either the classical or the quantum version of
the game, even when it remains a NE in both versions. This shows that
quantization can change the evolutionary stability of Nash equilibria in
certain asymmetric bi-matrix games.
\item The ESS concept was originally defined for symmetric bi-matrix contests.
We showed that quantization can also change the evolutionary stability of a NE
in certain types of \emph{symmetric} bi-matrix games. This immediately makes
the study of quantum games relevant to evolutionary game theory, and
conversely. Hence, quantization not only leads to new equilibria but also
presents itself as \emph{another} refinement notion of the NE concept.
\item As for pure strategies, the evolutionary stability of \emph{mixed}
strategies can also change as a symmetric bi-matrix game is switched from its
classical to its quantum form. However, for mixed strategies more general
initial quantum states are required.
\item Rock-Scissors-Paper (RSP) is a two-player three-strategy game. We
consider a slightly modified form of RSP played in its classical version. A
mixed NE exists that is not an ESS. We find a quantum form of the \emph{same}
game in which the classical NE becomes an ESS. The quantum form is obtained
when the game is played with an initial \emph{entangled} state.
\item Quantization can change the properties of equilibria of replicator
dynamics. We consider a game played in a population setting with the
underlying process of replicator dynamics. We found a `quantum form' of the
replicator equations, which retain their Lotka-Volterra form. The effects of
quantization of the game on a saddle or a center of the dynamics are then
studied. It is found that a saddle (center) in the classical game can be a
center (saddle) in a certain quantum form of the game. A saddle or center in a
classical (quantum) game cannot, however, be an attractor or a repeller in a
quantum (classical) form of the game.
\item A symmetric cooperative game played by three players is analyzed in its
classical and quantum forms. In the classical form of this game forming a
coalition gives an advantage to the players, who are therefore motivated to do
so. In the quantum form of the game, however, an initial quantum state can be
prepared by the arbiter such that forming the \emph{same} coalition gives no
advantage.
\item A comparison of the NE in the Cournot game with the backwards-induction
outcome in the classical Stackelberg duopoly shows that having Alice (who acts
first) know that Bob (who acts second) will know Alice's move hurts Bob. In
fact, in the classical version of the Stackelberg game Bob should not believe
that Alice has chosen her Stackelberg quantity. We have shown that there can
be a quantum version of the Stackelberg duopoly where Bob is \emph{not} hurt
even if he knows the quantity chosen by Alice. The backwards-induction outcome
of this quantum game is the \emph{same} as the NE in the classical Cournot
game, where decisions are made simultaneously and there is no such information
that hurts a player.
\item In infinitely repeated versions of the classical game of Prisoners'
Dilemma it is established \cite{Gibbons} that cooperation can occur in every
stage of a subgame-perfect outcome (SGPO), even though the only NE in the
stage game is defection. We show how cooperation in the two-stage Prisoners'
Dilemma can be achieved by quantum means. In the two-stage Prisoners' Dilemma,
obtaining an SGPO where the players cooperate in the first stage is a result
with no classical analogue. We have also introduced a possible way to study
the
concept of SGPO in repeated quantum games.
\item In the standard set-ups to play a quantum game a measure of entanglement
for a qubit system is introduced, and the quantum version of the game reduces
to the classical one when the measure becomes zero. We suggested a set-up that
exploits another resource from quantum physics, i.e. the association of waves
with travelling material objects like electrons. We show how in this set-up
such an association of waves can lead to a non-classical equilibrium in the
Prisoners' Dilemma. With the associated wavelength approaching zero the
quantum aspect disappears and the classical game is reproduced.
\item Playing a symmetric bi-matrix game is usually physically implemented by
sharing pairs of `objects' between two players. We proposed a new setting that
explicitly shows the effects of quantum correlations between the pairs on the
structure of the payoff relations and the `solutions' of the game. The setting is
based on a re-expression of the game such that the players play the classical
game \textit{only} if their moves are performed on pairs of objects
whose correlations satisfy Bell's inequalities. When the players receive
pairs with quantum correlations, the resulting game \textit{cannot} be
considered \textit{another} classical symmetric bi-matrix game. Moreover, the Nash
equilibria of the game are found to be determined by the nature of the correlations.
\end{enumerate}
\end{document}
\begin{document}
\begin{CJK}{UTF8}{gkai}
\title[Asymptotic stability]{Asymptotic stability for a free boundary tumor model with a periodic supply of external nutrients}
\author[Huang]{Yaodan Huang}
\address[1]{School of Mathematics, Sun Yat-Sen University, Guangzhou, 510275, China}
\email{[email protected]}
\thanks{Corresponding author: Yaodan Huang}
\thanks{Keywords: Free boundary problems; Tumor growth; Periodic solution; Nonlinear stability}
\thanks{2020 Mathematics Subject Classification: 35B10, 35B35, 35R35, 92C37}
\maketitle
\begin{abstract}
For tumor growth, morphological instability provides a mechanism for invasion via tumor fingering and fragmentation. This work considers the asymptotic stability of a free boundary tumor model with a periodic supply of external nutrients. The model consists of two elliptic equations describing, respectively, the concentration of nutrients and the distribution of the internal pressure in the tumor tissue. The effect of the parameter $\mu$, which represents a measure of mitosis, on the morphological stability is taken into account. It was recently established in \cite{HZH3} that there exists a critical value $\mu_\ast$ such that the unique spherical periodic positive solution is linearly stable for $0<\mu<\mu_\ast$ and linearly unstable for $\mu>\mu_\ast$. In this paper, we further prove that the spherical periodic positive solution is asymptotically
stable for $0<\mu<\mu_\ast$ for the fully nonlinear problem.
\end{abstract}
\section{Introduction}
Within the past several decades, a number of mathematical models in the form of free boundary problems have been developed and studied with the aim of describing the evolution of carcinomas. A variety of modeling choices can be made to explore the mechanisms of tumor growth, such as accounting for several types of cells (see, for instance, \cite{WK2,TP}) or for the impact of inhibitors (cf. \cite{BC1}). Accordingly, many significant results have been obtained on the rigorous mathematical analysis and numerical simulation of such tumor models; see \cite{CLN,C1,CE2,EM3,FL,HHH2,HHH3,HZH1,HZH2,HZH4,LL,SHZ,W0,WC2,WZ1,WZ,ZH,ZH2,ZX,ZC} and the references given there. Lowengrub et al. \cite{LFJC} provided a systematic survey of tumor model studies.
Recent studies indicate that, since human beings and many animals have regular living and feeding activities related to their biological rhythm,
the concentration of nutrients in their blood may change periodically over time \cite{FD}. Motivated by this observation, in this paper we shall consider a free boundary tumor model describing the growth of tumors with a periodic supply of external nutrients:
\begin{eqnarray}
\Delta\sigma & =& \sigma \ \ \ \ \ \qquad \ \ \ \ \mbox{in} \ \Omega(t),\ \ t>0,\label{1.2} \\
-\Delta p &=& \mu(\sigma-\widetilde{\sigma}) \ \ \ \ \ \mbox{in}\ \Omega(t),\ \ t>0, \label{1.4}\\
\sigma &=& \phi(t) \ \ \ \qquad \ \ \mbox{on}\ \partial\Omega(t),\ \ t>0,\label{1.3} \\
p &=& \gamma\kappa \ \ \ \ \qquad \ \ \ \mbox{on}\ \partial\Omega(t),\ \ t>0,\label{1.5}\\
V_n&=&-\frac{\partial p}{\partial \vec{n}} \ \qquad \ \ \ \mbox{on}\ \partial\Omega(t),\ \ t>0.\label{1.6}
\end{eqnarray}
Here $\Omega(t)\subseteq\mathbb{R}^3$ is an a priori unknown bounded domain occupied by the tumor at time $t$, and $\sigma=\sigma(x,t)$ and $p=p(x,t)$ are unknown functions representing the nutrient concentration in the tumor region and the pressure between tumor cells, respectively. $V_n$ is the velocity of the free boundary in the direction $\vec{n}$, the unit outward normal. $\mu$, $\widetilde{\sigma}$ and $\gamma$ are positive constants, among which $\mu$ measures the aggressiveness of the tumor, $\widetilde{\sigma}$ is the threshold nutrient concentration for apoptosis, and $\gamma$ represents the surface tension coefficient of the tumor surface $\partial\Omega(t)$. $\kappa$ is the mean curvature of $\partial\Omega(t)$, whose sign is fixed by the convention that convex hypersurfaces are associated with
positive mean curvature; $\kappa=1/R(t)$ if $\Omega(t)$ is a ball of radius $R(t)$. Besides, $\phi(t)$ accounts for the external concentration of nutrients which we shall assume is a smooth, positive, periodic function with period $T$.
The problem (\ref{1.2})-(\ref{1.6}) is a natural extension of the model proposed by Byrne and Chaplain in \cite{BC1}: the action of a periodic supply of external nutrients is considered, so that (\ref{1.3}) holds. In this model, the quasi-stationary approximation of the
diffusion equation (\ref{1.2}), combined
with the boundary condition (\ref{1.3}), is used to describe the distribution of the nutrient concentration. Accordingly, the problem (\ref{1.2})-(\ref{1.6}) is called the \emph{quasi-stationary model}. In addition, assuming that the extracellular matrix is a porous medium where Darcy's law $\vec{V}=-\nabla p$ holds, the law of conservation of mass $\mbox{div}\vec{V}=\mu(\sigma-\widetilde{\sigma})$ yields the equation (\ref{1.4}). Equation (\ref{1.6}) is the classical Stefan condition for the free boundary $\partial\Omega(t)$. Finally, cell-to-cell adhesiveness leads to the relation (\ref{1.5}) (see \cite{B,BC3,G2}). If the stationary diffusion equation (\ref{1.2}) is replaced by its non-stationary version
\begin{equation*}\label{0.67}
c\sigma_t=\Delta\sigma-\sigma \qquad \qquad \ \mbox{in} \ \Omega(t),\ \ t>0,
\end{equation*}
where the positive constant $c=T_{\mbox{diffusion}}/T_{\mbox{growth}}$ is the ratio of the nutrient diffusion time scale, $T_{\mbox{diffusion}}\approx1\,\mbox{min}$, to the tumor-cell doubling time scale, $T_{\mbox{growth}}\approx1\,\mbox{day}$, so that $c\approx1/1440\ll1$ (see \cite{BC1}), then the corresponding version of the problem (\ref{1.2})-(\ref{1.6}) is called the \emph{fully non-stationary model}.
If the external concentration of nutrients is assumed to be constant, i.e., the boundary condition (\ref{1.3}) reduces to $\sigma=\overline{\sigma}=\mbox{const.}$, rigorous mathematical analysis, including existence and stability theorems, has been established. For the fully non-stationary model, Friedman and Reitich \cite{FR1} proved that under the assumption $0<\widetilde{\sigma}<\overline{\sigma}$ there exists a unique radially symmetric stationary solution $(\sigma_S(r),p_S(r),R_S)$. In \cite{FR3} they also proved that there exists a family of symmetry-breaking bifurcation branches of stationary solutions bifurcating from this unique radial stationary solution $(\sigma_S(r),p_S(r),R_S)$.
Subsequently, it was proved in \cite{BF} that $(\sigma_S(r),p_S(r),R_S)$ is asymptotically stable with respect to non-radial perturbations for $\mu$ sufficiently small. This work was refined by Friedman and Hu \cite{FH1,FH2}: they determined the threshold value $\mu^\ast$ such that for $0<\mu<\mu^\ast$ the radially symmetric stationary solution is asymptotically stable under small non-radial perturbations, while for $\mu>\mu^\ast$ it is unstable. Later on, Cui and Escher extended the asymptotic stability result to the general case in which the nutrient consumption rate and the tumor-cell proliferation rate are general increasing functions. They found a positive critical value $\gamma^\ast$ of the surface tension coefficient in (\ref{1.5}) at which the radial stationary solution changes from instability to stability under non-radial perturbations, for the quasi-stationary model in \cite{CE1} and for the fully non-stationary model with small $c$ in \cite{C3}, respectively.
In this paper, we study the quasi-stationary model (\ref{1.2})-(\ref{1.6}) with the nutrient supply given by a periodic function $\phi(t)$. We aim to determine how the tumor evolves as time goes to infinity under the effect of this periodic external nutrient supply. To give a precise statement of our main result, let us make some preparations. By a rescaling if necessary, we take $\gamma=1$. It is not difficult to verify (see also \cite{HX}) that the radially symmetric $T$-periodic positive solution of the problem (\ref{1.2})-(\ref{1.6}), which we denote by $(\sigma_\ast(r,t),p_\ast(r,t),R_\ast(t))$,
is of the form
\begin{equation}\label{2.14}
\sigma_\ast(r,t)=\phi(t)\frac{R_\ast^{1/2}(t)}{I_{1/2}(R_\ast(t))}\frac{I_{1/2}(r)}{r^{1/2}}, \ \ \ 0<r<R_\ast(t),
\end{equation}
\begin{equation}\label{2.15}
p_\ast(r,t)=-\mu\sigma_\ast(r,t)+\frac{1}{6}\mu\widetilde{\sigma}r^{2}+\frac{1}{R_\ast(t)}+\mu\phi(t)-\frac{1}{6}\mu\widetilde{\sigma}R_\ast^{2}(t), \ \ \ 0<r<R_\ast(t),
\end{equation}
and $R_\ast(t)$ satisfies
\begin{equation}\label{2.16}
\frac{dR_\ast(t)}{dt}=\mu R_\ast(t)\Big\{\phi(t)P_0(R_\ast(t))-\frac{\widetilde{\sigma}}{3}\Big\},
\end{equation}
where $I_n(r)$ is the modified Bessel function of order $n$ and $P_0(r)$ is defined by (\ref{2.1}).
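For the reader's orientation, we note how (\ref{2.16}) arises; this is only a sanity check and follows the standard argument. Using $\frac{d}{dr}\big(r^{3/2}I_{3/2}(r)\big)=r^{3/2}I_{1/2}(r)$ and (\ref{2.14}),
\begin{equation*}
\int_{B_{R_\ast(t)}}\sigma_\ast\,dx
=4\pi\phi(t)\frac{R_\ast^{1/2}(t)}{I_{1/2}(R_\ast(t))}\int_0^{R_\ast(t)}r^{3/2}I_{1/2}(r)\,dr
=4\pi\phi(t)R_\ast^{3}(t)P_0(R_\ast(t)),
\end{equation*}
while integrating (\ref{1.4}) over $B_{R_\ast(t)}$ and using (\ref{1.6}) gives
\begin{equation*}
4\pi R_\ast^{2}(t)\frac{dR_\ast(t)}{dt}
=-\int_{\partial B_{R_\ast(t)}}\frac{\partial p_\ast}{\partial r}\,dS
=\mu\int_{B_{R_\ast(t)}}(\sigma_\ast-\widetilde{\sigma})\,dx
=4\pi\mu R_\ast^{3}(t)\Big\{\phi(t)P_0(R_\ast(t))-\frac{\widetilde{\sigma}}{3}\Big\},
\end{equation*}
which is exactly (\ref{2.16}).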
As was analyzed in \cite{WX}, for $\frac{1}{T}\int_0^T\phi(t)dt>\widetilde{\sigma}$, there exists a unique $T$-periodic positive solution $R_\ast(t)$ of the equation (\ref{2.16}). Substituting the unique solution $R_\ast(t)$ into the expressions (\ref{2.14})-(\ref{2.15}), one finds that the radially symmetric $T$-periodic positive solution of the problem (\ref{1.2})-(\ref{1.6}) is uniquely
determined.
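As an aside, the periodic orbit of (\ref{2.16}) is easy to observe numerically. The following minimal sketch is not part of the analysis; all parameter values and the sample supply $\phi(t)$ (which satisfies $\frac{1}{T}\int_0^T\phi(t)dt>\widetilde{\sigma}$) are hypothetical. It integrates (\ref{2.16}) and shows that $R(t)$ settles on a $T$-periodic profile:
\begin{verbatim}
# Illustrative only: integrate (2.16) for a hypothetical periodic supply
# phi(t) and watch R(t) converge to a T-periodic orbit.
import numpy as np
from scipy.integrate import solve_ivp

T = 1.0                      # period of the nutrient supply
mu, sigma_tilde = 1.0, 0.5   # aggressiveness and apoptosis threshold (made up)

def phi(t):                  # smooth positive T-periodic supply, mean > sigma_tilde
    return 1.0 + 0.3 * np.sin(2.0 * np.pi * t / T)

def P0(r):                   # P_0(r) = coth(r)/r - 1/r^2, cf. (2.2)
    return 1.0 / (r * np.tanh(r)) - 1.0 / r**2

def rhs(t, R):               # right-hand side of (2.16)
    return mu * R * (phi(t) * P0(R) - sigma_tilde / 3.0)

sol = solve_ivp(rhs, (0.0, 50.0 * T), [1.0], dense_output=True, rtol=1e-8)
print(sol.sol(49.0 * T)[0], sol.sol(50.0 * T)[0])   # nearly equal values
\end{verbatim}
After a transient, $R(49T)$ and $R(50T)$ agree to high accuracy, consistent with convergence to the unique periodic solution $R_\ast(t)$.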
Let us introduce the concept of linear stability/instability and asymptotic stability of the periodic solution $(\sigma_\ast(r,t),p_\ast(r,t),R_\ast(t))$. By \emph{linear stability/instability}, we mean: \\
Linearize the system (\ref{1.2})-(\ref{1.6}) at $(\sigma_\ast(r,t),p_\ast(r,t),R_\ast(t))$ by writing
\begin{equation*}
\begin{split}
\sigma(r,\theta,\varphi,t)=\sigma_\ast(r,t)+\varepsilon w(r,\theta,\varphi,t), \ \ \ \ p(r,\theta,\varphi,t)=p_\ast(r,t)+\varepsilon q(r,\theta,\varphi,t),
\end{split}
\end{equation*}
$$\partial\Omega(t):\ r=R_\ast(t)+\varepsilon\rho(\theta,\varphi,t),$$
and collect only the $\varepsilon$-order terms. Since the original problem is translation invariant, we say that the periodic solution $(\sigma_\ast(r,t),p_\ast(r,t),R_\ast(t))$ is linearly stable in the sense that for any initial data $\rho|_{t=0}=\rho_0(\theta,\varphi)$ and $w|_{t=0}=w_0(r,\theta,\varphi)$,
\begin{equation*}
\Big|\rho(\theta,\varphi,t)-\sum_{m=-1}^1a_mY_{1,m}(\theta,\varphi)\Big|\leq Ce^{-\delta t}, \ \ \ \ t>\overline{t}
\end{equation*}
for some constants $a_m$, $\delta>0$ and $\overline{t}>0$. The periodic solution is said to be linearly unstable if it is not linearly stable.
By \emph{asymptotic stability}, we mean the following:\\
Given any initial data
\begin{equation}\label{0.68}
\begin{split}
\partial\Omega(0):\ r=R_0(\theta,\varphi)=R_\ast(0)+\varepsilon\rho_0(\theta,\varphi),\\ \sigma|_{t=0}=\sigma_0(r,\theta,\varphi)=\sigma_\ast(r,0)+\varepsilon w_0(r,\theta,\varphi),
\end{split}
\end{equation}
if $|\varepsilon|$ is sufficiently small, then there exists a solution to (\ref{1.2})-(\ref{1.6}) for all $t>0$, and
\begin{equation*}
\partial\Omega(t)\ \ \ \mbox{behaves\ like} \ \ \ \partial B_{R_\ast(t)}(a)=\big\{x:|x-a|=R_\ast(t)\big\}, \ \ \ t>\overline{t}
\end{equation*}
for some center $a$ and $\overline{t}>0$.
Set
\begin{equation}\label{0.1}
\mu_\ast=\frac{\int_0^T\frac{6}{R^3_\ast(t)}dt}{-\frac{\widetilde{\sigma}}{2}\int_0^T\left[R_\ast(t)\frac{I_3(R_\ast(t))}{I_2(R_\ast(t))}
-R_\ast(t)\frac{I_0(R_\ast(t))}{I_1(R_\ast(t))}+2\right]dt}>0, \ \ \ (\mbox{two-space\ dimension}),
\end{equation}
or,
\begin{equation}\label{0.16}
\mu_\ast=\frac{\int_0^T\frac{4}{R^3_\ast(t)}dt}{\int_0^T\frac{\widetilde{\sigma}}{3}R^2_\ast(t)\left[P_1(R_\ast(t))-P_2(R_\ast(t))\right]dt}>0,\ \ \ \ (\mbox{three-space\ dimension}).
\end{equation}
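To give a feel for the size of $\mu_\ast$ in (\ref{0.16}), the following sketch (purely illustrative; the periodic profile used for $R_\ast(t)$ is a stand-in, not computed from the model) approximates the two integrals by the trapezoid rule, with $P_1$ and $P_2$ evaluated through the modified Bessel functions as in (\ref{2.1}):
\begin{verbatim}
# Hypothetical sketch: approximate mu_* from (0.16) by the trapezoid rule,
# given samples of a (made-up) periodic radius R_*(t) on one period [0, T].
import numpy as np
from scipy.special import iv   # modified Bessel function I_nu

def P(n, r):                   # P_n(r) = I_{n+3/2}(r) / (r I_{n+1/2}(r)), cf. (2.1)
    return iv(n + 1.5, r) / (r * iv(n + 0.5, r))

T, sigma_tilde = 1.0, 0.5
t = np.linspace(0.0, T, 400)
R_star = 3.0 + 0.2 * np.sin(2.0 * np.pi * t / T)    # stand-in periodic radius

num = np.trapz(4.0 / R_star**3, t)
den = np.trapz((sigma_tilde / 3.0) * R_star**2 * (P(1, R_star) - P(2, R_star)), t)
print("mu_* approx:", num / den)
\end{verbatim}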
It was recently established in \cite{HZH3} that the unique radially symmetric $T$-periodic positive solution $(\sigma_\ast(r,t),p_\ast(r,t),R_\ast(t))$ is linearly stable under non-radial perturbations for $0<\mu<\mu_\ast$ and linearly unstable for $\mu>\mu_\ast$ in the two-space dimensional case; see \cite{HX} for the analogous result in the three-space dimensional case. In this paper, we further study the asymptotic stability for the fully nonlinear problem. In contrast to the linearized problem, the $O(\varepsilon^2)$ terms cannot be dropped. On the one hand, we need to find the PDE estimates necessary to control the nonlinear error terms. On the other hand, since the system is translation invariant in the coordinate space, the center of the limiting sphere is not known in advance; it depends on the mode 1 perturbation. To be specific, the mode 1 perturbation results in a translation of the origin whose magnitude is of the same order as the perturbation, in addition to the decay behavior in time $t$. This is a key difficulty for this type of problem. We employ a fixed point theorem to find the correct translation of the origin; this step is technical and is carried out in Theorem \ref{th3}.
Throughout the paper, $\mu_\ast$ is defined by (\ref{0.16}). The main result of this paper is stated as follows (see Theorem \ref{th4} for a more explicit statement).
\begin{thm}
Let $\mu<\mu_\ast$. Then the unique radially symmetric $T$-periodic solution $(\sigma_\ast(r,t),p_\ast(r,t),R_\ast(t))$ of the system (\ref{1.2})-(\ref{1.6}) is asymptotically stable modulo translation, i.e., there exists a new center $\varepsilon a^\ast(\varepsilon)$, where $a^\ast(\varepsilon)$ is a bounded function of $\varepsilon$, such that $\partial\Omega(t)$ behaves like
\begin{equation*}
\partial B_{R_\ast(t)}(\varepsilon a^\ast(\varepsilon))=\big\{x:|x-\varepsilon a^\ast(\varepsilon)|=R_\ast(t)\big\}, \ \ \ \ \ t>\overline{t}
\end{equation*}
for some $\overline{t}>0$.
\end{thm}
\begin{rem}
Note that our method and result are also applicable to the two-space dimensional case.
\end{rem}
The rest of this paper is organized as follows. In the next section, we collect some preliminaries which are needed in the sequel. In Section 3, we transform the nonlinear free boundary problem (\ref{1.2})-(\ref{1.6}) into an initial-boundary value problem defined on a ball, which we then rewrite as an inhomogeneous linear system (\ref{3.150})-(\ref{3.190}) in Section 4.
The method we shall use is a fixed point argument:
\noindent(i) For given inhomogeneous terms $(f^i,b^j)$, solve the inhomogeneous linear system (\ref{3.150})-(\ref{3.190}) by using the spherical harmonic expansion;
\noindent(ii) define new inhomogeneous terms $(\widetilde{f}^i,\widetilde{b}^j)$ (see (\ref{0.17}));
\noindent(iii) establish the existence of a fixed point for the mapping:
\begin{equation*}
S:(f^i,b^j)\rightarrow(\widetilde{f}^i,\widetilde{b}^j).
\end{equation*}
Furthermore, in Section 4, we establish decay estimates for each mono-mode system obtained from the spherical harmonic expansion. In order to derive decay estimates for the mode 1 terms, we need to translate the origin $x=0$ to $x=\varepsilon a^\ast(\varepsilon)$, which is carried out in Theorem \ref{th3}. Section 5 aims at showing that the mapping $S$ admits a fixed point, thereby completing the proof of
the asymptotic stability for the fully nonlinear problem.
\section{Preliminaries}
In this section, we collect some preliminaries which are needed in the sequel.
The function $P_n(r)$ introduced by Friedman and Hu \cite{FH1} is defined by
\begin{equation}\label{2.1}
P_n(r)=\frac{I_{n+3/2}(r)}{rI_{n+1/2}(r)}, \ \ \ \ n=0,1,2,3,\cdots
\end{equation}
where $I_m(r)$ is the modified Bessel function, $m\geq0$ and $r>0$.
Recall \cite{FH1,HZH4} that
\begin{equation}\label{2.2}
P_0(r)=\frac{1}{r}\coth r-\frac{1}{r^2},
\end{equation}
\begin{equation}\label{2.18}
P_n(r)=\frac{1}{r^2P_{n+1}(r)+2n+3},
\end{equation}
\begin{equation}\label{0.8}
P_n(r)>P_{n+1}(r),
\end{equation}
\begin{equation}\label{0.2}
\frac{d}{dr}\Big(\frac{I_{n+1/2}(r)}{r^{1/2}}\Big)=\frac{I_{n+3/2}(r)+\frac{n}{r}I_{n+1/2}(r)}{r^{1/2}}.
\end{equation}
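For the reader's convenience, we recall that (\ref{2.18}) follows from the standard recurrence $I_{\nu-1}(r)-I_{\nu+1}(r)=\frac{2\nu}{r}I_{\nu}(r)$ for the modified Bessel functions: taking $\nu=n+3/2$,
\begin{equation*}
P_n(r)=\frac{I_{n+3/2}(r)}{rI_{n+1/2}(r)}
=\frac{I_{n+3/2}(r)}{r\Big[I_{n+5/2}(r)+\frac{2n+3}{r}I_{n+3/2}(r)\Big]}
=\frac{1}{r^{2}P_{n+1}(r)+2n+3}.
\end{equation*}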
\begin{lem}
For $n\geq1$ and $r>0$,
\begin{equation}\label{2.17}
rP_n(r)<rP_0(r)\leq1.
\end{equation}
\end{lem}
\noindent{\bf Proof.}\ By (\ref{2.2}), we get
\begin{equation*}
\frac{d}{dr}(rP_0(r))=\frac{d}{dr}\Big(\coth r-\frac{1}{r}\Big)=\frac{(e^{2r}-1)^2-4r^2e^{2r}}{r^2(e^{2r}-1)^2}.
\end{equation*}
Denote the numerator by $F(r)$. We compute
\begin{equation*}
F(0)=0\ \ \ \ \ \mbox{and}\ \ \ \ \frac{dF(r)}{dr}=4e^{2r}(e^{2r}-1-2r-2r^2)>0,
\end{equation*}
then $rP_0(r)$ is an increasing function, and
\begin{equation*}
rP_0(r)\leq\lim_{r\rightarrow\infty}rP_0(r)=1.
\end{equation*}
It follows from (\ref{0.8}) that (\ref{2.17}) holds. Therefore, our proof is complete.
$\Box$
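A quick numerical check of this monotonicity and bound, purely illustrative and not part of the proof, can be done in a few lines:
\begin{verbatim}
# Sanity check (illustrative): r*P_0(r) = coth(r) - 1/r increases and stays <= 1.
import numpy as np
r = np.linspace(0.01, 50.0, 5000)
rP0 = 1.0 / np.tanh(r) - 1.0 / r
assert np.all(np.diff(rP0) > 0.0) and np.all(rP0 <= 1.0)
print(rP0[0], rP0[-1])   # roughly r/3 near 0, approaching 1 as r grows
\end{verbatim}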
\begin{lem}\label{lemma8}
(\mbox{see} \cite[Lemma 4.1]{HX}) The following relations hold:
\begin{equation}\label{0.12}
\frac{\partial\sigma_\ast}{\partial r}\Big|_{r=R_\ast(t)}=\phi(t)R_\ast(t)P_0(R_\ast(t)),
\end{equation}
\begin{equation}\label{0.13}
\frac{\partial p_\ast}{\partial r}\Big|_{r=R_\ast(t)}=-\frac{dR_\ast(t)}{dt},
\end{equation}
\begin{equation}\label{0.14}
\frac{\partial^2p_\ast}{\partial r^2}\Big|_{r=R_\ast(t)}=-\mu\phi(t)R^2_\ast(t)P_0(R_\ast(t))P_1(R_\ast(t))-\frac{1}{R_\ast(t)}\frac{dR_\ast(t)}{dt}.
\end{equation}
\end{lem}
As established in \cite{CF},
we have the following local existence theorem:
\begin{thm}\label{th1}
If
\begin{equation}\label{0.57}
(\sigma_0,R_0)\in C^{1+\gamma}(\overline{\Omega}(0))\times C^{4+\alpha}(\partial \Omega(0))\ \ \ \mbox{and}\ \ \sigma_0=\phi(0) \ \ \ \mbox{on}\ \ \partial\Omega(0)
\end{equation}
for some $\alpha$, $\gamma$ $\in(0,1)$, then there exists a unique solution $(\sigma,p,R)$ of (\ref{1.2})-(\ref{1.6}) for $t\in[0,T]$ with some $T>0$, and
\begin{equation*}
\sigma\in C^{1+\gamma,(1+\gamma)/2}\Big(\bigcup_{t\in[0,T]}\overline{\Omega}(t)\times\{t\}\Big)\cap C^{2+2\alpha/3,1+\alpha/3}\Big(\bigcup_{t\in[t_0,T]}\overline{\Omega}(t)\times\{t\}\Big) \ \ \ \mbox{for\ any}\ t_0>0,
\end{equation*}
\begin{equation*}
p\in C^{2+\alpha,\alpha/3}\Big(\bigcup_{t\in[0,T]}\overline{\Omega}(t)\times\{t\}\Big), \ \ \ R\in C^{4+\alpha,1+\alpha/3}.
\end{equation*}
\end{thm}
The following fixed point theorem
(\hspace{-0.07em}\cite{HZH4}) will be crucial
in the proof of Theorem \ref{th3}:
\begin{thm}\label{th2}
Let $(X,\|\cdot\|)$ be a Banach space and let $\overline{B}_K(a_0)$ denote the closed ball in $X$ with center $a_0$ and radius $K$. Let $F$ be a mapping from $\overline{B}_K(a_0)$ into $X$ and
\begin{equation*}
F(x)=F_1(x)+\varepsilon G(x),
\end{equation*}
such that
(i) $F_1'(x)$ and $G'(x)$ are both continuous for $x\in\overline{B}_K(a_0)$,
(ii) $F_1(a_0)=0$ and the operator $F'_1(a_0)$ is invertible.
\noindent Then for small $|\varepsilon|$, the equation $F(x)=0$ admits a unique solution $x$ in $\overline{B}_K(a_0)$.
\end{thm}
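As a toy illustration of how Theorem \ref{th2} is used (the concrete functions and numbers below are made up and have nothing to do with the tumor model), consider $F(x)=F_1(x)+\varepsilon G(x)$ with $F_1(x)=x^3-x$, $G(x)=\cos x$ and $a_0=1$, so that $F_1(a_0)=0$ and $F_1'(a_0)=2$ is invertible; for small $|\varepsilon|$ a Newton-type iteration locates the unique nearby root:
\begin{verbatim}
# Toy example for Theorem th2 (hypothetical data): root of F_1(x)+eps*G(x)
# near a_0 = 1, where F_1(1) = 0 and F_1'(1) = 2 is invertible.
import numpy as np

def F1(x):  return x**3 - x
def G(x):   return np.cos(x)

eps, x = 1e-3, 1.0           # start the iteration at a_0
for _ in range(20):
    F  = F1(x) + eps * G(x)
    dF = 3.0 * x**2 - 1.0 - eps * np.sin(x)
    x -= F / dF
print(x)                     # a root close to a_0 = 1
\end{verbatim}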
\section{Transformation}
In this section, we transform the nonlinear free boundary problem into a nonlinear perturbation in a spherical region.
Let us assume the solution of the system (\ref{1.2})-(\ref{1.6}) is of the form
\begin{equation*}
\begin{split}
\partial\Omega(t):\ r&=R_\alphast(t)+\varepsilon\rho(\theta,\varphi,t),\\
\sigma(r,\theta,\varphi,t)&=\sigma_\alphast(r,t)+\varepsilon w(r,\theta,\varphi,t),\\
p(r,\theta,\varphi,t)&=p_\alphast(r,t)+\varepsilon q(r,\theta,\varphi,t).
\end{split}
\end{equation*}
Recall \cite{HZH1} that
\begin{equation*}
\vec{n}=\frac{1}{\sqrt{1+|\varepsilon\nabla_\omega\rho|^2/(R_\alphast(t)+\varepsilon\rho)^2}}
\Big(\vec{e}_r-\frac{\varepsilon}{R_\alphast(t)+\varepsilon\rho}\frac{\partial\rho}{\partial\theta}\vec{e}_\theta
-\frac{\varepsilon}{(R_\alphast(t)+\varepsilon\rho)\sin\theta}\frac{\partial\rho}{\partial\varphi}\vec{e}_\varphi\Big),
\end{equation*}
and
\begin{equation*}
\nabla=\vec{e}_r\frac{\partial}{\partial r}+\vec{e}_\theta\frac{1}{r}\frac{\partial}{\partial\theta}+\vec{e}_\varphi\frac{1}{r\sin\theta}\frac{\partial}{\partial\varphi}
=\vec{e}_r\frac{\partial}{\partial r}+\frac{1}{r}\nabla_\omega,
\end{equation*}
where $\nabla_\omega=\vec{e}_\theta\frac{\partial}{\partial\theta}+\vec{e}_\varphi\frac{1}{\sin\theta}\frac{\partial}{\partial\varphi}$.
The system (\ref{1.2})-(\ref{1.6}) can then be written in terms of $(w,q,\rho)$ as follows:
\begin{equation}\label{3.2}
\Delta w=w \ \ \ \mbox{in} \ \Omega(t),\ t>0,
\end{equation}
\begin{equation}\label{3.4}
-\Delta q=\mu w\ \ \ \mbox{in} \ \Omega(t),\ t>0,
\end{equation}
\begin{equation}\label{3.6}
\frac{\partial\rho}{\partial t}=-\frac{1}{\varepsilon}\Big(\frac{\partial p_\alphast}{\partial\vec{n}}+\varepsilon\frac{\partial q}{\partial\vec{n}}\Big)\sqrt{1+\frac{|\varepsilon\nabla_\omega\rho|^2}{(R_\alphast(t)+\varepsilon\rho)^2}}-\frac{1}{\varepsilon}\frac{dR_\alphast(t)}{dt} \ \ \mbox{on}\ \partial\Omega(t),\ t>0,
\end{equation}
\begin{equation}\label{3.3}
w=-\frac{1}{\varepsilon}\Big[\sigma_\alphast(R_\alphast(t)+\varepsilon\rho,t)-\sigma_\alphast(R_\alphast(t),t)\Big] \ \ \ \mbox{on} \ \partial\Omega(t),\ t>0,
\end{equation}
\begin{equation}\label{3.5}
q=-\frac{1}{\varepsilon}\big[p_\alphast(R_\alphast(t)+\varepsilon\rho,t)-\kappa\big] \ \ \ \mbox{on} \ \partial\Omega(t),\ t>0.
\end{equation}
By the Taylor expansion, one of the right-hand side terms of (\ref{3.6}) can be written in the following way,
\begin{equation}\label{3.7}
\begin{split}
\sqrt{1+\frac{|\varepsilon\nabla_\omega\rho|^2}{(R_\alphast(t)+\varepsilon\rho)^2}}\frac{\partial p_\alphast}{\partial\vec{n}}\Big|_{r=R_\alphast(t)+\varepsilon\rho}&=\sqrt{1+\frac{|\varepsilon\nabla_\omega\rho|^2}{(R_\alphast(t)+\varepsilon\rho)^2}}\nabla p_\alphast|_{r=R_\alphast(t)+\varepsilon\rho}\cdot\vec{n}\\
&=\frac{\partial p_\alphast(R_\alphast(t)+\varepsilon\rho,t)}{\partial r}\\
&=\frac{\partial p_\alphast(R_\alphast(t),t)}{\partial r}+\frac{\partial^2 p_\alphast(R_\alphast(t),t)}{\partial r^2}\varepsilon\rho+\varepsilon^2 P_\varepsilon.
\end{split}
\end{equation}
In addition,
\begin{equation}\label{3.07}
\begin{split}
\sqrt{1+\frac{|\varepsilon\nabla_\omega\rho|^2}{(R_\alphast(t)+\varepsilon\rho)^2}}\frac{\partial q}{\partial\vec{n}}\Big|_{r=R_\alphast(t)+\varepsilon\rho}&=\sqrt{1+\frac{|\varepsilon\nabla_\omega\rho|^2}{(R_\alphast(t)+\varepsilon\rho)^2}}\nabla q|_{r=R_\alphast(t)+\varepsilon\rho}\cdot\vec{n}\\
&=\frac{\partial q}{\partial r}\Big|_{r=R_\alphast(t)+\varepsilon\rho}-\frac{\varepsilon}{(R_\alphast(t)+\varepsilon\rho)^2}\frac{\partial\rho}{\partial\theta}\frac{\partial q}{\partial\theta}\Big|_{r=R_\alphast(t)+\varepsilon\rho}\\
&\quad-\frac{\varepsilon}{(R_\alphast(t)+\varepsilon\rho)^2\sin^2\theta}\frac{\partial\rho}{\partial\varphi}\frac{\partial q}{\partial\varphi}\Big|_{r=R_\alphast(t)+\varepsilon\rho}.
\end{split}
\end{equation}
Again by the Taylor expansion, the right-hand side terms of (\ref{3.3}) and (\ref{3.5}) can be expanded into the following format, respectively,
\begin{equation}\label{3.8}
\sigma_\alphast(R_\alphast(t)+\varepsilon\rho,t)-\sigma_\alphast(R_\alphast(t),t)=\frac{\partial\sigma_\alphast}{\partial r}\Big|_{r=R_\alphast(t)}\varepsilon\rho+\varepsilon^2S_\varepsilon,
\end{equation}
and
\begin{equation}\label{3.9}
\begin{split}
p_\alphast(R_\alphast(t)+\varepsilon\rho,t)-\kappa=&p_\alphast(R_\alphast(t),t)+\frac{\partial p_\alphast}{\partial r}\Big|_{r=R_\alphast(t)}\varepsilon\rho+\varepsilon^2K_{1\varepsilon}\\
&-\frac{1}{R_\alphast(t)}+\frac{\varepsilon}{R^2_\alphast(t)}\Big(\rho+\frac{1}{2}\Delta_\omega\rho\Big)+\varepsilon^2K_{2\varepsilon}\\
=&\varepsilon\Big[\frac{1}{R_\alphast^2(t)}\Big(\rho+\frac{1}{2}\Delta_\omega\rho\Big)+\frac{\partial p_\alphast}{\partial r}\Big|_{r=R_\alphast(t)}\rho\Big]+\varepsilon^2K_\varepsilon,
\end{split}
\end{equation}
where we have used the fact, see \cite[Theorem 8.1]{FR2},
\begin{equation*}
\kappa=\frac{1}{R}-\frac{\varepsilon}{R^2}\Big(\rho+\frac{1}{2}\Delta_\omega\rho\Big)+\varepsilon^2K
\end{equation*}
with
\begin{equation*}
\Delta_\omega\rho=\frac{1}{\sin\theta}\frac{\partial}{\partial\theta}\left(\sin\theta\frac{\partial\rho}{\partial\theta}\right)
+\frac{1}{\sin^2\theta}\frac{\partial^2\rho}{\partial\varphi^2}.
\end{equation*}
The Hanzawa transformation is defined by
\begin{equation*}
r=r'+\chi(R-r')\varepsilon \rho(\theta,\varphi,t), \ \ \ t=t', \ \ \ \ \theta=\theta', \ \ \ \varphi=\varphi',
\end{equation*}
here
\begin{equation*}
\chi(z)\in C^{\infty}, \ \ \ \ \chi(z)=\left\{\begin{aligned}
&0, \ \ \mbox{if} \ |z|\geq\frac{3}{4}\delta_0 \\
&1, \ \ \mbox{if}\ |z|<\frac{1}{4}\delta_0
\end{aligned}\right., \ \ \ \
\left|\frac{d^{k}\chi}{dz^{k}}\right|\leq\frac{C}{\delta_0^{k}},\ \ \ \ (\delta_0\ \mbox{positive\ and\ small}).
\end{equation*}
Under the Hanzawa transformation and (\ref{3.7})-(\ref{3.9}), the system (\ref{3.2})-(\ref{3.5}) is transformed into
\begin{equation}\label{3.10}
-\Delta' w'+w'=\varepsilon A_\varepsilon w' \ \ \ \mbox{in} \ B_{R_\alphast(t)},\ t>0,
\end{equation}
\begin{equation}\label{3.12}
-\Delta' q'-\mu w'=\varepsilon A_\varepsilon q' \ \ \ \mbox{in} \ B_{R_\alphast(t)},\ t>0,
\end{equation}
\begin{equation}\label{3.14}
\frac{\partial\rho'}{\partial t'}=-\frac{\partial^2p_\alphast}{\partial r'^2}\rho'-\frac{\partial q'}{\partial r'}+\varepsilon B^1_\varepsilon \ \ \ \mbox{on} \ \partial B_{R_\alphast(t)},\ t>0,
\end{equation}
\begin{equation}\label{3.11}
w'=-\frac{\partial\sigma_\alphast}{\partial r'}\rho'+\varepsilon B^2_\varepsilon \ \ \ \mbox{on} \ \partial B_{R_\alphast(t)},\ t>0,
\end{equation}
\begin{equation}\label{3.13}
q'=-\frac{1}{R_\alphast^2(t)}\Big(\rho'+\frac{1}{2}\Delta_\omega\rho'\Big)-\frac{\partial p_\alphast}{\partial r'}\rho'+\varepsilon B^3_\varepsilon \ \ \ \mbox{on} \ \partial B_{R_\alphast(t)},\ t>0,
\end{equation}
where $B_{R_\alphast(t)}$ is the ball with radius $R_\alphast(t)$, $A_\varepsilon$ given in \cite{FH2} is a second order differential operator in $(r',\theta',\varphi')$, and
\begin{equation*}
\begin{split}
B_\varepsilon^1=&-\frac{1}{\varepsilon^2}\frac{\partial p_\alphast(R_\alphast(t')+\varepsilon\rho',t')}{\partial r'}+\frac{1}{(R_\alphast(t')+\varepsilon\rho')^2}\frac{\partial\rho'}{\partial\theta'}\frac{\partial q'}{\partial\theta'}\\
&+\frac{1}{(R_\alphast(t')+\varepsilon\rho')^2\sin^2\theta'}\frac{\partial\rho'}{\partial\varphi'}\frac{\partial q'}{\partial\varphi'}-\frac{1}{\varepsilon^2}\frac{dR_\alphast(t')}{dt'}+\frac{1}{\varepsilon}\frac{\partial^2 p_\alphast(R_\alphast(t'),t')}{\partial r'^2}\rho',
\end{split}
\end{equation*}
\begin{equation*}
\begin{split}
B^2_\varepsilon=&-\frac{1}{\varepsilon^2}\big[\sigma_\alphast(R_\alphast(t')+\varepsilon\rho',t')-\sigma_\alphast(R_\alphast(t'),t')\big]
+\frac{1}{\varepsilon}\frac{\partial\sigma_\alphast(R_\alphast(t'),t')}{\partial r'}\rho',
\end{split}
\end{equation*}
\begin{equation*}
B_\varepsilon^3=-\frac{1}{\varepsilon^2}\big[p_\alphast(R_\alphast(t')+\varepsilon\rho',t')-\kappa\big]+\frac{1}{\varepsilon R_\alphast^2(t')}\Big(\rho'+\frac{1}{2}\Delta_\omega\rho'\Big)+\frac{1}{\varepsilon}\frac{\partial p_\alphast(R_\alphast(t'),t')}{\partial r'}\rho'.
\end{equation*}
As pointed out in \cite{FH2}, none of the terms of $A_\varepsilon$ involves any singularity; it then follows from Theorem \ref{th1} that, for $T>1$,
\begin{equation*}
A_\varepsilon w'\in C^{2\alpha/3,\alpha/3}(B_{R_\ast(t)}\times[0,T]),
\end{equation*}
\begin{equation*}
A_\varepsilon q'\in C^{\alpha,\alpha/3}(B_{R_\ast(t)}\times[0,T]).
\end{equation*}
Notice that the factor $\frac{1}{\varepsilon}$ appearing in $A_\varepsilon$ is cancelled by the coefficient that accompanies it, so that both the $C^{2\alpha/3,\alpha/3}$ norm of $A_\varepsilon w'$ and the $C^{\alpha,\alpha/3}$ norm of $A_\varepsilon q'$ are uniformly bounded in $\varepsilon$.
Moreover, although $\sin^2\theta'$ appears in the denominator of the last term of $B_\varepsilon^1$, this incurs no singularity. Indeed, one can simply choose a different coordinate system to deal with this issue, i.e., regard $B_\varepsilon^1$ as a function defined on the unit sphere $\Sigma=\{x:|x|=1\}$ (rather than as a function of the variables $(\theta,\varphi)$). Then (\ref{2.15}) and Theorem \ref{th1} yield $B_\varepsilon^1\in C^{1+\alpha,\alpha/3}(\partial B_{R_\ast(t)}\times[0,T])$. It follows from (\ref{3.7}) and (\ref{0.13}) that the $C^{1+\alpha,\alpha/3}$ norm of $B_\varepsilon^1$ is uniformly bounded in $\varepsilon$.
By (\ref{2.14}) and Theorem \ref{th1}, we find that $B_\varepsilon^2\in C^{1+2\alpha/3,1+\alpha/3}(\partial B_{R_\ast(t)}\times[0,T])$, and (\ref{3.8}) implies that the $C^{1+2\alpha/3,1+\alpha/3}$ norm of $B_\varepsilon^2$ is uniformly bounded in $\varepsilon$.
Similarly, by (\ref{3.9}),
we immediately derive that $B_\varepsilon^3\in C^{2+\alpha,\alpha/3}(\partial B_{R_\ast(t)}\times[0,T])$, and its $C^{2+\alpha,\alpha/3}$ norm is uniformly bounded in $\varepsilon$.
For notational convenience, we shall denote functions $w'(r',\theta',\varphi',t')$, $q'(r',\theta',\varphi',t')$ and $\rho'(\theta',\varphi',t')$ again by $w(r,\theta,\varphi,t)$, $q(r,\theta,\varphi,t)$ and $\rho(\theta,\varphi,t)$, respectively, in the rest of this paper.
\section{The Inhomogeneous Linear System}
In this section, we rewrite (\ref{3.10})-(\ref{3.13}) as an inhomogeneous linear system (\ref{3.150})-(\ref{3.190}), and use spherical harmonic expansions to solve this inhomogeneous system. Furthermore, we establish decay estimates for each mono-mode system obtained from the spherical harmonic expansion.
Specifically, we consider the system (\ref{3.10})-(\ref{3.13}) where the $\varepsilon$ terms of any order are replaced by given functions,
\begin{equation}\label{3.150}
-\Delta w+w=\varepsilon f^1(r,\theta,\varphi,t) \ \ \ \mbox{in} \ B_{R_\alphast(t)},\ t>0,
\end{equation}
\begin{equation}\label{3.160}
-\Delta q=\mu w+\varepsilon f^2(r,\theta,\varphi,t) \ \ \ \mbox{in} \ B_{R_\alphast(t)},\ t>0,
\end{equation}
\begin{equation}\label{3.170}
\begin{split}
\frac{\partial\rho}{\partial t}=&-\frac{\partial^2p_\alphast}{\partial r^2}\Big|_{r=R_\alphast(t)}\rho-\frac{\partial q}{\partial r}\Big|_{r=R_\alphast(t)}+\varepsilon b^1(\theta,\varphi,t) \ \ \ \ \mbox{on} \ \partial B_{R_\alphast(t)}, \ t>0,
\end{split}
\end{equation}
\begin{equation}\label{3.180}
w=-\frac{\partial\sigma_\alphast}{\partial r}\Big|_{r=R_\alphast(t)}\rho+\varepsilon b^2(\theta,\varphi,t) \ \ \ \ \mbox{on} \ \partial B_{R_\alphast(t)}, \ t>0,
\end{equation}
\begin{equation}\label{3.190}
\begin{split}
q=&-\frac{1}{R_\alphast^2(t)}\Big(\rho+\frac{1}{2}\Delta_\omega\rho\Big)-\frac{\partial p_\alphast}{\partial r}\Big|_{r=R_\alphast(t)}\rho+\varepsilon b^3(\theta,\varphi,t) \ \ \ \mbox{on} \ \partial B_{R_\alphast(t)}, \ t>0,
\end{split}
\end{equation}
and we add initial conditions (cf. (\ref{0.68}))
\begin{equation}\label{3.20}
w|_{t=0}=w_0(r,\theta,\varphi), \ \ \ \quad \ \ \ \rho|_{t=0}=\rho_0(\theta,\varphi).
\end{equation}
Assume that the functions $f^i$, $b^j$ satisfy the following properties
\begin{equation}\label{4.1}
\sqrt{|\varepsilon|}\left(\int_0^\infty e^{2\delta_1t}\|f^1(\cdot,t)\|^2_{L^2(B_{R_\alphast(t)})}dt\right)^{1/2}\leq1,
\end{equation}
\begin{equation}\label{4.2}
\sqrt{|\varepsilon|}\left(\int_0^\infty e^{2\delta_1t}\|f^2(\cdot,t)\|^2_{L^2(B_{R_\alphast(t)})}dt\right)^{1/2}\leq1,
\end{equation}
\begin{equation}\label{4.3}
\sqrt{|\varepsilon|}\left(\int_0^\infty e^{2\delta_1t}\|b^1(\cdot,t)\|^2_{H^{1/2}(\partial B_{R_\alphast(t)})}dt\right)^{1/2}\leq1,
\end{equation}
\begin{equation}\label{4.4}
\sqrt{|\varepsilon|}\left(\int_0^\infty e^{2\delta_1t}\|b^2(\cdot,t)\|^2_{H^{1}(\partial B_{R_\alphast(t)})}dt\right)^{1/2}\leq1,
\end{equation}
\begin{equation}\label{4.5}
\sqrt{|\varepsilon|}\left(\int_0^\infty e^{2\delta_1t}\|b^3(\cdot,t)\|^2_{H^{3/2}(\partial B_{R_\alphast(t)})}dt\right)^{1/2}\leq1,
\end{equation}
where $\delta_1>0$ is sufficiently small, and for some $\alphalpha\in(0,1)$,
\begin{equation}\label{4.6}
\sqrt{|\varepsilon|}\|f^1\|_{C^{2\alphalpha/3,\alphalpha/3}(B_{R_\alphast(t)}\times[0,\infty))}\leq1,
\end{equation}
\begin{equation}\label{4.7}
\sqrt{|\varepsilon|}\|f^2\|_{C^{\alphalpha,\alphalpha/3}(B_{R_\alphast(t)}\times[0,\infty))}\leq1,
\end{equation}
\begin{equation}\label{4.8}
\sqrt{|\varepsilon|}\|b^1\|_{C^{1+\alphalpha,\alphalpha/3}(\partial B_{R_\alphast(t)}\times[0,\infty))}\leq1,
\end{equation}
\begin{equation}\label{4.9}
\sqrt{|\varepsilon|}\|b^2\|_{C^{2+2\alphalpha/3,1+\alphalpha/3}(\partial B_{R_\alphast(t)}\times[0,\infty))}\leq1,
\end{equation}
\begin{equation}\label{4.10}
\sqrt{|\varepsilon|}\|b^3\|_{C^{2+\alphalpha,\alphalpha/3}(\partial B_{R_\alphast(t)}\times[0,\infty))}\leq1.
\end{equation}
For simplicity, we also assume that
\begin{equation}\label{4.12}
\rho_0\in C^{4+\alphalpha}(\overline{B}_{R_\alphast(0)}), \ \ \ \ \ w_0\in C^{2+2\alphalpha/3}(\overline{B}_{R_\alphast(0)}),
\end{equation}
and the compatibility condition of order 2 for $w$ is satisfied.
\begin{rem}\label{rem1}
For given functions $f^i$, $b^j$
in some subset $X_1$ of a Banach space $X$, we solve the inhomogeneous linear system (\ref{3.150})-(\ref{3.190}), and derive the estimate of $(w,q,\rho)$. Then we define the new functions $\widetilde{f}^i$, $\widetilde{b}^j$ by
\begin{equation}\label{0.17}
\widetilde{f}^1=A_\varepsilon w, \ \ \ \ \widetilde{f}^2=A_\varepsilon q,\ \ \ \
\widetilde{b}^1=B_\varepsilon^1, \ \ \ \widetilde{b}^2=B_\varepsilon^2, \ \ \ \widetilde{b}^3=B_\varepsilon^3.
\end{equation}
We shall show that the mapping $S:(f^i,b^j)\rightarrow(\widetilde{f}^i,\widetilde{b}^j)$ admits a fixed point which is carried out in Section 5, leading to the asymptotic stability.
\end{rem}
\begin{rem}
The initial data are supposed to satisfy the conditions of Theorem \ref{th1} rather than (\ref{4.12}). However, in the process of showing that the mapping $S$ admits a fixed point, we shall need H\"{o}lder estimates of $D_x^2\sigma$ which requires the consistency condition of order 2 for $\sigma$ at $\partial\Omega(0)$. This is quite restrictive. As stated in \cite[Remark 3.2]{FH2}, we can avoid it by taking the initial time at $t=\frac{T}{2}$ instead of at $t=0$. To simplify notation, we shall denote $t=\frac{T}{2}$ and the initial data $(w|_{t=T/2},\rho|_{t=T/2})$ by $t=0$ and $(w_0,\rho_0)$, respectively. Then the consistency condition of order 2 is satisfied at $t=0$, with $\rho_0\in C^{4+\alphalpha}(\overline{B}_{R_\alphast(0)})$ and $w_0\in C^{2+2\alphalpha/3}(\overline{B}_{R_\alphast(0)})$ (cf. (\ref{4.12})).
\end{rem}
Now we proceed to use spherical harmonic expansions to solve the inhomogeneous linear system (\ref{3.150})-(\ref{3.190}). We first formally expand all the functions $f^i$, $b^j$ in terms of spherical harmonics
\begin{equation*}
f^i(r,\theta,\varphi,t)=\sum^\infty_{n=0}\sum^n_{m=-n}f^i_{n,m}(r,t)Y_{n,m}(\theta,\varphi), \ \ \ \ i=1,2,
\end{equation*}
\begin{equation*}
b^j(\theta,\varphi,t)=\sum^\infty_{n=0}\sum^n_{m=-n}b^j_{n,m}(t)Y_{n,m}(\theta,\varphi), \ \ \ \ j=1,2,3.
\end{equation*}
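As a side remark, such expansion coefficients are straightforward to compute numerically; the following sketch, purely illustrative and using a made-up test function on the sphere, recovers coefficients by quadrature against the spherical harmonics:
\begin{verbatim}
# Illustrative only: recover expansion coefficients of a function on the
# sphere by quadrature against Y_{n,m}.  The test function is made up.
import numpy as np
from scipy.special import sph_harm    # sph_harm(m, n, azimuth, polar)

theta = np.linspace(0.0, np.pi, 200)            # polar angle
varphi = np.linspace(0.0, 2.0 * np.pi, 400)     # azimuthal angle
TH, PH = np.meshgrid(theta, varphi, indexing="ij")

g = sph_harm(0, 2, PH, TH).real + 0.5 * sph_harm(0, 3, PH, TH).real

def coeff(n, m):
    Y = sph_harm(m, n, PH, TH)
    integrand = g * np.conj(Y) * np.sin(TH)
    return np.trapz(np.trapz(integrand, varphi, axis=1), theta)

print(coeff(2, 0).real, coeff(3, 0).real)       # approximately 1 and 0.5
\end{verbatim}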
Recall \cite{FH2} that (\ref{4.1})-(\ref{4.5}) imply
\begin{equation}\label{4.19}
|\varepsilon|\int_0^\infty e^{2\delta_1t}\|f_{n,m}^1(\cdot,t)\|^2_{L^2(B_{R_\alphast(t)})}dt=F_{n,m}^1, \ \ \ \ \ \sum_{n,m}F_{n,m}^1\leq1,
\end{equation}
\begin{equation}\label{4.20}
|\varepsilon|\int_0^\infty e^{2\delta_1t}\|f_{n,m}^2(\cdot,t)\|^2_{L^2(B_{R_\alphast(t)})}dt=F_{n,m}^2, \ \ \ \ \ \sum_{n,m}F_{n,m}^2\leq1,
\end{equation}
\begin{equation}\label{4.21}
|\varepsilon|(n+1)\int_0^\infty e^{2\delta_1t}|b_{n,m}^1(t)|^2dt=B_{n,m}^1, \ \ \ \ \ \sum_{n,m}B_{n,m}^1\leq C,
\end{equation}
\begin{equation}\label{4.22}
|\varepsilon|(n+1)^2\int_0^\infty e^{2\delta_1t}|b_{n,m}^2(t)|^2dt=B_{n,m}^2, \ \ \ \ \ \sum_{n,m}B_{n,m}^2\leq C,
\end{equation}
\begin{equation}\label{4.23}
|\varepsilon|(n+1)^3\int_0^\infty e^{2\delta_1t}|b_{n,m}^3(t)|^2dt=B_{n,m}^3, \ \ \ \ \ \sum_{n,m}B_{n,m}^3\leq C.
\end{equation}
We then look for a solution of the following form:
\begin{eqnarray*}
w(r,\theta,\varphi,t) &=& \sum^\infty_{n=0}\sum^n_{m=-n}w_{n,m}(r,t)Y_{n,m}(\theta,\varphi), \\
q(r,\theta,\varphi,t) &=& \sum^\infty_{n=0}\sum^n_{m=-n}q_{n,m}(r,t)Y_{n,m}(\theta,\varphi), \\
\rho(\theta,\varphi,t) &=& \sum^\infty_{n=0}\sum^n_{m=-n}\rho_{n,m}(t)Y_{n,m}(\theta,\varphi).
\end{eqnarray*}
By the relation
\begin{equation*}\label{1.20}
\Delta_\omega Y_{n,m}(\theta,\varphi)+n(n+1)Y_{n,m}(\theta,\varphi)=0\ \ \ \mbox{and}\ \ \ \Delta=\frac{1}{r^2}\frac{\partial}{\partial r}\Big(r^2\frac{\partial}{\partial r}\Big)+\frac{1}{r^2}\Delta_\omega,
\end{equation*}
we obtain that $w_{n,m}(r,t)$, $q_{n,m}(r,t)$ and $\rho_{n,m}(t)$ satisfy
\begin{equation}\label{4.13}
-\Delta w_{n,m}(r,t)+\left(\frac{n(n+1)}{r^2}+1\right)w_{n,m}(r,t)=\varepsilon f^1_{n,m}(r,t) \ \ \ \mbox{in}\ B_{R_\alphast(t)}, \ t>0,
\end{equation}
\begin{equation}\label{4.15}
-\Delta q_{n,m}(r,t)+\frac{n(n+1)}{r^2}q_{n,m}(r,t)=\mu w_{n,m}(r,t)+\varepsilon f^2_{n,m}(r,t) \ \ \ \mbox{in} \ B_{R_\alphast(t)},\ t>0,
\end{equation}
\begin{equation}\label{4.17}
\begin{split}
\frac{d\rho_{n,m}(t)}{dt}=&-\frac{\partial^2p_\alphast}{\partial r^2}\Big|_{r=R_\alphast(t)}\rho_{n,m}(t)-\frac{\partial q_{n,m}(r,t)}{\partial r}\Big|_{r=R_\alphast(t)}+\varepsilon b^1_{n,m}(t), \ \ \ t>0,
\end{split}
\end{equation}
\begin{equation}\label{4.14}
w_{n,m}(R_\alphast(t),t)=-\frac{\partial\sigma_\alphast}{\partial r}\Big|_{r=R_\alphast(t)}\rho_{n,m}(t)+\varepsilon b^2_{n,m}(t), \ \ \ \ \ t>0,
\end{equation}
\begin{equation}\label{4.16}
\begin{split}
q_{n,m}(R_\alphast(t),t)=&-\frac{1}{R_\alphast^2(t)}\left(1-\frac{n(n+1)}{2}\right)\rho_{n,m}(t)\\
&-\frac{\partial p_\alphast}{\partial r}\Big|_{r=R_\alphast(t)}\rho_{n,m}(t)+\varepsilon b^3_{n,m}(t), \ \ \ t>0,
\end{split}
\end{equation}
\begin{equation}\label{4.18}
\rho_{n,m}|_{t=0}=\rho_{0,n,m}, \ \ \ \ \ w_{n,m}|_{t=0}=w_{0,n,m}(r) \ \ \ \ \mbox{in}\ \ B_{R_\alphast(0)}.
\end{equation}
As in \cite{HZH1}, we can solve (\ref{4.13}) and (\ref{4.14}) in the form
\begin{equation}\label{4.29}
\begin{split}
w_{n,m}(r,t)=&\Big[-\frac{\partial\sigma_\alphast}{\partial r}\Big|_{r=R_\alphast(t)}\rho_{n,m}(t)+\varepsilon b^2_{n,m}(t)\Big]\frac{R_\alphast^{1/2}(t)}{I_{n+1/2}(R_\alphast(t))}\frac{I_{n+1/2}(r)}{r^{1/2}}\\
&+\varepsilon\xi_{1,n,m}(r,t),
\end{split}
\end{equation}
where $\xi_{1,n,m}(r,t)$ is the solution of
\begin{equation}\label{4.30}
\begin{split}
-\Delta\xi_{1,n,m}(r,t)+\left(\frac{n(n+1)}{r^2}+1\right)\xi_{1,n,m}(r,t) &= f^1_{n,m}(r,t) \ \ \ \ \mbox{in}\ B_{R_\alphast(t)}, \\
\xi_{1,n,m}(R_\alphast(t),t) &= 0.
\end{split}
\end{equation}
Let
\begin{equation}\label{4.31}
\psi(r,t)=q_{n,m}(r,t)+\mu w_{n,m}(r,t),
\end{equation}
then $\psi(r,t)$ satisfies
\begin{equation}\label{4.32}
-\Delta\psi(r,t)+\frac{n(n+1)}{r^2}\psi(r,t)=\varepsilon\mu f^1_{n,m}(r,t)+\varepsilon f^2_{n,m}(r,t) \ \ \ \ \mbox{in}\ B_{R_\alphast(t)},
\end{equation}
and by (\ref{4.14}) and (\ref{4.16}), $\psi(r,t)$ satisfies the following boundary condition
\begin{equation}\label{4.33}
\begin{split}
\psi(R_\alphast(t),t)=&\frac{1}{R_\alphast^2(t)}\left(\frac{n(n+1)}{2}-1\right)\rho_{n,m}(t)-\frac{\partial p_\alphast}{\partial r}\Big|_{r=R_\alphast(t)}\rho_{n,m}(t)\\
&-\mu\frac{\partial\sigma_\alphast}{\partial r}\Big|_{r=R_\alphast(t)}\rho_{n,m}(t)+\varepsilon b^3_{n,m}(t)+\mu\varepsilon b^2_{n,m}(t).
\end{split}
\end{equation}
The solution of the problem (\ref{4.32})-(\ref{4.33}) is given by
\begin{equation}\label{4.34}
\begin{split}
\psi(r,t)=&\frac{r^n}{R^n_\alphast(t)}\Big\{\frac{1}{R_\alphast^2(t)}\left(\frac{n(n+1)}{2}-1\right)\rho_{n,m}(t)-\frac{\partial p_\alphast}{\partial r}\Big|_{r=R_\alphast(t)}\rho_{n,m}(t)\\
&-\mu\frac{\partial\sigma_\alphast}{\partial r}\Big|_{r=R_\alphast(t)}\rho_{n,m}(t)+\varepsilon b^3_{n,m}(t)+\mu\varepsilon b^2_{n,m}(t)\Big\}+\varepsilon\psi_{1,n,m}(r,t),
\end{split}
\end{equation}
where $\psi_{1,n,m}(r,t)$ is the solution of
\begin{equation}\label{4.35}
\begin{split}
-\Delta\psi_{1,n,m}(r,t)+\frac{n(n+1)}{r^2}\psi_{1,n,m}(r,t) &= \mu f^1_{n,m}(r,t)+f^2_{n,m}(r,t)\ \ \ \ \mbox{in}\ B_{R_\alphast(t)}, \\
\psi_{1,n,m}(R_\alphast(t),t) &= 0.
\end{split}
\end{equation}
It follows from (\ref{4.31}), (\ref{4.34}), (\ref{4.29}) and (\ref{0.2}) that
\begin{equation*}\label{4.37}
\begin{split}
\frac{\partial q_{n,m}}{\partial r}\Big|_{r=R_\alphast(t)}=&\frac{\partial\psi}{\partial r}\Big|_{r=R_\alphast(t)}-\mu\frac{\partial w_{n,m}}{\partial r}\Big|_{r=R_\alphast(t)}\\
=&\frac{n}{R_\alphast(t)}\Big\{\frac{1}{R_\alphast^2(t)}\left(\frac{n(n+1)}{2}-1\right)\rho_{n,m}(t)-\frac{\partial p_\alphast}{\partial r}\Big|_{r=R_\alphast(t)}\rho_{n,m}(t)\\
&-\mu\frac{\partial\sigma_\alphast}{\partial r}\Big|_{r=R_\alphast(t)}\rho_{n,m}(t)+\varepsilon b^3_{n,m}(t)+\mu\varepsilon b^2_{n,m}(t)\Big\}+\varepsilon\frac{\partial\psi_{1,n,m}}{\partial r}\Big|_{r=R_\alphast(t)}\\
&-\mu\Big[-\frac{\partial\sigma_\alphast}{\partial r}\Big|_{r=R_\alphast(t)}\rho_{n,m}(t)+\varepsilon b^2_{n,m}(t)\Big]\left\{\frac{I_{n+3/2}(R_\alphast(t))}{I_{n+1/2}(R_\alphast(t))}+\frac{n}{R_\alphast(t)}\right\}\\
&-\varepsilon\mu\frac{\partial\xi_{1,n,m}}{\partial r}\Big|_{r=R_\alphast(t)}.
\end{split}
\end{equation*}
After a direct computation, by (\ref{2.1}), (\ref{0.12}) and (\ref{0.13}), we obtain
\begin{equation*}
\begin{split}
\frac{\partial q_{n,m}}{\partial r}\Big|_{r=R_\alphast(t)}
=&\frac{n}{R_\alphast(t)}\Big\{\frac{1}{R_\alphast^2(t)}\left(\frac{n(n+1)}{2}-1\right)\rho_{n,m}(t)-\frac{\partial p_\alphast}{\partial r}\Big|_{r=R_\alphast(t)}\rho_{n,m}(t)\Big\}\\
&+\mu\frac{\partial\sigma_\alphast}{\partial r}\Big|_{r=R_\alphast(t)}R_\alphast(t)P_n(R_\alphast(t))\rho_{n,m}(t)+\frac{n}{R_\alphast(t)}\varepsilon b^3_{n,m}(t)\\
&-\varepsilon\mu R_\alphast(t)P_n(R_\alphast(t))b^2_{n,m}(t)
-\varepsilon\mu\frac{\partial\xi_{1,n,m}}{\partial r}\Big|_{r=R_\alphast(t)}+\varepsilon\frac{\partial\psi_{1,n,m}}{\partial r}\Big|_{r=R_\alphast(t)}\\
=&\Big\{\frac{n}{R_\alphast^3(t)}\left(\frac{n(n+1)}{2}-1\right)+\frac{n}{R_\alphast(t)}\frac{dR_\alphast(t)}{dt}\\
&+\mu\phi(t)R^2_\alphast(t)P_0(R_\alphast(t))P_n(R_\alphast(t))\Big\}\rho_{n,m}(t)+\varepsilon\frac{n}{R_\alphast(t)}b^3_{n,m}(t)\\
&-\varepsilon\mu R_\alphast(t)P_n(R_\alphast(t))b^2_{n,m}(t)
-\varepsilon\mu\frac{\partial\xi_{1,n,m}}{\partial r}\Big|_{r=R_\alphast(t)}+\varepsilon\frac{\partial\psi_{1,n,m}}{\partial r}\Big|_{r=R_\alphast(t)}.
\end{split}
\end{equation*}
Inserting the above expression into (\ref{4.17}) and using the relation (\ref{0.14}), we derive
\begin{equation*}\label{3.41}
\begin{split}
\frac{d\rho_{n,m}(t)}{dt}
=&\Big\{-\frac{n-1}{R_\alphast(t)}\frac{dR_\alphast(t)}{dt}-\frac{n}{R_\alphast^3(t)}\Big(\frac{n(n+1)}{2}-1\Big)\\
&+\mu\phi(t)R^2_\alphast(t)P_0(R_\alphast(t))\big[P_1(R_\alphast(t))-P_n(R_\alphast(t))\big]\Big\}\rho_{n,m}(t)\\
&+\varepsilon\mu\frac{\partial\xi_{1,n,m}}{\partial r}\Big|_{r=R_\alphast(t)}-\varepsilon\frac{\partial\psi_{1,n,m}}{\partial r}\Big|_{r=R_\alphast(t)}\\
&+\varepsilon b^1_{n,m}(t)+\varepsilon\mu R_\alphast(t)P_n(R_\alphast(t))b^2_{n,m}(t)-\varepsilon\frac{n}{R_\alphast(t)}b^3_{n,m}(t).
\end{split}
\end{equation*}
Solving this linear ODE by the variation-of-constants formula, we obtain
\begin{equation}\label{4.40}
\begin{split}
\rho_{n,m}(t)
=\rho_{0,n,m}e^{\int_0^tH_n(\tau)d\tau}+\varepsilon\int_0^tQ_{n,m}(s)e^{\int_s^tH_n(\tau)d\tau}ds,
\end{split}
\end{equation}
where
\begin{equation}\label{4.41}
\begin{split}
H_n(\tau)
=&-\frac{n-1}{R_\alphast(\tau)}\frac{dR_\alphast(\tau)}{d\tau}-\frac{n}{R_\alphast^3(\tau)}\Big(\frac{n(n+1)}{2}-1\Big)\\
&+\mu\phi(\tau)R^2_\alphast(\tau)P_0(R_\alphast(\tau))\big[P_1(R_\alphast(\tau))-P_n(R_\alphast(\tau))\big]\\
=&\Big\{R_\alphast(\tau)\big[P_1(R_\alphast(\tau))-P_n(R_\alphast(\tau))\big]-\frac{n-1}{R_\alphast(\tau)}\Big\}\frac{dR_\alphast(\tau)}{d\tau}\\
&-\frac{n}{R_\alphast^3(\tau)}\Big(\frac{n(n+1)}{2}-1\Big)
+\frac{\mu\widetilde{\sigma}}{3}R_\alphast^2(\tau)\big[P_1(R_\alphast(\tau))-P_n(R_\alphast(\tau))\big],
\end{split}
\end{equation}
here we have used the fact, by (\ref{2.16}),
\begin{equation*}
\mu\phi(\tau)P_0(R_\alphast(\tau))=\frac{1}{R_\alphast(\tau)}\frac{dR_\alphast(\tau)}{d\tau}+\frac{\mu\widetilde{\sigma}}{3},
\end{equation*}
and
\begin{equation}\label{0.3}
\begin{split}
Q_{n,m}(s)=&\mu\frac{\partial\xi_{1,n,m}}{\partial r}\Big|_{r=R_\alphast(s)}-\frac{\partial\psi_{1,n,m}}{\partial r}\Big|_{r=R_\alphast(s)}\\
&+b_{n,m}^1(s)+\mu R_\alphast(s)P_n(R_\alphast(s))b_{n,m}^2(s)-\frac{n}{R_\alphast(s)}b_{n,m}^3(s).
\end{split}
\end{equation}
Note that $\rho_{M,n,m}\triangleq\rho_{0,n,m}e^{\int_0^tH_n(\tau)d\tau}$ is the function $\rho_{n,m}$ corresponding to the case $f_{n,m}^1\equiv f_{n,m}^2\equiv b_{n,m}^1\equiv b_{n,m}^2\equiv b_{n,m}^3\equiv0$ of the system (\ref{4.13})-(\ref{4.16}). The corresponding $w_{n,m}$ and $q_{n,m}$ are denoted by $w_{M,n,m}$ and $q_{M,n,m}$. Let $w_M=\sum_{n=0}^\infty\sum_{m=-n}^nw_{M,n,m}Y_{n,m}$, $q_M=\sum_{n=0}^\infty\sum_{m=-n}^nq_{M,n,m}Y_{n,m}$ and $\rho_M=\sum_{n=0}^\infty\sum_{m=-n}^n\rho_{M,n,m}Y_{n,m}$.
We shall estimate $\rho_{n,m}(t)$ by dividing into three cases: $n=0$, $n=1$ and $n\geq2$.
\subsection{Case 1: $n=0$}
It follows from (\ref{4.40}) and (\ref{0.3}) that
\begin{equation}\label{4.43}
\begin{split}
\rho_{0,0}(t)=&\rho_{0,0,0}e^{\int_0^tH_0(\tau)d\tau}+\varepsilon\int_0^tQ_{0,0}(s)e^{\int_s^tH_0(\tau)d\tau}ds\\
=&\rho_{0,0,0}e^{\int_0^tH_0(\tau)d\tau}+\varepsilon\int_0^t\Big[\mu\frac{\partial\xi_{1,0,0}}{\partial r}\Big|_{r=R_\alphast(s)}-\frac{\partial\psi_{1,0,0}}{\partial r}\Big|_{r=R_\alphast(s)}\Big]e^{\int_s^tH_0(\tau)d\tau}ds\\
&+\varepsilon\int_0^tb_{0,0}^1(s)e^{\int_s^tH_0(\tau)d\tau}ds+\varepsilon\int_0^t\mu R_\alphast(s)P_0(R_\alphast(s))b_{0,0}^2(s)e^{\int_s^tH_0(\tau)d\tau}ds\\
\equiv& \rho_{M,0,0}+\varepsilon L_1+\varepsilon L_2+\varepsilon L_3,
\end{split}
\end{equation}
where, by (\ref{4.41}),
\begin{equation}\label{4.44}
\begin{split}
H_0(\tau)=&\Big\{R_\alphast(\tau)\big[P_1(R_\alphast(\tau))-P_0(R_\alphast(\tau))\big]+\frac{1}{R_\alphast(\tau)}\Big\}\frac{dR_\alphast(\tau)}{d\tau}\\
&+\frac{\mu\widetilde{\sigma}}{3}R_\alphast^2(\tau)\big[P_1(R_\alphast(\tau))-P_0(R_\alphast(\tau))\big].
\end{split}
\end{equation}
We now proceed to estimate the last three terms on the right-hand side of (\ref{4.43}). Before doing so, we establish
the following lemma.
\begin{lem}\label{lemma1}
For $\mu>0$, there exists a positive number $\delta$, depending on $\mu$ and $R_\alphast$, such that the following is true:
\begin{equation}\label{4.45}
e^{\int_s^tH_0(\tau)d\tau}\leq Ce^{-\delta(t-s)}.
\end{equation}
\end{lem}
\noindent{\bf Proof.}\ For any $t>0$ and $0<s<t$, there exist $m\in \mathbb{N}$ and $z\in[0,T)$ such that $t-s=mT+z$, then
\begin{equation*}
\begin{split}
&e^{\int_s^t\left\{R_\alphast(\tau)\big[P_1(R_\alphast(\tau))-P_0(R_\alphast(\tau))\big]+\frac{1}{R_\alphast(\tau)}\right\}\frac{dR_\alphast(\tau)}{d\tau}d\tau}\\
&\quad=e^{\int_s^{s+z}\left\{R_\alphast(\tau)\big[P_1(R_\alphast(\tau))-P_0(R_\alphast(\tau))\big]+\frac{1}{R_\alphast(\tau)}\right\}dR_\alphast(\tau)}\leq C.
\end{split}
\end{equation*}
Furthermore, by the boundedness of $R_\alphast(t)$ and (\ref{0.8}), we obtain
\begin{equation*}
\begin{split}
e^{\int_s^tH_0(\tau)d\tau}&\leq Ce^{\int_s^t\frac{\mu\widetilde{\sigma}}{3}R_\alphast^2(\tau)\big[P_1(R_\alphast(\tau))-P_0(R_\alphast(\tau))\big]d\tau}\\
&\leq Ce^{-\delta(t-s)}
\end{split}
\end{equation*}
for some $\delta>0$. Hence, the proof is complete.
$\Box$
\begin{lem}\label{lemma2}
If
\begin{equation*}
\int_0^\infty e^{2\delta_1t}|b(t)|^2dt\leq A, \ \ \ \ \ 0<\delta_1<\delta,
\end{equation*}
then
\begin{equation}\label{4.46a}
\int_0^\infty e^{2\delta_1t}\Big|\int_0^tb(s)e^{\int_s^tH_0(\tau)d\tau}ds\Big|^2dt\leq CA.
\end{equation}
\end{lem}
\noindent{\bf Proof.}\ Lemma \ref{lemma1} and the Cauchy--Schwarz inequality yield
\begin{equation*}
\begin{split}
J_0&\equiv \int_0^\infty e^{2\delta_1t}\Big|\int_0^tb(s)e^{\int_s^tH_0(\tau)d\tau}ds\Big|^2dt\\
&\leq C\int_0^\infty e^{2\delta_1t}\Big|\int_0^t|b(s)|e^{-\delta(t-s)}ds\Big|^2dt\\
&=C\int_0^\infty e^{2\delta_1t}\Big|\int_0^t|b(t-s)|e^{-\delta s}ds\Big|^2dt\\
&\leq C\int_0^\infty\left\{\int_0^t|b(t-s)|^2e^{2\delta_1(t-s)}e^{-\delta s+\delta_1s}ds\int_0^te^{-\delta s+\delta_1s}ds\right\}dt\\
&\leq C\int_0^\infty\int_0^t|b(t-s)|^2e^{2\delta_1(t-s)}e^{-(\delta-\delta_1)s}dsdt.
\end{split}
\end{equation*}
Changing the order of the integration, we obtain
\begin{equation*}
\begin{split}
J_0&\leq C\int_0^\infty\int_s^\infty |b(t-s)|^2e^{2\delta_1(t-s)}e^{-(\delta-\delta_1)s}dtds\\
&=C \int_0^\infty\int_0^\infty |b(t)|^2e^{2\delta_1t}e^{-(\delta-\delta_1)s}dtds\\
&\leq CA\int_0^\infty e^{-(\delta-\delta_1)s}ds\\
&\leq CA,
\end{split}
\end{equation*}
which completes our proof.
$\Box$
\begin{lem}\label{lemma3}
For $0<\delta_1<\delta$, the following estimate holds:
\begin{equation}\label{4.47}
\int_0^\infty e^{2\delta_1t}\big|L_1\big|^2dt\leq C|\varepsilon|^{-1}\big(F_{0,0}^1+F_{0,0}^2\big).
\end{equation}
\end{lem}
\noindent{\bf Proof.}\ Recall \cite[Lemmas 3.2 and 3.3]{FH1} that
\begin{equation}\label{0.4}
\left|\frac{\partial\xi_{1,n,m}}{\partial r}\Big|_{r=R_\alphast(t)}\right|^2\leq \frac{C}{n+1}\|f_{n,m}^1(\cdot,t)\|^2_{L^2(B_{R_\alphast(t)})},
\end{equation}
\begin{equation}\label{0.5}
\left|\frac{\partial\psi_{1,n,m}}{\partial r}\Big|_{r=R_\alphast(t)}\right|^2\leq \frac{C}{n+1}\big(\|f_{n,m}^1(\cdot,t)\|^2_{L^2(B_{R_\alphast(t)})}+\|f_{n,m}^2(\cdot,t)\|^2_{L^2(B_{R_\alphast(t)})}\big),
\end{equation}
so that, by (\ref{4.19}) and (\ref{4.20}), we obtain
\begin{equation*}
\begin{split}
&\int_0^\infty e^{2\delta_1t}\Big|\mu\frac{\partial\xi_{1,0,0}}{\partial r}\Big|_{r=R_\alphast(t)}-\frac{\partial\psi_{1,0,0}}{\partial r}\Big|_{r=R_\alphast(t)}\Big|^2dt\\
&\quad \leq C\int_0^\infty e^{2\delta_1t}\Big|\frac{\partial\xi_{1,0,0}}{\partial r}\Big|_{r=R_\alphast(t)}\Big|^2dt+C\int_0^\infty e^{2\delta_1t}\Big|\frac{\partial\psi_{1,0,0}}{\partial r}\Big|_{r=R_\alphast(t)}\Big|^2dt\\
&\quad \leq C\int_0^\infty e^{2\delta_1t}\|f_{0,0}^1\|^2_{L^2(B_{R_\alphast(t)})}dt+C\int_0^\infty e^{2\delta_1t}\big(\|f_{0,0}^1\|^2_{L^2(B_{R_\alphast(t)})}+\|f_{0,0}^2\|^2_{L^2(B_{R_\alphast(t)})}\big)dt\\
&\quad \leq C|\varepsilon|^{-1}\big(F_{0,0}^1+F_{0,0}^2\big).
\end{split}
\end{equation*}
Hence, it follows from Lemma \ref{lemma2} that (\ref{4.47}) holds.
$\Box$
\begin{lem}\label{lemma4}
The following estimates hold:
\begin{equation}\label{4.471}
\int_0^\infty e^{2\delta_1t}\big|L_2\big|^2dt\leq C|\varepsilon|^{-1}B_{0,0}^1,
\end{equation}
\begin{equation}\label{4.48}
\int_0^\infty e^{2\delta_1t}\big|L_3\big|^2dt\leq C|\varepsilon|^{-1}B_{0,0}^2.
\end{equation}
\end{lem}
\noindent{\bf Proof.}\ (\ref{4.21}) and Lemma \ref{lemma2} yield (\ref{4.471}). Moreover, by (\ref{2.17}), (\ref{4.22}) and Lemma \ref{lemma2}, we get (\ref{4.48}). Therefore, the proof is complete.
$\Box$
Combining Lemmas \ref{lemma3} and \ref{lemma4}, we establish an estimate for $\rho_{0,0}(t)$ as follows:
\begin{equation}\label{0.10}
\int_0^\infty e^{2\delta_1t}\big|\rho_{0,0}(t)-\rho_{M,0,0}(t)\big|^2dt\leq C|\varepsilon|\big(F_{0,0}^1+F_{0,0}^2+B_{0,0}^1+B_{0,0}^2\big).
\end{equation}
\subsection{Case 2: $n=1$}
Setting $n=1$ in (\ref{4.41}), the coefficient $\frac{n-1}{R_\ast(\tau)}$, the factor $\frac{n(n+1)}{2}-1$ and the difference $P_1(R_\ast(\tau))-P_n(R_\ast(\tau))$ all vanish, so that
\begin{equation*}
H_1(\tau)=0.
\end{equation*}
Therefore, (\ref{4.40}) implies that
\begin{equation}\label{3.33}
\rho_{1,m}(t)=\rho_{0,1,m}+\varepsilon\int_0^tQ_{1,m}(s)ds,
\end{equation}
where
\begin{equation}\label{3.34}
\begin{split}
Q_{1,m}(s)=&\mu\frac{\partial\xi_{1,1,m}}{\partial r}\Big|_{r=R_\alphast(s)}-\frac{\partial\psi_{1,1,m}}{\partial r}\Big|_{r=R_\alphast(s)}\\
&+b_{1,m}^1(s)+\mu R_\alphast(s)P_1(R_\alphast(s))b_{1,m}^2(s)-\frac{1}{R_\alphast(s)}b_{1,m}^3(s).
\end{split}
\end{equation}
As pointed out in the introduction, after the initial values are perturbed as in (\ref{0.68}), the domain of the system (\ref{1.2})-(\ref{1.6}) (or (\ref{3.150})-(\ref{3.190})) undergoes a translation owing to the mode 1 perturbations; that is to say, the mode 1 perturbations give rise both to the decay estimates in time $t$ and to a translation of the origin. In order to establish the asymptotic stability, we need to keep track of the center of the limiting sphere. We shall employ an iterative procedure in which at each iteration we shift the domain to a ``nearly'' optimal location with a new center $\varepsilon a=\varepsilon(a_1,a_2,a_3)$. Note that a translation of the origin does not change the equations (\ref{1.2})-(\ref{1.6}) (or (\ref{3.150})-(\ref{3.190})), but it does change the initial data. As stated in \cite[Section 6]{FH2}, in the new coordinate system, the initial data are changed as follows, for $m=-1,0,1$:
\begin{equation}\label{6.2}
\begin{split}
w_{0,1,m}\ \mbox{is\ replaced\ by}\ w_{0,1,m}+b_{m+2}\frac{\partial\sigma_\alphast(r,0)}{\partial r}+\varepsilon A_m,\\
\mbox{and}\ \rho_{0,1,m}\ \mbox{is\ replaced\ by}\ \rho_{0,1,m}-b_{m+2}+\varepsilon B_m,
\end{split}
\end{equation}
where $A_m$ and $B_m$ are bounded functions of $(a,\varepsilon)$, and $b_{m+2}$ satisfies
\begin{equation*}
b_1-b_3=a_1\sqrt{\frac{8\pi}{3}}, \ \ \ \ i(b_1+b_3)=-a_2\sqrt{\frac{8\pi}{3}}, \ \ \ \ b_2=a_3\sqrt{\frac{4\pi}{3}}.
\end{equation*}
Then, in the new coordinate system, the expression $\rho_{1,m}(t)$ is given by
\begin{equation}\label{3.35}
\rho_{1,m}(t)=\rho_{0,1,m}-b_{m+2}+\varepsilon B_m+\varepsilon\int_0^tQ_{1,m}(s)ds.
\end{equation}
We note that as we go to the system (\ref{3.150})-(\ref{3.190}), we shall always use the translated initial data. As in \cite[Page 632]{FH2}, we take $f^i$, $b^j$ in (\ref{3.150})-(\ref{3.190}) to be functions not only of $(r,\theta,\varphi,t)$, but also of the center $a$, which aims to accommodate the consistency condition of order 2. The function $Q_{1,m}(s)$ depends on $a$ implicitly through the dependence of $\xi_{1,1,m}$, $\psi_{1,1,m}$ and $b^j_{1,m}$ on $a$.
In order to find the center of the limiting sphere and establish the decay estimates in time $t$, we rewrite $\rho_{1,m}(t)$ as
\begin{equation}\label{3.36}
\begin{split}
\rho_{1,m}(t)&=\rho_{0,1,m}-b_{m+2}+\varepsilon B_m+\varepsilon\int_0^\infty Q_{1,m}(s)ds-\varepsilon\int_t^\infty Q_{1,m}(s)ds\\
&\triangleq F_m(a)-\varepsilon\int_t^\infty Q_{1,m}(s)ds.
\end{split}
\end{equation}
Let $F(a)=(F_{-1}(a),F_0(a),F_1(a))$.
We have the following critical theorem.
\begin{thm}\label{th3}
For $|\varepsilon|$ small, there exists a new center $\varepsilon a^\alphast(\varepsilon)$ such that
\begin{equation*}
F(a^\alphast(\varepsilon))=0.
\end{equation*}
\end{thm}
\noindent{\bf Proof.}\ By (\ref{3.36}), we have
\begin{equation*}
\begin{split}
F(a)
= E(a)+\varepsilon G(a).
\end{split}
\end{equation*}
Clearly, there exists $a_0$ such that $E(a_0)=0$ and $E'(a_0)$ is invertible. Moreover, $E'(a)$ and $G'(a)$ are continuous for $a\in \overline{B}_1(a_0)$. Hence, by applying Theorem \ref{th2}, we complete the proof.
$\Box$
Now we proceed to estimate the last term of the right-hand side of (\ref{3.36}). Recall \cite[Lemma 5.4]{FH2} that
\begin{equation*}
\int_0^\infty e^{2\delta_1t}\Big|\int_t^\infty b(\tau)d\tau\Big|^2dt\leq\frac{1}{\delta_1^2}\int_0^\infty e^{2\delta_1t}|b(t)|^2dt,
\end{equation*}
so that, by (\ref{2.17}), (\ref{0.4}), (\ref{0.5}) and (\ref{4.19})-(\ref{4.23}),
we have
the following estimate:
\begin{equation*}\label{3.37}
\begin{split}
&\int_0^\infty e^{2\delta_1t}\Big|\int_t^\infty Q_{1,m}(s)ds\Big|^2dt\\
&\leq C\int_0^\infty e^{2\delta_1t}\big|Q_{1,m}(t)\big|^2dt\\
&\leq C\int_0^\infty e^{2\delta_1t}\Big(\Big|\frac{\partial\xi_{1,1,m}}{\partial r}\Big|_{r=R_\alphast(t)}\Big|^2+\Big|\frac{\partial\psi_{1,1,m}}{\partial r}\Big|_{r=R_\alphast(t)}\Big|^2+|b_{1,m}^1|^2+|b_{1,m}^2|^2+|b_{1,m}^3|^2\Big)dt\\
&\leq C\int_0^\infty e^{2\delta_1t}\Big(\|f_{1,m}^1\|^2_{L^2}+\|f_{1,m}^2\|^2_{L^2}+|b_{1,m}^1|^2+|b_{1,m}^2|^2+|b_{1,m}^3|^2\Big)dt\\
&\leq C|\varepsilon|^{-1}\big(F_{1,m}^1+F_{1,m}^2+B_{1,m}^1+B_{1,m}^2+B_{1,m}^3\big).
\end{split}
\end{equation*}
It follows that
\begin{equation}\label{0.9}
\begin{split}
\int_0^\infty e^{2\delta_1t}|\rho_{1,m}(t)-F_m(a^\ast(\varepsilon))|^2dt&=|\varepsilon|^2\int_0^\infty e^{2\delta_1t}\Big|\int_t^\infty Q_{1,m}(s)ds\Big|^2dt\\
&\leq C|\varepsilon|\big(F_{1,m}^1+F_{1,m}^2+B_{1,m}^1+B_{1,m}^2+B_{1,m}^3\big).
\end{split}
\end{equation}
\subsection{Case 3: $n\geq2$}
In this subsection, we shall deal with the case $n\geq2$. We first establish
the following lemma:
\begin{lem}\label{lemma5}
Let $\mu<\mu_\ast$, where $\mu_\ast$ is defined by (\ref{0.16}). For $n\geq2$, there exists a small positive number $\delta$, independent of $n$, such that
\begin{equation}\label{4.49}
e^{\int_s^tH_n(\tau)d\tau}\leq e^{-\delta(n^3+1)(t-s)}.
\end{equation}
\end{lem}
\noindent{\bf Proof.}\
Since $0<\mu<\mu_\ast$ and $R_\ast(t)$ is periodic and bounded,
it follows from (\ref{4.41}) and (\ref{2.17}) that there exists $\gamma>0$ ($\gamma$ is independent of $n$) such that
\begin{equation*}
\begin{split}
H_n(\tau)
\leq-\gamma(n^3+1)
\end{split}
\end{equation*}
for $n$ sufficiently large, say $n>n_0$; this yields (\ref{4.49}) in the range $n>n_0$.
Recall \cite[Lemma 4.4]{HX} that
\begin{equation*}
\mu_\ast\leq\frac{\int_0^{T}\frac{j}{R_\ast^3(\tau)}\Big(\frac{j(j+1)}{2}-1\Big)d\tau}{\int_{0}^{T}\frac{\widetilde{\sigma}}{3}
R_\ast^2(\tau)\big[P_1(R_\ast(\tau))-P_j(R_\ast(\tau))\big]d\tau}, \ \ \ \ \ j\geq2.
\end{equation*}
Then, for each fixed $j\in[2,n_0]$ and $0<\mu<\mu_\ast$, since the integrands below are $T$-periodic (so that the integrals over $[s,s+T]$ coincide with those over $[0,T]$), we have
\begin{equation}\label{3.42}
\begin{split}
\int_{s}^{s+T}&\frac{j}{R_\ast^3(\tau)}\Big(\frac{j(j+1)}{2}-1\Big)d\tau-\mu\int_{s}^{s+T}\frac{\widetilde{\sigma}}{3}
R_\ast^2(\tau)\big[P_1(R_\ast(\tau))-P_j(R_\ast(\tau))\big]d\tau>\gamma_j,
\end{split}
\end{equation}
where $\gamma_j>0$ is a small constant depending on $(R_\ast,T,\mu)$ and $j$.
For $t>s$, there exist $m\in\mathbb{N}$ and $\nu\in[0,T)$ such that $t-s=mT+\nu$. Furthermore, by (\ref{4.41}) and (\ref{3.42}), we obtain
\begin{equation*}
\begin{split}
e^{\int_s^tH_j(\tau)d\tau}=&e^{\int_s^{s+\nu}H_j(\tau)d\tau+m\int_s^{s+T}H_j(\tau)d\tau}\\
=&\exp\Big\{\int_s^{s+\nu}\Big[R_\ast(\tau)\big[P_1(R_\ast(\tau))-P_j(R_\ast(\tau))\big]-\frac{j-1}{R_\ast(\tau)}\Big]\frac{dR_\ast(\tau)}{d\tau}d\tau\\
&-\int_s^{s+\nu}\frac{j}{R_\ast^3(\tau)}\Big(\frac{j(j+1)}{2}-1\Big)d\tau\\
&+\mu\int_s^{s+\nu}\frac{\widetilde{\sigma}}{3} R_\ast^2(\tau)\big[P_1(R_\ast(\tau))-P_j(R_\ast(\tau))\big]d\tau\\
&-m\int_s^{s+T}\frac{j}{R_\ast^3(\tau)}\Big(\frac{j(j+1)}{2}-1\Big)d\tau\\
&+m\mu\int_s^{s+T}\frac{\widetilde{\sigma}}{3}R_\ast^2(\tau)\big[P_1(R_\ast(\tau))-P_j(R_\ast(\tau))\big]d\tau\Big\}\\
\leq& e^{-m\widetilde{\delta}_j}=e^{-\frac{t-s-\nu}{T}\widetilde{\delta}_j}\leq e^{-\delta_j(t-s)}
\end{split}
\end{equation*}
for some $\widetilde{\delta}_j>0$ and $\delta_j>0$ (both $\widetilde{\delta}_j$ and $\delta_j$ depend on $(R_\ast,T,\mu)$ and $j$).
Let $\delta=\min\{\gamma,\frac{\delta_2}{2^3+1},\frac{\delta_3}{3^3+1},\cdots,\frac{\delta_{n_0}}{n_0^3+1}\}$; then (\ref{4.49}) holds for all $n\geq2$, and this completes the proof.
$\Box$
\begin{lem}\label{lemma6}
If
\begin{equation*}
\int_0^\infty e^{2\delta_1t}|b(t)|^2dt\leq A, \ \ \ \ \ 0<\delta_1<\delta,
\end{equation*}
then, for $n\geq2$,
\begin{equation}\label{4.46}
\int_0^\infty e^{2\delta_1t}\Big|\int_0^tb(s)e^{\int_s^tH_n(\tau)d\tau}ds\Big|^2dt\leq CA(n+1)^{-6},
\end{equation}
where $C$ is a constant independent of $n$.
\end{lem}
\noindent{\bf Proof.}\ It follows from Lemma \ref{lemma5} and the Cauchy--Schwarz inequality that
\begin{equation*}
\begin{split}
J_1&\equiv \int_0^\infty e^{2\delta_1t}\Big|\int_0^tb(s)e^{\int_s^tH_n(\tau)d\tau}ds\Big|^2dt\\
&\leq \int_0^\infty e^{2\delta_1t}\Big(\int_0^t|b(s)|e^{-\delta(n^3+1)(t-s)}ds\Big)^2dt\\
&=\int_0^\infty e^{2\delta_1t}\Big(\int_0^t|b(t-\tau)|e^{-\delta(n^3+1)\tau}d\tau\Big)^2dt\\
&\leq \int_0^\infty\left\{\int_0^t|b(t-\tau)|^2e^{2\delta_1(t-\tau)}e^{-\delta(n^3+1)\tau+\delta_1\tau}d\tau\int_0^te^{-\delta (n^3+1)\tau+\delta_1\tau}d\tau\right\}dt\\
&\leq C(n+1)^{-3}\int_0^\infty\int_0^t|b(t-\tau)|^2e^{2\delta_1(t-\tau)}e^{-\delta (n^3+1)\tau+\delta_1\tau}d\tau dt.
\end{split}
\end{equation*}
Changing the order of integration, we obtain
\begin{equation*}
\begin{split}
J_1&\leq C(n+1)^{-3}\int_0^\infty\int_\tau^\infty |b(t-\tau)|^2e^{2\delta_1(t-\tau)}e^{-\delta (n^3+1)\tau+\delta_1\tau}dtd\tau \\
&=C(n+1)^{-3} \int_0^\infty\int_0^\infty |b(t)|^2e^{2\delta_1t}e^{-\delta (n^3+1)\tau+\delta_1\tau}dtd\tau \\
&\leq CA(n+1)^{-3}\int_0^\infty e^{-\delta (n^3+1)\tau+\delta_1\tau}d\tau \\
&\leq CA(n+1)^{-6},
\end{split}
\end{equation*}
which completes our proof.
$\Box$
\begin{lem}\label{lemma7}
For all $n\geq2$ and $|m|\leq n$,
\begin{equation*}\label{4.470}
\int_0^\infty e^{2\delta_1t}\Big|\rho_{n,m}-\rho_{M,n,m}\Big|^2dt\leq C|\varepsilon|(n+1)^{-7}\big(F_{n,m}^1+F_{n,m}^2+B_{n,m}^1+B_{n,m}^2+B_{n,m}^3\big),
\end{equation*}
where $C$ is a constant independent of $n$.
\end{lem}
\noindent{\bf Proof.}\ By (\ref{0.3}), (\ref{2.17}), (\ref{0.4}), (\ref{0.5}) and (\ref{4.19})-(\ref{4.23}), we get
\begin{equation*}
\begin{split}
&\int_0^\infty e^{2\delta_1t}|Q_{n,m}(t)|^2dt\\
&\leq C\int_0^\infty e^{2\delta_1t}\Big(\Big|\frac{\partial\xi_{1,n,m}}{\partial r}\Big|_{r=R_\ast(t)}\Big|^2+\Big|\frac{\partial\psi_{1,n,m}}{\partial r}\Big|_{r=R_\ast(t)}\Big|^2+|b_{n,m}^1|^2+|b_{n,m}^2|^2+n^2|b_{n,m}^3|^2\Big)dt\\
&\leq C\int_0^\infty e^{2\delta_1t}\Big(\frac{1}{n+1}\|f_{n,m}^1\|^2_{L^2}+\frac{1}{n+1}\|f_{n,m}^2\|^2_{L^2}+|b_{n,m}^1|^2+|b_{n,m}^2|^2+n^2|b_{n,m}^3|^2\Big)dt\\
&\leq C|\varepsilon|^{-1}(n+1)^{-1}\big(F_{n,m}^1+F_{n,m}^2+B_{n,m}^1+B_{n,m}^2+B_{n,m}^3\big).
\end{split}
\end{equation*}
Furthermore, it follows from Lemma \ref{lemma6} that
\begin{equation*}
\begin{split}
\int_0^\infty e^{2\delta_1t}\Big|\rho_{n,m}-\rho_{M,n,m}\Big|^2dt
=&|\varepsilon|^2\int_0^\infty e^{2\delta_1t}\Big|\int_0^t Q_{n,m}(s)e^{\int_s^tH_n(\tau)d\tau}ds\Big|^2dt\\
\leq&C|\varepsilon|(n+1)^{-7}\big(F_{n,m}^1+F_{n,m}^2+B_{n,m}^1+B_{n,m}^2+B_{n,m}^3\big).
\end{split}
\end{equation*}
Hence, the proof is complete.
$\Box$
Combining (\ref{0.10}), (\ref{0.9}) and Lemma \ref{lemma7}, we obtain, in the new coordinate system,
\begin{equation}\label{0.11}
\int_0^\infty e^{2\delta_1t}\|\rho-\rho_M\|^2_{H^{7/2}(\partial B_{R_\ast(t)})}dt\leq C|\varepsilon|.
\end{equation}
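We remark that, expanding in spherical harmonics $Y_{n,m}$ on $\partial B_{R_\ast(t)}$ (whose radius is bounded above and below), one has the equivalence
\begin{equation*}
\|\rho-\rho_M\|^2_{H^{7/2}(\partial B_{R_\ast(t)})}\ \sim\ \sum_{n}\sum_{|m|\leq n}(n+1)^{7}\,|\rho_{n,m}-\rho_{M,n,m}|^2,
\end{equation*}
so the factor $(n+1)^{-7}$ in Lemma \ref{lemma7} exactly compensates the $H^{7/2}$ weight when summing over the modes $n\geq2$, while the remaining low modes are controlled by (\ref{0.10}) and (\ref{0.9}).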
\begin{rem}
In fact, $(w_M,q_M,\rho_M)$ is the solution of the corresponding linearized system of the original problem (\ref{1.2})-(\ref{1.6}), and its decay estimate in time $t$ has been derived in \cite{HZH3} for the two-dimensional case and in \cite{HX} for the three-dimensional case, respectively.
\end{rem}
Moreover, we have the following estimates for $w$, $q$ and $\frac{\partial\rho}{\partial t}$.
\begin{lem}\label{th5}
If $\delta_1$ is sufficiently small then
\begin{equation}\label{0.18}
\int_0^\infty e^{2\delta_1t}\|w-w_M\|^2_{H^{1}( B_{R_\ast(t)})}dt\leq C|\varepsilon|,
\end{equation}
\begin{equation}\label{0.19}
\int_0^\infty e^{2\delta_1t}\|q-q_M\|^2_{H^{2}( B_{R_\ast(t)})}dt\leq C|\varepsilon|,
\end{equation}
\begin{equation}\label{0.20}
\int_0^\infty e^{2\delta_1t}\Big\|\frac{\partial}{\partial t}(\rho-\rho_M)\Big\|^2_{H^{1/2}(\partial B_{R_\ast(t)})}dt\leq C|\varepsilon|.
\end{equation}
\end{lem}
\noindent{\bf Proof.}\ Observe that $\widetilde{w}\equiv w_{n,m}-w_{M,n,m}$ satisfies (\ref{4.13}) and (\ref{4.14}) with $\rho_{n,m}$ replaced by $\widetilde{\rho}\equiv\rho_{n,m}-\rho_{M,n,m}$, i.e.,
\begin{equation*}
\begin{split}
-\Delta \widetilde{w}+\left(\frac{n(n+1)}{r^2}+1\right)\widetilde{w}&=\varepsilon f_{n,m}^1(r,t) \qquad \qquad \quad \ \ \ \ \ \mbox{in} \ B_{R_\ast(t)},\ t>0,\\
\widetilde{w}(R_\ast(t),t)&=-\lambda\widetilde{\rho}(t)+\varepsilon b_{n,m}^2(t),\ \qquad \ \ \ t>0,
\end{split}
\end{equation*}
where $\lambda=\frac{\partial\sigma_\ast}{\partial r}\Big|_{r=R_\ast(t)}$.
Setting $V=\widetilde{w}-\widetilde{w}|_{r=R_\ast(t)}$, we obtain
\begin{equation*}
\begin{split}
-\Delta V+\left(\frac{n(n+1)}{r^2}+1\right)V&=\varepsilon f_{n,m}^1+\left(\frac{n(n+1)}{r^2}+1\right)(\lambda\widetilde{\rho}-\varepsilon b_{n,m}^2) \ \ \ \mbox{in} \ B_{R_\ast(t)},\ t>0,\\
V(R_\ast(t),t)&=0, \qquad \qquad \quad \ t>0.
\end{split}
\end{equation*}
Multiplying both sides by $V$ and integrating over $B_{R_\ast(t)}$, we get
\begin{equation*}
\begin{split}
&\int_{B_{R_\ast(t)}}|\nabla V|^2dx+\int_{B_{R_\ast(t)}}\left(\frac{n(n+1)}{r^2}+1\right)|V|^2dx\\
&=\int_{B_{R_\ast(t)}}\varepsilon f_{n,m}^1Vdx+\int_{B_{R_\ast(t)}}\left(\frac{n(n+1)}{r^2}+1\right)\lambda\widetilde{\rho} Vdx-\int_{B_{R_\ast(t)}}\left(\frac{n(n+1)}{r^2}+1\right)\varepsilon b_{n,m}^2Vdx.
\end{split}
\end{equation*}
By Young's inequality, we further have
\begin{equation*}
\begin{split}
&\int_{B_{R_\ast(t)}}|\nabla V|^2dx+\int_{B_{R_\ast(t)}}\left(\frac{n(n+1)}{r^2}+1\right)|V|^2dx\\
&\leq C|\varepsilon|^2\|f_{n,m}^1\|^2_{L^{2}( B_{R_\ast(t)})}+C(n^2+1)|\widetilde{\rho}(t)|^2+C(n^2+1)|\varepsilon|^2|b^2_{n,m}(t)|^2.
\end{split}
\end{equation*}
Then, integrating against $e^{2\delta_1t}\,dt$, summing over the modes $n$, $m$, and using (\ref{4.1}), (\ref{0.11}) and (\ref{4.4}), we obtain
\begin{equation*}
\begin{split}
\int_0^\infty e^{2\delta_1t}\|w-{w}_M\|^2_{H^{1}( B_{R_\ast(t)})}dt\leq& C|\varepsilon|^2\int_0^\infty e^{2\delta_1t}\|f^1\|^2_{L^{2}( B_{R_\ast(t)})}dt\\
&+C\int_0^\infty e^{2\delta_1t}\|\rho-\rho_M\|^2_{H^{7/2}(\partial B_{R_\ast(t)})}dt\\
&+C|\varepsilon|^2\int_0^\infty e^{2\delta_1t}\|b^2\|^2_{H^{1}(\partial B_{R_\ast(t)})}dt\\
\leq& C|\varepsilon|,
\end{split}
\end{equation*}
which proves (\ref{0.18}).
Using (\ref{0.18}) and the elliptic estimates, we derive (\ref{0.19}). It follows from (\ref{3.170}), (\ref{0.11}), (\ref{0.19}) and (\ref{4.3}) that (\ref{0.20}) holds. Therefore, the proof is complete.
$\Box$
\section{Stability for $\mu<\mu_\ast$}
In this section, we shall show that the mapping $S$ defined in Remark \ref{rem1} admits a fixed point, thereby establishing the global existence for the system (\ref{1.2})-(\ref{1.6}) and the asymptotic stability of the radially symmetric $T$-periodic positive solution $(\sigma_\ast(r,t),p_\ast(r,t),R_\ast(t))$. Since the argument parallels that of \cite[Pages 633--637]{FH2}, we only give an outline and omit most of the details.
We first have the following H\"{o}lder estimates on $\rho$, $w$, $q$.
\begin{lem}\label{lemma9}
After the change of variables $x\rightarrow x+\varepsilon a^\ast(\varepsilon)$ is performed on the initial data through (\ref{6.2}), the problem (\ref{3.150})-(\ref{3.190}) admits, for all $t>0$, a unique solution $(w,q,\rho)$ satisfying
\begin{equation}\label{0.30}
\begin{split}
\|w\|_{C^{2+2\alpha/3,1+\alpha/3}(B_{R_\ast(t)}\times[0,\infty))}\leq C,
\end{split}
\end{equation}
\begin{equation}\label{0.31}
\begin{split}
\|q\|_{C^{2+\alpha,\alpha/3}(B_{R_\ast(t)}\times[0,\infty))}\leq C,
\end{split}
\end{equation}
\begin{equation}\label{0.32}
\begin{split}
\|\rho,D_x\rho\|_{C^{3+\alpha,1+\alpha/3}(\partial B_{R_\ast(t)}\times[0,\infty))}\leq C.
\end{split}
\end{equation}
\end{lem}
The proof of this lemma is similar to that of \cite[Lemma 7.1]{FH2}, so we omit it here.
Introduce the space $X$ of functions $\Phi=(f^1,f^2,b^1,b^2,b^3)$, where the norm $\|\Phi\|$ is defined as the maximum of the left-hand sides of (\ref{4.1})-(\ref{4.10}) with the factor $\sqrt{|\varepsilon|}$ dropped, and set
\begin{equation*}
X_1=\{\Phi\in X:\ \sqrt{|\varepsilon|}\|\Phi\|\leq1\},
\end{equation*}
then we define a new function $\widetilde{\Phi}\equiv S\Phi=(\widetilde{f}^1,\widetilde{f}^2,\widetilde{b}^1,\widetilde{b}^2,\widetilde{b}^3)$ as follows (cf. (\ref{0.17})):
\begin{equation*}
\begin{split}
\widetilde{f}^1=A_\varepsilon w, \ \ \ \ \widetilde{f}^2=A_\varepsilon q,\ \ \ \ \ \widetilde{b}^1=B_\varepsilon^1,\ \ \ \ \ \widetilde{b}^2=B_\varepsilon^2,\ \ \ \ \ \widetilde{b}^3=B_\varepsilon^3,
\end{split}
\end{equation*}
where $(w,q,\rho)$ is the solution of (\ref{3.150})-(\ref{3.190}).
As in the proof of \cite[Lemma 7.2]{FH2}, which is based on (\ref{0.30})-(\ref{0.32}) and (\ref{0.11})-(\ref{0.20}), we obtain that $S$ maps $X_1$ into itself and that $S$ is a contraction. Hence, the mapping $S$ admits a unique fixed point.
By the above argument, we can now state the main result of this paper.
\begin{thm}\label{th4}
Consider the problem (\ref{1.2})-(\ref{1.6}) with the initial data (\ref{0.68}) satisfying (\ref{0.57}), and let $\mu<\mu_\ast$. If $|\varepsilon|$ is sufficiently small, then there exists a unique global solution of this problem. Moreover, there exists a new center $\varepsilon a^\ast(\varepsilon)$, where $a^\ast(\varepsilon)$ is a bounded function of $\varepsilon$, such that for some $\overline{t}>0$, $\partial\Omega(t)$ behaves like
\begin{equation*}
\partial B_{R_\ast(t)}(\varepsilon a^\ast(\varepsilon))=\big\{x:|x-\varepsilon a^\ast(\varepsilon)|=R_\ast(t)\big\}, \ \ \ \ \ t>\overline{t}.
\end{equation*}
\end{thm}
\begin{rem}
The proof of Theorem \ref{th4} shows that, after the translation of the origin $0\rightarrow\varepsilon a^\ast(\varepsilon)$ and the Hanzawa transformation, the global solution in the new variables $(r,\theta,\varphi)$ is of the form:
\begin{equation*}\label{3.1}
\begin{split}
\sigma(r,\theta,\varphi,t)&=\sigma_\ast(r,t)+\varepsilon w(r,\theta,\varphi,t),\\
p(r,\theta,\varphi,t)&=p_\ast(r,t)+\varepsilon q(r,\theta,\varphi,t),\\
\partial\Omega(t):\ r&=R_\ast(t)+\varepsilon\rho(\theta,\varphi,t),
\end{split}
\end{equation*}
where $w$, $q$, $\rho$ satisfy (\ref{0.30})-(\ref{0.32}).
\end{rem}
\section{Acknowledgments}
This research is supported by the China Postdoctoral Science Foundation (Grant No. 2020M683014). I would like to thank Professor S.B. Cui for his encouragement and helpful suggestions. I also thank Professor B. Hu for his advice during the preparation of this paper and his constant encouragement.
\end{CJK}
\end{document}
\begin{document}
\title{Degenerations of non-simple abelian surfaces}
\begin{abstract}
We study degenerations of non-simple principally polarized abelian surfaces to the boundary in the toroidal compactification of $\mathcal{A}_2$, and describe the degenerate abelian surfaces as well as the degenerate elliptic curves that live inside them.
\end{abstract}
\section{Introduction}
Given a family of non-simple principally polarized abelian varieties, it is natural to ask how such a family could degenerate and try to understand the limit of the abelian subvarieties as well. For example, in \cite{ABH}, the authors studied degenerations of Prym varieties in the second Voronoi compactification of the moduli space of principally polarized abelian varieties. We recall \cite{Alexeev} that the second Voronoi compactification of $\mathcal{A}_g$ is functorial in the sense that the boundary elements can be interpreted as degenerate abelian varieties. In \cite{ABH}, the authors studied how Jacobian varieties of smooth projective curves that are double covers of another curve degenerate to the boundary in this toroidal compactification, all the while keeping track of how the abelian subvariety corresponding to the associated Prym variety degenerates as well.
It seems interesting to understand how abelian subvarieties degenerate when the abelian variety they live in degenerates to the boundary of the moduli space. It is this question that we begin to look at in this article, particularly in the case of principally polarized abelian \textit{surfaces}.
For $g=2$, all known toroidal compactifications of $\mathcal{A}_2$ coincide, and so we will be speaking of \textit{the} toroidal compactification of $\mathcal{A}_2$, and denote it by $\mathcal{A}_2^*$. Our article is separated into the study of two subproblems:
\begin{itemize}
\item Understand the closure of the moduli space of non-simple principally polarized abelian surfaces in $\mathcal{A}_2^*$
\item Study families of non-simple principally polarized abelian surfaces that degenerate to the boundary, and understand the degenerations of the elliptic curves.
\end{itemize}
As for the first point, we give a complete characterization of the boundary points of the moduli space of non-simple principally polarized abelian surfaces. Indeed, our first main result says the following:
\begin{theorem}
Let $m\geq 2$, let $\mathcal{E}_m\subseteq\mathcal{A}_2$ be the moduli space of non-simple principally polarized abelian surfaces that contain an elliptic curve of degree $m$, and let $\mathcal{E}_{m}^{\ast}$ be the closure of $\mathcal{E}_{m}$ in $\mathcal{A}_{2}^{\ast}.$ Then set-theoretically we have that
$$\mathcal{E}_{m}^{\ast} = \mathcal{E}_{m}\sqcup K^{0}(1)[m]\sqcup\mathbb{P}_{\infty}^{1},$$
where $K^{0}(1)[m]$ is a certain subvariety of the universal Kummer surface over $\mathcal{A}_1$, and $\mathbb{P}_{\infty}^{1}$ is a projective line that lies on the border. Moreover, $\overline{K^0(1)[m]}\cap\mathbb{P}_\infty^1$ consists of $\varphi(m)+1$ points and the boundary of $\mathcal{E}_m^*\cap\mathcal{E}_n^*$ is $\mathbb{P}_\infty^1$ if $m\neq n$.
\end{theorem}
We remark that the intersection of the closure of $K^{0}(1)[m]$ with $\mathbb{P}_\infty^{1}$ appears to be quite complicated with nasty singularities. It may be interesting to study in the future.
As for the second part of the article, we take the boundary points studied in the above theorem and look at degenerate abelian surfaces lying above them using a construction of Mumford \cite{complete-deg} (see \cite{Hulek} as well). Among other properties, we obtain the following result:
\begin{theorem}
Using Mumford's construction, the pairs that consist of a degenerate abelian surface $X$ along with the degenerate subelliptic curve $F$ lying over the boundary of $\mathcal{E}_m^*$ are the following:
\begin{enumerate}
\item $X$ is a $\mathbb{P}^1$-bundle over an elliptic curve and $F$ is an $m$-gon of $\mathbb{P}^1$s.
\item $X=\mathbb{P}^1\times\mathbb{P}^1$ and $F$ is a nodal curve if $m=2$
\item $X=\mathbb{P}^1\times\mathbb{P}^1$ and $F$ is an $(m-1)$-gon of $\mathbb{P}^1$s if $m\geq 3$
\item $X$ is the union of two copies of $\mathbb{P}^2$ with the blow-up of $\mathbb{P}^2$ at three points, and $F$ is a $(2m-1)$-gon of $\mathbb{P}^1$s.
\end{enumerate}
\end{theorem}
The structure of this article is as follows: In Section \ref{preliminaries} we go over the necessary preliminaries on non-simple principally polarized abelian surfaces, as well as the toroidal compactification of $\mathcal{A}_2$. In Section \ref{compactification} we describe the compactification of $\mathcal{E}_m$ in $\mathcal{A}_2^*$, and in Section \ref{degeneration} we look at the actual families that degenerate to the boundary.
\textbf{Acknowledgments}
I would like to thank my advisor Dr. Robert Auffarth, who suggested this problem to me, for his many helpful comments regarding both the content and the writing of this article.
This work was funded by a scholarship of the Mathematics Department of Universidad de Chile and by the MIUR Excellence Department Project MATH@TOV, awarded to the Department of Mathematics, University of Rome Tor Vergata, CUP E83C18000100006.
\section{Preliminaries}\label{preliminaries}
In this section we will review some preliminaries on non-simple principally polarized abelian surfaces and toroidal compactifications.
\subsection{Non-simple principally polarized abelian surfaces} Let $\mathcal{A}_2$ denote the (coarse) moduli space of principally polarized abelian surfaces, and define
\[\mathcal{E}_m:=\{(A,\Theta)\in\mathcal{A}_2:A\text{ contains an elliptic curve }E\text{ with }\deg(\Theta|_E)=m\}.\]
The space $\mathcal{A}_2$ can be described as a quotient of the Siegel upper-half space $\mathbb{H}_2$ by the symplectic group $\mathrm{Sp}(4,\mathbb{Z})$; let
\[p:\mathbb{H}_2\to\mathcal{A}_2\]
denote the natural projection.
It is by now a classical fact (see, for instance, \cite[Corollary 5.5]{Kani}) that $\mathcal{E}_m$ is irreducible of dimension 2, and that a matrix $(\tau_{ij})\in\mathbb{H}_2$ lies in $p^{-1}(\mathcal{E}_m)$ if and only if there exists a primitive vector $(a,b,c,d,e)\in\mathbb{Z}^5$ such that the following two equations are satisfied:
\begin{eqnarray}\label{1}m^2&=&b^2-4(ac+de)\\
\label{2}0&=&a\tau_{11}+b\tau_{12}+c\tau_{22}+d(\tau_{11}\tau_{22}-\tau_{12}^2)+e
\end{eqnarray}
Because of this, following \cite{num-char}, if $v:=(a,b,c,d,e)\in\mathbb{Z}^5$ is a vector that satisfies equation \eqref{1}, then we define
\[\mathbb{H}_2(v):=\{\tau\in\mathbb{H}_2:\tau\text{ satisfies }\eqref{2}\}.\]
Therefore
\[\mathbb{E}_m:=p^{-1}(\mathcal{E}_m)=\bigcup_{v}\mathbb{H}_2(v)\]
where the union goes over all $v$ that satisfy equation \eqref{1}. By \cite[Lemma 3.7]{num-char}, we have that for every primitive $v\in\mathbb{Z}^5$ that satisfies equation \eqref{1},
\[\mathcal{E}_m=p(\mathbb{H}_2(v)).\]
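For instance, for any $c,e\in\mathbb{Z}$ with $\mathrm{gcd}(m,c,e)=1$, the primitive vector $v=(0,m,c,0,e)$ satisfies equation \eqref{1}, since $b^2-4(ac+de)=m^2-4(0\cdot c+0\cdot e)=m^2$; these are precisely the vectors that will appear in Lemma \ref{tresdos} below.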
\subsection{Toroidal compactifications} In this subsection we will briefly outline the necessary notations and terms we will use in what follows with regards to toroidal compactifications. For a full treatment of the toroidal compactification of $\mathcal{A}_2$ we recommend the reader consult \cite{Hulek}. In particular the notations we will use in this article come from this book. For a more general treatment we recommend the reader consult \cite{mumfordetal} or \cite{Namikawa}.
Throughout this subsection $F$ denotes a rational boundary component of $\mathbb{H}_{2}$ (see \cite[Definition 3.5]{Hulek}, and the remark below it). Starting from a collection
$$\Sigma = \{\Sigma(F) : F \hspace{0.2cm}\text{rational boundary component}\}$$
of fans in the vector space $\mathrm{Sym}(2,\mathbb{R})$ of $2\times 2$ real symmetric matrices satisfying a certain \emph{admissibility condition} (see \cite[Definition 3.66]{Hulek}), we can obtain a so-called toroidal compactification $\mathcal{A}_{2}^{\Sigma}.$ Throughout this work we write $\mathcal{A}_{2}^{\ast}$ for $\mathcal{A}_{2}^{\Sigma}$ when
$\Sigma$ is the Legendre decomposition defined in \cite[Part I,Definition 3.117]{Hulek}.
More concretely, the toroidal compactification $\mathcal{A}_{2}^{\ast}$ is obtained by gluing \emph{partial compactifications} $Y_{\Sigma(F)}(F)$ in a certain way, where $F$ runs over the set of rational boundary components of $\mathbb{H}_{2}$ and $\Sigma(F)$ is a fan in $\mathrm{Sym}(2,\mathbb{R})$ as in the previous paragraph.
Each partial compactification is obtained by the following procedure:
\begin{enumerate}
\item Take the partial quotient $X(F):= P^{\prime}(F)\setminus\mathbb{H}_{2},$ where $P^{\prime}(F)$ is a certain subgroup of $\mathrm{Sp}(4,\mathbb{Z})$ defined in \cite[Definition 3.48]{Hulek}.
\item Consider the space $\mathfrak{X}_{\Sigma(F)}$ defined in \cite[Definition 3.52]{Hulek}, which is obtained using the complex toric variety associated to the fan $\Sigma(F),$ and compute the closure $X_{\Sigma(F)}$ of $X(F)$ in $\mathfrak{X}_{\Sigma(F)}.$
\item Finally, take the quotient $Y_{\Sigma(F)}$ of $X_{\Sigma(F)}$ by the action of the group $P^{\prime\prime}(F)$ defined in \cite[Definition 3.48]{Hulek}. We will write
\begin{equation}
\label{defqF}
q_{F}: X_{\Sigma(F)}\to Y_{\Sigma(F)}
\end{equation}
for the corresponding quotient map.
\end{enumerate}
As we said before, $\mathcal{A}_{2}^{\ast}$ is obtained by gluing the partial compactifications $Y_{\Sigma(F)}.$ That is, $\mathcal{A}_{2}^{\ast}$ is a quotient of the disjoint union of the partial compactifications under a certain equivalence relation (\cite[Definition 3.74]{Hulek}). For a rational boundary component $G$ we will write
\begin{equation}
\label{defpF}
p_{G}^{\ast}: Y_{\Sigma(G)}\rightarrow\mathcal{A}_{2}^{\ast}
\end{equation}
for the composition
$$Y_{\Sigma(G)}\hookrightarrow\coprod_{F} Y_{\Sigma(F)}\twoheadrightarrow\mathcal{A}_{2}^{\ast},$$
where the right arrow is the corresponding quotient map.
In fact, to obtain $\mathcal{A}_{2}^{\ast}$ it is enough to study the partial compactifications corresponding to three distinguished rational boundary components: $F_{0} = \mathbb{H}_{2},F_{1}\cong\mathbb{H}$ and $F_{2} = \{\mathrm{id}\}.$ The component $F_k$ will be called the \textbf{corank $k$ boundary component}. It happens that the partial compactification $Y_{\Sigma(F_0)}$ is just $\mathcal{A}_{2},$ but the components $F_{1}$ and $F_{2}$ add new boundary points.
\begin{definicion}
We write
$$\partial_{F_1}\mathcal{A}_{2}^{\ast} := p_{F_1}^{\ast}\left(Y_{\Sigma(F_1)}\right)-\mathcal{A}_{2} = p_{F_1}^{\ast}\left(P^{\prime\prime}(F_1)\setminus\partial X_{\Sigma(F_1)}\right),$$
where $\partial X_{\Sigma(F_1)} = X_{\Sigma(F_1)}-X(F_1)$ is the usual topological boundary.
We also write
$$\partial_{F_2}\mathcal{A}_{2}^{\ast} = p_{F_2}^{\ast}\left(Y_{\Sigma(F_2)}\right)-\left(\mathcal{A}_{2}\cup\partial_{F_1}\mathcal{A}_{2}^{\ast}\right).$$
\end{definicion}
We may think of $\partial_{F_k}$ as ``the set which is added to the boundary of $\mathcal{A}_{2}^{\ast}$ as a contribution of the corank $k$ boundary component''.
\subsubsection{Corank 1 boundary component}
For the corank 1 boundary component we have that the group $P^{\prime}(F_1)$ is isomorphic to $\mathbb{Z}$ and the quotient $X(F_1)$ can be identified with the image of the map $e_{1}:\mathbb{H}_{2}\to\mathbb{C}^{\times}\times\mathbb{C}\times\mathbb{H}$ given by
$$\begin{pmatrix}
\tau_{1} & \tau_{2} \\
\tau_{2} & \tau_{3} \\
\end{pmatrix}\longrightarrow (e^{2\pi i\tau_1},\tau_{2},\tau_{3}).$$
The space $\mathfrak{X}_{\Sigma(F_1)}$ is nothing but $\mathbb{C}\times\mathbb{C}\times\mathbb{H}$ and $\partial X_{\Sigma(F_1)} = \{0\}\times\mathbb{C}\times\mathbb{H}.$
\begin{definicion}
\label{Kcerouno}
Let $K^{0}(1)$ be the surface defined by the quotient of $\mathbb{C}\times\mathbb{H}$ by the equivalence relation which identifies $(z,\tau)$ with
$$\left(\frac{\varepsilon z +m\tau+n}{c\tau+d},\frac{a\tau+b}{c\tau+d}\right),$$
for $\varepsilon\in\{\pm 1\},m,n\in\mathbb{Z}$ and $\begin{pmatrix}
a & b \\
c & d \\
\end{pmatrix}\in\mathrm{SL}(2,\mathbb{Z}).$
\end{definicion}
Note that we have a fibration $K^{0}(1)\to\mathcal{A}_{1}$ whose fiber over $[\tau]\in\mathcal{A}_{1}$ is $E_{\tau}/\langle z\mapsto -z\rangle,$ whenever $\mathrm{Stab}_{\mathrm{SL}(2,\mathbb{Z})}(\tau)=\{\pm\mathrm{id}\}.$
Studying the action of the group $P^{\prime\prime}(F_1)$ on $\{0\}\times\mathbb{C}\times\mathbb{H} = \partial X_{\Sigma(F_1)}$ (as in \cite[Part I, Proposition 3.101]{Hulek}) one can easily see that the quotient is precisely the surface $K^{0}(1)$ defined above.
\subsubsection{Corank 2 boundary component}
The case of corank 2 is a little subtler. In this case the partial quotient $X(F_2)$ can be identified with the image of the map $e_{2}:\mathbb{H}_{2}\to(\mathbb{C}^{\times})^{3}$ given by
$$\begin{pmatrix}
\tau_{1} & \tau_{2} \\
\tau_{2} & \tau_{3} \\
\end{pmatrix}\mapsto (e^{2\pi i\tau_1},e^{2\pi i\tau_{2}},e^{2\pi i\tau_{3}})$$
and the space $\mathfrak{X}_{\Sigma(F_2)}$ is the toroidal embedding $(\mathbb{C}^{\times})^{3}\subseteq T_{\Sigma(F_2)}$ associated to the Legendre decomposition defined in \cite[Definition 3.117]{Hulek}.
The toroidal embedding $(\mathbb{C}^{\times})^{3}\subseteq T_{\Sigma(F_2)}$ is locally described by immersions $\iota_{n}: (\mathbb{C}^{\times})^{3}\hookrightarrow T_{n}\cong \mathbb{C}^{3}$ given by
$$(t_{1},t_{2},t_{3})\mapsto(t_{1}t_{2}^{-(2n+1)}t_{3}^{n(n+1)},t_{2}t_{3}^{-n},t_{2}^{-1}t_{3}^{n+1}),$$
where $n\in\mathbb{Z}.$
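For instance, for $n=-1$ this immersion becomes
$$(t_{1},t_{2},t_{3})\mapsto(t_{1}t_{2},t_{2}t_{3},t_{2}^{-1}),$$
giving the chart $T_{-1}$ that appears below (and coinciding with the map \eqref{varphi} used in Section \ref{degeneration}), while for $n=0$ one obtains $(t_{1},t_{2},t_{3})\mapsto(t_{1}t_{2}^{-1},t_{2},t_{2}^{-1}t_{3}).$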
By \cite[Remarks 3.149 and 3.156]{Hulek} we have that $\partial_{F_2}\mathcal{A}_{2}^{\ast}$ is a certain quotient of the axis $\{0\}\times\mathbb{C}\times\{0\}\subseteq T_{-1}$ and is isomorphic to $\mathbb{P}^{1}.$
Summarizing, we have the following:
\begin{theorem}
\label{Adosestrella}
Let $\mathcal{A}_{2}^{\ast}$ be the toroidal compactification of $\mathcal{A}_{2}$ associated to the Legendre decomposition. Set-theoretically we have that
$$\mathcal{A}_{2}^{\ast} = \mathcal{A}_{2}\sqcup K^{0}(1)\sqcup\mathbb{P}_{\infty}^{1},$$
where:
\begin{itemize}
\item $K^{0}(1)$ is the relative Kummer surface $K^{0}(1)\to\mathcal{A}_{1}$ defined in Definition \ref{Kcerouno}, which is a certain quotient of $\{0\}\times\mathbb{C}\times\mathbb{H}\subseteq\mathfrak{X}_{\Sigma(F_1)}$
\item $\mathbb{P}_{\infty}^1$ is a copy of $\mathbb{P}^{1}$ which is a quotient of the axis $\{0\}\times\mathbb{C}\times\{0\}\subseteq\mathbb{C}^{3}\cong T_{-1}\subseteq\mathfrak{X}_{\Sigma(F_2)}.$
\end{itemize}
\end{theorem}
\begin{proof}
See \cite[Part I, Chapter 3, Theorem 3.151]{Hulek} and the remarks below it.
\end{proof}
\begin{definicion}
The $\mathbb{P}_{\infty}^{1}$ from the above theorem will be called the \emph{peripheral $\mathbb{P}^1$}.
\end{definicion}
\section{Compactification of $\mathcal{E}_{m}$}\label{compactification}
In this section we will determine the closure $\mathcal{E}_{m}^{\ast}$ of $\mathcal{E}_m$ in the toroidal compactification $\mathcal{A}_2^*$.
Again let $\mathbb{E}_{m} = p^{-1}(\mathcal{E}_{m}),$ where $p:\mathbb{H}_{2}\to\mathcal{A}_{2}$ is the canonical projection. In order to find the closure of $\mathcal{E}_m$ in $\mathcal{A}_2^*$ we have to compute the closure of the image of $\mathbb{E}_{m}$ in $X_{\Sigma(F_r)}$ for $r\in\{1,2\}.$ We denote by $e_{r}:\mathbb{H}_{2}\to X(F_r) = P^{\prime}(F_r)\setminus\mathbb{H}_{2}$ the partial quotient of corank $r.$
Using the notation of the previous section, we have that
$$\mathbb{E}_{m} = \bigcup_{v} \mathbb{H}_{2}(v),$$
where the union runs over the elements $v\in\mathbb{Z}^{5}$ which satisfy equation \eqref{1}. In what follows, when we use the notation $\mathbb{H}_2(v)$ we will \textbf{always} assume that $v$ satisfies equation \eqref{1}, and so we will frequently leave it out.
In particular, we have that
$$\overline{e_{r}(\mathbb{E}_{m})}\supseteq\bigcup_{v}\overline{e_{r}(\mathbb{H}_{2}(v))},$$
where the closure is taken in $X_{\Sigma(F_r)}$ (which can naturally be seen as a subset of $\mathbb{C}^{3}$). What makes our calculations easier is the following result:
\begin{lemma}
We have that
$$\overline{e_{r}(\mathbb{E}_{m})} = \bigcup_{v}\overline{e_{r}(\mathbb{H}_{2}(v))}.$$
\end{lemma}
\begin{proof}
As before, let $Y_{\Sigma(F)}$ denote the quotient $P''(F)\backslash X_{\Sigma(F)}$, let $q_F:X_{\Sigma(F)}\to Y_{\Sigma(F)}$ be the natural projection and let $p_F^*:Y_{\Sigma(F)}\to\mathcal{A}_2^*$ be the gluing map.
Take $x\in\overline{q_{F}e_{F}(\mathbb{E}_m)}$, and let $z\in q_F^{-1}(x)$. We claim that $z\in\overline{e_F(\mathbb{H}_2(v))}$ for some $v\in\mathbb{Z}^5$ that satisfies equation \eqref{1}.
Since $P''(F)$ acts properly discontinuously on $X_{\Sigma(F)}$, there exists an open neighborhood $U\subseteq X_{\Sigma(F)}$ of $z$ such that $h(U)\cap U\neq\varnothing$ if and only if $h\in\mathrm{Stab}_{P''(F)}(z)$. Note that this stabilizer is finite. On the other hand, $Y_{\Sigma(F)}$ is an analytic space and, by \cite[Theorem II.4.7]{Dem}, $q_F(U)\cap q_Fe_F(\mathbb{E}_m)$ has a finite number of irreducible components. Therefore $x$ must be in the closure of one of these components. Now each of these components must be an irreducible component of $q_F(U)\cap q_Fe_F(\mathbb{H}_2(v))$ for some $v$, and we therefore have that $x\in\overline{q_Fe_F(\mathbb{H}_2(v))}$.
We have an open embedding $\mathrm{Stab}(z)\backslash U\hookrightarrow Y_{\Sigma(F)}$, and since $x\in\overline{q_Fe_F(\mathbb{H}_2(v))}$, we have that the image of $z$ in $\mathrm{Stab}(z)\backslash U$ is in the closure of the image of $e_F(\mathbb{H}_2(v))$ in $\mathrm{Stab}(z)\backslash U$. We therefore have that
\[z\in\overline{\bigcup_{h\in\mathrm{Stab}(z)}he_F(\mathbb{H}_2(v))}=\bigcup_{h\in\mathrm{Stab}(z)}\overline{he_F(\mathbb{H}_2(v))}\]
since $\mathrm{Stab}(z)$ is finite. Now $he_F(\mathbb{H}_2(v))=e_F(\mathbb{H}_2(w))$ for some $w$, and the proof is finished.
\end{proof}
This lemma implies that, in order to find the closure of $\mathcal{E}_m$ in $\mathcal{A}_2^*$, we only need to find the closure of $e_r(\mathbb{H}_2(v))$ in $X_{\Sigma(F_r)}$, for $r\in\{1,2\}$ and every $v$ that satisfies equation \eqref{1}.
\subsection{Corank 1 boundary component}
We will first study the closure in the corank 1 boundary component. As was said in Section \ref{preliminaries}, in this case the partial quotient can be identified with the map $e_{1}:\mathbb{H}_{2}\to\mathbb{C}^{\times}\times\mathbb{C}\times\mathbb{H}$ given by
$$\begin{pmatrix}
\tau_{1} & \tau_{2} \\
\tau_{2} & \tau_{3} \\
\end{pmatrix}\longrightarrow (e^{2\pi i\tau_1},\tau_{2},\tau_{3}).$$
\begin{lemma}
\label{tresdos}
Let $v=(a,b,c,d,e)\in\mathbb{Z}^{5}$ be a vector that satisfies \eqref{1}. We have that $\overline{e_{1}(\mathbb{H}_{2}(v))}$ intersects the boundary of $X_{\Sigma(F_1)}\subseteq\mathbb{C}\times\mathbb{C}\times\mathbb{H}=\mathfrak{X}_{\Sigma(F_1)}$ if and only if $v=(0,\pm m,c,0,e)$ for some $c,e\in\mathbb{Z}$ with $\mathrm{gcd}(m,c,e)=1.$ More precisely, we have that
$$\overline{e_{1}(\mathbb{H}_{2}(v))}\cap \partial X_{\Sigma(F_1)} = \begin{cases}
\emptyset & \text{if $(a,d)\neq(0,0)$} \\
\left\{\left(0,\frac{c\tau+e}{m},\tau\right) : \tau\in\mathbb{H}\right\} & \text{if $v = (0,m,c,0,e)$}
\end{cases}.$$
\end{lemma}
\begin{proof}
We just have to study which points of the form $(0,z,\tau)\in\mathbb{C}\times\mathbb{C}\times\mathbb{H}$ are limits of sequences in $e_{1}(\mathbb{H}_{2}(v)).$ If $(0,z,\tau)$ is such a limit then there exists a sequence
\[\left\{\left(\begin{array}{cc}\omega_n&z_n\\z_n&\tau_n\end{array}\right)\right\}_{n\in\mathbb{N}}\subseteq\mathbb{H}_{2}\]
such that for all $n\in\mathbb{N}$,
\begin{equation}
\label{vn}
a\omega_{n}+bz_{n}+c\tau_{n}+d(z_{n}^{2}-\omega_{n}\tau_{n})+e = 0,
\end{equation}
and when $n\to\infty$,
\begin{itemize}
\item $z_{n}\to z\in\mathbb{C},$
\item $\tau_{n}\to\tau\in\mathbb{H}$
\item $\Im\omega_{n}\to\infty$.
\end{itemize}
Equation \eqref{vn} gives $(a-d\tau_{n})\omega_{n}=-\bigl(bz_{n}+dz_{n}^{2}+c\tau_{n}+e\bigr)$, so the sequence $\{\widehat{\omega}_n\}_{n\in\mathbb{N}}$ given by
$$\widehat{\omega}_{n} := (a -d\tau_{n})\omega_{n}$$
must have a finite limit. Since $\tau_{n}\to\tau\in\mathbb{H}$ and $\Im\omega_{n}\to\infty$, this is impossible unless $a=d=0$. Now, condition \eqref{1} implies that necessarily $v = (0,\pm m, c,0,e)$, and since this vector is primitive, we have that $\mathrm{gcd}(m,c,e)=1$. Setting $a=d=0$ in \eqref{vn} we get that $z_{n} = \mp\frac{c\tau_{n}+e}{m},$ so the limit is $\left(0,\mp\frac{c\tau+e}{m},\tau\right).$
\end{proof}
\begin{definicion}
\label{defKcerounom}
Let $K^{0}(1)[m]$ denote the subvariety of $K^{0}(1)$ (see Definition \ref{Kcerouno}) given by the image in $K^{0}(1)$ of the set
$$\left\{\left(\frac{c\tau+e}{m},\tau\right)\in\mathbb{C}\times\mathbb{H} : c,e\in\mathbb{Z},\ \mathrm{g.c.d}(m,c,e) = 1\right\}.$$
\end{definicion}
Note that if $\mathrm{Stab}_{\mathrm{SL}(2,\mathbb{Z})}(\tau) = \{\pm\mathrm{id}\}$ then the fiber of $K^{0}(1)[m]$ over $\tau$ is just the set of points of order $m$ in the elliptic curve $E_{\tau},$ mod $\pm 1 .$
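For example, if $m=2$ then this fiber consists of the three points of order two of $E_{\tau}$, namely the images of $\tfrac{1}{2}$, $\tfrac{\tau}{2}$ and $\tfrac{\tau+1}{2}$ (which are already fixed by $z\mapsto -z$).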
The previous lemma therefore implies the following:
\begin{prop}
We have that
$$\mathcal{E}_{m}^{\ast}\cap\partial_{F_1}\mathcal{A}_{2}^{\ast} = K^{0}(1)[m].$$
\end{prop}
Surprisingly, this boundary component is actually irreducible:
\begin{prop}
For all $m\in\mathbb{Z}$, $K^0(1)[m]$ is irreducible and if $m,n\in\mathbb{N}$ are different, then $K^0(1)[m]\cap K^0(1)[n]=\varnothing$.
\end{prop}
\begin{proof}
Define $X_{(c,e)}$ to be the image of
\[\left\{\left(\frac{c\tau+e}{m},\tau\right) : \tau\in\mathbb{H}\right\}\]
in $\mathcal{A}_2^*$. We will show that $X_{(x,y)}=X_{(z,w)}$
if $\mathrm{g.c.d.}(x,y,m)=\mathrm{g.c.d.}(z,w,m)=1$.
First of all, we see that in order to get the corank 1 boundary component, we take the quotient of $\mathbb{C}\times\mathbb{H}$ by the group
\[G:=\left\{\left(\begin{array}{ccc}\epsilon&k&l\\0&a&b\\0&c&d\end{array}\right):\begin{array}{c}\left(\begin{array}{cc}a&b\\c&d\end{array}\right)\in\mathrm{SL}(2,\mathbb{Z})\\k,l\in\mathbb{Z},\epsilon=\pm1\end{array}\right\}\]
Now the subgroup consisting of matrices with $\epsilon=1$ and $k=l=0$ acts as
\[\left(\begin{array}{ccc}1&0&0\\0&a&b\\0&c&d\end{array}\right):(z,\tau)\mapsto(z(c\tau+d)^{-1},(a\tau+b)(c\tau+d)^{-1}).\]
A brief calculation shows that the rational representation of the morphism
\[\mathbb{C}/\langle1,\tau\rangle\to\mathbb{C}/\langle1,(a\tau+b)(c\tau+d)^{-1}\rangle\]
\[z\mapsto z(c\tau+d)^{-1}\]
is the matrix
\[\left(\begin{array}{cc}a&-b\\-c&d\end{array}\right),\]
and so this same matrix modulo $m$ gives the representation of the action of the morphism on the torsion points of both tori.
Now if $(\overline{x},\overline{y}),(\overline{z},\overline{w})\in(\mathbb{Z}/m\mathbb{Z})^2$ are points of order $m$, it is an easy exercise to see that there exists an element $M\in\mathrm{SL}(2,m)$ such that $M(\overline{x},\overline{y})=(\overline{z},\overline{w})$. On the other hand, the reduction map $\pi:\mathrm{SL}(2,\mathbb{Z})\to\mathrm{SL}(2,m)$ is surjective (which can be seen by observing that both groups are generated by elementary matrices, and each elementary matrix modulo $m$ is in the image of the reduction map), and so there exists
\[N=\left(\begin{array}{cc}r&s\\u&v\end{array}\right)\in\mathrm{SL}(2,\mathbb{Z})\]
such that $\pi(N)=M$. Now we get that for any $\tau$,
\[\left(\begin{array}{ccc}1&0&0\\0&r&-s\\0&-u&v\end{array}\right)\cdot\left(\frac{x+\tau y}{m},\tau\right)\equiv \left(\frac{z+\tau' w}{m},\tau'\right)\text{ (mod }G) \]
where $\tau'=(r\tau-s)(u\tau-v)^{-1}$. In particular, $X_{(x,y)}= X_{(z,w)}$, and so we are done.
As for the statement that $K^0(1)[m]\cap K^0(1)[n]=\varnothing$ if $m\neq n$, this is clear since a point of order $m$ on a torus cannot be of order $n$.
\end{proof}
\subsection{Corank 2 boundary component}
Now we study the closure in the corank 2 boundary component. In this case the partial quotient can be identified with the map $e_{2}:\mathbb{H}_{2}\to(\mathbb{C}^{\times})^{3}$ given by
$$\begin{pmatrix}
\tau_{1} & \tau_{2} \\
\tau_{2} & \tau_{3} \\
\end{pmatrix}\mapsto (e^{2\pi i\tau_1},e^{2\pi i\tau_{2}},e^{2\pi i\tau_{3}}).$$
and we have a holomorphic map $\psi:\mathbb{H}_{2}\to\mathbb{C}^{3}$ given by $\iota\circ e_{2},$ where $\iota:(\mathbb{C}^{\times})^{3}\to\mathbb{C}^{3}$ is the map given by $\iota(t_{1},t_{2},t_{3}) = (t_{1}t_{2}^{-1},t_{2},t_{3}t_{2}^{-1})$ which allows us to see $X(F_2):=P^{\prime}(F_2)\setminus\mathbb{H}_{2}$ as a subset of the toroidal embedding $(\mathbb{C}^{\times})^{3}\hookrightarrow T_{\Sigma(F_2)}.$ According to \cite[Part I, Lemma 3.137 and Proposition 3.144]{Hulek} we have that $\mathbb{P}_{\infty}^{1}$ is a certain quotient of the axis $\{0\}\times\mathbb{C}\times\{0\}.$
\begin{lemma}
\label{trescuatro}
The axis $\{0\}\times\mathbb{C}\times\{0\}$ lies in the closure of $\psi(\mathbb{E}_{m})\subseteq\mathbb{C}^3.$
\end{lemma}
\begin{proof}
Consider the period matrices defined by
$$T_{z,\tau} = \begin{pmatrix}
\tau & z \\
z & \frac{1}{m-1}\left(\tau-(m-2)z\right) \\
\end{pmatrix},$$
where $z$ is a fixed complex number and $\tau$ is such that $T_{z,\tau}\in\mathbb{H}_2$. Since $(m-2)^2-4\cdot1\cdot\bigl(-(m-1)\bigr)=m^2$ and $\tau-(m-2)z-(m-1)\cdot\frac{1}{m-1}\left(\tau-(m-2)z\right)=0$, the above matrix is an element of $\mathbb{H}_{2}(v)$ for $v=(1,-(m-2),-(m-1),0,0)$, and when $\Im (\tau)\to\infty$ we obtain that
\[\psi(T_{z,\tau})\to (0,e^{2\pi i z},0).\]
Since $z\in\mathbb{C}$ is arbitrary we get that $\{0\}\times\mathbb{C}\times\{0\}$ is contained in $\overline{\psi(\mathbb{E}_{m})}$ as we wanted to see.
\end{proof}
\begin{corollary}
We have that
$$\mathcal{E}_{m}^{\ast}\cap\partial_{F_2}\mathcal{A}_{2}^{\ast} = \mathbb{P}_{\infty}^{1},$$
where $\mathbb{P}_{\infty}^{1}$ is the \emph{peripheral $\mathbb{P}^{1}$} introduced in Theorem \ref{Adosestrella}
\end{corollary}
By \cite[Remark 3.156]{Hulek}, the closure of the corank 1 boundary component of $\mathcal{A}_2$ in $\mathcal{A}_2^*$ is isomorphic to the Kummer modular surface $\mathcal{K}_1\to\mathbb{P}^1$, and therefore $\mathbb{P}^1_\infty$ is the fiber over $\infty$ of this family.
We now need to see how the boundary components intersect each other.
\begin{definicion}
For $m\in\mathbb{Z}$, let $C_m$ be the closure of $K^0(1)[m]$ in $\mathcal{A}_2^*$.
\end{definicion}
\begin{prop}
The curve $C_m$ intersects $\mathbb{P}_\infty^1$ at $\varphi(m)+1$ points, where $\varphi$ is the Euler totient function.
\end{prop}
\begin{proof}
For $a,b\in\{0,1,\ldots,m-1\}$, the image of $\{(\frac{1}{m}(a+b\tau),\tau):\tau\in\mathbb{H}\}$ under the partial quotient map $e:\mathbb{C}\times\mathbb{H}\to\mathbb{C}^\times\times\mathbb{C}^\times$ is
$$\{(e^{2\pi i(a+b\tau)/m},e^{2\pi i\tau}):\tau\in \mathbb{H}\}=\{(t^b\rho^a,t^m):0<|t|<1\},$$
where $t=e^{2\pi i\tau/m}$ and $\rho=e^{2\pi i/m}$ is a primitive $m$th root of unity. The image of this curve in the torus embedding $\mathbb{C}^\times\times\mathbb{C}^\times\hookrightarrow\mathbb{C}\times\mathbb{C}$ where $(u,v)\mapsto(uv^{-n},u^{-1}v^{n+1})$ is the curve
$$\{(t^{b-nm}\rho^a,t^{(n+1)m-b}\rho^{-a}):0<|t|<1\}.$$
We see that the closure of this curve in $\mathbb{C}\times\mathbb{C}$ contains boundary points if and only if
$$-1+\frac{b}{m}\leq n\leq \frac{b}{m}.$$
Since $b<m$, we obtain that $n\in\{-1,0\}$. Under identification by the action of $\mbox{Sp}(4,\mathbb{Z})$, it is easy to see that we only need to consider the case $n=0$, and so we have the curve
$$\{(t^b\rho^a,t^{m-b}\rho^{-a}):0<|t|<1\}.$$
We need to analyze the behavior of the image of this curve in the quotient $\mathbb{C}^2/((x,y)\sim(y,x))$.
If $b=0$, then the boundary point that we add is the image of $(\rho^a,0)$ in the quotient. We see that the involution $(x,y)\mapsto(y,x)$ does not fix these points, they are not equivalent for different values of $b$, and the curve is smooth at this point. Here there are $\varphi(m)$ possible values for $a$.
If $b\neq0$, the point we add is (the image of) $(0,0)$, which shows that there is only one other point of intersection.
As a remark, we see that the quotient map $\mathbb{C}^2\to\mathbb{C}^2/((x,y)\sim(y,x))$ can be identified with $(x,y)\mapsto(x+y,xy)$, and so we need to look at the curve
$$\{(t^b\rho^a+t^{m-b}\rho^{-a},t^m):0<|t|<1\}.$$
Since $t^b\rho^a+t^{m-b}\rho^{-a}=t^b(\rho^a+t^{m-2b}\rho^{-a})$, we notice that if $b\mid m$ then the image is smooth, but if $b\nmid m$ then it is singular.
\end{proof}
\begin{comentario}
The deepest degeneration point on the intersection of $C_m$ with $\mathbb{P}^1_\infty$ seems to be quite complicated and is of high multiplicity; moreover, some of the branches of this intersection are singular and some are smooth.
\end{comentario}
Putting the above results together we directly obtain the following:
\begin{theorem}
\label{teoremaclausura}
Let $m\geq 2$ and let $\mathcal{E}_{m}^{\ast}$ be the closure of $\mathcal{E}_{m}$ in $\mathcal{A}_{2}^{\ast}.$
Set-theoretically we have that
$$\mathcal{E}_{m}^{\ast} = \mathcal{E}_{m}\sqcup K^{0}(1)[m]\sqcup\mathbb{P}_{\infty}^{1},$$
where $K^{0}(1)[m]$ is the subvariety of $K^{0}(1)$ from Definition \ref{defKcerounom} and $\mathbb{P}_{\infty}^{1}$ is the \emph{peripheral $\mathbb{P}^{1}$} introduced in Theorem \ref{Adosestrella}. Moreover, $\overline{K^0(1)[m]}\cap\mathbb{P}_\infty^1$ consists of $\varphi(m)+1$ points and the boundary of $\mathcal{E}_m^*\cap\mathcal{E}_n^*$ is $\mathbb{P}_\infty^1$ if $m\neq n$.
\end{theorem}
\section{Degenerate abelian surfaces}\label{degeneration}
Here we study Mumford's final example in \cite{complete-deg} in the global way that is worked out in \cite[Part II, Chapter 2]{Hulek} in order to give an interpretation of the boundary points of $\mathcal{E}_{m}^{\ast}$ as degenerate abelian surfaces which contain a degenerate elliptic curve. We will only gloss over the construction; the interested reader is invited to look at \cite[Part II, Chapters 1 and 2]{Hulek} for more details.
\subsection{Mumford's construction}
First we need to fix some notation. Let $A=\mathbb{C}[T_{1},T_{2},T_{3}],$ $S = \mathrm{Spec}\hspace{0.1cm}A\cong\mathbb{C}^{3}$ and consider the $A$-scheme
$$\widetilde{G} = \mathrm{Spec}\left(\frac{A[U,U^{-1},V,V^{-1},W,W^{-1}]}{\left<UVW-1\right>}\right)\cong\mathbb{C}^{3}\times(\mathbb{C}^{\times})^{2}.$$
Now, let $K$ be the quotient field of $A$ and denote by $\mathbb{Y}$ the subgroup of $\widetilde{G}(K)$ generated by $r$, $s$ and $t$, where
\begin{align*}
r & = (T_{2}T_{3},T_{3}^{-1},T_{2}^{-1}) \\
s & = (T_{3}^{-1},T_{1}T_{3},T_{1}^{-1}) \\
t & = (T_{2}^{-1},T_{1}^{-1},T_{1}T_{2})
\end{align*}
(note that $rst=(1,1,1)$, so this group is clearly isomorphic to $\mathbb{Z}^{3}/\mathbb{Z}(1,1,1)\cong\mathbb{Z}^{2}$).
These objects are related to the study of degenerations of abelian surfaces as follows:
\begin{itemize}
\item Consider the action of $\mathbb{Z}^{2}$ in $\mathbb{H}_{2}\times(\mathbb{C}^{\times})^{2}$ given by
$$(m,n) : \left(\tau,(w_{1},w_{2})\right)\rightarrow\left(\tau,(e^{2\pi mi\tau_{11}}e^{2\pi ni\tau_{12}}w_{1},e^{2\pi mi\tau_{12}}e^{2\pi ni \tau_{22}}w_{2})\right)$$
and denote
$$\widehat{\mathcal{S}}:= \left(\mathbb{H}_{2}\times(\mathbb{C}^{\times})^{2}\right)/\mathbb{Z}^{2}.$$
\item The action of $P^{\prime}(F_2)$ on $\mathbb{H}_{2}$ induces an action on $\widehat{\mathcal{S}}.$ We denote
$$\mathcal{S} = P^{\prime}(F_2)\setminus\widehat{\mathcal{S}}.$$
\item The projection $\mathbb{H}_{2}\times(\mathbb{C}^{\times})^{2}\rightarrow\mathbb{H}_{2}$ induces a map $\mathcal{S}\rightarrow X(F_2)$ which is the right vertical map in the following commutative diagram:
\begin{equation*}
\xymatrix{ \mathbb{H}_{2}\times(\mathbb{C}^{\times})^{2}\ar[d]\ar[rr]^{/\mathbb{Z}^{2}} & & \widehat{\mathcal{S}}\ar[d]\ar[rr]^{/P^{\prime}(F_2)} & & \mathcal{S}\ar[d] \\
\mathbb{H}_{2}\ar@{=}[rr] & & \mathbb{H}_{2} \ar[rr]_{/P^{\prime}(F_2)} & & X(F_2) }
\end{equation*}
\end{itemize}
Clearly we have that the fiber $\mathcal{S}_{[\tau]}$ of $\mathcal{S}$ over $[\tau]$ is the abelian surface $\mathbb{C}^{2}/(\mathbb{Z}^{2}\oplus\tau\mathbb{Z}^{2}).$
On the other hand, we have that $\mathbb{Y}$ acts on $\widetilde{G}(K)$ by multiplication and hence for every $T\in(\mathbb{C}^{\times})^{3}$ we have an action of $\mathbb{Y}$ on the fiber $\widetilde{G}_{T}.$ Consider the map $\Pi: \mathbb{H}_{2}\to\mathbb{C}^{3}$ given by the composition $j\circ e_{2},$ where $j:(\mathbb{C}^{\times})^{3}\to\mathbb{C}^{3}$ is the map given by
\begin{equation}
\label{varphi}
(t_{1},t_{2},t_{3})\mapsto(t_{1}t_{2},t_{2}t_{3},t_{2}^{-1}).
\end{equation}
It is easy to see that if $T=\Pi(\tau)$ for some $\tau\in\mathbb{H}_{2}$ then the quotient $\widetilde{G}_{T}/\mathbb{Y}$ is also isomorphic to the abelian surface $\mathbb{C}^{2}/(\mathbb{Z}^{2}\oplus\tau\mathbb{Z}^{2}).$ Moreover, we have the following commutative square:
\begin{equation*}
\xymatrix{ \mathcal{S}\ar[r]^{[\Pi\times\mathrm{id}]}\ar[d] & \widetilde{G}/\mathbb{Y}\ar[d] \\
X(F_2)\ar[r]_{j} & \mathbb{C}^{3} }
\end{equation*}
which induces isomorphisms
$$\mathcal{S}_{\tau}\cong\widetilde{G}_{\Pi(\tau)}/\mathbb{Y}.$$
The idea of Mumford's construction is to obtain a family $\mathcal{P}\to U$ defined over an open subset $U\subseteq\mathbb{C}^{3}$ containing $X(F_2)$ as a dense subset in such a way that the fibers over $X(F_2)$ are abelian surfaces. For this purpose, let $R = R_{\Phi,\Sigma}$ be the graded $A$-algebra defined in \cite[Part II, Definition 1.10]{Hulek} and $\widetilde{P} = \mathrm{Proj}\hspace{0.1cm}R_{\Phi,\Sigma}.$ We have that $\widetilde{G}$ is a dense open subscheme of $\widetilde{P}$ and the inclusion $\widetilde{G}\hookrightarrow\widetilde{P}$ is a morphism of $A$-schemes. Denote by $U$ the analytic open subset of $\mathbb{C}^3$ given by the interior of the closure of $\Pi(\mathbb{H}_{2})$ and define
\begin{equation}
\label{PU}
\widetilde{P}_{U} := \widetilde{P}\times_{\mathbb{C}^3} U.
\end{equation}
We then have a natural analytic morphism $\widetilde{P}_{U}\to U$
and, furthermore, we have the following result:
\begin{theorem}
\label{definicionP}
The group $\mathbb{Y}$ acts properly discontinuously on $\widetilde{P},$ we obtain a morphism $\widetilde{P}_{U}/\mathbb{Y}\to U$, and this morphism defines a proper and flat family over $U$ whose general fiber is an abelian surface.
\end{theorem}
\begin{proof}
See \cite[Part II, Theorem 3.10]{Hulek}. For the definition of the action see \cite[Part II, Definition 1.9]{Hulek}.
\end{proof}
\begin{definicion}
We write $P\to U$ for the family of the above theorem.
\end{definicion}
\subsection{Families of non-simple abelian surfaces}
Using the matrices employed in the proofs of Lemmas \ref{tresdos} and \ref{trescuatro} we can obtain families whose fibers are non-simple principally polarized abelian surfaces which contain an elliptic curve of exponent $m$. For this, consider the sets
\begin{equation}
\label{Oce}
O_{(c,e)} = \Pi\left(\left\{\begin{pmatrix}
\mu & -\frac{c\tau+e}{m} \\
-\frac{c\tau+e}{m} & \tau
\end{pmatrix}\in\mathbb{H}_{2}\right\}\right)\subseteq\mathbb{C}^{3},
\end{equation}
and
\begin{equation}
\label{Oinfty}
O_{\infty} = \Pi\left(\left\{\begin{pmatrix}
\tau & z \\
z & \frac{1}{m-1}\left(\tau-(m-2)z\right)
\end{pmatrix}\in\mathbb{H}_{2}\right\}\right)\subseteq\mathbb{C}^{3}.
\end{equation}
Restricting $\mathcal{S}\to X(F_2)$ to $O_{i},$ $i\in\{(c,e)\in\mathbb{N}^{2} : \mathrm{g.c.d}(m,c,e)=1\}\cup\{\infty\}$ we get families $\mathcal{S}_{i}\rightarrow O_{i}$ with the desired properties.
Since the fiber $(\mathcal{S}_{i})_{T}$ is a non-simple abelian surface for every $T\in O_{i}$, we have that all of these surfaces contain an elliptic curve. Now we will show that we can actually put these elliptic curves together to form a family. More precisely, we will prove the following:
\begin{prop}
\label{existenciafamilias}
Let $i\in\{(c,e)\in\mathbb{N}^{2}: \mathrm{g.c.d}(m,c,e)=1\}\cup\{\infty\}$ and consider the morphism $\mathcal{S}_{i}\to O_{i}$ defined above. There exists a morphism $\mathcal{K}_{i}\to O_{i}$ and an $O_{i}$-morphism $\mathcal{K}_{i}\to\mathcal{S}_{i}$ such that
\begin{enumerate}
\item each fiber of the map $\mathcal{K}_{i}\to O_{i}$ is an elliptic curve
\item for every $T\in O_{i}$ the induced map $(\mathcal{K}_{i})_{T}\hookrightarrow(\mathcal{S}_{i})_{T}$ is an inclusion whose image is an abelian subvariety of exponent $m$
\end{enumerate}
\end{prop}
In other words, we now proceed to show that there exists a commutative diagram
\begin{equation*}
\xymatrix{ \mathcal{K}_{i} \ar[rr]\ar[rd] & & \mathcal{S}_{i}\ar[ld] \\
& O_{i} & }
\end{equation*}
with the properties stated in the proposition above.
Let $\Lambda_{\tau}$ denote the lattice in $\mathbb{C}^{2}$ generated by the columns of the matrix $\begin{pmatrix} \mathrm{id} & \tau \end{pmatrix}$ and write $A_{\tau}: = \mathbb{C}^{2}/\Lambda_{\tau}.$ We have that every elliptic curve $E\leq A_{\tau}$ is of the form $W/(W\cap\Lambda_{\tau}),$ where $W$ is a linear subspace of $\mathbb{C}^{2}$ with $\dim_{\mathbb{C}}(W)=1.$ According to \cite{num-char} we have that if $\tau\in\mathbb{H}_{2}(v)$ then we can explicitly compute a linear subspace $W_{v}\leq\mathbb{C}^{2}$ in such a way that the curve $W_{v}/(W_{v}\cap\Lambda_{\tau})$ is a subvariety of exponent $m$ of $A_{\tau}$:
\begin{lemma} We have the following:
\begin{enumerate}[label=\roman*)]
\item Let $(c,e)\in\mathbb{Z}^{2}$ with $\mathrm{g.c.d}(m,c,e)=1$ as in Lemma \ref{tresdos} and $v_{(c,e)}=(0,m,c,0,e).$ For $\tau\in\mathbb{H}_{2}(v_{(c,e)})$ we have that the linear subspace $W_{(c,e)} := \left<(1,0)\right>_{\mathbb{C}}\leq\mathbb{C}^2$ defines an elliptic curve of exponent $m$ contained in $A_{\tau}.$
\item Let $v_{\infty} = (1,-(m-2),-(m-1),0,0)$ and $\tau\in\mathbb{H}_{2}(v_{\infty}).$ We have that the linear subspace $W_{\infty} := \left<(-1,1)\right>_{\mathbb{C}}$ defines an elliptic curve of exponent $m$ contained in $A_{\tau}.$
\end{enumerate}
\end{lemma}
\begin{proof}
Let $J$ denote the matrix
$$\begin{pmatrix}
0 & -\mathrm{id} \\
\mathrm{id} & 0
\end{pmatrix}\in\mathbb{M}_{4\times 4}(\mathbb{R}),$$
where $0\in\mathbb{M}_{2\times 2}(\mathbb{R})$ is the zero matrix and for $v=(a,b,c,d,e)\in\mathbb{Z}^{5}$ define
$$M_{v} = \begin{pmatrix}
0 & d & -\frac{b-m}{2} & a \\
-d & 0 & -c & \frac{b+m}{2} \\
\frac{b-m}{2} & c & 0 & -e \\
-a & -\frac{b+m}{2} & e & 0
\end{pmatrix}\in\mathbb{M}_{4\times 4}(\mathbb{R}).$$
Now, let $C$ denote the canonical basis of $\mathbb{R}^{4}$ and $B$ denote the $\mathbb{R}$-basis of $\mathbb{C}^{2}$ given by the columns of the matrix $\begin{pmatrix} \mathrm{id} & \tau \end{pmatrix}.$ According to \cite[Proposition 3.2]{num-char} we have that if $\tau\in\mathbb{H}_{2}(v)$ then the image of the $\mathbb{R}$-linear transformation $\mathbb{R}^{4}\to\mathbb{C}^{2}$ given in basis $C,B$ by the matrix $m\cdot\mathrm{id}-JM_{v}$ defines an abelian subvariety of $A_{\tau}$ of dimension 1 and exponent $m.$
Using the above fact, a direct computation finishes the proof.
\end{proof}
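For instance, in case i) the direct computation can be carried out as follows: for $v_{(c,e)}=(0,m,c,0,e)$ one finds
$$m\cdot\mathrm{id}-JM_{v_{(c,e)}} = \begin{pmatrix}
m & c & 0 & -e \\
0 & 0 & e & 0 \\
0 & 0 & m & 0 \\
0 & 0 & c & 0
\end{pmatrix},$$
and, since $m\tau_{12}+c\tau_{22}+e=0$ for $\tau\in\mathbb{H}_{2}(v_{(c,e)})$, the basis $B$ sends every column of this matrix into $W_{(c,e)}=\left<(1,0)\right>_{\mathbb{C}}$, as claimed.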
\begin{proof}[Proof of Proposition \ref{existenciafamilias}]
The images in $(\mathbb{C}^{\times})^2$ of the linear subspaces $W_{(c,e)}$ and $W_{\infty}$ of the Lemma above under the exponential map are the sets
$$L_{(c,e)} = \{(V,U)\in(\mathbb{C}^{\times})^{2} : U = 1\}$$
and
$$L_{\infty} = \{(V,U)\in(\mathbb{C}^{\times})^2 : UV = 1\}.$$
We get the desired families $\mathcal{K}_{i}\to O_{i}$ simply by restricting the map $\mathcal{S}_{i}\to O_{i}$ to the image of the set $O_{i}\times L_{i}\subseteq\mathbb{C}^{3}\times(\mathbb{C}^{\times})^{2}$ in $\mathcal{S}.$
\end{proof}
In the next section we will use Mumford's construction to extend the above families to proper and flat families defined over $\overline{O}_{i}\cap U$, obtaining degenerate abelian surfaces which contain a degenerate elliptic curve.
\subsection{Degenerations of abelian subvarieties}
Recall that an analytic space $A_{0}$ is called a \textbf{degenerate abelian surface} (resp. degenerate elliptic curve) if there exist a smooth space $B,$ a dense open set $U\subseteq B$ and a proper and flat family $\mathcal{X}\to B$ such that the fiber $\mathcal{X}_{t}$ is an abelian surface (resp. elliptic curve) for every $t\in U$ and there exists $t_{0}\in B$ such that $\mathcal{X}_{t_0}\cong A_{0}.$
In this subsection we will prove the following result:
\begin{theorem}
\label{main}
Let $m\geq 2$ be a fixed integer, $\mathcal{E}_{m}$ the subset of $\mathcal{A}_{2}$ given by the classes of non-simple principally polarized abelian surfaces which contain an elliptic curve of exponent $m$ and $\mathcal{E}_{m}^{\ast}$ be the closure of $\mathcal{E}_{m}$ in $\mathcal{A}_{2}^{\ast},$ where $\mathcal{A}_{2}^{\ast}$ is the toroidal compactification of $\mathcal{A}_{2}$ associated to the Legendre decomposition. There exists a finite collection of finite maps $\pi_{i}:X_{i}\to\mathcal{E}_{m}^{\ast}$ such that
\begin{enumerate}
\item The boundary of $\mathcal{E}_{m}^{\ast}$ is contained in the union of the images of the maps $\pi_{i}$
\item For each $i$ there are (explicit) analytic morphisms $Q_{i}\to X_{i},P_{i}\to X_{i}$ and a $X_{i}$-morphism $Q_{i}\to P_{i}$ such that:
\begin{enumerate}
\item For every $T\in X_{i}$ we have that the fiber $(Q_{i})_{T}$ is a degenerate elliptic curve and the fiber $(P_{i})_{T}$ is a degenerate abelian surface. Furthermore, if $\pi_{i}(T)\in\mathcal{E}_{m}$ then $(Q_{i})_{T}$ is an elliptic curve and $(P_{i})_{T}$ is a non-simple principally polarized abelian surface
\item For every $T\in X_i$ the morphism $Q_{i}\to P_{i}$ induces an inclusion $(Q_{i})_{T}\hookrightarrow (P_{i})_{T}.$ Furthermore, if $\pi_{i}(T)\in\mathcal{E}_{m}$ then this inclusion represents a subvariety of exponent $m.$
\end{enumerate}
\end{enumerate}
\end{theorem}
Once the above result is established, we will prove the following theorem:
\begin{theorem}\label{main2}
The pairs (normalised degenerate abelian surface, degenerate elliptic curve) which arise under the families $P_{i},Q_{i}$ are
\begin{itemize}
\item (certain $\mathbb{P}^{1}$-bundle over certain elliptic curve, $m$-gon of $\mathbb{P}^{1}$'s)
\item ($\mathbb{P}^{1}\times\mathbb{P}^{1}$, nodal curve) if $m=2$
\item ($\mathbb{P}^{1}\times\mathbb{P}^{1}$, $(m-1)$-gon of $\mathbb{P}^{1}$'s) if $m\geq 3$
\item (the union of two copies of $\mathbb{P}^{2}$ with one copy of the blow-up of $\mathbb{P}^{2}$ at three points, a $(2m-1)$-gon of $\mathbb{P}^{1}$'s)
\end{itemize}
where each pair can be explicitly calculated.
\end{theorem}
In other words, every point in the boundary of $\mathcal{E}_{m}^{\ast}$ can be interpreted as a degenerate abelian surface which contains a degenerate elliptic curve.
We will proceed in several steps.
\begin{definicion}
Let $(c,e)\in\mathbb{Z}^2$ with $\mathrm{g.c.d}(c,e,m)=1$ and $0\leq c,e\leq m.$
For $i=\infty$ or $i=(c,e)$ as above, denote
\begin{equation}
\label{defVi}
X_{i} = \overline{O_i}\cap U\subseteq\mathbb{C}^3,
\end{equation}
where $U$ is the interior of the closure of $\Pi(\mathbb{H}_2)$ as in \eqref{PU} and $O_{i}$ is the open set defined in \eqref{Oce} for $i=(c,e)$ and in \eqref{Oinfty} for $i=\infty.$
\end{definicion}
\begin{definicion}
\label{mapspi}
Define the map $\pi_{i}:X_{i}\to\mathcal{E}_{m}^{\ast}$ as the composition $p_{F_2}^{\ast}\circ q_{F_2},$ where $p_{F_2}^{\ast}$ and $q_{F_2}$ are the maps introduced in equations \eqref{defpF} and \eqref{defqF}, respectively.
\end{definicion}
Directly from the definitions and our proof of Theorem \ref{teoremaclausura} we obtain the following:
\begin{prop}
The maps $\pi_{i}:X_{i}\to\mathcal{E}_{m}^{\ast}$ from Definition \ref{mapspi} are finite and the sets $\pi_{i}(X_i)$ cover the boundary of $\mathcal{E}_{m}^{\ast}.$
\end{prop}
\begin{definicion}
We define the analytic space $P_{i}$ as the restriction of $P$ (from Theorem \ref{definicionP}) to $X_{i}.$ That is, take
$$P_{i} := \left.P\right|_{X_i} = P\times_{U} X_{i}.$$
We write $P_{i}\to X_{i}$ for the natural morphism arising from the universal property of the fiber product.
\end{definicion}
From \cite[Part II, Theorem 3.10]{Hulek} we have the following
\begin{prop}
The map $P_{i}\to X_{i}$ defined above is proper, flat and its fibers are the ones stated in Theorem \ref{main}.
\end{prop}
Now, motivated by our proof of Proposition \ref{existenciafamilias}, we consider the following schemes and spaces:
\begin{definicion}
Let
$$\widetilde{H} = \mathrm{Spec}\left(\frac{A[U,U^{-1},V,V^{-1},W,W^{-1}]}{\left<UVW-1,U-1\right>}\right),$$
and
$$\widetilde{H}_{\infty} = \mathrm{Spec}\left(\frac{A[U,U^{-1},V,V^{-1},W,W^{-1}]}{\left<UVW-1,W-1\right>}\right),$$
which are subschemes of $\widetilde{G}.$
\end{definicion}
\begin{definicion}
We denote by $H_{(c,e)}$ (resp.\ $H_{\infty}$) the restriction of $\widetilde{H}$ (resp.\ $\widetilde{H}_{\infty}$) to $O_{(c,e)}$ (resp.\ $O_{\infty}$).
\end{definicion}
Using the notation from the previous definition we have the following commutative diagram:
\begin{equation*}
\xymatrix{ \mathcal{K}_{i}\ar[rr]^{[\Pi\times\mathrm{id}]}\ar[dr] & & H_{i}/\mathbb{Y}\ar[ld] \\
& O_{i} & }
\end{equation*}
where $i=(c,e)$ or $i=\infty.$
\begin{prop}
\label{podemosextender}
The map $[\Pi\times\mathrm{id}]$ from the above diagram induces isomorphisms
$$\left(\mathcal{K}_{i}\right)_{T}\cong (H_{i})_{T}/\mathbb{Y}$$
for every $T\in O_{i}.$
\end{prop}
To prove this proposition we will first need an important lemma:
\begin{lemma}
\label{subgrupoactuando}
Consider the subgroups of $\mathbb{Y}$ given by $\mathbb{Y}_{c}:= (r^{c}s^{m})$ and $\mathbb{Y}_{\infty} = (r^{-(m-1)}s).$ We have that $\mathbb{Y}_{c}$ (resp. $\mathbb{Y}_{\infty}$) acts on $H_{(c,e)}$ (resp. $H_{\infty}$) and $H_{(c,e)}/\mathbb{Y} = H_{(c,e)}/\mathbb{Y}_{c}$ (resp. $H_{\infty}/\mathbb{Y} = H_{\infty}/\mathbb{Y}_{\infty}$).
\end{lemma}
\begin{proof}
From the definition of the action of $\mathbb{Y}$ on $\widetilde{P}$ given in \cite[Part II, Definition 1.9]{Hulek} we have that
$$S_{ar+bs}(U) = T_{2}^{-a}T_{3}^{b-a}U$$
and
$$S_{ar+bs}(W) = T_{2}^{-a}T_{1}^{-b}W.$$
On the other hand, it is easy to see that
\begin{equation}
\label{contencionesOic}
O_{(c,e)}\subseteq\{(T_{1},T_{2},T_{3})\in(\mathbb{C}^{\times})^{3} : T_{2}^{c} = T_{3}^{m-c}\}
\end{equation}
and
\begin{equation}
\label{contencionOinfty}
O_{\infty}\subseteq \{(T_{1},T_{2},T_{3})\in(\mathbb{C}^{\times})^{3} : T_{1} = T_{2}^{m-1}\},
\end{equation}
thus, for instance, $S_{cr+ms}(U)=T_{2}^{-c}T_{3}^{m-c}U=U$ on $O_{(c,e)}$ by \eqref{contencionesOic}, and $S_{-(m-1)r+s}(W)=T_{2}^{m-1}T_{1}^{-1}W=W$ on $O_{\infty}$ by \eqref{contencionOinfty}, so the generator of $\mathbb{Y}_{c}$ (resp.\ of $\mathbb{Y}_{\infty}$) preserves the locus $U=1$ (resp.\ $W=1$) and hence acts on $H_{(c,e)}$ (resp.\ $H_{\infty}$); an equally easy computation with the remaining elements of $\mathbb{Y}$ gives the equality of the quotients.
\end{proof}
\begin{myproofprop}
From the definition of the map $[\Pi\times\mathrm{id}]$ we have that $(H_{i})_{T}$ corresponds to the sets $L_{(c,e)}$ and $L_{\infty}$ from the proof of Proposition \ref{existenciafamilias}, which are both isomorphic to $\mathbb{C}^{\times}$. On the other hand, using the previous lemma it is easy to see that the action agrees with the one that defines the corresponding elliptic curve as a subquotient of $(\mathbb{C}^{\times})^{2}.$
\end{myproofprop}
Recall that we want to extend the families $\mathcal{K}_{i}\to O_{i}$ defined in Proposition \ref{existenciafamilias} to families defined over the sets $X_{i}$ introduced in \eqref{defVi}. By the above proposition, we can do this using the scheme $\widetilde{P}$ from Mumford's construction.
\begin{definicion}
Let $\widetilde{Q}$ (resp.\ $\widetilde{Q}_{\infty}$) be the closure of $\widetilde{H}$ (resp.\ $\widetilde{H}_{\infty}$) in $\widetilde{P}.$
\end{definicion}
We then have a morphism
$$\widetilde{Q}\to \mathrm{Spec}\hspace{0.1cm}A\cong\mathbb{C}^{3}$$
(resp. $\widetilde{Q}_{\infty}\to\mathrm{Spec}\hspace{0.1cm}A$) given by the composition of the inclusion $\widetilde{Q}\to\widetilde{P}$ (resp. $\widetilde{Q}_{\infty}\to\widetilde{P}$) with the natural map $\widetilde{P}\to\mathrm{Spec}\hspace{0.1cm}A.$
\begin{definicion}
We define $Q_{(c,e)}$ (resp. $Q_{\infty}$) as the image in
$$P_{i} = P\times_{U} X_{i} = \left[\left(\widetilde{P}\times_{\mathbb{C}^3} U\right)\times_{U} X_{i}\right]/\mathbb{Y}$$ of the restriction of $\widetilde{Q}$ (resp. $\widetilde{Q}_{\infty}$) to $X_{(c,e)}$ (resp. $X_{\infty}$).
\end{definicion}
\begin{definicion}
\label{defQi}
We write $Q_{i}\to X_{i}$ and $Q_{i}\to P_{i}$ for the natural maps arising from the definition of $Q_{i}.$
\end{definicion}
We have that the maps from the previous definition fit in the following commutative diagram
\begin{equation*}
\xymatrix{H_{i}/\mathbb{Y} \ar[r]\ar[d] & Q_{i} \ar[r]\ar[d] & P_{i}\ar[d] \\
O_{i}\ar@{^{(}->}[r] & X_{i} \ar@{=}[r] & X_{i} }
\end{equation*}
From Proposition \ref{podemosextender} it is then clear that the morphism $Q_{i}\to X_{i}$ extends the family $\mathcal{K}_{i}\to O_{i}$ from Proposition \ref{existenciafamilias}. We claim that this morphism is actually proper and flat. To prove this we will need to describe the fibers of this map. For this we only need to describe the fiber of the map $\widetilde{Q}\to\mathrm{Spec}\hspace{0.1cm}A\cong\mathbb{C}^3$ (resp. $\widetilde{Q}_{\infty}\to\mathrm{Spec}\hspace{0.1cm}A$) over a point $T\in X_{(c,e)}$ (resp. $T\in X_{\infty}$) and see which points are identified under the action of $\mathbb{Y}.$
\begin{lemma}
\label{lemaproj}
We have that
$$\widetilde{Q} = \mathrm{Proj}\left(R/I\right)$$
and
$$\widetilde{Q}_{\infty} = \mathrm{Proj}\left(R/I_{\infty}\right),$$
where $I$ (resp. $I_{\infty}$) is the homogenization of the ideal that defines $\widetilde{H}$ (resp. $\widetilde{H}_{\infty}$).
\end{lemma}
\begin{proof}
It is obvious that $\widetilde{Q}$ (resp.\ $\widetilde{Q}_{\infty}$) is an open subset of $\mathrm{Proj}(R/I)$ (resp.\ $\mathrm{Proj}(R/I_{\infty})$). Now, as both $I$ and $I_{\infty}$ are radical in $R,$ the result follows.
\end{proof}
Using the above, our problem becomes purely algebraic.
\begin{prop}
\label{fibras-tilde}
Let $i=(c,e)$ or $i=\infty$ and let $\widetilde{Q},\widetilde{Q}_{\infty}$ and $X_{i}$ be as in Lemma \ref{lemaproj} and equation \eqref{defVi}, respectively.
\begin{enumerate}
\item Consider the groups $\mathbb{Y}_{c}$ and $\mathbb{Y}_{\infty},$ subgroups of $\mathbb{Y}$ defined in Lemma \ref{subgrupoactuando}. We have that for $T\in X_{(c,e)}$ (resp. $T\in X_{\infty})$ the only identifications in $\left(\widetilde{Q}\right)_{T}$ (resp. $\left(\widetilde{Q}_{\infty}\right)_T$) which arise from the action of $\mathbb{Y}$ come from $\mathbb{Y}_{c}$ (resp. $\mathbb{Y}_{\infty}$).
\item We have that the fiber of the map $\widetilde{Q}\to \mathrm{Spec}\hspace{0.1cm}A$ (resp.\ $\widetilde{Q}_{\infty}\to\mathrm{Spec}\hspace{0.1cm}A$) over a point $T\in X_{(c,e)}$ (resp.\ $T\in X_{\infty}$) is a chain of $\mathbb{P}^{1}$'s.
\end{enumerate}
\end{prop}
\begin{proof}
\begin{enumerate}[leftmargin = 0.4cm]
\item According to Lemma \ref{lemaproj} it is enough to find the elements of $\mathbb{Y}$ which preserve the equation that defines the ideal $I$ (resp.\ $I_{\infty}$), given the condition that $T\in X_{(c,e)}$ (resp.\ $T\in X_{\infty}$). These are all easy calculations recalling the inclusions \eqref{contencionesOic} and \eqref{contencionOinfty}.
\item Recall that for every $T$ we have that $\widetilde{Q}_{T}$ (resp.\ $\left(\widetilde{Q}_{\infty}\right)_{T}$) is contained in $\widetilde{P}_{T}.$ For $i=(c,e)$ we have that the boundary points of $X_i$ lie in $\{0\}\times(\mathbb{C}^{\times})^{2}$ and according to \cite[Part II, Proposition 2.15]{Hulek} for such points the fiber $\widetilde{P}_{T}$ is a chain of $\mathbb{C}^{\times}\times\mathbb{P}^{1}$'s. That is, over those points the fiber $\widetilde{P}_{T}$ has countably many irreducible components $Z_{k},k\in\mathbb{Z},$ each one isomorphic to $\mathbb{C}^{\times}\times\mathbb{P}^{1}$ and for $k\neq l$ these components satisfy that
$$Z_{k}\cap Z_{l} \cong \begin{cases}
\mathbb{C}^{\times} & \text{if $|k-l|=1$} \\
\emptyset & \text{otherwise}
\end{cases},$$
where $(u,[0:1])\in Z_{k}$ is identified with $(u,[1:0])\in Z_{k+1}.$ Moreover, each of those components has an explicit algebraic description (which can be found in the proof of \cite[Part II, Prop. 2.15]{Hulek}) which can be worked out. Using that explicit description and Lemma \ref{lemaproj} it is easy to see that
$$\widetilde{Q}\cap Z_{k}\cong\mathbb{P}^1$$
and for $k\neq l$ we have that
$$\widetilde{Q}\cap Z_{k}\cap Z_{l}\cong\begin{cases}
\{pt\} & \text{if $|k-l|=1$} \\
\emptyset & \text{otherwise}
\end{cases},$$
that is, $\widetilde{Q}_{T}$ is a chain of $\mathbb{P}^{1}$'s, as we wanted to see.
Now, for $i=\infty$ we have to distinguish between the cases $T\neq 0$ and $T=0.$ For $T\neq 0$, $\widetilde{P}_{T}$ is a net of quadrics, that is, it has countably many irreducible components $Z_{a,b},$ $a,b\in\mathbb{Z},$ each one isomorphic to $\mathbb{P}^{1}\times\mathbb{P}^{1}$, with the intersections suggested by the terminology. This fiber can be visualized as in Figure \ref{P-tildeT} below, where each rectangle represents a copy of $\mathbb{P}^{1}\times\mathbb{P}^{1}$. On the other hand, for $T=0$ the fiber $\widetilde{P}_{T}$ is illustrated by Figure \ref{P-tilde0}, where each triangle is a copy of $\mathbb{P}^{2}$ and each hexagon represents the blow-up of $\mathbb{P}^{2}$ at three general points. In both cases, using local coordinates and the action of the corresponding group, it can be worked out that the fiber is a chain of $\mathbb{P}^{1}$'s and can be visualized as in Figures \ref{Qinfinitonocero} and \ref{Qinfinitocero}.
\begin{figure}
\caption{Illustration of $\widetilde{P}_{T}$ for $T\neq 0$}
\label{P-tildeT}
\end{figure}
\begin{figure}
\caption{Illustration of $\left(\widetilde{Q}_{\infty}\right)_{T}$ for $T\neq 0$}
\label{Qinfinitonocero}
\end{figure}
\begin{figure}
\caption{Illustration of $\widetilde{P}_{T}$ for $T=0$}
\label{P-tilde0}
\end{figure}
\begin{figure}
\caption{Illustration of $\left(\widetilde{Q}_{\infty}\right)_{T}$ for $T=0$}
\label{Qinfinitocero}
\end{figure}
\end{enumerate}
\end{proof}
Now, using the previous result we can get the description of the fibers and show that those fibers are actually degenerate elliptic curves.
\begin{prop}
Let $m\geq 2$ be a fixed integer and for $i=(c,e)$ or $i=\infty$ consider the morphisms $Q_{i}\to X_{i}$ defined in Definition \ref{defQi}. We have that
\begin{enumerate}
\item For $T\in(\mathbb{C}^{\times})^{3}$ the fiber of the map $Q_{i}\to X_{i}$ over $T$ is an elliptic curve
\item For $T\notin(\mathbb{C}^{\times})^3$ the fiber of the map $Q_{(c,e)}\to X_{(c,e)}$ over $T$ is an $m$-gon of $\mathbb{P}^{1}$'s
\item For $T\notin(\mathbb{C}^{\times})^{3}$ the fiber of the map $Q_{\infty}\to X_{\infty}$ over $T$ is either
\begin{itemize}
\item a nodal curve if $m=2$ and $T\neq 0$
\item an $(m-1)$-gon of $\mathbb{P}^{1}$'s if $m\geq 3$ and $T\neq 0$
\item a $(2m-1)$-gon of $\mathbb{P}^{1}$'s if $T=0$
\end{itemize}
\item The morphisms $Q_{i}\to X_{i}$ are proper and flat. In particular, each of the fibers above can be called a degenerate elliptic curve
\end{enumerate}
\end{prop}
\begin{proof}
\begin{enumerate}
\item This is just a consequence from the construction of the morphisms and our proof of Theorem \ref{teoremaclausura}.
\item Using the action of the group $\mathbb{Y}_{c}$ it is just necessary to see which points in $$\left(\widetilde{Q}\right)_{T}\cap \bigcup_{k=0}^{m-1} Z_{k}$$
are identified by the generator $r^{c}s^{m},$ where $Z_{k}\cong\mathbb{C}^{\times}\times\mathbb{P}^{1}$ are the irreducible components of the fiber $\widetilde{P}_{T}$ (see our proof of Proposition \ref{fibras-tilde}). This is easy because $r$ acts on each $Z_{k}$ and $s$ sends $Z_{k}$ into $Z_{k+1}.$
\item As before, it is enough to see which points in
$$\left(\widetilde{Q}_{\infty}\right)_{T}\cap \bigcup_{k=0}^{m-2} Z_{k,0}$$
are identified by the generator $r^{-(m-1)}s,$ where $Z_{a,b}\cong\mathbb{P}^{1}\times\mathbb{P}^{1}$ are the irreducible components of $\widetilde{P}_{T}$ (see Proposition \ref{fibras-tilde}). This is an easy computation using that $r^{-(m-1)}s$ maps $Z_{k,0}$ to $Z_{k-(m-1),1}$ and the description of the intersections given by \cite[Part II, Proposition 2.15]{Hulek}.
\item Given the above description of the fibers, the result follows from \cite[Lemma 1, p.56]{complexgeom} and \cite[Corollary p.158]{complexgeom}.
\end{enumerate}
\end{proof}
Having established which degenerate elliptic curves arise from our construction, Step 4 is complete and hence we have proved Theorems \ref{main} and \ref{main2} stated at the beginning of this section.
\end{document}
\begin{document}
\title{Adiabatic Quantum Computation is Equivalent to Standard Quantum Computation}
\begin{abstract}
Adiabatic quantum computation
has recently attracted attention in the physics and computer science
communities, but its computational power was unknown.
We describe an efficient adiabatic simulation
of any given quantum algorithm, which implies that
the adiabatic computation model and the conventional quantum
computation model are polynomially equivalent.
Our result can be extended to the
physically realistic setting of
particles arranged on a two-dimensional grid with nearest neighbor
interactions. The equivalence between the models
provides a new vantage point from which to tackle
the central issues in quantum computation, namely
designing new quantum algorithms and
constructing fault tolerant quantum computers.
In particular, by translating the main open questions
in the area of quantum algorithms to the language of spectral gaps
of sparse matrices, the result
makes these questions accessible to a
wider scientific audience, acquainted with
mathematical physics, expander theory and rapidly mixing Markov chains.
\end{abstract}
\section{Introduction}
The study of adiabatic quantum computation was initiated several years
ago by Farhi, Goldstone, Gutmann and Sipser \cite{farhiad}, who
suggested a novel quantum algorithm for solving classical optimization
problems such as {\sc Satisfiability} ({\sc Sat}). Their algorithm is
based on a celebrated theorem in quantum mechanics known as {\em the
adiabatic theorem} \cite{kato,messiah}. Although the exact worst-case
behavior of this algorithm is not known, several simulations (see,
e.g., \cite{farhi}) on random instances of up to $20$ quantum bits led
to various optimistic speculations. The bad news is that there is now
mounting evidence \cite{vandamvaz, vandam2, ben} that the algorithm of
\cite{farhiad} takes exponential time in the worst-case for
${\sf{NP}}$-complete problems. Nevertheless, adiabatic computation has
since been shown to be promising in other less ambitious directions: it
possesses several interesting algorithmic capabilities, as we will
soon review, and in addition, it exhibits inherent robustness against
certain types of quantum errors \cite{preskill}. We note that a small
scale adiabatic algorithm has already been implemented experimentally,
using a Nuclear Magnetic Resonance (NMR) system \cite{adnmr}.
We briefly describe the model of adiabatic computation (a more precise
description appears in Section~\ref{sec:model}). A computation in
this model is specified by two Hamiltonians named $H_{\mathrm{init}}$ and
$H_{\mathrm{final}}$ (a Hamiltonian is simply a Hermitian matrix). The
eigenvector with smallest eigenvalue (also known as the \emph{ground
state}) of $H_{\mathrm{init}}$ is required to be an easy to prepare state, such
as a tensor product state. The output of the adiabatic computation is
the ground state of the final Hamiltonian $H_{\mathrm{final}}$. Hence, we
choose an $H_{\mathrm{final}}$ whose ground state represents the solution to our
problem. We require the Hamiltonians to be \emph{local}, i.e., we
require them to only involve interactions between a constant number of
particles (this can be seen as the equivalent of allowing gates
operating on a constant number of qubits in the standard model).
This, in particular, makes sure that the Hamiltonians have a short
classical description, by simply listing the matrix entries of each
local term.
The running time of the
adiabatic computation is determined by the minimal spectral
gap\footnote{The spectral gap is the difference between the lowest and
second lowest eigenvalue.} of all the Hamiltonians on the straight
line connecting $H_{\mathrm{init}}$ and $H_{\mathrm{final}}$: $H(s)=(1-s)H_{\mathrm{init}} + s
H_{\mathrm{final}}$ for $s \in [0,1]$. More precisely, the adiabatic
computation is polynomial time if this minimal spectral gap is at
least inverse polynomial.
The motivation for the above definition is physical. The Hamiltonian
operator corresponds to the energy of the quantum system, and for it
to be physically realistic and implementable it must be local. Its
ground state is the state of lowest energy. We can set up a quantum
system in the ground state of $H_{\mathrm{init}}$ (which is supposed to be easy
to generate) and apply the Hamiltonian $H_{\mathrm{init}}$ to the system. We
then slowly modify the Hamiltonian along the straight line from
$H_{\mathrm{init}}$ towards $H_{\mathrm{final}}$. It follows from the adiabatic theorem
that if this transformation is performed slowly enough (how slow is
determined by the minimal spectral gap), the final state of the system
will be in the ground state of $H_{\mathrm{final}}$, as required.
What is the computational power of this model? In order to refer to
the adiabatic model as a computational model that computes classical
functions (rather than quantum states), we consider the result of the
adiabatic computation to be the outcome of a measurement of one or
more of the qubits, performed on the final ground state. It is known that
adiabatic computation can be efficiently simulated by standard quantum
computers \cite{vandamvaz,farhi}. Hence, its computational power is
not greater than that of standard quantum computers. Several positive
results are also known. In \cite{vandamvaz,cerf} it was shown that
Grover's quadratic speed-up for an unsorted search \cite{groversearch}
can be realized as an adiabatic computation. Moreover,
\cite{farhianneal,ben,santoro} showed that adiabatic computation can
`tunnel' through wide energy barriers and thus outperform simulated
annealing, a classical counterpart of the adiabatic model. However,
whether adiabatic computation can achieve the full power of quantum
computation was not known. In fact, even the question of whether
adiabatic computation can simulate general \emph{classical}
computations efficiently was open. The focus of this paper is the
exact characterization of the computational power of adiabatic
computation.
Before we describe our results, let us clarify one subtle point. Most
of the previous work on the subject focused on a restricted class of
adiabatic algorithms that can be referred to as adiabatic {\it
optimization} algorithms. In these algorithms, $H_{\mathrm{final}}$ is chosen to
be a diagonal matrix, corresponding to a combinatorial optimization
problem. In particular, this implies that the ground state of
$H_{\mathrm{final}}$ (which is the output of the computation) is a classical
state, i.e., a state in the computational basis. In this paper,
however, we associate the term \emph{adiabatic computation} with the
more general class of adiabatic algorithms, where the only restriction
on $H_{\mathrm{final}}$ is that it is a local Hamiltonian. We do this because,
from a physical point of view, there is no reason to force the
physical process described above to have a diagonal $H_{\mathrm{final}}$, when
all other Hamiltonians are not restricted this way. Thus, our
definition of adiabatic computation seems to be the natural one to
use. It is this natural definition that allows us to prove our
results.
\subsection{Results -- Computational Complexity of the Adiabatic Model}
Our main result clarifies the question of the computational power of
adiabatic algorithms. We show:
\begin{theorem}\label{thm:main}
The model of adiabatic computation is polynomially equivalent to the
standard model of quantum computation.
\end{theorem}
As mentioned above, one direction of the equivalence is already known
\cite{farhi,vandamvaz}. Our contribution is to show that standard
quantum computation can be efficiently simulated by adiabatic
computation. We do this by using adiabatic computation with $3$-local
Hamiltonians. We note that \cite{amnon} made a preliminary step in
the direction of Theorem~\ref{thm:main} but the model that they
considered was quite different.\footnote{Namely, \cite{amnon} showed
that adiabatic computation using \emph{simulatable} Hamiltonians is
as powerful as standard quantum computation. Simulatable Hamiltonians
are Hamiltonians that can be simulated efficiently by a quantum
circuit. They are very different from local Hamiltonians, and they
cannot even be written explicitly. Instead, such Hamiltonians are
specified using products of local unitary matrices.}
One corollary of our main theorem is the following. We can consider
the model of adiabatic computation with a more general set of
Hamiltonians known as \emph{explicit sparse} Hamiltonians. These are
Hermitian matrices that have at most polynomially many nonzero
elements in each row and column, and,
moreover, there is an efficient Turing machine that can generate a
list of all nonzero entries in a given row or column. Clearly, local Hamiltonians are a special case of
explicit sparse Hamiltonians. It was shown in \cite{amnon} that
adiabatic computation with explicit sparse Hamiltonians can still be
simulated by standard quantum computation (this extends the result of
\cite{vandamvaz,farhiad} in a non-trivial way). Hence, we obtain
the following result.
\begin{coro}\label{corol:main}
The model of adiabatic computation with explicit sparse Hamiltonians
is polynomially equivalent to the standard model of quantum
computation.
\end{coro}
Explicit sparse matrices are pervasive in computer science and
combinatorics, and hence this corollary might be more useful than
Theorem~\ref{thm:main} in the context of the design of quantum
algorithms and the study of quantum complexity.
To summarize, our results show that questions about quantum
computation can be equivalently considered in the model of adiabatic
computation, a model that is quite different from the more common
circuit-based models. There are two reasons why it seems worthwhile
to try to design quantum algorithms in the adiabatic framework.
First, there are several known powerful techniques to analyze spectral
gaps of matrices, including expander theory \cite{expanders} and
rapidly mixing Markov chains \cite{lovasz,sinclair}. Indeed,
probability theory is often used in mathematical physics to analyze
spectral gaps of Hamiltonians (see, e.g., \cite{spitzer}), and our
proofs also make extensive use of Markov chain tools. Second, it is
known that many interesting algorithmic problems in quantum
computation can be cast as quantum state generation problems
\cite{amnon}. The problem of generating special quantum states seems
more natural in the adiabatic model than in the standard model.
\subsection{Results -- Towards Experimental Implications}
Theorem~\ref{thm:main} uses 3-local Hamiltonians that act on
particles that may be arbitrarily far apart.
{}From a practical point of view,
it is often difficult to create controlled interactions between
particles located far-away from each other. Moreover,
three-particle Hamiltonians
are technologically very difficult to realize.
If one wants to physically realize the adiabatic algorithms,
it would be much better to have only
two-local interactions between nearest neighbor particles.
To this end
we prove the following theorem. This, we believe, brings the
adiabatic computation model one step closer to physical realization.
\begin{theorem}\label{thm:geo}
Any quantum computation can be efficiently simulated by an adiabatic
computation with two-local nearest neighbor Hamiltonians operating on
six-state particles set on a two dimensional grid.
\end{theorem}
The need for six-state particles arises from our construction. It is
an open question whether this can be improved.
Theorems~\ref{thm:main} and~\ref{thm:geo} open up the possibility of
physically realizing universal quantum computation
using adiabatically evolving quantum systems.
As mentioned before, there are possible advantages to this approach:
adiabatic quantum computation is resilient to certain types
of noise \cite{preskill}. An important component of this
resilience is the existence of a spectral gap in the Hamiltonian.
It is well known in physics that such a gap
plays an important role in the context of protecting quantum systems from
noise. However, it remains to be
further studied, both experimentally and
theoretically, what the right model for noisy adiabatic computation is,
and whether fault tolerant adiabatic computation can be achieved.
We refer the reader to further discussion in Subsection \ref{sec:open}.
\subsection{Proof of Theorem~\ref{thm:main}: Overview}
Given an arbitrary quantum circuit \cite{nielsen}, our goal is to
design an adiabatic computation whose output is the same as that of
the quantum circuit. Some similarities between the models are
obvious: one model involves unitary gates on a constant number of
qubits, while the other involves local Hamiltonians. However, after
some thought, one eventually arrives at the following difficulty. The
output state of the adiabatic computation is the ground state of
$H_{\mathrm{final}}$. The
output state of the quantum circuit is its final state, which is
unknown to us. How can we specify $H_{\mathrm{final}}$ without knowing the
output state of the quantum circuit? Notice that this state can be
some complicated quantum superposition. One might wonder why our task
is not trivial, since this state does have an efficient local
classical description, namely the quantum circuit. However, local
quantum gates, which operate in sequence to generate a non-local
overall action, are very different from local Hamiltonians, which
correspond to simultaneous local constraints. To explain the
solution, we first set some notations.
Without loss of generality we assume that the input to the quantum
circuit consists of $n$ qubits all initialized to
$\ket{0}$'s.\footnote{Otherwise, the first $n$ gates can be used to
flip the qubits to the desired input.} Then, a sequence of $L$ unitary
gates, $U_1,\dots,U_L$, each operating on one or two qubits, is
applied to the state. The system's state after the $\ell$'th gate is
$\ket{\alpha(\ell)}$. The output of the quantum circuit is in general
a complicated quantum state $\ket{\alpha(L)}$ of $n$ qubits, which is
then measured in the standard basis. We now want to associate with it
a corresponding adiabatic computation.
A first natural attempt would be to define $H_{{\mathrm{final}}}$ as a local
Hamiltonian with $\ket{\alpha(L)}$ as its ground state. However, this
attempt encounters the difficulty mentioned above: not knowing
$\ket{\alpha(L)}$, it seems impossible to explicitly specify
$H_{\mathrm{final}}$. The key to resolve this difficulty is the observation that
the ground state of $H_{\mathrm{final}}$ need not necessarily be the state
$\ket{\alpha(L)}$. It is sufficient (under some mild restrictions)
that the ground state has a non-negligible inner product with
$\ket{\alpha(L)}$. This gives us significant flexibility in designing
$H_{\mathrm{final}}$. Our idea is to base our solution on a seemingly unrelated
ingenious result of Kitaev \cite{Kitaev:book}, in which he provides
the first quantum ${\sf{NP}}$-complete problem, namely, {\sc local
Hamiltonians}. This result can be viewed as the quantum analogue of
the Cook-Levin theorem \cite{papa}, which states that $3$-{\sc Sat} is
${\sf{NP}}$-complete. For his proof, Kitaev defined a local Hamiltonian
that checks the time propagation of a quantum circuit. Kitaev's local
Hamiltonian has as its ground state the entire \emph{history} of the
quantum computation, in \emph{superposition}:
\begin{eqnarray}\label{eq:final_state}
\ket{\eta} &:=& \frac{1}{\sqrt{L+1}} \sum_{\ell=0}^L
\ket{\alpha(\ell)} \otimes \ket{1^\ell 0^{L-\ell}}^c.
\end{eqnarray}
The right ($L$ qubits) register is a clock that counts the steps by
adding $1$s from left to right. The superscript $c$ denotes clock
qubits. We note that this state has a non-negligible projection on
our desired state $\ket{\alpha(L)}$: its squared overlap with $\ket{\alpha(L)}\otimes\ket{1^{L}}^{c}$ is exactly $1/(L+1)$. Hence, instead of designing a
Hamiltonian that has the final unknown state of the circuit as its
ground state, a task that seems impossible, we can define $H_{\mathrm{final}}$
to be Kitaev's local Hamiltonian. Why is it possible to define a local
Hamiltonian whose ground state is $\ket{\eta}$, whereas the same task
seems impossible with $\ket{\alpha(L)}$? The idea is that the unary
representation of the clock enables a local verification of correct
propagation of the computation from one step to the next, which cannot
be done without the intermediate computational steps.
We thus choose Kitaev's Hamiltonian \cite{Kitaev:book} to be our
$H_{\mathrm{final}}$. This Hamiltonian involves five body interactions (three
clock particles and two computation particles). For the initial
Hamiltonian $H_{\mathrm{init}}$ we require that it has $\ket{\alpha(0)} \otimes \ket{0^{L}}^c$, the
first term in the history state, as its unique ground state. It is
easy to define such a local Hamiltonian, because $\ket{\alpha(0)} \otimes \ket{0^{L}}^c$ is a
tensor product state. Crucially, $H_{\mathrm{init}}$ and $H_{\mathrm{final}}$ can be
constructed efficiently from the given quantum circuit;
no knowledge of $\ket{\alpha(L)}$ is required for the construction.
A technical problem lies in showing that the spectral gap of the
intermediate Hamiltonian $H(s)$ is lower bounded by some inverse
polynomial (more specifically, we show it is larger than $1/L^2$). To
do this, we use a mapping of the Hamiltonian to a Markov chain
corresponding to a random walk on the $L+1$ time steps. We then apply
the conductance bound from the theory of rapidly mixing Markov chains
\cite{sinclair} to bound the spectral gap of this chain. We note
that, in general, applying the conductance bound requires knowing the
limiting distribution of the chain, which in our case is hard since it
corresponds to knowing the coefficients of the ground state for
all the Hamiltonians $H(s)$.
We circumvent this problem by noticing that it is actually
sufficient in our case to know very little about the limiting
distribution of the Markov chain, namely that it is monotone (in a
certain sense to be defined later). This allows us to apply the
conductance bound, and deduce that the spectral gap is
$\Omega(1/L^2)$. From this it follows that the running time of the
adiabatic computation is polynomial.
Extracting the output of the quantum circuit from the history state
efficiently is easy: Measure all the qubits of the clock and if the
clock is in the state $\ket{1^L}$, the computational qubits carry
the result of the circuit. Otherwise, start from
scratch.\footnote{This gives an overhead factor of $L$ which can be avoided
by adding $O(\frac{1}{\epsilon}L)$ identity gates to the quantum
circuit at the end, which has the effect that most of the history state
$|\eta\ra$ is concentrated on the final state $\ket{\alpha(L)}$. See
Subsection~\ref{sec:nonsense} for more details.}
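To indicate why the padding with identity gates mentioned in the footnote works (a quick back-of-the-envelope check, included only for convenience): if the circuit is padded with $K$ identity gates, the history state becomes a uniform superposition over $L+K+1$ clock values, and measuring the clock yields a value $\ell\geq L$, and hence the computational register in the state $\ket{\alpha(L)}$, with probability
\begin{eqnarray*}
\frac{K+1}{L+K+1} &\geq& 1-\epsilon \qquad \mbox{whenever } K\geq\frac{1-\epsilon}{\epsilon}\,L,
\end{eqnarray*}
so $K=O(\frac{1}{\epsilon}L)$ identity gates indeed concentrate the history state on the final state of the circuit.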
The above scheme gives a proof of Theorem~\ref{thm:main} that uses
$5$-local Hamiltonians, and runs in time roughly $O(L^5)$.
The improvement to $3$-locality is based on a
simple idea (used in \cite{Kempe:03a} to prove that the $3$-local
Hamiltonian problem is quantum ${\sf{NP}}$-complete) but obtaining a lower
bound on the spectral gap is significantly more involved technically.
We postpone its explanation to the body of the paper. The running
time we achieve in this case is roughly $O(L^{14})$.
\subsection{Proof of Theorem~\ref{thm:geo}: Overview}
The idea underlying the proof of Theorem~\ref{thm:main} by itself does
not suffice to prove Theorem~\ref{thm:geo}. The basic problem lies in
arranging sufficient interaction between the computational and the
clock particles, since if the particles are set on a grid, each clock
particle can only interact with four neighbors. We circumvent this
problem as follows. Instead of having separate clock and computational
particles, we now assign to each particle both clock and computational
degrees of freedom (this is what makes our particles six-state). We
then construct a computation that propagates locally over the entire
set of particles, snaking up and down each column of the lattice. The
adiabatic evolution now ends up in the history state of this
snake-like sequence of states.
The lower bound on the spectral gap is obtained in an essentially
identical way as in the $3$-local Hamiltonian case.
\subsection{Related Work}\label{ssec:related}
After the preliminary version of this paper appeared \cite{thisfocs},
the results regarding Quantum-${\sf{NP}}$ completeness were tightened by
\cite{KKR:04} to show that the $2$-local Hamiltonian problem is
Quantum-${\sf{NP}}$ complete. Following the ideas presented in the current
paper, \cite{KKR:04} used their result to show that
Theorem~\ref{thm:main} holds when the Hamiltonians are $2$-local.
The idea to use an inverse polynomial spectral gap for fault
tolerance is certainly not new. It is a crucial ingredient in
topological (and later, geometrical) quantum computation
\cite{geometric,anyons,holonomic}.
Note, however, that in those models the
spectral gap has no effect on the running time or on any other
algorithmic aspects, and it is used only to separate the computational
subspace from the ``noisy'' subspace. In contrast, the spectral gap in
adiabatic computation is crucial from the algorithmic point of view,
since it determines the time complexity of the computation.
\subsection{Open Questions}\label{sec:open}
This paper demonstrates that quantum computation can be studied and
implemented entirely within the adiabatic computation model, without
losing its computational power. This result raises many open questions
in various directions. First, it would be interesting to determine if
the parameters presented in this work can be improved. For example,
it might be possible to shorten the running time of our adiabatic
simulation. Decreasing the dimensionality of the particles used in
Theorem~\ref{thm:geo} from six to two or three might be important for
implementation applications. An interesting question is whether
Theorem~\ref{thm:geo} can be achieved using a one dimensional instead
of a two dimensional grid.
Second, the possibility of fault tolerant adiabatic computation
deserves to be studied both experimentally and theoretically. Since
the publication of the preliminary version of the current paper
\cite{thisfocs}, several researchers have begun to study adiabatic
computation in the presence of noise \cite{aberg,cerf2, lidar}.
However, it is still unclear whether adiabatic evolution
might be helpful for the physical implementation of quantum computers.
Our results imply the equivalence between standard quantum computation
and various other variants of adiabatic computation that have been
considered in the literature and that are more general than our model.
These include adiabatic computation with a general path between
$H_{\mathrm{init}}$ and $H_{\mathrm{final}}$, rather than a straight line (see
\cite{amnon} and \cite{farhipaths} for a rigorous definition), and
adiabatic computation with explicit sparse Hamiltonians \cite{amnon}
(see Corollary~\ref{corol:main}). A problem we leave open is to
characterize the computational power of adiabatic optimization,
studied in \cite{vandamvaz, vandam2,farhiad}. In this model, the
initial state is a tensor product of qubits in the state
$\frac{1}{\sqrt{2}}(|0\ra+|1\ra)$, the final Hamiltonian is diagonal,
and the evolution is carried out on a straight line. It is still
possible that such a computation can be simulated efficiently by a
classical Turing Machine.
Finally, we hope that the adiabatic framework might lead to the
discovery of new quantum algorithms. As shown in this paper, as well
as in \cite{amnon}, tools from probability theory, mathematical
physics and spectral gap analysis might turn out to be relevant and
useful. In order to improve our understanding of the benefits of the
adiabatic paradigm, it might be insightful to see adiabatic versions
of known quantum algorithms, presented in a meaningful way.
\paragraph{Organization:}
In Section~\ref{sec:prelim} we describe the model of adiabatic
computation and state some relevant facts about Markov chains.
Section~\ref{sec:nogeo} shows how adiabatic systems with local
Hamiltonians allowing five- and later three-body interactions, can
efficiently simulate standard quantum computations. Section
\ref{sec:geometric} shows how to adapt the construction to a
two-dimensional grid.
\section{Preliminaries}\label{sec:prelim}
\subsection{Hamiltonians of $n$-Particle Systems}
For background on $n$-qubit systems, quantum circuits and
Hamiltonians, see \cite{nielsen}. An $n$-particle system is
described by a state in Hilbert space of dimension $d^n$,
the tensor product of $n$ $d$-dimensional Hilbert spaces.
For simplicity, we restrict our discussion in this subsection
to quantum systems composed of $2$-dimensional particles, i.e., qubits;
a similar discussion holds for higher dimensional particles
(such as the $6$-dimensional case we consider later).
In the standard model of quantum computation, the state of $n$ qubits
evolves in discrete time steps by unitary operations. In fact, the
underlying physical description of this evolution is continuous, and
is governed by Schr{\"o}dinger's equation: \( -i
\frac{d}{dt}\ket{\psi(t)}=H(t)\ket{\psi(t)}\). Here $|\psi(t)\ra$ is
the state of the $n$ qubits at time $t$, and $H(t)$ is a Hermitian
$2^n\times 2^n$ matrix operating on the space of $n$ qubits. This
$H(t)$ is the \emph{Hamiltonian} operating on a system; it governs the
dynamics of the system. Given that the state of the system at time
$t=0$ is equal to $|\psi(0)\ra$, one can in principle solve
Schr{\"o}dinger's equation with this initial condition, to get
$|\psi(T)\ra$, the state of the system at a later time $t=T$. The
fact that the Hamiltonian is Hermitian corresponds to the familiar
fact that the discrete time evolution of the quantum state from time
$t_1$ to a later time $t_2$ is unitary.
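As a concrete illustration (a standard fact, recorded here only for orientation): when the Hamiltonian $H$ is time-independent, Schr{\"o}dinger's equation is solved by
\begin{eqnarray*}
\ket{\psi(T)} &=& e^{-iHT}\ket{\psi(0)},
\end{eqnarray*}
and since $H$ is Hermitian the operator $e^{-iHT}$ is unitary; this is the continuous-time counterpart of the unitary gates of the circuit model.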
We sometimes refer to eigenvalues of Hamiltonians as {\em energies}.
The {\em ground energy} of a Hamiltonian is its lowest eigenvalue and
the corresponding eigenvector(s) are called {\em ground state}(s).
We define $\Delta(H)$, the \emph{spectral gap} of a
Hamiltonian $H$, to be the difference between the lowest eigenvalue of
$H$ and its second lowest eigenvalue.
($\Delta(H)=0$ if the lowest eigenvalue is degenerate, namely, has more than
one eigenvector associated with it).
We define the \emph{restriction} of $H$ to some subspace ${\cal{S}}$, denoted
$H_{{\cal{S}}}$, as $\Pi_{{\cal{S}}} H \Pi_{{\cal{S}}}$ where $\Pi_{{\cal{S}}}$ is the orthogonal
projection on ${\cal{S}}$.
A Hamiltonian on an $n$-particle system represents a certain physical
operation that one can, in principle, apply to an $n$-particle system.
However, it is clear that one cannot efficiently apply any arbitrary
Hamiltonian (just describing it requires roughly $2^{2n}$ space). We say that a
Hamiltonian $H$ is {\em $k$-local} if $H$ can be written as $\sum_A
H^A$ where $A$ runs over all subsets of $k$ particles, and $H^A$
operates trivially on all but the particles in $A$ (i.e., it is a
tensor product of a Hamiltonian on $A$ with identity on the particles
outside of $A$). Notice that for any constant $k$, a $k$-local
Hamiltonian on $n$-qubits can be described by $2^{2k}n^k={\mathrm{poly}}(n)$
numbers. We say that $H$ is local if $H$ is $k$-local for some
constant $k$.
In this paper we restrict our attention to $k$-local Hamiltonians.
This requirement corresponds to the fact that all known interactions
in nature involve a constant number of particles. We attempt to make
$k$ as small as possible to make the Hamiltonian easier to implement.
\subsection{The Model of Adiabatic Computation}\label{sec:model}
The cornerstone of the adiabatic model of computation is the
celebrated adiabatic theorem \cite{kato,messiah}.
Consider a time-dependent Hamiltonian $H(s)$,
$s\in [0,1]$, and a system initialized at time $t=0$ in the ground
state of $H(0)$ (here and in the following we assume that for all $s\in [0,1]$, $H(s)$ has a
unique ground state). Let the system evolve according to the Hamiltonian
$H(t/T)$ from time $t=0$ to time $T$. We refer to such
a process as an \emph{adiabatic evolution according to $H$ for time $T$}.
The adiabatic theorem affirms that for large enough $T$ the
final state of the system is very close to the ground state of $H(1)$.
Just how large $T$ should be for this to happen is determined by the spectral gap
of the Hamiltonians $H(s)$. Such an upper bound on $T$ is given in the following theorem,
adapted from \cite{ben} (whose proof in turn is based on \cite{avron};
see also \cite{odedandris} for a recent elementary proof of a slightly
weaker version).
\begin{theorem}[The Adiabatic Theorem (adapted from \cite{ben})]\label{thm:ad}
Let $H_{{\mathrm{init}}}$ and $H_{{\mathrm{final}}}$ be two
Hamiltonians acting on a quantum system and consider the time-dependent
Hamiltonian $H(s):=(1-s)H_{\mathrm{init}} + s H_{\mathrm{final}}$.
Assume that for all $s$, $H(s)$ has a unique ground state.
Then, for any fixed $\delta > 0$ and any $\epsilon > 0$, if
\begin{eqnarray}\label{eq:adiabatic_cond}
T &\ge& \Omega
\left(\frac{\|H_{{\mathrm{final}}}-H_{{\mathrm{init}}}\|^{1+\delta}}{\epsilon^{\delta}\min_{s
\in [0,1]}\{\Delta^{2+\delta}(H(s))\}} \right)
\end{eqnarray}
then the final state of an adiabatic evolution according to $H$ for
time $T$ (with an appropriate setting of global phase)
is $\epsilon$-close in $\ell_2$-norm to the ground state of $H_{\mathrm{final}}$.
The matrix norm is the spectral norm $\|H\| := \max_w \|Hw\|/\|w\|$.
\end{theorem}
One should think of $\delta$ as being some
fixed constant, say, $0.1$. We cannot
take $\delta=0$ because of the constant hidden in the $\Omega$ notation,
which goes to infinity as $\delta$ goes to $0$.
Let us now describe the model of adiabatic computation.
In this paper we use the following definition of adiabatic computation
that slightly generalizes that of Farhi et al. \cite{farhiad}.
The adiabatic `circuit'
is determined by $H_{\mathrm{init}}$ and $H_{\mathrm{final}}$ and the output of
the computation is (close to) the ground state of $H_{\mathrm{final}}$.
\begin{deff}\label{def:ad}
A $k$-local adiabatic computation $AC(n,d,H_{\mathrm{init}},H_{\mathrm{final}}, \epsilon)$ is
specified by two $k$-local Hamiltonians, $H_{{\mathrm{init}}}$ and $H_{{\mathrm{final}}}$
acting on $n$ $d$-dimensional particles,
such that both Hamiltonians have
unique ground states. The ground state of $H_{{\mathrm{init}}}$ is a tensor
product state. The output is a state that is $\epsilon$-close in
$\ell_2$-norm to the ground state of $H_{\mathrm{final}}$. Let $T$
be the smallest time such that the final
state of an adiabatic evolution according to $H(s):=(1-s)
H_{\mathrm{init}} + s H_{\mathrm{final}}$ for time $T$ is $\epsilon$-close in $\ell_2$-norm
to the ground state of $H_{{\mathrm{final}}}$. The running time of the adiabatic
algorithm is defined to be $T\cdot \max_s\|H(s)\|$.
\end{deff}
Observe that we have chosen our definition of running time to be
$T\cdot\max_s\|H(s)\|$ and not $T$.
Notice that if the Hamiltonians are multiplied by some factor, this
divides the bound of Equation \ref{eq:adiabatic_cond}, and hence $T$,
by the same factor. Hence, if the running time is defined to be $T$
one would be able to achieve arbitrarily
small running times, by multiplying the Hamiltonians by large factors.
Our definition, on the other hand, is invariant under a multiplication
of the Hamiltonian by
an overall factor, and so takes into account the known physical
trade-off between time and energy.\footnote{
This trade-off between time and the norm of
the Hamiltonian (namely, the energy), is manifested
in Schr{\"o}dinger's equation whose solution does not change
if time is divided by some factor and at the same time
the Hamiltonian is multiplied by the same factor.}
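To spell this invariance out (a short check, included for convenience): replacing $H(s)$ by $c\,H(s)$ for a constant $c>0$ multiplies the numerator of the bound in Equation~\ref{eq:adiabatic_cond} by $c^{1+\delta}$ and the denominator by $c^{2+\delta}$, so the bound on $T$, and by the rescaling of Schr{\"o}dinger's equation also $T$ itself, is divided by $c$, while $\max_s\|H(s)\|$ is multiplied by $c$. Hence
\begin{eqnarray*}
T\cdot\max_s\|H(s)\| &\longmapsto& \frac{T}{c}\cdot\Bigl(c\,\max_s\|H(s)\|\Bigr) \;=\; T\cdot\max_s\|H(s)\|,
\end{eqnarray*}
and the running time as defined above is unaffected by such a rescaling.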
The right hand side of Equation~\ref{eq:adiabatic_cond} can be
used to provide an upper bound on the running time of an adiabatic
computation.
Hence, in order to show that an adiabatic algorithm is
efficient, it is enough to use Hamiltonians of at most ${\mathrm{poly}}(n)$ norm,
and show that for all $s\in [0,1]$ the spectral
gap $\Delta(H(s))$ is at least inverse polynomial in $n$.
We note that in certain cases, it is possible to obtain a stronger
upper bound on the running time. Indeed, assume there exists a subspace
${\cal{S}}$ such that for all $s\in [0,1]$, $H(s)$ leaves ${\cal{S}}$ invariant,
i.e., $H(s)({\cal{S}})\subseteq {\cal{S}}$. Equivalently, $H(s)$ is block diagonal
in ${\cal{S}}$ and its orthogonal space ${\cal{S}}^\perp$. Consider $H_{{\cal{S}}}(s)$,
the restriction of $H(s)$ to ${\cal{S}}$. Then, starting from a state inside
${\cal{S}}$, an adiabatic evolution according to $H$ is {\em identical} to
an adiabatic evolution according to $H_{{\cal{S}}}$ (this follows from
Schr{\"o}dinger's equation). Hence, we
can potentially obtain a stronger upper bound by replacing
$\Delta(H(s))$ with $\Delta(H_{{\cal{S}}}(s))$ in Equation~\ref{eq:adiabatic_cond}.
This stronger upper bound will be used in our first adiabatic algorithm.
Finally, let us mention that one can define more general models
of adiabatic computation. For example, one might consider non-local
Hamiltonians (see \cite{amnon}). Another possible extension is to
consider more general paths between $H_{{\mathrm{init}}}$ and $H_{{\mathrm{final}}}$
(see, e.g., \cite{farhipaths, preskill, amnon}).
Obviously, our main results, such as Theorem \ref{thm:main},
hold also for these more general models.
\subsection{Markov Chains and Hermitian Matrices}\label{sec:markovham}
Under certain conditions, there exists a standard mapping of
Hamiltonians to Markov chains (for background on Markov chains, see
\cite{lovasz}). The following fact is useful to show that this
mapping applies in the case we analyze.
\begin{fact}[Adapted from Perron's Theorem, Theorem $8.2.11$
in \cite{HornJohnson}]\label{fact:perron}
Let $G$ be a Hermitian matrix with real non-negative entries. If there exists a
finite $k$ such that all entries of $G^k$ are positive, then $G$'s
largest eigenvalue is positive, and all other eigenvalues
are strictly smaller in absolute value.
Moreover, the corresponding eigenvector is unique,
and all its entries are positive.
\end{fact}
We define the mapping for $G$, a Hermitian matrix operating on an
$L+1$ dimensional Hilbert space. Suppose that all the entries of $G$
are real and non-negative, that its eigenvector
$(\alpha_0,\ldots,\alpha_L)$ with largest eigenvalue $\mu$ satisfies
$\alpha_i>0$ for all $0\le i\le L$ and that $\mu>0$. Define $P$ by:
\begin{eqnarray}\label{eq:tran}
P_{ij} &:=& \frac{\alpha_j}{\mu \alpha_i}G_{ij}.
\end{eqnarray}
The matrix $P$ is well defined, and is stochastic because all its entries are
non-negative and each of its rows sums up to one. It is easy to
verify the following fact:
\begin{fact}\label{fact:GH}
The vector $(v_0,\ldots,v_L)$ is an eigenvector of $G$ with eigenvalue $\delta$
if and only if $(\alpha_0 v_0,\ldots,\alpha_L v_L)$ is a left
eigenvector of $P$ with eigenvalue $\delta/\mu$.
\end{fact}
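For completeness, here is the one-line verification (using only Equation~\ref{eq:tran} and the fact that $G$ is real symmetric): if $Gv=\delta v$ and $w_i:=\alpha_i v_i$, then
\begin{eqnarray*}
\sum_{i} w_i P_{ij} &=& \sum_i \alpha_i v_i\,\frac{\alpha_j}{\mu\alpha_i}\,G_{ij}
\;=\; \frac{\alpha_j}{\mu}\sum_i G_{ji}v_i \;=\; \frac{\delta}{\mu}\,\alpha_j v_j \;=\; \frac{\delta}{\mu}\,w_j,
\end{eqnarray*}
and since all $\alpha_i>0$ the argument can be run backwards.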
We will consider $G$ of the form $G=I-H$ for
some Hamiltonian $H$. The above fact implies that if
$(\alpha_0,\ldots,\alpha_L)$ is the ground state of $H$ with
eigenvalue $\lambda$ then $(\alpha_0^2,\ldots,\alpha_L^2)$ is a
left eigenvector of $P$ with maximal eigenvalue $1$. By normalizing,
we obtain that $\pi:=(\alpha_0^2/Z,\ldots,\alpha_L^2/Z)$
is the limiting distribution of $P$,
where $Z=\sum \alpha_i^2$.
Moreover, the gap between
$P$'s largest and second largest eigenvalues is equal to
$\Delta(H)/(1-\lambda)$.
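The last claim is a short calculation, spelled out here for convenience: the eigenvalues of $G=I-H$ are $1-e$ for the eigenvalues $e$ of $H$, and by Fact~\ref{fact:GH} the eigenvalues of $P$ are these values divided by $\mu=1-\lambda$. The two largest eigenvalues of $P$ are therefore $1$ and $(1-\lambda-\Delta(H))/(1-\lambda)$, and their difference is $\Delta(H)/(1-\lambda)$.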
\subsection{Spectral Gaps of Markov Chains}\label{sec:cond}
Given a stochastic matrix $P$ with limiting distribution $\pi$, and a
subset $B\subseteq \{0,\dots,L\}$, the \emph{flow} from $B$ is given
by $F(B) := \sum_{i\in B,j\notin B}{\pi_i P_{ij}}.$ Define the
$\pi$-weight of $B$ as $\pi(B):=\sum_{i\in B}{\pi_i}$.
The \emph{conductance} of $P$ is defined by
$\varphi(P) := \min_{B}{{F(B)}/{\pi(B)}}$,
where we minimize over all non-empty subsets $B\subseteq\{0,\dots,L\}$
with $\pi(B)\leq \smfrac{1}{2}$.
\begin{theorem}[The conductance bound \cite{sinclair}]\label{thm:conductance}
The eigenvalue gap of $P$ is at least $\smfrac{1}{2}\varphi(P)^2$.
\end{theorem}
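As an illustrative example of how this bound will be used (an informal aside, not part of \cite{sinclair}): suppose $P$ is a nearest-neighbor chain on $\{0,\dots,L\}$ with uniform limiting distribution $\pi_i=1/(L+1)$ and with every transition probability between neighbors at least some constant $p>0$. Any non-empty $B$ with $\pi(B)\leq\smfrac{1}{2}$ contains a state $i\in B$ with a neighbor $j\notin B$, so
\begin{eqnarray*}
\frac{F(B)}{\pi(B)} &\geq& \frac{\pi_i P_{ij}}{1/2} \;\geq\; \frac{2p}{L+1},
\end{eqnarray*}
and Theorem~\ref{thm:conductance} gives an eigenvalue gap of at least $2p^{2}/(L+1)^{2}=\Omega(L^{-2})$; bounds of exactly this order are what the analysis below establishes.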
\section{Equivalence of Adiabatic and Quantum Computation}\label{sec:nogeo}
Here we prove Theorem~\ref{thm:main}, by showing how to simulate a
quantum circuit with $L$ two-qubit gates on $n$ qubits by an
adiabatic computation on $n+L$ qubits (the other direction
was shown in \cite{farhiad, vandamvaz}). We first allow five qubit
interactions. We later show how to reduce it to three, using
techniques that will also be used in Section~\ref{sec:geometric}.
\subsection{Five-local Hamiltonian}\label{sec:five}
\begin{theorem}\label{thm:5}
Given a quantum circuit on $n$ qubits with $L$ two-qubit gates
implementing a unitary $U$, and $\epsilon>0$, there exists a $5$-local adiabatic computation
$AC(n+L,2,H_{\mathrm{init}},H_{\mathrm{final}},\epsilon)$ whose running time is
${\mathrm{poly}}(L,\frac{1}{\epsilon})$ and
whose output (after tracing out some ancilla qubits)
is $\epsilon$-close (in trace distance) to $U\ket{0^n}$.
Moreover, $H_{\mathrm{init}}$ and $H_{\mathrm{final}}$ can be computed by a polynomial
time Turing machine.
\end{theorem}
The running time we obtain here is
$O(\epsilon^{-(5+3 \delta)}L^{5+2 \delta})$ for any fixed $\delta>0$.
\subsubsection{The Hamiltonian}\label{sec:ham}
For our construction we use the Hamiltonian defined in
\cite{Kitaev:book}. Denote $\ket{\gamma_\ell}:= \ket{\alpha(\ell)}
\otimes \ket{1^\ell 0^{L-\ell}}^c$, where $\ket{\alpha(\ell)}$ denotes the
state of the circuit after the $\ell$th gate and the superscript $c$ denotes
the clock qubits. We would like to define a local Hamiltonian $H_{{\mathrm{init}}}$
with ground state $\ket{\gamma_0}=\ket{0^n}\otimes \ket{0^{L}}^c$, and
a local Hamiltonian
$H_{{\mathrm{final}}}$ with ground state $\ket{\eta} = \frac{1}{\sqrt{L+1}}
\sum_{\ell=0}^L \ket{\gamma_\ell}$ as in Equation
\ref{eq:final_state}. To do this, we write
$H_{{\mathrm{init}}}$ and $H_{{\mathrm{final}}}$ as a sum of terms:
\begin{align*}
H_{{\mathrm{init}}}&:= H_{\mathrm{clockinit}}+H_{\mathrm{input}}+H_{\mathrm{clock}} \\
H_{{\mathrm{final}}} &:= \frac{1}{2}\sum_{\ell=1}^L{H_\ell}+H_{\mathrm{input}}+H_{\mathrm{clock}}.
\end{align*}
The terms in $H_{{\mathrm{final}}}$ (and likewise in $H_{{\mathrm{init}}}$)
are defined such that the only state whose energy (i.e., eigenvalue)
is $0$ is the desired ground state. This is done by assigning
an {\em energy penalty} to any state that does not satisfy the required
properties of the ground state.
The different terms, which correspond to different properties
of the ground states, are described in the following paragraphs.
The adiabatic evolution then follows the time-dependent Hamiltonian
\begin{eqnarray} \label{eq:hamham}
H(s)&=&(1-s)H_{{\mathrm{init}}}+s H_{{\mathrm{final}}}.
\end{eqnarray}
Notice that as $s$ goes from $0$ to $1$, $H_{\mathrm{clockinit}}$ is slowly
replaced by $\frac{1}{2}\sum_{\ell=1}^L{H_\ell}$ while
$H_{\mathrm{input}}$ and $H_{\mathrm{clock}}$ are held constant.
We now describe each of the terms.
First, $H_{{\mathrm{clock}}}$ checks that the
clock's state is of
the form $\ket{1^\ell 0^{L-\ell}}^c$ for some $0\le \ell\le L$.
This is achieved by assigning an energy penalty to any basis state on the
clock qubits that contains the sequence $01$,
\begin{eqnarray*}
H_{{\mathrm{clock}}} &:=& \sum_{\ell=1}^{L-1}{\ketbra{01}{01}^c_{\ell,\ell+1}},
\end{eqnarray*}
where the subscript indicates which clock qubits the projection
operates on. Note that illegal clock states are eigenstates of
$H_{{\mathrm{clock}}}$ with eigenvalue at least $1$; legal clock states have
eigenvalue $0$.
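For instance (a small illustrative check, with $L=4$): the legal clock state $\ket{1100}^{c}$ contains no substring $01$ and therefore has energy $0$ under $H_{{\mathrm{clock}}}$, whereas the illegal state $\ket{1010}^{c}$ contains $01$ on the clock qubits $(2,3)$ and therefore has energy at least $1$.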
Next, $H_{{\mathrm{input}}}$ checks that if the clock is $\ket{0^L}^c$,
the computation qubits must be in the state $\ket{0^n}$,
\begin{align*}
H_{{\mathrm{input}}} &:= \sum_{i=1}^n{\ketbra{1}{1}_i}\otimes\ketbra{0}{0}^c_1.
\end{align*}
We complete the description of $H_{\mathrm{init}}$ with $H_{\mathrm{clockinit}}$ whose goal is
to check that the clock's state is $\ket{0^L}^c$,
\begin{eqnarray*}
H_{\mathrm{clockinit}}&:=&\ketbra{1}{1}^c_1.
\end{eqnarray*}
\begin{claim}\label{cl:gs}
The state $\ket{\gamma_0}$ is a ground state of $H_{{\mathrm{init}}}$ with eigenvalue $0$.\footnote{
The state $\ket{\gamma_0}$ is in fact the {\em unique} ground state of $H_{{\mathrm{init}}}$ as
will become apparent from the proof of the spectral gap. A similar statement
holds for Claim \ref{cl:gs2}.
}
\end{claim}
\begin{proof}
It is easy to verify that $H_{\mathrm{init}} \ket{\gamma_0} = 0$.
As a sum of projectors, $H_{\mathrm{init}}$ is positive semidefinite
and hence $\ket{\gamma_0}$ is a ground state of $H_{\mathrm{init}}$.
\end{proof}
We now proceed to the first term in $H_{{\mathrm{final}}}$.
The Hamiltonian $H_\ell$ checks that the
propagation from step $\ell-1$ to $\ell$ is correct, i.e., that it
corresponds to the application of the gate $U_\ell$.
For $1 < \ell < L$, it is defined as
\begin{eqnarray} \label{eq:IUUI}
H_\ell &:=&
I\otimes \ketbra{100}{100}^c_{\ell-1,\ell,\ell+1}
- U_\ell\otimes\ketbra{110}{100}^c_{\ell-1,\ell,\ell+1} \nonumber\\
& & - U_\ell^\dag \otimes \ketbra{100}{110}^c_{\ell-1,\ell,\ell+1}
+ I\otimes\ketbra{110}{110}^c_{\ell-1,\ell,\ell+1}.
\end{eqnarray}
Intuitively, the three-qubit terms above move the state of the clock
one step forward, one step backward, or leave it unchanged.
The accompanying matrices $U_\ell, U^\dag_\ell$ describe the associated time evolution.
For the boundary cases $\ell=1,L$, we omit one
clock qubit from these terms and define
\begin{eqnarray}\label{eq:edge}
H_1 & := &
I\otimes\ketbra{00}{00}^c_{1,2} -U_1 \otimes\ketbra{10}{00}^c_{1,2}
- U_1^\dag \otimes\ketbra{00}{10}^c_{1,2} + I\otimes \ketbra{10}{10}^c_{1,2}
\nonumber \\
H_L & :=&
I\otimes\ketbra{10}{10}^c_{L-1,L} - U_L \otimes\ketbra{11}{10}^c_{L-1,L}
- U_L^\dag \otimes \ketbra{10}{11}^c_{L-1,L} + I\otimes \ketbra{11}{11}^c_{L-1,L}.
\end{eqnarray}
\begin{claim}\label{cl:gs2}
The history state $\ket{\eta}$ is a ground state of $H_{{\mathrm{final}}}$ with eigenvalue $0$.
\end{claim}
\begin{proof}
It is easy to verify that $H_{\mathrm{final}} \ket{\eta} = 0$.
It remains to notice that for all $1\le \ell\le L$, $H_\ell$ is
positive semidefinite and hence so is $H_{\mathrm{final}}$.
\end{proof}
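For concreteness, the following short numerical sketch (in Python, using only \texttt{numpy}; the toy instance, qubit ordering and variable names are illustrative choices, not part of the construction) builds $H_{\mathrm{init}}$ and $H_{\mathrm{final}}$ for a circuit with $n=1$ qubit and $L=2$ gates and checks Claims~\ref{cl:gs} and~\ref{cl:gs2} directly.
\begin{verbatim}
import numpy as np

# Toy instance: n = 1 qubit, L = 2 gates (U1 = Hadamard, U2 = NOT).
# Tensor-factor ordering: computation qubit, clock qubit 1, clock qubit 2.
I2 = np.eye(2)
e = {'0': np.array([1.0, 0.0]), '1': np.array([0.0, 1.0])}
P0, P1 = np.outer(e['0'], e['0']), np.outer(e['1'], e['1'])
U1 = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
U2 = np.array([[0.0, 1.0], [1.0, 0.0]])

def clock(out_bits, in_bits):
    """|out_bits><in_bits| on the two clock qubits."""
    v_out = np.kron(e[out_bits[0]], e[out_bits[1]])
    v_in = np.kron(e[in_bits[0]], e[in_bits[1]])
    return np.outer(v_out, v_in)

H_clock = np.kron(I2, clock('01', '01'))       # penalize the illegal substring 01
H_input = np.kron(P1, np.kron(P0, I2))         # computation qubit must be |0> at time 0
H_clockinit = np.kron(I2, np.kron(P1, I2))     # clock must start in |00>
H_1 = (np.kron(I2, clock('00', '00')) - np.kron(U1, clock('10', '00'))
       - np.kron(U1.conj().T, clock('00', '10')) + np.kron(I2, clock('10', '10')))
H_2 = (np.kron(I2, clock('10', '10')) - np.kron(U2, clock('11', '10'))
       - np.kron(U2.conj().T, clock('10', '11')) + np.kron(I2, clock('11', '11')))

H_init = H_clockinit + H_input + H_clock
H_final = 0.5 * (H_1 + H_2) + H_input + H_clock

gamma = [np.kron(e['0'], np.kron(e['0'], e['0'])),            # |gamma_0>
         np.kron(U1 @ e['0'], np.kron(e['1'], e['0'])),       # |gamma_1>
         np.kron(U2 @ U1 @ e['0'], np.kron(e['1'], e['1']))]  # |gamma_2>
eta = sum(gamma) / np.sqrt(3)                                 # history state

assert np.allclose(H_init @ gamma[0], 0)           # Claim cl:gs
assert np.allclose(H_final @ eta, 0)               # Claim cl:gs2
assert np.linalg.eigvalsh(H_init).min() > -1e-12   # both Hamiltonians are PSD
assert np.linalg.eigvalsh(H_final).min() > -1e-12
\end{verbatim}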
\begin{remark}
Strictly speaking, Theorem \ref{thm:5} holds even
if we remove the terms $H_{\mathrm{clock}}$ and $H_{{\mathrm{input}}}$ from both $H_{{\mathrm{init}}}$ and $H_{{\mathrm{final}}}$.
We include them mainly for consistency with the rest of the paper.
\end{remark}
\subsubsection{Spectral Gap in a Subspace}
Let ${\cal S}_0$ be the $(L+1)$-dimensional subspace spanned by
$\ket{\gamma_0},\ldots,\ket{\gamma_L}$.
It is easy to verify the following claim.
\begin{claim}\label{cl:invariant}
The subspace ${\cal S}_0$ is invariant under $H(s)$, i.e., $H(s)({\cal S}_0)\subseteq {\cal S}_0$.
\end{claim}
In this subsection, we show that the spectral gap of $H_{{\cal S}_0}(s)$,
the restriction of $H(s)$ to ${\cal S}_0$, is inverse polynomial in $L$.
As mentioned in Subsection~\ref{sec:model}, this, together with Claim~\ref{cl:invariant},
is enough to obtain a bound on the running time of the adiabatic algorithm.
\begin{lemma}\label{cl:spec}
The spectral gap of the restriction of $H(s)$ to ${\cal S}_0$ satisfies
$\Delta(H_{{\cal S}_0}(s))=\Omega (L^{-2})$ for all $s\in [0,1]$.
\end{lemma}
\begin{proof}
Let us write the Hamiltonians $H_{{\cal S}_0, {\mathrm{init}}}$ and $H_{{\cal S}_0, {\mathrm{final}}}$ in the basis
$\ket{\gamma_0},\ldots,\ket{\gamma_L}$ of ${\cal S}_0$.
Both $H_{\mathrm{clock}}$ and $H_{{\mathrm{input}}}$ are $0$ on ${\cal S}_0$ and can thus
be ignored.
We have the following $(L+1) \times (L+1)$ matrices:
\begin{eqnarray}\label{eq:h_initial_s0}
H_{{\cal S}_0,{\mathrm{init}}} & = &
\left(
\begin{array}{cccc}
0 & 0 & \ldots & 0 \\
0 & 1 & \ldots & 0 \\
\vdots & \vdots & \ddots & \vdots \\
0 & 0 & \ldots & 1 \\
\end{array}
\right),
\end{eqnarray}
\begin{eqnarray}\label{eq:h_final_s0}
H_{{\cal S}_0,{\mathrm{final}}} & = & \smfrac{1}{2}\ketbra{\gamma_0}{\gamma_0} - \smfrac{1}{2}\ketbra{\gamma_0}{\gamma_1}
- \smfrac{1}{2}\ketbra{\gamma_{L}}{\gamma_{L-1}}+\smfrac{1}{2}\ketbra{\gamma_L}{\gamma_L} \nonumber \\
& & + \sum_{\ell=1}^{L-1}({-\smfrac{1}{2}\ketbra{\gamma_\ell}{\gamma_{\ell-1}}+
\ketbra{\gamma_\ell}{\gamma_\ell} -\smfrac{1}{2}\ketbra{\gamma_\ell}{\gamma_{\ell+1}}}) \nonumber \\
& = & \left(
\begin{array}{rrrrrrr}
\smfrac{1}{2} & {\mbox{-}} \smfrac{1}{2} &0 & & \cdots& & 0 \\ {\mbox{-}} \smfrac{1}{2} & 1 & {\mbox{-}} \smfrac{1}{2} & 0 & \ddots & & \vdots\\ 0 &
{\mbox{-}} \smfrac{1}{2} & 1 & {\mbox{-}} \smfrac{1}{2} & 0 & \ddots & \vdots\\ & \ddots & \ddots & \ddots & \ddots & \ddots & \\ \vdots& &
0 & {\mbox{-}} \smfrac{1}{2} &1 & {\mbox{-}} \smfrac{1}{2}& 0 \\ & & & 0 & {\mbox{-}} \smfrac{1}{2} &1 & {\mbox{-}} \smfrac{1}{2} \\ 0& & \cdots& & 0&
{\mbox{-}} \smfrac{1}{2} & \smfrac{1}{2} \\
\end{array}
\right)
\end{eqnarray}
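As a quick numerical sanity check of the lemma (a sketch, not part of the proof; it uses Python with \texttt{numpy}), one can diagonalize these $(L+1)\times(L+1)$ matrices for a few values of $L$ and observe that the minimal gap over $s\in[0,1]$ indeed scales like $L^{-2}$:
\begin{verbatim}
import numpy as np

def restricted_hamiltonian(L, s):
    """The matrix of H_{S_0}(s) in the basis gamma_0, ..., gamma_L."""
    H = np.diag([s / 2] + [1.0] * (L - 1) + [1 - s / 2])
    for k in range(L):
        H[k, k + 1] = H[k + 1, k] = -s / 2
    return H

for L in (8, 16, 32, 64):
    gaps = []
    for s in np.linspace(0.0, 1.0, 201):
        ev = np.linalg.eigvalsh(restricted_hamiltonian(L, s))
        gaps.append(ev[1] - ev[0])
    # min gap times L^2 stays bounded away from zero (roughly pi^2/2 for large L)
    print(L, min(gaps), min(gaps) * L ** 2)
\end{verbatim}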
We now lower bound $\Delta(H_{{\cal S}_0}(s))$. We consider two cases:
\vskip 5pt \noindent{\bf The case $s<1/3$:}
Here, $H_{{\cal S}_0}(s)$ is sufficiently close to $H_{{\cal S}_0, {\mathrm{init}}}$
(whose spectral gap is $1$) so we can apply the following standard lemma (see, e.g., \cite{bhatia}, Page 244).
\begin{lemma}[Gerschgorin's Circle Theorem] Let $A$ be an $n\times n$ matrix with entries $a_{ij}$.
Consider the discs in the complex plane given by
$$ D_i = \Big\{ z ~|~ |z- a_{ii}| \le \sum_{j \neq i} |a_{ij}| \Big\}, ~~ 1 \le i \le n. $$
Then the eigenvalues of $A$ are contained in $\cup D_i$ and any connected
component of $\cup D_i$ contains as many eigenvalues of $A$ as the number of
discs that form this component.
\end{lemma}
For $s < 1/3$, $H_{{\cal S}_0}(s)_{1,1} = s/2 < 1/6$
and $\sum_{j \neq 1} |H_{{\cal S}_0}(s)_{1,j}| = s/2 < 1/6$,
so the disc of the first row is contained in $[0,1/3)$.
Moreover, for any $i\neq 1$, $H_{{\cal S}_0}(s)_{i,i} \ge 1-s/2 > 5/6$
and $\sum_{j \neq i} |H_{{\cal S}_0}(s)_{i,j}| \le s < 1/3$, so the
corresponding discs are contained in $[1-s,1+s]\subseteq (2/3,4/3)$. By the above lemma,
we obtain that there is one eigenvalue smaller than $1/3$ while all
other eigenvalues are larger than $2/3$. Hence, the spectral gap is
at least $1/3$.
\vskip 5pt
\noindent{\bf The case $s\ge 1/3$:}
We note that $H_{{\cal S}_0,{\mathrm{final}}}$ is the Laplacian of the
simple random walk \cite{lovasz} of a particle on a line of length $L+1$.
A standard result in Markov chain theory
implies $\Delta(H_{{\cal S}_0,{\mathrm{final}}})=\Omega(1/L^2)$ \cite{lovasz}.
For $s \ge 1/3$, $H_{{\cal S}_0}(s)$ is sufficiently close to
$H_{{\cal S}_0, {\mathrm{final}}}$ to apply Markov chain techniques, as we show next.
Let $(\alpha_0,\ldots,\alpha_L)^\dagger$ be the
ground state of $H_{{\cal S}_0}(s)$ with eigenvalue $\lambda$.
Define the Hermitian matrix $G(s)=I - H_{{\cal S}_0}(s)$.
It is easy to see that $G(s)$ satisfies the conditions of
Fact~\ref{fact:perron} for all $s>0$.
We obtain that the largest eigenvalue $\mu=1-\lambda$
of $G(s)$ is positive and non-degenerate and the corresponding eigenvector
$(\alpha_0,\ldots,\alpha_L)^\dagger$ has positive entries.
We can now map the matrix $G(s)$
to a stochastic matrix $P(s)$ as described in
Subsection~\ref{sec:markovham}.
The transition matrix $P(s)$ describes a random walk on the
line of $L+1$ sites (Fig.~\ref{fig:randomwalk}).
Fact \ref{fact:GH} implies that
the limiting distribution of $P(s)$ is given by
$\pi=(\alpha^2_0/Z,\ldots,\alpha^2_L/Z)$ where $Z=\sum_i \alpha_i^2$.
\begin{figure}
\caption{The random walk of $P(s)$}
\label{fig:randomwalk}
\end{figure}
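The construction of $P(s)$ from $G(s)$ can be made concrete with a short sketch (ours; it uses the entrywise formula $P(s)_{ij}=G(s)_{ij}\,\alpha_j/(\mu\,\alpha_i)$, which is consistent with the transition probabilities appearing in the proof of Claim~\ref{cl:conduct} below):
\begin{verbatim}
import numpy as np

L, s = 20, 0.7                                  # illustrative values (any 0 < s <= 1)
H = np.diag([s / 2] + [1.0] * (L - 1) + [1 - s / 2])
for k in range(L):
    H[k, k + 1] = H[k + 1, k] = -s / 2
G = np.eye(L + 1) - H                           # G(s) = I - H_{S_0}(s), entrywise >= 0

evals, evecs = np.linalg.eigh(G)
mu, alpha = evals[-1], evecs[:, -1]             # Perron eigenvalue and eigenvector
if alpha[0] < 0:
    alpha = -alpha                              # fix the sign; all entries are positive

P = G * alpha[None, :] / (mu * alpha[:, None])  # P_ij = G_ij * alpha_j / (mu * alpha_i)
pi = alpha ** 2 / np.sum(alpha ** 2)

assert np.allclose(P.sum(axis=1), 1.0)          # P(s) is stochastic
assert np.allclose(pi @ P, pi)                  # pi_i = alpha_i^2 / Z is stationary
assert np.all(np.diff(alpha) <= 1e-10)          # alpha is monotone (cf. the claim below)
\end{verbatim}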
We bound the spectral gap of $P(s)$ using the conductance bound (see
Subsection \ref{sec:cond}).
To do this we need to know that $\pi$ is monotone. We first show:
\begin{claim}\label{claim:mono}
For all $0\le s\le 1$,
the ground state of $H_{{\cal S}_0}(s)$ is monotone, namely
$\alpha_0 \ge \alpha_1\ge \ldots \ge \alpha_L \ge 0$.
\end{claim}
\begin{proof}
The case $s=0$ is obvious, so assume $s>0$.
We first claim that the ground state $(\alpha_0,\ldots,\alpha_L)^\dagger$
of $H_{{\cal S}_0}(s)=I-G(s)$ can be written as the limit
$$\frac{1}{c_0}\lim_{\ell \rightarrow \infty}(G(s)/\mu)^\ell (1,\dots,1)^\dagger$$ for some constant $c_0>0$.
To see this, let $\ket{v_0},\ldots,\ket{v_L}$ be an orthonormal set of eigenvectors
of $G(s)$, with corresponding
eigenvalues $\mu_0 \ge \mu_1 \ge \ldots \ge \mu_L$. By Fact \ref{fact:perron},
the largest eigenvalue corresponds to a unique eigenvector, and hence we have
$\ket{v_0}=(\alpha_0,\ldots,\alpha_L)^\dagger$, and $\mu_0=\mu$.
The set of eigenvectors $\ket{v_i}$
forms an orthonormal basis, and we can
write $(1,\ldots,1)^\dagger$ in terms of this basis:
$(1,\ldots,1)^\dagger=\sum_i c_i \ket{v_i}$.
Now, we have that $(G(s)/\mu)^\ell (1,\dots,1)^\dagger=
\sum_i c_i(\frac{\mu_i}{\mu})^\ell \ket{v_i}$.
By Fact \ref{fact:perron} we have $|\mu_i|<\mu$ for all $i\not=0$, and
$\mu>0$. We thus have that
$\lim_{\ell\rightarrow \infty}(G(s)/\mu)^\ell (1,\dots,1)^\dagger=
c_0\ket{v_0}$.
It is easy to check that $G(s)$ preserves monotonicity, namely,
if $G(s)$ is applied to a monotone vector, the result is a monotone vector.
Hence, when $G(s)/\mu$ is applied to the monotone vector $(1,\ldots,1)^\dagger$,
the
result is a monotone vector. Thus,
$c_0\ket{v_0}$ is monotone.
Finally, we observe that $c_0> 0$: indeed, $c_0$ is the inner product
between the all-ones vector and $\ket{v_0}$, whose entries are all
positive by Fact \ref{fact:perron}. Since $c_0\ket{v_0}$ is monotone
and $c_0>0$, it follows that $\ket{v_0}$ is also monotone, as desired.
\end{proof}
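The two steps used in the proof, that $G(s)$ maps monotone vectors to monotone vectors and that the power iteration started from the all-ones vector converges to a monotone ground state, are also easy to confirm numerically (a small sketch of ours):
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
L, s = 12, 0.5                                   # illustrative values
H = np.diag([s / 2] + [1.0] * (L - 1) + [1 - s / 2])
for k in range(L):
    H[k, k + 1] = H[k + 1, k] = -s / 2
G = np.eye(L + 1) - H

# G(s) preserves monotonicity: test it on random nonnegative nonincreasing vectors.
for _ in range(1000):
    v = np.sort(rng.random(L + 1))[::-1]
    assert np.all(np.diff(G @ v) <= 1e-12)

# Hence the power iteration started from (1,...,1) stays monotone, and its
# normalized limit, the ground state of H_{S_0}(s), is monotone as well.
v = np.ones(L + 1)
for _ in range(5000):
    v = G @ v
    v /= np.linalg.norm(v)
assert np.all(np.diff(v) <= 1e-10)
\end{verbatim}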
It follows that $\pi$ is also monotone. We use this and simple
combinatorial arguments to prove
the following claim.
\begin{claim}\label{cl:conduct}
For all $1/3 \le s \le 1$, $\varphi(P(s))\ge \smfrac{1}{6 L}$.
\end{claim}
\begin{proof}
We show that for any nonempty $B \subseteq \{0,\ldots,L\}$ with $\pi(B)\le 1/2$,
$F(B)/\pi(B) \ge \smfrac{1}{6L}$. We consider two cases.
First, assume that $0\in B$. Since $\pi(B)\le 1/2$ we have $B\neq\{0,\ldots,L\}$,
so we may let $k$ be the smallest index such that $k\in B$ but $k+1 \notin B$. Then,
$$F(B)\ge \pi_k P(s)_{k,k+1}= \pi_k \cdot \frac{\sqrt{\pi_{k+1}}}{\mu \sqrt{\pi_k}} G(s)_{k,k+1}=
\frac{\sqrt{\pi_k \pi_{k+1}}}{1-\lambda} G(s)_{k,k+1} \ge \frac{\pi_{k+1}}{1-\lambda} G(s)_{k,k+1}$$
where the last inequality follows from the monotonicity of $\pi$. Using the definition of $G$
and the assumption that $s \ge 1/3$ we get
that $G(s)_{k,k+1}\geq {1/6}$. We also have
$0<1-\lambda \le 1$, where the second inequality follows from the fact that
$H_{{\cal S}_0}(s)$ is positive semidefinite, and the first follows from
$\mu>0$ which we previously deduced from Fact \ref{fact:perron}.
Hence,
\begin{align}\label{eq:kcase1}
\frac{F(B)}{\pi(B)} \ge \frac{\pi_{k+1}}{6\pi(B)}
\end{align}
Since $k$ is the smallest such index, we have $\{0,\ldots,k\}\subseteq B$, so
$\{k+1,\ldots,L\}$ contains the complement of $B$. By $\pi(B) \le 1/2$,
we therefore have $\pi(\{k+1,\ldots,L\}) \ge 1/2$.
Together with $\pi(\{k+1,\ldots,L\}) \le L\pi_{k+1}$ we obtain
$\pi_{k+1} \ge 1/(2L)$. This yields the desired bound
$F(B)/\pi(B) \ge 1/(6L)$.
Now assume that $0\notin B$ and let $k$ be the smallest index
such that $k \notin B$ and $k+1 \in B$.
It is easy to see that $\pi_k P(s)_{k,k+1} = \pi_{k+1} P(s)_{k+1,k}$.
Hence, using the same argument as before we can see that
Equation \ref{eq:kcase1} holds in this case too.
Since $B \subseteq \{k+1,\ldots,L\}$, we have $\pi(\{k+1,\ldots,L\}) \ge
\pi(B)$. Hence, $\pi_{k+1} \ge \pi(B)/L$. Again, this yields the bound
$F(B)/\pi(B) \ge 1/(6L)$.
\end{proof}
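For small $L$ the claim can also be confirmed by brute force over all cuts (a sketch of ours, taking $F(B)=\sum_{i\in B,\,j\notin B}\pi_i P(s)_{i,j}$ and minimizing $F(B)/\pi(B)$ over nonempty $B$ with $\pi(B)\le 1/2$, as in the proof above):
\begin{verbatim}
import numpy as np
from itertools import combinations

def walk(L, s):
    H = np.diag([s / 2] + [1.0] * (L - 1) + [1 - s / 2])
    for k in range(L):
        H[k, k + 1] = H[k + 1, k] = -s / 2
    G = np.eye(L + 1) - H
    evals, evecs = np.linalg.eigh(G)
    mu, alpha = evals[-1], np.abs(evecs[:, -1])
    P = G * alpha[None, :] / (mu * alpha[:, None])
    return P, alpha ** 2 / np.sum(alpha ** 2)

def conductance(P, pi):
    n, best = len(pi), np.inf
    for size in range(1, n):
        for B in combinations(range(n), size):
            B = list(B)
            if pi[B].sum() > 0.5:
                continue
            rest = [j for j in range(n) if j not in B]
            flow = sum(pi[i] * P[i, j] for i in B for j in rest)
            best = min(best, flow / pi[B].sum())
    return best

L = 8
for s in (1 / 3, 0.5, 0.75, 1.0):
    P, pi = walk(L, s)
    assert conductance(P, pi) >= 1 / (6 * L) - 1e-9
\end{verbatim}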
By Theorem~\ref{thm:conductance}, we have that the spectral gap
of $P(s)$ is larger than $1/(2\cdot 6^2 \cdot L^2)$.
By Subsection~\ref{sec:markovham}, we have that $\Delta(H_{{\cal S}_0}(s))\ge
\mu/(2\cdot 6^2 L^2)$. Finally, notice that $\mu = 1 - \lambda
\ge \frac{1}{2}$, because $\lambda\le
\bra{\gamma_0}H_{{\cal S}_0}(s)\ket{\gamma_0}=\frac{s}{2}\le \frac{1}{2}$.
\end{proof}
\subsubsection{Running Time}\label{sec:nonsense}
We now complete the proof of Theorem \ref{thm:5}.
Note that we have already proved something which is very close
to Theorem \ref{thm:5}.
\begin{claim}\label{cl:lookwhatwehavedone}
Given a quantum circuit on $n$ qubits with $L$ gates,
the adiabatic algorithm with $H_{\mathrm{init}}$ and $H_{\mathrm{final}}$ as
defined in the previous section,
with
$T=O(\epsilon^{-\delta}L^{4+2 \delta})$ for some fixed $\delta>0$,
outputs a final state that is within $\ell_2$-distance $\epsilon$ of
the history state of the circuit, $\ket{\eta}$.
The running time of the algorithm is $O(T\cdot L)$.
\end{claim}
\begin{proof}
Claim~\ref{cl:invariant} shows that ${\cal S}_0$ is invariant under $H$.
Hence, as mentioned in Subsection~\ref{sec:model},
an adiabatic evolution according to $H$
is identical to an adiabatic evolution according to $H_{{\cal S}_0}$.
Using Lemma \ref{cl:spec} and Theorem \ref{thm:ad} (with $\|H_{\mathrm{init}}-H_{\mathrm{final}}\|=O(1)$),
we obtain that for $T$ as above the final state (with global phase adjusted appropriately) is indeed $\epsilon$-close in $\ell_2$-norm to $\ket{\eta}$.
By our definition, the running time of the adiabatic algorithm
is $O(T\cdot L)$ since $\|H(s)\|\le (1-s)\|H_{{\mathrm{init}}}\|+s\|H_{{\mathrm{final}}}\|=O(L+n)=O(L)$.
The last equality follows from $n=O(L)$, because each qubit is assumed
to participate in the computation (otherwise we can omit it).
\end{proof}
In fact, one might be satisfied with this claim, which allows us to
generate adiabatically a state
that is very close to $\ket{\eta}$, instead of our desired
$\ket{\alpha(L)}$.
To see why this might be sufficient to simulate quantum circuits,
suppose for a moment that $\epsilon$ is $0$, and the final
state is exactly $\ket{\eta}$.
As mentioned in the introduction, we can now
measure the clock qubits of
the history state, and with probability $1/L$ the outcome is $\ell=L$, which
means that the state of the first register
is the desired state $\ket{\alpha(L)}$.
If the measurement yields another value, we repeat the adiabatic algorithm
from scratch.
To get $\ell=L$ with sufficiently high probability,
we repeat the process $O(L)$ times, which introduces an overhead factor
of $L$. The above discussion
is also true with $\epsilon>0$, as long as it is much smaller than
$1/L$, the weight of $\ket{\alpha(L)}$ in $\ket{\eta}$.
However, this is not sufficient
to complete the proof of Theorem~\ref{thm:5}.
Indeed, the theorem as stated follows our definition of the
model of adiabatic computation, which
allows us to perform one adiabatic evolution and then measure (and possibly trace out some qubits).
Classical postprocessing, such as conditioning on $\ell$ being equal to
$L$ and repeating the computation if it is not, is not allowed.
Hence, we need to adiabatically generate a state that is close to
$\ket{\alpha(L)}$.
This technical issue can be resolved with the following
simple trick, which at the same time allows us to avoid the overhead factor
of $L$ introduced before. We simply
add another $O(\frac{1}{\epsilon}L)$
identity gates to the original quantum circuit at the end of
its computation and then apply the adiabatic simulation
to this modified circuit.
This modification increases the weight of $\ket{\alpha(L)}$ in the
history state.
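As a rough calculation (using only the form of the history state): the padded circuit has $L'=2L/\epsilon$ gates and $\ket{\alpha(\ell)}=\ket{\alpha(L)}$ for every $\ell\ge L$, so tracing out the clock register from the projector onto its history state leaves
$$\rho=\frac{1}{L'+1}\sum_{\ell=0}^{L'}\ketbra{\alpha(\ell)}{\alpha(\ell)}
=\Big(1-\frac{L}{L'+1}\Big)\ketbra{\alpha(L)}{\alpha(L)}+\frac{L}{L'+1}\,\sigma$$
for some density matrix $\sigma$, and hence the trace distance between $\rho$ and $\ketbra{\alpha(L)}{\alpha(L)}$ is at most $L/(L'+1)\le \epsilon/2$.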
The following lemma makes this precise.
\begin{lemma}\label{lem:out}
Assume we can transform any given quantum circuit with $L$ two-qubit
gates on $n$ qubits into a $k$-local adiabatic
computation on $n+L$ $d$-dimensional particles whose output is $\epsilon$-close in $\ell_2$-norm to
the history state of the quantum circuit and whose running
time is $f(L,\epsilon)$ for some function $f$.
Then, we can transform any given quantum circuit
with $L$ two-qubit gates on $n$ qubits into a $k$-local
adiabatic computation on $n+2L/\epsilon$ $d$-dimensional particles
whose output (after
tracing out some ancilla qubits) is $\epsilon$-close in trace
distance to the final state of
the circuit and whose running time is $f(2L/\epsilon, \epsilon/2)$.
\end{lemma}
\begin{proof}
Given a quantum circuit on $n$ qubits with $L$ gates,
consider the circuit obtained by appending to it $(\frac{2}{\epsilon}-1)L$
identity gates. Let $L' = 2L/\epsilon$ be the number of
gates in the modified circuit and let
$\ket{\eta}$ denote its history state. By our assumption,
we can transform this modified circuit into an adiabatic
computation whose output
is $\epsilon/2$ close in $\ell_2$-norm to $\ket{\eta}$ and whose
running time is $f(L', \epsilon/2)$.
Since the trace distance between two pure states is bounded from
above by the $\ell_2$-distance (see, e.g.,
\cite{computingmixedstates}), we obtain that the output of the
adiabatic computation
is also $\epsilon/2$ close in trace distance to $\ketbra{\eta}{\eta}$.
In addition, it is easy to check that after
we trace out the clock qubits from $\ket{\eta}$, we are left with a
state that is $\epsilon/2$ close
in trace distance to the final state of the circuit. We complete
the proof by applying the triangle inequality.
\end{proof}
We can now apply this lemma on the result of
Claim \ref{cl:lookwhatwehavedone}.
This completes the proof
of Theorem \ref{thm:5}, with the running time being
$O(\epsilon^{-(5+3\delta)}L^{5+2 \delta})$.
\subsubsection{Spectral Gap}\label{ssec:spectralgap}
In the previous subsections, we presented a Hamiltonian $H(s)$ and showed that
inside a preserved subspace ${\cal S}_0$ it has a non-negligible spectral gap.
This was enough for the adiabatic algorithm since the entire
adiabatic evolution is performed inside this subspace.
In this subsection, we show that the spectral gap of $H(s)$
in the entire Hilbert space is also non-negligible.
The purpose of this result is twofold. First,
the existence of a non-negligible spectral gap
in the entire Hilbert space
might have some relevance when dealing with adiabatic
computation in the presence of noise (see, e.g., \cite{preskill}).
Second, the techniques that we use here, and in particular Lemma \ref{lem:gapinsides},
are used again in the next subsection, but are easier to present in this
simpler context.
\begin{lemma}
For all $0\le s\le 1$, $\Delta(H(s))=\Omega(L^{-3})$.
\end{lemma}
\begin{proof}
Let ${\cal S}$ be the subspace of dimension $(L+1) \cdot 2^n$ spanned by all legal clock states.
Observe that ${\cal S}$ is preserved by $H(s)$, i.e., $H(s)({\cal S})\subseteq {\cal S}$.
Hence, the eigenstates of $H(s)$ belong either to ${\cal S}$ or to
its orthogonal subspace ${\cal S}^\perp$. We can therefore analyze the spectrum
of $H_{{\cal S}}(s)$ and of $H_{{\cal S}^\perp}(s)$ separately.
First, due to the term $H_{{\mathrm{clock}}}$ and the fact that all
other terms are positive semidefinite, the ground energy
of $H_{{\cal S}^\perp}(s)$ is at least $1$. Second, as we will show next using
Lemma~\ref{lem:gapinsides}, the spectral gap of $H_{{\cal S}}(s)$
is $\Omega(L^{-3})$. To establish the same spectral gap for $H(s)$,
it is enough to show that the ground energy of $H_{{\cal S}}(s)$
is smaller than $\frac{1}{2}$: then the ground state of $H(s)$ lies in ${\cal S}$ and
$\Delta(H(s))\ge \min\{\Delta(H_{{\cal S}}(s)),\,\tfrac{1}{2}\}=\Omega(L^{-3})$. Indeed,
observe that
$$\bra{\gamma_0} H_{{\cal S}}(s) \ket{\gamma_0}= \bra{\gamma_0} H_{{\cal S}_0}(s) \ket{\gamma_0}
= s/2 \le 1/2$$
where the first equality holds because $\ket{\gamma_0} \in {\cal S}_0$ and the second
follows from Equations \ref{eq:h_initial_s0} and \ref{eq:h_final_s0}.
Therefore, the smallest eigenvalue of $H_{{\cal S}}(s)$ is bounded from above
by $1/2$.
\end{proof}
\begin{lemma}\label{lem:gapinsides}
Let ${\cal S}$ denote the subspace spanned by all legal clock states.
Then the ground state of $H_{{\cal S}}(0)$ is $\ket{\gamma_0}$, and that
of $H_{{\cal S}}(1)$ is $\ket{\eta}$.
Moreover, for all $0\le s\le 1$, $\Delta(H_{{\cal S}}(s))=\Omega(L^{-3})$.
\end{lemma}
\begin{proof}
We can write ${\cal S}$ as the direct sum of $2^n$ orthogonal
subspaces ${\cal S}_0,{\cal S}_1,\ldots,{\cal S}_{2^n-1}$, defined as follows.
For $0\le j\le 2^n-1$ and $0\le \ell\le L$ define
$\ket{\gamma^j_\ell}:=\ket{\alpha^j(\ell)} \otimes \ket{1^\ell 0^{L-\ell}}$,
where $\ket{\alpha^j(\ell)}$ is the state of the quantum circuit at time $\ell$ if
the input state corresponds to the binary representation $j$.
Note that $\ket{\gamma^0_\ell}=\ket{\gamma_\ell}$.
The space ${\cal S}_j$ is spanned by $\{\ket{\gamma^j_0},\ldots,\ket{\gamma^j_L}\}$.
It is easy to check the following claim (see Figure~\ref{fig:block}).
\begin{claim}\label{cl:blocks}
The Hamiltonian $H_{{\cal S}}(s)$ is block diagonal in the ${\cal S}_j$'s.
\end{claim}
\begin{figure}
\caption{$H_{{\cal S}}(s)$ is block diagonal in the subspaces ${\cal S}_j$}
\label{fig:block}
\end{figure}
By Claims~\ref{cl:gs}, \ref{cl:gs2}, and \ref{cl:blocks}, together with Lemma~\ref{cl:spec},
it suffices to argue that the ground energy of $H_{{\cal S}_j}(s)$
for any $j\ne 0$ is larger than the ground energy of
$H_{{\cal S}_0}(s)$ by at least $\Omega(1/L^3)$.
Essentially, this follows from the penalty given by the term $H_{{\mathrm{input}}}$
to nonzero input states. The proof, however, is
slightly subtle since $H_{{\mathrm{input}}}$ assigns a penalty only to
states $\ket{\gamma_\ell^j}$ with $\ell=0$.
Notice that
$$H_{{\cal S}_j}(s)=H_{{\cal S}_0}(s)+H_{{\cal S}_j,{\mathrm{input}}}.$$
Moreover, for $1\le j\le 2^n-1$, $H_{{\cal S}_j,{\mathrm{input}}}$ is diagonal, with
its top-left element at least $1$ (it actually equals
the number of $1$'s in the binary representation of $j$)
and all other diagonal elements zero. Hence, if we define
$M$ as
\begin{eqnarray*}
M &:=& \left(
\begin{array}{cccc}
1 & 0 & \ldots & 0 \\
0 & 0 & \ldots & 0 \\
\vdots & \vdots & \ddots & \vdots \\
0 & 0 & \ldots & 0 \\
\end{array}
\right)
\end{eqnarray*}
then $H_{{\cal S}_j,{\mathrm{input}}}-M$ is positive semidefinite and therefore
we can lower bound the ground energy of $H_{{\cal S}_j}(s)$ by the ground
energy of $H_{{\cal S}_0}(s)+M$.
For this, we apply the following geometrical lemma by Kitaev (Lemma 14.4 in
\cite{Kitaev:book}).
\begin{lemma}
Let $H_1,H_2$ be two Hamiltonians with
ground energies $a_1,a_2$, respectively.
Suppose that for both Hamiltonians the difference between the energy of the (possibly degenerate) ground space and the next highest eigenvalue is
larger than $\Lambda$,
and that the angle between the two ground spaces is $\theta$. Then
the ground energy of $H_1+H_2$ is at least
$a_1+a_2+2\Lambda \sin^2(\theta/2)$.
\end{lemma}
(Here the angle $\theta$ between two subspaces is defined by
$\cos\theta=\max|\langle v_1 | v_2 \rangle|$, the maximum being taken over
unit vectors $\ket{v_1},\ket{v_2}$ in the respective subspaces.)
We now apply this lemma to $H_{{\cal S}_0}(s)$ and $M$. By Lemma~\ref{cl:spec}, the spectral gap of $H_{{\cal S}_0}(s)$ is
$\Omega(1/L^2)$. The spectral gap of $M$ is clearly $1$. Moreover, by the monotonicity of the ground state
$(\alpha_0,\ldots,\alpha_L)^\dagger$ of $H_{{\cal S}_0}(s)$ (Claim \ref{claim:mono}) we have $\alpha_0^2\ge 1/(L+1)$, so the
angle between the two ground spaces satisfies $\cos\theta=\sqrt{1-\alpha_0^2}\le 1-\Omega(1/L)$.
It follows that the ground energy of $H_{{\cal S}_j}(s)$ is higher by at
least $\Omega(1/L^3)$ than that of $H_{{\cal S}_0}(s)$.
\end{proof}
\begin{remark}
Notice that we only used the following properties of $H_{\mathrm{input}}$: its restriction
to ${\cal S}_0$ is $0$ and its restriction to ${\cal S}_j$ for any $j\neq 0$ is a diagonal
matrix in the basis $\ket{\gamma^j_0},\ldots,\ket{\gamma^j_L}$ whose top-left entry is at least $1$ and all other entries are non-negative.
This observation will be useful in Section \ref{sec:geometric}.
\end{remark}
\subsection{Three-local Hamiltonian}\label{sec:three}
We now show that adiabatic computation with $3$-local Hamiltonians
is sufficient to simulate standard quantum computations.
\begin{theorem}\label{thm:3}
Given a quantum circuit on $n$ qubits with $L$ two-qubit gates implementing
a unitary $U$, and $\epsilon>0$, there exists
a $3$-local adiabatic computation $AC(n+L,2,H_{\mathrm{init}},H_{\mathrm{final}},\epsilon)$
whose running time is
${\mathrm{poly}}(L,\frac{1}{\epsilon})$ and whose output state is
$\epsilon$-close (in trace distance) to $U \ket{0^n}$.
Moreover, $H_{\mathrm{init}}$ and $H_{\mathrm{final}}$ can be computed by a polynomial
time Turing machine.
\end{theorem}
The proof of this theorem builds on techniques developed in previous
subsections.
The techniques developed in this section will be used in
Section~\ref{sec:geometric}.
\subsubsection{The Hamiltonian}
Consider the Hamiltonian constructed in Subsection \ref{sec:ham}.
Notice that all terms except $H_\ell$ are already $3$-local (some are even $2$-local or $1$-local).
In order to obtain a $3$-local Hamiltonian, we remove two clock qubits from the $5$-local
terms in $H_\ell$ and leave only the $\ell$th clock qubit. More precisely,
for $1< \ell < L$ define
\begin{eqnarray*}
H'_\ell &:=&
I\otimes \ketbra{100}{100}^c_{\ell-1,\ell,\ell+1}- U_\ell\otimes\ketbra{1}{0}^c_{\ell}
- U_\ell^\dag \otimes \ketbra{0}{1}^c_{\ell}
+ I\otimes\ketbra{110}{110}^c_{\ell-1, \ell,\ell+1}.
\end{eqnarray*}
For the boundary cases $\ell=1,L$ we define
\begin{align*}
H'_1 &:=
I\otimes \ketbra{00}{00}^c_{1,2}- U_1\otimes\ketbra{1}{0}^c_{1}
- U_1^\dag \otimes \ketbra{0}{1}^c_{1}
+ I\otimes\ketbra{10}{10}^c_{1,2} \\
H'_L &:=
I\otimes \ketbra{10}{10}^c_{L-1,L}- U_L\otimes\ketbra{1}{0}^c_{L}
- U_L^\dag \otimes \ketbra{0}{1}^c_{L}
+ I\otimes\ketbra{11}{11}^c_{L-1,L}.
\end{align*}
Note that because of the terms $\ketbra{1}{0}^c$ and $\ketbra{0}{1}^c$,
these Hamiltonians no longer leave the subspace ${\cal S}$ invariant.
To mend this, we assign a much larger
energy penalty to illegal clock states. As we will see soon, this
makes the lower part of the spectrum of our Hamiltonians behave essentially
like that of their restriction to ${\cal S}$.
Set $J=\epsilon^{-2} L^6$ and define\footnote{One could also take
$J=c\,\epsilon^{-2} L^5$ for a large enough constant $c$.}
\begin{eqnarray*}
H'_{{\mathrm{init}}}&:= & H_{{\mathrm{clock}}init} + H_{{\mathrm{input}}}+J \cdot H_{\mathrm{clock}} \\
H'_{{\mathrm{final}}} &:= &\frac{1}{2}\sum_{\ell=1}^L{H'_\ell}+H_{{\mathrm{input}}}+J \cdot H_{\mathrm{clock}}.
\end{eqnarray*}
The Hamiltonian we use here is thus
$$H'(s)=(1-s)H'_{{\mathrm{init}}}+sH'_{{\mathrm{final}}}.$$
Essentially the same proof as that of Claim \ref{cl:gs} shows that $\ket{\gamma_0}$ is a ground state of $H'_{{\mathrm{init}}}$.
However, it turns out that $\ket{\eta}$ is no longer a ground state of $H'_{{\mathrm{final}}}$ (the proof of Claim \ref{cl:gs2}
does not apply since $H'_\ell$ is no longer positive semidefinite). Nevertheless, as we shall see later, $\ket{\eta}$ is
very close to the ground state of $H'_{{\mathrm{final}}}$.
\subsubsection{The Spectral Gap}\label{sec:gapthree}
Our first claim is that, when restricted to ${\cal S}$, $H'$ and
$H$ are identical.
\begin{claim}\label{clm:h_hprime_same}
For any $0\le s\le 1$, $H_{{\cal S}}(s) = H'_{{\cal S}}(s)$.
\end{claim}
\begin{proof}
Let $\Pi_{{\cal S}}$ be the orthogonal projection on ${\cal S}$. Then our goal is
to show that $\Pi_{{\cal S}} H(s) \Pi_{{\cal S}} = \Pi_{{\cal S}} H'(s) \Pi_{{\cal S}}$.
The only difference between $H(s)$ and $H'(s)$ is the factor of $J$ in $H_{\mathrm{clock}}$, and that the $H_\ell$ terms are
replaced by $H'_\ell$. We note that $H_{{\cal S},{\mathrm{clock}}}$ is zero.
Hence, it suffices to show that for all $1 \le \ell \le L$,
$$ \Pi_{{\cal S}} H_\ell \Pi_{{\cal S}} = \Pi_{{\cal S}} H'_\ell \Pi_{{\cal S}}.$$
For this, observe that for any $1 < \ell < L$,
$$ \Pi_{{\cal S}} \ketbra{1}{0}^c_{\ell} \Pi_{{\cal S}} =
\ketbra{1^\ell 0^{L-\ell}}{1^{\ell-1} 0^{L-(\ell-1)}}^c
= \Pi_{{\cal S}} \ketbra{110}{100}^c_{\ell-1,\ell,\ell+1} \Pi_{{\cal S}}$$
and similarly for $\ketbra{0}{1}^c_{\ell}$. A similar statement holds for $\ell=1,L$
with the right hand term modified appropriately.
\end{proof}
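For the toy instance used earlier ($n=1$, $L=2$, $U_1$ the Hadamard gate and $U_2$ the NOT gate; again an illustrative sketch of ours), the claim can be checked directly:
\begin{verbatim}
import numpy as np

# Tensor-factor ordering: computation qubit, clock qubit 1, clock qubit 2.
I2 = np.eye(2)
e = {'0': np.array([1.0, 0.0]), '1': np.array([0.0, 1.0])}
U1 = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
U2 = np.array([[0.0, 1.0], [1.0, 0.0]])
flip = np.outer(e['1'], e['0'])                 # |1><0| on a single clock qubit

def clock(out_bits, in_bits):
    """|out_bits><in_bits| on the two clock qubits."""
    return np.outer(np.kron(e[out_bits[0]], e[out_bits[1]]),
                    np.kron(e[in_bits[0]], e[in_bits[1]]))

H_1 = (np.kron(I2, clock('00', '00')) - np.kron(U1, clock('10', '00'))
       - np.kron(U1.conj().T, clock('00', '10')) + np.kron(I2, clock('10', '10')))
H_2 = (np.kron(I2, clock('10', '10')) - np.kron(U2, clock('11', '10'))
       - np.kron(U2.conj().T, clock('10', '11')) + np.kron(I2, clock('11', '11')))
# 3-local variants: the U terms act on the computation qubit and one clock qubit.
Hp_1 = (np.kron(I2, clock('00', '00')) - np.kron(U1, np.kron(flip, I2))
        - np.kron(U1.conj().T, np.kron(flip.T, I2)) + np.kron(I2, clock('10', '10')))
Hp_2 = (np.kron(I2, clock('10', '10')) - np.kron(U2, np.kron(I2, flip))
        - np.kron(U2.conj().T, np.kron(I2, flip.T)) + np.kron(I2, clock('11', '11')))

# Projector onto S: computation qubit arbitrary, clock in a legal state 00, 10 or 11.
PiS = np.kron(I2, clock('00', '00') + clock('10', '10') + clock('11', '11'))
assert np.allclose(PiS @ H_1 @ PiS, PiS @ Hp_1 @ PiS)
assert np.allclose(PiS @ H_2 @ PiS, PiS @ Hp_2 @ PiS)
assert not np.allclose(H_1, Hp_1)               # the unprojected operators do differ
\end{verbatim}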
Lemma~\ref{lem:gapinsides} and Claim~\ref{clm:h_hprime_same}
imply that $\Delta(H'_{{\cal S}}(s))=\Omega(L^{-3})$.
We now want to deduce from this a lower bound on
$\Delta(H'(s))$, without the restriction to ${\cal S}$.
For this we use the following claim.
Essentially, it says that if $J$ is large enough,
then the lower part of the spectrum of $H'(s)$ is
similar to that of $H'_{{\cal S}}(s)$. More precisely, it shows
that the lowest eigenvalues, the second lowest eigenvalues,
and the ground states of the two Hamiltonians are close.
Intuitively, this holds since the energy penalty given to
states in ${\cal S}^\perp$, the orthogonal space to ${\cal S}$, is very high
and hence any eigenvector with low eigenvalue must be almost orthogonal
to ${\cal S}^\perp$ (and hence almost inside ${\cal S}$).
We note that a similar lemma was used in
\cite{Kempe:03a} in the context of quantum ${\sf{NP}}$-complete problems.
\begin{lemma}\label{le:leak}
Let $H=H_1+H_2$ be the sum of two Hamiltonians operating on some
Hilbert space ${\cal{H}}={\cal S}\oplus{\cal S}^\perp$. The Hamiltonian $H_2$ is
such that ${\cal S}$ is a zero eigenspace and the eigenvectors in
${\cal S}^\perp$ have eigenvalue at least $J> 2K$ where $K=\|H_1\|$. Let $a$
and $b$ be the lowest and the second lowest eigenvalues of $H_{{\cal S}}$
and let $a'$ and $b'$ be the corresponding quantities for $H$. Then
the lowest eigenvalue of $H$ satisfies $a - \frac{K^2}{J-2K} \le a'
\le a$ and the second lowest eigenvalue of $H$ satisfies $b'\ge b-
\frac{K^2}{J-2K}$. If, moreover, $b>a$ then the ground states
$\ket{\xi},\ket{\xi'}$ of $H_{{\cal S}},H$ respectively satisfy
\begin{eqnarray*}
|\langle \xi | \xi' \rangle | ^2
& \ge& 1- \frac{K^2}{(b-a)(J-2K)}.
\end{eqnarray*}
\end{lemma}
\begin{proof}
First, we show that $a'\le a$. Using $H_2|\xi \ra = 0$,
\begin{eqnarray*}
\bra{\xi}H \ket{\xi}&=&\bra{\xi}H_1 \ket{\xi}+\bra{\xi}H_2 \ket{\xi}
~=~ a
\end{eqnarray*}
and hence $H$ must have an eigenvector of eigenvalue at most $a$.
We now show the lower bound on $a'$. We can write any unit
vector $\ket{v} \in {\cal{H}}$ as $\ket{v}=\alpha_1
\ket{v_1}+\alpha_2 \ket{v_2}$ where $\ket{v_1} \in {\cal S}$ and $\ket{v_2} \in {\cal S}^\perp$ are two unit vectors and
$\alpha_1,\alpha_2$ are two non-negative reals satisfying $\alpha_1^2+\alpha_2^2=1$. Then we have,
\begin{eqnarray*}\label{eq:projection}
\bra{v} H \ket{v}
&\geq& \bra{v} H_1 \ket{v} + J \alpha_2^2 \nonumber \\
&=& (1-\alpha_2^2) \bra{v_1}H_1 \ket{v_1} + 2 \alpha_1 \alpha_2 {\rm Re} \bra{v_1} H_1
\ket{v_2}+\alpha_2^2 \bra{v_2} H_1 \ket{v_2} + J \alpha_2^2 \nonumber \\
&\geq& \bra{v_1}H_1 \ket{v_1} - K \alpha_2^2 - 2 K \alpha_2
- K \alpha_2^2 + J \alpha_2^2 \nonumber \\
&=& \bra{v_1}H_1 \ket{v_1} + (J-2K) \alpha_2^2 - 2 K \alpha_2
\end{eqnarray*}
where we used $\alpha_1^2 = 1-\alpha_2^2$ and $\alpha_1 \le 1$. Since $(J-2K) \alpha_2^2 - 2 K \alpha_2$ is minimized
for $\alpha_2=K/(J-2K)$, we have
\begin{eqnarray}\label{eq:projection2}
\bra{v} H \ket{v} &\geq& \bra{v_1}H_1 \ket{v_1} - \frac{K^2}{J-2K}.
\end{eqnarray}
We obtain the required lower bound by noting that $\bra{v_1}H_1 \ket{v_1} \ge a$.
Consider now the two-dimensional space ${\cal L}$ spanned by the two eigenvectors of $H$ corresponding to $a'$ and
$b'$. For any unit vector $\ket{v} \in {\cal L}$ we have $\bra{v}H\ket{v} \le b'$. Hence, if ${\cal L}$ contains a
vector $\ket{v}$ orthogonal to ${\cal S}$, then we have $b' \ge \bra{v}H\ket{v} \ge J-K > K \ge b$ and we are done.
Otherwise, the projection of ${\cal L}$ on ${\cal S}$ must be a two-dimensional space. Being two-dimensional, this space
must contain a vector orthogonal to $\ket{\xi}$.
Let $\ket{v}$ be a vector in ${\cal L}$ whose projection on ${\cal S}$
is orthogonal to $\ket{\xi}$. By \eqref{eq:projection2},
$b' \ge \bra{v}H\ket{v} \ge b - \frac{K^2}{J-2K}$, as
required.
Finally, let $\beta = |\langle \xi | \xi' \rangle | ^2$.
Then we can write $\ket{\xi} =
\sqrt{\beta}\ket{\xi'}+\sqrt{1-\beta}\ket{\xi'^\perp}$ for some
unit vector $\ket{\xi'^\perp}$ orthogonal to
$\ket{\xi'}$. Since $\ket{\xi'}$ is an eigenvector of $H$,
\begin{eqnarray*}
a ~=~ \bra{\xi}H\ket{\xi} &=& \beta \bra{\xi'}H\ket{\xi'} +
(1-\beta)\bra{\xi'^\perp}H\ket{\xi'^\perp}\\
& \ge &\beta a' + (1-\beta) b' \\
&\ge& \beta \Big(a - \frac{K^2}{J-2K}\Big) +
(1-\beta)\Big(b - \frac{K^2}{J-2K}\Big) \\
&= &a + (1-\beta)(b-a) - \frac{K^2}{J-2K}.
\end{eqnarray*}
Rearranging, we obtain the required bound.
\end{proof}
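A quick numerical check of the lemma on a random instance (our sketch; here $H_2$ is taken to be $J$ times the projector onto ${\cal S}^\perp$, which satisfies the hypotheses):
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(1)
N, m = 12, 5                                  # dimension of the full space and of S
A = rng.normal(size=(N, N)) + 1j * rng.normal(size=(N, N))
H1 = (A + A.conj().T) / 2                     # a random Hermitian H_1
K = np.linalg.norm(H1, 2)
J = 50.0 * K                                  # any J > 2K will do
H2 = np.zeros((N, N), dtype=complex)
H2[m:, m:] = J * np.eye(N - m)                # zero on S, eigenvalue J on S^perp

HS = H1[:m, :m]                               # restriction of H to S (H_2 vanishes there)
a, b = np.sort(np.linalg.eigvalsh(HS))[:2]
evals, evecs = np.linalg.eigh(H1 + H2)
a_p, b_p = evals[0], evals[1]
err = K ** 2 / (J - 2 * K)

assert a - err - 1e-9 <= a_p <= a + 1e-9      # bound on the lowest eigenvalue
assert b_p >= b - err - 1e-9                  # bound on the second lowest eigenvalue

xi = np.zeros(N, dtype=complex)
xi[:m] = np.linalg.eigh(HS)[1][:, 0]          # ground state of H_S in the full space
overlap = abs(np.vdot(xi, evecs[:, 0])) ** 2
assert overlap >= 1 - err / (b - a) - 1e-9    # bound on the ground-state overlap
\end{verbatim}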
We can now bound the spectral gap of $H'(s)$.
\begin{lemma}\label{lm:spec3}
For all $0\le s\le 1$, $\Delta(H'(s))=\Omega(L^{-3})$.
\end{lemma}
\begin{proof}
We apply Lemma~\ref{le:leak} by setting $H_2=J\cdot H_{\mathrm{clock}}$ and $H_1$ to be the remaining terms such that
$H'(s)=H_1+H_2$. Note that Lemma~\ref{le:leak} implies that the spectral gap of $H'(s)$ is smaller than that of
$H'_{{\cal S}}(s)$ (which is $\Omega(1/L^3)$ by Lemma~\ref{lem:gapinsides}) by at most $K^2/(J-2K)$. But it is easy to see
that $K=O(L)$, due to the fact that $H_1$ consists of $O(L)$ terms, each of constant norm.
The result follows since $J =\epsilon^{-2}L^6$.
\end{proof}
This shows the desired bound on the spectral gap.
Before we complete the proof, we must show that the final ground state
is close to the history state.
\begin{lemma}\label{lm:gs_close}
The ground state of
$H'(1)$ is $\epsilon$-close to $\ket{\eta}$.
\end{lemma}
\begin{proof}
Apply Lemma~\ref{le:leak} as in the proof
of Lemma \ref{lm:spec3}, for the case
$s=1$. We obtain that the inner product squared between the ground state of $H'(1)$ and $\ket{\eta}$, is at least
$1-\delta$, with $\delta= \frac{K^2}{(b-a)(J-2K)}=O(L^{-1}\epsilon^2)$,
where we have used $K=O(L)$, $J=\epsilon^{-2} L^{6}$, and $b - a
=\Omega(1/L^3)$ by Lemma~\ref{lem:gapinsides}.
This implies that the $\ell_2$-distance between the ground state of
$H'(1)$ and $\ket{\eta}$ is $O(\epsilon/\sqrt{L}) \leq \epsilon$.
\end{proof}
We now complete the proof of Theorem~\ref{thm:3}.
The adiabatic algorithm starts with $\ket{\gamma_0}$ and evolves according
to $H'(s)$ for $T=\Theta(\epsilon^{-\delta}L^{7+3\delta})$.
Such a $T$ satisfies the adiabatic condition
(Equation \ref{eq:adiabatic_cond}), using $\|H'_{{\mathrm{final}}}-H'_{{\mathrm{init}}}\|=O(L)$.
By Theorem \ref{thm:ad} the final state is
$\epsilon$-close in $\ell_2$-distance to the ground state of $H'_{\mathrm{final}}$.
Lemma \ref{lm:gs_close} implies that this state is $\epsilon$-close in
$\ell_2$-distance to $\ket{\eta}$.
Using the triangle inequality we note that the output of the
adiabatic computation is $2\epsilon$-close to $\ket{\eta}$.
The running time of this algorithm is $O(T \cdot J \cdot L)=O(T\cdot \epsilon^{-2}L^7)=
O(\epsilon^{-(2+\delta)}L^{14+3\delta})$.
We can now apply Lemma \ref{lem:out} to obtain a modified
adiabatic computation whose output state after
tracing out the clock qubits is $\epsilon$-close in trace
distance to $U|0^n\ra$. The running time is
$O(\epsilon^{-(16+4\delta)}L^{14+3 \delta})$ for any fixed $\delta>0$.
\section{Two Local Hamiltonians on a Two-Dimensional Lattice}\label{sec:geometric}
In this section we prove Theorem~\ref{thm:geo}.
We simulate a given quantum circuit by an adiabatic evolution of a system
of $6$-dimensional quantum particles arranged on a two-dimensional grid.
More precisely, we prove the following theorem:
\begin{theorem}\label{thm:6level}
Given a quantum circuit on $n$ qubits with $L$ two-qubit gates
implementing a unitary $U$, and $\epsilon>0$, there exists a $2$-local adiabatic computation
$AC({\mathrm{poly}}(n,L),6,H_{\mathrm{init}},H_{\mathrm{final}},\epsilon)$ such that $H_{\mathrm{init}}$
and $H_{\mathrm{final}}$ involve only nearest neighbors on a $2$-dimensional grid,
its running time is ${\mathrm{poly}}(L,\frac{1}{\epsilon})$,
and its output (after performing a partial measurement on each particle)
is $\epsilon$-close (in trace distance) to
$U \ket{0^n}$. Moreover,
$H_{\mathrm{init}}$ and $H_{\mathrm{final}}$ can be computed by a polynomial time Turing
machine.
\end{theorem}
As mentioned in the introduction, the main problem
in proving this theorem, and more precisely, in
moving to a two dimensional grid, is the notion of a clock.
In the constructions of the previous section,
the clock is represented by an additional
register that counts the clock steps in unary
representation.
The terms $H_\ell$, which check the correct propagation in the $\ell$th time step,
interact between the $\ell$th qubit of the clock and
the corresponding qubits on which $U_\ell$ operates. If we want
to restrict the interaction to nearest neighbors in two dimensions using this
idea, then no matter how the clock qubits are arranged on the grid, we run into problems interacting the qubits with the corresponding clock qubits in a local way.
The solution to this problem lies in the way we represent the clock.
Instead of using an extra register, we embed the clock
into the same particles that perform the computation
by defining the notion of a {\em shape} of a state, to be defined later.
We then create a sequence of legal shapes, and show how states can evolve
from one legal shape to another.
Although the construction of this section is more involved than the ones of
the previous section, its analysis follows almost immediately from the analysis
carried out in Theorem \ref{thm:3}. To achieve this, we make sure that
the Hamiltonians and some relevant subspaces are as similar as possible
to those in the previous section.
\subsection{Assumptions on the Input Circuit}\label{sec:layout}
To simplify the construction of our adiabatic evolution, we first
assume without loss of generality that
the quantum circuit we wish to simulate has a particular layout of its
gates.
Namely, it consists of $R$ rounds, where each round is
composed of $n$ nearest neighbor
gates (some can be the identity gate), followed by $n$ identity gates, as in Figure \ref{figure:circuit}.
More specifically, the first gate in each round is a one-qubit gate applied to the
first qubit. For $i=2,\dots,n$, the $i$th gate is a two-qubit gate
applied to qubits $i-1$ and $i$. For $i=n+1, \ldots, 2n$ the $i$th gate is an identity gate applied to
the $(2n+1-i)$th qubit. These identity gates are included for convenience of notation.
Any circuit can be transformed to such a form by introducing extra identity
and swap gates. Let $L=2nR$ be the total number of gates in the circuit so obtained.
Clearly, $L$ is at most polynomially larger than the number of gates in the original circuit.
\begin{figure}
\caption{The modified circuit with $R=3$.}
\label{figure:circuit}
\end{figure}
\subsection{The Particles of the Adiabatic Quantum System}
The adiabatic computation is performed on $6$-dimensional particles,
arranged on a two-dimensional square lattice with $n$ rows and $R+1$
columns. We number the rows from $1$ (top) to $n$ (bottom) and
the columns from $0$ (left) to $R$ (right).
Columns number $0$ and $1$ are used to simulate the first round of the circuit.
Columns number $1$ and $2$ are used for the second round of computation,
and so on. We denote the six internal states of a particle by $\ket{{\bigcirc}},
\ket{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\bigcirc}},\ket{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}},
\ket{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Uparrow$}}\bigcirc}},\ket{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Downarrow$}}\bigcirc}}$, and $\ket{\stated}$.
These six states are divided into
four {\em phases}: the \emph{unborn phase} $\ket{{\bigcirc}}$,
the {\em first phase} $\ket{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\bigcirc}},\ket{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}}$, the {\em second phase}
$\ket{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Uparrow$}}\bigcirc}},\ket{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Downarrow$}}\bigcirc}}$, and the {\em dead phase} $\ket{\stated}$.
The two states in the first phase and
the two states in the second phase correspond
to computational degrees of freedom, namely to
the $\ket{0}$ and $\ket{1}$ states of a qubit. We write
$\ket{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}}$ to denote an arbitrary state in the subspace
spanned by $\ket{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\bigcirc}}$ and $\ket{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}}$.
Similarly, $\ket{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{\raisebox{.035em}{\fontsize{9}{0}\selectfont$\Updownarrow$}}}\bigcirc}}$ denotes a state in the space
spanned by $\ket{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Uparrow$}}\bigcirc}}$ and $\ket{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Downarrow$}}\bigcirc}}$.
The phases are used to define the {\em shape} of the basis states.
A shape of a basis state is simply an assignment of
one of the four phases to each particle, ignoring the
computational degrees of freedom inside the first and second phase.
These shapes will be used instead of the clock states of the
previous section.
\subsection{Geometrical Clock}
We now describe the way we represent clock using shapes. In the previous
constructions, the space ${\cal S}$ of dimension $2^n(L+1)$ was the
ground space of the clock, i.e., the space spanned by legal clock
states. Inside the clock register there were $L+1$ legal clock states.
Note that each such clock state can be
described, essentially, in a geometric way by the ``shape'' of the clock
particles: how many $1$'s precede how many $0$'s.
We now describe the corresponding
subspaces involved in our construction for the
two dimensional case.
For each $0\le \ell \le L$, we have a $2^n$-dimensional subspace
corresponding to that clock state. Each of these $L+1$ subspaces
can be described
by its {\em shape}, that is, a setting of one of the four phases to
each particle.
See Figure~\ref{figure:states} for an illustration
with $n = 6, R=6$. The six shapes shown correspond to clock states
$\ell=0, \ell=4n, \ell=4n+3, \ell=5n+2, \ell=6n$,
and $\ell=2nR$ respectively. Notice that each shape
has exactly $n$ particles in the first or
second phase. Hence, the dimension of the subspace induced
by each shape is $2^n$.
As $\ell$ goes from $0$ to $L$, the shape changes from that
shown in Fig.~\ref{figure:states}a to the last shape shown
in Fig.~\ref{figure:states}. The locations at which
the changes occur form a snake-like pattern winding
down and up the lattice, following the layout of the gates in the input circuit Fig.~\ref{figure:circuit}.
\begin{figure}
\caption{Legal clock states}
\label{figure:states}
\end{figure}
We now describe the legal shapes more formally.
\begin{enumerate} \item The shape
corresponding to clock state
$\ell=2nr+k$ for $0\le k\le n$ has its $r$ leftmost columns
in the dead phase. The top $k$ particles in the $(r+1)$st column
are in their second phase while the bottom $n-k$ are in the
first phase. Particles in the remaining $R-r$ columns are all
in the unborn phase.
\item The shape corresponding to clock state
$\ell=2nr+n+k$ for $1\le k\le n-1$ has, as before, its
$r$ leftmost columns in the dead phase. The $(r+1)$st column
has its $n-k$ topmost particles in the second phase,
and its remaining $k$ particles in the dead phase.
The $(r+2)$nd column has its $n-k$ topmost particles in the
unborn phase and its remaining $k$ particles in the
first phase. All remaining particles are in the unborn
phase.
\end{enumerate}
\begin{figure}
\caption{The initial state}
\label{figure:firststate}
\end{figure}
The subspace ${\cal S}$ is defined as the $(L+1)2^n$-dimensional
space spanned by all legal shapes.
As in previous sections we partition ${\cal S}$ into $2^n$ subspaces ${\cal S}_j$.
Each subspace ${\cal S}_j$ is spanned by $L+1$ orthogonal
states $\ket{\gamma^j_0},\ldots,\ket{\gamma^j_L}$, defined as
follows. For each $0\le \ell\le L$ and $0\le j\le 2^n-1$,
the shape of $\ket{\gamma^j_\ell}$ corresponds to $\ell$.
The state of the $n$ active particles (i.e., those in
either the first or the second phase), when read from top to bottom,
corresponds to the state of the circuit after the first $\ell$ gates are
applied to an initial state corresponding to the binary representation
of $j$; i.e., it corresponds to the state $U_\ell \cdot U_{\ell-1}\cdots U_1|j\ra$.
More precisely, these particles are in a superposition obtained by
mapping this state to the state of the $n$ active particles
in the following way: $\ket{0}$ to ${\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\bigcirc}$
(or ${\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Uparrow$}}\bigcirc}$ for a second phase particle)
and $\ket{1}$ to ${\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}$ (or ${\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Downarrow$}}\bigcirc}$ for a second phase particle).
We often denote $\ket{\gamma_\ell^0}$, which corresponds to the
all $0$ input, by $\ket{\gamma_\ell}$.
For example, $\ket{\gamma_0}$ is shown in
Figure \ref{figure:firststate}.
With the risk of being somewhat redundant, let us now give
an alternative description of the states
$\ket{\gamma_0^j},\ldots,\ket{\gamma_L^j}$. This description
is more helpful in understanding the Hamiltonians $H''_\ell$ which
we will define shortly.
Consider a state $\ket{\gamma^j_\ell}$ for some $\ell=2rn$.
The $n$ particles in the $r$th column are in their first phase
and their computational degrees of freedom correspond to the
state of the circuit's qubits at the beginning of the $r$th round.
Particles to the left of this column are dead, those to the right
of this column are unborn. The state $\ket{\gamma^j_{\ell+1}}$
is obtained from $\ket{\gamma^j_{\ell}}$ by changing
the topmost particle in the $r$th column to a second phase
particle and applying the first gate in the $r$th round (a one-qubit gate)
to its computational degrees of freedom.
Next, the state $\ket{\gamma^j_{\ell+2}}$
is obtained from $\ket{\gamma^j_{\ell+1}}$ by changing
the second particle from above in the $r$th column to a second phase
particle and applying the second gate in the $r$th round (a two-qubit gate)
to both this particle and the one on top of it.
We continue in a similar fashion until we reach
$\ket{\gamma^j_{\ell+n}}$, in which the entire
$r$th column is in the second phase.
We refer to these steps as the downward stage.
Next, let us describe the upward stage.
The state $\ket{\gamma^j_{\ell+n+1}}$ is obtained from $\ket{\gamma^j_{\ell+n}}$
by `moving' the bottommost particle in the $r$th column one location
to the right. More precisely, the bottommost particle changes to the dead phase
and the one to the right of it changes to the first phase. The computational
degrees of freedom are the same in both states. This corresponds to the
fact that the $(n+1)$st gate in a round of the circuit is the identity gate.\footnote{We
could allow arbitrary one-qubit gates here instead of identity gates.
This leads to a slightly more efficient construction but also to more cumbersome
Hamiltonians.}
Continuing in a similar fashion, we see that the upwards stage ends in the
state $\ket{\gamma^j_{\ell+n+n}} = \ket{\gamma^j_{2(r+1)n}}$
that matches the above description of the first state in a round.
\subsection{The Hamiltonian}
The initial and final Hamiltonians are defined as
\begin{eqnarray*}
H''_{{\mathrm{init}}} &:=& H''_{{\mathrm{clock}}init}
~+~ H''_{{\mathrm{input}}} ~+~ J \cdot H''_{{\mathrm{clock}}}\\
H''_{{\mathrm{final}}} &:=& \frac{1}{2}\sum_{\ell=1}^L{H''_\ell}
+ H''_{{\mathrm{input}}} +
J \cdot H''_{{\mathrm{clock}}},
\end{eqnarray*}
where $J=\epsilon^{-2}\cdot L^6$.
These Hamiltonians are chosen to be as similar as possible to the
corresponding Hamiltonians in previous sections.
For example, $H''_{\mathrm{clock}}$ has as its ground space
the space of legal clock states, ${\cal S}$. As before, it
allows us to essentially project all other Hamiltonians
on ${\cal S}$, by assigning a large energy penalty to states with illegal shape.
Also, the Hamiltonians $H''_\ell$ (once projected to ${\cal S}$)
check correct propagation from one step to the next.
Other terms also serve similar roles as before.
Let us start with the simplest terms.
Define
$$H''_{{\mathrm{input}}} := \sum_{i=1}^{n}{(\ketbra{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}})_{i,1}}.$$
The indices indicate the row and column of the particle on which
the Hamiltonian operates.
This Hamiltonian checks that none of the particles in
the leftmost column are in $\ket{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}}$.
Then, define
$$H''_{{\mathrm{clock}}init}=(I-\ketbra{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\bigcirc}}-\ketbra{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}})_{1,1}.$$
This Hamiltonian checks that the top-left particle
is in a $\ket{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}}$ state.
The remaining terms are described in the following subsections.
\subsubsection{The Clock Hamiltonian}
The shapes we define satisfy the following important property:
there exists a {\em two-local way} to verify that a shape
is legal.
This allows us to define a two-local clock Hamiltonian,
$H''_{\mathrm{clock}}$, whose ground space is
exactly ${\cal S}$, the $(L+1)2^n$-dimensional
space spanned by all legal shapes.
\begin{table}[h!]
\center{
\begin{tabular}{|l|l|}
\hline
Forbidden & Guarantees that \\ \hline \hline
${\bigcirc}{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}, {\bigcirc}{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{\raisebox{.035em}{\fontsize{9}{0}\selectfont$\Updownarrow$}}}\bigcirc}, {\bigcirc}\stated$& ${\bigcirc}$ is to the right of all other qubits\\ \hline
${\bigcirc}\stated, {\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}\stated, {\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{\raisebox{.035em}{\fontsize{9}{0}\selectfont$\Updownarrow$}}}\bigcirc}\stated$& $\stated$ is to the left of all other qubits\\ \hline
${\bigcirc}\stated, \stated{\bigcirc}$& ${\bigcirc}$ and $\stated$ are not horizontally adjacent\\ \hline
${\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}$, ${\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{\raisebox{.035em}{\fontsize{9}{0}\selectfont$\Updownarrow$}}}\bigcirc}$,&\\
${\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{\raisebox{.035em}{\fontsize{9}{0}\selectfont$\Updownarrow$}}}\bigcirc}{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}$, ${\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{\raisebox{.035em}{\fontsize{9}{0}\selectfont$\Updownarrow$}}}\bigcirc}{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{\raisebox{.035em}{\fontsize{9}{0}\selectfont$\Updownarrow$}}}\bigcirc}$ & only one of ${\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}$, ${\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{\raisebox{.035em}{\fontsize{9}{0}\selectfont$\Updownarrow$}}}\bigcirc}$ per row \\ \hline
$\ontop{{\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{\raisebox{.035em}{\fontsize{9}{0}\selectfont$\Updownarrow$}}}\bigcirc}}, \ontop{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{\raisebox{.035em}{\fontsize{9}{0}\selectfont$\Updownarrow$}}}\bigcirc}}, \ontop{\stated}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{\raisebox{.035em}{\fontsize{9}{0}\selectfont$\Updownarrow$}}}\bigcirc}}$& only ${\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{\raisebox{.035em}{\fontsize{9}{0}\selectfont$\Updownarrow$}}}\bigcirc}$ above ${\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{\raisebox{.035em}{\fontsize{9}{0}\selectfont$\Updownarrow$}}}\bigcirc}$\\ \hline
$\ontop{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}}{{\bigcirc}}, \ontop{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{\raisebox{.035em}{\fontsize{9}{0}\selectfont$\Updownarrow$}}}\bigcirc}}, \ontop{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}}\stated$& only ${\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}$ below ${\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}$\\ \hline
$\ontop{{\bigcirc}}{\stated}, \ontop{\stated}{{\bigcirc}}$& ${\bigcirc}$ and $\stated$ are not vertically adjacent\\ \hline
$\ontop{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{\raisebox{.035em}{\fontsize{9}{0}\selectfont$\Updownarrow$}}}\bigcirc}}{{\bigcirc}}, \ontop{\stated}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}}$& no ${\bigcirc}$ below ${\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{\raisebox{.035em}{\fontsize{9}{0}\selectfont$\Updownarrow$}}}\bigcirc}$ and no ${\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}$ below $\stated$\\ \hline
\end{tabular}
}
\caption{Local rules for a basis state to be in ${\cal S}$}
\label{tab:rules}
\end{table}
\begin{claim}\label{cl:rules}
A shape is legal if and only if it contains none of the forbidden configurations of Table~\ref{tab:rules}.
\end{claim}
\begin{proof}
It is easy to check that any legal shape contains none of the forbidden configurations.
For the other direction, consider any shape that contains none of these configurations.
Observe that each row must be of the form $\stated^*[{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc},{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{\raisebox{.035em}{\fontsize{9}{0}\selectfont$\Updownarrow$}}}\bigcirc}]{\bigcirc}^*$, that is,
it starts with a sequence of zero or more $\stated$, it then contains either
${\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}$ or ${\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{\raisebox{.035em}{\fontsize{9}{0}\selectfont$\Updownarrow$}}}\bigcirc}$, and then ends with a sequence of zero or more ${\bigcirc}$.
Columns can be of three different forms. Read from top to bottom, it is
either ${\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{\raisebox{.035em}{\fontsize{9}{0}\selectfont$\Updownarrow$}}}\bigcirc}^*{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}^*$, ${\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{\raisebox{.035em}{\fontsize{9}{0}\selectfont$\Updownarrow$}}}\bigcirc}^*\stated^*$, or ${\bigcirc}^*{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}^*$.
It is now easy to verify that such a shape must be one of the legal shapes.
\end{proof}
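As a quick illustration of the claim, the following Python sketch (illustrative only; the symbols are written here as \texttt{D} for $\stated$, \texttt{F} for the $\Updownarrow$-circle, \texttt{S} for the $\uparrow\downarrow$-circle and \texttt{U} for $\bigcirc$, and the horizontal row constraint is imposed directly rather than through the corresponding two-local rules) enumerates all labelings of a small grid, discards those containing a forbidden vertical configuration of Table~\ref{tab:rules}, and checks that every surviving column has one of the three forms used above.
\begin{verbatim}
# Illustration of the claim on a small grid.  Symbols: 'D' = dead state,
# 'F' = the Updownarrow circle, 'S' = the up/down circle, 'U' = unborn circle.
# The horizontal constraint "each row is D* [F or S] U*" is imposed directly
# here; in the paper it is enforced by further two-local rules.
import itertools
import re

ROW_RE = re.compile(r'^D*[FS]U*$')
COL_RES = [re.compile(p) for p in (r'^F*S*$', r'^F*D*$', r'^U*S*$')]
# Forbidden vertical pairs (above, below), transcribed from the table:
FORBIDDEN_V = {('U', 'F'), ('S', 'F'), ('D', 'F'),   # only F may sit above F
               ('S', 'U'), ('S', 'D'),               # only S may sit below S
               ('U', 'D'), ('D', 'U'),               # U and D never adjacent
               ('F', 'U'), ('D', 'S')}               # no U below F, no S below D

def legal(grid, rows, cols):
    if any(not ROW_RE.match(''.join(grid[i])) for i in range(rows)):
        return False
    return all((grid[i][j], grid[i + 1][j]) not in FORBIDDEN_V
               for i in range(rows - 1) for j in range(cols))

rows, cols = 3, 3
shapes = [g for g in itertools.product('DFSU', repeat=rows * cols)
          if legal([g[i * cols:(i + 1) * cols] for i in range(rows)], rows, cols)]
# Every surviving shape has columns of one of the three forms used in the proof.
for g in shapes:
    for j in range(cols):
        col = ''.join(g[i * cols + j] for i in range(rows))
        assert any(p.match(col) for p in COL_RES), col
print(len(shapes), "legal 3x3 shapes; every column matches a claimed form")
\end{verbatim}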
Using this claim, we can define a two-local
nearest-neighbor Hamiltonian that guarantees a legal shape.
For example, if the rule forbids a particle at location $(i,j)$
in state ${\bigcirc}$ to the left of a particle at location $(i,j+1)$ in state $\stated$,
then the corresponding term in the Hamiltonian is
$(\ketbra{{\bigcirc},\stated}{{\bigcirc},\stated})_{(i,j),(i,j+1)}$.
Summing over all the forbidden configurations
of Table \ref{tab:rules} and over all relevant pairs of particles, we have
$$H''_{{\mathrm{clock}}} := \sum_{r\in {\mathrm{rules}}} H_{r}.$$
Note that the ground space of $H''_{\mathrm{clock}}$ is the $(L+1)2^n$-dimensional space ${\cal{S}}$.
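For concreteness, the following sketch (illustrative only; the numerical indices assigned to the six particle states are an arbitrary choice made here) builds one such penalty term as a $36\times 36$ projector and checks that it assigns energy $1$ to the forbidden configuration and $0$ to every other basis configuration of the pair.
\begin{verbatim}
# Sketch: one two-local penalty term of H''_clock as a 36x36 projector.
# The six single-particle states are indexed 0..5; taking index 0 for the
# unborn circle and 5 for the dead state is an arbitrary choice made here.
import numpy as np

d = 6
UNBORN, DEAD = 0, 5

def ket(i):
    v = np.zeros(d); v[i] = 1.0
    return v

forbidden = np.kron(ket(UNBORN), ket(DEAD))   # |unborn, dead> on ((i,j),(i,j+1))
H_term = np.outer(forbidden, forbidden)       # the projector |.,.><.,.|

# Energy 1 on the forbidden configuration, 0 on all other basis configurations;
# summing such terms over all rules and neighbouring pairs penalizes exactly
# the illegal shapes, so the legal shapes span the ground space.
for a in range(d):
    for b in range(d):
        v = np.kron(ket(a), ket(b))
        energy = v @ H_term @ v
        assert np.isclose(energy, 1.0 if (a, b) == (UNBORN, DEAD) else 0.0)
print("penalty term acts as expected on all", d * d, "basis configurations")
\end{verbatim}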
\subsubsection{The Propagation Hamiltonian}
The choice of legal shapes has the following important
property: the shape corresponding to clock value $\ell$
and that corresponding to $\ell+1$ differ in at most two locations. This means that
for any $\ell$ and $j$, the shape of
$\ket{\gamma_{\ell-1}^j}$ and that of $\ket{\gamma_\ell^j}$
differ in at most two locations. Moreover, if we consider
the state of the $n$ active particles in both states
we see that these differ on at most two particles, namely,
those on which the $\ell$th gate in the circuit acts.
Crucially, and this is where we use our assumption
on the form of the circuit (Figure \ref{figure:circuit}),
the particle(s) on which the $\ell$th gate acts are at the same
location as the particle(s) whose phase changes. It is
this structure that allows us to define the
Hamiltonians $H''_\ell$. These Hamiltonians act on two
particles and `simultaneously' advance the clock
(by changing the shape) and advance the computational
state (by modifying the state of the active particles).
Since $\ket{\gamma_\ell}$ differs from
$\ket{\gamma_{\ell-1}}$ in at most two adjacent lattice sites, this can
be done using a two-body nearest neighbor Hamiltonian.
The definition of $H''_\ell$ depends on whether $\ell$ is in the
downward phase (i.e., is of the form $2rn+k$ for $1\le k\le n$)
or in the upward phase (i.e., is of the form $2rn+n+k$ for $1\le k\le n$).
We first define $H''_\ell$ for the upward phase. Assume $\ell = 2rn+n+k$ for
some $0\le r < R$ and $1< k< n$, and let $i=n-k+1$ be the row in
which $\ket{\gamma_{\ell-1}}$ and $\ket{\gamma_\ell}$ differ. Then,
\begin{eqnarray*}
H''_\ell &:=&
\stackketbra{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Uparrow$}}\bigcirc}}{\stated} \hskip -5pt \ontop{\rm_{i,r}}{\rm_{i+1,r}}
+ \stackketbra{{\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\bigcirc}} \hskip -5pt \ontop{\rm_{i-1,r+1}}{\rm_{i,r+1}}
- \left(\ketbra{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Uparrow$}}\bigcirc},{\bigcirc}}{\stated,{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\bigcirc}} + \ketbra{\stated,{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Uparrow$}}\bigcirc},{\bigcirc}}\right)_{\rm(i,r)(i,r+1)}
\\
& + &
\stackketbra{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Downarrow$}}\bigcirc}}{\stated} \hskip -5pt \ontop{\rm_{i,r}}{\rm _{i+1,r}} +
\stackketbra{{\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}} \hskip -5pt \ontop{\rm_{i-1,r+1}}{\rm_{i,r+1}}
- \left(\ketbra{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Downarrow$}}\bigcirc},{\bigcirc}}{\stated,{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}} + \ketbra{\stated,{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Downarrow$}}\bigcirc},{\bigcirc}}\right)_{\rm(i,r)(i,r+1)}.
\end{eqnarray*}
The first line corresponds to changing the state $\ket{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Uparrow$}}\bigcirc},{\bigcirc}}$ into
$\ket{\stated,{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\bigcirc}}$.
The second line is similar for
$\ket{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Downarrow$}}\bigcirc},{\bigcirc}}$ and $\ket{\stated,{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}}$.
The purpose of the first two terms in each line is the same as
that of
$\ketbra{100}{100}^c$ and $\ketbra{110}{110}^c$ in $H_\ell$ from
previous sections.\footnote{There are other (equally good) ways
to define these terms. For example, it is possible to define them
so that they both act on the $r$th column.}
The difference is that here, to
uniquely identify the current clock state, we need to consider
particles on top of each other. The remaining terms in each line
correspond to $\ketbra{100}{110}^c$ and $\ketbra{110}{100}^c$ in $H_\ell$.
For the cases $k=1$ and $k=n$, the definition is
\begin{eqnarray*}
H''_{2rn+n+1} &:=&
\ketbra{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Uparrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Uparrow$}}\bigcirc}}_{n,r}
+ \stackketbra{{\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\bigcirc}} \hskip -5pt \ontop{\rm_{n-1,r+1}}{\rm_{n,r+1}}
- \left(\ketbra{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Uparrow$}}\bigcirc},{\bigcirc}}{\stated,{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\bigcirc}} + \ketbra{\stated,{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Uparrow$}}\bigcirc},{\bigcirc}}\right)_{\rm(n,r)(n,r+1)}
\\
& + &
\ketbra{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Downarrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Downarrow$}}\bigcirc}}_{n,r} +
\stackketbra{{\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}} \hskip -5pt \ontop{\rm_{n-1,r+1}}{\rm_{n,r+1}}
- \left(\ketbra{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Downarrow$}}\bigcirc},{\bigcirc}}{\stated,{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}} + \ketbra{\stated,{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Downarrow$}}\bigcirc},{\bigcirc}}\right)_{\rm(n,r)(n,r+1)}\\
H''_{2rn+2n} &:=&
\stackketbra{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Uparrow$}}\bigcirc}}{\stated} \hskip -5pt \ontop{\rm_{1,r}}{\rm_{2,r}}
+ \ketbra{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\bigcirc}}_{1,r+1}
- \left(\ketbra{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Uparrow$}}\bigcirc},{\bigcirc}}{\stated,{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\bigcirc}} + \ketbra{\stated,{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Uparrow$}}\bigcirc},{\bigcirc}}\right)_{\rm(1,r)(1,r+1)}
\\
& + &
\stackketbra{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Downarrow$}}\bigcirc}}{\stated} \hskip -5pt \ontop{\rm_{1,r}}{\rm _{2,r}} +
\ketbra{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}}_{1,r+1}
- \left(\ketbra{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Downarrow$}}\bigcirc},{\bigcirc}}{\stated,{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}} + \ketbra{\stated,{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Downarrow$}}\bigcirc},{\bigcirc}}\right)_{\rm(1,r)(1,r+1)}.
\end{eqnarray*}
For the downward phase, $H''_\ell$ checks that a gate is
applied correctly. For $\ell=2nr+k$ and $1<k<n$ we define
\begin{align*}
H''_\ell := \left(\begin{array}{cc} 0& -U_\ell\\ -U_\ell^{\dagger}&0
\end{array}\right) &+
\left(
\stackketbra{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Uparrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\bigcirc}} +
\stackketbra{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Uparrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}} +
\stackketbra{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Downarrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\bigcirc}} +
\stackketbra{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Downarrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}}
\right)\ontop{\rm_{k-1,r}}{\rm_{k,r}} \\
&+ \left(
\stackketbra{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Uparrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\bigcirc}} +
\stackketbra{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Uparrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}} +
\stackketbra{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Downarrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\bigcirc}} +
\stackketbra{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Downarrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}}
\right)\ontop{\rm_{k,r}}{\rm_{k+1,r}}.
\end{align*}
The last two terms are meant, as before, to
replace the terms $\ketbra{110}{110}^c$ and $\ketbra{100}{100}^c$.
Once again, to uniquely identify the current clock state, we need
to consider particles on top of each other.
The first term represents a Hamiltonian that acts on the two
particles in positions $(k,r)$ and $(k+1,r)$. These
particles span a $36$-dimensional space. The matrix shown
above is in fact the restriction of this Hamiltonian to the
$8$-dimensional space spanned by
$$ \stackket{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Uparrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\bigcirc}} ~~ \stackket{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Uparrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}} ~~ \stackket{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Downarrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\bigcirc}} ~~ \stackket{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Downarrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}}~~~~
\stackket{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Uparrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Uparrow$}}\bigcirc}} ~~ \stackket{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Uparrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Downarrow$}}\bigcirc}} ~~ \stackket{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Downarrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Uparrow$}}\bigcirc}} ~~ \stackket{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Downarrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Downarrow$}}\bigcirc}}$$
(recall that $U_\ell$ acts on two qubits and is therefore a $4\times 4$ matrix). Everywhere else in this $36$-dimensional space,
this Hamiltonian acts trivially, i.e., it is $0$.
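The algebra behind the first term can be made concrete with a small numerical sketch (illustrative only): an arbitrary $4\times4$ unitary stands in for $U_\ell$, and the identity blocks on the diagonal are a stand-in for the clock-identifying projector terms, which in the paper act on the pairs $(k-1,r),(k,r)$ and $(k,r),(k+1,r)$. The combined $8\times8$ operator is positive semidefinite and annihilates exactly the states in which the gate has been applied correctly.
\begin{verbatim}
# The algebra behind the gate-checking term: an arbitrary 4x4 unitary stands
# in for U_ell, and the identity blocks on the diagonal stand in for the two
# clock-identifying projector terms of the construction.
import numpy as np

rng = np.random.default_rng(0)
A = rng.normal(size=(4, 4)) + 1j * rng.normal(size=(4, 4))
U4, _ = np.linalg.qr(A)                          # stand-in two-qubit gate

I4 = np.eye(4)
M = np.block([[I4, -U4], [-U4.conj().T, I4]])    # 8x8 restricted operator

assert np.allclose(M, M.conj().T)                # Hermitian
assert np.allclose(np.sort(np.linalg.eigvalsh(M)), [0] * 4 + [2] * 4)

# Zero-energy states are exactly those whose component in the post-gate clock
# configuration (first block) equals U_ell applied to the component in the
# pre-gate configuration (second block), i.e. the gate was applied correctly.
phi = rng.normal(size=4) + 1j * rng.normal(size=4)
assert np.allclose(M @ np.concatenate([U4 @ phi, phi]), 0.0)
print("gate term annihilates exactly the correctly propagated states")
\end{verbatim}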
For the case $k=n$ we slightly modify the terms that identify the clock states,
\begin{align*}
H''_{2nr+n} := \left(\begin{array}{cc} 0& -U_{2nr+n}\\ -U_{2nr+n}^{\dagger}&0
\end{array}\right) &+
\left(
\stackketbra{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Uparrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\bigcirc}} +
\stackketbra{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Uparrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}} +
\stackketbra{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Downarrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\bigcirc}} +
\stackketbra{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Downarrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}}
\right)\ontop{\rm_{n-1,r}}{\rm_{n,r}} \\
&+ \left(
\ketbra{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Uparrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Uparrow$}}\bigcirc}} +
\ketbra{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Downarrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Downarrow$}}\bigcirc}}
\right)_{n,r}.
\end{align*}
For the case $k=1$ we have
\begin{align*}
H''_{2nr+1} := \left(\begin{array}{cc} 0& -U_{2nr+1}\\ -U_{2nr+1}^{\dagger}&0
\end{array}\right) &+
\left(
\ketbra{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\bigcirc}} +
\ketbra{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}}
\right)_{1,r} \\
&+ \left(
\stackketbra{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Uparrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\bigcirc}} +
\stackketbra{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Uparrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}} +
\stackketbra{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Downarrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\bigcirc}} +
\stackketbra{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Downarrow$}}\bigcirc}}{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}}
\right)\ontop{\rm_{1,r}}{\rm_{2,r}},
\end{align*}
where the first term shows the restriction of an operator acting on the particle $(1,r)$
to the four dimensional space spanned by $\ket{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\uparrow$}}\bigcirc}}, \ket{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\downarrow$}}\bigcirc}}, \ket{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Uparrow$}}\bigcirc}}, \ket{{\settowidth{\bigcirclen}{$\bigcirc$}\makebox[0pt][l]{\makebox[\bigcirclen][c]{$\Downarrow$}}\bigcirc}}$
(recall that $U_{2nr+1}$ is
a one-qubit gate).
\subsection{Spectral Gap}\label{sec:spectralgap6}
The analysis of the spectral gap follows almost immediately from that in Subsection
\ref{sec:gapthree}. The main effort is in verifying that
the restriction of each of our Hamiltonians to ${\cal{S}}$ is identical to
the restriction of the corresponding Hamiltonian
in previous sections to ${\cal{S}}$, when both are constructed
according to the modified quantum circuit of
Subsection~\ref{sec:layout}.
This, in fact, does not hold for $H''_{{\mathrm{input}}}$,
whose projection is not quite the same as that of $H_{\mathrm{input}}$;
still, it is similar enough for the analysis in Subsection
\ref{sec:gapthree} to hold.
\begin{claim}
$H''_{{\cal{S}}, \mathrm{clockinit}} = H_{{\cal{S}}, \mathrm{clockinit}}$.
\end{claim}
\begin{proof}
Both Hamiltonians are diagonal
in the basis $\ket{\gamma_\ell^j}$ with eigenvalue
$0$ for $\ell=0$ and eigenvalue $1$ for any $\ell>0$.
\end{proof}
\begin{claim}
For any $1\le \ell \le L$, $H''_{{\cal{S}}, \ell}=H_{{\cal{S}}, \ell}$.
\end{claim}
\begin{proof}
It is straightforward to verify that
both Hamiltonians, when restricted to ${\cal{S}}$, are equal to
$$\sum_{j=0}^{2^n-1}[\ketbra{\gamma^j_\ell}{\gamma^j_\ell}+\ketbra{\gamma^j_{\ell-1}}{\gamma^j_{\ell-1}}-
\ketbra{\gamma^j_\ell}{\gamma^j_{\ell-1}}-\ketbra{\gamma^j_{\ell-1}}{\gamma^j_\ell}].$$
\end{proof}
For $H''_{{\mathrm{input}}}$ the situation is similar,
although in this case the restriction to ${\cal{S}}$ is not exactly the same.
Still, the resemblance is enough for the same analysis to hold:
\begin{claim}
Both $H_{{\cal{S}},{\mathrm{input}}}$ and
$H''_{{\cal{S}}, {\mathrm{input}}}$ are diagonal in the basis $\ket{\gamma_\ell^j}$.
Moreover, the eigenvalue in both Hamiltonians
corresponding to $\ket{\gamma_\ell^j}$
for $\ell=0$ is exactly the number of $1$'s in the binary
representation of $j$.
\end{claim}
\begin{proof} Easy to verify.
\end{proof}
The similarity between the two Hamiltonians breaks down as follows.
While the eigenvalues corresponding to $\ket{\gamma_\ell^j}$
for $\ell>0$
are $0$ in $H_{{\cal{S}}, {\mathrm{input}}}$, those in $H''_{{\cal{S}}, {\mathrm{input}}}$ might be positive
(namely, for $0\le \ell \le n$, the eigenvalue of $\ket{\gamma_\ell^j}$ is
the number of $1$'s in the last $n-\ell$ digits in the binary representation
of $j$). Nevertheless, due to the remark at the end of Subsection
\ref{ssec:spectralgap},
Lemma \ref{lem:gapinsides} holds here as well.
We get:
\begin{lemma}
For any $0\le s\le 1$, $H''_{{\cal{S}}}(s)$ has a spectral
gap of $\Omega(L^{-3})$.
Moreover,
the ground state of $H''_{{\cal{S}}, {\mathrm{final}}}$ is $\ket{\eta}$.
\end{lemma}
The rest of the proof of Theorem \ref{thm:geo}
is essentially the same as in Subsection \ref{sec:gapthree}. By applying Lemma \ref{le:leak}, we obtain:
\begin{lemma}
For all $0\le s \le 1, \Delta(H''(s))=\Omega(L^{-3})$.
Moreover, the ground state of $H''(1)$ is $\epsilon$-close to $\ket{\eta}$.
\end{lemma}
The proof is similar to that of Lemmas \ref{lm:spec3} and \ref{lm:gs_close}. This enables us to adiabatically generate the history state with exactly
the same running time as in the three-local case (when the number
of gates is that of the modified circuit of Subsection \ref{sec:layout}).
Finally, we would like to apply
Lemma \ref{lem:out} as before.
However, we cannot quite do this due to a technical issue:
our Hilbert space is no longer a tensor product of
computation qubits and clock qubits
and tracing out the clock qubits is meaningless.
Nevertheless, a minor modification of that lemma still applies.
We first add, say, $L/\epsilon$
identity gates to the end of the (modified) circuit.
Now, the adiabatic computation produces a state close to the
history state. We then measure the shape of the system
without measuring the inner computational degrees of freedom.
Due to the additional identity gates, with all but $\epsilon$ probability,
the outcome of the measurement is a shape
$\ell$ for $\ell \ge L$ (in the history state the fraction of clock values
below $L$ is at most $L/(L+L/\epsilon)\le\epsilon$). If this is the case then the state of the system
is such that the active particles are in the final state
of the circuit, as desired.
This completes the proof of the theorem.
\section{Acknowledgments}
We wish to thank Dave Bacon, Ed Farhi, Leonid Gurvitz and Umesh
Vazirani for inspiring discussions.
DA's work is supported in part by
ARO grant DAAD19-03-1-0082, NSF ITR grant CCR-0121555,
ISF grant 032-9738 and an Alon fellowship.
WvD's work was supported in part by the
U.S.\ Department of Energy (DOE) and cooperative research
agreement DF-FC02-94ER40818, a CMI postdoctoral fellowship,
and an HP/MSRI fellowship.
JK's effort is partly sponsored by DARPA and AFL,
Air Force Material Command, USAF,
under agreement number F30602-01-2-0524 and FDN00014-01-1-0826 and by ACI S\'ecurit\'e Informatique,
2003-n24, projet ``R\'eseaux Quantiques", ACI-CR 2002-40 and EU 5th
framework program RESQ IST-2001-37559.
OR's effort is supported by an Alon Fellowship, the Israeli Science Foundation, ARO grant DAAD19-03-1-0082,
and NSF grant CCR-9987845.
Part of this work was done while DA, WvD, JK, ZL were
members of the MSRI, Berkeley, CA.
\end{document}
|
\begin{document}
\title{Towards quantum superpositions of a mirror}
\author{William Marshall$^{1,2}$, Christoph Simon$^1$, Roger Penrose$^{3,4}$ and Dik Bouwmeester$^{1,2}$}
\address {$^1$Department of Physics, University of Oxford, Oxford OX1 3PU, United Kingdom\\ $^2$
Department of Physics, University of California, Santa Barbara, CA
93106\\
$^3$ Center for Gravitational Physics and Geometry, The Pennsylvania
State University, University
Park, PA 16802\\
$^4$ Department of Mathematics, University of Oxford, Oxford OX1
3LB, United Kingdom}
\date{\today}
\begin{abstract}
We propose a scheme for creating quantum superposition states
involving of order $10^{14}$ atoms via the interaction of a single
photon with a tiny mirror. This mirror, mounted on a high-quality
mechanical oscillator, is part of a high-finesse optical cavity
which forms one arm of a Michelson interferometer. By observing
the interference of the photon only, one can study the creation
and decoherence of superpositions involving the mirror. All
experimental requirements appear to be within reach of current
technology.
\end{abstract}
\maketitle
In $1935$ Schr\"{o}dinger pointed out that according to quantum
mechanics even macroscopic systems can be in superposition states
\cite{schroedinger}. The quantum interference effects are expected
to be hard to detect due to environment induced decoherence
\cite{decoherence}. Nevertheless there have been proposals on how
to create and observe macroscopic superpositions in various
systems \cite{bec,bose,armour}, and experiments demonstrating
superposition states of superconducting devices \cite{squidexp}
and fullerene molecules \cite{c60}. One long-term motivation for
this kind of experiment is the question of whether unconventional
decoherence processes such as gravitationally induced decoherence
or spontaneous wave-function collapse \cite{collapse,penrose}
occur.
Here we present a scheme that is close in spirit to
Schr\"{o}dinger's original discussion. A small quantum system (a
photon) is coupled to a large system (a mirror) in order to create
a macroscopic superposition. The basic principle of the experiment
as described in Ref. \cite{penrose} grew out of discussions in
1997 \cite{schmiedmayer}. It consists of a Michelson
interferometer in which one arm has a tiny moveable mirror. The
radiation pressure of a single photon displaces the tiny mirror.
The initial superposition of the photon being in either arm causes
the system to evolve into a superposition of states corresponding
to two distinct locations of the mirror. In the present proposal a
high-finesse cavity is used to enhance the interaction between the
photon and the mirror. The observed interference of the photon
allows one to study the creation of coherent superposition states
periodic with the motion of the mirror. We perform a detailed
analysis of the requirements for such an experiment.
\begin{figure}
\includegraphics[width=0.7 \columnwidth]{Fig1.eps}
\caption{The proposed setup: a Michelson interferometer for a
single photon, where in each arm there is a high-finesse cavity.
The cavity in arm A has a very small end mirror mounted on a
micro-mechanical oscillator. The single photon comes in through
I. If the photon is in arm A, the motion of the small mirror is
affected by its radiation pressure. The photon later leaks out of
either cavity and is detected at D1 or D2.} \label{setup}
\end{figure}
The proposed setup is shown in Fig. 1. In the
cavity in arm A one of the mirrors is very small and attached to a
micromechanical oscillator, similar to the cantilevers in atomic
force microscopes. While the photon is in the cavity, it exerts
radiation pressure on the mirror. Under the conditions that the
oscillator period is much longer than the photon roundtrip time,
and the amplitude of the mirror's motion is small compared to the
cavity length, the system can be described by a Hamiltonian
\cite{law} \begin{eqnarray} H=\hbar \omega_c a^{\dagger} a + \hbar \omega_m
b^{\dagger} b - \hbar g a^{\dagger} a (b+b^{\dagger}), \end{eqnarray} where
$\omega_c$ is the frequency of the photon in the cavity for the
(empty) cavity length $L$, $a$ is the creation operator for the
photon, $\omega_m$ and $b$ are the frequency and phonon creation
operator for the center of mass motion of the mirror, and the
coupling constant is $g=\frac{\omega_c}{L}\sqrt{\frac{\hbar}{2M
\omega_m}}$, where $M$ is the mass of the mirror. To start with,
let us suppose that initially the photon is in a superposition of
being in either arm $A$ or $B$, and the mirror is in a coherent
state $|\beta \rangle = e^{-|\beta|^{2}/2} \sum \limits_{n=0}^{\infty}
\frac{\beta^{n}}{\sqrt{n!}}|n\rangle$, where $|n\rangle$ are the eigenstates
of the harmonic oscillator. Then the initial state is
$|\psi(0)\rangle=\frac{1}{\sqrt{2}}(|0\rangle_{A} |1\rangle_{B} + |1\rangle_{A}
|0\rangle_{B})|\beta\rangle$. After a time $t$ the state of the system will
be given by \cite{mancini} \begin{eqnarray}
|\psi(t)\rangle=\frac{1}{\sqrt{2}}e^{-i\omega_c t}(|0\rangle_A |1\rangle_B |\beta
e^{-i \omega_m t}\rangle + \nonumber\\ e^{i\kappa^2(\omega_m t-\sin
\omega_m t)}|1\rangle_A |0\rangle_B |\beta e^{-i \omega_m t}+
\kappa(1-e^{-i \omega_m t})\rangle ), \label{state} \end{eqnarray} where
$\kappa=g/\omega_m$. In the second term on the r.h.s. the motion
of the mirror is altered by the radiation pressure of the photon
in cavity $A$. The parameter $\kappa$ quantifies the displacement
of the mirror in units of the size of the coherent state
wavepacket. In the presence of the photon the mirror oscillates
around a new equilibrium position determined by the driving force.
The maximum possible interference visibility for the photon is
given by twice the modulus of the off-diagonal element of the
photon's reduced density matrix. By tracing over the mirror one
finds from Eq. (\ref{state}) that the off-diagonal element has the
form \begin{eqnarray}
\frac{1}{2} e^{-\kappa^2(1-\cos \omega_m t)} e^{i\kappa^2(\omega_m
t-\sin \omega_m t)+i \kappa \mbox{\small Im}[\beta (1-e^{i\omega_m
t})]}
\label{coherence} \end{eqnarray} where Im denotes the imaginary part. The
first factor is the modulus, reaching a minimum after
half a period at $t=\pi/\omega_m$, when the mirror is at its
maximum displacement. The second factor gives the phase, which is
identical to that obtained classically due to the varying length
of the cavity.
For general $t$ the phase depends on
$\beta$, i.e. the initial conditions of the mirror. However, the
effect of $\beta$ averages out after every full
period.
In the absence of decoherence, after a full period, the system is
in the state $\frac{1}{\sqrt{2}}(|0\rangle_A |1\rangle_B + e^{i\kappa^2 2
\pi} |1\rangle_A |0\rangle_B) |\beta\rangle$, such that the mirror is again
disentangled from the photon. Full interference can be observed if
the photon is detected at this time. If the environment of the
mirror ``remembers'' that the mirror has moved, then, even after a
full period, the photon will still be entangled with the mirror's
environment, and thus the interference for the photon will be
reduced. Therefore the setup can be used to measure the
decoherence of the mirror.
In practice the mirror will be in a thermal state, which
can be written as a mixture of coherent states $|\beta\rangle$ with a
Gaussian probability distribution $(1/\pi
\bar{n})e^{-|\beta|^2/\bar{n}}$, where $\bar{n}=1/(e^{\hbar
\omega_m/kT}-1)$ is the mean
thermal number of excitations.
In order to determine the expected
interference visibility of the photon at a time $t$ for an initial
mirror state which is thermal, one has to average the
off-diagonal element Eq. (\ref{coherence}) over $\beta$ with this
distribution. The result is \begin{eqnarray} \frac{1}{2}
e^{-\kappa^2(2\bar{n}+1)(1-\cos \omega_m t)} e^{i\kappa^2(\omega_m
t-\sin \omega_m t)}. \label{thermal} \end{eqnarray} As a consequence of the
averaging of the $\beta$-dependent phase in Eq.
(\ref{coherence}), the off-diagonal element now decays on a
timescale $1/(\kappa \omega_m \sqrt{\bar{n}})$ after $t=0$, i.e.
very fast for the experimentally relevant case of $\kappa \sim 1$
and large $\bar{n}$. However, remarkably, it still exhibits a
revival \cite{bose} at $t=2\pi/\omega_m$, when photon and mirror
become disentangled and the phase in Eq. (\ref{coherence}) is
independent of $\beta$, such that the phase averaging does not
reduce the visibility. This behaviour is shown in Fig. 2. The
magnitude of the revival is reduced by any decoherence of the
mirror.
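The behaviour plotted in Fig.~2 follows directly from Eq.~(\ref{thermal}); the short numerical sketch below (illustrative only, with $\kappa=1$, the two temperatures of Fig.~2, and $\omega_m=2\pi\times500$~Hz as proposed further down) exhibits the fast initial decay on the timescale $1/(\kappa\omega_m\sqrt{\bar{n}})$ and the full revival at $t=2\pi/\omega_m$.
\begin{verbatim}
# Visibility from Eq. (thermal):
# V(t) = exp(-kappa^2 (2 nbar + 1)(1 - cos(omega_m t))),
# evaluated for kappa = 1 and omega_m = 2 pi x 500 Hz.
import numpy as np

hbar, kB = 1.054571817e-34, 1.380649e-23
omega_m = 2 * np.pi * 500.0
kappa = 1.0
t = np.linspace(0.0, 2 * np.pi / omega_m, 400001)

for T in (1e-3, 60e-6):                          # 1 mK and 60 microkelvin
    nbar = 1.0 / np.expm1(hbar * omega_m / (kB * T))
    V = np.exp(-kappa**2 * (2 * nbar + 1) * (1 - np.cos(omega_m * t)))
    t_decay = t[np.argmax(V < np.exp(-1))]       # first time V drops below 1/e
    print(f"T = {1e6 * T:5.0f} muK: nbar ~ {nbar:.0f}, "
          f"1/e decay at omega_m t ~ {t_decay * omega_m:.4f}, "
          f"V at the full period = {V[-1]:.3f}")
\end{verbatim}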
The revival demonstrates the coherence of the superposition state
that exists at intermediate times. For $\kappa^2 \gtrsim 1$ the
state of the system is a superposition involving two distinct
mirror positions. More precisely, for a thermal mirror, the state
of the system is a mixture of such superpositions. However, this
does not affect the fundamentally non-classical character of the
state. We now discuss the experimental requirements for achieving
such a superposition and observing its recoherence at $t=2
\pi/\omega_m$.
\begin{figure}
\includegraphics[width= 0.9 \columnwidth]{Fig2.eps}
\caption{Time evolution of the interference visibility of the
photon over one period of the mirror's motion for $\kappa=1$,
$T=1$mK and $T=60 \mu$K. The time is given in units of
$1/\omega_m$. The visibility decays very fast after $t=0$, but in
the absence of decoherence there is a sharp revival of the
visibility after a full period. The width of each peak scales like
$1/\sqrt{T}$. } \label{peak}
\end{figure}
Firstly, we require $\kappa^2 \gtrsim 1$, which physically means
that the momentum kick imparted by the photon has to be larger
than the initial quantum uncertainty of the mirror's momentum. Let
$N$ denote the number of roundtrips of the photon in the cavity
during one period of the mirror's motion, such that
$2NL/c=2\pi/\omega_m$. This allows us to rewrite the condition
$\kappa^2 \gtrsim 1$ as \begin{eqnarray} \frac{2 \hbar N^3 L}{\pi c M \lambda^2
} \gtrsim 1, \label{required} \end{eqnarray} where $\lambda$ is the
wavelength of the light. The factors entering Eq. (\ref{required})
are not all independent. The achievable $N$, which is determined
by the quality of the mirrors, and the minimum mirror size (and
hence $M$) both depend strongly on $\lambda$. The mirror's lateral
dimension should be an order of magnitude larger than $\lambda$ to
limit diffraction losses. The thickness needed to achieve
sufficiently high reflectivity depends on $\lambda$ as well.
Eq. (\ref{required}) allows one to compare the viability of
different wavelength ranges. While the highest values for $N$ are
achievable for microwaves (up to $10^{10}$), this is counteracted
by their cm wavelengths. On the other hand there
are no good mirrors for highly energetic photons. The optical
regime seems optimal. Here we propose an experiment with $\lambda$
around 630 nm.
The cavity mode needs to have a very narrow focus on the tiny
mirror, which requires the other cavity end mirror to be large due
to beam divergence. The maximum cavity length is therefore limited
by the difficulty of making large high quality mirrors. We propose
a cavity length of 5 cm, and a small mirror size of $10 \times 10 \times 10$
microns, leading to a mass of order $5 \times 10^{-12}$ kg.
Such a small mirror on a mechanical oscillator can be fabricated
by coating a silicon cantilever with alternating layers of
SiO$_{2}$ and a metal oxide. The best current mirrors are made in
this way. A larger silicon oscillator has been coated with
SiO$_2$/Ta$_2$O$_5$ and used as part of a high-finesse cavity in
Ref. \cite{schiller}.
For the above dimensions the condition (\ref{required}) is
satisfied for $N=5.6 \times 10^6$. Correspondingly, we are aiming
for a photon loss per reflection not larger than $3 \times
10^{-7}$, about a factor of 4 below the best reported values for
such mirrors \cite{rempe}, and for a transmission of $10^{-7}$,
consistent with the $10 \mu$m mirror thickness \cite{hood}. For
these values, about 1\% of the photons are still left in the
cavity after a full period of the mirror. For the above values of
$N$ and $L$ one obtains a frequency $\omega_m=2 \pi \times 500$
Hz. This leads to a quantum uncertainty for the mirror position of
order $10^{-13}$ m, which corresponds to the necessary
displacement in the superposition in order to achieve
$\kappa^2\sim 1$.
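These numbers can be reproduced directly from Eq.~(\ref{required}); the sketch below (illustrative only) inserts $\lambda=630$~nm, $L=5$~cm, $M=5\times10^{-12}$~kg and $N=5.6\times10^{6}$.
\begin{verbatim}
# Plugging the proposed parameters into Eq. (required).
import numpy as np

hbar, c = 1.054571817e-34, 2.99792458e8
lam, L, M, N = 630e-9, 0.05, 5e-12, 5.6e6     # wavelength, length, mass, roundtrips

kappa2 = 2 * hbar * N**3 * L / (np.pi * c * M * lam**2)   # Eq. (required)
omega_m = np.pi * c / (N * L)                 # from 2NL/c = 2 pi / omega_m
dx0 = np.sqrt(hbar / (2 * M * omega_m))       # ground-state position spread

print(f"kappa^2     ~ {kappa2:.2f}   (needs to be >~ 1)")
print(f"omega_m/2pi ~ {omega_m / (2 * np.pi):.0f} Hz  (text quotes ~500 Hz)")
print(f"dx0         ~ {dx0:.1e} m  (the text quotes order 1e-13 m)")
\end{verbatim}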
The fact that a relatively large $L$ is needed to satisfy Eq.
(\ref{required}) implies that the creation of superpositions
following the related proposal of Ref. \cite{bose}, which relies
on a micro-cavity, imposes requirements that are probably
beyond the reach of current technology. A large $L$ is
helpful because, for a given $N$, it allows one to use a lower
frequency $\omega_m$, and thus a more weakly bound mirror that is
easier to displace by the photon.
Secondly, the requirement of observing the revival puts a bound on
the acceptable environmental decoherence. To estimate the expected
decoherence we model the mirror's environment by an (Ohmic) bath
of harmonic oscillators. The effect of this environment can
approximately be described by a decoherence rate
$\gamma_D=\gamma_m k T M (\Delta x)^2/\hbar^2$ governing the decay
of off-diagonal elements between different mirror positions
\cite{decoherence}. Here $\gamma_m$ is the damping rate for the
mechanical oscillator, $T$ is the temperature of the environment,
which is constituted mainly by the internal degrees of freedom of
the mirror and cantilever, and $\Delta x$ is the separation of two
coherent states that are originally in a superposition. This
approximation is strictly valid only for times much longer than
$2\pi/\omega_m$ and for $\Delta x$ large compared to the width of
the individual wavepackets. A more rigorous description would
follow Ref. \cite{strunz} and take into account the movement of
the mirror. Our analysis indicates that the order of magnitude of
the decoherence is well captured by $\gamma_D$. Assuming that the
experiment achieves $\kappa^2 \gtrsim 1$, i.e. a separation by the
size of a coherent state wavepacket, $\Delta x \sim
\sqrt{\frac{\hbar}{M\omega_m}}$, the condition $\gamma_D \lesssim
\omega_m$ can be cast in the form \begin{eqnarray} Q \gtrsim \frac{kT}{\hbar
\omega_m}=\bar{n}, \end{eqnarray} where $Q=\omega_m/\gamma_m$ is the quality
factor of the mechanical oscillator. Bearing in mind that
$Q\gtrsim 10^5$ has been achieved \cite{mamin} for silicon
cantilevers of approximately the right dimensions and frequency,
this implies that the temperature has to be 3 mK or less.
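Numerically, with $Q=10^5$ and $\omega_m=2\pi\times500$~Hz the bound works out as follows (illustrative sketch).
\begin{verbatim}
# Temperature bound from Q >~ k T / (hbar omega_m) = nbar.
hbar, kB = 1.054571817e-34, 1.380649e-23
omega_m = 2 * 3.141592653589793 * 500.0
Q = 1e5                                  # demonstrated cantilever quality factor
T_max = Q * hbar * omega_m / kB
print(f"T_max ~ {T_max * 1e3:.1f} mK")   # ~2.4 mK, i.e. 'about 3 mK or less'
\end{verbatim}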
Thirdly, the stability requirements for the experiment are very
strict. The phase of the interferometer has to be stable over the
whole measurement time. This means that the distance between the
large cavity end mirror and the equilibrium position
\cite{thermal} of the small mirror has to be stable to of order
$\lambda/20 N=0.6 \times 10^{-14}$m. The required measurement time
can be determined in the following way. A single run of the
experiment starts by sending a weak pulse into the interferometer,
such that on average 0.1 photons go into either cavity. This
probabilistically prepares a single-photon state as required to a
good approximation. The two-photon contribution has to be kept low
because it causes noise in the interferometer. From Eq.
(\ref{thermal}) the width of the revival peak is $2/\kappa
\omega_m \sqrt{\bar{n}}$. This implies that only a fraction $\sim
1/\pi\sqrt{\bar{n}}$ of the remaining photons will leak out in the
time interval of the revival. It is therefore important to work at
the lowest possible temperature. A temperature of 60 $\mu$K has
been achieved with a nuclear demagnetization cryostat \cite{yao}.
Considering the required low value of $\omega_m$ and the fact that
approximately 1\% of the photons remain after a full period for
the assumed loss, this implies a detection rate of approximately
100 photons per hour in the revival interval, given a detection
efficiency of 70 \%. Thus a measurement time of order $10$ minutes
should give statistically significant results. After one such
measurement period the interferometer can be readjusted, and the
experiment can be repeated. Stability of order $10^{-13}$m/min for
an STM inside a cryostat at 8 K was achieved with a rather simple suspension
\cite{stipe}. Gravitational wave observatories using
interferometers also require very high stability in order to have
a length sensitivity of $10^{-19}$m over timescales of a ms or
greater \cite{ligo}, for arm lengths of order $1$ km.
The experiment requires ultra-high vacuum conditions in order to
ensure that events where an atom hits the cantilever are
sufficiently rare not to cause significant errors; the tolerable rate of
such events is at the level of about 5/s. Background gas particle densities of order
100/cm$^3$ have been achieved \cite{gabrielse} and are sufficient
for our purposes.
After every single run of the experiment the mirror has to be
damped to reset it to its initial (thermal) state. This could be
done by electric or magnetic fields, e.g. following Ref.
\cite{damping}, where a Nickel sphere was attached to the
cantilever, whose $Q$ could then be changed by three orders of
magnitude by applying a magnetic field.
There are several ways in which the total measurement time could
be decreased, thus relaxing the stability requirements. Switchable
cavity mirrors would allow coupling the photon into and out of the
cavity at any desired time, instead of relying on postselection.
This allows the data collection in the revival interval to be
increased by three orders of magnitude. The transmission of a
distributed Bragg reflector (DBR) can be changed by altering the
refractive index of one of the two materials. Highly reflective
mirrors have been fabricated from alternating layers of GaAs and
AlAs \cite{sale}. The refractive index of GaAs can be changed
dramatically by optical pumping \cite{huang}. A change from $3.6$
to $3.15$ on a ps timescale was observed at a probe wavelength of
830nm for pump intensities in the range $1$ kJ/m$^{2}$ (just below
the damage threshold of the material). Such a large change in
refractive index would allow efficient optical switching
\cite{scalora} and is a promising avenue that deserves
experimental investigation. The main open question is whether the
absorption of the materials \cite{sturge} can be made low enough
to match our very strict demands.
Since the width of the revival peak scales like $1/\sqrt{T}$, the
required measurement time can also be decreased by decreasing the
temperature below 60 $\mu$K. Passive cooling techniques may be
improved. In addition, active cooling of mirror oscillators has
been proposed \cite{mancinicooling}, and even implemented
experimentally for a large mirror \cite{cohadon}. The mirror's
movement is sensed via the output light of an optical cavity, and
a feedback loop is used to damp the motion. Applying the theory of
Ref. \cite{mancinicooling} to our mirror, one finds that cooling
even to the ground state of the center of mass motion is
conceivable. This would reduce the required measurement time, and
thus the stability requirements, by a factor of 50.
In principle the proposed setup has the potential to test wave
function reduction models, in particular the one of Ref.
\cite{penrose}. Based on \cite{penrose} and \cite{strunz}, we
estimate that an improvement of the ratio $Q/T$ by about five
orders of magnitude from the values discussed in this paper
($Q=10^5$ and $T=60 \mu$K) would make the predicted wavefunction
decoherence rate comparable to the environmental decoherence rate.
Improvements in $Q$ are certainly conceivable. In particular, $Q$
is known to increase with decreasing temperature \cite{mamin}.
Active cooling methods for the mirror's center of mass motion as
described above could in principle also be used to indirectly cool
the mirror's internal degrees of freedom. Whether such sympathetic
cooling is a realistic avenue is in itself a fascinating subject
for further study.
We have performed a detailed study of the experimental
requirements for the creation and observation of quantum
superposition states of a mirror consisting of $10^{14}$ atoms,
approximately nine orders of magnitude more massive than any
superposition observed to date. Our analysis suggests that, while
very demanding, this goal appears to be in reach of current
technology. It is remarkable that a tabletop experiment has the
potential to test quantum mechanics in an entirely new regime.
Preliminary experiments on components of the proposal are
currently under way.
This work was supported by the E.U. (IST$-1999-10033$). W.M. is
supported by EPSRC (award no. 00309297). C.S. is supported by a
Marie Curie Fellowship of the E.U. (no. HPMF-CT-2001-01205). R.P.
thanks the NSF for support under contract 00-90091 and the
Leverhulme Foundation for an Emeritus Fellowship. We would like to
thank S. Bose, M. Davies, R. Epstein, T. Knuuttila, R. Lalezari,
J. Pethica, and J. Roberts for useful discussions.
\begin{thebibliography}{99}
\bibitem{schroedinger} E. Schr\"{o}dinger, Die Naturwissenschaften {\bf 23}, 807 (1935).
\bibitem{decoherence} D. Giulini {\it et al.}, {\it Decoherence and the Appearance of a Classical World in
Quantum Theory} (Springer, Berlin, 1996); W.H. Zurek, Phys. Today
{\bf 44}, 36 (1991).
\bibitem{bec} J. Ruostekoski, M. J. Collett, R. Graham, and D. F. Walls, Phys. Rev. A {\bf 57},
511 (1998); J. I. Cirac, M. Lewenstein, K. Molmer, and P. Zoller,
Phys. Rev. A {\bf 57}, 1208 (1998).
\bibitem{bose} S. Bose, K. Jacobs, and P.L. Knight, Phys. Rev. A {\bf 59}, 3204 (1999).
\bibitem{armour} A.D. Armour, M.P. Blencowe, and K.C. Schwab, Phys. Rev. Lett. {\bf 88}, 148301 (2002).
\bibitem{squidexp} C.H. van der Wal {\it et al.}, Science {\bf 290}, 773 (2000);
J.R. Friedman, V. Patel, W. Chen, S.K. Tolpygo, and J.E. Lukens,
Nature {\bf 406}, 43 (2000).
\bibitem{c60} M. Arndt {\it et al.}, Nature {\bf 401}, 680 (1999).
\bibitem{collapse} G.C. Ghirardi, A. Rimini and T. Weber, Phys.
Rev. D {\bf 34}, 470 (1986); G.C. Ghirardi, P. Pearle, and A.
Rimini, Phys. Rev. A {\bf 42}, 78 (1990); I.C. Percival, Proc.
Roy. Soc. London, Ser. A {\bf 447}, 189 (1994); D.I. Fivel, Phys.
Rev. A {\bf 56}, 146 (1997); L. Diosi, Phys. Rev. A {\bf 40}, 1165 (1989).
\bibitem{penrose} R. Penrose, in A. Fokas {\it et al.} (Eds.), {\it
Mathematical Physics 2000} (Imperial College, London, 2000).
\bibitem{schmiedmayer} J. Schmiedmayer, R. Penrose, D. Bouwmeester, J. Dapprich, H. Weinfurter, A. Zeilinger, private communication (1997).
See also D. Bouwmeester {\it et al.}, in N. Dadhich and J.
Narlikar (Eds.), {\it Gravitation and Relativity: At the turn of
the Millennium} (IUCAA, Pune, 1998).
\bibitem{law} C.K. Law, Phys. Rev. A {\bf 51}, 2537 (1994); C.K. Law, Phys. Rev. A {\bf 49}, 433
(1993).
\bibitem{mancini} S. Mancini, V.I. Man'ko, and P. Tombesi, Phys. Rev. A {\bf 55}, 3042
(1997); S. Bose, K. Jacobs, and P.L. Knight, Phys. Rev. A {\bf
56}, 4175 (1997).
\bibitem{schiller} I. Tittonen {\it et al.}, Phys. Rev. A {\bf
59}, 1038 (1999).
\bibitem{rempe} G. Rempe, R.J. Thompson, H.J. Kimble, and R.
Lalezari, Opt. Lett. {\bf 17}, 363 (1992).
\bibitem{hood} C.J. Hood, H.J. Kimble, and J. Ye, Phys. Rev. A
{\bf 64}, 033804 (2001).
\bibitem{strunz} W.T. Strunz and F. Haake, quant-ph/0205108 (2002).
\bibitem{mamin} H.J. Mamin and D. Rugar, Appl. Phys. Lett. {\bf
79}, 3358 (2001).
\bibitem{thermal} As we have seen above, the thermal
fluctuations of the small mirror around this equilibrium position
(which are of order $10^{-12}$ m for 60 $\mu$K) lead to a
narrowing of the visibility peaks, but do not destroy the revival.
\bibitem{yao} W. Yao {\it et al.}, J. Low Temp. Phys. {\bf
120}, 121 (2000).
\bibitem{stipe} B.C. Stipe, M.A. Rezaei and W. Ho., Rev. Sci. Inst. {\bf 70}, 137
(1999).
\bibitem{ligo} S. Rowan and J. Hough, Living Reviews in Relativity {\bf 3}, 2000-3 (2000).
\bibitem{gabrielse} G. Gabrielse {\it et al.}, Phys. Rev. Lett. {\bf 65}, 1317 (1990).
\bibitem{damping} K. Wago, D. Botkin, C.S. Yannoni, and D. Rugar,
Appl. Phys. Lett. {\bf 72}, 2757 (1998).
\bibitem{sale} T.E. Sale, {\it Vertical Cavity Surface Emitting Lasers} (Research Studies Press, Baldock, 1995).
\bibitem{huang} L. Huang, J.P. Callan, E.N. Glezer, and E. Mazur, Phys. Rev. Lett. {\bf 80}, 185 (1998).
\bibitem{scalora} M. Scalora, J.P. Dowling, C.M. Bowden, and M.J.
Bloemer, Phys. Rev. Lett. {\bf 73}, 1368 (1993).
\bibitem{sturge} M.D. Sturge, Phys. Rev. {\bf 127}, 768 (1962).
\bibitem{mancinicooling} S. Mancini, D. Vitali, and P. Tombesi,
Phys. Rev. Lett. {\bf 80}, 688 (1998).
\bibitem{cohadon} P.F. Cohadon, A. Heidmann, and M. Pinard,
Phys. Rev. Lett. {\bf 83}, 3174 (1999).
\end{thebibliography}
\end{document}
|
\begin{document}
\title{Diffusive Limit of the Unsteady Neutron Transport Equation in Bounded Domains}
\author[Z. Ouyang]{Zhimeng Ouyang}
\address[Z. Ouyang]{
\newline\indent Department of Mathematics, University of Chicago}
\email{[email protected]}
\thanks{Z. Ouyang is supported by NSF Grant DMS-2202824.}
\date{}
\subjclass[2020]{Primary 35Q49, 82D75; Secondary 35Q62, 35Q20}
\keywords{non-convex domain, boundary layer, initial layer, diffusive limit}
\maketitle
\begin{abstract}
The justification of hydrodynamic limits in non-convex domains has long been an open problem due to the singularity at the grazing set. In this paper, we investigate the unsteady neutron transport equation in a general bounded domain with the in-flow, diffuse-reflection, or specular-reflection boundary condition.
Using a novel kernel estimate, we demonstrate the optimal $L^2$ diffusive limit in the presence of both initial and boundary layers.
Previously, this result was only proved for convex domains when the time variable is involved.
Our approach is highly robust, making it applicable to all basic types of physical boundary conditions.
\end{abstract}
\pagestyle{myheadings} \thispagestyle{plain} \markboth{Z. OUYANG}{DIFFUSIVE LIMIT OF UNSTEADY NEUTRON TRANSPORT EQUATION IN BOUNDED DOMAINS}
\smallskip
\setcounter{tocdepth}{1}
\tableofcontents
\section{Introduction}\label{Sec:intro}
The neutron transport equation is a fundamental model for studying particle dynamics in confined spaces, such as nuclear reactors and medical imaging devices (e.g., CT, MRI). Its investigation dates back to the dawn of the atomic age and has motivated significant developments in applied mathematics, such as the discontinuous Galerkin (DG) method \cite{Cockburn.Shu1989, Cockburn.Hou.Shu1990, Cockburn.Karniadakis.Shu2000} and the inverse transport problem \cite{Bal2009, Bal.Ren2012}.
We consider the unsteady neutron transport equation in a
three-dimensional smooth bounded domain $\Omega$ without assuming any convexity.
Denote the time variable $t\in\mathbb{R}_+$, the space variable $x=(x_1,x_2,x_3)\in\Omega$, and the velocity variable
$w=(w_1,w_2,w_3)\in\S^2$.
Let the neutron density $u^{\varepsilon}(t,x,w)$ satisfy the in-flow boundary problem
\begin{align}\label{transport}
\left\{
\begin{array}{l}\displaystyle
\varepsilon\partial_t u^{\varepsilon}+w\cdot\nabla_x u^{\varepsilon}+\varepsilon^{-1}\big(u^{\varepsilon}-\overline{u}^{\varepsilon}\big)=0\ \ \text{ in}\ \ \mathbb{R}_+\times\Omega\times\S^2,\\\rule{0ex}{1.5em}
u^{\varepsilon}(0,x,w)=u_o(x,w)\ \ \text{ in}\ \ \Omega\times\S^2,\\\rule{0ex}{1.5em}
u^{\varepsilon}(t,x_0,w)=g(t,x_0,w)\ \ \text{ for}\
\ x_0\in\partial\Omega\ \ \text{and}\ \ w\cdot n(x_0)<0.
\end{array}
\right.
\end{align}
Here, $\overline{u}^{\varepsilon}$ is the velocity average of $u^{\varepsilon}$, i.e.,
\begin{align}\label{average}
\overline{u}^{\varepsilon}(t,x)=\frac{1}{4\pi}\int_{\S^2}u^{\varepsilon}(t,x,w)\,\mathrm{d}{w}.
\end{align}
Throughout the paper, we will use $\overline{f}(t,x)$ to denote the velocity average of a function $f(t,x,w)$.
$u_o$ and $g$ are the given initial data and boundary data, respectively,
and $n(x_0)$ is the outward unit normal vector at boundary point $x_0\in\partial\Omega$.
Our goal is to study the asymptotic behavior of the solution $u^{\varepsilon}$ as the Knudsen number $\varepsilon\rightarrow0^+$.
When the incoming particles are distributed like a probability average of the outgoing particles (possibly plus a small perturbation), the boundary condition in \eqref{transport} is replaced by
\begin{align}\label{diffuse-BC}
u^{\varepsilon}(t,x_0,w)=\pp[u^{\varepsilon}](t,x_0)+\varepsilon h(t,x_0,w)\ \ \text{ for}\
\ x_0\in\partial\Omega\ \ \text{and}\ \ w\cdot n(x_0)<0,
\end{align}
where
\begin{align}
\pp[u^{\varepsilon}](t,x_0):=c_{\gamma}\int_{w\cdot n>0}u^{\varepsilon}(t,x_0,w)(w\cdot n)\,\mathrm{d}w
\end{align}
normalized with the constant $c_{\gamma}=\frac{1}{\pi}$ satisfying $c_{\gamma}\int_{w\cdot n>0}(w\cdot n)\,\mathrm{d}w=1$.
This is called the diffuse-reflection boundary condition.
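The normalization can be checked explicitly: in spherical coordinates around $n$ one has $\int_{w\cdot n>0}(w\cdot n)\,\mathrm{d}w=2\pi\int_0^{\pi/2}\cos\theta\sin\theta\,\mathrm{d}\theta=\pi$, so indeed $c_{\gamma}=\frac{1}{\pi}$; the short sketch below (illustrative only) confirms this numerically.
\begin{verbatim}
# Check that int_{w.n>0} (w.n) dw = pi on the unit sphere, so c_gamma = 1/pi.
import numpy as np

m = 4000
theta = (np.arange(m) + 0.5) * (np.pi / 2) / m   # angle between w and n
dtheta = (np.pi / 2) / m
# dw = sin(theta) dtheta dpsi; the azimuthal integral contributes a factor 2 pi
integral = 2 * np.pi * np.sum(np.cos(theta) * np.sin(theta)) * dtheta
print(integral, np.pi)                           # both ~ 3.14159
\end{verbatim}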
In the specular-reflection boundary problem, where the particles bounce back specularly (with a small perturbation), the boundary condition in \eqref{transport} is replaced by
\begin{align}\label{specular-BC}
u^{\varepsilon}(t,x_0,w)=u^{\varepsilon}(t,x_0,\mathcal{R}w)+\varepsilon h(t,x_0,w)\ \ \text{ for}\
\ x_0\in\partial\Omega\ \ \text{and}\ \ w\cdot n(x_0)<0
\end{align}
with $\mathcal{R}w:=w-2(w\cdot n)\,n$.
For both reflection type boundary conditions, we additionally assume the compatibility condition
\begin{align}\label{g-compatibility=}
\int_{w\cdot n<0}h(t,x_0,w)(w\cdot n)\,\mathrm{d}w=0\ \ \text{ for all}\ \ x_0\in\partial\Omega\ \ \text{and}\ \ t\in\mathbb{R}_+
\end{align}
on the given perturbative boundary data $h$,
so that the solution enjoys the null flux condition at the boundary:
\begin{align}\label{ue-zero-flux}
\int_{\S^2}u^{\varepsilon}(t,x_0,w)(w\cdot n)\,\mathrm{d}w = 0
\ \ \text{ for all}\ \ x_0\in\partial\Omega\ \ \text{and}\ \ t\in\mathbb{R}_+.
\end{align}
\subsection{Notation}
Based on the flow direction, we can divide the phase boundary $\gamma:=\partial\Omega\times\S^2$ into the incoming boundary $\gamma_-:=\big\{(x_0,w)\in\gamma:w\cdot n(x_0)<0\big\}$, the outgoing boundary $\gamma_+:=\big\{(x_0,w)\in\gamma:w\cdot n(x_0)>0\big\}$, and the grazing set $\gamma_0:=\big\{(x_0,w)\in\gamma:w\cdot n(x_0)=0\big\}$.
In particular, the boundary conditions of \eqref{transport}, \eqref{diffuse-BC} and \eqref{specular-BC} are only prescribed on $\gamma_{-}$.
Denote $\Gamma:=[0,T]\times\gamma$ and we may also split $\Gamma=\Gamma_-\cup\Gamma_+\cup\Gamma_0$ in a parallel manner.
Let $\brb{\ \cdot\ ,\ \cdot\ }{\Sigma}$ denote the inner product with respect to variable(s) $\Sigma$ which can be $t,x,w$, their combinations, or the domain specified, and let $\dbr{\ \cdot\ ,\ \cdot\ }$ be the inner product over $[0,t]\times\Omega\times\S^2$ for $t\in[0,T]$, i.e.,
\begin{align}
\dbr{f,g} := \int_0^t\!\iint_{\Omega\times\S^2} fg \,\mathrm{d}w\mathrm{d}x\mathrm{d} t' .
\end{align}
Also, let $\br{\ \cdot\ ,\ \cdot\ }_{\gamma_{\pm}}$ denote the inner product on $\gamma_{\pm}$ with measure $\mathrm{d}\gamma:=\abs{w\cdot n}\mathrm{d} w\mathrm{d} S_x$, where $\mathrm{d} S_x$ is the differential of the boundary surface.
When the time variable is involved, we use $\br{\ \cdot\ ,\ \cdot\ }_{\Gamma_{\pm}}$
to denote
\begin{align}
\br{f,g}_{\Gamma_{\pm}} := \int_0^t\!\iint_{\partial\Omega\times\S^2} fg \abs{w\cdot n}\mathrm{d} w\mathrm{d} S_x\mathrm{d} t' .
\end{align}
Define the bulk and boundary $L^2$ norms
\begin{align}
\tnm{f}:=\left(\iint_{\Omega\times\S^2}\babs{f(x,w)}^2\mathrm{d} w\mathrm{d} x\right)^{\frac{1}{2}},\qquad \tnms{f}{\gamma_{\pm}}:=\bigg(\int_{\gamma_{\pm}}\babs{f(x,w)}^2\mathrm{d}\gamma\bigg)^{\frac{1}{2}},
\end{align}
and the $L^{\infty}$ norms
\begin{align}
\lnm{f}:=\esssup_{(x,w)\in\Omega\times\S^2}\babs{f(x,w)},\qquad
\lnms{f}{\gamma_{\pm}}:=\esssup_{(x,w)\in\gamma_{\pm}}\babs{f(x,w)}.
\end{align}
When the $t$ variable is involved, we use $\tnnm{f}$, $\tnnms{f}{\Gamma_\pm}$, $\lnnm{f}$ and $\lnnms{f}{\Gamma_\pm}$ to denote the corresponding norms.
For $k,m,l\in\mathbb{N}$, $1\leq p,q,r\leq\infty$, let $W^{k,p}$ be the Sobolev space, and let $\nnm{\,\cdot\,}_{W^{l,r}_tW^{k,p}_xW^{m,q}_w}$ denote the usual Sobolev norm for $t\in\mathbb{R}_+$, $x\in\Omega$, and $w\in\S^2$.
If we use mixed norms on the boundary, then denote by $\abs{\,\cdot\,}_{W^{k,p}_xW^{m,q}_w(\mathcal{G}amma_\partialm)}$ the mixed Sobolev norm for $(x,w)\in\mathcal{G}amma_\partialm$ with measure $\mathrm{d}\mathcal{G}amma=\abs{w\cdot n}\mathrm{d} w\mathrm{d} S_x$. The same convention also applies to $\nm{\,\cdot\,}_{W^{l,r}_tW^{k,p}_xW^{m,q}_w(\Gamma_\partialm)}$ when $t$ is involved.
Sometimes we will omit the variables in the subscript when there is no possibility of confusion.
We will abuse notation for functions under a change of variables:
for example, if $\mathcal{T}:(t,x,w)\mapsto(\tau,y,v)$ and $\widetilde{f}(\tau,y,v):=f\circ\mathcal{T}^{-1}(\tau,y,v)$,
then we simply write $f(\tau,y,v)$ instead of $\widetilde{f}(\tau,y,v)$ for notational simplicity.
However, unless otherwise specified, the integrals in the $L^p$ norms and the inner products are still taken with respect to the original variables $t,x,w$.
Throughout this paper, $C>0$ denotes a constant that depends only on
the spatial domain $\Omega$ or the temporal domain $[0,T]$ for some given $T>0$, but not on the data or $\varepsilon$.
It is referred to as a universal constant and may change from one inequality to another. We write $a\lesssim b$ for $a\leq Cb$ and $a\gtrsim b$ for $a\geq Cb$, and $a\simeq b$ if both $a\lesssim b$ and $a\gtrsim b$ hold.
We will use $0<\delta\ll 1$ to denote a sufficiently small constant independent of the data and $\varepsilon$.
\subsection{Main Results}
Our main theorems establish the diffusive limit of the unsteady neutron transport equation in a bounded domain under three types of boundary conditions: in-flow, diffuse reflection, and specular reflection.
Since the assumptions on the data and the limiting problems differ among the three cases, we state the results separately:
\begin{theorem}[In-flow case]\label{main theorem}
Given $u_o$ and $g$ satisfying
\begin{align}\label{assumption}
\nm{u_o}_{W^{3,\infty}_xL^{\infty}_w}+\nm{g}_{W^{2,\infty}_tW^{3,\infty}_xW^{1,\infty}_{w}(\Gamma_-)}\lesssim 1
\varepsilonnd{align}
and the compatibility condition
\begin{align}\label{compatibility}
u_o(x_0,w)=g(0,x_0,w)=C(x_0)\ \ \text{ for every}\ \ x_0\in\partial\Omega,
\varepsilonnd{align}
there exists a unique solution $u^{\varepsilon}(t,x,w)\in L^{\infty}\big([0,T]\times\Omega\times\S^2\big)$ to \varepsilonqref{transport} for any $T>0$. Moreover, the solution obeys the estimate
\begin{align}\label{main}
\bnnm{u^{\varepsilon}-U_0}_{L^2([0,T]\times\Omega\times\S^2)}\lesssim\varepsilon^{\frac{1}{2}},
\varepsilonnd{align}
where $U_0(t,x,w)=\overlineU_0(t,x)$ satisfies the heat equation with Dirichlet boundary condition:
\begin{align}
\left\{
\begin{array}{l}
\partialartial_t\overlineU_0-\Delta_x\overlineU_0=0\ \ \text{ in}\
\ [0,T]\times\Omega,\\\mathbb{R}ule{0ex}{1.5em}
\overlineU_0(0,x)=\overline{u}_o(x)\ \ \text{ in}\
\ \Omega,\\\mathbb{R}ule{0ex}{1.5em}
\overlineU_0(t,x_0)=\Phi_{0,\infty}(t;\iota_1,\iota_2)\ \ \text{ on}\ \
[0,T]\times\partial\Omega,
\varepsilonnd{array}
\mathbb{R}ight.
\varepsilonnd{align}
in which $\Phi_{0,\infty}$ is determined by solving the boundary layer equation (Milne problem) for $\Phi_0$:
\begin{align}
\left\{
\begin{array}{l}
\Sin\partialhi\,\deltafrac{\partial \Phi_0 }{\partial\varepsilonta}+\Phi_0 -\overline{\bl}_0 =0,\\\mathbb{R}ule{0ex}{1.2em}
\Phi_0 (t;0,\iota_1,\iota_2;\partialhi,\partialsi)=g(t;\iota_1,\iota_2;\partialhi,\partialsi)\ \ \text{ for}\ \
\Sin\partialhi>0,\\\mathbb{R}ule{0ex}{1.5em}
\deltaisplaystyle\lim_{\varepsilonta\mathbb{R}ightarrow\infty}\Phi_0 (t;\varepsilonta,\iota_1,\iota_2;\partialhi,\partialsi)=\Phi_{0,\infty}(t;\iota_1,\iota_2).
\varepsilonnd{array}
\mathbb{R}ight.
\varepsilonnd{align}
Here, the change of variables $(x,w)\mapsto(\varepsilonta,\iota_1,\iota_2;\partialhi,\partialsi)$ near the boundary is defined in Section~\ref{sec:boundary-layer}.
\varepsilonnd{theorem}
\begin{remark} \label{Rmk:time-decay}
If we make the stronger assumption on the boundary data that for some $\alpha>0$
\begin{align}\label{assumption.}
\nm{\mathrm{e}^{\alpha t}g}_{W^{2,\infty}_tW^{3,\infty}_xW^{1,\infty}_{w}(\Gamma_-)}\lesssim 1,
\varepsilonnd{align}
then based on Theorem~\mathbb{R}ef{thm:remainder-est}, we have the approximation estimate with time decay for some $0<\beta<\alpha$
\begin{align}\label{main=}
\nnm{\mathrm{e}^{\beta t}\big(u^{\varepsilon}-U_0\big)}_{L^2(\mathbb{R}p\times\Omega\times\S^2)}\lesssim\varepsilon^{\frac{1}{2}}.
\varepsilonnd{align}
\varepsilonnd{remark}
\begin{remark}
The estimate \eqref{main} achieves the optimal convergence rate in $L^2_{t,x,w}$. Indeed, for 2D/3D convex domains it is shown in \cite{AA005,AA012,AA016} that
\begin{align}
\btnnm{u^{\varepsilon}-\widetilde{U}_0-\widetilde{U}u_0-Uuu_0}\lesssim\varepsilon^{\frac{5}{6}-},
\varepsilonnd{align}
where $\widetilde{U}u_0$ is the boundary layer with geometric correction, $Uuu_0$ is the initial layer, and $\widetilde{U}_0$ is the corresponding interior solution. \cite[Theorem 2.1]{Li.Lu.Sun2017} reveals that the two interior solutions differ by
\begin{align}
\btnnm{\widetilde{U}_0-U_0}\lesssim\varepsilon^{\frac{2}{3}}.
\varepsilonnd{align}
Due to the rescaling in the normal spatial variable $\varepsilonta=\varepsilon^{-1}\mu$, for general non-constant in-flow boundary data, the boundary layer $\widetilde{U}u_0\neq0$ satisfies
\begin{align}
\btnnm{\widetilde{U}u_0}\Simeq\varepsilon^{\frac{1}{2}}.
\varepsilonnd{align}
Also,
due to the rescaling in the temporal variable $\tau=\varepsilon^{-2}t$, we have
\begin{align}
\btnnm{Uuu_0}\Simeq\varepsilon.
\varepsilonnd{align}
Hence, we conclude that
\begin{align}
\btnnm{u^{\varepsilon}-U_0}\Simeq\varepsilon^{\frac{1}{2}}.
\varepsilonnd{align}
This suggests the optimality of our diffusive approximation \varepsilonqref{main}.
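Concretely, the lower bound in the last display follows from the triangle inequality applied to the cited estimates (a sketch, for $\varepsilon$ sufficiently small):
\begin{align*}
\btnnm{u^{\varepsilon}-U_0}
\geq \btnnm{\widetilde{U}u_0}-\btnnm{u^{\varepsilon}-\widetilde{U}_0-\widetilde{U}u_0-Uuu_0}-\btnnm{\widetilde{U}_0-U_0}-\btnnm{Uuu_0}
\gtrsim \varepsilon^{\frac{1}{2}}-\varepsilon^{\frac{5}{6}-}-\varepsilon^{\frac{2}{3}}-\varepsilon
\gtrsim\varepsilon^{\frac{1}{2}},
\end{align*}
and the matching upper bound is obtained in the same way.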
\varepsilonnd{remark}
In the case of the diffuse and specular reflection boundary conditions, we have the following analogues:
\begin{theorem}[Diffuse-reflection case]\label{main theorem-2}
If we consider \varepsilonqref{transport} with the boundary condition replaced by \varepsilonqref{diffuse-BC},
then under the assumption
\begin{align}\label{assumption-2}
\nm{u_o}_{W^{3,\infty}_xL^{\infty}_w}+\nm{h}_{W^{1,\infty}_tW^{2,\infty}_xL^{\infty}_w(\Gamma_-)}\lesssim 1
\varepsilonnd{align}
with the compatibility conditions \varepsilonqref{g-compatibility=} and
\begin{align}\label{compatibility-2}
u_o(x_0,w)=C(x_0), \quad h(0,x_0,w)=0\ \ \text{ for every}\ \ x_0\in\partial\Omega,
\varepsilonnd{align}
there exists a unique solution $u^{\varepsilon}(t,x,w)\in L^{\infty}\big([0,T]\times\Omega\times\S^2\big)$ for any $T>0$. Moreover, the solution obeys the estimate
\begin{align}\label{main-2}
\bnnm{u^{\varepsilon}-U_0}_{L^2([0,T]\times\Omega\times\S^2)}\lesssim\varepsilon^{\frac{1}{2}},
\varepsilonnd{align}
where $U_0(t,x,w)=\overlineU_0(t,x)$ satisfies the heat equation with Neumann boundary condition:
\begin{align}
\left\{
\begin{array}{l}
\partialartial_t\overlineU_0-\Delta_x\overlineU_0=0\ \ \text{ in}\
\ [0,T]\times\Omega,\\\mathbb{R}ule{0ex}{1.5em}
\overlineU_0(0,x)=\overline{u}_o(x)\ \ \text{ in}\
\ \Omega,\\\mathbb{R}ule{0ex}{1.5em}
\tfrac{\partial\overlineU_0}{\partialn}(t,x_0)=0\ \ \text{ on}\ \
[0,T]\times\partial\Omega.
\varepsilonnd{array}
\mathbb{R}ight.
\varepsilonnd{align}
\varepsilonnd{theorem}
\begin{theorem}[Specular-reflection case]\label{main theorem-3}
If we consider \varepsilonqref{transport} with the boundary condition replaced by \varepsilonqref{specular-BC},
then under the assumption
\begin{align}\label{assumption-3}
\nm{u_o}_{W^{3,\infty}_xL^{\infty}_w}+\nm{h}_{W^{1,\infty}_tW^{2,\infty}_xW^{1,\infty}_w(\Gamma_-)}\lesssim 1
\varepsilonnd{align}
with the compatibility conditions \varepsilonqref{g-compatibility=} and $h|_{\Gamma_0}=0$, as well as
\begin{align}\label{compatibility-3}
u_o(x_0,w)=C(x_0), \quad \nabla_x u_o(x_0,w)=0, \quad h(0,x_0,w)=0\ \ \text{ for every}\ \ x_0\in\partial\Omega,
\varepsilonnd{align}
there exists a unique solution $u^{\varepsilon}(t,x,w)\in L^{\infty}\big([0,T]\times\Omega\times\S^2\big)$ for any $T>0$. Moreover, the solution obeys the estimate
\begin{align}\label{main-3}
\bnnm{u^{\varepsilon}-U_0}_{L^2([0,T]\times\Omega\times\S^2)}\lesssim\varepsilon^{\frac{1}{2}},
\varepsilonnd{align}
where $U_0(t,x,w)=\overlineU_0(t,x)$ satisfies the heat equation with Neumann boundary condition:
\begin{align}
\left\{
\begin{array}{l}
\partialartial_t\overlineU_0-\Delta_x\overlineU_0=0\ \ \text{ in}\
\ [0,T]\times\Omega,\\\mathbb{R}ule{0ex}{1.5em}
\overlineU_0(0,x)=\overline{u}_o(x)\ \ \text{ in}\
\ \Omega,\\\mathbb{R}ule{0ex}{1.5em}
\tfrac{\partial\overlineU_0}{\partialn}(t,x_0)=0\ \ \text{ on}\ \
[0,T]\times\partial\Omega.
\varepsilonnd{array}
\mathbb{R}ight.
\varepsilonnd{align}
\varepsilonnd{theorem}
\begin{remark}
To compare the three boundary problems:
in all three cases,
we achieve the $L^2$ diffusive limit with the same convergence rate
\begin{align}
\bnnm{u^{\varepsilon}-U_0}_{L^2([0,T]\times\Omega\times\S^2)}\lesssim\varepsilon^{\frac{1}{2}},
\varepsilonnd{align}
and, in a similar fashion to Remark~\ref{Rmk:time-decay}, the corresponding time-decay estimate holds if the boundary data decay in time.
For the in-flow case,
the interior solution $U_0$ satisfies the heat equation with Dirichlet boundary data determined by the boundary layer.
For both the diffuse and specular cases, $U_0$ does not depend on any boundary layer, since the leading-order boundary layer vanishes.
However, the proof in the specular case still requires introducing a next-order (artificial) ``boundary layer'' in order to enforce the exact specular boundary condition for the remainder
(because we cannot control the boundary trace of the remainder in the energy or kernel estimate); the diffuse case, in contrast, allows a small perturbation that can be controlled by $\big(1-\partialp\big)[\mathbb{R}e]$ in the energy estimate, so no boundary layer is needed in the analysis.
\varepsilonnd{remark}
\subsection{Overview of the Method}
The neutron transport equation has been investigated from various perspectives since the 1960s. For the physical modeling and formal asymptotic expansion, we refer to \cite{Larsen1974=, Larsen1974, Larsen1975, Larsen1977, Larsen.D'Arruda1976, Larsen.Habetler1973, Larsen.Keller1974, Larsen.Zweifel1974, Larsen.Zweifel1976}. For theoretical analysis, we refer to \cite{Bensoussan.Lions.Papanicolaou1979, Bardos.Santos.Sentis1984, Bardos.Golse.Perthame1987, Bardos.Golse.Perthame.Sentis1988}. For recent developments on the diffusive limit and the half-space problem, we refer to \cite{AA003, AA016, AA020, Li.Lu.Sun2015, Li.Lu.Sun2015==, Li.Lu.Sun2017} and the references therein.
In bounded domains, the mismatch of boundary conditions in the asymptotic expansion calls for a kinetic correction -- the so-called boundary layer (Knudsen layer). As far as we are aware, there are two main approaches:
In flat domains (e.g. half space or $\mathbb{T}^2\times[0,1]$), the boundary layer $Uu_0(t;\varepsilonta,\iota_1,\iota_2;\partialhi,\partialsi)$ relies on the Milne problem (cf. \cite{Bensoussan.Lions.Papanicolaou1979, Bardos.Santos.Sentis1984})
\begin{align}\label{intro 01}
\Sin\partialhi\,\frac{\partial Uu_0}{\partial\varepsilonta}+Uu_0-\overline{Uu_0}=0.
\varepsilonnd{align}
This formulation is consistent with the intuitive derivation from the transport operator under the change of coordinates and rescaling (see \eqref{expand 6}).
Unfortunately, in curved domains, the non-vanishing curvature and the resulting geometric effects are non-negligible due to the grazing-set singularity. A surprising counterexample is constructed in \cite{AA003}, showing that
\begin{align}
\lim_{\varepsilon\mathbb{R}ightarrow0}\lnnm{u^{\varepsilon}-{U_0}-{Uu_0}-{Uuu_0}}\neq0.
\varepsilonnd{align}
As \cite{AA003,AA016} pointed out, in curved convex domains, the boundary layer $\widetilde{U}u_0(t;\varepsilonta,\iota_1,\iota_2;\partialhi,\partialsi)$ relies on the so-called $\varepsilon$-Milne problem with geometric correction:
\begin{align}\label{intro 02}
\Sin\partialhi\,\frac{\partial\widetilde{U}u_0}{\partial\varepsilonta}-\frac{\varepsilon}{1-\varepsilon\varepsilonta}\cos\partialhi\,\frac{\partial\widetilde{U}u_0}{\partial\partialhi}+\widetilde{U}u_0-\overline{\widetilde{U}u_0}=0,
\varepsilonnd{align}
where the extra correction term $-\frac{\varepsilon}{1-\varepsilon\varepsilonta}\cos\partialhi\frac{\partial\widetilde{U}u_0}{\partial\partialhi}$ helps provide the weighted $W^{1,\infty}$ bound of $\widetilde{U}u_0$ and eventually leads to the desired diffusive limit.
In both flat and convex domains, the proof relies on expanding the boundary layer to higher order which provides sufficient $\varepsilon$ power to close the remainder estimate.
However, in non-convex domains, as \cite{AA006, AA020} reveal, the Milne problem with or without geometric correction does not guarantee $W^{1,\infty}$ regularity of the boundary layer. As a consequence, we cannot even control the remainder terms induced by the leading-order boundary layer.
In this paper, we employ a fresh approach to justify the $L^2$ diffusive expansion in general bounded domains, based on a cutoff boundary layer and a novel remainder estimate. We intend to show that
\begin{align}\label{intro 03}
\lim_{\varepsilon\mathbb{R}ightarrow0}\tnnm{u^{\varepsilon}-U_0-Uu_0-Uuu_0}=0
\varepsilonnd{align}
for the leading-order interior solution $U_0$, boundary layer $Uu_0$, and initial layer $Uuu_0$.
It is well-known that the classical bounds for the remainder $\mathbb{R}e:=u^{\varepsilon}-U_0-Uu_0-Uuu_0$ read (see \cite{AA005,AA012,AA016})
\begin{align}
\tnm{\mathbb{R}e(t)}+\varepsilon^{-\frac{1}{2}}\tnnms{\mathbb{R}e}{\Gamma_+}+\varepsilon^{-1}\tnnm{\re-\bre}&\lesssim\delta\tnnm{\overline{\re}}+1,\\
\tnnm{\overline{\re}}&\lesssim \varepsilon^{-1}\tnnm{\re-\bre}+\tnnms{\mathbb{R}e}{\Gamma_+}+\varepsilon^{\frac{1}{2}}\tnm{\mathbb{R}e(t)}+1,\label{intro 05}
\varepsilonnd{align}
which yield
\begin{align}\label{intro 04}
\tnm{\mathbb{R}e(t)}+\varepsilon^{-\frac{1}{2}}\tnnms{\mathbb{R}e}{\Gamma_+}+\varepsilon^{-1}\tnnm{\re-\bre}+\tnnm{\overline{\re}}\lesssim1.
\varepsilonnd{align}
Clearly, without expanding to higher-order boundary layers, this is insufficient to conclude \eqref{intro 03}.
The bottleneck in \eqref{intro 04} is the $\overline{\re}$ bound \eqref{intro 05}.
Starting from \cite{AA020}, which treats the steady problem with in-flow boundary data, we design several delicate test functions in the kernel estimate to obtain
\begin{align}\label{intro 06}
\tnnm{\overline{\re}}\lesssim \tnnm{\re-\bre}+\tnnms{\mathbb{R}e}{\Gamma_+}+\varepsilon^{\frac{1}{2}}\tnm{\mathbb{R}e(t)}+\varepsilon^{\frac{1}{2}},
\varepsilonnd{align}
which will lead to the desired remainder estimate
\begin{align}
\tnm{\mathbb{R}e(t)}+\varepsilon^{-\frac{1}{2}}\tnnms{\mathbb{R}e}{\Gamma_+}+\varepsilon^{-1}\tnnm{\re-\bre}+\varepsilon^{-\frac{1}{2}}\tnnm{\overline{\re}}\lesssim1.
\varepsilonnd{align}
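Schematically, \eqref{intro 06} closes the argument as follows (a sketch, suppressing constants): it implies
\begin{align*}
\tnnm{\overline{\re}}\lesssim\varepsilon^{\frac{1}{2}}\Big(\tnm{\mathbb{R}e(t)}+\varepsilon^{-\frac{1}{2}}\tnnms{\mathbb{R}e}{\Gamma_+}+\varepsilon^{-1}\tnnm{\re-\bre}\Big)+\varepsilon^{\frac{1}{2}},
\end{align*}
so feeding this back into the energy bound and choosing $\delta$ small (independently of $\varepsilon$) absorbs the $\delta\tnnm{\overline{\re}}$ contribution and yields the displayed remainder estimate, including the improved weight $\varepsilon^{-\frac{1}{2}}$ in front of $\tnnm{\overline{\re}}$.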
The key to achieving \eqref{intro 06} is a delicate combination of three conservation laws. We introduce the auxiliary function $\xi(t,x)$ satisfying $-\Delta_x\xi=\overline{\re}$ and $\xi|_{\partial\Omega}=0$. For the remainder equation
\begin{align}
\varepsilon\partialartial_t\mathbb{R}e+w\cdot\nabla_x\mathbb{R}e+\varepsilon^{-1}\big(\re-\bre\big)=\Ss,
\varepsilonnd{align}
testing against $\varepsilon^{-1}\xi$ yields
\begin{align}\label{kk 01}
\deltabr{\partialartial_t\overline{\re},\xi}-\varepsilon^{-1}\deltabbr{\re-\bre,w\cdot\nabla_x\xi}=\varepsilon^{-1}\deltabr{\Ss,\xi},
\varepsilonnd{align}
testing against $w\cdot\nabla_x\xi$ yields
\begin{align}\label{kk 02}
\varepsilon\deltabbr{\partialartial_t\mathbb{R}e,w\cdot\nabla_x\xi}+\bbr{\mathbb{R}e,w\cdot\nabla_x\xi}_{\Gamma_+} - \bbr{\mathcal{G}, w\cdot\nabla_x\xi}_{\Gamma_-} \\
-\deltabbr{\mathbb{R}e,w\cdot\nabla_x\big(w\cdot\nabla_x\xi\big)}+\varepsilon^{-1}\deltabbr{\re-\bre,w\cdot\nabla_x\xi}
&=\deltabbr{\Ss,w\cdot\nabla_x\xi},\nonumber
\varepsilonnd{align}
while the test function $\varepsilon\partialartial_t\xi$ leads to
\begin{align}\label{kk 03}
\varepsilon^2\deltabbr{\partialartial_t\overline{\re},\partialartial_t\xi}-\varepsilon\deltabbr{\re-\bre,w\cdot\nabla_x\partialartial_t\xi}=\varepsilon\deltabbr{\Ss,\partialartial_t\xi}.
\varepsilonnd{align}
On the one hand, the linear combination of \varepsilonqref{kk 01} and \varepsilonqref{kk 02} crucially cancels $\varepsilon^{-1}\deltabbr{\re-\bre,w\cdot\nabla_x\xi}$ so that it kills the worst contribution of $\varepsilon^{-1}\tnnm{\re-\bre}$, and also provides the control of $-\deltabbr{\overline{\re},w\cdot\nabla_x\big(w\cdot\nabla_x\xi\big)}\Simeq\tnnm{\overline{\re}}^2$.
On the other hand, the estimate of $\varepsilon\deltabbr{\partialartial_t\mathbb{R}e,w\cdot\nabla_x\xi}\Sim \varepsilon\deltabbr{\mathbb{R}e,w\cdot\nabla_x\partialartial_t\xi}$ (up to some good terms) calls for the control of $\partialartial_t\nabla_x\xi$, which is in turn provided by \varepsilonqref{kk 03} as $\varepsilon^2\deltabbr{\partialartial_t\overline{\re},\partialartial_t\xi}=\varepsilon^2\btnnm{\nabla_x\partialartial_t\xi}^2$.
Combined with the key favorable sign of $\deltabbr{\partialartial_t\overline{\re},\xi}\Sim\btnm{\nabla_x\xi(t)}^2$ and a careful analysis of the source terms $\varepsilon^{-1}\deltabr{\Ss,\xi}$, $\deltabbr{\Ss,w\cdot\nabla_x\xi}$, and $\varepsilon\deltabbr{\Ss,\partialartial_t\xi}$ using Hardy's inequality and delicately chosen norms, we are able to create an extra gain of $\varepsilon^{\frac{1}{2}}$ for the kernel bound. We then conclude the remainder estimate without any further expansion of higher-order boundary layers.
The remainder of this paper is organized as follows: Section~\ref{Sec:asymptotic} focuses on the asymptotic expansion of the interior solution, boundary layer, and initial layer. Section~\ref{Sec:remainder-eq} presents the setup of the remainder equation, and Section~\ref{Sec:remainder-est} details the estimates of $\re-\bre$ and $\overline{\re}$.
Finally, the main theorem for the in-flow case is proved in Section~\ref{Sec:diffusive-limit}, while the diffuse and specular boundary problems are discussed in Section~\ref{Sec:diffuse-BC} and Section~\ref{Sec:specular-BC}, respectively.
\section{Asymptotic Analysis}\label{Sec:asymptotic}
We seek a solution to \varepsilonqref{transport} in the form of
\begin{align}\label{expand}
u^{\varepsilon}=U+Uuu+Uu+\mathbb{R}e=\left(U_0+\varepsilonU_1+\varepsilon^2U_2\mathbb{R}ight)+\left(Uuu_0+\varepsilonUuu_1\mathbb{R}ight)+Uu_0+\mathbb{R}e,
\varepsilonnd{align}
where the interior solution
\begin{align}\label{expand 1}
U(t,x,w):= U_0(t,x,w)+\varepsilonU_1(t,x,w)+\varepsilon^2U_2(t,x,w),
\varepsilonnd{align}
the initial layer
\begin{align}\label{expand 2'}
Uuu(\tau,x,w):= Uuu_0(\tau,x,w)+\varepsilonUuu_1(\tau,x,w),
\varepsilonnd{align}
and the boundary layer
\begin{align}\label{expand 2}
Uu(t;\varepsilonta,\iota_1,\iota_2;\partialhi,\partialsi):= Uu_0(t;\varepsilonta,\iota_1,\iota_2;\partialhi,\partialsi).
\varepsilonnd{align}
Here, $U_0$, $U_1$, $U_2$, $Uuu_0$, $Uuu_1$, and $Uu_0$ will be constructed in the following subsections, and $\mathbb{R}e(t,x,w)$ is the remainder.
\subsection{Interior Solution}\label{sec:interior}
Inserting the asymptotic expansion ansatz $u^{\varepsilon}\sim \sum_{k=0}^{\infty}U_k\,\varepsilon^k$ into \eqref{transport}, we obtain a hierarchy of equations by matching each order of $\varepsilon$.
Following the analysis in \cite{AA016}, we deduce that
\begin{align}
&U_0=\overline{\u}_0,\qquad
\partialartial_t\overline{\u}_0-\Delta_x\overline{\u}_0=0,\label{expand 4}\\
&U_1=\overline{\u}_1-w\cdot\nabla_xU_{0},\qquad
\partialartial_t\overline{\u}_1-\Delta_x\overline{\u}_1=0,\label{expand 4'}\\
&U_2=\overline{\u}_2-w\cdot\nabla_xU_{1}-\partialartial_tU_0,\qquad
\partialartial_t\overline{\u}_2-\Delta_x\overline{\u}_2=0. \label{expand 4''}
\varepsilonnd{align}
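For the reader's convenience, here is a sketch of how these relations arise. Matching the powers of $\varepsilon$ after inserting the ansatz into $\varepsilon\partial_t u+w\cdot\nabla_x u+\varepsilon^{-1}\big(u-\overline{u}\big)=0$ gives
\begin{align*}
\varepsilon^{-1}&:\quad U_0-\overline{\u}_0=0,\\
\varepsilon^{0}&:\quad w\cdot\nabla_xU_0+\big(U_1-\overline{\u}_1\big)=0,\\
\varepsilon^{1}&:\quad \partial_tU_0+w\cdot\nabla_xU_1+\big(U_2-\overline{\u}_2\big)=0,
\end{align*}
which yield the expressions for $U_0$, $U_1$, $U_2$ in \eqref{expand 4}--\eqref{expand 4''}; averaging the $\varepsilon^{1}$ relation in $w$ (and similarly the next one) then produces the heat equations for $\overline{\u}_0$ and $\overline{\u}_1$, up to the diffusion constant $\overline{w_1^{2}}$ coming from the spherical average, whose normalization we do not track here (see \cite{AA016}).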
The initial and boundary conditions for $\overline{\u}_0$, $\overline{\u}_1$, and $\overline{\u}_2$ are determined by the initial and boundary layers.
\subsection{Initial Layer}\label{sec:initial-layer}
Based on the principle of dominant balance,
we find the correct scaling for the time variable is $\tau=\varepsilon^{-2}t$, and so $\partial_t=\varepsilon^{-2}\partial_{\tau}$.
Under the substitution $t\mapsto\tau$, the rescaled equation of \varepsilonqref{transport} for the initial layer reads
\begin{align}\label{initial equation}
\partial_\tau Uuu+\varepsilonw\cdot\nabla_x Uuu+\big(Uuu-\overline{Uuu}\big)=0.
\varepsilonnd{align}
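Indeed, since $\varepsilon\partial_t=\varepsilon^{-1}\partial_\tau$ under $\tau=\varepsilon^{-2}t$, the transport equation becomes $\varepsilon^{-1}\partial_\tau Uuu+w\cdot\nabla_x Uuu+\varepsilon^{-1}\big(Uuu-\overline{Uuu}\big)=0$, and multiplying by $\varepsilon$ gives exactly \eqref{initial equation}.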
Inserting the expansion $Uuu\sim \sum_{k=0}^{\infty}Uuu_k\,\varepsilon^k$ into \eqref{initial equation} and comparing the orders of $\varepsilon$, we find that
\begin{align}
\partial_{\tau}Uuu_0+Uuu_0-\overline{Uuu_0}&=0,\\
\partial_{\tau}Uuu_1+Uuu_1-\overline{Uuu_1}&=-w\cdot\nabla_xUuu_0.
\varepsilonnd{align}
Let us consider the general initial layer problem
\begin{align} \label{Pb:initial-layer}
\left\{
\begin{array}{l}
\deltafrac{\mathrm{d}\Theta}{\mathrm{d}\tau}+\Theta-\overline{\il}=S,\\\mathbb{R}ule{0ex}{1.5em}
\Theta(0,x,w)=\Theta_o(x,w).
\varepsilonnd{array}
\mathbb{R}ight.
\varepsilonnd{align}
Here, $S(\tau,x,w)$ is the forcing term, and $\Theta_o(x,w)$ is the given initial data.
We are interested in the solution $\Theta(\tau,x,w)$ that satisfies
\begin{align} \label{initial-layer-decay}
\lim_{\tau\mathbb{R}ightarrow\infty}\Theta(\tau,x,w)=\Theta_{\infty}(x)
\varepsilonnd{align}
for some $\Theta_{\infty}(x)$ that does not depend on $w$.
The following proposition guarantees the solvability and regularity of the above initial layer problem:
\begin{proposition}[Initial layer problem] \label{prop:initial-wellposedness}
Let $k\in\mathbb{N}$.
Assume
\begin{align}
\nm{\Theta_o}_{W^{k,\infty}_xL^{\infty}_w} +\nnm{\mathrm{e}^{\alpha\tau}S}_{L^{\infty}_\tau W^{k,\infty}_xL^{\infty}_w} \lesssim 1
\varepsilonnd{align}
for some $\alpha>0$.
Then there exists a unique solution $\Theta(\tau,x,w)\in L^{\infty}_\tau W^{k,\infty}_xL^{\infty}_w$ to \eqref{Pb:initial-layer} and a $\Theta_{\infty}(x)\in W^{k,\infty}_x$ such that \eqref{initial-layer-decay} holds in the sense of
\begin{align} \label{initial-layer-decay'}
\nm{\Theta_{\infty}}_{W^{k,\infty}_x}+\nnm{\mathrm{e}^{\beta\tau}(\Theta-\Theta_{\infty})}_{L^{\infty}_\tau W^{k,\infty}_xL^{\infty}_w}\lesssim 1
\varepsilonnd{align}
for any $0<\beta\leq \min\{1,\alpha\}$.
\varepsilonnd{proposition}
\begin{proof}
By standard ODE theory, the initial value problem \eqref{Pb:initial-layer} admits a unique solution.
We first integrate the equation in $w$ to find $\partial_\tau\overline{\Theta}=\overline{S}$,
and so $\overline{\il}(\tau) = \overline{\Theta}_o + \int_0^\tau \overline{S}\,\mathrm{d}\tau'$.
Then subtracting this from \varepsilonqref{Pb:initial-layer}, we get
\begin{align} \label{Pb:initial-layer'}
\left\{
\begin{array}{l}
\deltafrac{\mathrm{d}}{\mathrm{d}\tau}\big(\Theta-\overline{\Theta}\big)+\big(\Theta-\overline{\Theta}\big)=S-\overline{S},\\\mathbb{R}ule{0ex}{1.5em}
\big(\Theta-\overline{\Theta}\big)\big|_{\tau=0}=\Theta_o-\overline{\Theta}_o,
\varepsilonnd{array}
\mathbb{R}ight.
\varepsilonnd{align}
which can be solved using the integrating factor $\mathrm{e}^\tau$. We have
\begin{align}
\Theta-\overline{\Theta} = \mathrm{e}^{-\tau}\big(\Theta_o-\overline{\Theta}_o\big)
+ \int_0^\tau \mathrm{e}^{\tau'-\tau}\big(S-\overline{S}\big)\,\mathrm{d}\tau',
\varepsilonnd{align}
and thus
\begin{align}
\Theta = \overline{\Theta}_o + \mathrm{e}^{-\tau}\big(\Theta_o-\overline{\Theta}_o\big)
+ \int_0^\tau \Big\{ \overline{S} + \mathrm{e}^{\tau'-\tau}\big(S-\overline{S}\big)\Big\}\,\mathrm{d}\tau'.
\varepsilonnd{align}
Let
\begin{align} \label{theta-infinity}
\Theta_{\infty}:=\overline{\Theta}_{\infty}=\lim_{\tau\mathbb{R}ightarrow\infty}\overline{\Theta}=\overline{\Theta}_o + \int_0^\infty \!\overline{S}\,\mathrm{d}\tau.
\varepsilonnd{align}
Then it is straightforward to verify \varepsilonqref{initial-layer-decay'}.
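For instance, writing (a sketch)
\begin{align*}
\Theta-\Theta_{\infty}
=\mathrm{e}^{-\tau}\big(\Theta_o-\overline{\Theta}_o\big)
+\int_0^\tau\mathrm{e}^{\tau'-\tau}\big(S-\overline{S}\big)\,\mathrm{d}\tau'
-\int_\tau^\infty\overline{S}\,\mathrm{d}\tau',
\end{align*}
the first term is bounded by $\mathrm{e}^{-\tau}$, the last by $\mathrm{e}^{-\alpha\tau}$, and the middle one decays at the slower of the two rates $\mathrm{e}^{-\tau}$ and $\mathrm{e}^{-\alpha\tau}$ (up to a harmless factor $1+\tau$ when $\alpha=1$, by a direct computation of the convolution integral); the same bounds hold for the $x$-derivatives up to order $k$, since differentiation in $x$ commutes with the formula.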
\varepsilonnd{proof}
Let $\Theta_0$ be the solution to \varepsilonqref{Pb:initial-layer} with $S=0$ and the initial data $\Theta_{0,o}=u_o$ satisfying \varepsilonqref{assumption}. Then by \varepsilonqref{theta-infinity} we have $\Theta_{0,\infty}=\overline{u}_o$.
Define the leading-order initial layer to be
\begin{align}\label{initial-layer-0}
Uuu_0(\tau,x,w):=\Theta_0(\tau,x,w)-\Theta_{0,\infty}(x),
\varepsilonnd{align}
so that it satisfies
\begin{align}\label{Pb:initial-layer-0}
\left\{
\begin{array}{l}
\partial_{\tau}Uuu_0+Uuu_0-\overline{Uuu_0}=0,\\\mathbb{R}ule{0ex}{1.5em}
Uuu_0(0,x,w)=u_o(x,w)-\overline{u}_o(x),\\\mathbb{R}ule{0ex}{1.5em}
\deltaisplaystyle\lim_{\tau\mathbb{R}ightarrow\infty}Uuu_0(\tau,x,w)=0,
\varepsilonnd{array}
\mathbb{R}ight.
\varepsilonnd{align}
as well as
\begin{align}
\nnm{\mathrm{e}^{\tau}Uuu_0}_{L^{\infty}_\tau W^{3,\infty}_xL^{\infty}_w}\lesssim 1.
\varepsilonnd{align}
Next, we define the second-order initial layer to be
\begin{align}\label{initial-layer-1}
Uuu_1(\tau,x,w):=\Theta_1(\tau,x,w)-\Theta_{1,\infty}(x),
\varepsilonnd{align}
where $\Theta_1$ is determined by solving \varepsilonqref{Pb:initial-layer} with $S=-w\cdot\nabla_xUuu_0$ and $\Theta_{1,o}=w\cdot\nabla_xU_0(0,x,w)$,
and so $\Theta_{1,\infty}(x)=\lim_{\tau\mathbb{R}ightarrow\infty}\overline{\Theta}_1(\tau,x)$ whose explicit formula is given by \varepsilonqref{theta-infinity}.
Then $Uuu_1$ satisfies
\begin{align}\label{Pb:initial-layer-1}
\left\{
\begin{array}{l}
\partial_{\tau}Uuu_1+Uuu_1-\overline{Uuu_1}=-w\cdot\nabla_xUuu_0,\\\mathbb{R}ule{0ex}{1.5em}
Uuu_1(0,x,w)=w\cdot\nabla_xU_0(0,x,w)-\Theta_{1,\infty}(x),\\\mathbb{R}ule{0ex}{1.5em}
\deltaisplaystyle\lim_{\tau\mathbb{R}ightarrow\infty}Uuu_1(\tau,x,w)=0,
\varepsilonnd{array}
\mathbb{R}ight.
\varepsilonnd{align}
as well as
\begin{align}\label{theta_1,infty}
\nm{\Theta_{1,\infty}}_{W^{2,\infty}_x}+
\nnm{\mathrm{e}^{\tau}Uuu_1}_{L^{\infty}_\tau W^{2,\infty}_xL^{\infty}_w}\lesssim 1.
\varepsilonnd{align}
\subsection{Boundary Layer}\label{sec:boundary-layer}
We first perform a change of coordinates adapted to the geometry of the boundary.
For the smooth surface $\partial\Omega$, there exists an orthogonal curvilinear coordinate system $(\iota_1,\iota_2)$ such that the coordinate lines coincide with the principal directions at every $x_0\in\partial\Omega$. Assume that $\partial\Omega$ is parameterized by $\mathbf{r}=\mathbf{r}(\iota_1,\iota_2)$, and let $\varsigma_i:=\partiall_i^{-1}\partial_{\iota_i}\mathbf{r}$ ($i=1,2$) be the two orthogonal unit tangent vectors, where $\partiall_i:=\abs{\partial_{\iota_i}\mathbf{r}}$.
Then $\{\varsigma_1,\varsigma_2,n\}$ forms an orthonormal basis at each point in a neighborhood of the boundary.
Hence, we can define a new coordinate system $(\mu, \iota_1,\iota_2)$ near the boundary, where $\mu$ denotes the normal distance to the boundary surface $\partial\Omega$, so that $x=\mathbf{r}-\mu\,n$.
We then define the rescaled normal spatial variable $\varepsilonta=\varepsilon^{-1}\mu$ again due to the principle of dominant balance, and so $\partial_\mu=\varepsilon^{-1}\partial_\varepsilonta$.
Define also the spherical substitution $w\mapsto(\partialhi,\partialsi)$ for the velocity $w\in\S^2$ via
\begin{align}\label{velocity}
-w\cdotn=\Sin\partialhi,\quad
w\cdot\varsigma_1=\cos\partialhi\Sin\partialsi,\quad
w\cdot\varsigma_2=\cos\partialhi\cos\partialsi,
\qquad \text{for }\: \partialhi\in[-\tfrac{\partiali}{2},\tfrac{\partiali}{2}],\;\,
\partialsi\in[0,2\partiali).
\varepsilonnd{align}
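Under \eqref{velocity}, $\phi$ plays the role of a latitude measured from the tangent plane, so the surface measure on $\S^2$ becomes (a standard computation)
\begin{align*}
\mathrm{d}w=\cos\phi\,\mathrm{d}\phi\,\mathrm{d}\psi,
\qquad
\overline{f}=\frac{1}{4\pi}\int_{\S^2}f\,\mathrm{d}w
=\frac{1}{4\pi}\int_{0}^{2\pi}\!\!\int_{-\frac{\pi}{2}}^{\frac{\pi}{2}}f\,\cos\phi\,\mathrm{d}\phi\,\mathrm{d}\psi,
\end{align*}
which is the weight appearing in the velocity average of the Milne problem \eqref{Pb:boundary-layer} below, and the Jacobian $\cos\phi$ used in the integration by parts in \eqref{energy 04}.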
Under the change of variables $(x,w)\mapsto(\varepsilonta,\iota_1,\iota_2;\partialhi,\partialsi)$ in the phase space, the transport operator becomes
\begin{align}\label{expand 6}
\varepsilon\partialartial_t+w\cdot\nabla_x=&\;\varepsilon\partialartial_t+\varepsilon^{-1}\Sin\partialhi\,\frac{\partial}{\partial\varepsilonta}
+\frac{\mathbb{R}c_1\cos\partialhi\Sin\partialsi}{\partiall_1(\mathbb{R}c_1-\varepsilon\varepsilonta)}\frac{\partial}{\partial\iota_1}+\frac{\mathbb{R}c_2\cos\partialhi\cos\partialsi}{\partiall_2(\mathbb{R}c_2-\varepsilon\varepsilonta)}\frac{\partial}{\partial\iota_2} \\
&\; -\bigg(\frac{\Sin^2\partialsi}{\mathbb{R}c_1-\varepsilon\varepsilonta}+\frac{\cos^2\partialsi}{\mathbb{R}c_2-\varepsilon\varepsilonta}\bigg)\cos\partialhi\,\frac{\partial}{\partial\partialhi} \nonumber\\
&\;+\frac{\Sin\partialsi}{\mathbb{R}c_1-\varepsilon\varepsilonta}\bigg\{\frac{\mathbb{R}c_1\cos\partialhi}{\partiall_1\partiall_2}\Big[\varsigma_1\cdot\Big(\varsigma_2\times\big(\partial_{\iota_1\iota_2}\mathbf{r}\times\varsigma_2\big)\Big)\Big]
-\Sin\partialhi\cos\partialsi\bigg\}\frac{\partial}{\partial\partialsi}\nonumber\\
&\;
-\frac{\cos\partialsi}{\mathbb{R}c_2-\varepsilon\varepsilonta}\bigg\{\frac{\mathbb{R}c_2\cos\partialhi}{\partiall_1\partiall_2}\Big[\varsigma_2\cdot\Big(\varsigma_1\times\big(\partial_{\iota_1\iota_2}\mathbf{r}\times\varsigma_1\big)\Big)\Big]
-\Sin\partialhi\Sin\partialsi\bigg\}\frac{\partial}{\partial\partialsi},\nonumber
\varepsilonnd{align}
where $R_i$ ($i=1,2$) is the radius of curvature.
Inserting the expansion $Uu\Sim \Sum_{k=0}^{\infty}Uu_k\,\varepsilon^k$ into the rescaled equation and comparing the order of $\varepsilon$ (see \varepsilonqref{expand 6}), we expect that the standard leading-order boundary layer $Uu_0(t;\varepsilonta,\iota_1,\iota_2;\partialhi,\partialsi)$ should satisfy
\begin{align}
\Sin\partialhi\,\frac{\partialUu_0}{\partial\varepsilonta}+Uu_0-\overline{Uu_0}=0.
\varepsilonnd{align}
Consider the general boundary layer problem (Milne problem)
\begin{align} \label{Pb:boundary-layer}
\Sin\partialhi\,\frac{\partial\Phi}{\partial\varepsilonta}+\Phi-\overline{\bl}=S,\qquad
\overline{\bl}(t;\varepsilonta,\iota_1,\iota_2):=\frac{1}{4\partiali}\int_{-\partiali}^{\partiali}\int_{-\frac{\partiali}{2}}^{\frac{\partiali}{2}}\Phi(t;\varepsilonta,\iota_1,\iota_2;\partialhi,\partialsi)\cos\partialhi\,\mathrm{d}{\partialhi}\mathrm{d}{\partialsi},
\varepsilonnd{align}
with boundary condition
\begin{align} \label{Pb:boundary-layer-BC}
\Phi(t;0,\iota_1,\iota_2;\partialhi,\partialsi)=\mathbb{R}ho(t;\iota_1,\iota_2;\partialhi,\partialsi)\ \ \text{ for}\ \ \Sin\partialhi>0.
\varepsilonnd{align}
We are interested in the solution $\Phi(t;\varepsilonta,\iota_1,\iota_2;\partialhi,\partialsi)$ that satisfies
\begin{align} \label{Pb:boundary-layer-decay}
\lim_{\varepsilonta\mathbb{R}ightarrow\infty}\Phi(t;\varepsilonta,\iota_1,\iota_2;\partialhi,\partialsi)=\Phi_{\infty}(t;\iota_1,\iota_2)
\varepsilonnd{align}
for some $\Phi_{\infty}(t;\iota_1,\iota_2)$ that does not depend on the velocity variables $(\partialhi,\partialsi)$.
According to \cite[Section 4]{AA009}, we have the well-posedness and regularity of the above Milne problem:
\begin{proposition}[Milne problem] \label{prop:boundary-wellposedness}
Assume
\begin{align}
\nm{\mathbb{R}ho}_{W^{2,\infty}_tW^{3,\infty}_{\iota_1,\iota_2}W^{1,\infty}_{w}} +\nnm{\mathrm{e}^{\alpha\varepsilonta}S}_{W^{2,\infty}_tW^{3,\infty}_xW^{1,\infty}_w} \lesssim 1
\varepsilonnd{align}
for some $\alpha>0$.
Then there exists a unique solution $\Phi\in W^{2,\infty}_tW^{3,\infty}_{\iota_1,\iota_2}W^{1,\infty}_\partialsi L^{\infty}_{\varepsilonta,\partialhi}$ to \eqref{Pb:boundary-layer}--\eqref{Pb:boundary-layer-BC} and a $\Phi_{\infty}\in W^{2,\infty}_tW^{3,\infty}_{\iota_1,\iota_2}$ such that \eqref{Pb:boundary-layer-decay} holds in the sense of
\begin{align} \label{boundary-layer-decay}
\nm{\Phi_{\infty}}_{W^{2,\infty}_tW^{3,\infty}_{\iota_1,\iota_2}}
+\nnm{\mathrm{e}^{\beta\varepsilonta}\big(\Phi-\Phi_{\infty}\big)}_{W^{2,\infty}_tW^{3,\infty}_{\iota_1,\iota_2}W^{1,\infty}_\partialsi L^{\infty}_{\varepsilonta,\partialhi}}&\lesssim 1,\\
\nnm{\mathrm{e}^{\beta\varepsilonta}\Sin\partialhi\,\partial_\varepsilonta\big(\Phi-\Phi_{\infty}\big)}_{L^\infty}
+ \nnm{\mathrm{e}^{\beta\varepsilonta}\Sin\partialhi\,\partial_\partialhi\big(\Phi-\Phi_{\infty}\big)}_{L^\infty}&\lesssim 1
\varepsilonnd{align}
for any $0<\beta< \min\{1,\alpha\}$.
\varepsilonnd{proposition}
Let $\Phi_0$ be the solution to \varepsilonqref{Pb:boundary-layer} with $S=0$ and the boundary data $\mathbb{R}ho(t;\iota_1,\iota_2;\partialhi,\partialsi)=g(t,x_0,w)$ satisfying \varepsilonqref{assumption}, and so there exists $\Phi_{0,\infty}(t;\iota_1,\iota_2)=\lim_{\varepsilonta\mathbb{R}ightarrow\infty}\Phi_0(t;\varepsilonta,\iota_1,\iota_2;\partialhi,\partialsi)$.
Then $\Phil_0:=\Phi_0-\Phi_{0,\infty}$ satisfies
\begin{align}
\left\{
\begin{array}{l}
\Sin\partialhi\,\deltafrac{\partial\Phil_0}{\partial\varepsilonta}+\Phil_0-\overline{\bl}l_0=0,\\\mathbb{R}ule{0ex}{1.2em}
\Phil_0\big|_{\{\varepsilonta=0,\,\Sin\!\partialhi>0\}}=g-\Phi_{0,\infty},\\\mathbb{R}ule{0ex}{1.5em}
\deltaisplaystyle\lim_{\varepsilonta\mathbb{R}ightarrow\infty}\Phil_0=0,
\varepsilonnd{array}
\mathbb{R}ight.
\varepsilonnd{align}
as well as
\begin{align}
\nm{\Phi_{0,\infty}}_{W^{2,\infty}_tW^{3,\infty}_{\iota_1,\iota_2}}
+\nnm{\mathrm{e}^{\beta\varepsilonta}\Phil_0}_{W^{2,\infty}_tW^{3,\infty}_{\iota_1,\iota_2}W^{1,\infty}_\partialsi L^{\infty}_{\varepsilonta,\partialhi}}&\lesssim 1,\label{Phi_0,infty}\\
\nnm{\mathrm{e}^{\beta\varepsilonta}\Sin\partialhi\,\partial_\varepsilonta\Phil_0}_{L^\infty}
+ \nnm{\mathrm{e}^{\beta\varepsilonta}\Sin\partialhi\,\partial_\partialhi\Phil_0}_{L^\infty}&\lesssim 1
\varepsilonnd{align}
for any $0<\beta< 1$.
Additionally, let $\widetilde{\chi}i(r)\in C^{\infty}(\mathbb{R})$
and $\widetilde{\chi}(r):=1-\widetilde{\chi}i(r)$ be smooth cutoff functions satisfying $\widetilde{\chi}i(r)=1$ for $\abs{r}\leq1$ and $\widetilde{\chi}i(r)=0$ for $\abs{r}\geq2$.
We define the cutoff boundary layer
\begin{align}\label{boundary layer}
Uu_0(t;\varepsilonta,\iota_1,\iota_2;\partialhi,\partialsi):=\widetilde{\chi}i(\varepsilon^{\frac{1}{2}}\varepsilonta)\widetilde{\chi}(\varepsilon^{-1}\partialhi)\Phil_0(t;\varepsilonta,\iota_1,\iota_2;\partialhi,\partialsi).
\varepsilonnd{align}
Here, the spatial cutoff $\widetilde{\chi}i(\varepsilon^{\frac{1}{2}}\varepsilonta)$ confines the boundary layer to a thin layer near the boundary and helps avoid self-interaction, while the velocity cutoff $\widetilde{\chi}(\varepsilon^{-1}\partialhi)$ removes the grazing singularity.
With these cutoffs, the modified boundary layer $Uu_0$ satisfies
\begin{align}\label{Pb:boundary-layer'}
\left\{
\begin{array}{l}
\Sin\partialhi\,\deltafrac{\partialUu_0}{\partial\varepsilonta}+Uu_0-\overline{Uu_0}=
\Sin\partialhi\cdot\varepsilon^{\frac{1}{2}}\widetilde{\chi}i'(\varepsilon^{\frac{1}{2}}\varepsilonta)\widetilde{\chi}(\varepsilon^{-1}\partialhi)\Phil_0
+ \widetilde{\chi}i(\varepsilon^{\frac{1}{2}}\varepsilonta)\Big[\overline{\widetilde{\chi}i(\varepsilon^{-1}\partialhi)\Phil_0} -\widetilde{\chi}i(\varepsilon^{-1}\partialhi)\overline{\Phil}_0 \Big], \\\mathbb{R}ule{0ex}{1.5em}
Uu_0(t;0,\iota_1,\iota_2;\partialhi,\partialsi)=\widetilde{\chi}(\varepsilon^{-1}\partialhi)\Big[g(t;\iota_1,\iota_2;\partialhi,\partialsi)-\Phi_{0,\infty}(t;\iota_1,\iota_2)\Big] \ \ \text{ for}\ \ \Sin\partialhi>0,\\\mathbb{R}ule{0ex}{1.5em}
\deltaisplaystyle\lim_{\varepsilonta\mathbb{R}ightarrow\infty}Uu_0(t;\varepsilonta,\iota_1,\iota_2;\partialhi,\partialsi)=0.
\varepsilonnd{array}
\mathbb{R}ight.
\varepsilonnd{align}
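A quick check of the right-hand side of \eqref{Pb:boundary-layer'} (a sketch): abbreviating $\chi_1:=\widetilde{\chi}i(\varepsilon^{\frac{1}{2}}\eta)$ and $\chi_2:=\widetilde{\chi}(\varepsilon^{-1}\phi)$, the product rule gives
\begin{align*}
\sin\phi\,\partial_\eta\big(\chi_1\chi_2\Phil_0\big)+\chi_1\chi_2\Phil_0-\overline{\chi_1\chi_2\Phil_0}
&=\chi_1\chi_2\Big(\sin\phi\,\partial_\eta\Phil_0+\Phil_0-\overline{\Phil}_0\Big)
+\varepsilon^{\frac{1}{2}}\sin\phi\,\widetilde{\chi}i'(\varepsilon^{\frac{1}{2}}\eta)\,\chi_2\,\Phil_0\\
&\quad+\chi_1\Big(\chi_2\overline{\Phil}_0-\overline{\chi_2\Phil_0}\Big);
\end{align*}
the first bracket vanishes because $\Phil_0$ solves the homogeneous Milne equation, $\chi_1$ is independent of the velocity variables and thus commutes with the average, and $\chi_2\overline{\Phil}_0-\overline{\chi_2\Phil_0}=\overline{\widetilde{\chi}i(\varepsilon^{-1}\phi)\Phil_0}-\widetilde{\chi}i(\varepsilon^{-1}\phi)\overline{\Phil}_0$ since $\chi_2=1-\widetilde{\chi}i(\varepsilon^{-1}\phi)$.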
Moreover, it holds that
\begin{align} \label{boundary-layer-decay''}
\nnm{\mathrm{e}^{\beta\varepsilonta\,}Uu_0}_{W^{2,\infty}_tW^{3,\infty}_{\iota_1,\iota_2}W^{1,\infty}_\partialsi L^{\infty}_{\varepsilonta,\partialhi}}\lesssim 1
\varepsilonnd{align}
for any $0<\beta< 1$.
\subsection{Matching Procedure}\label{Subsec:matching}
Now we construct the initial layer, boundary layer, and interior solution for each level of the asymptotic expansion in \varepsilonqref{expand} via a matching procedure.
\paragraph{\underline{\textit{Construction of $Uuu_0$, $Uu_0$, and $U_0$}}}
For the leading-order expansion,
we want to enforce the matching initial condition
\begin{align}
\big(U_0+Uuu_0\big)\big|_{t=0} = u_o ,
\varepsilonnd{align}
and the matching boundary condition
\begin{align}
\big(U_0+Uu_0\big)\big|_{\Gamma_-} = g +O(\varepsilon^{0+}) .
\varepsilonnd{align}
With the initial layer $Uuu_0$ given in \varepsilonqref{initial-layer-0} that satisfies \varepsilonqref{Pb:initial-layer-0}
and the boundary layer $Uu_0$ given in \varepsilonqref{boundary layer} that satisfies \varepsilonqref{Pb:boundary-layer'},
we require the interior solution $U_0$ to satisfy the following initial-boundary value problem (combining \varepsilonqref{expand 4}):
\begin{align}\label{U_0-pb}
\left\{
\begin{array}{l}
U_0=\overline{\u}_0,\quad
\partialartial_t\overline{\u}_0-\Delta_x\overline{\u}_0=0,\\\mathbb{R}ule{0ex}{1.5em}
\overlineU_0(0,x)=\overline{u}_o(x),\\\mathbb{R}ule{0ex}{1.5em}
\overlineU_0(t,x_0)=\Phi_{0,\infty}(t;\iota_1,\iota_2)\ \ \text{ for}\ \
x_0\in\partial\Omega,
\varepsilonnd{array}
\mathbb{R}ight.
\varepsilonnd{align}
which uniquely determines $U_0(t,x,w)$ by classical theory for the heat equation.
Also, with \eqref{assumption}, \eqref{Phi_0,infty}, and the standard parabolic estimates (cf. \cite{Krylov2008}), we have,
for any $2\leq rN<\infty$,
\begin{align}\label{expand 10}
\nnm{U_0}_{W^{2,\infty}_tW^{3,rN}_x}+\nm{U_0}_{W^{2,\infty}_tW^{3,rN}_{\iota_1,\iota_2}}\lesssim 1.
\varepsilonnd{align}
Then the joint boundary value is
\begin{align}
\big(U_0+Uu_0\big)\big|_{\Gamma_-} = g - \widetilde{\chi}i(\varepsilon^{-1}\partialhi)\big(g-\Phi_{0,\infty}\big) .
\varepsilonnd{align}
Here the additional term $\widetilde{\chi}i(\varepsilon^{-1}\partialhi)\big(g-\Phi_{0,\infty}\big)$ is of size $O(\varepsilon^{\frac{1}{p}})$ in the $L^p_w$ norm due to the velocity cutoff.
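A sketch of this estimate: the cutoff $\widetilde{\chi}i(\varepsilon^{-1}\phi)$ is supported in $\{\abs{\phi}\leq2\varepsilon\}$, whose $\mathrm{d}w$-measure is $O(\varepsilon)$ (recall $\mathrm{d}w=\cos\phi\,\mathrm{d}\phi\,\mathrm{d}\psi$), while $g-\Phi_{0,\infty}$ is uniformly bounded by \eqref{assumption} and \eqref{Phi_0,infty}; hence
\begin{align*}
\nm{\widetilde{\chi}i(\varepsilon^{-1}\phi)\big(g-\Phi_{0,\infty}\big)}_{L^p_w}
\lesssim\nm{g-\Phi_{0,\infty}}_{L^{\infty}_w}
\bigg(\int_{0}^{2\pi}\!\!\int_{\abs{\phi}\leq2\varepsilon}\cos\phi\,\mathrm{d}\phi\,\mathrm{d}\psi\bigg)^{\frac{1}{p}}
\lesssim\varepsilon^{\frac{1}{p}}.
\end{align*}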
\paragraph{\underline{\textit{Construction of $Uuu_1$ and $U_1$}}}
For the next-order expansion,
we want to enforce the matching initial condition
\begin{align}
\big(U_1+Uuu_1\big)\big|_{t=0} = 0 .
\varepsilonnd{align}
With the initial layer $Uuu_1$ given in \varepsilonqref{initial-layer-1} that satisfies \varepsilonqref{Pb:initial-layer-1},
we require the interior solution $U_1$ to satisfy (combining \varepsilonqref{expand 4'}):
\begin{align}\label{U_1-pb}
\left\{
\begin{array}{l}
U_1=\overline{\u}_1-w\cdot\nabla_xU_{0},\quad
\partialartial_t\overline{\u}_1-\Delta_x\overline{\u}_1=0,\\\mathbb{R}ule{0ex}{1.5em}
\overlineU_1(0,x)=\Theta_{1,\infty}(x),\\\mathbb{R}ule{0ex}{1.5em}
\overlineU_1(t,x_0)=0 \ \ \text{ for}\ \
x_0\in\partial\Omega,
\varepsilonnd{array}
\mathbb{R}ight.
\varepsilonnd{align}
which uniquely determines $U_1(t,x,w)$.
Also, with \varepsilonqref{expand 10} and \varepsilonqref{theta_1,infty}, we have
for any $2\leq rN<\infty$,
\begin{align}\label{expand 11}
\nnm{U_1}_{W^{2,\infty}_tW^{2,rN}_xL^\infty_w}+\nm{U_1}_{W^{2,\infty}_tW^{2,rN}_{\iota_1,\iota_2}L^{\infty}_w}\lesssim 1.
\varepsilonnd{align}
\paragraph{\underline{\textit{Construction of $U_2$}}}
Lastly, we solve for a higher-order expansion $U_2(t,x,w)$ from (see \varepsilonqref{expand 4''})
\begin{align}\label{U_2-pb}
\left\{
\begin{array}{l}
U_2=\overline{\u}_2-w\cdot\nabla_xU_{1}-\partialartial_tU_0,\quad
\partialartial_t\overline{\u}_2-\Delta_x\overline{\u}_2=0,\\\mathbb{R}ule{0ex}{1.5em}
\overlineU_2(0,x)=0,\\\mathbb{R}ule{0ex}{1.5em}
\overlineU_2(t,x_0)=0 \ \ \text{ for}\ \
x_0\in\partial\Omega.
\varepsilonnd{array}
\mathbb{R}ight.
\varepsilonnd{align}
In view of \varepsilonqref{expand 10} and \varepsilonqref{expand 11}, we have
for any $2\leq rN<\infty$,
\begin{align}\label{expand 11'}
\nnm{U_2}_{W^{1,\infty}_tW^{1,rN}_xL^\infty_w}+\nm{U_2}_{W^{1,\infty}_tW^{1,rN}_{\iota_1,\iota_2}L^{\infty}_w}\lesssim 1.
\varepsilonnd{align}
To summarize, we have obtained the well-posedness and regularity estimates of the interior solution, initial layer, and boundary layer:
\begin{proposition}\label{prop:wellposedness}
Assume \varepsilonqref{assumption} holds for the initial data $u_o$ and boundary data $g$.
Let $U_0, U_1, U_2$ be constructed via \varepsilonqref{U_0-pb}\varepsilonqref{U_1-pb}\varepsilonqref{U_2-pb}, $Uuu_0, Uuu_1$ in Subsection~\mathbb{R}ef{sec:initial-layer}, and $Uu_0$ in Subsection~\mathbb{R}ef{sec:boundary-layer}.
Then we have, for any $2\leq rN<\infty$,
\begin{align}
&\nnm{U_0}_{W^{2,\infty}_tW^{3,rN}_x}+\nm{U_0}_{W^{2,\infty}_tW^{3,rN}_{\iota_1,\iota_2}}\lesssim 1,\label{U_0-est}\\
&\nnm{U_1}_{W^{2,\infty}_tW^{2,rN}_xL^\infty_w}+\nm{U_1}_{W^{2,\infty}_tW^{2,rN}_{\iota_1,\iota_2}L^{\infty}_w}\lesssim 1,\label{U_1-est}\\
&\nnm{U_2}_{W^{1,\infty}_tW^{1,rN}_xL^\infty_w}+\nm{U_2}_{W^{1,\infty}_tW^{1,rN}_{\iota_1,\iota_2}L^{\infty}_w}\lesssim 1,\label{U_2-est}
\varepsilonnd{align}
and
\begin{align}
&\nnm{\mathrm{e}^{\tau}Uuu_0}_{L^{\infty}_\tau W^{3,\infty}_xL^{\infty}_w}\lesssim 1,\label{U^I_0-est}\\
&\nnm{\mathrm{e}^{\tau}Uuu_1}_{L^{\infty}_\tau W^{2,\infty}_xL^{\infty}_w}\lesssim 1,\label{U^I_1-est}\\
&\nnm{\mathrm{e}^{\beta\varepsilonta\,}Uu_0}_{W^{2,\infty}_tW^{3,\infty}_{\iota_1,\iota_2}W^{1,\infty}_\partialsi L^{\infty}_{\varepsilonta,\partialhi}}\lesssim 1 \label{U^B_0-est}
\varepsilonnd{align}
for any $0<\beta< 1$.
\varepsilonnd{proposition}
\section{Remainder Equation}\label{Sec:remainder-eq}
Denote the approximate solution
\begin{align}
Ua:=\left(U_0+\varepsilonU_1+\varepsilon^2U_2\mathbb{R}ight)+\left(Uuu_0+\varepsilonUuu_1\mathbb{R}ight)+Uu_0,
\varepsilonnd{align}
and so the remainder
\begin{align}\label{expand-R}
\mathbb{R}e:= u^\varepsilon - Ua.
\varepsilonnd{align}
Inserting \varepsilonqref{expand-R} into \varepsilonqref{transport}, we have
\begin{align}
&\varepsilon\partialartial_t\big(Ua+\mathbb{R}e\big)+w\cdot\nabla_x\big(Ua+\mathbb{R}e\big)+\varepsilon^{-1}\big(Ua+\mathbb{R}e\big)-\varepsilon^{-1}\big(\overline{\u}a+\overline{\re}\big)=0,\\
&\big(Ua+\mathbb{R}e\big)\big|_{t=0}=u_o,\qquad \big(Ua+\mathbb{R}e\big)\big|_{\Gamma_-}=g,\nonumber
\varepsilonnd{align}
which yields
\begin{align}
&\varepsilon\partialartial_t\mathbb{R}e+w\cdot\nabla_x\mathbb{R}e+\varepsilon^{-1}\big(\mathbb{R}e-\overline{\re}\big)
=-\varepsilon\partialartial_tUa -w\cdot\nabla_xUa-\varepsilon^{-1}\big(Ua-\overline{\u}a\big),\\
&\mathbb{R}e|_{t=0}=u_o-Ua|_{t=0},\qquad
\mathbb{R}e|_{\Gamma_-}=g-Ua|_{\Gamma_-}.\nonumber
\varepsilonnd{align}
Consider the initial-boundary value problem for the remainder $\mathbb{R}e(t,x,w)$:
\begin{align}\label{remainder}
\left\{
\begin{array}{l}\deltaisplaystyle
\varepsilon\partialartial_t\mathbb{R}e+w\cdot\nabla_x \mathbb{R}e+\varepsilon^{-1}\big(\re-\bre\big)=\Ss\ \ \text{ in}\ \ \mathbb{R}p\times\Omega\times\S^2,\\\mathbb{R}ule{0ex}{1.5em}
\mathbb{R}e(0,x,w)=\mathcal{I}(x,w)\ \ \text{ in}\ \ \Omega\times\S^2,\\\mathbb{R}ule{0ex}{1.5em}
\mathbb{R}e(t,x_0,w)=\mathcal{G}(t,x_0,w)\ \ \text{ for}\
\ x_0\in\partial\Omega\ \ \text{and}\ \ w\cdotn(x_0)<0,
\varepsilonnd{array}
\mathbb{R}ight.
\varepsilonnd{align}
where
\begin{align}
\overline{\re}(t,x)=\frac{1}{4\partiali}\int_{\S^2}\mathbb{R}e(t,x,w)\,\mathrm{d}{w}.
\varepsilonnd{align}
In particular, we know the initial data is given by
\begin{align}\label{d:z}
\mathcal{I}:=\varepsilon^2w\cdot\nabla_xU_1\big|_{t=0} + \varepsilon^2\partialartial_tU_0\big|_{t=0},
\varepsilonnd{align}
and the boundary data is given by
\begin{align}\label{d:h}
\mathcal{G}:=\widetilde{\chi}i(\varepsilon^{-1}\partialhi)\big(g-\Phi_{0,\infty}\big) -\varepsilonUuu_1\big|_{\Gamma_-}
+\varepsilon\big(w\cdot\nabla_xU_0\big)\big|_{\Gamma_-} +\varepsilon^2\big(w\cdot\nabla_xU_1\big)\big|_{\Gamma_-}
+ \varepsilon^2\partialartial_t\Phi_{0,\infty}.
\varepsilonnd{align}
Note that with the compatibility condition $u_o|_{\mathcal{G}amma}=g|_{t=0}=C(x_0)$ (see \varepsilonqref{compatibility}), we actually have $Uu_0\big|_{t=0}=0$ from the Milne problem and $Uuu_0\big|_{\Gamma_-}=0$ from the initial layer problem.
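For completeness, here is a sketch of how \eqref{d:h} arises: on $\Gamma_-$ we have $\mathcal{G}=g-Ua|_{\Gamma_-}$, and using $U_0|_{\partial\Omega}=\Phi_{0,\infty}$, the boundary condition for $Uu_0$ in \eqref{Pb:boundary-layer'}, $Uuu_0|_{\Gamma_-}=0$, and $\overline{\u}_1|_{\partial\Omega}=\overline{\u}_2|_{\partial\Omega}=0$ (from \eqref{U_1-pb} and \eqref{U_2-pb}), we obtain
\begin{align*}
\mathcal{G}
&=\big(g-\Phi_{0,\infty}\big)-\widetilde{\chi}(\varepsilon^{-1}\phi)\big(g-\Phi_{0,\infty}\big)
-\varepsilon U_1\big|_{\Gamma_-}-\varepsilon^{2}U_2\big|_{\Gamma_-}-\varepsilon Uuu_1\big|_{\Gamma_-}\\
&=\widetilde{\chi}i(\varepsilon^{-1}\phi)\big(g-\Phi_{0,\infty}\big)
+\varepsilon\big(w\cdot\nabla_xU_0\big)\big|_{\Gamma_-}
+\varepsilon^{2}\big(w\cdot\nabla_xU_1+\partial_tU_0\big)\big|_{\Gamma_-}
-\varepsilon Uuu_1\big|_{\Gamma_-},
\end{align*}
which coincides with \eqref{d:h} upon noting $\partial_tU_0|_{\partial\Omega}=\partial_t\Phi_{0,\infty}$.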
The source term can be split as
\begin{align}\label{d:ss}
\Ss&:= -\varepsilon\partialartial_tUa -w\cdot\nabla_xUa-\varepsilon^{-1}\big(Ua-\overline{\u}a\big)\\
&\;=:\Sss+\Ssd+\Ssa+\Ssb+\Ssc \nonumber
\varepsilonnd{align}
with the expressions
\begin{align}
\Sss:=&\,-\varepsilon^2w\cdot\nabla_xU_2-\varepsilon^2\partialartial_tU_1-\varepsilon^3\partialartial_tU_2,\label{d:s0}\\
\Ssd:=&\,-\varepsilonw\cdot\nabla_xUuu_1,\label{d:s4}\\
\Ssa:=&\:\bigg(\deltafrac{\Sin^2\partialsi}{\mathbb{R}c_1-\varepsilon\varepsilonta}+\deltafrac{\cos^2\partialsi}{\mathbb{R}c_2-\varepsilon\varepsilonta}\bigg)\cos\partialhi\,\deltafrac{\partialUu_0}{\partial\partialhi},\label{d:s1}\\
\Ssb:=&\;-\varepsilon\partialartial_tUu_0 -\frac{\mathbb{R}c_1\cos\partialhi\Sin\partialsi}{\partiall_1(\mathbb{R}c_1-\varepsilon\varepsilonta)}\frac{\partialUu_0}{\partial\iota_1} -\frac{\mathbb{R}c_2\cos\partialhi\cos\partialsi}{\partiall_2(\mathbb{R}c_2-\varepsilon\varepsilonta)}\frac{\partialUu_0}{\partial\iota_2}\label{d:s2}\\
&\;-\frac{\Sin\partialsi}{\mathbb{R}c_1-\varepsilon\varepsilonta}\bigg\{\frac{\mathbb{R}c_1\cos\partialhi}{\partiall_1\partiall_2}\Big[\varsigma_1\cdot\Big(\varsigma_2\times\big(\partial_{\iota_1\iota_2}\mathbf{r}\times\varsigma_2\big)\Big)\Big]
-\Sin\partialhi\cos\partialsi\bigg\}\frac{\partialUu_0}{\partial\partialsi}\nonumber\\
&\;
+\frac{\cos\partialsi}{\mathbb{R}c_2-\varepsilon\varepsilonta}\bigg\{\frac{\mathbb{R}c_2\cos\partialhi}{\partiall_1\partiall_2}\Big[\varsigma_2\cdot\Big(\varsigma_1\times\big(\partial_{\iota_1\iota_2}\mathbf{r}\times\varsigma_1\big)\Big)\Big]
-\Sin\partialhi\Sin\partialsi\bigg\}\frac{\partialUu_0}{\partial\partialsi},\nonumber\\
\Ssc:=&\,-\varepsilon^{-1}\Big\{\Sin\partialhi\cdot\varepsilon^{\frac{1}{2}}\widetilde{\chi}i'(\varepsilon^{\frac{1}{2}}\varepsilonta)\widetilde{\chi}(\varepsilon^{-1}\partialhi)\Phil_0
+ \widetilde{\chi}i(\varepsilon^{\frac{1}{2}}\varepsilonta)\Big[\overline{\widetilde{\chi}i(\varepsilon^{-1}\partialhi)\Phil_0} -\widetilde{\chi}i(\varepsilon^{-1}\partialhi)\overline{\Phil}_0 \Big]\Big\}.\label{d:s3}
\varepsilonnd{align}
We now give preliminary estimates on the initial, boundary, and source terms specified above.
The proof is straightforward and largely based on Proposition~\ref{prop:wellposedness} together with the definitions of the rescaled variables and cutoffs (cf. \cite[Section~3.3]{AA020}).
\begin{lemma}\label{lem:source-est}
Assume \varepsilonqref{assumption}\varepsilonqref{compatibility} hold for $u_o$ and $g$.
For the initial term $\mathcal{I}(x,w)$ given in \varepsilonqref{d:z}, we have
\begin{align}\label{H-est}
\tnm{\mathcal{I}} \lesssim \varepsilon^2.
\varepsilonnd{align}
For the boundary term $\mathcal{G}(t,x_0,w)$ given in \varepsilonqref{d:h}, we have
\begin{align}\label{G-est}
\tnnms{\mathcal{G}}{\Gamma_-}\lesssim\varepsilon.
\varepsilonnd{align}
For the source term $\Ss(t,x,w)$ given in \varepsilonqref{d:ss}--\varepsilonqref{d:s3}, we have
\begin{align}
&\qquad \tnnm{\Sss}\lesssim\varepsilon^2, \qquad \tnnm{\Ssd} \lesssim\varepsilon^2,\label{S-is-il-est}\\
&\tnnm{\big(1+\varepsilonta\big)\Ssa}\lesssim1,\qquad
\tnnm{\big(1+\varepsilonta\big)\Ss^{B\!L}_2}\lesssim \varepsilon^{\frac{1}{2}},\label{S-bl-12-est}\\
&\tnnm{\big(1+\varepsilonta\big)\Ss^{B\!L}_3}\lesssim1,\qquad \nnm{\big(1+\varepsilonta\big)\Ss^{B\!L}_3}_{L^2_tL^2_xL^1_w}\lesssim \varepsilon^{\frac{1}{2}}.\label{S-bl-3-est}
\varepsilonnd{align}
Moreover, the boundary layer $Uu_0$ defined in \varepsilonqref{boundary layer} satisfies
\begin{align}\label{BL-est}
\tnnm{\big(1+\varepsilonta\big)Uu_0}
\lesssim \varepsilon^{\frac{1}{2}}.
\varepsilonnd{align}
\varepsilonnd{lemma}
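To illustrate the typical computation behind these bounds, consider \eqref{BL-est} (a sketch): by \eqref{U^B_0-est} we have $\abs{(1+\eta)Uu_0}\lesssim(1+\eta)\mathrm{e}^{-\beta\eta}\lesssim\mathrm{e}^{-\frac{\beta}{2}\eta}$ with $\eta=\varepsilon^{-1}\mu$, and the spatial cutoff $\widetilde{\chi}i(\varepsilon^{\frac{1}{2}}\eta)$ confines $Uu_0$ to the thin collar $\{\mu\leq2\varepsilon^{\frac{1}{2}}\}$, where the boundary-layer coordinates have a bounded Jacobian, so
\begin{align*}
\tnnm{\big(1+\eta\big)Uu_0}^{2}
\lesssim\int_0^T\!\!\int_{\partial\Omega}\!\int_0^{2\varepsilon^{1/2}}\mathrm{e}^{-\beta\mu/\varepsilon}\,\mathrm{d}\mu\,\mathrm{d}S_x\,\mathrm{d}t
\lesssim\varepsilon,
\end{align*}
which gives \eqref{BL-est} after taking square roots (the $w$-integral contributes an $O(1)$ factor that we suppress).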
In preparation for the a priori estimate of the remainder, we record the weak formulation of the remainder equation, which relies on the validity of integration by parts for the transport operator (cf. \cite[Lemma 2.2]{Esposito.Guo.Kim.Marra2013}):
\begin{lemma}[Green's identity]\label{lem:green}
Assume $f(t,x,w),\ g(t,x,w)\in L^{\infty}\big([0,T]; L^2(\Omega\times\S^2)\big)$ with
$\partialartial_t f+w\cdot\nabla_x f,\ \partialartial_t g+w\cdot\nabla_x g\in L^2([0,T]\times\Omega\times\S^2)$ and $f,\
g\in L^2_{\Gamma}$. Then for almost all $t,s\in[0,T]$, we have
\begin{align}
&\int_s^t\iint_{\Omega\times\S^2}\Big\{\big(\partialartial_t f+w\cdot\nabla_x f\big)g+\big(\partialartial_t g+w\cdot\nabla_x
g\big)f\Big\}\\
=&\iint_{\Omega\times\S^2}f(t)g(t)-\iint_{\Omega\times\S^2}f(s)g(s)+\int_s^t\!\int_{\mathcal{G}amma}fg(w\cdot n).\nonumber
\varepsilonnd{align}
\varepsilonnd{lemma}
With this result, the weak formulation of \varepsilonqref{remainder} takes the following form:
for any test function $\varphi(t,x,w)\in L^{\infty}\big([0,T]; L^2(\Omega\times\S^2)\big)$ with $\partialartial_t\varphi+w\cdot\nabla_x\varphi\in L^2([0,T]\times\Omega\times\S^2)$ and $\varphi\in L^2_{\Gamma}$,
it holds for any $t\in[0,T]$ that
\begin{align}\label{weak formulation}
&\;\varepsilon\bbrb{\mathbb{R}e(t),\varphi(t)}{x,w}-\varepsilon\bbrb{\mathcal{I},\varphi(0)}{x,w}
+\br{\mathbb{R}e, \varphi}_{\Gamma_+} - \br{\mathcal{G}, \varphi}_{\Gamma_-} \\
=&\; \varepsilon\deltabbr{\mathbb{R}e,\partialartial_t\varphi}+\deltabbr{\mathbb{R}e, w\cdot\nabla_x \varphi}-\varepsilon^{-1}\deltabbr{\mathbb{R}e-\overline{\re}, \varphi} +\deltabr{\Ss, \varphi}.\nonumber
\varepsilonnd{align}
Here, the time integral is taken over $[0,t]$.
\section{Remainder Estimate}\label{Sec:remainder-est}
The goal of this section is to prove an estimate for the remainder
\begin{align}
\mathbb{R}e:= u^\varepsilon - \Big[\left(U_0+\varepsilonU_1+\varepsilon^2U_2\mathbb{R}ight)+\left(Uuu_0+\varepsilonUuu_1\mathbb{R}ight)+Uu_0\Big].
\varepsilonnd{align}
\begin{theorem}[Remainder estimate]\label{thm:remainder-est}
Under the assumption \varepsilonqref{assumption} with \varepsilonqref{compatibility}, we have
\begin{align}
\nnm{\mathbb{R}e}_{L^{\infty}_tL^2_{x,w}}+\varepsilon^{-\frac{1}{2}}\tnnms{\mathbb{R}e}{\Gamma_+}+\varepsilon^{-\frac{1}{2}}\tnnm{\overline{\re}}+\varepsilon^{-1}\tnnm{\re-\bre}\lesssim 1,
\varepsilonnd{align}
where the implicit constant is independent of $\varepsilon$.
\varepsilonnd{theorem}
The desired estimate follows immediately by combining the following two lemmas, which occupy the rest of this section.
\begin{lemma}[Energy estimate]\label{lem:energy}
Under the assumption \varepsilonqref{assumption} with \varepsilonqref{compatibility}, we have
\begin{align}\label{energy}
\nnm{\mathbb{R}e}_{L^{\infty}_tL^2_{x,w}}^2+\varepsilon^{-1}\tnnms{\mathbb{R}e}{\Gamma_+}^2+\varepsilon^{-2}\tnnm{\re-\bre}^2\lesssim \delta\varepsilon^{-1}\tnnm{\overline{\re}}^2+\delta^{-1}.
\varepsilonnd{align}
Here $0<\delta\ll 1$ is a constant that can be taken sufficiently small.
\varepsilonnd{lemma}
\begin{proof}
Taking $\varphi=\varepsilon^{-1}\mathbb{R}e$ in the weak formulation \varepsilonqref{weak formulation} and using the fundamental theorem of calculus and divergence theorem, we obtain
\begin{align}
\tfrac{1}{2}\tnm{\mathbb{R}e(t)}^2-\tfrac{1}{2}\tnm{\mathcal{I}}^2+\tfrac{1}{2}\varepsilon^{-1}\tnnms{\mathbb{R}e}{\Gamma_+}^2 - \tfrac{1}{2}\varepsilon^{-1}\tnnms{\mathcal{G}}{\Gamma_-}^2
=-\varepsilon^{-2}\deltabr{\re-\bre,\mathbb{R}e} +\varepsilon^{-1}\deltabr{\Ss, \mathbb{R}e}.
\varepsilonnd{align}
Observing the orthogonality $\br{\overline{\re},\re-\bre}_{w}=0$, we then rearrange the terms to arrive at
\begin{align}\label{Feimeng}
\tfrac{1}{2}\tnm{\mathbb{R}e(t)}^2+\tfrac{1}{2}\varepsilon^{-1}\tnnms{\mathbb{R}e}{\Gamma_+}^2+\varepsilon^{-2}\tnnm{\re-\bre}^2
=\tfrac{1}{2}\tnm{\mathcal{I}}^2+\tfrac{1}{2}\varepsilon^{-1}\tnnms{\mathcal{G}}{\Gamma_-}^2 +\varepsilon^{-1}\deltabr{\Ss,\mathbb{R}e}.
\varepsilonnd{align}
By \varepsilonqref{H-est} and \varepsilonqref{G-est}, we know
\begin{align}\label{energy 01}
\tnm{\mathcal{I}}^2+\varepsilon^{-1}\tnnms{\mathcal{G}}{\Gamma_-}^2 \lesssim \varepsilon.
\varepsilonnd{align}
Now we split the last term in \varepsilonqref{Feimeng} and estimate each part separately.
Using Young's inequality and by \varepsilonqref{S-is-il-est}, we have
\begin{align}\label{energy 02}
\varepsilon^{-1}\abs{\deltabbr{\Sss+\Ssd,\mathbb{R}e}}\lesssim \delta\tnnm{\mathbb{R}e}^2+\delta^{-1}\varepsilon^{-2}\tnnm{\Sss+\Ssd}^2\lesssim \delta\tnnm{\mathbb{R}e}^2+\delta^{-1}\varepsilon^2.
\varepsilonnd{align}
Likewise, by \varepsilonqref{S-bl-12-est} and \varepsilonqref{S-bl-3-est}, we have
\begin{align}\label{energy 03}
\varepsilon^{-1}\abs{\deltabbr{\Ssa+\Ssb+\Ssc,\re-\bre}}
&\lesssim\delta\varepsilon^{-2}\tnnm{\re-\bre}^2+\delta^{-1}\tnnm{\Ssa+\Ssb+\Ssc}^2\\
&\lesssim \delta\varepsilon^{-2}\tnnm{\re-\bre}^2+\delta^{-1}.\nonumber
\varepsilonnd{align}
For $\varepsilon^{-1}\deltabbr{\Ssa,\overline{\re}}$, we use integration by parts in $\partialhi$ (note that the velocity integral contains a Jacobian $\cos\partialhi$) followed by \varepsilonqref{BL-est} and get
\begin{align}\label{energy 04}
\varepsilon^{-1}\abs{\deltabbr{\Ssa,\overline{\re} }}&=\varepsilon^{-1}\abs{\deltabr{ \Big(\tfrac{\Sin^2\partialsi}{R_1-\varepsilon\varepsilonta}+\tfrac{\cos^2\partialsi}{R_2-\varepsilon\varepsilonta}\Big)\cos\partialhi\,\partial_{\partialhi}Uu_0,\overline{\re}}}\\
&=\varepsilon^{-1}\abs{\deltabr{ \Big(\tfrac{\Sin^2\partialsi}{R_1-\varepsilon\varepsilonta}+\tfrac{\cos^2\partialsi}{R_2-\varepsilon\varepsilonta}\Big)2\Sin\partialhi\,Uu_0,\overline{\re}}}\nonumber\\
&\lesssim\varepsilon^{-1}\nnm{Uu_0}_{L^2_tL^2_xL^1_w}\tnnm{\overline{\re}}\lesssim\varepsilon^{-\frac{1}{2}}\tnnm{\overline{\re}}\lesssim \delta\varepsilon^{-1}\tnnm{\overline{\re}}^2+\delta^{-1}.\nonumber
\varepsilonnd{align}
Turning to the remaining terms, we use \varepsilonqref{S-bl-12-est}\varepsilonqref{S-bl-3-est} to estimate
\begin{align}\label{energy 05}
\varepsilon^{-1}\abs{\deltabbr{\Ssb+\Ssc,\overline{\re}}}
&\lesssim\varepsilon^{-1}\tnnm{\overline{\re}}\Big(\nnm{\Ssb}_{L^2_tL^2_xL^1_w}+\nnm{\Ssc}_{L^2_tL^2_xL^1_w}\Big)\\
&\lesssim\varepsilon^{-\frac{1}{2}}\tnnm{\overline{\re}}\lesssim \delta\varepsilon^{-1}\tnnm{\overline{\re}}^2+\delta^{-1}.\nonumber
\varepsilonnd{align}
Collecting \varepsilonqref{energy 02} through \varepsilonqref{energy 05}, we deduce that
\begin{align}\label{energy 06}
\varepsilon^{-1}\abs{\deltabr{\Ss, \mathbb{R}e}}\lesssim \delta\varepsilon^{-2}\tnnm{\re-\bre}^2+\delta\varepsilon^{-1}\tnnm{\overline{\re}}^2+\delta^{-1}.
\varepsilonnd{align}
Finally, combining \varepsilonqref{Feimeng} with \varepsilonqref{energy 01} and \varepsilonqref{energy 06} settles \varepsilonqref{energy}.
\varepsilonnd{proof}
\begin{lemma}[Kernel estimate]\label{lem:kernel}
Under the assumption \varepsilonqref{assumption} with \varepsilonqref{compatibility}, we have
\begin{align}\label{kernel}
\tnnm{\overline{\re}}^2\lesssim\varepsilon\nnm{\mathbb{R}e}_{L^{\infty}_tL^2_{x,w}}^2+\tnnm{\re-\bre}^2 +\tnnms{\mathbb{R}e}{\Gamma_+}^2+\varepsilon.
\varepsilonnd{align}
\varepsilonnd{lemma}
\begin{proof}
We will use an auxiliary Dirichlet problem for the Poisson equation to construct our test functions.
Let $\xi(t,x):=(-\Delta_x)^{-1}\overline{\re}(t,x)$ be the solution to (for fixed $t$)
\begin{align}
\left\{
\begin{array}{l}
-\Delta_x\xi=\overline{\re}\ \ \text{ in}\
\ \Omega,\\\mathbb{R}ule{0ex}{1.2em}
\xi=0\ \ \quad\text{ on}\ \
\partial\Omega.
\varepsilonnd{array}
\mathbb{R}ight.
\varepsilonnd{align}
Based on the standard elliptic estimate and the trace theorem, for every $t\in\mathbb{R}p$ we have
\begin{align}\label{kernel 01}
\nm{\xi(t)}_{H^2(\Omega)}+\babs{\nabla_x\xi(t)}_{H^{\frac{1}{2}}(\partial\Omega)}\lesssim\nm{\overline{\re}(t)}_{L^2(\Omega)}.
\varepsilonnd{align}
We first test \varepsilonqref{remainder} with $\varphi=\xi$ (or equivalently, take $\varphi=\xi$ in \varepsilonqref{weak formulation}) to get
\begin{align}
\varepsilon\deltabbr{\partialartial_t\mathbb{R}e,\xi} +\br{\mathbb{R}e, \xi}_{\Gamma_+} - \br{\mathcal{G}, \xi}_{\Gamma_-}
-\deltabbr{\mathbb{R}e,w\cdot\nabla_x\xi}+\varepsilon^{-1}\deltabbr{\re-\bre,\xi}=\deltabr{\Ss,\xi}.
\varepsilonnd{align}
Notice that $\br{\re-\bre,\xi}_{w}=0$ and $\br{\partialartial_t\mathbb{R}e- \partialartial_t\overline{\re},\xi}_{w}=0$ by orthogonality (since $\xi$ is independent of $w$), that $\br{\overline{\re},w\cdot\nabla_x\xi}_{w}=0$ by oddness in $w$, and that the boundary terms vanish since $\xi|_{\partial\Omega}=0$. Hence, we are left with
\begin{align}\label{kernel 02}
\varepsilon\deltabbr{\partialartial_t\overline{\re},\xi}-\deltabbr{\re-\bre,w\cdot\nabla_x\xi}=\deltabr{\Ss,\xi}.
\varepsilonnd{align}
We then test \varepsilonqref{remainder} with $\varphi=w\cdot\nabla_x\xi$ to get
\begin{align}\label{kernel 03}
\varepsilon\deltabbr{\partialartial_t\mathbb{R}e,w\cdot\nabla_x\xi}+\bbr{\mathbb{R}e, w\cdot\nabla_x\xi}_{\Gamma_+} - \bbr{\mathcal{G}, w\cdot\nabla_x\xi}_{\Gamma_-}\\
-\deltabbr{\mathbb{R}e,w\cdot\nabla_x\big(w\cdot\nabla_x\xi\big)}+\varepsilon^{-1}\deltabbr{\re-\bre,w\cdot\nabla_x\xi}
&=\deltabbr{\Ss,w\cdot\nabla_x\xi}.\nonumber
\varepsilonnd{align}
Adding $\varepsilon^{-1}\times$\varepsilonqref{kernel 02} and \varepsilonqref{kernel 03} to eliminate $\varepsilon^{-1}\deltabbr{\re-\bre,w\cdot\nabla_x\xi}$, we obtain
\begin{align}\label{kernel 04}
\deltabbr{\partialartial_t\overline{\re},\xi}+\varepsilon\deltabbr{\partialartial_t\mathbb{R}e,w\cdot\nabla_x\xi}+\bbr{\mathbb{R}e, w\cdot\nabla_x\xi}_{\Gamma_+} - \bbr{\mathcal{G}, w\cdot\nabla_x\xi}_{\Gamma_-}\\
-\deltabbr{\mathbb{R}e,w\cdot\nabla_x\big(w\cdot\nabla_x\xi\big)}&=\varepsilon^{-1}\deltabr{\Ss,\xi}+\deltabbr{\Ss,w\cdot\nabla_x\xi}.\nonumber
\varepsilonnd{align}
On the one hand, we may split
\begin{align}
-\deltabbr{\mathbb{R}e,w\cdot\nabla_x\big(w\cdot\nabla_x\xi\big)}=
-\deltabbr{\overline{\re},w\cdot\nabla_x\big(w\cdot\nabla_x\xi\big)}
-\deltabbr{\re-\bre,w\cdot\nabla_x\big(w\cdot\nabla_x\xi\big)},
\varepsilonnd{align}
where the kernel part has the key positivity:
\begin{align}
-\deltabbr{\overline{\re},w\cdot\nabla_x\big(w\cdot\nabla_x\xi\big)}\Simeq-\deltabbr{\overline{\re},\Delta_x\xi}=\tnnm{\overline{\re}}^2,
\varepsilonnd{align}
while the non-kernel part can be bounded as
\begin{align}
\abs{\deltabbr{\re-\bre,w\cdot\nabla_x\big(w\cdot\nabla_x\xi\big)}}\lesssim\tnnm{\re-\bre}\nnm{\xi}_{L^2_tH^2_x}\lesssim\delta\tnnm{\overline{\re}}^2+\delta^{-1}\tnnm{\re-\bre}^2.
\varepsilonnd{align}
On the other hand, integration by parts reveals that
\begin{align}
\deltabbr{\partialartial_t\overline{\re},\xi}=-\deltabbr{\partialartial_t\Delta_x\xi,\xi}=\deltabbr{\partialartial_t\nabla_x\xi,\nabla_x\xi}=\tfrac{1}{2}\btnm{\nabla_x\xi(t)}^2-\tfrac{1}{2}\btnm{\nabla_x\xi(0)}^2,
\varepsilonnd{align}
where \varepsilonqref{H-est} yields
\begin{align}
\tfrac{1}{2}\btnm{\nabla_x\xi(0)}^2\lesssim\tnm{\overline{\re}(0)}^2=\tnm{\overline\mathcal{I}}^2 \lesssim \tnm{\mathcal{I}}^2\lesssim\varepsilon^4.
\varepsilonnd{align}
In addition, we bound
\begin{align}
\varepsilon\deltabbr{\partialartial_t\mathbb{R}e,w\cdot\nabla_x\xi}&=\varepsilon\deltabbr{\partialartial_t\left(\re-\bre\mathbb{R}ight), w\cdot\nabla_x\xi}\\
&=-\varepsilon\deltabbr{\re-\bre, w\cdot\nabla_x\partialartial_t\xi}
+\varepsilon\bbrb{\left(\re-\bre\mathbb{R}ight)(t),w\cdot\nabla_x\xi(t)}{x,w}-\varepsilon\bbrb{\left(\re-\bre\mathbb{R}ight)(0),w\cdot\nabla_x\xi(0)}{x,w},\nonumber
\varepsilonnd{align}
by
\begin{align}
\varepsilon\Babs{\deltabbr{\re-\bre, w\cdot\nabla_x\partialartial_t\xi}}&\lesssim\varepsilon\tnnm{\re-\bre}\btnnm{\nabla_x\partialartial_t\xi}\lesssim\tnnm{\re-\bre}^2+\varepsilon^2\btnnm{\nabla_x\partialartial_t\xi}^2,\\
\varepsilon\abs{\bbrb{\left(\re-\bre\mathbb{R}ight)(t),w\cdot\nabla_x\xi(t)}{x,w}}
&\lesssim\varepsilon\tnm{\left(\re-\bre\mathbb{R}ight)(t)}\nm{\xi(t)}_{H^1}\lesssim\varepsilon\tnm{\mathbb{R}e(t)}^2,\\
\varepsilon\abs{\bbrb{\left(\re-\bre\mathbb{R}ight)(0),w\cdot\nabla_x\xi(0)}{x,w}}
&\lesssim \varepsilon\tnm{\mathbb{R}e(0)}^2 =\varepsilon\tnm{\mathcal{I}}^2\lesssim\varepsilon^5.
\varepsilonnd{align}
Also, the boundary terms can be controlled using \varepsilonqref{kernel 01} and \varepsilonqref{G-est}:
\begin{align}\label{boundary-est}
\abs{\bbr{\mathbb{R}e, w\cdot\nabla_x\xi}_{\Gamma_+} - \bbr{\mathcal{G}, w\cdot\nabla_x\xi}_{\Gamma_-}}&\lesssim\Big(\tnnms{\mathbb{R}e}{\Gamma_+}+\tnnms{\mathcal{G}}{\Gamma_-}\Big)\nm{\nabla_x\xi}_{L^2_{t,\partial\Omega}}\\
&\lesssim\delta\tnnm{\overline{\re}}^2+\delta^{-1}\tnnms{\mathbb{R}e}{\Gamma_+}^2+\delta^{-1}\varepsilon^2.\nonumber
\varepsilonnd{align}
Collecting \varepsilonqref{kernel 04} through \varepsilonqref{boundary-est}, we deduce that
\begin{align}\label{kernel 05}
\btnm{\nabla_x\xi(t)}^2+\tnnm{\overline{\re}}^2\lesssim&\;\varepsilon^2\btnnm{\nabla_x\partialartial_t\xi}^2+\varepsilon\tnm{\mathbb{R}e(t)}^2+\delta^{-1}\tnnm{\re-\bre}^2 +\delta^{-1}\tnnms{\mathbb{R}e}{\Gamma_+}^2+\delta^{-1}\varepsilon^2\\
&\;+\varepsilon^{-1}\abs{\deltabr{\Ss,\xi}}+\abs{\deltabbr{\Ss,w\cdot\nabla_x\xi}}.\nonumber
\varepsilonnd{align}
Next, we estimate the term $\varepsilon^2\tnnm{\nabla_x\partialartial_t\xi}^2$ on the RHS above.
Let $\mathcal{I}eta:=\partialartial_t\xi=(-\Delta_x)^{-1}\partialartial_t\overline{\re}$ denote the solution to
\begin{align}
\left\{
\begin{array}{l}
-\Delta_x\mathcal{I}eta=\partialartial_t\overline{\re}\ \ \text{ in}\
\ \Omega,\\\mathbb{R}ule{0ex}{1.2em}
\mathcal{I}eta=0\ \ \quad\text{ on}\ \
\partial\Omega.
\varepsilonnd{array}
\mathbb{R}ight.
\varepsilonnd{align}
Poincar\'e's inequality indicates that
\begin{align}\label{kernel 08}
\nm{\mathcal{I}eta(t)}_{L^2(\Omega)}\lesssim\nm{\nabla_x\mathcal{I}eta(t)}_{L^2(\Omega)}=\bnm{\nabla_x\partialartial_t\xi(t)}_{L^2(\Omega)}.
\varepsilonnd{align}
Testing \varepsilonqref{remainder} against $\varphi=\varepsilon\mathcal{I}eta$ and arguing as for \varepsilonqref{kernel 02}, we get
\begin{align}\label{kernel 06}
\varepsilon^2\deltabbr{\partialartial_t\overline{\re},\mathcal{I}eta}-\varepsilon\deltabbr{\re-\bre,w\cdot\nabla_x\mathcal{I}eta}=\varepsilon\deltabr{\Ss,\mathcal{I}eta}.
\varepsilonnd{align}
Again, integration by parts reveals that
\begin{align}\label{kernel 06-1}
\varepsilon^2\deltabbr{\partialartial_t\overline{\re},\mathcal{I}eta}=
-\varepsilon^2\deltabbr{\Delta_x\mathcal{I}eta,\mathcal{I}eta}=\varepsilon^2\tnnm{\nabla_x\mathcal{I}eta}^2=\varepsilon^2\btnnm{\nabla_x\partialartial_t\xi}^2.
\varepsilonnd{align}
Also, we have
\begin{align}\label{kernel 06-2}
\varepsilon\abs{\deltabbr{\re-\bre,w\cdot\nabla_x\mathcal{I}eta}}\lesssim \varepsilon\tnnm{\re-\bre}\tnnm{\nabla_x\mathcal{I}eta}\lesssim \delta\varepsilon^2\btnnm{\nabla_x\partialartial_t\xi}^2+\delta^{-1}\tnnm{\re-\bre}^2.
\varepsilonnd{align}
Hence, \varepsilonqref{kernel 06} together with \varepsilonqref{kernel 06-1} and \varepsilonqref{kernel 06-2} yields
\begin{align}\label{kernel 07}
\varepsilon^2\btnnm{\nabla_x\partialartial_t\xi}^2\lesssim \delta^{-1}\tnnm{\re-\bre}^2+\varepsilon\abs{\deltabr{\Ss,\mathcal{I}eta}}.
\varepsilonnd{align}
We then insert \varepsilonqref{kernel 07} into \varepsilonqref{kernel 05} and obtain
\begin{align}\label{kernel 09}
\btnm{\nabla_x\xi(t)}^2+\varepsilon^2\btnnm{\nabla_x\partialartial_t\xi}^2+\tnnm{\overline{\re}}^2\lesssim&\;\varepsilon\tnm{\mathbb{R}e(t)}^2+\delta^{-1}\tnnm{\re-\bre}^2 +\delta^{-1}\tnnms{\mathbb{R}e}{\Gamma_+}^2+\delta^{-1}\varepsilon^2\\
&\;+\varepsilon^{-1}\abs{\deltabr{\Ss,\xi}}+\abs{\deltabbr{\Ss,w\cdot\nabla_x\xi}}+\varepsilon\abs{\deltabr{\Ss,\mathcal{I}eta}}.\nonumber
\varepsilonnd{align}
Now it remains to handle the last three terms above involving the source term $\Ss$.
By using \varepsilonqref{kernel 01}, \varepsilonqref{kernel 08}, and \varepsilonqref{S-is-il-est}, we see that
\begin{align}\label{kernel 10}
&\;\varepsilon^{-1}\abs{\deltabbr{\Sss+\Ssd, \xi}}+\abs{\deltabbr{\Sss+\Ssd, w\cdot\nabla_x\xi}}+\varepsilon\abs{\deltabbr{\Sss+\Ssd,\mathcal{I}eta}}\\
\lesssim&\;\varepsilon^{-1}\Big(\tnnm{\Sss}+\tnnm{\Ssd}\Big)\nnm{\xi}_{L^2_tH^1_x}+\varepsilon\Big(\tnnm{\Sss}+\tnnm{\Ssd}\Big)\tnnm{\mathcal{I}eta}\nonumber\\
\lesssim&\;\varepsilon\tnnm{\overline{\re}}+\varepsilon^3\btnnm{\nabla_x\partialartial_t\xi}\lesssim \delta\tnnm{\overline{\re}}^2+\delta\varepsilon^2\btnnm{\nabla_x\partialartial_t\xi}^2+\delta^{-1}\varepsilon^2.\nonumber
\varepsilonnd{align}
Analogously to \varepsilonqref{energy 04}, we may transfer $\Ssa$ to $Uu_0$ via integration by parts in $\partialhi$.
Noting that $\xi|_{\partial\Omega}=0$, we then invoke Hardy's inequality and \varepsilonqref{S-bl-12-est}, \varepsilonqref{S-bl-3-est}, and \varepsilonqref{BL-est}, followed by \varepsilonqref{kernel 01}, to bound
\begin{align}\label{kernel 11}
&\,\varepsilon^{-1}\Babs{\deltabbr{\Ssa+\Ssb+\Ssc, \xi}} \\
\lesssim&\;\varepsilon^{-1}\deltaBbr{\abs{Uu_0}+\abs{\Ssb}+\abs{\Ssc},\babs{\!\int_0^{\mu}\!\partial_{\mu}\xi}}
=\deltaBbr{\abs{\varepsilontaUu_0}+\abs{\varepsilonta\Ssb}+\abs{\varepsilonta\Ssc}, \Babs{\frac{1}{\mu}\int_0^{\mu}\!\partial_{\mu}\xi}}\nonumber\\
\lesssim&\, \Big(\bnnm{\varepsilontaUu_0}_{L^2_tL^2_xL^1_w}+\bnnm{\varepsilonta\Ssb}_{L^2_tL^2_xL^1_w}+\bnnm{\varepsilonta\Ssc}_{L^2_tL^2_xL^1_w}\Big)
\tnnm{\frac{1}{\mu}\int_0^{\mu}\!\partial_{\mu}\xi}\nonumber\\
\lesssim&\;\varepsilon^{\frac{1}{2}}\tnnm{\partial_{\mu}\xi} \lesssim\varepsilon^{\frac{1}{2}}\nnm{\xi}_{L^2_tH^1_x}
\lesssim \varepsilon^{\frac{1}{2}}\tnnm{\overline{\re}}
\lesssim\delta\tnnm{\overline{\re}}^2+\delta^{-1}\varepsilon.\nonumber
\varepsilonnd{align}
In a parallel fashion, we have
\begin{align}\label{kernel 12}
&\,\Babs{\deltabbr{\Ssa+\Ssb+\Ssc, w\cdot\nabla_x\xi}} \\
\lesssim&\,\deltaBbr{\abs{Uu_0}+\abs{\Ssb}+\abs{\Ssc}, \Babs{\big(\nabla_x\xi\big)\big|_{\mu=0}+\int_0^{\mu}\!\partial_\mu\big(\nabla_x\xi\big)}}\nonumber\\
\lesssim&\,\deltaBbr{\abs{Uu_0}+\abs{\Ssb}+\abs{\Ssc}, \abs{\nabla_x\xi}\big|_{\mu=0}}
+\varepsilon\,\deltaBbr{\abs{\varepsilontaUu_0}+\abs{\varepsilonta\Ssb}+\abs{\varepsilonta\Ssc}, \Babs{\frac{1}{\mu}\int_0^{\mu}\!\partial_\mu\big(\nabla_x\xi\big)}}\nonumber\\
\lesssim&\;\varepsilon^{\frac{1}{2}}\nm{\nabla_x\xi}_{L^2_{t,\partial\Omega}}
+\varepsilon^{\frac{3}{2}}\tnnm{\partial_\mu\big(\nabla_x\xi\big)}
\lesssim\varepsilon^{\frac{1}{2}}\nm{\nabla_x\xi}_{L^2_{t,\partial\Omega}}+\varepsilon^{\frac{3}{2}}\nnm{\xi}_{L^2_tH^2_x}\lesssim\varepsilon^{\frac{1}{2}}\tnnm{\overline{\re}}
\lesssim\delta\tnnm{\overline{\re}}^2+\delta^{-1}\varepsilon,\nonumber
\varepsilonnd{align}
and
\begin{align}\label{kernel 13}
&\;\varepsilon\Babs{\deltabbr{\Ssa+\Ssb+\Ssc, \mathcal{I}eta}} \\
\lesssim&\;\varepsilon\,\deltaBbr{\abs{Uu_0}+\abs{\Ssb}+\abs{\Ssc},\babs{\!\int_0^{\mu}\!\partial_\mu\mathcal{I}eta}}
=\varepsilon^2\deltaBbr{\abs{\varepsilontaUu_0}+\abs{\varepsilonta\Ssb}+\abs{\varepsilonta\Ssc}, \babs{\frac{1}{\mu}\int_0^{\mu}\!\partial_\mu\mathcal{I}eta}}\nonumber\\
\lesssim&\;\varepsilon^2 \Big(\bnnm{\varepsilontaUu_0}_{L^2_tL^2_xL^1_w}+\bnnm{\varepsilonta\Ssb}_{L^2_tL^2_xL^1_w}+\bnnm{\varepsilonta\Ssc}_{L^2_tL^2_xL^1_w}\Big) \tnnm{\frac{1}{\mu}\int_0^{\mu}\!\partial_\mu\mathcal{I}eta}\nonumber\\
\lesssim&\;\varepsilon^{\frac{5}{2}}\tnnm{\partial_{\mu}\mathcal{I}eta}
\lesssim \varepsilon^{\frac{5}{2}}\btnnm{\nabla_x\partialartial_t\xi}
\lesssim\delta\varepsilon^2\btnnm{\nabla_x\partialartial_t\xi}^2+\delta^{-1}\varepsilon^3.\nonumber
\varepsilonnd{align}
Collecting \varepsilonqref{kernel 10} through \varepsilonqref{kernel 13}, we find
\begin{align}\label{kernel 14}
\varepsilon^{-1}\abs{\deltabr{\Ss,\xi}}+\abs{\deltabbr{\Ss,w\cdot\nabla_x\xi}}+\varepsilon\abs{\deltabr{\Ss,\mathcal{I}eta}}
\lesssim\delta\tnnm{\overline{\re}}^2+\delta\varepsilon^2\btnnm{\nabla_x\partialartial_t\xi}^2+\delta^{-1}\varepsilon.
\varepsilonnd{align}
Combined with \varepsilonqref{kernel 09}, this yields
\begin{align}
\btnm{\nabla_x\xi(t)}^2+\varepsilon^2\btnnm{\nabla_x\partialartial_t\xi}^2+\tnnm{\overline{\re}}^2\lesssim\varepsilon\tnm{\mathbb{R}e(t)}^2+\delta^{-1}\tnnm{\re-\bre}^2 +\delta^{-1}\tnnms{\mathbb{R}e}{\Gamma_+}^2 +\delta^{-1}\varepsilon,
\varepsilonnd{align}
and thus \varepsilonqref{kernel} follows.
\varepsilonnd{proof}
\Section{Diffusive Limit}\label{Sec:diffusive-limit}
We are now ready to prove our main result for the in-flow boundary problem:
\begin{proof}[Proof of Theorem~\mathbb{R}ef{main theorem}]
The well-posedness of \varepsilonqref{transport} is rather standard and was addressed already in \cite{Bensoussan.Lions.Papanicolaou1979, Bardos.Santos.Sentis1984, AA016}.
The existence of $U_0$ is guaranteed in Section~\mathbb{R}ef{Subsec:matching}.
This leaves us to demonstrate the approximation estimate \varepsilonqref{main}.
Given the interior solutions $U_0,U_1,U_2$, the initial layers $Uuu_0, Uuu_1$, and the boundary layer $Uu_0$ constructed in Section~\mathbb{R}ef{Sec:asymptotic}, the remainder estimate (Theorem~\mathbb{R}ef{thm:remainder-est}) implies
\begin{align}\label{main 01}
\tnnm{u^{\varepsilon}-U_0-\varepsilonU_1-\varepsilon^2U_2-Uuu_0-\varepsilonUuu_1-Uu_0}\lesssim\varepsilon^{\frac{1}{2}}.
\varepsilonnd{align}
On the other hand,
using the estimates recalled in Proposition~\mathbb{R}ef{prop:wellposedness}, we see that
\begin{align}\label{main 02}
\tnnm{\varepsilonU_1+\varepsilon^2U_2}\lesssim \varepsilon,\qquad
\tnnm{Uuu_0+\varepsilonUuu_1}\lesssim\varepsilon,\qquad
\tnnm{Uu_0}\lesssim\varepsilon^{\frac{1}{2}},
\varepsilonnd{align}
where we take into account the temporal rescaling $\tau=\varepsilon^{-2}t$ for the initial layer and the normal spatial rescaling $\varepsilonta=\varepsilon^{-1}\mu$ for the boundary layer.
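For instance, assuming (as is typical for such layer constructions) exponential decay of the initial layer in the fast variable $\tau$ and of the boundary layer in the fast variable $\eta$, the scaling gains behind \eqref{main 02} come from elementary substitutions of the form
\begin{align}
\int_0^\infty e^{-2c\,\varepsilon^{-2}t}\,\mathrm{d}t=\frac{\varepsilon^{2}}{2c}
\qquad\text{and}\qquad
\int_0^\infty e^{-2c\,\varepsilon^{-1}\mu}\,\mathrm{d}\mu=\frac{\varepsilon}{2c},
\end{align}
which, after taking square roots, account for the factors $\varepsilon$ and $\varepsilon^{\frac{1}{2}}$, respectively.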
Consequently, \varepsilonqref{main} follows by combining \varepsilonqref{main 01} with \varepsilonqref{main 02}.
This completes the proof of the theorem.
\varepsilonnd{proof}
\Section{Diffuse-Reflection Boundary Problem}\label{Sec:diffuse-BC}
Consider the unsteady neutron transport equation with the diffuse-reflection boundary condition:
\begin{align}\label{transport=}
\left\{
\begin{array}{l}\deltaisplaystyle
\varepsilon\partialartial_t u^{\varepsilon}+w\cdot\nabla_x u^{\varepsilon}+\varepsilon^{-1}\big(u^{\varepsilon}-\overline{u}^{\varepsilon}\big)=0\ \ \text{ in}\ \ \mathbb{R}p\times\Omega\times\S^2,\\\mathbb{R}ule{0ex}{1.5em}
u^{\varepsilon}(0,x,w)=u_o(x,w)\ \ \text{ in}\ \ \Omega\times\S^2,\\\mathbb{R}ule{0ex}{1.5em}
u^{\varepsilon}(t,x_0,w)=\partialp[u^{\varepsilon}](t,x_0)+\varepsilon h(t,x_0,w)\ \ \text{ for}\
\ x_0\in\partial\Omega\ \ \text{and}\ \ w\cdotn(x_0)<0.
\varepsilonnd{array}
\mathbb{R}ight.
\varepsilonnd{align}
Here, $\partialp[u^{\varepsilon}]$ represents the weighted average over the outgoing boundary, i.e.,
\begin{align}
\partialp[u^{\varepsilon}](t,x_0):=c_{\mathcal{G}amma}\int_{w\cdotn>0}u^{\varepsilon}(t,x_0,w)(w\cdotn)\,\mathrm{d}w,
\varepsilonnd{align}
where the constant $c_{\mathcal{G}amma}=\frac{1}{\partiali}$ satisfies the normalization condition
$c_{\mathcal{G}amma}\int_{w\cdotn>0}(w\cdotn)\,\mathrm{d}w=1$.
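For completeness, the value $c_{\Gamma}=\frac{1}{\pi}$ follows from the elementary computation (in spherical coordinates with the polar angle $\theta$ measured from the outward normal $n$)
\begin{align}
\int_{w\cdot n>0}(w\cdot n)\,\mathrm{d}w
=\int_0^{2\pi}\!\!\int_0^{\frac{\pi}{2}}\cos\theta\,\sin\theta\,\mathrm{d}\theta\,\mathrm{d}\varphi=\pi.
\end{align}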
In addition, for the perturbative boundary data $h$, we assume the compatibility condition \varepsilonqref{g-compatibility=}
to make sure $u^{\varepsilon}$ has zero flux \varepsilonqref{ue-zero-flux} at the boundary.
\Subsection{Asymptotic Analysis}
In analogy with Section~\mathbb{R}ef{Sec:asymptotic},
we expand the exact solution as
\begin{align}\label{expand=}
u^{\varepsilon}=U+Uuu+\mathbb{R}e=\left(U_0+\varepsilonU_1+\varepsilon^2U_2\mathbb{R}ight)+\left(Uuu_0+\varepsilonUuu_1\mathbb{R}ight)+\mathbb{R}e.
\varepsilonnd{align}
Here, the initial layer $Uuu:=Uuu_0+\varepsilonUuu_1$ and the corresponding $\Theta_{0,\infty}$, $\Theta_{1,\infty}$ can be taken to be the same as in the in-flow case.
To determine the interior solutions, as noted in \cite{AA007}, we should instead enforce the Neumann boundary condition here.
Specifically, $U_0$ is solved from
\begin{align}
\left\{
\begin{array}{l}
U_0=\overline{\u}_0,\quad
\partialartial_t\overline{\u}_0-\Delta_x\overline{\u}_0=0,\\\mathbb{R}ule{0ex}{1.5em}
\overlineU_0(0,x)=\overline{u}_o(x),\\\mathbb{R}ule{0ex}{1.5em}
\tfrac{\partial\overlineU_0}{\partialn}(t,x_0)=0\ \ \text{ for}\ \
x_0\in\partial\Omega.
\varepsilonnd{array}
\mathbb{R}ight.
\varepsilonnd{align}
Then $U_1$ is solved from
\begin{align}
\left\{
\begin{array}{l}
U_1=\overline{\u}_1-w\cdot\nabla_xU_{0},\quad
\partialartial_t\overline{\u}_1-\Delta_x\overline{\u}_1=0,\\\mathbb{R}ule{0ex}{1.5em}
\overlineU_1(0,x)=\Theta_{1,\infty}(x),\\\mathbb{R}ule{0ex}{1.5em}
\tfrac{\partial\overlineU_1}{\partialn}(t,x_0)=0 \ \ \text{ for}\ \
x_0\in\partial\Omega,
\varepsilonnd{array}
\mathbb{R}ight.
\varepsilonnd{align}
and $U_2$ from
\begin{align}
\left\{
\begin{array}{l}
U_2=\overline{\u}_2-w\cdot\nabla_xU_{1}-\partialartial_tU_0,\quad
\partialartial_t\overline{\u}_2-\Delta_x\overline{\u}_2=0,\\\mathbb{R}ule{0ex}{1.5em}
\overlineU_2(0,x)=0,\\\mathbb{R}ule{0ex}{1.5em}
\tfrac{\partial\overlineU_2}{\partialn}(t,x_0)=0 \ \ \text{ for}\ \
x_0\in\partial\Omega.
\varepsilonnd{array}
\mathbb{R}ight.
\varepsilonnd{align}
Note that we do not need to introduce boundary layers here because the leading-order boundary condition is automatically satisfied by the interior solution $U_0$ and the next-order boundary trace can be controlled in the remainder estimate.
Moreover, under the assumption \varepsilonqref{assumption-2}, the well-posedness and corresponding regularity estimates as \varepsilonqref{U_0-est}--\varepsilonqref{U^I_1-est} in Proposition~\mathbb{R}ef{prop:wellposedness} still hold.
\Subsection{Remainder Estimate}
With a view to achieving the same approximation estimate \varepsilonqref{main-2}, we look at the remainder
\begin{align}
\mathbb{R}e:= u^\varepsilon - \Big[\left(U_0+\varepsilonU_1+\varepsilon^2U_2\mathbb{R}ight)+\left(Uuu_0+\varepsilonUuu_1\mathbb{R}ight)\Big],
\varepsilonnd{align}
and derive the initial-boundary value problem
\begin{align}\label{remainder=}
\left\{
\begin{array}{l}\deltaisplaystyle
\varepsilon\partialartial_t\mathbb{R}e+w\cdot\nabla_x \mathbb{R}e+\varepsilon^{-1}\big(\re-\bre\big)=\Ss\ \ \text{ in}\ \ \mathbb{R}p\times\Omega\times\S^2,\\\mathbb{R}ule{0ex}{1.5em}
\mathbb{R}e(0,x,w)=\mathcal{I}(x,w)\ \ \text{ in}\ \ \Omega\times\S^2,\\\mathbb{R}ule{0ex}{1.5em}
\mathbb{R}e(t,x_0,w)=\partialp[\mathbb{R}e](t,x_0)+\mathcal{H}(t,x_0,w)\ \ \text{ for}\
\ x_0\in\partial\Omega\ \ \text{and}\ \ w\cdotn(x_0)<0,
\varepsilonnd{array}
\mathbb{R}ight.
\varepsilonnd{align}
where
\begin{align}
\Ss&:= -\varepsilonw\cdot\nabla_xUuu_1 -\varepsilon^2w\cdot\nabla_xU_2-\varepsilon^2\partialartial_tU_1-\varepsilon^3\partialartial_tU_2,\\[3pt]
\mathcal{I}&:=\varepsilon^2w\cdot\nabla_xU_1\big|_{t=0} + \varepsilon^2\partialartial_tU_0\big|_{t=0},\\[3pt]
\mathcal{H}&:=\varepsilon h -\varepsilon\big(1-\partialp\big)[Uuu_1]\big|_{\Gamma_-}
+\varepsilon\big(1-\partialp\big)[w\cdot\nabla_xU_0]\big|_{\Gamma_-} +\varepsilon^2\big(1-\partialp\big)[w\cdot\nabla_xU_1]\big|_{\Gamma_-},
\varepsilonnd{align}
with the parallel estimates as in Lemma~\mathbb{R}ef{lem:source-est}:
\begin{align}
\tnm{\Ss} \lesssim \varepsilon^2, \qquad
\tnm{\mathcal{I}} \lesssim \varepsilon^2, \qquad
\tnnms{\mathcal{H}}{\Gamma_-}\lesssim\varepsilon.
\varepsilonnd{align}
Also, it can be verified that
\begin{align}\label{temp 1}
\iint_{\Omega\times\S^2}\mathcal{I}(x,w) \,\mathrm{d}w\mathrm{d}x=0, \qquad
\iint_{\Omega\times\S^2}\Ss(t,x,w) \,\mathrm{d}w\mathrm{d}x=0 \ \ \text{ for all}\ \ t\in\mathbb{R}p,
\varepsilonnd{align}
\begin{align} \label{G-compatibility}
\int_{w\cdotn<0}\mathcal{H}(t,x_0,w)(w\cdotn)\,\mathrm{d}w=0\ \ \text{ for all}\ \ x_0\in\partial\Omega\ \ \text{and}\ \ t\in\mathbb{R}p,
\varepsilonnd{align}
which yields
\begin{align}\label{R-zero-average}
\int_{\Omega}\overline{\re}(t,x)\,\mathrm{d} x=0\ \ \text{ for all}\ \ t\in\mathbb{R}p
\varepsilonnd{align}
and
\begin{align}\label{R-zero-flux}
\int_{\S^2}\mathbb{R}e(t,x_0,w)(w\cdotn)\,\mathrm{d}w = 0
\ \ \text{ for all}\ \ x_0\in\partial\Omega\ \ \text{and}\ \ t\in\mathbb{R}p.
\varepsilonnd{align}
Observe that by the orthogonality $\bbr{\partialp[\mathbb{R}e],\big(1-\partialp\big)[\mathbb{R}e]}_{\mathcal{G}amma_+}=0$ and the compatibility \varepsilonqref{G-compatibility}, the boundary integral equals
\begin{align} \label{pangfufu}
\int_{\Gamma}\mathbb{R}e^2(w\cdotn)=\tnnms{\mathbb{R}e}{\Gamma_+}^2-\btnnms{\partialp[\mathbb{R}e]+\mathcal{H}}{\Gamma_-}^2=\btnnms{\big(1-\partialp\big)[\mathbb{R}e]}{\Gamma_+}^2-\tnnms{\mathcal{H}}{\Gamma_-}^2.
\varepsilonnd{align}
Correspondingly,
we have the energy bound
\begin{align}\label{energy=}
\nnm{\mathbb{R}e}_{L^{\infty}_tL^2_{x,w}}^2+\varepsilon^{-1}\btnnms{\big(1-\partialp\big)[\mathbb{R}e]}{\Gamma_+}^2+\varepsilon^{-2}\tnnm{\re-\bre}^2\lesssim \delta\varepsilon^{-1}\tnnm{\overline{\re}}^2+\delta^{-1}
\varepsilonnd{align}
with a small constant $0<\delta\ll 1$.
Paralleling Lemma~\mathbb{R}ef{lem:kernel}, we may deduce the kernel estimate
\begin{align}\label{kernel=}
\tnnm{\overline{\re}}^2\lesssim\varepsilon\nnm{\mathbb{R}e}_{L^{\infty}_tL^2_{x,w}}^2+\tnnm{\re-\bre}^2 +\btnnms{\big(1-\partialp\big)[\mathbb{R}e]}{\Gamma_+}^2+\varepsilon,
\varepsilonnd{align}
by using test functions built from $\xi(t,x):=(-\Delta_x)^{-1}\overline{\re}(t,x)$ with the Neumann boundary
condition instead.
To be specific, \varepsilonqref{R-zero-average} allows us to construct $\xi$ via
\begin{align}
\left\{
\begin{array}{l}
-\Delta_x\xi=\overline{\re}\ \ \text{ in}\
\ \Omega,\\\mathbb{R}ule{0ex}{1.2em}
\tfrac{\partial\xi}{\partialn}=0\ \ \text{ on}\ \
\partial\Omega,\\\mathbb{R}ule{0ex}{1.2em}
\int_{\Omega}\xi=0.
\varepsilonnd{array}
\mathbb{R}ight.
\varepsilonnd{align}
Consequently, the boundary terms satisfy
\begin{align}
\int_{\Gamma}\mathbb{R}e\xi(w\cdotn) =
\br{\mathbb{R}e, \xi}_{\Gamma_+} - \bbr{\partialp[\mathbb{R}e]+\mathcal{H}, \xi}_{\Gamma_-}=0
\varepsilonnd{align}
by \varepsilonqref{R-zero-flux},
and
\begin{align}
\int_{\Gamma}\mathbb{R}e \big(w\cdot\nabla_x\xi\big)(w\cdotn) &=
\bbr{\mathbb{R}e, w\cdot\nabla_x\xi}_{\Gamma_+} - \bbr{\partialp[\mathbb{R}e]+\mathcal{H}, w\cdot\nabla_x\xi}_{\Gamma_-}\\
&=\bbr{\big(1-\partialp\big)[\mathbb{R}e], w\cdot\nabla_x\xi}_{\Gamma_+} - \bbr{\mathcal{H}, w\cdot\nabla_x\xi}_{\Gamma_-},\nonumber
\varepsilonnd{align}
thanks to $\tfrac{\partial\xi}{\partialn}\big|_{\partial\Omega} =(n\cdot\nabla_x\xi)\big|_{\partial\Omega}=0$.
The remaining proof of \varepsilonqref{kernel=} then follows from analogous arguments.
Combining \varepsilonqref{energy=} with \varepsilonqref{kernel=} yields the remainder estimate
\begin{align}
\nnm{\mathbb{R}e}_{L^{\infty}_tL^2_{x,w}}+\varepsilon^{-\frac{1}{2}}\btnnms{\big(1-\partialp\big)[\mathbb{R}e]}{\Gamma_+}+\varepsilon^{-\frac{1}{2}}\tnnm{\overline{\re}}+\varepsilon^{-1}\tnnm{\re-\bre}\lesssim 1,
\varepsilonnd{align}
and Theorem~\mathbb{R}ef{main theorem-2} can thus be proved accordingly.
\Section{Specular-Reflection Boundary Problem}\label{Sec:specular-BC}
Consider the unsteady neutron transport equation with the specular-reflection boundary condition:
\begin{align}\label{transport==}
\left\{
\begin{array}{l}\deltaisplaystyle
\varepsilon\partialartial_t u^{\varepsilon}+w\cdot\nabla_x u^{\varepsilon}+\varepsilon^{-1}\big(u^{\varepsilon}-\overline{u}^{\varepsilon}\big)=0\ \ \text{ in}\ \ \mathbb{R}p\times\Omega\times\S^2,\\\mathbb{R}ule{0ex}{1.5em}
u^{\varepsilon}(0,x,w)=u_o(x,w)\ \ \text{ in}\ \ \Omega\times\S^2,\\\mathbb{R}ule{0ex}{1.5em}
u^{\varepsilon}(t,x_0,w)=\Sr[u^{\varepsilon}](t,x_0,w)+\varepsilon h(t,x_0,w)\ \ \text{ for}\
\ x_0\in\partial\Omega\ \ \text{and}\ \ w\cdotn(x_0)<0,
\varepsilonnd{array}
\mathbb{R}ight.
\varepsilonnd{align}
where the specular-reflection operator
\begin{align}
\Sr[u^{\varepsilon}](t,x_0,w):=u^{\varepsilon}(t,x_0,\mathbb{R}rw)
\quad\text{ with}\quad \mathbb{R}rw:=w-2(w\cdotn)n,
\varepsilonnd{align}
and $h$ is a given perturbation satisfying the compatibility condition \varepsilonqref{g-compatibility=},
so that $u^{\varepsilon}$ has zero flux at each boundary point.
\Subsection{Asymptotic Analysis}
Let us expand the exact solution as
\begin{align}\label{expand==}
u^{\varepsilon}=U+Uuu+Uu+\mathbb{R}e=\left(U_0+\varepsilonU_1+\varepsilon^2U_2\mathbb{R}ight)+\left(Uuu_0+\varepsilonUuu_1\mathbb{R}ight)+\left(Uu_0+\varepsilonUu_1\mathbb{R}ight)+\mathbb{R}e.
\varepsilonnd{align}
Here, the initial layer $Uuu:=Uuu_0+\varepsilonUuu_1$ and the interior solution $U:=U_0+\varepsilonU_1+\varepsilon^2U_2$ can be constructed identically to the diffuse-reflection case.
However, unlike the previous scenario, a ``boundary layer'' of sorts needs to be introduced (if $h\neq 0$) in order to guarantee the perfect specular boundary condition for the remainder.
Specifically, we let $Uu_0=0$ because the leading-order interior solution $U_0$ already satisfies the specular boundary condition.
We then define $Uu_1$ to be an extension of $h$ into a thin layer inside the domain:
\begin{align}
Uu_1=
\mathcal{G}e(t;\varepsilonta,\iota_1,\iota_2;\partialhi,\partialsi):= \widetilde{\chi}i(\varepsilonta) \mathds{1}_{\{\Sin\partialhi>0\}} h(t;\iota_1,\iota_2;\partialhi,\partialsi),
\varepsilonnd{align}
and let it act as the next-order boundary layer.
\Subsection{Remainder Estimate}
Proceeding in a parallel fashion,
we look at the remainder
\begin{align}
\mathbb{R}e:= u^\varepsilon - \Big[\left(U_0+\varepsilonU_1+\varepsilon^2U_2\mathbb{R}ight)+\left(Uuu_0+\varepsilonUuu_1\mathbb{R}ight)+\varepsilonUu_1\Big],
\varepsilonnd{align}
and derive the initial-boundary value problem
\begin{align}\label{remainder==}
\left\{
\begin{array}{l}\deltaisplaystyle
\varepsilon\partialartial_t\mathbb{R}e+w\cdot\nabla_x \mathbb{R}e+\varepsilon^{-1}\big(\re-\bre\big)=\Ss\ \ \text{ in}\ \ \mathbb{R}p\times\Omega\times\S^2,\\\mathbb{R}ule{0ex}{1.5em}
\mathbb{R}e(0,x,w)=\mathcal{I}(x,w)\ \ \text{ in}\ \ \Omega\times\S^2,\\\mathbb{R}ule{0ex}{1.5em}
\mathbb{R}e(t,x_0,w)=\mathbb{R}e(t,x_0,\mathbb{R}rw)\ \ \text{ for}\
\ x_0\in\partial\Omega\ \ \text{and}\ \ w\cdotn(x_0)<0.
\varepsilonnd{array}
\mathbb{R}ight.
\varepsilonnd{align}
In particular, $\mathbb{R}e$ satisfies the perfect specular boundary condition without any perturbation,
due to the introduction of the ``boundary layer'' $Uu_1$
and the observation that
\begin{align}
U_0=\Sr[U_0],\qquadU_1=\Sr[U_1],\qquad U_2=\Sr[U_2],
\qquad Uuu_0=\Sr[Uuu_0],\qquad Uuu_1=\Sr[Uuu_1]
\varepsilonnd{align}
from the construction of $U_0,U_1,U_2,Uuu_0,Uuu_1$ and the compatibility conditions \varepsilonqref{compatibility-3}.
Compared to the diffuse-reflection case, the expression of $\mathcal{I}$ remains the same, while the source term $\Ss$ now contains additional terms coming from $Uu_1$
(see \varepsilonqref{expand 6} for the transport operator under the boundary coordinates):
\begin{align}
\Ss^{B\!L}:=
-\varepsilon^2\partialartial_t Uu_1-\varepsilonw\cdot\nabla_x Uu_1-\big(Uu_1-\overline{Uu_1}\big)
=: \Ss^{B\!L}_a + \Ss^{B\!L}_b .
\varepsilonnd{align}
Here, $\Ss^{B\!L}_a:=\varepsilon\big(\Ssa+\Ssb\big)[Uu_1]$ denotes the terms corresponding to $\Ssa+\Ssb$ in the in-flow case, which now gain one order of $\varepsilon$, so that $\tnnm{\big(1+\varepsilonta\big)\Ss^{B\!L}_a}\lesssim \varepsilon^{\frac{3}{2}}$.
In particular, the term containing $\partial_\partialhi Uu_1$ remains harmless thanks to the continuity of $\mathcal{G}e$ across the grazing set where $w\cdotn=0$, which holds under assumption \varepsilonqref{assumption-3} with the compatibility condition $h|_{\Gamma_0}=0$.
The remaining part
\begin{align}
\Ss^{B\!L}_b:=
-\Sin\partialhi\, \partial_{\varepsilonta}Uu_1 - \big(Uu_1-\overline{Uu_1}\big)
= -\widetilde{\chi}i'(\varepsilonta) \Sin\partialhi\, \mathds{1}_{\{\Sin\partialhi>0\}} h
- \widetilde{\chi}i(\varepsilonta)\Big(\mathds{1}_{\{\Sin\partialhi>0\}} h - \overline{\mathds{1}_{\{\Sin\partialhi>0\}} h}\Big)
\varepsilonnd{align}
which corresponds to $\Ssc$ in the in-flow case,
now has no cancellation to exploit, since $Uu_1$ is not constructed from a Milne problem.
In view of the scaling $\varepsilonta=\varepsilon^{-1}\mu$, we see $\bnnm{Uu_1}_{L^2}\lesssim\varepsilon^{\frac{1}{2}}$
and those terms in $\Ss^{B\!L}_b$ contribute to
\begin{align}
\tnnm{\big(1+\varepsilonta\big)\Ss^{B\!L}}\lesssim \varepsilon^{\frac{1}{2}},
\varepsilonnd{align}
which is acceptable.
Besides, due to the presence of $Uu_1$, we have
\begin{align}
\iint_{\Omega\times\S^2}\Ss = -\varepsilon^2 \iint_{\Omega\times\S^2} \partialartial_t Uu_1,
\quad\text{ and so }\quad
\int_{\Omega}\overline{\re} = -\varepsilon \iint_{\Omega\times\S^2} Uu_1,
\varepsilonnd{align}
taking into account the zero flux of $\mathbb{R}e$ and $Uu_1$ under \varepsilonqref{g-compatibility=}.
Despite the fact that $\overline{\re}$ does not necessarily have zero average, we may redefine
\begin{align}
\widetilde\mathbb{R}e:=\mathbb{R}e-\frac{1}{|\Omega|}\int_{\Omega}\overline{\re},\qquad
\widetilde\Ss:=\Ss-\frac{1}{4\partiali|\Omega|}\iint_{\Omega\times\S^2}\Ss,
\varepsilonnd{align}
so that $\int_{\Omega}\overline{\widetilde\mathbb{R}e}=0$, $\iint_{\Omega\times\S^2}\widetilde\Ss=0$, and that
$\widetilde\mathbb{R}e$ solves \varepsilonqref{remainder==} with $\Ss$ replaced by $\widetilde\Ss$.
Since the difference $\abs{\int_{\Omega}\overline{\re}}\lesssim\varepsilon^2$ is of higher order, it suffices to obtain the remainder estimate for $\widetilde\mathbb{R}e$. For simplicity, we will abuse notation and write $\mathbb{R}e$ for $\widetilde\mathbb{R}e$ in what follows.
The specular boundary condition on $\mathbb{R}e$ yields
\begin{align}\label{R-zero-flux=}
\int_{\S^2}\mathbb{R}e(t,x_0,w)(w\cdotn)\,\mathrm{d}w = 0
\ \ \text{ for all}\ \ x_0\in\partial\Omega\ \ \text{and}\ \ t\in\mathbb{R}p,
\varepsilonnd{align}
as well as
\begin{align}\label{temp 3}
\int_{\Gamma}\mathbb{R}e^2(w\cdotn)=\tnnms{\mathbb{R}e}{\Gamma_+}^2-\tnnms{\mathbb{R}e}{\Gamma_-}^2=0.
\varepsilonnd{align}
Also, with the same test functions as in the diffuse-reflection case where $\tfrac{\partial\xi}{\partialn}=0$ on $\partial\Omega$, the corresponding boundary terms will vanish:
\begin{align}
\int_{\Gamma}\mathbb{R}e\xi(w\cdotn) &= \br{\mathbb{R}e, \xi}_{\Gamma_+} - \br{\mathbb{R}e, \xi}_{\Gamma_-}=0, \\
\int_{\Gamma}\mathbb{R}e \big(w\cdot\nabla_x\xi\big)(w\cdotn) &= \bbr{\mathbb{R}e, w\cdot\nabla_x\xi}_{\Gamma_+} - \bbr{\mathbb{R}e, w\cdot\nabla_x\xi}_{\Gamma_-}=0.\label{boundary-term}
\varepsilonnd{align}
Arguing analogously, we thus find
the energy bound
\begin{align}\label{energy==}
\nnm{\mathbb{R}e}_{L^{\infty}_tL^2_{x,w}}^2+\varepsilon^{-2}\tnnm{\re-\bre}^2\lesssim \delta\varepsilon^{-1}\tnnm{\overline{\re}}^2+\delta^{-1},
\varepsilonnd{align}
and the kernel bound
\begin{align}\label{kernel==}
\tnnm{\overline{\re}}^2\lesssim\varepsilon\nnm{\mathbb{R}e}_{L^{\infty}_tL^2_{x,w}}^2+\tnnm{\re-\bre}^2 +\varepsilon.
\varepsilonnd{align}
For the kernel estimate, however, the new terms $\varepsilon^{-1}\abs{\deltaobr{\Ss^{B\!L},\xi}}+\abs{\deltabbr{\Ss^{B\!L},w\cdot\nabla_x\xi}}+\varepsilon\abs{\deltaobr{\Ss^{B\!L},\mathcal{I}eta}}$ in \varepsilonqref{kernel 09} require a different treatment, especially the worst part $\varepsilon^{-1}\abs{\deltaobr{\Ss^{B\!L}_b,\xi}}$:
note that an estimate like \varepsilonqref{kernel 11} no longer applies, since now we do not have $\xi|_{\partial\Omega}=0$.
Instead, using integration by parts in $\mu$ and noticing that the boundary term vanishes due to
$\int_{\S^2} \Sin\partialhi\,Uu_1\big|_{\mu=0}\mathrm{d} w = \int_{w\cdotn<0}h(t,x_0,w)(w\cdotn)\,\mathrm{d}w=0$ for each $x_0\in\partial\Omega$ and all $t\in\mathbb{R}p$ (which is precisely the compatibility condition \varepsilonqref{g-compatibility=}), we find
\begin{align}
\varepsilon^{-1}\abs{\deltaobr{\Ss^{B\!L}_b,\xi}}&=
\varepsilon^{-1}\Babs{\deltabbr{\Sin\partialhi\, \partial_{\varepsilonta}Uu_1+\big(Uu_1-\overline{Uu_1}\big), \xi}}=\babs{\deltabbr{\Sin\partialhi\,\partial_{\mu}Uu_1, \xi}}
=\babs{\deltabbr{\Sin\partialhi\,Uu_1, \partial_{\mu}\xi}}\\
&\lesssim
\bnnm{Uu_1}_{L^2_tL^2_xL^1_w}\tnnm{\partial_{\mu}\xi} \lesssim\varepsilon^{\frac{1}{2}}\nnm{\xi}_{L^2_tH^1_x}
\lesssim \varepsilon^{\frac{1}{2}}\tnnm{\overline{\re}}
\lesssim\delta\tnnm{\overline{\re}}^2+\delta^{-1}\varepsilon.\nonumber
\varepsilonnd{align}
The rest of the terms can be bounded either in a similar way to \varepsilonqref{kernel 11}--\varepsilonqref{kernel 13}, or even more directly in view of the higher order of $\varepsilon$ they carry.
Finally, by combining \varepsilonqref{energy==} and \varepsilonqref{kernel==} we have the remainder estimate
\begin{align}
\nnm{\mathbb{R}e}_{L^{\infty}_tL^2_{x,w}}+\varepsilon^{-\frac{1}{2}}\tnnm{\overline{\re}}+\varepsilon^{-1}\tnnm{\re-\bre}\lesssim 1.
\varepsilonnd{align}
This proves \varepsilonqref{main-3} and hence Theorem~\mathbb{R}ef{main theorem-3}.
\varepsilonnd{document}
\begin{document}
\title[Maxwell--Stefan--Cahn--Hilliard systems]{Existence and weak-strong uniqueness
for Maxwell--Stefan--Cahn--Hilliard systems}
\author[X. Huo]{Xiaokai Huo}
\address{Institute of Analysis and Scientific Computing, Technische Universit\"at
Wien, Wiedner Hauptstra\ss e 8--10, 1040 Wien, Austria}
\email{[email protected]}
\author[A. J\"ungel]{Ansgar J\"ungel}
\address{Institute of Analysis and Scientific Computing, Technische Universit\"at
Wien, Wiedner Hauptstra\ss e 8--10, 1040 Wien, Austria}
\email{[email protected]}
\author[A. Tzavaras]{Athanasios E. Tzavaras}
\address{Computer, Electrical and Mathematical Science and Engineering Division,
King Abdullah University of Science and Technology (KAUST),
Thuwal 23955-6900, Saudi Arabia}
\email{[email protected]}
\date{\today}
\thanks{XH and AJ acknowledge partial support from
the Austrian Science Fund (FWF), grants P33010, W1245, and F65.
AET acknowledges support from the King Abdullah University of Science and Technology (KAUST).
This work has received funding from the European
Research Council (ERC) under the European Union's Horizon 2020 research and
innovation programme, ERC Advanced Grant no.~101018153.}
\begin{abstract}
A Maxwell--Stefan system for fluid mixtures with driving forces depending on
Cahn--Hilliard-type chemical potentials is analyzed. The corresponding parabolic
cross-diffusion equations contain fourth-order derivatives and are considered
in a bounded domain with no-flux boundary conditions. The main difficulty of
the analysis is the degeneracy of the diffusion matrix, which is overcome by
proving the positive definiteness of the matrix on a subspace and using the
Bott--Duffin matrix inverse. The global existence of weak solutions and a
weak-strong uniqueness property are shown by a careful combination of
(relative) energy and entropy estimates, yielding $H^2(\Omega)$ bounds for
the densities, which cannot be obtained from the energy or entropy inequalities alone.
\end{abstract}
\keywords{Cross-diffusion systems, global existence, weak-strong uniqueness,
relative entropy, relative free energy, parabolic fourth-order equations,
Maxwell--Stefan equations, Cahn--Hilliard equations.}
\subjclass[2000]{35A02, 35G20, 35G31, 35K51, 35K55, 35Q35.}
\maketitle
\section{Introduction}
The evolution of fluid mixtures is important in many scientific fields, such as
biology and nanotechnology, for understanding the
diffusion-driven transport of the species. The transport can be modeled
by the Maxwell--Stefan equations \cite{Max66,Ste71}, which
consist of the mass balance equations and the relations between the driving forces
and the fluxes. The driving forces involve the chemical potentials of the
species, which in turn are determined by the (free) energy.
When the fluid is immiscible, the energy can be assumed to consist
of the thermodynamic entropy and the phase separation energy,
given by a density gradient \cite{CaHi58}. The gradient energetically penalizes
the formation of an interface and restrains the segregation.
This leads to a system of cross-diffusion equations with fourth-order derivatives.
The aim of this paper is to provide a
global existence and weak-strong uniqueness analysis for the multicomponent
Maxwell--Stefan--Cahn--Hilliard system.
\subsection{Model equations and state of the art}
The equations for the partial densities $c_i$ and partial velocities $u_i$
are given by
\begin{align}
\pa_t c_i + \operatorname{div}(c_iu_i) &= 0, \quad i = 1,\ldots, n, \label{1.eq1} \\
\label{1.eq2}
c_i\na\mu_i - \frac{c_i}{\sum_{k=1}^n c_k}\sum_{j=1}^n c_j\na\mu_j
&= -\sum_{j=1}^n K_{ij}(\bm{c})c_ju_j, \\
\sum_{j=1}^n c_ju_j &= 0, \label{1.eq3}
\end{align}
supplemented by the initial and boundary conditions
\begin{equation}\label{1.bic}
\bm{c}(\cdot,0)=\bm{c}^0\quad\mbox{in }\Omega, \quad
c_iu_i\cdot\nu = \na c_i\cdot\nu = 0\quad\mbox{on }\pa\Omega,\ t>0,\ i=1,\ldots,n,
\end{equation}
where $\Omega\subset{\mathbb R}^d$ ($d=1,2,3$) is a bounded domain, $\nu$ is the
exterior unit normal vector on the boundary $\pa\Omega$, $\bm{c}=(c_1,\ldots,c_n)$
is the density vector, and $K_{ij}(\bm{c})$ are the friction coefficients.
The left-hand side of \eqref{1.eq2} can be interpreted as the driving forces
of the thermodynamic system, and the right-hand side is the sum of the friction forces.
The chemical potentials
\begin{equation}\label{1.mu}
\mu_i=\frac{\delta{\mathcal E}}{\delta c_i} = \log c_i-\Delta c_i, \quad i=1,\ldots,n,
\end{equation}
are the variational derivatives of the (free) energy
\begin{equation}\label{1.HE}
{\mathcal E}(\bm{c}) = {\mathcal H}(\bm{c}) + \frac{1}{2}\sum_{i=1}^n\int_\Omega|\na c_i|^2 dx, \quad
{\mathcal H}(\bm{c}) = \sum_{i=1}^n\int_\Omega\big(c_i(\log c_i-1)+1\big)dx,
\end{equation}
and ${\mathcal H}(\bm{c})$ is the thermodynamic entropy. We assume that
$\sum_{i=1}^n K_{ij}(\bm{c})=0$ for $j=1,\ldots,n$, meaning that the
linear system in $\na\mu_j$ is invertible only on a subspace, and that
$\sum_{i=1}^n c_i^0=1$ in $\Omega$, which implies that $\sum_{i=1}^n c_i(t)=1$
in $\Omega$ for all time $t>0$. This means that the mixture is saturated
and $c_i$ can be interpreted as volume fraction.
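Indeed, summing the mass balance equations \eqref{1.eq1} over $i$ and using the constraint \eqref{1.eq3} gives, at least formally,
$$
\pa_t\sum_{i=1}^n c_i + \operatorname{div}\sum_{i=1}^n c_iu_i = \pa_t\sum_{i=1}^n c_i = 0,
$$
so the saturation property propagates from the initial datum.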
For simplicity, we have normalized all physical constants.
Model \eqref{1.eq1}--\eqref{1.mu} has been derived rigorously in \cite{HJT19}
in the high-friction
limit from a multicomponent Euler--Korteweg system for a general convex energy
functional depending on $\bm{c}$ and $\na\bm{c}$. A thermodynamics-based derivation
can be found in \cite{MiSc09}. When the energy equals
${\mathcal E}(\bm{c})={\mathcal H}(\bm{c})$, the model reduces to the classical Maxwell--Stefan
equations, analyzed first in \cite{Bot11,GoMa98,HMPW17} for local-in-time smooth
solutions and later in \cite{JuSt13} for global-in-time weak solutions.
In the single-species case, model \eqref{1.eq1}--\eqref{1.mu} becomes the
fourth-order Cahn--Hilliard equation with potential $\phi(c)=c(\log c-1)$,
which was analyzed in, e.g., \cite{ElGa96,Jin92}. Only few works are concerned
with the multi-species situation, and all of them require additional conditions.
The mobility matrix in \cite{BoLa06,MaZi17} is assumed to be diagonal and that one in
\cite{KRS21} has constant entries, while the
works \cite{EMP21,ElGa97} suppose a particular (but nondiagonal)
structure of the mobility matrix. We also mention the works
\cite{BaEh18,BBEP20} on related models with free energies of the type ${\mathcal H}$.
The proof of the uniqueness of solutions to cross-diffusion or fourth-order
systems is quite delicate due to the lack of a maximum principle and regularity
of the solutions.
The uniqueness of strong solutions to Maxwell--Stefan systems has been shown in
\cite{HMPW17,HuSa18}, and uniqueness results for weak solutions in a very
special case can be found in \cite{ChJu18}. A weak-strong uniqueness result
for Maxwell--Stefan systems was proved in \cite{HJT21}.
Concerning uniqueness results for fourth-order equations,
we refer to \cite{CGPS13} for single-species Cahn--Hilliard equations,
\cite{Joh15} for single-species thin-film equations,
and \cite{F13} for the quantum drift-diffusion equations.
Up to our knowledge, there are no uniqueness results for multicomponent
Cahn--Hilliard systems.
In this paper, we analyze these equations in a general setting for the first time.
\subsection{Key ideas of the analysis}
Before stating the main results, we explain the mathematical ideas needed
to analyze model \eqref{1.eq1}--\eqref{1.mu}. First, we rewrite
\eqref{1.eq2}
by introducing the matrix $D(\bm{c})\in{\mathbb R}^{n\times n}$ with
entries
$$
D_{ij}(\bm{c}) = \frac{1}{\sqrt{c_i}} K_{ij}(\bm{c})\sqrt{c_j}
$$
in the unknowns $(\sqrt{c_1}u_1,\ldots,\sqrt{c_n}u_n)$:
\begin{equation}\label{1.D}
\begin{aligned}
\sqrt{c_i}\na\mu_i - \frac{\sqrt{c_i}}{\sum_{k=1}^n c_k}\sum_{j=1}^n c_j\na\mu_j
&= -\sum_{j=1}^n D_{ij}(\bm{c})\sqrt{c_j}u_j, \\
\sum_{i=1}^n\sqrt{c_i}\big(\sqrt{c_i}u_i\big) &= 0.
\end{aligned}
\end{equation}
We show in Lemma \ref{lem.Dz} that this linear system has a unique solution
in the space $L(\bm{c}):=\{\bm{z}\in{\mathbb R}^n:\sum_{i=1}^n\sqrt{c_i}z_i=0\}$,
and the solution reads as
$$
\sqrt{c_i}u_i = -\sum_{j=1}^n D_{ij}^{BD}(\bm{c})\sqrt{c_j}\na\mu_j,
$$
where $D^{BD}(\bm{c})$ is the so-called Bott--Duffin matrix inverse; see
Lemmas \ref{lem.Dz} and \ref{lem.DB} for the definition and some properties. Then,
defining the matrix $B(\bm{c})\in{\mathbb R}^{n\times n}$ with elements
\begin{equation}\label{1.B}
B_{ij}(\bm{c}) = \sqrt{c_i}D_{ij}^{BD}(\bm{c})\sqrt{c_j}, \quad i,j=1,\ldots,n,
\end{equation}
system \eqref{1.eq1}--\eqref{1.eq2} can be formulated as
(see Section \ref{sec.BD} for details)
$$
\pa_t c_i = \operatorname{div}\sum_{j=1}^n B_{ij}(\bm{c})\na\mu_j, \quad i=1,\ldots,n.
$$
The matrix $B(\bm{c})$ is often called Onsager or mobility matrix in the literature.
The major difficulty of the analysis consists in the fact
that the matrix $B(\bm{c})$ is singular
and degenerates when $c_i\to 0$ for some $i\in\{1,\ldots,n\}$.
Computing formally the energy identity
$$
\frac{d{\mathcal E}}{dt}(\bm{c})
+ \sum_{i,j=1}^n\int_\Omega B_{ij}(\bm{c})\na\mu_i\cdot\na\mu_j dx = 0,
$$
the degeneracy at $c_i=0$ prevents uniform estimates for $\na\mu_i$ in $L^2(\Omega)$.
In some works, this issue has been compensated. For instance, there exists
an entropy equality for the model of \cite{ElGa97} yielding an $L^2(\Omega)$
bound for $\Delta c_i$, and the decoupled mobilities in \cite{CMN19,MaZi17} allow for
decoupled entropy estimates. In our model, the energy identity does not
provide a gradient estimate for the full vector
$(\na\mu_1,\ldots,\na\mu_n)$ but only for a projection:
$$
\frac{d{\mathcal E}}{dt}(\bm{c}) + C_1\sum_{i=1}^n\int_\Omega
\bigg|\sum_{j=1}^n(\delta_{ij}-\sqrt{c_ic_j})\sqrt{c_j}\na\mu_j\bigg|^2 dx \le 0,
$$
where $\delta_{ij}$
is the Kronecker delta; see Lemma \ref{lem.fei}.
(The constant $C_1>0$ and all constants that follow do not depend on $\bm{c}$.)
To address the degeneracy issue, we compute the time derivative of the entropy:
$$
\frac{d{\mathcal H}}{dt}(\bm{c}) + \sum_{i,j=1}^n\int_\Omega B_{ij}(\bm{c})\na\log c_i
\cdot\na\mu_j dx = 0.
$$
This does not provide a uniform estimate for $\Delta c_i$, but we show
(see Lemma \ref{lem.fei}) that
$$
\frac{d{\mathcal H}}{dt}(\bm{c}) + C_2\sum_{i=1}^n\int_\Omega(\Delta c_i)^2 dx
\le C_3\sum_{i=1}^n\int_\Omega\bigg|\sum_{j=1}^n(\delta_{ij}-\sqrt{c_ic_j})
\sqrt{c_j}\na\mu_j\bigg|^2 dx.
$$
Combining the energy and entropy inequalities in a suitable way, the last
integral cancels:
\begin{equation}\label{1.cee}
\frac{d}{dt}\bigg({\mathcal H}(\bm{c})+\frac{C_3}{C_1}{\mathcal E}(\bm{c})\bigg)
+ C_2\sum_{i=1}^n\int_\Omega(\Delta c_i)^2 dx \le 0.
\end{equation}
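Explicitly, multiplying the energy inequality by $C_3/C_1$ and adding the result to the entropy estimate gives
$$
\frac{d}{dt}\bigg({\mathcal H}(\bm{c})+\frac{C_3}{C_1}{\mathcal E}(\bm{c})\bigg)
+ C_2\sum_{i=1}^n\int_\Omega(\Delta c_i)^2 dx
+ C_3\sum_{i=1}^n\int_\Omega\bigg|\sum_{j=1}^n(\delta_{ij}-\sqrt{c_ic_j})
\sqrt{c_j}\na\mu_j\bigg|^2 dx
\le C_3\sum_{i=1}^n\int_\Omega\bigg|\sum_{j=1}^n(\delta_{ij}-\sqrt{c_ic_j})
\sqrt{c_j}\na\mu_j\bigg|^2 dx,
$$
and the two projected gradient terms cancel, which is \eqref{1.cee}.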
This provides the desired $H^2(\Omega)$ bound for $c_i$.
Note that the energy or entropy inequality alone does not give estimates
for $c_i$. The combined energy-entropy inequality is the key idea of the paper
for both the existence and weak-strong uniqueness analysis.
\subsection{Main results}
We make the following assumptions:
\begin{itemize}
\item[(A1)] Domain: $\Omega\subset{\mathbb R}^d$ with $d\le 3$ is a bounded domain.
We set $Q_T=\Omega\times(0,T)$ for $T>0$.
\item[(A2)] Initial data: $c_i^0\in H^1(\Omega)$ satisfies $c_i^0\ge 0$ in $\Omega$,
$i=1,\ldots,n$, and $\sum_{i=1}^n c_i^0=1$ in $\Omega$.
\end{itemize}
The assumption $d\le 3$ is made for convenience; it can be relaxed to higher space
dimensions by choosing another regularization in the existence proof;
see \eqref{3.regul1}.
The constraint $\sum_{i=1}^n c_i^0=1$ expresses the saturation of the mixture
and it propagates to the solution.
We introduce the matrix $D_{ij}(\bm{c})=(1/\sqrt{c_i})K_{ij}(\bm{c})\sqrt{c_j}$
for $i,j=1,\ldots,n$ and set
\begin{equation}\label{1.L}
L(\bm{c}) = \{\bm{x}\in{\mathbb R}^n:\sqrt{\bm{c}}\cdot\bm{x}=0\}, \quad
L^\perp(\bm{c}) = \operatorname{span}\{\sqrt{\bm{c}}\},
\end{equation}
where $\sqrt{\bm{c}}=(\sqrt{c_1},\ldots,\sqrt{c_n})$.
The projections $P_L(\bm{c})$, $P_{L^\perp}(\bm{c})\in{\mathbb R}^{n\times n}$
on $L(\bm{c})$, $L(\bm{c})^\perp$, re\-spec\-tive\-ly, are given by
\begin{equation}\label{1.PL}
P_L(\bm{c})_{ij} = \delta_{ij}-\sqrt{c_ic_j}, \quad
P_{L^\perp}(\bm{c})_{ij} = \sqrt{c_ic_j}\quad\mbox{for }i,j=1,\ldots,n.
\end{equation}
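As a consistency check in the saturated case $\sum_{i=1}^n c_i=1$, the matrix $P_{L^\perp}(\bm{c})=\sqrt{\bm{c}}\,\sqrt{\bm{c}}^{\,T}$ (with $\sqrt{\bm{c}}$ viewed as a column vector) is indeed the orthogonal projection onto $L^\perp(\bm{c})$: it is symmetric and idempotent, since
$$
P_{L^\perp}(\bm{c})^2 = \sqrt{\bm{c}}\,\big(\sqrt{\bm{c}}^{\,T}\sqrt{\bm{c}}\big)\sqrt{\bm{c}}^{\,T}
= \bigg(\sum_{i=1}^n c_i\bigg)P_{L^\perp}(\bm{c}) = P_{L^\perp}(\bm{c}),
$$
and consequently $P_L(\bm{c})=I-P_{L^\perp}(\bm{c})$ is the orthogonal projection onto $L(\bm{c})$.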
We impose for any given $\bm{c}\in[0,1]^n$ the following assumptions on
$D(\bm{c})=(D_{ij}(\bm{c}))\in{\mathbb R}^{n\times n}$:
\begin{itemize}
\item[(B1)]$D(\bm{c})$ is symmetric and $\operatorname{ran} D(\bm{c})=L(\bm{c})$,
$\ker(D(\bm{c})P_L(\bm{c}))=L^\perp(\bm{c})$.
\item[(B2)] For all $i,j=1,\ldots,n$, $D_{ij}\in C^1([0,1]^n)$ is bounded.
\item[(B3)] The matrix $D(\bm{c})$ is positive semidefinite, and there exists
$\rho>0$ such that all eigenvalues $\lambda\neq 0$ of
$D(\bm{c})$ satisfy $\lambda\ge\rho$.
\item[(B4)] For all $i,j=1,\ldots,n$, $K_{ij}(\bm{c})
=\sqrt{c_i}D_{ij}(\bm{c})/\sqrt{c_j}$ is bounded in $[0,1]^n$.
\end{itemize}
Examples of matrices $D(\bm{c})$ satisfying these assumptions
are presented in Section \ref{sec.exam}.
Our first main result is the global existence of weak solutions.
\begin{theorem}[Global existence]\label{thm.ex}
Let Assumptions (A1)--(A2) and (B1)--(B4) hold. Then there exists a weak solution
$\bm{c}$ to \eqref{1.eq1}--\eqref{1.mu} satisfying $0\le c_i\le 1$,
$\sum_{i=1}^n c_i=1$ in $\Omega\times(0,\infty)$,
$$
c_i\in L_{\rm loc}^\infty(0,\infty;H^1(\Omega))\cap
L_{\rm loc}^2(0,\infty;H^2(\Omega)), \quad
\pa_t c_i\in L_{\rm loc}^2(0,\infty;H^1(\Omega)'),
$$
the initial condition in \eqref{1.bic} is satisfied in the sense of
$H^1(\Omega)'$, and for all $\phi_i\in C_0^\infty(\Omega\times(0,\infty))$,
\begin{align}\label{1.weak}
0 &= -\int_0^\infty\int_\Omega c_i\pa_t\phi_i dxdt
+ \sum_{j=1}^n\int_0^\infty\int_\Omega B_{ij}(\bm{c})\na\log c_j\cdot\na\phi_i dxdt \\
&\phantom{xx}{}+ \sum_{j=1}^n\int_0^\infty\int_\Omega\operatorname{div}(B_{ij}(\bm{c})\na\phi_i)
\Delta c_j dxdt, \nonumber
\end{align}
where $B_{ij}(\bm{c})$ is defined in \eqref{1.B}. Furthermore,
\begin{align}\label{1.EH}
{\mathcal H}(\bm{c}(\cdot,T)) &+ C_1{\mathcal E}(\bm{c}(\cdot,T))
+ C_2\int_0^T\int_\Omega(|\na\sqrt{\bm{c}}|^2+|\Delta\bm{c}|^2) dxdt \\
&{}+ C_2\int_0^T\int_\Omega|\bm{\zeta}|^2 dxdt
\le {\mathcal H}(\bm{c}^0) + C_1{\mathcal E}(\bm{c}^0), \nonumber
\end{align}
where $C_1>0$ depends on $\rho$, $n$, $\|D(\bm{c})\|_F$ and $C_2>0$ depends
on $n$, $\|D(\bm{c})\|_F$ ($\|\cdot\|_F$ is the Frobenius matrix norm
and $\rho$ is introduced in Assumption (B3)). Moreover,
$\bm{\zeta}$ is the weak $L^2(\Omega)$ limit of an approximating sequence of
$\sum_{j=1}^n P_L(\bm{c})_{ij}\sqrt{c_j}\na\mu_j$.
\end{theorem}
Some comments are in order. First, by Assumption (B2), the elements of the matrix
$D(\bm{c})$ are bounded for any $\bm{c}\in[0,1]^n$ and therefore, the quantity
$\|D(\bm{c})\|_F$ is bounded uniformly in $\bm{c}$.
Second, the weak formulation \eqref{1.weak} makes sense
since $B_{ij}(\bm{c})\na\log c_j \in L^2(Q_T)$.
Indeed, by the definition of $B(\bm{c})$, we have
$$
B_{ij}(\bm{c})\na\log c_j = \sqrt{c_i}D^{BD}_{ij}(\bm{c})\frac{1}{\sqrt{c_j}}\na c_j,
$$
and the matrix $\sqrt{c_i}D^{BD}_{ij}(\bm{c})/\sqrt{c_j}$ is bounded for all
$\bm{c}\in[0,1]^n$; see Lemma \ref{lem.DB} (iii) below. However, note that
the expression $\sum_{j=1}^n B_{ij}(\bm{c})\na\mu_j$ is generally not an element
of $L^2(Q_T)$. In particular, we cannot expect that $\na\Delta c_i\in L^2(Q_T)$.
Third, we have not been able to identify the weak limit $\bm{\zeta}$ because of
low regularity. However, if
$\sum_{j=1}^n P_L(\bm{c})_{ij}\sqrt{c_j}\na\mu_j\in
L^2_{\rm loc}(0,\infty;L^2(\Omega))$ holds for all $i=1,\ldots,n$, then
we can identify $\zeta_i=\sum_{j=1}^n P_L(\bm{c})_{ij}\sqrt{c_j}\na\mu_j$;
see Lemma \ref{lem.ident}.
To prove Theorem \ref{thm.ex}, we first introduce a truncation with parameter
$\delta\in(0,1)$ as in \cite{ElGa97} to avoid the degeneracy.
Then we reduce the cross-diffusion system to $n-1$
equations by replacing $c_n$ by $1-\sum_{i=1}^{n-1}c_i$. The advantage is that
the diffusion matrix of the reduced system is positive definite
(with a lower bound depending on $\delta$). The existence of solutions
$c_i^\delta$ to the truncated,
reduced system is proved by an approximation as in \cite{Jue16}
and the Leray--Schauder fixed-point theorem; see Section \ref{sec.approx}.
An approximate version of the
free energy estimate \eqref{1.EH}
(proved in Lemma \ref{lem.eei} in Section \ref{sec.unif})
provides suitable uniform bounds that allow
us to perform the limit $\delta\to 0$. The approximate densities $c_i^\delta$
may be negative but, by exploiting the entropy bound for $c_i^\delta$,
their limit $c_i$ turns out to be nonnegative.
The limit $\delta\to 0$ is then performed in Section \ref{sec.exproof}, using
the uniform estimates and compactness arguments.
Our second main result is concerned with the weak-strong uniqueness.
For this, we define the relative entropy and free energy
in the spirit of \cite{GLT17} by, respectively,
\begin{align}
{\mathcal H}(\bm{c}|\bar{\bm{c}}) &:= {\mathcal H}(\bm{c}) - {\mathcal H}(\bar{\bm{c}})
- \frac{\pa{\mathcal H}}{\pa\bm{c}}(\bar{\bm{c}})\cdot(\bm{c}-\bar{\bm{c}})
= \sum_{i=1}^n\int_\Omega\bigg(c_i\log\frac{c_i}{\bar{c}_i} - (c_i-\bar{c}_i)
\bigg)dx, \label{1.relH} \\
{\mathcal E}(\bm{c}|\bar{\bm{c}}) &:= {\mathcal E}(\bm{c}) - {\mathcal E}(\bar{\bm{c}})
- \frac{\pa{\mathcal E}}{\pa\bm{c}}(\bar{\bm{c}})\cdot(\bm{c}-\bar{\bm{c}})
= {\mathcal H}(\bm{c}|\bar{\bm{c}}) + \frac12\sum_{i=1}^n\int_\Omega
|\na(c_i-\bar{c}_i)|^2dx. \label{1.relE}
\end{align}
\begin{theorem}[Weak-strong uniqueness]\label{thm.wsu}
Let Assumptions (A1)--(A2), (B1)--(B4) hold, let $\bm{c}$ be a weak solution
to \eqref{1.eq1}--\eqref{1.mu} with initial datum $\bm{c^0}$, and let
$\bar{\bm{c}}$ be a strong solution to \eqref{1.eq1}--\eqref{1.mu} with
initial datum $\bar{\bm{c}}^0$. We assume that the weak solution
$\bm{c}$ satisfies
\begin{equation}\label{1.cregul}
\sum_{j=1}^n P_L(\bm{c})_{ij}\sqrt{c_j}\na\mu_j
\in L^2_{\rm loc}(0,\infty;L^2(\Omega))
\mbox{ for }i,j=1,\ldots,n
\end{equation}
(see \eqref{1.PL} for the definition of $P_L(\bm{c})$)
and for all $T>0$ the energy and entropy inequalities
\begin{align}
{\mathcal E}(\bm{c}(T)) + \sum_{i,j=1}^n\int_0^T\int_\Omega B_{ij}(\bm{c})
\na\mu_i\cdot\na\mu_j dxdt &\le {\mathcal E}(\bm{c}^0), \label{1.dEdt} \\
{\mathcal H}(\bm{c}(T)) + \sum_{i,j=1}^n\int_0^T\int_\Omega B_{ij}(\bm{c})
\na\log c_i\cdot\na\mu_j dxdt &\le {\mathcal H}(\bm{c}^0). \label{1.dHdt}
\end{align}
The strong solution $\bar{\bm{c}}$ is supposed to be strictly positive,
i.e., there exists $m>0$ such that $\bar{c}_i\ge m$ in $\Omega$ for $t>0$, and
to satisfy the regularity
$$
\bar{c}_i\in L_{\rm loc}^\infty(0,\infty;W^{3,\infty}(\Omega)),
\quad \na\operatorname{div}\bigg(\frac{1}{\bar{c}_i}B_{ij}(\bar{\bm{c}})\na\bar{\mu}_j\bigg)
\in L^\infty_{\rm loc}(0,\infty;L^\infty(\Omega))
$$
for $i=1,\ldots,n$,
as well as for any $T>0$ the energy and entropy conservation identities
\begin{align}
{\mathcal E}(\bar{\bm{c}}(T)) + \sum_{i,j=1}^n\int_0^T\int_\Omega B_{ij}(\bar{\bm{c}})
\na\bar{\mu}_i\cdot\na\bar{\mu}_j dxdt &= {\mathcal E}(\bar{\bm{c}}^0), \label{1.dEdtbar} \\
{\mathcal H}(\bar{\bm{c}}(T)) + \sum_{i,j=1}^n\int_0^T\int_\Omega B_{ij}(\bar{\bm{c}})
\na\log \bar{c}_i\cdot\na\bar{\mu}_j dxdt &= {\mathcal H}(\bar{\bm{c}}^0),
\label{1.dHdtbar}
\end{align}
where $\mu_i=\log c_i-\Delta c_i$ and $\bar{\mu}_i=\log\bar{c}_i-\Delta \bar{c}_i$.
Then, for any $T>0$, there exist constants $C_1>0$, depending only on $\|D(\bm{c})\|_F$,
$n$, and $\rho$, and $C_2(T)>0$, depending only on $T$,
$\operatorname{meas}(\Omega)$, $n$, and $\rho$, such that
\begin{equation}\label{1.comb}
{\mathcal H}(\bm{c}(T)|\bar{\bm{c}}(T)) + C_1{\mathcal E}(\bm{c}(T)|\bar{\bm{c}}(T))
\le C_2(T)\big({\mathcal H}(\bm{c}^0|\bar{\bm{c}}^0) + C_1{\mathcal E}(\bm{c}^0|\bar{\bm{c}}^0)\big).
\end{equation}
In particular, if $\bm{c}^0=\bar{\bm{c}}^0$ then the weak and strong solutions
coincide.
\end{theorem}
Assumption \eqref{1.cregul} guarantees that the flux
$\sum_{j=1}^n B_{ij}(\bm{c})\na\mu_j$ lies in $L^2(Q_T)$. Indeed, we prove in
Lemma \ref{lem.DB} (i) in Section \ref{sec.mobil} that $D_{ij}^{BD}(\bm{c})$
is bounded for $\bm{c}\in[0,1]^n$. Therefore, since
$D^{BD}(\bm{c})=D^{BD}(\bm{c})P_L(\bm{c})$, assumption \eqref{1.cregul}
and $c_i\in L^\infty(Q_T)$ imply that
\begin{equation}\label{1.regflux}
\sum_{j=1}^n B_{ij}(\bm{c})\na\mu_j
= \sqrt{c_i}\sum_{j,k=1}^n D_{ik}^{BD}(\bm{c})P_L(\bm{c})_{kj}\sqrt{c_j}\na\mu_j
\in L^2(Q_T).
\end{equation}
Incidentally, it follows from
$\sum_{j=1}^n P_L(\bm{c})_{ij}\sqrt{c_j}\na\log c_j = 2\na\sqrt{c_i}\in L^2(Q_T)$ that
\begin{equation}\label{1.regc}
\sum_{j=1}^n P_L(\bm{c})_{ij}\sqrt{c_j}\na\Delta c_j
= \sum_{j=1}^n P_L(\bm{c})_{ij}\sqrt{c_j}\na(\log c_j-\mu_j) \in L^2(Q_T).
\end{equation}
Since $\na\Delta c_i$ may be not in $L^2(Q_T)$, we interpret \eqref{1.regc}
in the sense of distributions, i.e.\ for all $\Phi\in
C_0^\infty(\Omega;{\mathbb R}^d)$,
$$
\bigg\langle\sum_{j=1}^n P_L(\bm{c})_{ij}\sqrt{c_j}\na\Delta c_j,\Phi
\bigg\operatorname{ran}gle
= -\sum_{j=1}^n\int_\Omega\big(\na(P_L(\bm{c})_{ij}\sqrt{c_j})\cdot\Phi
+ P_L(\bm{c})_{ij}\sqrt{c_j}\operatorname{div}\Phi\big)\Delta c_j dx.
$$
For the proof of Theorem \ref{thm.wsu}, we estimate first the time derivative of
the relative entropy \eqref{1.relH}:
\begin{align*}
\frac{d{\mathcal H}}{dt}&(\bm{c}|\bar{\bm{c}})
+ C_1\sum_{i=1}^n\int_\Omega\bigg|\sum_{j=1}^n P_L(\bm{c})_{ij}
\sqrt{c_j}\na\log\frac{c_j}{\bar{c}_j}\bigg|^2 dx
+ C_1\sum_{i=1}^n\int_\Omega(\Delta(c_i-\bar{c}_i))^2 dx \\
&\le C_2\sum_{i=1}^n\int_\Omega\bigg|\sum_{j=1}^n P_L(\bm{c})_{ij}
\sqrt{c_j}\na(\mu_j-\bar{\mu}_j)\bigg|^2 dx + C_3\int_\Omega
{\mathcal E}(\bm{c}|\bar{\bm{c}})dx,
\end{align*}
where $C_i>0$ are some constants depending only on the data.
The first term on the right-hand side can be handled by estimating the time
derivative of the relative energy \eqref{1.relE}:
\begin{align*}
\frac{d{\mathcal E}}{dt}&(\bm{c}|\bar{\bm{c}}) + C_4\sum_{i=1}^n\int_\Omega
\bigg|\sum_{j=1}^n P_L(\bm{c})_{ij}\sqrt{c_j}\na(\mu_j-\bar{\mu}_j)\bigg|^2 dx \\
&\le \theta\sum_{i=1}^n\int_\Omega\bigg|\sum_{j=1}^n P_L(\bm{c})_{ij}
\sqrt{c_j}\na\log\frac{c_j}{\bar{c}_j}\bigg|^2 dx
+ \theta\sum_{i=1}^n\int_\Omega(\Delta(c_i-\bar{c}_i))^2 dx \\
&\phantom{xx}{}+ C_5(\theta)\int_\Omega{\mathcal E}(\bm{c}|\bar{\bm{c}})dx,
\end{align*}
where $\theta>0$ can be arbitrarily small. Choosing $\theta=C_1C_4/C_2$,
we can combine both estimates leading to
$$
\frac{d}{dt}\bigg({\mathcal H}(\bm{c}|\bar{\bm{c}}) + \frac{C_2}{C_4}{\mathcal E}(\bm{c}|\bar{\bm{c}})
\bigg)
\le \bigg(C_3+\frac{C_2C_5}{C_4}\bigg){\mathcal E}(\bm{c}|\bar{\bm{c}}),
$$
and the theorem follows after applying Gronwall's lemma. As the computations
are quite involved, we compute first in Section \ref{sec.wsu.formal}
the time derivative of the relative entropy and energy for {\em smooth} solutions.
The rigorous proof of the combined relative entropy-energy inequality
for weak solutions $\bm{c}$ and strong solutions $\bar{\bm{c}}$ is
then performed in Section \ref{sec.wsu.rig}.
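For the reader's convenience, the final Gronwall step behind \eqref{1.comb} reads as follows (a sketch with $\Phi(t):={\mathcal H}(\bm{c}(t)|\bar{\bm{c}}(t))+\frac{C_2}{C_4}{\mathcal E}(\bm{c}(t)|\bar{\bm{c}}(t))$): since ${\mathcal H}(\bm{c}|\bar{\bm{c}})\ge 0$, we have ${\mathcal E}(\bm{c}|\bar{\bm{c}})\le (C_4/C_2)\Phi$, hence
$$
\frac{d\Phi}{dt}\le\bigg(C_3+\frac{C_2C_5}{C_4}\bigg)\frac{C_4}{C_2}\,\Phi
\quad\mbox{and therefore}\quad
\Phi(T)\le\exp\bigg(\Big(\frac{C_3C_4}{C_2}+C_5\Big)T\bigg)\Phi(0),
$$
which is \eqref{1.comb} up to a relabeling of the constants.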
The paper is organized as follows. The Bott--Duffin matrix inverse is introduced
in Section \ref{sec.mobil}, some properties of the mobility matrix $B(\bm{c})$ are
proved, and the combined energy-entropy inequality \eqref{1.cee} is derived for
smooth solutions. The global existence of solutions (Theorem \ref{thm.ex}) is
shown in Section \ref{sec.ex}, while Section \ref{sec.wsu} is concerned with the
proof of the weak-strong uniqueness property (Theorem \ref{thm.wsu}). Finally,
we present some examples verifying Assumptions (B1)--(B4) in Section \ref{sec.exam}.
\subsection*{Notation}
Elements of the matrix $A\in{\mathbb R}^{n\times n}$ are denoted by $A_{ij}$,
$i,j=1,\ldots,n$, and the elements of a vector $\bm{c}\in{\mathbb R}^n$ are $c_1,\ldots,c_n$.
We use the notation $f(\bm{c})=(f(c_1),\ldots,f(c_n))$ for $\bm{c}\in{\mathbb R}^n$
and a function $f:{\mathbb R}\to{\mathbb R}$. The expression $|\na f(\bm{c})|^2$ is defined by
$\sum_{i=1}^n|\na f(c_i)|^2$ and $|\cdot|$ is the usual Euclidean norm.
The matrix $R(\bm{c})\in{\mathbb R}^{n\times n}$
is the diagonal matrix with elements $\sqrt{c_1},\ldots,\sqrt{c_n}$, i.e.\
$R_{ij}(\bm{c})=\sqrt{c_i}\delta_{ij}$ for $i,j=1,\ldots,n$, where
$\delta_{ij}$ denotes the Kronecker delta.
We understand by $\na\bm{\mu}$ the matrix with entries $\pa_{x_i}\mu_j$.
Furthermore, $C>0$, $C_i>0$ are generic constants
with values changing from line to line.
\section{Properties of the mobility matrix and a priori estimates}\label{sec.mobil}
We wish to express the fluxes $c_iu_i$ as a linear combination of the gradients
of the chemical potentials. Since $K(\bm{c})$ has a nontrivial kernel,
we need to use a generalized matrix inverse, the Bott--Duffin inverse.
This inverse and its properties are studied in Section \ref{sec.BD}. The properties
allow us to derive in Section \ref{sec.apriori} some a priori estimates
for the Maxwell--Stefan--Cahn--Hilliard system.
\subsection{The Bott--Duffin inverse}\label{sec.BD}
We wish to invert \eqref{1.eq2} or, equivalently, \eqref{1.D}.
We recall definition \eqref{1.PL} of the projection matrices
$P_L(\bm{c})\in{\mathbb R}^{n\times n}$ on $L(\bm{c})$ and
$P_{L^\perp}(\bm{c})\in{\mathbb R}^{n\times n}$ on $L^\perp(\bm{c})$,
where $L(\bm{c})$ and $L^\perp(\bm{c})$ are defined in \eqref{1.L}.
Then \eqref{1.D} is equivalent to the problem:
\begin{equation}\label{2.Dz}
\mbox{Solve}\quad D(\bm{c})\bm{z} = -P_L(\bm{c})R(\bm{c})\na\bm{\mu}
\quad\mbox{in the space }\bm{z}\in L(\bm{c}),
\end{equation}
where $z_i=\sqrt{c_i}u_i$, recalling that
$R(\bm{c})=\operatorname{diag}(\sqrt{\bm{c}})$.
\begin{lemma}[Solution of \eqref{2.Dz}]\label{lem.Dz}
Suppose that $D(\bm{c})$ satisfies Assumption (B1). The Bott--Duffin inverse
$$
D^{BD}(\bm{c}) = P_L(\bm{c})\big(D(\bm{c})P_L(\bm{c})+P_{L^\perp}(\bm{c})\big)^{-1}
$$
is well-defined, symmetric, and satisfies $\ker D^{BD}(\bm{c})=L^\perp(\bm{c})$.
Furthermore, for any $\bm{y}\in L(\bm{c})$, the linear problem
$D(\bm{c})\bm{z}=\bm{y}$ for $\bm{z}\in L(\bm{c})$ has a unique solution
given by $\bm{z}=D^{BD}(\bm{c})\bm{y}$.
\end{lemma}
We refer to \cite[Lemma 17]{HJT21} for the proof. The property for the kernel
follows from $\ker D^{BD}(\bm{c})=\ker P_L(\bm{c})=L^\perp(\bm{c})$.
Since $P_L(\bm{c})R(\bm{c})\na\bm{\mu}\in L(\bm{c})$
(this follows from the definition of $P_L(\bm{c})$ and $\sum_{i=1}^n c_i=1$),
we infer from Lemma \ref{lem.Dz} that \eqref{2.Dz} has the unique solution
$\bm{z}=-D^{BD}(\bm{c})P_L(\bm{c})R(\bm{c})\na\bm{\mu}\in L(\bm{c})$ or,
componentwise,
$$
c_iu_i = \sqrt{c_i}z_i
= -\sum_{j=1}^n \sqrt{c_i}\big(D^{BD}(\bm{c})P_L(\bm{c})\big)_{ij}\sqrt{c_j}\na\mu_j
= -\sum_{j=1}^n \sqrt{c_i}D^{BD}(\bm{c})_{ij}\sqrt{c_j}\na\mu_j
$$
for $i=1,\ldots,n$, where the last equality follows from
$D^{BD}(\bm{c})P_L(\bm{c})=D^{BD}(\bm{c})$; see \cite[(81)]{HJT21}.
Then we can formulate equation \eqref{1.eq1} as
\begin{equation}\label{2.B}
\pa_t c_i = \operatorname{div}\sum_{j=1}^n B_{ij}(\bm{c})\na\mu_j, \quad
\mbox{where }B_{ij}(\bm{c})=\sqrt{c_i}D^{BD}_{ij}(\bm{c})\sqrt{c_j},
\quad i,j=1,\ldots,n.
\end{equation}
The boundary conditions $c_iu_i\cdot\nu=0$ on $\pa\Omega$ yield
\begin{equation}\label{2.bc}
\sum_{j=1}^n B_{ij}(\bm{c})\na\mu_j\cdot\nu = 0\quad\mbox{on }\pa\Omega,\ t>0,\
i=1,\ldots,n.
\end{equation}
We recall some properties of the Bott--Duffin inverse.
\begin{lemma}[Properties of $D^{BD}(\bm{c})$]\label{lem.DB}
Suppose that $D(\bm{c})\in{\mathbb R}^{n\times n}$ satisfies Assumptions (B1)--(B4). Then:
\begin{itemize}
\item[\rm (i)] The coefficients $D^{BD}_{ij}\in C^1([0,1]^n)$ are bounded for
$i,j=1,\ldots,n$.
\item[\rm (ii)] Let $\lambda(\bm{c})$ be an eigenvalue of
$(D(\bm{c})P_L(\bm{c})+P_{L^\perp}(\bm{c}))^{-1}$. Then
$\lambda_m\le\lambda(\bm{c})\le\lambda_M$, where
$$
\lambda_m = (1+n\|D(\bm{c})\|_F)^{-1}, \quad
\lambda_M = \max\{1,\rho^{-1}\},
$$
$\|\cdot\|_F$ is the Frobenius matrix norm, and
$\rho>0$ is a lower bound for the eigenvalues of $D(\bm{c})$; see Assumption (B3).
\item[\rm (iii)] The functions $\bm{c}\mapsto \sqrt{c_i}D^{BD}_{ij}(\bm{c})/\sqrt{c_j}$
are bounded in $[0,1]^n$ for $i,j=1,\ldots,n$.
\end{itemize}
\end{lemma}
A consequence of (ii) are the inequalities
\begin{equation}\label{2.DBD}
\lambda_m|P_L(\bm{c})\bm{z}|^2 \le \bm{z}^T D^{BD}(\bm{c})\bm{z}
\le \lambda_M|P_L(\bm{c})\bm{z}|^2 \quad\mbox{for }\bm{z}\in{\mathbb R}^n.
\end{equation}
Note that the Frobenius norm of $D(\bm{c})$ is bounded uniformly in $\bm{c}\in[0,1]^n$,
since $D_{ij}$ is bounded by Assumption (B1).
\begin{proof} The points (i) and (ii) are proved in \cite[Lemma 11]{HJT21}
in an interval $[m,1]^n$ for some $m>0$. In fact,
we can conclude (i)--(ii) in the full interval $[0,1]^n$,
since our Assumptions (B2)--(B3) are stronger than those in \cite{HJT21}.
For the proof of (iii),
dropping the argument $\bm{c}$ and observing that $RDR^{-1}=K$, we obtain
\begin{align*}
R D^{BD}R^{-1} &= RP_L(DP_L+P_{L^\perp})^{-1}R^{-1}
= RP_L(R^{-1}R)(DP_L+P_{L^\perp})^{-1}R^{-1} \\
&= RP_LR^{-1}\big(R(DP_L+P_{L^\perp})R^{-1}\big)^{-1} \\
&= RP_LR^{-1}\big(RDR^{-1}RP_LR^{-1}+RP_{L^\perp}R^{-1}\big)^{-1} \\
&= RP_LR^{-1}\big(KRP_LR^{-1}+RP_{L^\perp}R^{-1}\big)^{-1}.
\end{align*}
The determinant of the expression in the brackets equals
$$
\det\big(R(DP_L+P_{L^\perp})R^{-1}\big) = \det(DP_L+P_{L^\perp}).
$$
Therefore, denoting by ``adj'' the adjugate matrix, it follows that
\begin{equation}\label{2.RDR}
R D^{BD}R^{-1} = \frac{RP_LR^{-1}\operatorname{adj}(KRP_LR^{-1}+RP_{L^\perp}R^{-1})}{
\det(DP_L+P_{L^\perp})}.
\end{equation}
By Assumption (B3), the eigenvalues of $D$ are not smaller than $\rho>0$.
The proof of \cite[Lemma 11]{HJT21} shows that the eigenvalues of
$DP_L+P_{L^\perp}$ are not smaller than $\rho>0$, too. This implies that
$\det(DP_L+P_{L^\perp})\ge\rho^{n-1}>0$. The coefficients
$$
(RP_LR^{-1})_{ij} = \delta_{ij}-c_i, \quad (RP_{L^\perp}R^{-1})_{ij} = c_i
$$
are bounded for $\bm{c}\in[0,1]^n$ and, by Assumption (B4), the coefficients of
$K$ are also bounded. Therefore, all elements of
$\operatorname{adj}(KRP_LR^{-1}+RP_{L^\perp}R^{-1})$ are bounded.
We conclude from \eqref{2.RDR} that the entries of $RD^{BD}R^{-1}$ are
bounded in $[0,1]^n$, i.e., point (iii) holds.
\end{proof}
The most important property is the positive definiteness of $D^{BD}(\bm{c})$
on $L(\bm{c})$; see \eqref{2.DBD}. This property implies the a priori estimates
proved in the following subsection.
\subsection{A priori estimates}\label{sec.apriori}
We show an energy inequality for smooth solutions.
\begin{lemma}[Free energy inequality]\label{lem.fei}
Let $\bm{c}\in C^\infty(\Omega\times(0,\infty);{\mathbb R}^n)$ be a positive, bounded, smooth
solution to \eqref{1.eq1}--\eqref{1.mu}. Then, for any
$0<\lambda<\lambda_m$,
\begin{align*}
\frac{d}{dt}\bigg({\mathcal H}(\bm{c}) + \frac{(\lambda_M-\lambda)^2}{\lambda_m\lambda}
{\mathcal E}(\bm{c})\bigg) &+ 2\lambda\int_\Omega|\na\sqrt{\bm{c}}|^2 dx
+ \lambda\int_\Omega|\Delta\bm{c}|^2 dx \\
&{}+ \frac{(\lambda_M-\lambda)^2}{2\lambda}\int_\Omega
|P_L(\bm{c})R(\bm{c})\na\bm{\mu}|^2dx \le 0.
\end{align*}
where the entropy ${\mathcal H}(\bm{c})$ and the free energy ${\mathcal E}(\bm{c})$ are given by
\eqref{1.HE} and $\lambda_m$, $\lambda_M$ are defined in Lemma \ref{lem.DB}.
\end{lemma}
\begin{proof}
We derive first the energy inequality. To this end, we multiply equation
\eqref{2.B} for $c_i$ by $\mu_i=(\pa{\mathcal E}/\pa c_i)(\bm{c})$, integrate over $\Omega$,
integrate by parts (using the boundary conditions \eqref{2.bc}), and take
into account the lower bound \eqref{2.DBD} for $D^{BD}(\bm{c})$:
\begin{align}\label{2.dEdt}
\frac{d{\mathcal E}}{dt}(\bm{c}) &= \sum_{i=1}^n\int_\Omega\frac{\pa{\mathcal E}}{\pa c_i}(\bm{c})
\pa_t c_i dx
= -\sum_{i,j=1}^n\int_\Omega B_{ij}(\bm{c})\na\mu_i\cdot\na\mu_j dx \\
&= -\sum_{i,j=1}^n D_{ij}^{BD}(\bm{c})(\sqrt{c_i}\na\mu_i)\cdot
(\sqrt{c_j}\na\mu_j)dx
\le -\lambda_m\int_\Omega|P_L(\bm{c})R(\bm{c})\na\bm{\mu}|^2 dx. \nonumber
\end{align}
The entropy inequality is derived by multiplying \eqref{2.B} by $\log c_i$,
integrating over $\Omega$, and integrating by parts (using the boundary conditions
\eqref{2.bc}):
\begin{equation*}
\frac{d{\mathcal H}}{dt}(\bm{c})
= \sum_{i=1}^n\int_\Omega(\log c_i)\pa_t c_i dx
= -\sum_{i,j=1}^n\int_\Omega B_{ij}(\bm{c})\na\log c_i
\cdot\na\mu_j dx.
\end{equation*}
To estimate the right-hand side, we
set $G=RP_LR$ (omitting the argument $\bm{c}$) and $M:=B-\lambda G$ for
$\lambda\in(0,\lambda_m)$. Then
\begin{equation}\label{2.dHdt}
\frac{d{\mathcal H}}{dt}(\bm{c}) = -\sum_{i,j=1}^n\int_\Omega M_{ij}\na\log c_i
\cdot\na\mu_j dx - \lambda\sum_{i,j=1}^n\int_\Omega G_{ij}\na\log c_i
\cdot\na\mu_j dx =: I_1 + I_2.
\end{equation}
Before estimating the integrals $I_1$ and $I_2$, we start with some preparations.
We use Lemma \ref{lem.DB} (ii) and $P_L^TP_L=P_L$ to obtain
$$
\bm{z}^TB\bm{z} = (R\bm{z})^T D^{BD}R\bm{z} \ge \lambda_m|P_LR\bm{z}|^2
= \lambda_m(P_LR\bm{z})^T (P_LR\bm{z}) = \lambda_m\bm{z}^T G\bm{z}\quad\mbox{for }
\bm{z}\in{\mathbb R}^n.
$$
The matrix $M$ is positive semidefinite since for any $\bm{z}\in{\mathbb R}^n$,
\begin{equation}\label{psd.zMz}
\bm{z}^T M\bm{z} = \bm{z}^TB\bm{z} - \lambda\bm{z}^T G\bm{z}
\ge (\lambda_m-\lambda)\bm{z}^T G\bm{z} = (\lambda_m-\lambda)|P_LR\bm{z}|^2.
\end{equation}
Furthermore, by Lemma \ref{lem.DB} (ii) again, we have the upper bound
\begin{equation}\label{3.zMz}
\bm{z}^T M\bm{z} = \bm{z}^T(B-\lambda G)\bm{z}
\le (\lambda_M-\lambda)\bm{z}^TG\bm{z} = (\lambda_M-\lambda)|P_LR\bm{z}|^2.
\end{equation}
We are now in the position to estimate the integral $I_1$,
using Young's inequality for any $\theta>0$:
\begin{align*}
I_1 &\le \frac{\theta}{2}\sum_{i,j=1}^n\int_\Omega M_{ij}\na\log c_i
\cdot\na\log c_j dx + \frac{1}{2\theta}\sum_{i,j=1}^n\int_\Omega
M_{ij}\na\mu_i\cdot\na\mu_j dx \\
&\le \frac{\theta}{2}(\lambda_M-\lambda)\int_\Omega|P_LR\na\log\bm{c}|^2 dx
+ \frac{\lambda_M-\lambda}{2\theta}\int_\Omega|P_LR\na\bm{\mu}|^2 dx \\
&= 2\theta(\lambda_M-\lambda)\int_\Omega|\na\sqrt{\bm{c}}|^2 dx
+ \frac{\lambda_M-\lambda}{2\theta}\int_\Omega|P_LR\na\bm{\mu}|^2 dx,
\end{align*}
where the last step follows from $\sum_{j=1}^n (P_L)_{ij}R_j\na\log c_j
=2\na\sqrt{c_i}$, which is a consequence of $\sum_{j=1}^n\na c_j=0$.
For the integral $I_2$, we use the definitions $G_{ij} = c_i\delta_{ij}-c_ic_j$ and
$\mu_j=\log c_j-\Delta c_j$:
\begin{align*}
I_2 &= -\lambda\sum_{i,j=1}^n\int_\Omega (c_i\delta_{ij}-c_ic_j)
\frac{\na c_i}{c_i}\cdot\na(\log c_j-\Delta c_j) dx \\
&= -\lambda\sum_{i=1}^n\int_\Omega\na c_i\cdot\na(\log c_i-\Delta c_i) dx
+ \lambda\int_\Omega\sum_{i=1}^n \na c_i\cdot\sum_{j=1}^n
c_j\na(\log c_j-\Delta c_j)dx \\
&= -\lambda\sum_{i=1}^n\int_\Omega\na c_i\cdot\na(\log c_i-\Delta c_i) dx
= -\lambda\int_\Omega\big(4|\na\sqrt{\bm{c}}|^2 + |\Delta\bm{c}|^2\big)dx,
\end{align*}
where we integrated by parts in the last step.
Inserting the estimates for $I_1$ and $I_2$ into \eqref{2.dHdt} yields
\begin{align*}
\frac{d{\mathcal H}}{dt}(\bm{c}) &+ 4\lambda\int_\Omega|\na\sqrt{\bm{c}}|^2 dx
+ \lambda\int_\Omega|\Delta\bm{c}|^2 dx \\
&\le 2\theta(\lambda_M-\lambda)\int_\Omega|\na\sqrt{\bm{c}}|^2 dx
+ \frac{\lambda_M-\lambda}{2\theta}\int_\Omega|P_LR\na\bm{\mu}|^2 dx.
\end{align*}
We set $\theta=\lambda/(\lambda_M-\lambda)$ to conclude that
\begin{equation}\label{2.dHdt2}
\frac{d{\mathcal H}}{dt}(\bm{c}) + 2\lambda\int_\Omega|\na\sqrt{\bm{c}}|^2 dx
+ \lambda\int_\Omega|\Delta\bm{c}|^2 dx
\le \frac{(\lambda_M-\lambda)^2}{2\lambda}\int_\Omega|P_LR\na\bm{\mu}|^2 dx.
\end{equation}
The right-hand side can be absorbed by the corresponding term in \eqref{2.dEdt}.
Indeed, adding the previous inequality to \eqref{2.dEdt} times
$(\lambda_M-\lambda)^2/(\lambda_m\lambda)$ finishes the proof.
\end{proof}
Note that the energy inequality \eqref{2.dEdt} or the entropy inequality
\eqref{2.dHdt2} alone are not sufficient to control the derivatives of $\bm{c}$ but
only a suitable linear combination. We will prove these inequalities rigorously
in the following section for weak solutions; see Lemma \ref{lem.eei}.
\section{Proof of Theorem \ref{thm.ex}}\label{sec.ex}
We prove the existence of global weak solutions to \eqref{1.eq1}--\eqref{1.bic}.
For this, we construct an approximate system depending on a parameter $\delta>0$,
similarly as in \cite{ElGa97}, and then pass to the limit $\delta\to 0$.
\subsection{An approximate system}\label{sec.approx}
In order to deal with the degeneracy of the matrix $B(\bm{c})$ when a component
of $\bm{c}$ vanishes, we introduce the cutoff function $\chi_\delta:{\mathbb R}^n\to{\mathbb R}^n$ by
$$
(\chi_\delta\bm{c})_i := \left\{\begin{array}{ll}
\delta &\quad\mbox{for }c_i<\delta, \\
c_i &\quad\mbox{for }\delta\le c_i\le 1-\delta, \\
1-\delta &\quad\mbox{for }c_i>1-\delta,
\end{array}\right.
$$
and define the approximate matrix
\begin{equation}\label{3.Bdelta}
B^\delta(\bm{c}) := R(\chi_\delta\bm{c})D^{BD}(\chi_\delta\bm{c})R(\chi_\delta\bm{c}),
\end{equation}
recalling that $R(\chi_\delta\bm{c})=\operatorname{diag}(\sqrt{\chi_\delta\bm{c}})$.
We wish to solve the approximate problem
\begin{align}
& \pa_t c_i^\delta = \operatorname{div}\sum_{j=1}^n B_{ij}^\delta(\bm{c}^\delta)\na\mu_j^\delta,
\quad \mu_j^\delta = \frac{\pa{\mathcal E}^\delta}{\pa c_j}(\bm{c}^\delta)
\quad\mbox{in }\Omega,\ t>0, \label{3.eq} \\
& c_i^\delta(\cdot,0)=c_i^0 \quad\mbox{in }\Omega, \quad
\sum_{j=1}^n B_{ij}^\delta(\bm{c}^\delta)\na\mu_j^\delta\cdot\nu = 0,\
\na c_i^\delta\cdot\nu=0 \quad\mbox{on }\pa\Omega, \label{3.bic}
\end{align}
where $i=1,\ldots,n$, $\sum_{i=1}^n c^0_i =1$ and the approximate energy is defined by
\begin{align}
& {\mathcal E}^\delta(\bm{c}) := {\mathcal H}^\delta(\bm{c})
+ \frac12\sum_{i=1}^n\int_\Omega|\na c_i|^2 dx,
\quad {\mathcal H}^\delta(\bm{c}) := \sum_{i=1}^n\int_\Omega h_i^\delta(c_i)dx, \nonumber \\
& h_i^\delta(r) = \left\{\begin{array}{ll}
r\log\delta - \delta/2 + r^2/(2\delta) &\quad\mbox{for }r<\delta, \\
r\log r &\quad\mbox{for }\delta\le r\le 1-\delta, \\
r\log(1-\delta) - (1-\delta)/2 + r^2/(2(1-\delta))
&\quad\mbox{for }r>1-\delta.
\end{array}\right. \label{3.hdelta}
\end{align}
Observe that the solutions $c_i^\delta$ may be negative.
We will show below that $c_i^\delta$ converges to a nonnegative function as
$\delta\to 0$. The approximate entropy density is chosen in such a way that
$h_i^\delta\in C^2({\mathbb R})$. Indeed, we obtain
$$
(h_i^\delta)'(c_i) = \left\{\begin{array}{ll}
\log\delta + c_i/\delta &\quad\mbox{for }c_i<\delta, \\
\log c_i + 1 &\quad\mbox{for }\delta< c_i< 1-\delta, \\
\log(1-\delta) + c_i/(1-\delta) &\quad\mbox{for }c_i>1-\delta,
\end{array}\right. \quad
(h_i^\delta)''(c_i) = \frac{1}{(\chi_\delta\bm{c})_i}.
$$
With these definitions, we obtain $\mu_i^\delta = (h_i^\delta)'(c_i^\delta)-\Delta
c_i^\delta$ for $i=1,\ldots,n$.
\begin{theorem}[Existence for the approximate system]\label{thm.approx}\quad
Let Assumptions (A1)--(A2) and (B1)--(B4) hold and let $\delta>0$. Then there
exists a weak solution $(\bm{c}^\delta,\bm{\mu}^\delta)$ to \eqref{3.eq}--\eqref{3.bic}
satisfying $\sum_{i=1}^n c_i^\delta(t)=1$ in $\Omega$, $t>0$,
\begin{align*}
& c_i^\delta\in L_{\rm loc}^\infty(0,\infty;H^1(\Omega))\cap
L_{\rm loc}^2(0,\infty;H^2(\Omega)), \\
& \pa_t c_i\in L_{\rm loc}^2(0,\infty;H^2(\Omega)'), \quad
\mu_i^\delta\in L_{\rm loc}^2(0,\infty;H^1(\Omega)), \quad i=1,\ldots,n,
\end{align*}
and the first equation in \eqref{3.eq} as well as the initial condition
in \eqref{3.bic} are satisfied in the sense of
$L_{\rm loc}^2(0,\infty;H^2(\Omega)')$.
\end{theorem}
Before we prove this theorem, we show some properties of the matrix
$B^\delta(\bm{c})$. We introduce the matrices
$P_L(\chi_\delta\bm{c})$, $P_{L^\perp}(\chi_\delta\bm{c})\in{\mathbb R}^{n\times n}$
with entries
$$
P_L(\chi_\delta\bm{c})_{ij} = \delta_{ij} - \frac{\sqrt{(\chi_\delta\bm{c})_i
(\chi_\delta\bm{c})_j}}{\sum_{k=1}^n(\chi_\delta\bm{c})_k}, \quad
P_{L^\perp}(\chi_\delta\bm{c})_{ij} = \frac{\sqrt{(\chi_\delta\bm{c})_i
(\chi_\delta\bm{c})_j}}{\sum_{k=1}^n(\chi_\delta\bm{c})_k}, \quad i,j=1,\ldots,n.
$$
\begin{lemma}[Properties of $B^\delta(\bm{c})$]\label{lem.Bdelta}\
Suppose that $D(\bm{c})$ satisfies Assumptions (B1)--(B4). Then Lemmas
\ref{lem.Dz} and \ref{lem.DB} hold with $P_L(\bm{c})$, $P_{L^\perp}(\bm{c})$, and
$D^{BD}(\bm{c})$ replaced by $P_L(\chi_\delta\bm{c})$,
$P_{L^\perp}(\chi_\delta\bm{c})$, and $D^{BD}(\chi_\delta\bm{c})$. As a consequence,
the matrix $B^\delta(\bm{c})$, defined in \eqref{3.Bdelta}, satisfies
\begin{equation}\label{3.zBdeltaz}
\bm{z}^TB^\delta(\bm{c})\bm{z}\ge\lambda_m|P_L(\chi_\delta\bm{c})R(\chi_\delta\bm{c})
\bm{z}|^2 \quad\mbox{for any }\bm{z},\bm{c}\in{\mathbb R}^n,
\end{equation}
and the first $(n-1)\times(n-1)$ submatrix $\widetilde{B}^\delta(\bm{c})$ of
$B^\delta(\bm{c})$ is positive definite and satisfies for
$\eta(\delta)=\lambda_m\delta^2/n$,
\begin{equation}\label{3.tilde}
\widetilde{\bm{z}}^T\widetilde{B}^\delta(\bm{c})\widetilde{\bm{z}}
\ge \eta(\delta)|\widetilde{\bm{z}}|^2\quad\mbox{for any }\widetilde{\bm{z}}
\in{\mathbb R}^{n-1}.
\end{equation}
\end{lemma}
\begin{proof}
It can be verified that Assumptions (B1)--(B2) hold for $D(\chi_\delta\bm{c})$,
so Lemmas \ref{lem.Dz} and \ref{lem.DB} still hold for the matrix
$D(\chi_\delta\bm{c})$. Inequality \eqref{3.zBdeltaz} is a direct consequence of
Lemma \ref{lem.DB} (ii). It remains to prove \eqref{3.tilde}.
We define for given $\widetilde{\bm{z}}\in{\mathbb R}^{n-1}$ the vector $\bm{z}\in{\mathbb R}^n$ with
$z_i=\widetilde{z}_i$ for $i=1,\ldots,n-1$ and $z_n=0$. Then \eqref{3.zBdeltaz}
becomes
\begin{equation}\label{3.aux}
\widetilde{\bm{z}}^T\widetilde{B}^\delta(\bm{c})\widetilde{\bm{z}}
\ge \lambda_m\big|\widetilde{P}_L(\chi_\delta\bm{c})\widetilde{R}(\chi_\delta\bm{c})
\widetilde{\bm{z}}\big|^2
= \lambda_m\big(\widetilde{R}(\chi_\delta\bm{c})\widetilde{\bm{z}}\big)^T
\widetilde{P}_L(\chi_\delta\bm{c})
\big(\widetilde{R}(\chi_\delta\bm{c})\widetilde{\bm{z}}\big),
\end{equation}
where $\widetilde{A}$ denotes the first $(n-1)\times(n-1)$ submatrix of a given
matrix $A\in{\mathbb R}^{n\times n}$. It follows from the Cauchy--Schwarz inequality that
for any $\zeta\in{\mathbb R}^{n-1}$,
\begin{align*}
\zeta^T\widetilde{P}_L(\chi_\delta\bm{c})\zeta
&= \sum_{i=1}^{n-1}\zeta_i^2 - \left(\sum_{j=1}^{n-1}
\sqrt{\frac{(\chi_\delta\bm{c})_j}{\sum_{k=1}^n(\chi_\delta\bm{c})_k}}
\zeta_j\right)^2
\ge |\zeta|^2 - \sum_{j=1}^{n-1}\frac{(\chi_\delta\bm{c})_j}{\sum_{k=1}^n
(\chi_\delta\bm{c})_k}|\zeta|^2 \\
&= \frac{(\chi_\delta\bm{c})_n}{\sum_{k=1}^n(\chi_\delta\bm{c})_k}|\zeta|^2
\ge \frac{\delta}{n}|\zeta|^2.
\end{align*}
Therefore, \eqref{3.aux} becomes
$$
\widetilde{\bm{z}}^T\widetilde{B}^\delta(\bm{c})\widetilde{\bm{z}}
\ge \frac{\lambda_m\delta}{n}\sum_{i=1}^{n-1}\big|\sqrt{(\chi_\delta\bm{c})_i}
\widetilde{z}_i\big|^2
= \frac{\lambda_m\delta}{n}
\sum_{i=1}^{n-1}(\chi_\delta\bm{c})_i\big|\widetilde{z}_i\big|^2
\ge \frac{\lambda_m\delta^2}{n}|\widetilde{\bm{z}}|^2,
$$
which proves \eqref{3.tilde}.
\end{proof}
We proceed to the proof of Theorem \ref{thm.approx}. The proof is divided
into four steps. First, we reformulate \eqref{3.eq} using the first $n-1$
components. Second, a time-discretized regularized system, similarly
as in \cite[Chapter 4]{Jue16}, is constructed and the existence of weak solutions
to this system is proved. Third, we derive some uniform estimates from the
energy inequality. Finally, we perform the de-regularization limit.
{\em Step 1: Reformulation in $n-1$ components.}
We reformulate the approximate system in terms of the $n-1$ relative chemical
potentials
$$
w_i^\delta = \mu_i^\delta-\mu_n^\delta, \quad i=1,\ldots,n-1.
$$
It holds that
$$
\sum_{j=1}^n \big(P_L(\chi_\delta\bm{c})R(\chi_\delta\bm{c})\big)_{kj}
= \sum_{j=1}^n\bigg(\delta_{kj} - \frac{\sqrt{(\chi_\delta\bm{c})_k
(\chi_\delta\bm{c})_j}}{\sum_{\ell=1}^n(\chi_\delta\bm{c})_\ell}\bigg)
\sqrt{(\chi_\delta\bm{c})_j} = 0.
$$
Then, using $D^{BD}(\bm{c})=D^{BD}(\bm{c})P_L(\bm{c})$ (which is a general
property of the Bott--Duffin inverse; see \cite[(81)]{HJT21}),
\begin{align*}
\sum_{j=1}^n B_{ij}^\delta(\bm{c}) &= \sum_{j=1}^n\sqrt{(\chi_\delta\bm{c})_i}
D_{ij}^{BD}(\bm{c})\sqrt{(\chi_\delta\bm{c})_j} \\
&= \sum_{j,k=1}^n\sqrt{(\chi_\delta\bm{c})_i}D_{ik}^{BD}(\bm{c})
\big(P_L(\chi_\delta\bm{c})R(\chi_\delta\bm{c})\big)_{kj} = 0.
\end{align*}
This shows that
$$
\sum_{j=1}^n B_{ij}^\delta(\bm{c})\na\mu_j^\delta
= \sum_{j=1}^{n-1}B_{ij}^\delta(\bm{c})\na\mu_j^\delta
+ B_{in}^\delta(\bm{c})\na\mu_n^\delta
= \sum_{j=1}^{n-1}B_{ij}^\delta(\bm{c})\na(\mu_j^\delta-\mu_n^\delta).
$$
Consequently, we can rewrite the first equation in \eqref{3.eq} as
\begin{equation}\label{3.cdelta}
\pa_t c_i^\delta = \operatorname{div}\sum_{j=1}^{n-1}\widetilde{B}_{ij}^\delta(\bm{c}^\delta)
\na w_j^\delta, \quad i=1,\ldots,n-1, \quad
c_n^\delta = 1-\sum_{i=1}^{n-1}c_i^\delta,
\end{equation}
recalling that $\widetilde{B}^\delta$ is the first $(n-1)\times(n-1)$ submatrix
of $B^\delta$.
{\em Step 2: Existence for a regularized system.}
We consider for given $\delta>0$, $T>0$, $N\in{\mathbb N}$, and
$(c_1^{k-1},\ldots,c_{n-1}^{k-1})$ the regularized system
\begin{align}\label{3.regul1}
& \frac{1}{\tau}(c_i^k-c_i^{k-1})
= \operatorname{div}\sum_{j=1}^{n-1}\widetilde{B}_{ij}^\delta( \widetilde{\bm{c}}^k )
\na w_j^k - \eps(\Delta^2 w_i^k + w_i^k)\quad\mbox{in }\Omega, \\
& w_i^k = (h_i^\delta)'(c_i^k) - (h_n^\delta)'(c_n^k)
- \Delta(c_i^k-c_n^k), \quad i=1,\ldots,n-1, \label{3.regul2}
\end{align}
where $\tau=T/N$ and $c_n^k=1-\sum_{i=1}^{n-1}c_i^k$.
Equation \eqref{3.regul1} is understood in the weak sense
$$
\frac{1}{\tau} \int_\Omega(c_i^k-c_i^{k-1})\phi_i dx
+ \sum_{j=1}^{n-1} \int_\Omega \widetilde{B}_{ij}^\delta(\bm{c}^k)
\na\phi_i\cdot\na w_j^k dx
+ \eps \int_\Omega(\Delta w_i^k\Delta\phi_i + w_i^k\phi_i)dx = 0
$$
for test functions $\phi_i\in H^2(\Omega)$.
The $\eps$-regularization ensures that $w_i^k\in H^2(\Omega)\hookrightarrow
L^\infty(\Omega)$ since $d\le 3$. In higher space dimensions, we can replace
$\Delta^2 w_i^k$ by $(-\Delta)^m w_i^k$ with $m>d/2$, which gives
$w_i^k\in H^m(\Omega)\hookrightarrow L^\infty(\Omega)$.
We prove the solvability of \eqref{3.regul1}--\eqref{3.regul2} in two steps.
\begin{lemma}[Solvability of \eqref{3.regul2}]\label{lem.solv}
Let $\bm{w}\in L^2(\Omega;{\mathbb R}^{n-1})$.
Then there exists a unique strong solution $\widetilde{\bm{c}}\in H^2(\Omega;{\mathbb R}^{n-1})$
to
\begin{equation}\label{3.deltac}
w_i = (h_i^\delta)'(c_i) - (h_n^\delta)'(c_n) - \Delta(c_i-c_n)\quad\mbox{in }\Omega,
\quad \na c_i\cdot\nu=0\quad\mbox{on }\pa\Omega
\end{equation}
for $i=1,\ldots,n-1$, where $c_n=1-\sum_{i=1}^{n-1}c_i$. This defines the
operator ${\mathcal L}:L^2(\Omega;{\mathbb R}^{n-1})
\to H^2(\Omega;{\mathbb R}^{n-1})$, ${\mathcal L}(\bm{w})=\widetilde{\bm{c}}$.
\end{lemma}
\begin{proof}
The system of equations can be written as
$$
\operatorname{div}(M\na\widetilde{\bm{c}})_i = (h_i^\delta)'(c_i) - (h_n^\delta)'(c_n) - w_i
\quad\mbox{in }\Omega,
$$
where the entries of the diffusion matrix $M$ are $M_{ii}=2$ and
$M_{ij}=1$ for all $i\neq j$. In particular, $M$ is symmetric and positive definite.
Thus, we can apply
the theory for elliptic systems with sublinear growth function and conclude the
existence of a unique weak solution $\widetilde{\bm{c}}\in H^1(\Omega;{\mathbb R}^{n-1})$.
It remains to verify that this solution lies in $H^2(\Omega;{\mathbb R}^{n-1})$.
Summing \eqref{3.deltac} over $i=1,\ldots,n-1$, we find that
$$
\Delta c_n = -\sum_{i=1}^{n-1}\Delta c_i
= \frac{1}{n}\sum_{i=1}^{n-1}(w_i-(h_i^\delta)'(c_i))
+ \frac{n-1}{n} (h_n^\delta)'(c_n)
\in L^2(\Omega)
$$
with the boundary condition $\na c_n\cdot\nu=0$ on $\pa\Omega$.
We infer from elliptic regularity theory that $c_n\in H^2(\Omega)$.
Consequently, $\Delta c_n\in L^2(\Omega)$ and elliptic regularity again
implies that $c_i\in H^2(\Omega)$.
\end{proof}
It follows from Lemma \ref{lem.solv} that we can write \eqref{3.regul1} as
\begin{equation}\label{3.Bregul}
\frac{1}{\tau}({\mathcal L}(\bm{w})_i - c_i^{k-1})
= \operatorname{div}\sum_{j=1}^{n-1} \widetilde{B}_{ij}^\delta(\widetilde{\bm{c}}^k)
\na w_j^k - \eps(\Delta^2 w_i^k+w_i^k)\quad\mbox{in }\Omega,\ i=1,\ldots,n-1.
\end{equation}
\begin{lemma}[Solvability of \eqref{3.Bregul}]
Let $\widetilde{\bm{c}}^{k-1}\in H^2(\Omega;{\mathbb R}^{n-1})$.
Then there exists a weak solution $\bm{w}^k\in H^2(\Omega;{\mathbb R}^{n-1})$
to \eqref{3.Bregul} such that for all $\phi_i\in L^2(0,T;H^2(\Omega))$,
\begin{align*}
\frac{1}{\tau}\int_\Omega({\mathcal L}(\bm{w})_i - c_i^{k-1})\phi_i dx
&+ \sum_{i,j=1}^{n-1}\int_\Omega\widetilde{B}_{ij}^\delta({\mathcal L}(\bm{w}))
\na\phi_i \cdot\na w_j^k dx \\
&{}+ \eps\sum_{i=1}^{n-1}\int_\Omega(\Delta w_i^k\Delta\phi_i+w_i^k\phi_i)dx = 0.
\end{align*}
\end{lemma}
\begin{proof}
Given $\bar{\bm{w}}\in L^\infty(\Omega;{\mathbb R}^{n-1})$ and $\sigma\in[0,1]$,
we wish to find a solution to the linear problem
\begin{equation}\label{3.LM}
{\mathcal A}(\bm{w},\bm{\phi}) = {\mathcal F}(\bm{\phi})\quad\mbox{for }
\bm{\phi}\in H^2(\Omega;{\mathbb R}^{n-1}),
\end{equation}
where
\begin{align*}
{\mathcal A}(\bm{w},\bm{\phi}) &= \sum_{i,j=1}^{n-1}\int_\Omega
\widetilde{B}_{ij}^\delta({\mathcal L}(\bar{\bm{w}}))\na\phi_i\cdot\na w_j dx
+ \eps\sum_{i=1}^{n-1}\int_\Omega(\Delta w_i\Delta\phi_i + w_i\phi_i)dx, \\
{\mathcal F}(\bm{\phi}) &= -\frac{\sigma}{\tau}\int_\Omega({\mathcal L}(\bar{\bm{w}})
- \widetilde{\bm{c}}^{k-1})\cdot\bm{\phi}dx.
\end{align*}
We infer from the boundedness of $\widetilde{B}_{ij}^\delta
({\mathcal L}(\bar{\bm{w}}))$
that the bilinear form ${\mathcal A}$ is continuous on $H^2(\Omega;{\mathbb R}^{n-1})$.
Furthermore, by the positive definiteness of
$\widetilde{B}_{ij}^\delta({\mathcal L}(\bar{\bm{w}}))$, thanks to \eqref{3.tilde},
${\mathcal A}$ is coercive. Moreover, ${\mathcal F}$ is a continuous linear form
on $H^2(\Omega;{\mathbb R}^{n-1})$. We conclude from the Lax--Milgram theorem that there
exists a unique solution $\bm{w}\in H^2(\Omega;{\mathbb R}^{n-1})$ to \eqref{3.LM}.
Since $d\le 3$ by Assumption (A1), we have $H^2(\Omega)\hookrightarrow
L^\infty(\Omega)$ and therefore $\bm{w}\in L^\infty(\Omega;{\mathbb R}^{n-1})$.
This defines the fixed-point operator $S:L^\infty(\Omega;{\mathbb R}^{n-1})\times[0,1]\to
L^\infty(\Omega;{\mathbb R}^{n-1})$, $S(\bar{\bm{w}},\sigma)=\bm{w}$. The operator $S$
is continuous, and it satisfies $S(\bar{\bm{w}},0)=\bm{0}$ for all $\bar{\bm{w}}
\in L^\infty(\Omega;{\mathbb R}^{n-1})$. In view of the compact embedding
$H^2(\Omega)\hookrightarrow L^\infty(\Omega)$, $S$ is also compact. It remains to
verify that all fixed points of $S(\cdot,\sigma)$ are uniformly bounded.
To this end, let $\bm{w}\in L^\infty(\Omega;{\mathbb R}^{n-1})$ be such a fixed point.
Then $\bm{w}\in H^2(\Omega;{\mathbb R}^{n-1})$ solves \eqref{3.LM} with
$\bar{\bm{w}}=\bm{w}$. We choose the test function $\bm{\phi} = \bm{w}$ in \eqref{3.LM}
to find that
\begin{equation}\label{3.aux2}
\frac{\sigma}{\tau}\int_\Omega(\widetilde{\bm{c}}-\widetilde{\bm{c}}^{k-1})
\cdot\bm{w} dx + \sum_{i,j=1}^{n-1}\int_\Omega \widetilde{B}_{ij}^\delta
(\widetilde{\bm{c}})\na w_i\cdot\na w_j dx
+ \eps\sum_{i=1}^{n-1}\int_\Omega((\Delta w_i)^2+w_i^2)dx = 0,
\end{equation}
where $\widetilde{\bm{c}}={\mathcal L}(\bm{w})=(c_1,\ldots,c_{n-1})$ and $c_i$ solves
\eqref{3.regul2} with $w_i^k$ replaced by $w_i$. Using the test function
$c_i-c_i^{k-1}$ in the weak formulation of \eqref{3.regul2} leads to
\begin{align*}
\sum_{i=1}^{n-1}\int_\Omega(c_i-c_i^{k-1})w_i dx
&= \sum_{i=1}^{n-1}\int_\Omega\big(\na(c_i-c_n)\cdot\na(c_i-c_i^{k-1}) \\
&\phantom{xx}{}+ ((h_i^\delta)'(c_i)-(h_i^\delta)'(c_n))(c_i-c_i^{k-1})\big)dx.
\end{align*}
The convexity of the function $h_i^\delta$ and $\sum_{i=1}^{n-1}c_i=1-c_n$ imply that
\begin{align*}
\sum_{i=1}^{n-1}(c_i-c_i^{k-1})(h_i^\delta)'(c_i)
&\ge \sum_{i=1}^{n-1}\big(h_i^\delta(c_i)-h_i^\delta(c_i^{k-1})\big), \\
-\sum_{i=1}^{n-1}(c_i-c_i^{k-1})(h_n^\delta)'(c_n)
&= (c_n-c_n^{k-1})(h_n^\delta)'(c_n) \ge h_n^\delta(c_n)-h_n^\delta(c_n^{k-1}).
\end{align*}
Moreover, since $\sum_{i=1}^{n-1}\na c_i=-\na c_n$ and
$\sum_{i=1}^{n-1}\na c_i^{k-1}=-\na c_n^{k-1}$,
\begin{align*}
\sum_{i=1}^{n-1}\na(c_i-c_n)\cdot\na (c_i-c_i^{k-1})
&= \sum_{i=1}^{n}|\na c_i|^2 - \sum_{i=1}^{n}\na c_i^{k-1}\cdot\na c_i \\
&\ge \frac12\sum_{i=1}^n|\na c_i|^2 - \frac12\sum_{i=1}^n|\na c_i^{k-1}|^2.
\end{align*}
This yields
\begin{align*}
\sum_{i=1}^{n-1}\int_\Omega(c_i-c_i^{k-1})w_i dx
&\ge \sum_{i=1}^{n}\int_\Omega\big(h_i^\delta(c_i)-h_i^\delta(c_i^{k-1})\big)dx
+ \frac12\sum_{i=1}^{n}\int_\Omega\big(|\na c_i|^2 - |\na c_i^{k-1}|^2\big) dx \\
&\ge \widetilde{{\mathcal E}}^\delta(\widetilde{\bm{c}})
- \widetilde{{\mathcal E}}^\delta(\widetilde{\bm{c}}^{k-1}),
\end{align*}
where
$$
\widetilde{{\mathcal E}}^\delta(\widetilde{\bm{c}}) := \widetilde{{\mathcal H}}^\delta(\widetilde{\bm{c}})
+ \sum_{i=1}^n\int_\Omega|\na c_i|^2 dx, \quad
\widetilde{{\mathcal H}}^\delta(\widetilde{\bm{c}}) := {\mathcal H}^\delta(\bm{c}).
$$
Inserting this inequality into \eqref{3.aux2} finally gives
\begin{equation}\label{3.rfei}
\sigma\widetilde{{\mathcal E}}^\delta(\widetilde{\bm{c}})
+ \tau\sum_{i,j=1}^{n-1}\int_\Omega \widetilde{B}_{ij}^\delta(\widetilde{\bm{c}})
\na w_i\cdot\na w_j dx + \eps\tau\int_\Omega(|\Delta\bm{w}|^2 + |\bm{w}|^2)dx
\le \sigma\widetilde{{\mathcal E}}^\delta(\widetilde{\bm{c}}^{k-1}).
\end{equation}
By the positive definiteness of $\widetilde{B}^\delta$ (positive semidefiniteness
is sufficient), this gives a uniform $H^2(\Omega)$ bound and
consequently a uniform $L^\infty(\Omega)$ bound for $\bm{w}$.
The Leray--Schauder fixed-point theorem now implies the existence of a solution
to \eqref{3.regul1}--\eqref{3.regul2}.
\end{proof}
{\em Step 3: Uniform estimates.}
We wish to derive estimates uniform in $\eps$ and $\tau$. The starting point is
the regularized energy estimate \eqref{3.rfei} and the positive definiteness
estimate \eqref{3.tilde}. First, we introduce the piecewise constant in time functions
$\bm{w}^{(\tau)}(x,t)=\bm{w}^k(x)$, $\widetilde{\bm{c}}^{(\tau)}(x,t)
= {\mathcal L}(\bm{w}^k(x))$ for $x\in\Omega$ and $t\in((k-1)\tau,k\tau]$,
$k=1,\ldots,N$, and set
$\bm{w}^{(\tau)}(x,0)=(\pa\widetilde{{\mathcal E}}/\pa\widetilde{\bm{c}})(\widetilde{\bm{c}}^0)$
and $\widetilde{\bm{c}}^{(\tau)}(x,0)=\widetilde{\bm{c}}^0$. Introducing the shift
operator $(\sigma_\tau\bm{w}^{(\tau)})(x,t)=\bm{w}^{(\tau)}(x,t-\tau)$ for
$x\in\Omega$ and $t\ge \tau$, we can formulate \eqref{3.regul1}--\eqref{3.regul2} as
\begin{align}
& \frac{1}{\tau}(\widetilde{\bm{c}}^{(\tau)}-\sigma_\tau\widetilde{\bm{c}}^{(\tau)})
= \operatorname{div}(\widetilde{B}^\delta(\widetilde{\bm{c}})\na\bm{w}^{(\tau)})
- \eps(\Delta^2\bm{w}^{(\tau)}+\bm{w}^{(\tau)}), \label{3.tau1} \\
& w_i^{(\tau)} = (h_i^\delta)'(c_i^{(\tau)}) - (h_n^\delta)'(c_n^{(\tau)})
- \Delta(c_i^{(\tau)}-c_n^{(\tau)}), \quad i=1,\ldots,n-1, \label{3.tau2}
\end{align}
recalling that $\widetilde{\bm{c}}^{(\tau)}={\mathcal L}(\bm{w}^{(\tau)})$
is a function of $\bm{w}^{(\tau)}$. Then \eqref{3.rfei} can be
written after summation over $k=1,\ldots,N$ as
\begin{equation*}
\widetilde{{\mathcal E}}^\delta(\widetilde{\bm{c}}^{(\tau)}(T))
+ \eta(\delta)\int_0^T\int_\Omega|\na\bm{w}^{(\tau)}|^2 dxdt
+ \eps C\int_0^T\|\bm{w}^{(\tau)}\|_{H^2(\Omega)}^2 dt
\le \widetilde{{\mathcal E}}^\delta(\widetilde{\bm{c}}^0),
\end{equation*}
where we used \eqref{3.tilde} and the generalized Poincar\'e inequality with
constant $C>0$. This implies the estimates
\begin{equation}\label{3.west}
C(\delta)\|\bm{w}^{(\tau)}\|_{L^2(0,T;H^1(\Omega))}
+ \sqrt{\eps}\|\bm{w}^{(\tau)}\|_{L^2(0,T;H^2(\Omega))} \le C,
\end{equation}
where $C>0$ denotes here and in the following a constant independent of
$\eps$ and $\tau$.
To derive a uniform estimate for $\widetilde{\bm{c}}^{(\tau)}$, we multiply
\eqref{3.tau2} by $-\Delta c_i^{(\tau)}$, integrate over $Q_T=\Omega\times(0,T)$,
integrate by parts, and sum over $i=1,\ldots,n-1$:
\begin{align*}
\sum_{i=1}^{n-1}\int_0^T\int_\Omega&\na w_i^{(\tau)}\cdot\na c_i^{(\tau)} dxdt
= \sum_{i=1}^{n-1}\int_0^T\int_\Omega\na\big((h_i^\delta)'(c_i^{(\tau)})
- (h_n^\delta)'(c_n^{(\tau)})\big)\cdot\na c_i^{(\tau)} dxdt \\
&\phantom{xx}{}+ \sum_{i=1}^{n-1}\int_0^T\int_\Omega\big((\Delta c_i^{(\tau)})^2
- \Delta c_i^{(\tau)}\Delta c_n^{(\tau)}\big)dxdt =: I_3+I_4.
\end{align*}
Since $\na(h_i^\delta)'(c_i^{(\tau)})=(h_i^\delta)''(c_i^{(\tau)})\na c_i^{(\tau)}
= \na c_i^{(\tau)}/(\chi_\delta\bm{c}^{(\tau)})_i$ and
$\sum_{i=1}^{n-1}\na c_i^{(\tau)}=-\na c_n^{(\tau)}$, the term $I_3$ can be written as
$$
I_3 = \sum_{i=1}^n\int_0^T\int_\Omega
\frac{|\na c_i^{(\tau)}|^2}{(\chi_\delta\bm{c}^{(\tau)})_i}dxdt.
$$
Using the property $\sum_{i=1}^{n-1}\Delta c_i^{(\tau)}=-\Delta c_n^{(\tau)}$,
the remaining term $I_4$ becomes
$$
I_4 = \sum_{i=1}^n\int_0^T\int_\Omega(\Delta c_i^{(\tau)})^2 dxdt.
$$
Therefore, by Young's inequality,
\begin{align*}
\sum_{i=1}^n&\int_0^T\int_\Omega(\Delta c_i^{(\tau)})^2 dxdt
+ \sum_{i=1}^n\int_0^T\int_\Omega
\frac{|\na c_i^{(\tau)}|^2}{(\chi_\delta\bm{c}^{(\tau)})_i}dxdt
= \sum_{i=1}^{n-1}\int_0^T\int_\Omega\na w_i^{(\tau)}\cdot\na c_i^{(\tau)}dxdt \\
&\le \frac12\sum_{i=1}^{n-1}\int_0^T\int_\Omega\bigg(\frac{|\na c_i^{(\tau)}|^2}{
(\chi_\delta\bm{c}^{(\tau)})_i}
+ (\chi_\delta\bm{c}^{(\tau)})_i|\na w_i^{(\tau)}|^2\bigg) dxdt \\
&\le \frac12\sum_{i=1}^{n-1}\int_0^T\int_\Omega\frac{|\na c_i^{(\tau)}|^2}{
(\chi_\delta\bm{c}^{(\tau)})_i}dxdt
+ \frac12\sum_{i=1}^{n-1}\int_0^T\int_\Omega|\na w_i^{(\tau)}|^2dxdt.
\end{align*}
The first term on the right-hand side is absorbed by the left-hand side. Thus,
we deduce from \eqref{3.west} that
$$
\sum_{i=1}^n\int_0^T\int_\Omega(\Delta c_i^{(\tau)})^2 dxdt
+ \frac12\sum_{i=1}^n\int_0^T\int_\Omega
\frac{|\na c_i^{(\tau)}|^2}{(\chi_\delta\bm{c}^{(\tau)})_i}dxdt
\le \frac12\|\na \bm{w}^{(\tau)}\|^2_{L^2(Q_T)}
\le C.
$$
Since $c_i^{(\tau)}\in L^\infty(Q_T)$, we infer from the
previous estimate that
\begin{equation}\label{3.cH2}
\|c_i^{(\tau)}\|_{L^2(0,T;H^2(\Omega))} \le C, \quad i=1,\ldots,n.
\end{equation}
Finally, we derive an estimate for the discrete time derivative. It follows from
\eqref{3.Bregul} that
\begin{align*}
\frac{1}{\tau}\|c_i^{(\tau)}-\sigma_\tau c_i^{(\tau)}\|_{L^2(0,T;H^2(\Omega)')}
&\le \sum_{j=1}^{n-1}\|\widetilde{B}^\delta_{ij}(\widetilde{\bm{c}}^{(\tau)})
\|_{L^\infty(Q_T)}\|\na w_j^{(\tau)}\|_{L^2(Q_T)} \\
&\phantom{xx}{}+ \eps\|w_i^{(\tau)}\|_{L^2(0,T;H^2(\Omega))}.
\end{align*}
The entries of $\widetilde{B}^\delta(\widetilde{\bm{c}}^{(\tau)})$ are bounded since
$\delta\le(\chi_\delta\bm{c}^{(\tau)})_i\le 1-\delta$. Thus, by \eqref{3.west},
\begin{equation}\label{3.ctime}
\tau^{-1}\|c_i^{(\tau)}-\sigma_\tau c_i^{(\tau)}\|_{L^2(0,T;H^2(\Omega)')}
\le C, \quad i=1,\ldots,n-1.
\end{equation}
{\em Step 4: Limit $(\eps,\tau)\to 0$.}
In view of estimates \eqref{3.cH2} and \eqref{3.ctime}, we can apply the
Aubin--Lions lemma in the version of \cite[Theorem 1]{DrJu12} to conclude the
existence of a subsequence, which is not relabeled, such that
as $(\eps,\tau)\to 0$,
$$
c_i^{(\tau)}\to c_i\quad\mbox{strongly in }L^2(0,T;H^1(\Omega)),\ i=1,\ldots,n-1.
$$
We deduce from \eqref{3.west}--\eqref{3.ctime} that, possibly for another subsequence,
\begin{align*}
c_i^{(\tau)} \rightharpoonup c_i &\quad\mbox{weakly in }L^2(0,T;H^2(\Omega)), \\
\tau^{-1}(c_i^{(\tau)}-\sigma_\tau c_i^{(\tau)}) \rightharpoonup \pa_t c_i
&\quad\mbox{weakly in }L^2(0,T;H^2(\Omega)'), \\
w_i^{(\tau)} \rightharpoonup w_i &\quad\mbox{weakly in }L^2(0,T;H^1(\Omega)), \\
\eps w_i^{(\tau)}\to 0 &\quad\mbox{strongly in }L^2(0,T;H^2(\Omega)),
\quad i=1,\ldots,n-1.
\end{align*}
We define $c_n:=1-\sum_{i=1}^{n-1}c_i$. Then $c_n^{(\tau)}\to c_n$ strongly
in $L^2(0,T;H^1(\Omega))$ and weakly in $L^2(0,T;H^2(\Omega))$.
Furthermore, $(c_i^{(\tau)})$ converges, up to a subsequence, pointwise a.e.,
and its limit satisfies $\delta\le(\chi_\delta\bm{c})_i\le 1-\delta$, $i=1,\ldots,n$.
The matrix $\widetilde{B}_{ij}^\delta(\widetilde{\bm{c}}^{(\tau)})$
is uniformly bounded and
$$
\widetilde{B}_{ij}^\delta(\widetilde{\bm{c}}^{(\tau)})
\to \widetilde{B}_{ij}^\delta(\widetilde{\bm{c}})\quad\mbox{strongly in }
L^q(Q_T)\mbox{ for any } q<\infty,\ i,j=1,\ldots,n.
$$
These convergence results allow us to pass to the limit $(\eps,\tau)\to 0$
in the weak formulation of \eqref{3.tau1}--\eqref{3.tau2} to find that
$\bm{c}$ solves
$$
\pa_t c_i = \operatorname{div}\sum_{j=1}^{n-1}\widetilde{B}_{ij}^\delta(\widetilde{\bm{c}})
\na w_j, \quad w_i = (h_i^\delta)'(c_i)-(h_n^\delta)'(c_n)
- \Delta(c_i-c_n)
$$
for $i=1,\ldots,n-1$. Transforming back to the chemical potential $\bm{\mu}$
via $w_i=\mu_i-\mu_n$ and $c_n=1-\sum_{i=1}^{n-1}c_i$, we see that
$\bm{c}^\delta:=\bm{c}$ solves system \eqref{3.eq}--\eqref{3.bic},
where $\mu_i=(h_i^\delta)'(c_i)-\Delta c_i$.
\subsection{Uniform estimates}\label{sec.unif}
We derive energy and entropy estimates for the solutions to \eqref{3.eq},
being uniform in $\delta$.
\begin{lemma}[Energy and entropy inequalities]\label{lem.eei}
Let $\bm{c}^\delta$ be a weak solution to \eqref{3.eq}--\eqref{3.bic},
constructed in Theorem \ref{thm.approx}. Then the following inequalities hold
for any $T>0$,
\begin{align}
& {\mathcal E}^\delta(\bm{c}^\delta(\cdot,T)) + \sum_{i,j=1}^n\int_0^T\int_\Omega
B_{ij}^\delta(\bm{c}^\delta)\na\mu_i^\delta\cdot\na\mu_j^\delta dxdt
\le {\mathcal E}^\delta(\bm{c}^0), \label{3.Ed} \\
& {\mathcal H}^\delta(\bm{c}^\delta(\cdot,T)) + \sum_{i,j=1}^n\int_0^T\int_\Omega
B_{ij}^\delta(\bm{c}^\delta)\na(h_i^\delta)'(c_i^\delta)\cdot\na\mu_j^\delta dxdt
\le {\mathcal H}^\delta(\bm{c}^0), \label{3.Hd} \\
& {\mathcal H}^\delta(\bm{c}^\delta(\cdot,T)) + \frac{(\lambda_M-\lambda)^2}{2\lambda_m\lambda}
{\mathcal E}^\delta(\bm{c}^\delta(\cdot,T)) + \lambda\sum_{i=1}^n\int_0^T\int_\Omega
\frac{|\na c_i^\delta|^2}{(\chi_\delta\bm{c}^\delta)_i}dxdt \label{3.EHd} \\
&\phantom{xxxx}{}+ \lambda\sum_{i=1}^n\int_0^T\int_\Omega(\Delta c_i^\delta)^2 dxdt
+ \frac{(\lambda_M-\lambda)^2}{2\lambda}\int_0^T\int_\Omega
\big|P_L(\chi_\delta\bm{c}^\delta)R(\chi_\delta\bm{c}^\delta)\na\bm{\mu}^\delta
\big|^2 dxdt \nonumber \\
&\phantom{xx}{}\le {\mathcal H}^\delta(\bm{c}^0) + \frac{(\lambda_M-\lambda)^2}{2\lambda_m\lambda}
{\mathcal E}^\delta(\bm{c}^0), \nonumber
\end{align}
where $0<\lambda<\lambda_m$, $\lambda_m$, $\lambda_M$ are introduced in
Lemma \ref{lem.DB}, and $R(\chi_\delta\bm{c}^\delta)
=\operatorname{diag}(\sqrt{\chi_\delta\bm{c}^\delta})$.
\end{lemma}
\begin{proof}
Summing \eqref{3.rfei} with $\sigma=1$ over $k=1,\ldots,N$, we find that
\begin{align*}
\widetilde{{\mathcal E}}^\delta(\widetilde{\bm{c}}^{(\tau)}(\cdot,T))
&+ \sum_{i,j=1}^{n-1}\int_0^T\int_\Omega \widetilde{B}_{ij}^\delta
(\widetilde{\bm{c}}^{(\tau)})
\na w_i^{(\tau)}\cdot\na w_j^{(\tau)} dxdt \\
&{}+ \eps\sum_{i=1}^n\int_0^T\int_\Omega\big((\Delta w_i^{(\tau)})^2
+ (w_i^{(\tau)})^2\big)dxdt \le \widetilde{{\mathcal E}}^\delta(\widetilde{\bm{c}}^0).
\end{align*}
We know from \eqref{3.west} and the construction of $\chi_\delta$ that
$(\bm{w}^{(\tau)})$ is bounded in $L^2(0,T;H^1(\Omega))$ and
$(\widetilde{B}_{ij}^\delta(\widetilde{\bm{c}}))$ is bounded in
$L^\infty(Q_T)$ with respect to $(\eps,\tau)$.
Therefore, we can pass to the limit $(\eps,\tau)\to 0$ in the previous
inequality, and weak lower semicontinuity of the integral functionals
leads to \eqref{3.Ed}.
To show \eqref{3.Hd}, we use $(h_i^\delta)'(c_i^\delta)-(h_i^\delta)'(c_n^\delta)$
as a test function in the weak formulation of \eqref{3.cdelta} and sum over
$i=1,\ldots,n-1$:
$$
{\mathcal H}^\delta(\bm{c}(\cdot,T)) + \sum_{i,j=1}^{n-1}\int_0^T\int_\Omega
\widetilde{B}_{ij}^\delta(\widetilde{\bm{c}}^\delta)
\na\big((h_i^\delta)'(c_i^\delta)-(h_i^\delta)'(c_n^\delta)\big)\cdot
\na w_j^\delta dxdt \le {\mathcal H}^\delta(\bm{c}^0).
$$
This inequality can be rewritten as \eqref{3.Hd} using
$w_i^\delta=\mu_i^\delta-\mu_n^\delta$. Finally, we derive \eqref{3.EHd}
by combining \eqref{3.Hd} and \eqref{3.Ed} and proceeding as in the proof
of Lemma \ref{lem.fei}.
\end{proof}
\subsection{Proof of Theorem \ref{thm.ex}}\label{sec.exproof}
We perform the limit $\delta\to 0$ to finish the proof of Theorem \ref{thm.ex}.
It follows from \cite[Lemma 2.1]{ElLu91} that for sufficiently small $\delta>0$,
there exists $C>0$ (independent of $\delta$)
such that for all $r_1,\ldots,r_n\in{\mathbb R}$ satisfying $\sum_{i=1}^n r_i=1$,
\begin{equation}\label{3.lower}
\sum_{i=1}^n h_i^\delta(r_i)\ge -C.
\end{equation}
Therefore, estimate \eqref{3.EHd} implies that
\begin{equation}
\label{eqnhelp1}
\begin{aligned}
\sum_{i=1}^n\int_\Omega|\na c_i^\delta(\cdot,T)|^2 dx
&+ \sum_{i=1}^n\int_0^T\int_\Omega\frac{|\na c_i^\delta|^2}{
(\chi_\delta\bm{c}^\delta)_i}dxdt
+ \sum_{i=1}^n\int_0^T\int_\Omega(\Delta c_i^\delta)^2 dxdt
\\
&{}+ \int_0^T\int_\Omega\big|P_L(\chi_\delta\bm{c}^\delta)
R(\chi_\delta\bm{c}^\delta)\na\bm{\mu}^\delta\big|^2 dxdt \le C,
\end{aligned}
\end{equation}
and the constant $C>0$ depends on $\lambda_m$, $\lambda_M$, and $\bm{c}^0$.
Mass conservation (or using the test function $\phi_i=1$ in the weak
formulation of \eqref{3.eq}) shows that
$\int_\Omega c_i^\delta(\cdot,T)dx=\int_\Omega c_0^\delta dx$ for any $T>0$,
i.e.\ $\|\bm{c}^\delta\|_{L^\infty(0,T;L^1(\Omega))}\le C$. We conclude from the
Poincar\'e--Wirtinger inequality that
\begin{equation}\label{3.cd}
\|\bm{c}^\delta\|_{L^\infty(0,T;H^1(\Omega))}
+ \|\bm{c}^\delta\|_{L^2(0,T;H^2(\Omega))} \le C.
\end{equation}
Next, we estimate $\pa_t c_i^\delta$. Lemma \ref{lem.Bdelta} implies that
the entries of
$$(D(\chi_\delta\bm{c}^\delta)P_L(\chi_\delta\bm{c}^\delta)
+ P_{L^\perp}(\chi_\delta\bm{c}^\delta))^{-1}$$
are uniformly bounded. Thus,
by the definition of $D^{BD}(\chi_\delta\bm{c}^\delta)$ and \eqref{2.DBD},
$$
\int_0^T\int_\Omega\bigg|\sum_{j=1}^n B_{ij}^\delta(\bm{c}^\delta)
\na\mu_j^\delta\bigg|^2 dxdt \le \lambda_M\int_0^T\int_\Omega
\big|P_L(\chi_\delta\bm{c}^\delta)R(\chi_\delta\bm{c}^\delta)
\na\bm{\mu}^\delta\big|^2 dxdt,
$$
and the right-hand side is bounded by \eqref{eqnhelp1}. Setting
$J_i^\delta:=\sum_{j=1}^n B_{ij}^\delta(\bm{c}^\delta)\na\mu_j^\delta$,
this means that $(J_i^\delta)$ is bounded in $L^2(Q_T)$. Therefore, there exists
a subsequence that is not relabeled such that, as $\delta\to 0$,
$$
J_i^\delta\rightharpoonup J_i\quad\mbox{weakly in }L^2(Q_T).
$$
This implies that
\begin{equation}
\label{eqnhelp2}
\|\pa_t c_i^\delta\|_{L^2(0,T;H^1(\Omega)')} \le C.
\end{equation}
We conclude from \eqref{3.cd} and \eqref{eqnhelp2}, using the Aubin--Lions lemma,
that, for a subsequence (if necessary),
\begin{equation}\label{3.cconv}
\begin{aligned}
c_i^\delta\to c_i &\quad\mbox{strongly in }L^2(0,T;H^1(\Omega)), \\
c_i^\delta \stackrel{\star}{\rightharpoonup} c_i
&\quad\mbox{weakly-$\star$\, in }L^\infty(0,T;H^1(\Omega)), \\
c_i^\delta\rightharpoonup c_i &\quad\mbox{weakly in }L^2(0,T;H^2(\Omega)), \\
\pa_t c_i^\delta\rightharpoonup \pa_t c_i &\quad\mbox{weakly in }
L^2(0,T;H^1(\Omega)').
\end{aligned}
\end{equation}
Performing the limit $\delta\to 0$ in \eqref{3.eq}, we see that
$\pa_t c_i=\operatorname{div} J_i$ holds in the sense of $L^2(0,T;H^1(\Omega)')$.
We prove that $c_i\ge 0$ in $Q_T$, $i=1,\ldots,n$,
following \cite{ElLu91}.
By definition \eqref{3.hdelta} and the lower bound \eqref{3.lower}, we have
for $0<\delta<1$,
\begin{align*}
C&\ge \int_\Omega h_i^\delta(c_i^\delta)dx \ge -C
+ \int_{\{c_i^\delta<\delta\}}\bigg(c_i^\delta\log\delta
- \frac{\delta}{2} + \frac{(c_i^\delta)^2}{2\delta}\bigg)dx \\
&\ge -C + \int_{\{c_i^\delta<0\}}c_i^\delta\log\delta dx
+ \int_{\{0<c_i^\delta<\delta\}}c_i^\delta\log\delta dx - C\delta \\
&\ge -C + \log\delta\int_{\{c_i^\delta<0\}}c_i^\delta dx
+ C\delta\log\delta - C\delta.
\end{align*}
Hence, we obtain
$$
\int_\Omega \max\{0,-c_i^\delta\}dx
= \int_{\{c_i^\delta<0\}}|c_i^\delta|dx \le \frac{C}{|\log\delta|}.
$$
The limit $\delta\to 0$ leads to
$$
\int_\Omega\max\{0,-c_i\}dx \le 0,
$$
implying that $c_i\ge 0$ in $Q_T$. The limit $\delta\to 0$ in
$\sum_{i=1}^n c_i^\delta=1$ gives $\sum_{i=1}^n c_i=1$, hence $c_i\le 1$
holds in $Q_T$.
Next, we identify $J_i$ by showing that $J_i=\sum_{j=1}^n
B_{ij}(\bm{c})\na(\log c_j-\Delta c_j)$ in the sense of distributions.
Inserting the definition of $\mu_i^\delta$ and choosing a test function
$\phi_i\in L^\infty(0,T;$ $W^{2,\infty}(\Omega))$ satisfying $\na\phi_i\cdot\nu=0$
on $\pa\Omega$, we find that
\begin{align}
\int_0^T&\int_\Omega J_i^\delta\cdot\na\phi_i dxdt
= \sum_{j=1}^n\int_0^T\int_\Omega B_{ij}^\delta(\bm{c}^\delta)
\na\phi_i\cdot\na\big((h_j^\delta)'(c_j^\delta)-\Delta c_j^\delta\big) dxdt
\nonumber \\
&= \sum_{j=1}^n\int_0^T\int_\Omega B_{ij}^\delta(\bm{c}^\delta)
\na\phi_i\cdot\na(h_j^\delta)'(c_j^\delta)dxdt
+ \sum_{j=1}^n\int_0^T\int_\Omega\Delta c_j^\delta\operatorname{div}(B_{ij}^\delta(\bm{c}^\delta)
\na\phi_i)dxdt \label{3.Jdelta} \\
&=: I_5 + I_6. \nonumber
\end{align}
By definition \eqref{3.Bdelta} of $B_{ij}^\delta(\bm{c}^\delta)$, we have
$$
I_5 = \sum_{j=1}^n\int_0^T\int_\Omega \sqrt{(\chi_\delta\bm{c}^\delta)_i}
D_{ij}^{BD}(\chi_\delta\bm{c}^\delta)\na\phi_i\cdot
\frac{\na c_j^\delta}{\sqrt{(\chi_\delta\bm{c}^\delta)_j}}dxdt.
$$
Lemma \ref{lem.DB} shows that $\sqrt{c_i}D_{ij}^{BD}(\bm{c})/\sqrt{c_j}$ is
bounded in $[0,1]^n$ and in particular when $c_k=0$ for some index $k$.
The strong convergence $\bm{c}^\delta\to \bm{c}$ implies that
$\chi_\delta\bm{c}^\delta\to\bm{c}$ in $L^q(0,T;L^q(\Omega))$ for any $q<\infty$
such that
$$
I_5\to \sum_{j=1}^n\int_0^T\int_\Omega\sqrt{c_i}D_{ij}^{BD}(\bm{c})
\frac{1}{\sqrt{c_j}}\na\phi_i\cdot\na c_j dxdt
= \sum_{j=1}^n\int_0^T\int_\Omega B_{ij}(\bm{c})\na\phi_i\cdot\na\log c_j dxdt.
$$
The limit in $I_6$ is more involved. We decompose $I_6=I_{61}+I_{62}$, where
$$
I_{61} = \sum_{j=1}^n\int_0^T\int_\Omega\Delta c_j^\delta
B_{ij}^\delta(\bm{c}^\delta)\Delta\phi_i dxdt, \quad
I_{62} = \sum_{j=1}^n\int_0^T\int_\Omega\Delta c_j^\delta
\na B_{ij}^\delta(\bm{c}^\delta)\cdot\na\phi_i dxdt.
$$
We deduce from the strong convergence of $\bm{c}^\delta$ and the weak
convergence of $\Delta c_j^\delta$ that
$$
I_{61} \to \sum_{j=1}^n\int_0^T\int_\Omega\Delta c_j B_{ij}(\bm{c})\Delta\phi_i dxdt.
$$
To show the convergence of $I_{62}$, we consider
\begin{align*}
\int_0^T&\int_\Omega\big|\na\big(B_{ij}^\delta(\bm{c}^\delta)-B_{ij}(\bm{c})\big)
\big|^2 dxdt \\
&= \int_0^T\int_\Omega\bigg|\sum_{k=1}^n\bigg\{\bigg(
\frac{\pa B_{ij}^\delta}{\pa c_k}(\bm{c}^\delta) - \frac{\pa B_{ij}}{\pa c_k}(\bm{c})
\bigg)\na c_k + \frac{\pa B_{ij}^\delta}{\pa c_k}(\bm{c}^\delta)
\na(c_k^\delta-c_k)\bigg\}\bigg|^2 dxdt.
\end{align*}
By Lemma \ref{lem.DB} (i), $\pa D_{ij}^{BD}/\pa c_k$ exists and is bounded
in $[0,1]^n$. Then, by the definition of $B_{ij}(\bm{c})$, we have
$(\pa B_{ij}^\delta/\pa c_k)(\bm{c}^\delta)\to (\pa B_{ij}/\pa c_k)(\bm{c})$
strongly in $L^2(Q_T)$. It follows from $\na c_k^\delta\to \na c_k$ strongly
in $L^2(Q_T)$ that the right-hand side of the previous identity
converges to zero. We infer that
$$
I_{62}\to \sum_{j=1}^n\int_0^T\int_\Omega\Delta c_j \na B_{ij}(\bm{c})\cdot
\na\phi_i dxdt.
$$
Consequently, we have
\begin{align*}
I_6 \to &\sum_{j=1}^n\int_0^T\int_\Omega\Delta c_j\big(B_{ij}(\bm{c})\Delta\phi_i
+ \na B_{ij}(\bm{c})\cdot\na\phi_i\big) dxdt \\
&= \sum_{j=1}^n\int_0^T\int_\Omega\Delta c_j\operatorname{div}(B_{ij}(\bm{c})\na\phi_i)dxdt.
\end{align*}
We have shown that \eqref{3.Jdelta} becomes in the limit $\delta\to 0$
$$
\int_0^T\int_\Omega J_i\cdot\na\phi dxdt
= \sum_{j=1}^n\int_0^T\int_\Omega\big(B_{ij}(\bm{c})\na\phi_i\cdot\na\log c_j
+ \Delta c_j\operatorname{div}(B_{ij}(\bm{c})\na\phi_i)\big)dxdt
$$
and hence, in the sense of distributions,
$$
J_i = \sum_{j=1}^n B_{ij}(\bm{c})\na(\log c_j-\Delta c_j), \quad i=1,\ldots,n.
$$
{\em Step 2: Energy and entropy inequalities.}
The limit $c_i^\delta\rightharpoonup c_i$ weakly-$\star$ in $L^\infty(0,T;H^1(\Omega))$
(see \eqref{3.cconv}) and the weak lower semicontinuity of the energy and
entropy show that
$$
{\mathcal H}(\bm{c}(\cdot,T))\le \liminf_{\delta\to 0}{\mathcal H}^\delta(\bm{c}^\delta(\cdot,T)), \quad
{\mathcal E}(\bm{c}(\cdot,T))\le \liminf_{\delta\to 0}{\mathcal E}^\delta(\bm{c}^\delta(\cdot,T)).
$$
Moreover, because of the weak convergence of $\Delta c_i^\delta$
in $L^2(Q_T)$ from \eqref{3.cconv},
$$
\sum_{i=1}^n\int_0^T\int_\Omega(\Delta c_i)^2 dxdt
\le \liminf_{\delta\to 0}\sum_{i=1}^n\int_0^T\int_\Omega(\Delta c_i^\delta)^2 dxdt.
$$
The combined energy-entropy inequality \eqref{3.EHd} and the property
$|\na(\chi_\delta\bm{c}^\delta)_i|\le|\na c_i^\delta|$ give
$$
\big\|\na\sqrt{(\chi_\delta\bm{c}^\delta)_i}\big\|_{L^2(Q_T)}
= \frac12\bigg\|\frac{\na c_i^\delta}{\sqrt{(\chi_\delta\bm{c}^\delta)_i}}
\bigg\|_{L^2(Q_T)} \le C,
$$
which, together with $(\chi_\delta\bm{c}^\delta)_i\to c_i$ strongly in
$L^2(Q_T)$ leads to
\begin{equation}\label{3.sqrtc}
\na\sqrt{(\chi_\delta\bm{c}^\delta)_i}\rightharpoonup\na\sqrt{c_i}
\quad\mbox{weakly in }L^2(Q_T).
\end{equation}
We conclude that
$$
\|\na\sqrt{c_i}\|_{L^2(Q_T)}\le \liminf_{\delta\to 0}
\big\|\na\sqrt{(\chi_\delta\bm{c}^\delta)_i}\big\|_{L^2(Q_T)}.
$$
Finally, by \eqref{3.EHd}, we observe that $P_L(\chi_\delta\bm{c}^\delta)
R(\chi_\delta\bm{c}^\delta)\na\bm{\mu}^\delta$ is uniformly bounded
in $L^2(Q_T)$ such that, up to a subsequence,
$$
P_L(\chi_\delta\bm{c}^\delta)R(\chi_\delta\bm{c}^\delta)\na\bm{\mu}^\delta
\rightharpoonup \bm{\zeta}\quad\mbox{weakly in }L^2(Q_T).
$$
Hence, again by weak lower semicontinuity of the norm,
$$
\|\bm{\zeta}\|_{L^2(0,T;L^2(\Omega))}
\le\liminf_{\delta\to 0}\big\|(P_L(\chi_\delta\bm{c}^\delta)
R(\chi_\delta\bm{c}^\delta)\na\bm{\mu}^\delta\big\|_{L^2(0,T;L^2(\Omega))}.
$$
It remains to take the limit inferior $\delta\to 0$ in \eqref{3.EHd}
to conclude that the combined energy-entropy inequality \eqref{1.EH} holds.
\begin{lemma}[Identification of $\bm{\zeta}$]\label{lem.ident}
Let \eqref{1.cregul} hold and let $\bm{\zeta}$ be the weak $L^2(Q_T)$ limit
of $P_L(\chi_\delta\bm{c}^\delta)R(\chi_\delta\bm{c}^\delta)\na\bm{\mu}^\delta$.
Then $\bm{\zeta}=P_L(\bm{c})R(\bm{c})\na\bm{\mu}$.
\end{lemma}
\begin{proof}
Let $\phi_i\in C_0^\infty(Q_T)$ be a test function. Then, inserting the definition
$\mu_j^\delta=(h_j^\delta)'(c_j^\delta)-\Delta c_j^\delta$ and integrating by parts,
\begin{align}
\sum_{j=1}^n&\int_0^T\int_\Omega\Big(P_L(\chi_\delta\bm{c}^\delta)_{ij}
\sqrt{(\chi_\delta\bm{c}^\delta)_j}\na\mu_j^\delta
- P_L(\bm{c})_{ij}\sqrt{c_j}\na\mu_j\Big)\cdot\na\phi_i dxdt \nonumber \\
&= \sum_{j=1}^n\int_0^T\int_\Omega\Big(P_L(\chi_\delta\bm{c}^\delta)_{ij}
\sqrt{(\chi_\delta\bm{c}^\delta)_j}\na (h_j^\delta)'(c_j^\delta)
- P_L(\bm{c})_{ij}\sqrt{c_j}\na\log c_j\Big)\cdot\na\phi_i dxdt \label{4.id} \\
&\phantom{xx}{}+ \sum_{j=1}^n\int_0^T\int_\Omega\operatorname{div}\Big\{
\Big(P_L(\chi_\delta\bm{c}^\delta)_{ij}
\sqrt{(\chi_\delta\bm{c}^\delta)_j}-P_L(\bm{c})_{ij}\sqrt{c_j}\Big)\na\phi_i\Big\}
\Delta c^\delta_j dxdt \nonumber \\
&\phantom{xx}{}+ \sum_{j=1}^n\int_0^T\int_\Omega\operatorname{div}\big(P_L(\bm{c})_{ij}\sqrt{c_j}
\na\phi_i\big)\Delta(c_j^\delta-c_j)dxdt. \nonumber
\end{align}
The bracket in the first integral on the right-hand side can be written as
\begin{align*}
P_L(&\chi_\delta\bm{c}^\delta)_{ij}
\sqrt{(\chi_\delta\bm{c}^\delta)_j}\na (h_j^\delta)'(c_j^\delta)
- P_L(\bm{c})_{ij}\sqrt{c_j}\na\log c_j \\
&= P_L(\chi_\delta\bm{c}^\delta)_{ij}
\frac{\na c_j^\delta}{\sqrt{(\chi_\delta\bm{c}^\delta)_j}}
- P_L(\bm{c})_{ij}\frac{\na c_j}{\sqrt{c_j}}.
\end{align*}
Thanks to the convergences \eqref{3.cconv} and \eqref{3.sqrtc}, we can pass to
the limit $\delta\to 0$ in \eqref{4.id}:
$$
\lim_{\delta\to 0}\sum_{j=1}^n\int_0^T\int_\Omega
\Big(P_L(\chi_\delta\bm{c}^\delta)_{ij}
\sqrt{(\chi_\delta\bm{c}^\delta)_j}\na\mu_j^\delta
- P_L(\bm{c})_{ij}\sqrt{c_j}\na\mu_j\Big)\cdot\na\phi_i dxdt = 0.
$$
By the uniqueness of the limit, the claim
$\bm{\zeta}=P_L(\bm{c})R(\bm{c})\na\bm{\mu}$ follows.
\end{proof}
\section{Proof of Theorem \ref{thm.wsu}}\label{sec.wsu}
In this section, we prove the weak-strong uniqueness property.
First, we compute a combined {\em relative} energy-entropy inequality.
Then we use this inequality to derive a stability estimate, which leads
to the desired weak-strong uniqueness result.
\subsection{Evolution of the relative energy and entropy}\label{sec.wsu.formal}
We start by calculating the time evolution of the relative entropy \eqref{1.relH}
and the relative energy \eqref{1.relE} for {\em smooth} solutions $\bm{c}$ and
$\bar{\bm{c}}$. Inserting \eqref{2.B} and integrating by parts leads to
\begin{align*}
\frac{d{\mathcal H}}{dt}(\bm{c}|\bar{\bm{c}})
&= \sum_{i=1}^n\int_\Omega\bigg(\log\frac{c_i}{\bar{c}_i}\pa_t c_i
- \bigg(\frac{c_i}{\bar{c}_i}-1\bigg)\pa_t\bar{c}_i\bigg)dx \\
&= -\sum_{i,j=1}^n\int_\Omega B_{ij}(\bm{c})
\na\log\frac{c_i}{\bar{c}_i}\cdot\na\mu_j dx
+ \sum_{i,j=1}^n\int_\Omega B_{ij}(\bar{\bm{c}})\na\bigg(\frac{c_i}{\bar{c}_i}
\bigg)\cdot\na\bar{\mu}_j dx \\
&= -\sum_{i,j=1}^n\int_\Omega B_{ij}(\bm{c})\na\log\frac{c_i}{\bar{c}_i}
\cdot\na(\mu_j-\bar{\mu}_j)dx \\
&\phantom{xx}{}
- \sum_{i,j=1}^n\int_\Omega\bigg(B_{ij}(\bm{c}) - \frac{c_i}{\bar{c}_i}
B_{ij}(\bar{\bm{c}})\bigg)\na\log\frac{c_i}{\bar{c}_i}\cdot\na\bar{\mu}_j dx.
\end{align*}
Next, we compute
\begin{align}
\frac{d{\mathcal E}}{dt}(\bm{c}|\bar{\bm{c}})
&= \sum_{i=1}^n\int_\Omega\bigg(\log\frac{c_i}{\bar{c}_i}\pa_t c_i
- \bigg(\frac{c_i}{\bar{c}_i}-1\bigg)\pa_t\bar{c}_i\bigg)dx
+ \sum_{i=1}^n\int_\Omega\na(c_i-\bar{c}_i)\cdot\na\pa_t(c_i-\bar{c}_i)dx \nonumber \\
&= \sum_{i=1}^n\bigg\{\bigg(\log\frac{c_i}{\bar{c}_i} - \Delta(c_i-\bar{c}_i)\bigg)
\pa_t c_i - \bigg(\frac{c_i}{\bar{c}_i}-1-\Delta(c_i-\bar{c}_i)\bigg)\pa_t\bar{c}_i
\bigg\}dx \label{4.aux0} \\
&= -\sum_{i,j=1}^n\int_\Omega B_{ij}(\bm{c})\na(\mu_i-\bar{\mu}_i)\cdot\na\mu_j dx
\nonumber \\
&\phantom{xx}{}+ \sum_{i,j=1}^n\int_\Omega B_{ij}(\bar{\bm{c}})
\na\bigg(\frac{c_i}{\bar{c}_i}-1-\Delta(c_i-\bar{c}_i)\bigg)\cdot
\na\bar{\mu}_j dx. \nonumber
\end{align}
We add and subtract the expression $\sum_{i=1}^n\int_\Omega
B_{ij}(\bm{c})\na(\mu_i-\bar{\mu}_i)\cdot\na\bar{\mu}_j dx$:
\begin{align}
\frac{d{\mathcal E}}{dt}(\bm{c}|\bar{\bm{c}})
&= -\sum_{i=1}^n\int_\Omega B_{ij}(\bm{c})\na(\mu_i-\bar{\mu}_i)
\cdot\na(\mu_j-\bar{\mu}_j) dx \nonumber \\
&\phantom{xx}{}+ \sum_{i,j=1}^n\int_\Omega\bigg\{B_{ij}(\bar{\bm{c}})
\bigg(\frac{c_i}{\bar{c}_i}\na\log\frac{c_i}{\bar{c}_i} - \na\Delta(c_i-\bar{c}_i)
\bigg) - B_{ij}(\bm{c})\na(\mu_i-\bar{\mu}_i)\bigg\}\cdot\na\bar{\mu}_j dx
\label{4.aux} \\
&= -\sum_{i,j=1}^n\int_\Omega B_{ij}(\bm{c})\na(\mu_i-\bar{\mu}_i)
\cdot\na(\mu_j-\bar{\mu}_j) dx \nonumber \\
&\phantom{xx}{}- \sum_{i,j=1}^n\int_\Omega\bigg(B_{ij}(\bm{c})
- \frac{c_i}{\bar{c}_i}B_{ij}(\bar{\bm{c}})\bigg)\na(\mu_i-\bar{\mu}_i)\cdot
\na\bar{\mu}_j dx \nonumber \\
&\phantom{xx}{}+ \sum_{i,j=1}^n\int_\Omega B_{ij}(\bar{\bm{c}})
\bigg(\frac{c_i}{\bar{c}_i}-1\bigg)\na\Delta(c_i-\bar{c}_i)\cdot\na\bar{\mu}_j dx.
\nonumber
\end{align}
We want to reformulate the expression $\bar{c}_i^{-1}(c_i-\bar{c}_i)
\na\Delta(c_i-\bar{c}_i)$ in the last integral.
For this, we observe that for any smooth function $f$, it holds that
\begin{align*}
f\na\Delta f &= \na(f\Delta f) - \na f\Delta f
= \na\big(\operatorname{div}(f\na f)- |\na f|^2\big)
- \operatorname{div}(\na f\otimes\na f) + \frac12\na|\na f|^2 \\
&= \na\operatorname{div}(f\na f) - \frac12\na|\na f|^2 - \operatorname{div}(\na f\otimes\na f).
\end{align*}
Therefore,
\begin{align*}
(c_i-\bar{c}_i)\na\Delta(c_i-\bar{c}_i)
&= \na\operatorname{div}\big((c_i-\bar{c}_i)\na(c_i-\bar{c}_i)\big)
- \frac12\na|\na(c_i-\bar{c}_i)|^2 \\
&\phantom{xx}{}- \operatorname{div}\big(\na(c_i-\bar{c}_i)\otimes\na(c_i-\bar{c}_i)\big).
\end{align*}
Inserting this expression into the last term of \eqref{4.aux} and integrating
by parts, we find that
\begin{align*}
\frac{d{\mathcal E}}{dt}(\bm{c}|\bar{\bm{c}})
&= -\sum_{i=1}^n\int_\Omega B_{ij}(\bm{c})\na(\mu_i-\bar{\mu}_i)
\cdot\na(\mu_j-\bar{\mu}_j) dx \\
&\phantom{xx}{}- \sum_{i,j=1}^n\int_\Omega\bigg(B_{ij}(\bm{c})
- \frac{c_i}{\bar{c}_i}B_{ij}(\bar{\bm{c}})\bigg)\na(\mu_i-\bar{\mu}_i)\cdot
\na\bar{\mu}_j dx \\
&\phantom{xx}{}+ \sum_{i,j=1}^n\int_\Omega (c_i-\bar{c}_i)\na(c_i-\bar{c}_i)
\cdot\na\operatorname{div}\bigg(\frac{1}{\bar{c}_i}B_{ij}(\bar{\bm{c}})\na\bar{\mu}_j\bigg)dx \\
&\phantom{xx}{}+ \frac12\sum_{i,j=1}^n\int_\Omega|\na(c_i-\bar{c}_i)|^2
\operatorname{div}\bigg(\frac{1}{\bar{c}_i}B_{ij}(\bar{\bm{c}})\na\bar{\mu}_j\bigg)dx \\
&\phantom{xx}{}+ \sum_{i,j=1}^n\int_\Omega
\na(c_i-\bar{c}_i)\otimes\na(c_i-\bar{c}_i)
:\na\otimes\bigg(\frac{1}{\bar{c}_i}B_{ij}(\bar{\bm{c}})\na\bar{\mu}_j\bigg)dx,
\end{align*}
where $\na\otimes(\bar{c}_i^{-1}B_{ij}(\bar{\bm{c}})\na\bar{\mu}_j)$ is
a matrix with entries $\pa_{x_k}(\bar{c}_i^{-1}B_{ij}(\bar{\bm{c}})
\pa_{x_\ell}\bar{\mu}_j)$
for $k,\ell=1,\ldots,n$ and ``:'' denotes the Frobenius matrix product.
The following lemma states the rigorous result. Since we suppose that
the weak solution satisfies energy and entropy {\em inequalities}
instead of {\em equalities}, we obtain also inequalities for the relative
energy and entropy.
\begin{lemma}[Relative energy and entropy]
Let $\bm{c}$ and $\bar{\bm{c}}$ be a weak and strong solution to
\eqref{1.eq1}--\eqref{1.mu} with initial data $\bm{c}^0$ and $\bar{\bm{c}}^0$,
respectively. Assume that $\bm{c}$ satisfies the regularity \eqref{1.cregul}
and the energy and entropy inequalites \eqref{1.dEdt}--\eqref{1.dHdt}.
Furthermore, we suppose that $\bar{\bm{c}}$ is strictly positive and
satisfies the regularity
$$
\bar{\mu}_i = \log\bar{c}_i-\Delta\bar{c}_i\in L_{\rm loc}^2(0,\infty;H^2(\Omega)),
\quad \bar{c}_i\in L_{\rm loc}^\infty(0,\infty;W^{3,\infty}(\Omega)),
\quad i=1,\ldots,n.
$$
Then the following relative energy and entropy inequalities hold for any $T>0$:
\begin{align}\label{4.relE}
{\mathcal E}(&\bm{c}(T)|\bar{\bm{c}}(T))
+ \sum_{i=1}^n\int_0^T\int_\Omega B_{ij}(\bm{c})\na(\mu_i-\bar{\mu}_i)
\cdot\na(\mu_j-\bar{\mu}_j)dxdt \\
&\le {\mathcal E}(\bm{c}^0|\bar{\bm{c}}^0)
- \sum_{i,j=1}^n\int_0^T\int_\Omega\bigg(B_{ij}(\bm{c})
- \frac{c_i}{\bar{c}_i}B_{ij}(\bar{\bm{c}})\bigg)\na(\mu_i-\bar{\mu}_i)\cdot
\na\bar{\mu}_j dxdt \nonumber \\
&\phantom{xx}{}+ \sum_{i,j=1}^n\int_0^T\int_\Omega (c_i-\bar{c}_i)\na(c_i-\bar{c}_i)
\cdot\na\operatorname{div}\bigg(\frac{1}{\bar{c}_i}B_{ij}(\bar{\bm{c}})\na\bar{\mu}_j\bigg)dxdt
\nonumber \\
&\phantom{xx}{}+ \frac12\sum_{i,j=1}^n\int_0^T\int_\Omega|\na(c_i-\bar{c}_i)|^2
\operatorname{div}\bigg(\frac{1}{\bar{c}_i}B_{ij}(\bar{\bm{c}})\na\bar{\mu}_j\bigg)dxdt
\nonumber \\
&\phantom{xx}{}+ \sum_{i,j=1}^n\int_0^T\int_\Omega
\na(c_i-\bar{c}_i)\otimes\na(c_i-\bar{c}_i)
:\na\otimes\bigg(\frac{1}{\bar{c}_i}B_{ij}(\bar{\bm{c}})\na\bar{\mu}_j\bigg)dxdt,
\nonumber \\
{\mathcal H}(&\bm{c}(T)|\bar{\bm{c}}(T)) \le {\mathcal H}(\bm{c}^0|\bar{\bm{c}}^0)
- \sum_{i,j=1}^n\int_0^T\int_\Omega B_{ij}(\bm{c})\na\log\frac{c_i}{\bar{c}_i}
\cdot\na(\mu_j-\bar{\mu}_j)dxdt \label{4.relH} \\
&\phantom{xx}{}- \sum_{i,j=1}^n\int_0^T\int_\Omega
\bigg(B_{ij}(\bm{c}) - \frac{c_i}{\bar{c}_i}B_{ij}(\bar{\bm{c}})\bigg)
\na\log\frac{c_i}{\bar{c}_i}\cdot\na\bar{\mu}_j dxdt. \nonumber
\end{align}
\end{lemma}
The integrals in \eqref{4.relE} and \eqref{4.relH} are well defined
because of the regularity
properties for weak solutions $\bm{c}$ and the regularity assumptions
on the strong solution $\bar{\bm{c}}$.
Indeed, we have $B_{ij}(\bm{c})\na\mu_j\in L^2(Q_T)$ (see \eqref{1.regflux}),
$B_{ij}(\bm{c})\na\log c_i=2D_{ij}^{BD}(\bm{c})\sqrt{c_j}\na\sqrt{c_i}
\in L^2(Q_T)$ (see \eqref{1.EH}), and using the definition \eqref{1.B},
the assumption \eqref{1.cregul}, and Lemma \ref{lem.DB} (i), we have
$$
B_{ij}(\bm{c})\na\mu_i\cdot\na\mu_j
= D_{ij}^{BD}(\bm{c})\big(2\na\sqrt{c_i} - \sqrt{c_i}\na\Delta c_i\big)
\cdot\big(2\na\sqrt{c_j} - \sqrt{c_j}\na\Delta c_j\big)\in L^1(Q_T).
$$
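For the reader's convenience, the factor $2\na\sqrt{c_i}$ in the last display is just the chain rule: since
$\mu_i=\log c_i-\Delta c_i$ and $B_{ij}(\bm{c})=\sqrt{c_i}\,D_{ij}^{BD}(\bm{c})\sqrt{c_j}$ by \eqref{1.B},
$$
\sqrt{c_i}\,\na\mu_i = \sqrt{c_i}\,\na\log c_i - \sqrt{c_i}\,\na\Delta c_i
= \frac{\na c_i}{\sqrt{c_i}} - \sqrt{c_i}\,\na\Delta c_i
= 2\na\sqrt{c_i} - \sqrt{c_i}\,\na\Delta c_i.
$$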
\begin{proof}
The relative energy and entropy inequalities are proved from the weak
formulation of \eqref{1.eq1} by choosing suitable test functions. For this,
we observe that, by \eqref{1.weak}, $c_i-\bar{c}_i$ satisfies
\begin{align}
0 &= \int_0^\infty\int_\Omega(c_i-\bar{c}_i)\pa_t\phi_i dxdt
+ \int_\Omega ( c_i^0 (x) - \bar c_i^0 (x) ) \phi_i (x,0) dx \label{4.start} \\
&\phantom{xx}{} - \sum_{j=1}^n\int_0^\infty\int_\Omega\big(B_{ij}(\bm{c})\na\log c_j
- B_{ij}(\bar{\bm{c}})\na\log\bar{c}_j\big)\cdot\na\phi_i dxdt \nonumber \\
&\phantom{xx}{}- \sum_{j=1}^n\int_0^\infty\int_\Omega
\Big(\operatorname{div} \big (B_{ij}(\bm{c})\na\phi_i \big) \Delta c_j
- \operatorname{div} \big ( B_{ij}(\bar{\bm{c}})\na\phi_i \big )
\Delta\bar{c}_j\Big)dxdt. \nonumber
\end{align}
By density, this formulation also holds for $\phi_i=\bar{\mu}_i\theta_\eps(t)$, where
$$
\theta_\eps(t) = \left\{\begin{array}{ll}
1 &\quad\mbox{for }0\le t\le T, \\
(T-t)/\eps + 1 &\quad\mbox{for }T<t<T+\eps, \\
0 &\quad\mbox{for }t\ge T+\eps.
\end{array}\right.
$$
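The limit $\eps\to0$ below is justified by the elementary observation that, for any
$g\in L^1_{\rm loc}(0,\infty)$,
$$
-\int_0^\infty g(t)\theta_\eps'(t)\,dt = \frac{1}{\eps}\int_T^{T+\eps}g(t)\,dt
\longrightarrow g(T) \qquad\mbox{as }\eps\to0
$$
for a.e.\ $T>0$ (every Lebesgue point of $g$); this is how the evaluation at $t=T$ in the
display below arises, while the value at $t=0$ comes from the second term in \eqref{4.start}.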
Then, passing to the limit $\eps\to 0$ and summing over $i=1,\ldots,n$, we arrive at
\begin{align*}
\sum_{i=1}^n&\int_\Omega(c_i-\bar{c}_i)\bar{\mu}_i dx\Bigg|_0^T
= \sum_{i=1}^n\int_0^T\langle \pa_t\bar{\mu}_i,c_i-\bar{c}_i\rangle dt \\
&\phantom{xx}{}- \sum_{i,j=1}^n\int_0^T\int_\Omega\big(B_{ij}(\bm{c})
\na\log c_j\cdot\na\bar{\mu}_i
+ \operatorname{div}(B_{ij}(\bm{c})\na\bar{\mu}_i)\Delta c_j\big)dxdt \\
&\phantom{xx}{}+\sum_{i,j=1}^n\int_0^T\int_\Omega\big(B_{ij}(\bar{\bm{c}})
\na\log\bar{c}_j\cdot\na\bar{\mu}_i
+ \operatorname{div}(B_{ij}(\bar{\bm{c}})\na\bar{\mu}_i)\Delta\bar{c}_j\big)dxdt \\
&=: I_7 + I_8 + I_9,
\end{align*}
where $\langle\cdot,\cdot\rangle$ is the duality bracket between $H^1(\Omega)'$ and
$H^1(\Omega)$. This product is well defined, since it holds
in the sense of $H^1(\Omega)'$ that
$$
\pa_t\bar{\mu}_i = \pa_t(\log\bar{c}_i - \Delta\bar{c}_i)
= \sum_{j=1}^n\frac{1}{\bar{c}_i}\operatorname{div}(B_{ij}(\bar{\bm{c}})\na\bar{\mu}_j)
- \sum_{j=1}^n\Delta\operatorname{div}(B_{ij}(\bar{\bm{c}})\na\bar{\mu}_j).
$$
Inserting this expression into $I_7$, the dual
product can be written as an integral:
\begin{align*}
I_7 &= -\sum_{i,j=1}^n\int_0^T\int_\Omega\bigg(B_{ij}(\bar{\bm{c}})
\na\bigg(\frac{c_i}{\bar{c}_i}-1\bigg)\cdot\na\bar{\mu}_j
+ \Delta(c_i-\bar{c}_i)\operatorname{div}(B_{ij}(\bar{\bm{c}})\na\bar{\mu}_j)\bigg)dxdt \\
&= -\sum_{i,j=1}^n\int_0^T\int_\Omega B_{ij}(\bar{\bm{c}})
\na\bigg(\frac{c_i}{\bar{c}_i}-1\bigg)\cdot\na\bar{\mu}_j dxdt \\
&\phantom{xx}{} - \sum_{i,j=1}^n\int_0^T\int_\Omega\bar{c}_i\Delta(c_i-\bar{c}_i)
\operatorname{div}\bigg(\frac{1}{\bar{c}_i}B_{ij}(\bar{\bm{c}})\na\bar{\mu}_j\bigg)dxdt \\
&\phantom{xx}{}- \sum_{i,j=1}^n\int_0^T\int_\Omega\frac{1}{\bar{c}_i}
B_{ij}(\bar{\bm{c}})\Delta(c_i-\bar{c}_i)\na\bar{c}_i\cdot\na\bar{\mu}_j dxdt.
\end{align*}
Replacing $\Delta c_j$ by $\log c_j-\mu_j$ in $I_8$ and integrating by parts
in the term involving the divergence, some terms cancel and we find that
\begin{align*}
I_8 &= -\sum_{i,j=1}^n\int_0^T\int_\Omega\big( B_{ij}(\bm{c})
\na\bar{\mu}_i\cdot\na\log c_j + \operatorname{div}(B_{ij}(\bm{c})\na\bar{\mu}_i)
(\log c_j-\mu_j)\big)dxdt \\
&=-\sum_{i,j=1}^n\int_0^T\int_\Omega B_{ij}(\bm{c})\na\bar{\mu}_i\cdot\na\mu_j dxdt.
\end{align*}
Assumption \eqref{1.cregul} guarantees that the flux
has the regularity $\sum_{j=1}^n B_{ij}(\bm{c})\na\mu_j\in L^2(Q_T)$
such that the last integral is defined.
The remaining term $I_9$ is reformulated in a similar way, leading to
$$
I_9 = \sum_{i,j=1}^n\int_0^T\int_\Omega B_{ij}(\bar{\bm{c}})\na\bar{\mu}_i
\cdot\na\bar{\mu}_j dxdt.
$$
It follows from the definition of the relative energy, the inequality \eqref{1.dEdt}
for ${\mathcal E}(\bm{c})$, and the identity \eqref{1.dEdtbar} for ${\mathcal E}(\bar{\bm{c}})$ that
\begin{align*}
{\mathcal E}(&\bm{c}(T)|\bar{\bm{c}}(T)) - {\mathcal E}(\bm{c}^0|\bar{\bm{c}}^0) \\
&= \big({\mathcal E}(\bm{c}(T))-{\mathcal E}(\bm{c}^0)\big)
- \big({\mathcal E}(\bar{\bm{c}}(T))-{\mathcal E}(\bar{\bm{c}}^0)\big)
- \int_\Omega\bar{\bm{\mu}}\cdot(\bm{c}-\bar{\bm{c}})dx \Big|_0^T \\
&\le -\sum_{i,j=1}^n\int_0^T\int_\Omega\big(B_{ij}(\bm{c})\na\mu_i\cdot\na\mu_j
- B_{ij}(\bar{\bm{c}})\na\bar{\mu}_i\cdot\na\bar{\mu}_j\big)dxdt
- (I_7 + I_8 + I_9) \\
&= -\sum_{i,j=1}^n\int_0^T\int_\Omega B_{ij}(\bm{c})\na(\mu_i-\bar{\mu}_i)
\cdot\na\mu_j dxdt \\
&\phantom{xx}{}-\sum_{i,j=1}^n\int_0^T\int_\Omega B_{ij}(\bar{\bm{c}})
\na\bigg(\frac{c_i}{\bar{c}_i}-1\bigg)\cdot\na\bar{\mu}_j dxdt \\
&\phantom{xx}{} - \sum_{i,j=1}^n\int_0^T\int_\Omega\bar{c}_i\Delta(c_i-\bar{c}_i)
\operatorname{div}\bigg(\frac{1}{\bar{c}_i}B_{ij}(\bar{\bm{c}})\na\bar{\mu}_j\bigg)dxdt \\
&\phantom{xx}{}- \sum_{i,j=1}^n\int_0^T\int_\Omega\frac{1}{\bar{c}_i}
B_{ij}(\bar{\bm{c}})\Delta(c_i-\bar{c}_i)\na\bar{c}_i\cdot\na\bar{\mu}_j dxdt.
\end{align*}
This inequality is just a reformulation of \eqref{4.aux0}, which leads,
by proceeding as in \eqref{4.aux} and the subsequent calculations, to \eqref{4.relE}.
Next, we verify the relative entropy inequality. Taking the test function
$\phi_i=(\log\bar{c}_i)\theta_\eps (t)$ in \eqref{4.start}, passing to the limit
$\eps\to 0$, and summing over $i=1,\ldots,n$ leads to
\begin{align*}
\sum_{i=1}^n&\int_\Omega(c_i-\bar{c}_i)\log\bar{c}_i dx\bigg|_0^T
= \sum_{i=1}^n\int_0^T \int_\Omega(c_i-\bar{c}_i)\pa_t(\log\bar{c}_i)dxdt \\
&\phantom{xx}{} - \sum_{i,j=1}^n\int_0^T\int_\Omega\big(B_{ij}(\bm{c})\na\log c_j
- B_{ij}(\bar{\bm{c}})\na\log\bar{c}_j\big)\cdot\na \log \bar c_i dxdt \nonumber \\
&\phantom{xx}{}- \sum_{i,j=1}^n\int_0^T\int_\Omega
\Big(\operatorname{div} \big (B_{ij}(\bm{c})\na \log \bar c_i \big) \Delta c_j
- \operatorname{div} \big ( B_{ij}(\bar{\bm{c}})\na \log \bar c_i \big )
\Delta\bar{c}_j\Big)dxdt. \nonumber
\end{align*}
This yields, together with \eqref{1.dHdt}, \eqref{1.dHdtbar}, an integration by
parts, and regularity assumption \eqref{1.cregul}, that
\begin{align*}
{\mathcal H}(&\bm{c}(T)|\bar{\bm{c}}(T)) - {\mathcal H}(\bm{c}^0|\bar{\bm{c}}^0) \\
&= \big({\mathcal H}(\bm{c}(T))-{\mathcal H}(\bm{c}^0)\big) - \big({\mathcal H}(\bar{\bm{c}}(T))-{\mathcal H}(\bar{\bm{c}}^0)
\big) - \int_\Omega(\bm{c}-\bar{\bm{c}})\cdot \log\bar{\bm{c}}\,dx\bigg|_0^T
\\
&\le -\sum_{i,j=1}^n\int_0^T\int_\Omega \Big ( B_{ij}(\bm{c}) \na \log c_i
\cdot \na \mu_j - B_{i j}(\bar{\bm{c}}) \na \log \bar c_i \cdot
\nabla \bar \mu_j \Big ) dx dt \\
&\phantom{xx}{} - \sum_{i=1}^n\int_0^T \int_\Omega(c_i-\bar{c}_i)
\pa_t(\log\bar{c}_i)dxdt \\
&\phantom{xx}{} + \sum_{i,j=1}^n\int_0^T\int_\Omega
\Big ( B_{ij}(\bm{c}) \na \mu_j \cdot \na \log \bar c_i
- B_{ij}(\bar{\bm{c}}) \na \bar \mu_j \cdot \na \log \bar c_i
\Big)dxdt \\
&= -\sum_{i,j=1}^n\int_0^T\int_\Omega
\Big ( B_{ij}(\bm{c}) \na\mu_j \cdot \nabla \bigg ( \log \frac{c_i}{\bar c_i} \bigg )
- \nabla \bigg ( \frac{c_i}{\bar c_i} - 1 \bigg ) \cdot B_{i j}(\bar{\bm{c}})
\nabla \bar \mu_j \Big ) dx dt,
\end{align*}
which readily gives \eqref{4.relH}.
\end{proof}
\subsection{Proof of the weak-strong uniqueness property}\label{sec.wsu.rig}
We proceed with the proof of Theorem \ref{thm.wsu}. First, we estimate the
relative entropy inequality \eqref{4.relH} and then the relative energy
inequality \eqref{4.relE}. A combination of both estimates shows \eqref{1.comb},
which proves the weak-strong uniqueness property.
{\em Step 1: Estimating the relative entropy.} As in the proof of Lemma \ref{lem.fei},
we decompose the matrix $B(\bm{c})$ by setting $M(\bm{c}):=B(\bm{c})-\lambda G(\bm{c})$
such that $B(\bm{c}) = M(\bm{c}) + \lambda G(\bm{c})$, where
$G(\bm{c})=R(\bm{c})P_L(\bm{c})R(\bm{c})$ has the entries
$G_{ij}(\bm{c}) = c_i\delta_{ij}-c_ic_j$ and $0<\lambda<\lambda_m$.
In terms of these matrices, we can formulate \eqref{4.relH} as
\begin{align}\label{4.HGM}
{\mathcal H}(&\bm{c}(T)|\bar{\bm{c}}(T)) - {\mathcal H}(\bm{c}^0|\bar{\bm{c}}^0)
\le -\sum_{i,j=1}^n\int_0^T\int_\Omega M_{ij}(\bm{c})\na\log\frac{c_i}{\bar{c}_i}\cdot
\na(\mu_j-\bar{\mu}_j)dxdt \\
&{}- \lambda\sum_{i,j=1}^n\int_0^T\int_\Omega G_{ij}(\bm{c})
\na\log\frac{c_i}{\bar{c}_i}\cdot\na(\mu_j-\bar{\mu}_j)dxdt \nonumber \\
&{}- \sum_{i,j=1}^n\int_0^T\int_\Omega\bigg(B_{ij}(\bm{c})
- \frac{c_i}{\bar{c}_i}B_{ij}(\bar{\bm{c}})\bigg)
\na\log\frac{c_i}{\bar{c}_i}\cdot\na\bar{\mu}_j dxdt
=: I_{10} + I_{11} + I_{12}. \nonumber
\end{align}
{\em Step 1a: Estimate of $I_{10}$.}
We know from \eqref{psd.zMz} and \eqref{3.zMz} that $M(\bm{c})$
is positive semidefinite and satisfies
$\bm{z}^T M(\bm{c})\bm{z}\le (\lambda_M-\lambda)|P_L(\bm{c})R(\bm{c})\bm{z}|^2$ for all
$\bm{z}\in{\mathbb R}^n$. Therefore, using Young's inequality with $\theta>0$,
\begin{align}\label{4.I10}
I_{10} &\le \frac{\theta}{4}\sum_{i,j=1}^n\int_0^T\int_\Omega M_{ij}(\bm{c})
\na\log\frac{c_i}{\bar{c}_i}\cdot\na\log\frac{c_j}{\bar{c}_j} dxdt \\
&\phantom{xx}{}+ \frac{1}{\theta}\sum_{i,j=1}^n\int_0^T\int_\Omega M_{ij}(\bm{c})
\na(\mu_i-\bar{\mu}_i)\cdot \na(\mu_j-\bar{\mu}_j)dxdt \nonumber \\
&\le \frac{\theta}{4}(\lambda_M-\lambda)\sum_{i=1}^n\int_0^T\int_\Omega
\bigg|\sum_{j=1}^n P_L(\bm{c})_{ij}\sqrt{c_j}\na\log\frac{c_j}{\bar{c}_j}\bigg|^2
dxdt \nonumber \\
&\phantom{xx}{}+ \frac{1}{\theta}(\lambda_M-\lambda)\sum_{i=1}^n\int_0^T\int_\Omega
\bigg|\sum_{j=1}^n P_L(\bm{c})_{ij}\sqrt{c_j}\na(\mu_j-\bar{\mu}_j)\bigg|^2 dxdt.
\nonumber
\end{align}
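The first inequality above is the weighted Young inequality for the symmetric positive semidefinite
matrix $M(\bm{c})$, which we record explicitly since it is used with the normalization $\theta/4$ and
$1/\theta$ throughout: for (vector-valued) entries $a_i$, $b_i$ and any $\theta>0$,
$$
\sum_{i,j=1}^n M_{ij}(\bm{c})\,a_i\cdot b_j
\le \frac{\theta}{4}\sum_{i,j=1}^n M_{ij}(\bm{c})\,a_i\cdot a_j
+ \frac{1}{\theta}\sum_{i,j=1}^n M_{ij}(\bm{c})\,b_i\cdot b_j,
$$
which follows from the Cauchy--Schwarz inequality for the semidefinite bilinear form induced by
$M(\bm{c})$ together with $pq\le\frac{\theta}{4}p^2+\frac{1}{\theta}q^2$; here it is applied pointwise
with $a_i=\na\log(c_i/\bar{c}_i)$ and $b_i=\na(\mu_i-\bar{\mu}_i)$.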
{\em Step 1b: Estimate of $I_{11}$.}
In the term $I_{11}$, we replace $\mu_j-\bar{\mu}_j$ by $\log(c_j/\bar{c}_j)
-\Delta(c_j-\bar{c}_j)$ and compute both terms in the difference separately.
The definition $G_{ij}(\bm{c})=\sqrt{c_i}P_L(\bm{c})_{ij}\sqrt{c_j}$ and
the property $P_L(\bm{c})^2=P_L(\bm{c})$ lead to
\begin{align}\label{4.G1}
\sum_{i,j=1}^n&\int_0^T\int_\Omega G_{ij}(\bm{c})\na\log\frac{c_i}{\bar{c}_i}
\cdot\na\log\frac{c_j}{\bar{c}_j}dxdt \\
&= \sum_{i,j=1}^n\int_0^T\int_\Omega\sqrt{c_i} P_L(\bm{c})_{ij}\sqrt{c_j}
\na\log\frac{c_i}{\bar{c}_i}\cdot\na\log\frac{c_j}{\bar{c}_j}dxdt \nonumber \\
&= \sum_{i=1}^n\int_0^T\int_\Omega\bigg|\sum_{j=1}^n P_L(\bm{c})_{ij}
\sqrt{c_j}\na\log\frac{c_j}{\bar{c}_j}\bigg|^2 dxdt. \nonumber
\end{align}
Furthermore, we use $G_{ij}(\bm{c}) = c_i\delta_{ij}-c_ic_j$ and integration by
parts to find that
\begin{align*}
\sum_{i,j=1}^n&\int_0^T\int_\Omega
G_{ij}(\bm{c})\na\log\frac{c_i}{\bar{c}_i}\cdot\na\Delta(c_j-\bar{c}_j)dx dt \\
&= -\sum_{i,j=1}^n\int_0^T\int_\Omega\operatorname{div}\bigg(( c_i\delta_{ij}-c_ic_j)
\na\log\frac{c_i}{\bar{c}_i}\bigg)\Delta(c_j-\bar{c}_j)dxdt \\
&= -\sum_{i=1}^n\int_0^T\int_\Omega\operatorname{div}(\na c_i-c_i\na\log\bar{c}_i)
\Delta(c_i-\bar{c}_i)dxdt \\
&\phantom{xx}{}+ \sum_{i,j=1}^n\int_0^T\int_\Omega\operatorname{div}(c_j\na c_i-c_ic_j
\na\log\bar{c}_i)\Delta(c_j-\bar{c}_j)dxdt \\
&= -\sum_{i=1}^n\int_0^T\int_\Omega\operatorname{div}(\na c_i-c_i\na\log\bar{c}_i)
\Delta(c_i-\bar{c}_i)dxdt \\
&\phantom{xx}{}- \sum_{i,j=1}^n\int_0^T\int_\Omega\operatorname{div}(c_ic_j
\na\log\bar{c}_i)\Delta(c_j-\bar{c}_j)dxdt,
\end{align*}
where we used $\sum_{i=1}^n c_j\na c_i=0$ in the last step.
We mention that $\sum_{j=1}^n G_{ij}(\bm{c})\na\Delta c_j\in L^2(Q_T)$
because of \eqref{1.regc}, so the first integral in the previous computation
is well defined. It follows from $\Delta c_i\Delta(c_i-\bar{c}_i)
= (\Delta(c_i-\bar{c}_i))^2 + \Delta\bar{c}_i\Delta(c_i-\bar{c}_i)$ that
\begin{align}\label{4.G2}
\sum_{i,j=1}^n&\int_0^T\int_\Omega G_{ij}(\bm{c})\na\log\frac{c_i}{\bar{c}_i}
\cdot\na\Delta(c_j-\bar{c}_j) dxdt
= -\sum_{i=1}^n\int_0^T\int_\Omega(\Delta(c_i-\bar{c}_i))^2 dxdt \\
&\phantom{xx}{}
- \sum_{i=1}^n\int_0^T\int_\Omega\operatorname{div}(\na\bar{c}_i-c_i\na\log\bar{c}_i)
\Delta(c_i-\bar{c}_i)dxdt \nonumber \\
&\phantom{xx}{}- \sum_{i,j=1}^n\int_0^T\int_\Omega\operatorname{div}(c_ic_j\na\log\bar{c}_i)
\Delta(c_j-\bar{c}_j)dxdt. \nonumber
\end{align}
We multiply \eqref{4.G1} by $-\lambda$ and \eqref{4.G2} by $\lambda$ and
sum both expressions to find that
\begin{align}\label{4.I11}
I_{11} &= -\lambda\sum_{i=1}^n\int_0^T\int_\Omega\bigg|\sum_{j=1}^n P_L(\bm{c})_{ij}
\sqrt{c_j}\na\log\frac{c_j}{\bar{c}_j}\bigg|^2 dxdt
- \lambda\sum_{i=1}^n\int_0^T\int_\Omega(\Delta(c_i-\bar{c}_i))^2 dxdt \\
&\phantom{xx}{}- \lambda\sum_{i=1}^n\int_0^T\int_\Omega
\operatorname{div}(\na\bar{c}_i-c_i\na\log\bar{c}_i)\Delta(c_i-\bar{c}_i)dxdt \nonumber \\
&\phantom{xx}{}- \lambda\sum_{i,j=1}^n\int_0^T\int_\Omega
\operatorname{div}(c_ic_j\na\log\bar{c}_i)\Delta(c_j-\bar{c}_j)dxdt. \nonumber
\end{align}
We apply Young's inequality to the last two terms. The third term in \eqref{4.I11}
becomes
\begin{align*}
-&\lambda\sum_{i=1}^n\int_0^T\int_\Omega
\operatorname{div}(\na\bar{c}_i-c_i\na\log\bar{c}_i)\Delta(c_i-\bar{c}_i)dxdt \\
&\le \frac{\lambda}{4}\sum_{i=1}^n\int_0^T\int_\Omega(\Delta(c_i-\bar{c}_i))^2 dxdt
+ \lambda\sum_{i=1}^n\int_0^T\int_\Omega|\operatorname{div}((c_i-\bar{c}_i)\na\log\bar{c}_i)|^2
dxdt \\
&\le \frac{\lambda}{4}\sum_{i=1}^n\int_0^T\int_\Omega(\Delta(c_i-\bar{c}_i))^2
dxdt \\
&\phantom{xx}{}+ 2\lambda\sum_{i=1}^n\|\na\log\bar{c}_i\|^2_{L^\infty(Q_T)}
\int_0^T\int_\Omega|\na(c_i-\bar{c}_i)|^2 dxdt \\
&\phantom{xx}{}+ 2\lambda\sum_{i=1}^n
\|\Delta\log\bar{c}_i\|^2_{L^\infty(Q_T)}
\int_0^T\int_\Omega(c_i-\bar{c}_i)^2 dxdt \\
&\le \frac{\lambda}{4}\sum_{i=1}^n\int_0^T\int_\Omega(\Delta(c_i-\bar{c}_i))^2 dxdt
\\
&\phantom{xx}{}+ \lambda C\sum_{i=1}^n\int_0^T\int_\Omega
\big((c_i-\bar{c}_i)^2+|\na(c_i-\bar{c}_i)|^2\big)dxdt,
\end{align*}
where the constant $C>0$ depends on the $L^\infty$ norms of $\na\log\bar{\bm{c}}$
and $\Delta\log\bar{\bm{c}}$. Next, for the fourth term in \eqref{4.I11},
\begin{align*}
-\lambda&\sum_{i,j=1}^n\int_0^T\int_\Omega
\operatorname{div}(c_ic_j\na\log\bar{c}_i)\Delta(c_j-\bar{c}_j)dxdt \\
&\le \frac{\lambda}{4}\sum_{i=1}^n\int_0^T\int_\Omega(\Delta(c_i-\bar{c}_i))^2 dxdt
+ \lambda\sum_{j=1}^n\int_0^T\int_\Omega\bigg|\sum_{i=1}^n\operatorname{div}(c_ic_j
\na\log\bar{c}_i)\bigg|^2 dxdt.
\end{align*}
We estimate the integrand of the last term, taking into account that
$\sum_{i=1}^n\bar{c}_i\na\log\bar{c}_i=\sum_{i=1}^n\na\bar{c}_i=0$:
\begin{align*}
\sum_{i=1}^n&\operatorname{div}(c_ic_j\na\log\bar{c}_i) = \sum_{i=1}^n\operatorname{div}
\big((c_i-\bar{c}_i)c_j\na\log\bar{c}_i\big) \\
&= \sum_{i=1}^n c_j\operatorname{div}((c_i-\bar{c}_i)\na\log\bar{c}_i)
+ \sum_{i=1}^n(c_i-\bar{c}_i)\na\log\bar{c}_i\cdot\na c_j \\
&= \sum_{i=1}^n c_j\operatorname{div}((c_i-\bar{c}_i)\na\log\bar{c}_i)
+ \sum_{i=1}^n c_i\na\log\bar{c}_i\cdot\na (c_j-\bar{c}_j)
+ \sum_{i=1}^n(c_i-\bar{c}_i)\na\log\bar{c}_i\cdot\na\bar{c}_j
\\
&\le C\sum_{i=1}^n\big(|c_i-\bar{c}_i| + |\na(c_i-\bar{c}_i)|\big),
\end{align*}
where $C>0$ depends on the $L^\infty$ norms of $\na\log\bar{\bm{c}}$ and
$\Delta\log\bar{\bm{c}}$. This yields
\begin{align*}
-\lambda&\sum_{i,j=1}^n\int_0^T\int_\Omega
\operatorname{div}(c_ic_j\na\log\bar{c}_i)\Delta(c_j-\bar{c}_j)dxdt \\
&\le \frac{\lambda}{4}\sum_{i=1}^n\int_0^T\int_\Omega(\Delta(c_i-\bar{c}_i))^2 dxdt
+ \lambda C\sum_{i=1}^n\int_0^T\int_\Omega
\big((c_i-\bar{c}_i)^2 + |\na(c_i-\bar{c}_i)|^2\big)dxdt.
\end{align*}
Using these estimates in \eqref{4.I11}, we arrive at
\begin{align}\label{4.I11final}
I_{11} &\le -\lambda\sum_{i=1}^n\int_0^T\int_\Omega\bigg|\sum_{j=1}^n
P_L(\bm{c})_{ij}\sqrt{c_j}\na\log\frac{c_j}{\bar{c}_j}\bigg|^2 dxdt
- \frac{\lambda}{2}\sum_{i=1}^n\int_0^T\int_\Omega(\Delta(c_i-\bar{c}_i))^2 dxdt \\
&\phantom{xx}{}+ \lambda C\sum_{i=1}^n\int_0^T\int_\Omega
\big((c_i-\bar{c}_i)^2 + |\na(c_i-\bar{c}_i)|^2\big)dxdt. \nonumber
\end{align}
{\em Step 1c: Estimate of $I_{12}$.}
By definition of $B_{ij}(\bm{c})$ and Young's inequality with $\theta'>0$,
\begin{align*}
I_{12} &= -\sum_{i,j=1}^n\int_0^T\int_\Omega\sqrt{c_i}\bigg(
D_{ij}^{BD}(\bm{c})\sqrt{c_j} - \sqrt{\frac{c_i}{\bar{c}_i}}D_{ij}^{BD}(\bar{\bm{c}})
\sqrt{\bar{c}_j}\bigg)\na\log\frac{c_i}{\bar{c}_i}\cdot\na\bar{\mu}_j dxdt \\
&\le \frac{\theta'}{4}\sum_{i=1}^n\int_0^T\int_\Omega c_i\bigg|\na\log
\frac{c_i}{\bar{c}_i}\bigg|^2 dxdt \\
&\phantom{xx}{}+ \frac{n}{\theta'}\sum_{i,j=1}^n\int_0^T\int_\Omega\bigg(
D_{ij}^{BD}(\bm{c})\sqrt{c_j} - \sqrt{\frac{c_i}{\bar{c}_i}}
D_{ij}^{BD}(\bar{\bm{c}})\sqrt{\bar{c}_j}\bigg)^2|\na\bar{\mu}_j|^2 dxdt.
\end{align*}
The bracket of the second term can be estimated according to
\begin{align}
\bigg|D_{ij}^{BD}&(\bm{c})\sqrt{c_j} - \sqrt{\frac{c_i}{\bar{c}_i}}
D_{ij}^{BD}(\bar{\bm{c}})\sqrt{\bar{c}_j}\bigg| \label{4.estD} \\
&= \bigg|D_{ij}^{BD}(\bm{c})\sqrt{c_j} - D_{ij}^{BD}(\bar{\bm{c}})\sqrt{\bar{c}_j}
- \frac{\sqrt{c_i}-\sqrt{\bar{c}_i}}{\sqrt{\bar{c}_i}}D_{ij}^{BD}(\bar{\bm{c}})
\sqrt{\bar{c}_j}\bigg| \nonumber \\
&\le \frac{C}{\sqrt{m}}\sum_{i=1}^n\big(|c_i-\bar{c}_i|
+ |\sqrt{c_i}-\sqrt{\bar{c}_i}|\big)
\le C(m)\sum_{i=1}^n|c_i-\bar{c}_i|, \nonumber
\end{align}
using the assumption $\bar{c}_i\ge m>0$ and the boundedness of $D_{ij}^{BD}$
(see Lemma \ref{lem.DB} (i)). It follows that
\begin{equation}
I_{12} \le \frac{\theta'}{4}\sum_{i=1}^n\int_0^T\int_\Omega c_i\bigg|\na\log
\frac{c_i}{\bar{c}_i}\bigg|^2 dxdt + C(m,\theta')\sum_{i=1}^n
\int_0^T\int_\Omega(c_i-\bar{c}_i)^2 dxdt. \label{4.I12}
\end{equation}
{\em Step 1d: Combining the estimates.}
We deduce from \eqref{4.HGM}, after inserting estimates \eqref{4.I10},
\eqref{4.I11final}, and \eqref{4.I12} for $I_{10}$, $I_{11}$, and $I_{12}$,
respectively, that
\begin{align}
{\mathcal H}(&\bm{c}(T)|\bar{\bm{c}}(T)) \le {\mathcal H}(\bm{c}^0|\bar{\bm{c}}^0) \nonumber \\
&{}+ \bigg(\frac{\theta}{4}(\lambda_M-\lambda) - \lambda\bigg)\sum_{i=1}^n\int_0^T
\int_\Omega\bigg|\sum_{j=1}^n P_L(\bm{c})_{ij}\sqrt{c_j}\na\log\frac{c_j}{\bar{c}_j}
\bigg|^2 dxdt \label{4.I012} \\
&{}+ \frac{\lambda_M-\lambda}{\theta}\sum_{i=1}^n\int_0^T\int_\Omega\bigg|
\sum_{j=1}^n P_L(\bm{c})_{ij}\sqrt{c_j}\na(\mu_j-\bar{\mu}_j)\bigg|^2 dxdt
\nonumber \\
&{}- \frac{\lambda}{2}\sum_{i=1}^n\int_0^T\int_\Omega(\Delta(c_i-\bar{c}_i))^2dxdt
+ \lambda C\sum_{i=1}^n\int_0^T\int_\Omega\big((c_i-\bar{c}_i)^2
+ |\na(c_i-\bar{c}_i)|^2\big)dxdt \nonumber \\
&{}+ \frac{\theta'}{4}\sum_{i=1}^n\int_0^T\int_\Omega c_i\bigg|\na\log
\frac{c_i}{\bar{c}_i}\bigg|^2 dxdt + C(m,\theta')\sum_{i=1}^n
\int_0^T\int_\Omega(c_i-\bar{c}_i)^2 dxdt. \nonumber
\end{align}
The last but one term on the right-hand side still needs to be estimated.
To this end, we decompose $I=P_L(\bm{c})+P_{L^\perp}(\bm{c})$:
$$
\sum_{i=1}^n c_i\bigg|\na\log\frac{c_i}{\bar{c}_i}\bigg|^2
= \sum_{i=1}^n\bigg|\sum_{j=1}^n P_L(\bm{c})_{ij}\sqrt{c_j}
\na\log\frac{c_j}{\bar{c}_j}\bigg|^2
+ \sum_{i=1}^n\bigg|\sum_{j=1}^n P_{L^\perp}(\bm{c})_{ij}\sqrt{c_j}
\na\log\frac{c_j}{\bar{c}_j}\bigg|^2.
$$
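In this identity we use that $P_L(\bm{c})$ and $P_{L^\perp}(\bm{c})$ are complementary orthogonal
projections, so that $|\bm{v}|^2=|P_L(\bm{c})\bm{v}|^2+|P_{L^\perp}(\bm{c})\bm{v}|^2$, applied to the
vector $\bm{v}$ with (vector-valued) entries $v_i=\sqrt{c_i}\na\log(c_i/\bar{c}_i)$.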
The first term on the right-hand side can be absorbed, for sufficiently
small $\theta'>0$, by the (negative) second term on the right-hand side of \eqref{4.I012}.
For the other term, we use the definition $P_{L^\perp}(\bm{c})_{ij}=\sqrt{c_ic_j}$
and $\sum_{j=1}^n\na c_j=\sum_{j=1}^n\na\bar{c}_j=0$:
$$
\sum_{j=1}^n P_{L^\perp}(\bm{c})_{ij}\sqrt{c_j}
\na\log\frac{c_j}{\bar{c}_j} = \sqrt{c_i}\sum_{j=1}^n c_j
\na\log\frac{c_j}{\bar{c}_j}
= \sqrt{c_i}\sum_{j=1}^n (c_j-\bar{c}_j)\na\log\bar{c}_j.
$$
This gives
\begin{align}\label{4.clogcc}
\sum_{i=1}^n\int_0^T\int_\Omega c_i\bigg|\na\log
\frac{c_i}{\bar{c}_i}\bigg|^2 dxdt
&\le \sum_{i=1}^n\int_0^T\int_\Omega\bigg|\sum_{j=1}^n P_L(\bm{c})_{ij}\sqrt{c_j}
\na\log\frac{c_j}{\bar{c}_j}\bigg|^2 dxdt \\
&\phantom{xx}{}+ n\sum_{j=1}^n\|\na\log\bar{c}_j\|^2_{L^\infty(Q_T)}
\int_0^T\int_\Omega(c_j-\bar{c}_j)^2 dxdt. \nonumber
\end{align}
Hence, choosing $\theta=\lambda/(\lambda_M-\lambda)$ and $\theta'=\lambda$,
we conclude from \eqref{4.I012} that
\begin{align}\label{4.Hfinal}
{\mathcal H}(&\bm{c}(T)|\bar{\bm{c}}(T)) + \frac{\lambda}{2}\sum_{i=1}^n\int_0^T
\int_\Omega\bigg|\sum_{j=1}^n P_L(\bm{c})_{ij}\sqrt{c_j}\na\log\frac{c_j}{\bar{c}_j}
\bigg|^2 dxdt \\
&\phantom{xx}{}+ \frac{\lambda}{2}\sum_{i=1}^n\int_0^T\int_\Omega
(\Delta(c_i-\bar{c}_i))^2dxdt \nonumber \\
& \le {\mathcal H}(\bm{c}^0|\bar{\bm{c}}^0) + \frac{(\lambda_M-\lambda)^2}{\lambda}
\sum_{i=1}^n\int_0^T\int_\Omega\bigg|
\sum_{j=1}^n P_L(\bm{c})_{ij}\sqrt{c_j}\na(\mu_j-\bar{\mu}_j)\bigg|^2 dxdt
\nonumber \\
&\phantom{xx}{}+ C\sum_{i=1}^n\int_0^T\int_\Omega\big((c_i-\bar{c}_i)^2
+ |\na(c_i-\bar{c}_i)|^2\big)dxdt. \nonumber
\end{align}
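For the reader's convenience, we record the bookkeeping behind \eqref{4.Hfinal}. With
$\theta=\lambda/(\lambda_M-\lambda)$ and $\theta'=\lambda$, the total coefficient of the quadratic term
$\sum_{i}\int_0^T\int_\Omega|\sum_{j}P_L(\bm{c})_{ij}\sqrt{c_j}\na\log(c_j/\bar{c}_j)|^2dxdt$ on the
right-hand side of \eqref{4.I012}, after adding the contribution $\theta'/4$ coming from
\eqref{4.clogcc}, equals
$$
\frac{\theta}{4}(\lambda_M-\lambda) - \lambda + \frac{\theta'}{4}
= \frac{\lambda}{4} - \lambda + \frac{\lambda}{4} = -\frac{\lambda}{2},
$$
while the coefficient of the second quadratic term becomes
$(\lambda_M-\lambda)/\theta=(\lambda_M-\lambda)^2/\lambda$. Moving the two negative terms (this one and
the term $-\frac{\lambda}{2}\sum_i\int_0^T\int_\Omega(\Delta(c_i-\bar{c}_i))^2dxdt$) to the left-hand
side yields \eqref{4.Hfinal}.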
We show in the next step that the second term on the right-hand side can be
estimated by the relative energy inequality.
{\em Step 2: Estimating the relative energy.} We start from the relative energy
inequality \eqref{4.relE}. Observing that due to Lemma \ref{lem.DB} (ii),
\begin{align*}
\sum_{i,j=1}^n B_{ij}(\bm{c})\na(\mu_i-\bar{\mu}_i)\cdot\na(\mu_j-\bar{\mu}_j)
&= \sum_{i,j=1}^n D_{ij}^{BD}(\bm{c})\big(\sqrt{c_i}\na(\mu_i-\bar{\mu}_i)\big)
\cdot\big(\sqrt{c_j}\na(\mu_j-\bar{\mu}_j)\big) \\
&\ge \lambda_m\sum_{i=1}^n\bigg|\sum_{j=1}^n P_L(\bm{c})_{ij}\sqrt{c_j}
\na(\mu_j-\bar{\mu}_j)\bigg|^2,
\end{align*}
inequality \eqref{4.relE} becomes
\begin{align}\label{4.aux2}
& {\mathcal E}(\bm{c}(T)|\bar{\bm{c}}(T))
+ \lambda_m\sum_{i=1}^n\int_0^T\int_\Omega\bigg|\sum_{j=1}^n P_L(\bm{c})_{ij}\sqrt{c_j}
\na(\mu_j-\bar{\mu}_j)\bigg|^2dxdt \\
&\phantom{xx}{}\le {\mathcal E}(\bm{c}^0|\bar{\bm{c}}^0) + I_{13} + I_{14} + I_{15} + I_{16},
\qquad\mbox{where} \nonumber \\
& I_{13} = -\sum_{i,j=1}^n\int_0^T\int_\Omega\bigg(B_{ij}(\bm{c})
- \frac{c_i}{\bar{c}_i}B_{ij}(\bar{\bm{c}})\bigg)\na(\mu_i-\bar{\mu}_i)\cdot
\na\bar{\mu}_j dxdt, \nonumber \\
& I_{14} = \sum_{i,j=1}^n\int_0^T\int_\Omega(c_i-\bar{c}_i)\na(c_i-\bar{c}_i)
\cdot\na\operatorname{div}\bigg(\frac{1}{\bar{c}_i}B_{ij}(\bar{\bm{c}})\na\bar{\mu}_j\bigg)dxdt,
\nonumber \\
& I_{15} = \frac12\sum_{i,j=1}^n\int_0^T\int_\Omega|\na(c_i-\bar{c}_i)|^2
\operatorname{div}\bigg(\frac{1}{\bar{c}_i}B_{ij}(\bar{\bm{c}})\na\bar{\mu}_j\bigg)dxdt,
\nonumber \\
& I_{16} = \sum_{i,j=1}^n\int_0^T\int_\Omega
\na(c_i-\bar{c}_i)\otimes\na(c_i-\bar{c}_i)
:\na\bigg(\frac{1}{\bar{c}_i}B_{ij}(\bar{\bm{c}})\na\bar{\mu}_j\bigg)dxdt.
\nonumber
\end{align}
The terms $I_{14}$, $I_{15}$, and $I_{16}$ can be estimated directly by
using the regularity assumption
$\na\operatorname{div}((1/\bar{c}_i)B_{ij}(\bar{\bm{c}})\na\bar{\mu}_j)\in L^\infty(Q_T)$:
\begin{equation}\label{4.I1456}
I_{14}+I_{15}+I_{16} \le C\sum_{i=1}^n\int_0^T\int_\Omega\big(
(c_i-\bar{c}_i)^2 + |\na(c_i-\bar{c}_i)|^2\big)dxdt.
\end{equation}
The estimate for $I_{13}$ is more involved. First, we use the definition of
$B(\bm{c})$ and decompose $I=P_L(\bm{c})+P_{L^\perp}(\bm{c})$. Then
\begin{align*}
& I_{13} = \sum_{i,j=1}^n\int_0^T\int_\Omega\sqrt{c_i} E_{ij}(\bm{c},\bar{\bm{c}})
\na(\mu_i-\bar{\mu}_i)\cdot\na\bar{\mu}_j dxdt
=: I_{131} + I_{132}, \quad\mbox{where} \\
& E_{ij}(\bm{c},\bar{\bm{c}}) = D_{ij}^{BD}(\bm{c})\sqrt{c_j}
- \sqrt{\frac{c_i}{\bar{c}_i}}D_{ij}^{BD}(\bar{\bm{c}})\sqrt{\bar{c}_j}, \\
& I_{131} = \sum_{i,j,k,\ell=1}^n\int_0^T\int_\Omega
P_L(\bm{c})_{i\ell}E_{\ell j}(\bm{c},\bar{\bm{c}})
P_L(\bm{c})_{ik}\sqrt{c_k}\na(\mu_k-\bar{\mu}_k)\cdot\na\bar{\mu}_j dxdt, \\
& I_{132} = \sum_{i,j,k,\ell=1}^n\int_0^T\int_\Omega
P_{L^\perp}(\bm{c})_{i\ell}E_{\ell j}(\bm{c},\bar{\bm{c}})
P_{L^\perp}(\bm{c})_{ik}\sqrt{c_k}\na(\mu_k-\bar{\mu}_k)\cdot
\na\bar{\mu}_j dxdt.
\end{align*}
For $I_{131}$, it is sufficient to apply Young's inequality and to use
estimate \eqref{4.estD} for $E_{ij}(\bm{c},\bar{\bm{c}})$:
\begin{align}\label{4.I131}
I_{131} &\le \frac{\lambda_m}{2}\sum_{i=1}^n\int_0^T\int_\Omega\bigg|\sum_{j=1}^n
P_L(\bm{c})_{ij}\sqrt{c_j}\na(\mu_j-\bar{\mu}_j)\bigg|^2 dxdt \\
&\phantom{xx}{}+ \frac{n}{2\lambda_m}\sum_{i,j=1}^n\int_0^T\int_\Omega
|E_{ij}(\bm{c},\bar{\bm{c}})|^2|\na\bar{\mu}_j|^2 dxdt \nonumber \\
&\le \frac{\lambda_m}{2}\sum_{i=1}^n\int_0^T\int_\Omega\bigg|\sum_{j=1}^n
P_L(\bm{c})_{ij}\sqrt{c_j}\na(\mu_j-\bar{\mu}_j)\bigg|^2 dxdt \nonumber \\
&\phantom{xx}{}+ C(m)\sum_{i=1}^n\int_0^T\int_\Omega(c_i-\bar{c}_i)^2 dxdt,
\nonumber
\end{align}
where $C(m)>0$ depends on $m$, $n$, $\lambda_m$, and the $L^\infty(Q_T)$ norm
of $\na\bar{\bm{\mu}}$.
For $I_{132}$, we observe that the property $\operatorname{ran} D^{BD}(\bm{c})=L(\bm{c})$,
which follows from Lemma \ref{lem.Dz}, implies that
$P_{L^\perp}(\bm{c})D^{BD}(\bm{c})\bm{z}=\bm{0}$ for all $\bm{z}\in{\mathbb R}^n$.
Hence,
$$
\sum_{\ell=1}^n P_{L^\perp}(\bm{c})_{i\ell}E_{\ell j}(\bm{c},\bar{\bm{c}})
= -\sum_{\ell=1}^n P_{L^\perp}(\bm{c})_{i\ell}\sqrt{\frac{c_\ell}{\bar{c}_\ell}}
D_{\ell j}^{BD}(\bar{\bm{c}})\sqrt{\bar{c}_j}.
$$
We infer from the definitions
$P_{L^\perp}(\bm{c})_{ik}=\sqrt{c_ic_k}$ and
$\mu_k-\bar{\mu}_k=\log(c_k/\bar{c}_k)-\Delta(c_k-\bar{c}_k)$ that
\begin{align}\label{4.I132}
I_{132} &= -\sum_{i,j,k,\ell=1}^n\int_0^T\int_\Omega P_{L^\perp}(\bm{c})_{ik}
\sqrt{c_k}P_{L^\perp}(\bm{c})_{i\ell}\sqrt{\frac{c_\ell}{\bar{c}_\ell}}
D_{\ell j}^{BD}(\bar{\bm{c}})\sqrt{\bar{c}_j}\na(\mu_k-\bar{\mu}_k)
\cdot\na\bar{\mu}_j dxdt \\
&= -\sum_{j,k,\ell=1}^n\int_0^T\int_\Omega\sum_{i=1}^n c_ic_k
\frac{c_\ell}{\sqrt{\bar{c}_\ell}}D_{\ell j}^{BD}(\bar{\bm{c}})\sqrt{\bar{c}_j}
\na(\mu_k-\bar{\mu}_k)\cdot\na\bar{\mu}_j dxdt \nonumber \\
&= -\sum_{j,k,\ell=1}^n\int_0^T\int_\Omega c_k
\frac{c_\ell-\bar{c}_\ell}{\sqrt{\bar{c}_\ell}}D_{\ell j}^{BD}(\bar{\bm{c}})
\sqrt{\bar{c}_j}\na\log\frac{c_k}{\bar{c}_k}\cdot\na\bar{\mu}_j dxdt \nonumber \\
&\phantom{xx}{}- \sum_{j,k,\ell=1}^n\int_0^T\int_\Omega \operatorname{div}\bigg(c_k
\frac{c_\ell-\bar{c}_\ell}{\sqrt{\bar{c}_\ell}}D_{\ell j}^{BD}(\bar{\bm{c}})
\sqrt{\bar{c}_j}\na\bar{\mu}_j\bigg)\Delta(c_k-\bar{c}_k) dxdt \nonumber \\
&=: J_1 + J_2, \nonumber
\end{align}
where we added the expression $-\sum_{\ell=1}^n\sqrt{\bar{c}_\ell}
D_{\ell j}^{BD}(\bar{\bm{c}}) = 0$, which follows from
$\ker D^{BD}(\bar{\bm{c}})=L^\perp(\bar{\bm{c}})=\operatorname{span}
\{\sqrt{\bar{\bm{c}}}\}$ (see Lemma \ref{lem.DB}) and the symmetry of
$D^{BD}(\bar{\bm{c}})$ (see Lemma \ref{lem.Dz}), and we integrated by
parts in the last integral.
To estimate $J_1$, we use Young's inequality with $\theta>0$,
Lemma \ref{lem.DB} (iii), and \eqref{4.clogcc}:
\begin{align*}
J_1 &\le \frac{\theta}{4}\sum_{k=1}^n\int_0^T\int_\Omega c_k\bigg|
\na\log\frac{c_k}{\bar{c}_k}\bigg|^2 dxdt \\
&\phantom{xx}{}+ \frac{n}{\theta}\sum_{j,k,\ell=1}^n\int_0^T\int_\Omega
(c_\ell-\bar{c}_\ell)^2\frac{c_k}{\bar{c}_\ell}D_{\ell j}^{BD}(\bar{\bm{c}})^2
\bar{c}_j|\na\bar{\mu}_j|^2 dxdt \\
&\le \frac{\theta}{4}\sum_{i=1}^n\int_0^T\int_\Omega\bigg|\sum_{j=1}^n
P_L(\bm{c})_{ij}\sqrt{c_j}\na\log\frac{c_j}{\bar{c}_j}\bigg|^2 dxdt
+ C\theta\sum_{i=1}^n\int_0^T\int_\Omega(c_i-\bar{c}_i)^2 dxdt \\
&\phantom{xx}{}+ \frac{C}{\theta}\sum_{\ell=1}^n\int_0^T\int_\Omega
(c_\ell-\bar{c}_\ell)^2 dxdt,
\end{align*}
where $C>0$ depends on the $L^\infty(Q_T)$ norms of $\na\bar{\bm{c}}$ and
$\na\bar{\bm{\mu}}$.
Next, we use again Young's inequality with
$\theta'>0$:
\begin{equation}\label{4.J2}
J_2 \le \frac{\theta'}{4}\sum_{k=1}^n\int_0^T\int_\Omega(\Delta(c_k-\bar{c}_k))^2dxdt
+ \frac{n}{\theta'}\sum_{k,\ell=1}^n\int_0^T\int_\Omega
\big|\operatorname{div}\big(c_k(c_\ell-\bar{c}_\ell)Q_\ell(\bar{\bm{c}})\big)\big|^2 dxdt,
\end{equation}
where we defined
$$
Q_\ell(\bar{\bm{c}}) := \sum_{j=1}^n\frac{1}{\sqrt{\bar{c}_\ell}}
D_{\ell j}^{BD}(\bar{\bm{c}})\sqrt{\bar{c}_j}\na\bar{\mu}_j.
$$
Estimating
\begin{align*}
\big|\operatorname{div}\big(c_k(c_\ell-\bar{c}_\ell)Q_\ell(\bar{\bm{c}})\big)\big|
&= \big|c_k(c_\ell-\bar{c}_\ell)\operatorname{div} Q_\ell(\bar{\bm{c}})
+ c_k\na(c_\ell-\bar{c}_\ell)\cdot Q_\ell(\bar{\bm{c}}) \\
&\phantom{xx}{}+ (c_\ell-\bar{c}_\ell)\na(c_k-\bar{c}_k)\cdot Q_\ell(\bar{\bm{c}})
+ (c_\ell-\bar{c}_\ell)\na\bar{c}_k\cdot Q_\ell(\bar{\bm{c}})\big| \\
&\le C\big(|c_\ell-\bar{c}_\ell| + |\na(c_\ell-\bar{c}_\ell)|
+ |\na(c_k-\bar{c}_k)|\big),
\end{align*}
where $C>0$ depends on the $L^\infty(Q_T)$ norms of $Q_\ell(\bar{\bm{c}})$, $\operatorname{div}Q_\ell(\bar{\bm{c}})$, and $\na\bar{\bm{c}}$,
we deduce from \eqref{4.J2} that
$$
J_2 \le \frac{\theta'}{4}\sum_{k=1}^n\int_0^T\int_\Omega(\Delta(c_k-\bar{c}_k))^2dxdt
+ \frac{C}{\theta'}\sum_{i=1}^n\int_0^T\int_\Omega\big((c_i-\bar{c}_i)^2
+ |\na(c_i-\bar{c}_i)|^2\big)dxdt.
$$
Inserting the estimates for $J_1$ and $J_2$ into \eqref{4.I132} leads to
\begin{align*}
I_{132} &\le \frac{\theta}{4}\sum_{i=1}^n\int_0^T\int_\Omega\bigg|\sum_{j=1}^n
P_L(\bm{c})_{ij}\sqrt{c_j}\na\log\frac{c_j}{\bar{c}_j}\bigg|^2 dxdt
+ \frac{\theta'}{4}\sum_{i=1}^n\int_0^T\int_\Omega(\Delta(c_i-\bar{c}_i))^2dxdt \\
&\phantom{cc}{}+ C(\theta,\theta')\sum_{i=1}^n\int_0^T\int_\Omega
\big((c_i-\bar{c}_i)^2 + |\na(c_i-\bar{c}_i)|^2\big)dxdt.
\end{align*}
Then, together with \eqref{4.I131}, we find that
\begin{align}
I_{13} &\le \frac{\lambda_m}{2}\sum_{i=1}^n\int_0^T\int_\Omega\bigg|\sum_{j=1}^n
P_L(\bm{c})_{ij}\sqrt{c_j}\na(\mu_j-\bar{\mu}_j)\bigg|^2 dxdt \nonumber \\
&\phantom{xx}{}+ \frac{\theta}{4}\sum_{i=1}^n\int_0^T\int_\Omega\bigg|\sum_{j=1}^n
P_L(\bm{c})_{ij}\sqrt{c_j}\na\log\frac{c_j}{\bar{c}_j}\bigg|^2 dxdt
+ \frac{\theta'}{4}\sum_{i=1}^n\int_0^T\int_\Omega(\Delta(c_i-\bar{c}_i))^2dxdt
\label{4.I13} \\
&\phantom{cc}{}+ C(\theta,\theta')\sum_{i=1}^n\int_0^T\int_\Omega
\big((c_i-\bar{c}_i)^2 + |\na(c_i-\bar{c}_i)|^2\big)dxdt. \nonumber
\end{align}
Finally, we insert this estimate and estimate \eqref{4.I1456} for
$I_{14}$, $I_{15}$, and $I_{16}$ into \eqref{4.aux2}, observing that the
first term on the right-hand side of \eqref{4.I13}
is absorbed by the second term on the left-hand side of \eqref{4.aux2}:
\begin{align}\label{4.Efinal}
{\mathcal E}(&\bm{c}(T)|\bar{\bm{c}}(T)) + \frac{\lambda_m}{2}\sum_{i=1}^n
\int_0^T\int_\Omega\bigg|\sum_{j=1}^n P_L(\bm{c})_{ij}\sqrt{c_j}
\na(\mu_j-\bar{\mu}_j)\bigg|^2dxdt \\
&\le {\mathcal E}(\bm{c}^0 |\bar{\bm{c}}^0)
+ \frac{\theta}{4}\sum_{i=1}^n\int_0^T\int_\Omega\bigg|\sum_{j=1}^n
P_L(\bm{c})_{ij}\sqrt{c_j}\na\log\frac{c_j}{\bar{c}_j}\bigg|^2 dxdt \nonumber \\
&\phantom{xx}{}
+ \frac{\theta'}{4}\sum_{i=1}^n\int_0^T\int_\Omega(\Delta(c_i-\bar{c}_i))^2dxdt
\nonumber \\
&\phantom{xx}{}+ C(\theta,\theta')\sum_{i=1}^n\int_0^T\int_\Omega
\big((c_i-\bar{c}_i)^2 + |\na(c_i-\bar{c}_i)|^2\big)dxdt. \nonumber
\end{align}
{\em Step 3: Combining the relative energy and relative entropy inequalities.}
Next, multiply \eqref{4.Efinal} by
$4(\lambda_M-\lambda)^2/(\lambda_m\lambda)$, choose
$\theta=\theta'=\lambda_m\lambda^2/(4(\lambda_M-\lambda)^2)$, and add this expression to
\eqref{4.Hfinal} (which estimates ${\mathcal H}(\bm{c}|\bar{\bm{c}})$).
Then some terms on the right-hand
side can be absorbed by the corresponding expressions on the left-hand side,
leading to
\begin{align*}
{\mathcal H}(&\bm{c}(T)|\bar{\bm{c}}(T)) + \frac{4(\lambda_M-\lambda)^2}{\lambda_m\lambda}
{\mathcal E}(\bm{c}(T)|\bar{\bm{c}}(T)) \\
&\phantom{xx}{}+ \frac{(\lambda_M-\lambda)^2}{\lambda}
\sum_{i=1}^n\int_0^T\int_\Omega\bigg|\sum_{j=1}^n P_L(\bm{c})_{ij}\sqrt{c_j}
\na(\mu_j-\bar{\mu}_j)\bigg|^2 dxdt \\
&\phantom{xx}{}+ \frac{\lambda}{4}\sum_{i=1}^n\int_0^T\int_\Omega\bigg|\sum_{j=1}^n
P_L(\bm{c})_{ij}\sqrt{c_j}\na\log\frac{c_j}{\bar{c}_j}\bigg|^2 dxdt
+ \frac{\lambda}{4}\sum_{i=1}^n\int_0^T\int_\Omega(\Delta(c_i-\bar{c}_i))^2dxdt \\
&\le {\mathcal H}(\bm{c}^0|\bar{\bm{c}}^0) + \frac{4(\lambda_M-\lambda)^2}{\lambda_m\lambda}
{\mathcal E}(\bm{c}^0|\bar{\bm{c}}^0) \\
&\phantom{xx}{}+ C(\theta,\theta')\sum_{i=1}^n\int_0^T\int_\Omega
\big((c_i-\bar{c}_i)^2 + |\na(c_i-\bar{c}_i)|^2\big)dxdt.
\end{align*}
The last term can be bounded in terms of the relative energy, since
$c_i\log(c_i/\bar{c}_i)-(c_i-\bar{c}_i)\ge (c_i-\bar{c}_i)^2/2$
\cite[Lemma 18]{HJT21}:
\begin{align*}
{\mathcal H}(\bm{c}(T)|\bar{\bm{c}}(T)) + \frac{4(\lambda_M-\lambda)^2}{\lambda_m\lambda}
{\mathcal E}(\bm{c}(T)|\bar{\bm{c}}(T))
&\le {\mathcal H}(\bm{c}^0|\bar{\bm{c}}^0) + \frac{4(\lambda_M-\lambda)^2}{\lambda_m\lambda}
{\mathcal E}(\bm{c}^0|\bar{\bm{c}}^0) \\
&\phantom{xx}{}+ C\int_0^T{\mathcal E}(\bm{c}(t)|\bar{\bm{c}}(t))dt.
\end{align*}
Then the theorem follows after applying Gronwall's lemma.
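For completeness, we sketch the elementary inequality
$c_i\log(c_i/\bar{c}_i)-(c_i-\bar{c}_i)\ge\frac12(c_i-\bar{c}_i)^2$ invoked above; it only uses
$0<c_i,\bar{c}_i\le 1$, which holds here since the concentrations are nonnegative and sum up to one
(the case $c_i=0$ follows by continuity). Setting $h(s)=s\log(s/\bar{c}_i)-(s-\bar{c}_i)$, we have
$$
h(\bar{c}_i)=h'(\bar{c}_i)=0, \qquad h''(s)=\frac{1}{s}\ge 1\quad\mbox{for }0<s\le 1,
$$
so Taylor's formula with integral remainder gives $h(c_i)\ge\frac12(c_i-\bar{c}_i)^2$.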
\section{Examples}\label{sec.exam}
We present some models which satisfy Assumptions (B1)--(B4).
\subsection{A phase separation model}
Elliott and Garcke have studied in \cite{ElGa97} equations
\eqref{1.eq1}--\eqref{1.mu}, formulated in terms of the mobility matrix
\eqref{1.B}, where
$$
B_{ij}(\bm{c}) = b_i(c_i)\bigg(\delta_{ij} - \frac{b_j(c_j)}{\sum_{k=1}^n b_k(c_k)}
\bigg), \quad i,j=1,\ldots,n.
$$
The functions $b_i\in C^1([0,1])$ are nonnegative and satisfy
$\beta_1 c_i\le b_i(c_i)\le \beta_2 c_i$ for $c_i\in[0,1]$ and some constants
$0<\beta_1\le \beta_2$.
This model describes phase transitions in multicomponent systems;
it has been suggested in \cite{AkTo90} to model the dynamics of polymer mixtures
with $b_i(c_i)=\beta_i c_i$ and $\beta_i>0$.
The subspace $L(\bm{c})$ becomes
$$
L(\bm{c}) = \bigg\{\bm{z}\in{\mathbb R}^n:\sum_{i=1}^n \sqrt{b_i(c_i)}z_i=0\bigg\},
$$
and the matrix $D^{BD}(\bm{c})$ is determined directly from the mobility matrix:
$$
D_{ij}^{BD}(\bm{c}) = \frac{B_{ij}(\bm{c})}{\sqrt{b_i(c_i)b_j(c_j)}}
= \delta_{ij} - \frac{\sqrt{b_i(c_i)b_j(c_j)}}{\sum_{k=1}^n b_k(c_k)}.
$$
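As a quick numerical sanity check of this formula (purely illustrative and not part of any proof; the
mobility $b_i(s)=s$ below is a hypothetical choice satisfying the above assumptions with
$\beta_1=\beta_2=1$), a few lines of Python verify that $D^{BD}(\bm{c})$ is symmetric, positive
semidefinite, and annihilates the vector $(\sqrt{b_1(c_1)},\ldots,\sqrt{b_n(c_n)})$, in accordance with
the structural properties used throughout.
\begin{verbatim}
import numpy as np

def bott_duffin_matrix(c, b=lambda s: s):
    # Elliott-Garcke: D^BD_ij = delta_ij - sqrt(b_i b_j) / sum_k b_k,
    # with b(s) = s a hypothetical choice used only for this check.
    bc = b(np.asarray(c, dtype=float))
    sqrt_bc = np.sqrt(bc)
    return np.eye(len(bc)) - np.outer(sqrt_bc, sqrt_bc) / bc.sum()

c = np.array([0.2, 0.3, 0.5])      # strictly positive composition, sum = 1
D = bott_duffin_matrix(c)

print(np.allclose(D, D.T))                        # True: symmetry
print(np.all(np.linalg.eigvalsh(D) >= -1e-12))    # True: positive semidefinite
print(np.allclose(D @ np.sqrt(c), 0.0))           # True: sqrt(b(c)) = sqrt(c) lies in the kernel
\end{verbatim}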
Instead of checking Assumptions (B1)--(B4), it is more convenient to verify
the statements of Lemma \ref{lem.DB} directly. This has been done in
\cite[Section 2]{HJT21}. Although the global existence of weak solutions
has already been proved in \cite{ElGa97}, we obtain the weak-strong uniqueness
property as a new result.
\subsection{Classical Maxwell--Stefan system}
In the classical Maxwell--Stefan model, the matrix $K(\bm{c})$ has the entries
$K_{ij}(\bm{c}) = \delta_{ij}\sum_{\ell=1}^n k_{i\ell}c_\ell - k_{ij}c_i$ for
$i,j=1,\ldots,n$. The associated matrix $D^{MS}(\bm{c})$ is given by
$$
D_{ij}^{MS}(\bm{c}) = \frac{1}{\sqrt{c_i}}K_{ij}(\bm{c})\sqrt{c_j}
= \delta_{ij}\sum_{\ell=1}^n k_{i\ell} c_\ell - k_{ij}\sqrt{c_ic_j},
\quad i,j=1,\ldots,n.
$$
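As a quick consistency check (not needed for the results above), the vector $\sqrt{\bm{c}}$ lies in the
kernel of $D^{MS}(\bm{c})$, in line with the kernel property
$\ker D^{BD}(\bm{c})=\operatorname{span}\{\sqrt{\bm{c}}\}$ from Lemma \ref{lem.DB}: for every $i$,
$$
\sum_{j=1}^n D_{ij}^{MS}(\bm{c})\sqrt{c_j}
= \sqrt{c_i}\sum_{\ell=1}^n k_{i\ell}c_\ell - \sum_{j=1}^n k_{ij}\sqrt{c_ic_j}\,\sqrt{c_j}
= \sqrt{c_i}\bigg(\sum_{\ell=1}^n k_{i\ell}c_\ell - \sum_{j=1}^n k_{ij}c_j\bigg) = 0.
$$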
It is proved in \cite[Sec.~5.4]{HJT21} that this matrix satisfies
Assumptions (B1)--(B4). Thus, Theorems \ref{thm.ex} and \ref{thm.wsu} hold
for the model
\begin{align*}
& \pa_t c_i + \operatorname{div}(c_iu_i) = 0, \quad \sum_{i=1}^n c_iu_i = 0, \quad
i=1,\ldots,n, \\
& c_i\na\mu_i - \frac{c_i}{\sum_{k=1}^n c_k}\sum_{j=1}^n c_j\na\mu_j
= -\sum_{j=1}^n k_{ij}c_ic_j(u_i-u_j),
\end{align*}
where $\mu_i=\log c_i-\Delta c_i$.
Compared to \cite{HJT21}, the mobility depends not only on $c_i$ but also
on $\Delta c_i$. This extends the existence and weak-strong uniqueness results
to a more general case.
\subsection{A physical vapor deposition model for solar cells}
Thin-film crystalline solar cells can be fabricated as thin coatings on a
substrate by the physical vapor deposition process. The dynamics of the
volume fractions of the process components can be described by model
\eqref{1.eq1}--\eqref{1.bic} with the chemical potentials $\mu_i=\log c_i$
and the mobility matrix
$$
B_{ij}(\bm{c}) = \delta_{ij}\sum_{\ell=1}^n k_{i\ell} \, c_i c_\ell - k_{ij} c_ic_j,
\quad i,j=1,\ldots,n.
$$
In this case, the Bott--Duffin matrix is given by
$D_{ij}^{BD}(\bm{c}) = B_{ij}(\bm{c})/\sqrt{c_ic_j} = D_{ij}^{MS}(\bm{c})$,
where $D^{MS}(\bm{c})$ is the Maxwell--Stefan matrix of the previous subsection.
Thus, Assumptions (B1)--(B4) are verified for this matrix. We infer that
Theorems \ref{thm.ex} and \ref{thm.wsu} hold for the model
$$
\pa_t c_i = \operatorname{div}\sum_{j=1}^n k_{ij}c_ic_j\na(\mu_i-\mu_j), \quad
\mu_i = \log c_i-\Delta c_i, \quad i=1,\ldots,n.
$$
When $\mu_i=\log c_i$ for all $i$, the global existence of weak solutions was proved in
\cite{BaEh18} and the weak-strong uniqueness of solutions was shown in
\cite{HoBu21}. A global existence result was obtained in \cite{EMP21} for
$\mu_1 = \log c_1 - \delta c_1 + \beta(1-2c_1)$ with $\beta>0$
and $\mu_i=\log c_i$ for $i=2,\ldots,n$. Our theorems extend these results
to a more general case.
\end{document}
\begin{document}
\title{Global smooth solutions for the inviscid SQG equation}
\author{Angel Castro, Diego C\'ordoba and Javier G\'omez-Serrano}
\maketitle
\begin{abstract}
In this paper, we show the existence of the first nontrivial family of classical global solutions of the inviscid surface quasi-geostrophic equation.
\vskip 0.3cm
\textit{Keywords: global existence, surface quasi-geostrophic, incompressible, computer-assisted}
\end{abstract}
\tableofcontents
\section{Introduction}
We consider the initial value problem for the inviscid surface quasi-geostrophic equation (SQG):
\begin{align}
\partial_t \theta (x,t)+ u(x,t) \cdot \nabla \theta(x,t)& = 0, \ \ (x,t) \in \mathbb{R}^2 \times \mathbb{R}_+ \label{sqg} \\
u(x,t) & = (-R_2(\theta), R_1(\theta))(x,t)\nonumber\\
\theta(x,0)&= \theta_0(x)\nonumber,
\end{align}
where $R_j$ is the $j$-th Riesz transform:
\begin{align*}
R_j(\theta)(x) = \frac{1}{2\pi}P.V.\int_{\mathbb{R}^{2}} \frac{(x_j - y_j)}{|x-y|^{3}}\theta(y) dy.
\end{align*}
This equation is derived considering small Rossby and Ekman numbers and constant potential vorticity. It models the evolution of the temperature from a general quasi-geostrophic system for atmospheric and oceanic flows (see \cite{Constantin-Majda-Tabak:formation-fronts-qg,Held-Pierrehumbert-Garner-Swanson:sqg-dynamics,Pedlosky:geophysical,Majda-Bertozzi:vorticity-incompressible-flow} for more details). The numerical and analytical study of the equation was started by Constantin, Majda and Tabak in \cite{Constantin-Majda-Tabak:formation-fronts-qg}, since the SQG system presents an analogy with the 3D Euler equations.
The aim of this paper is to address the main problem of whether the classical solution corresponding to given initial data $\theta(x,0) = \theta_0(x)$ with finite energy exists for all time or not. We remark that both the $L^{p}$ norms of $\theta$ $(1 \leq p \leq \infty)$ and the $L^{2}$ norm of $u$ (the energy of the system) are conserved quantities. Moreover, the $L^{p}$ norms of $u$ obey the following bounds:
$$||u(\cdot,t)||_{L^p}\leq C_p ||\theta_0||_{L^p}\quad 1<p<\infty.$$
Local existence of solutions for \eqref{sqg} was first shown in \cite{Constantin-Majda-Tabak:formation-fronts-qg} in Sobolev spaces. By using different functional frameworks local existence has been also addressed in several papers, see for example \cite{Chae:qg-equation-triebel-lizorkin,Li:existence-theorems-2d-sqg-plane-waves,Wu:qg-equations-morrey-spaces,Wu:solutions-2d-qg-holder}.
Resnick, in his thesis \cite{Resnick:phd-thesis-sqg-chicago}, showed global existence of weak solutions in $L^{2}$ using an extra cancellation due to the oddness of the Riesz transform. Marchand \cite{Marchand:existence-regularity-weak-solutions-sqg} extended Resnick's result to the class of initial data belonging to
$L^{p}$ with $p > 4/3$. The question of non-uniqueness for weak solutions in $L^2$ is still a challenging open problem (see \cite{Isett-Vicol:holder-continuous-active-scalar,Azzam-Bedrossian:bmo-uniqueness-active-scalar-equations,Rusin:logarithmic-spikes-uniqueness-weak-solution-active-scalars} and references therein).
The problem of whether the SQG system presents finite time singularities or there is global existence is open for the smooth case. Kiselev and Nazarov \cite{Kiselev-Nazarov:simple-energy-pump-sqg} constructed solutions that started arbitrarily small but grew arbitrarily big in finite time, and Friedlander and Shvydkoy \cite{Friedlander-Shvydkoy:unstable-spectrum-sqg} showed the existence of unstable eigenvalues of the spectrum. Castro and C\'ordoba constructed singular solutions with infinite energy in \cite{Castro-Cordoba:infinite-energy-sqg} and Dritschel \cite{Dritschel:exact-rotating-solution-sqg} constructed global solutions that have $C^{1/2}$ regularity.
The numerical simulations in \cite{Constantin-Majda-Tabak:formation-fronts-qg} indicated a possible singularity in the form of a hyperbolic saddle closing in finite time. Ohkitani and Yamada \cite{Ohkitani-Yamada:inviscid-limit-sqg} and Constantin et al. \cite{Constantin-Nie-Schorghofer:nonsingular-sqg-flow} suggested that the growth was double exponential. The question was settled by C\'ordoba \cite{Cordoba:nonexistence-hyperbolic-blowup-qg}, who bounded the growth by a quadruple exponential; this was further improved by C\'ordoba and Fefferman \cite{Cordoba-Fefferman:growth-solutions-qg-2d-euler} to a double exponential (see also \cite{Deng-Hou-Li-Yu:non-blowup-2d-sqg}). The same scenario was recomputed almost 20 years later with bigger computational power and improved algorithms by Constantin et al. \cite{Constantin-Lai-Sharma-Tseng-Wu:new-numerics-sqg}, yielding no evidence of blowup and showing the depletion of the hyperbolic saddle past the previously computed times. In \cite{Majda-Tabak:2d-model-sqg}, Majda and Tabak compared simulations for the SQG scenario with the Euler case. Scott, in \cite{Scott:scenario-singularity-quasigeostrophic}, starting from elliptical configurations, proposed a candidate that develops filamentation and, after a few cascades, blowup of $\nabla \theta$.
Several criteria of blowup have been found and blowup can only occur through the blowup of either some geometric quantities or certain space-time norms. For more details see \cite{Chae-Constantin-Wu:deformation-symmetry-inviscid-sqg-3d-euler, Chae:continuation-principles-euler-sqg, Chae:geometric-approaches-singularities-inviscid-fluid-flows, Chae:behavior-solution-euler-related, Cordoba-Fefferman:scalars-convected-2d-incompressible-flow, Hou-Shi:dynamic-growth-vorticity-3d-euler-sqg, Ju:geometric-constraints-global-regularity-sqg,Cannone-Xue:self-similar-solutions-sqg}.
A different approach to the study of the formation of singularities for SQG comes from the patch problem, i.e., ``sharp fronts''. In this problem one considers that the scalar $\theta(x,t)$ is the characteristic function of some compact and simply connected domain which depends on time and has a smooth boundary. Local existence for the patch was proven by Rodrigo \cite{Rodrigo:evolution-sharp-fronts-qg} for a $C^\infty$ boundary and by Gancedo \cite{Gancedo:existence-alpha-patch-sobolev} in Sobolev spaces. C\'ordoba et al. found, in \cite{Cordoba-Fontelos-Mancho-Rodrigo:evidence-singularities-contour-dynamics}, strong numerical evidence of the formation of a singularity in the boundary of the patch. For further numerical simulations addressing formation of singularities see \cite{Scott-Dritschel:self-similar-sqg} and \cite{Scott:scenario-singularity-quasigeostrophic}. The possibility of a splash singularity scenario (i.e. when the interface touches itself at a point but the curve does not lose regularity) was ruled out by Gancedo and Strain \cite{Gancedo-Strain:absence-splash-muskat-SQG}.
Through a different motivation, C\'ordoba et al. \cite{Cordoba-Fefferman-Rodrigo:almost-sharp-fronts-sqg}, Fefferman and Rodrigo \cite{Fefferman-Rodrigo:almost-sharp-fronts-sqg} and Fefferman et al. \cite{Fefferman-Luli-Rodrigo:spine-sqg-almost-sharp-front} studied the existence of a special type of solutions that are known as ``almost sharp fronts'' for SQG. These solutions can be thought of as a regularization of a front, with a small strip around the front in which the solution changes (reasonably) from one value of the front to the other. These are strong solutions of the equation with large gradient ($\sim \text{(Width of the strip)}^{-1}$).
The main purpose of the paper is to show the following theorem:
\begin{theorem}
\label{globalsqg}
There is a nontrivial global smooth solution for the SQG equations that has finite energy.
\end{theorem}
It is well known that radial functions are stationary solutions for \eqref{sqg} due to the structure of the nonlinear term. The solutions that will be constructed in this paper are a smooth perturbation in a suitable direction of a specific radial function. The smooth profile we will perturb satisfies (in polar coordinates)
\begin{align*}
\theta(r)\equiv \left\{ \begin{array}{ccc} 1 & \text{for $0\leq r \leq 1-a$}\\ \text{smooth and decreasing} & \quad \text{for $1-a < r< 1$} \\ 0 & \text{for $1\leq r <\infty$} \end{array}\right.,
\end{align*}
where $a$ is a small number (below we will impose some more constraints on this profile). In addition, the dynamics of these solutions consist of global rotating level sets with constant angular velocity. These level sets are a perturbation of the circle. The limit case $a=0$ gives rise to the well-known V-state solution for SQG, i.e., a global rotating patch which solves \eqref{sqg} weakly. The existence of V-states with $C^\infty$ boundary for SQG was proven in \cite{Castro-Cordoba-GomezSerrano:existence-regularity-vstates-gsqg}. It was shown in \cite{Castro-Cordoba-GomezSerrano:analytic-vstates-ellipses} that the boundary of these solutions is actually analytic.
The proofs of these results are motivated by the ones for 2D incompressible Euler in the simply connected case; Burbea in \cite{Burbea:motions-vortex-patches} proved the existence of V-states for Euler, and the $C^\infty$-regularity of their boundary was proved by Hmidi et al. in \cite{Hmidi-Mateu-Verdera:rotating-vortex-patch} (see also \cite{Hassainia-Hmidi:v-states-generalized-sqg}).
The paper is organized as follows: section \ref{sectionequations} is devoted to the reformulation of the equations \eqref{sqg} in new variables. In section \ref{CR} we state the main theorem and present the Crandall-Rabinowitz (C-R) theorem which will be the main tool in our proof. In section \ref{checking} we check that our equation satisfies the hypotheses of the C-R theorem. This will be the main part of our work.
In particular section \ref{checking3} is different from previous analysis. We stress the following main differences:
\begin{itemize}
\item The study of the linear problem is now reduced to a functional equation, as opposed to a scalar equation (which was the situation in the patch case). Even the existence of nontrivial elements in the kernel of the linear part is not evident a priori.
\item There is no algebraic formula for either the eigenvalue or the eigenvector, not even in an implicit way (such as in \cite{Castro-Cordoba-GomezSerrano:analytic-vstates-ellipses}). This makes the proof of the dimensionality of the kernel much harder since one needs to show that the eigenvalue is simple and to have some control of the rest of the eigenvalues.
\end{itemize}
\begin{rem}In a forthcoming paper \cite{Castro-Cordoba-GomezSerrano:uniformly-rotating-smooth-euler}, by using the same techniques, we are able to extend our construction to the 2D Incompressible Euler equations.
\end{rem}
Finally, in Appendix \ref{sectionasymptotics}, we compute the asymptotics and bounds on the error terms of some of the elliptic integrals that appear, and Appendix \ref{sectioncomputerassisted} is devoted to discussing the details of the computer-assisted code and its implementation, as well as to showing the rigorous numerical bounds used in the theorems. Appendix \ref{appendixprojections} contains an explicit description of two big matrices used in the proofs.
A major theme of our work is the interplay between rigorous computer calculations and traditional mathematics. We use interval arithmetic as part of a proof whenever it is needed.
Advances in computing power have made rigorous computer-assisted proofs realizable. Naturally, floating-point operations can result in numerical errors; in order to overcome these, we employ interval arithmetic. The main paradigm is the following: instead of working with arbitrary real numbers, we perform computations over intervals whose endpoints are numbers representable by the computer, in order to guarantee that the true result belongs to the interval that represents it. On these objects, an arithmetic is defined in such a way that we are guaranteed that for every $x \in X, y \in Y$
\begin{align*}
x \star y \in X \star Y,
\end{align*}
for any operation $\star$. For example,
\begin{align*}
[\underline{x},\overline{x}] + [\underline{y},\overline{y}] & = [\underline{x} + \underline{y}, \overline{x} + \overline{y}] \\
[\underline{x},\overline{x}] \times [\underline{y},\overline{y}] & = [\min\{\underline{x}\underline{y},\underline{x}\overline{y},\overline{x}\underline{y},\overline{x}\overline{y}\},\max\{\underline{x}\underline{y},\underline{x}\overline{y},\overline{x}\underline{y},\overline{x}\overline{y}\}].
\end{align*}
We can also define the interval version of a function $f(X)$ as an interval $I$ that satisfies that for every $x \in X$, $f(x) \in I$. Rigorous computation of integrals has been theoretically developed since the seminal works of Moore and many others (see \cite{Berz-Makino:high-dimensional-quadrature,Cordoba-GomezSerrano-Zlatos:stability-shifting-muskat-II,Kramer-Wedner:adaptive-gauss-legendre-verified-computation,Lang:multidimensional-verified-gaussian-quadrature,Moore-Bierbaum:methods-applications-interval-analysis,Tucker:validated-numerics-book} for just a small sample). In our computations, all arithmetic will be double precision (64 bits).
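To fix ideas, a toy model of this arithmetic can be written in a few lines of Python; we stress that this sketch is only illustrative and, unlike an actual verified library, it does not control the rounding mode of the floating-point operations (a genuine implementation must round lower endpoints down and upper endpoints up).
\begin{verbatim}
from dataclasses import dataclass

@dataclass
class Interval:
    lo: float
    hi: float

    def __add__(self, other):
        # [x] + [y] = [x_lo + y_lo, x_hi + y_hi]
        return Interval(self.lo + other.lo, self.hi + other.hi)

    def __mul__(self, other):
        # [x] * [y] = [min of the four endpoint products, max of them]
        p = (self.lo * other.lo, self.lo * other.hi,
             self.hi * other.lo, self.hi * other.hi)
        return Interval(min(p), max(p))

x, y = Interval(1.0, 2.0), Interval(-3.0, 0.5)
print(x + y)   # Interval(lo=-2.0, hi=2.5)
print(x * y)   # Interval(lo=-6.0, hi=1.0)
\end{verbatim}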
\section{The equations}
\label{sectionequations}
In this section we describe the equation that a global rotating solution of SQG with constant angular velocity must satisfy. We will look at the level sets of this solution rather than at the solution itself. Let us assume that $\theta(x,t)$ is a smooth solution of \eqref{sqg} with initial data $\theta_0(x)$. On $\theta_0(x)$ we will assume that its level sets can be parameterized by $z_0(\alpha,\rho)$ in such a way that $$\theta_0(z_0(\alpha,\rho))=f(\rho)$$ for some smooth and even
function $f \, : \, \mathbb{R}\to \mathbb{R}$. The map $z_0(\alpha,\rho)$ satisfies:
\begin{enumerate}
\item It is one to one from $\{(\alpha,\rho)\in \mathbb{R}^2\,:\, -\pi\leq \alpha <\pi, \, 0<\rho<\infty\}$ to $\mathbb{R}^2\setminus \{0\}$.
\item For all $\alpha$, $z_0(\alpha,0)=(0,0)$.
\item For fixed $\rho>0$, $z^i_0(\alpha,\rho)$, with $i=1,2$, are $2\pi$-periodic and $z_0(\alpha,\rho)$, with $-\pi\leq \alpha <\pi$, is a closed and $C^1$ curve in $\mathbb{R}^2$ satisfying the chord-arc condition. Also we parametrize this curve counterclockwise with $\alpha$.
\item It is differentiable with respect to $\alpha$ and $\rho$ in $\mathbb{T}\times [0,\infty)$, $|z_{0\,\rho}(\alpha,\rho)|>c>0$ in $\mathbb{T}\times [0,\infty)$, $|z_{0\,\alpha}(\alpha,\rho)|>0$ in $\mathbb{T}\times (0,\infty)$ and $z_{0\,\alpha}^\perp(\alpha,\rho) \cdot z_{0\,\rho}(\alpha,\rho)<0\quad \text{for $-\pi\leq \alpha <\pi$ and $0<\rho<\infty$}.$
\end{enumerate}
Because of the transport character of the equation \eqref{sqg} and by continuity we can assume that the level sets of the solution $\theta(x,t)$ at time $t$ can be parameterized by a map $z(\alpha,\rho,t)$ such that
\begin{align}\theta(z(\alpha,\rho,t),t) = f(\rho),\label{teta}\end{align}
and it satisfies properties 1, 3 and 4.
Property 2 changes to $$z(\alpha,0,t)=c(t)\quad \text{for all $\alpha\in \mathbb{T}$},$$
where, here, $c(t)$ is a vector in $\mathbb{R}^2$.
Differentiating \eqref{teta} with respect to $\alpha$ and with respect to $\rho$ we have that
\begin{align*}
z_{\alpha}(\alpha,\rho,t) \cdot \nabla \theta(z(\alpha,\rho,t),t) &= 0, \\
z_{\rho}(\alpha,\rho,t) \cdot \nabla \theta(z(\alpha,\rho,t),t) &= f_\rho(\rho).
\end{align*}
Therefore
\begin{align}\label{gradiente}
\nabla \theta(z(\alpha,\rho,t),t) = \frac{f_\rho(\rho)}{z_{\alpha}^{\perp} \cdot z_{\rho}(\alpha,\rho,t)}z_{\alpha}^{\perp}(\alpha,\rho,t).
\end{align}
Taking a time derivative in \eqref{teta} and using \eqref{sqg} and \eqref{gradiente} yields
\begin{align*}
0=&\frac{d }{dt}\theta(z(\alpha,\rho,t),t)=\partial_{t} \theta(z(\alpha,\rho,t),t) + z_{t}(\alpha,\rho,t) \cdot \nabla \theta(z(\alpha,\rho,t),t) \\
=&(-u(z(\alpha,\rho,t),t) + z_{t}(\alpha,\rho,t) )\cdot \nabla \theta(z(\alpha,\rho,t),t) \\
=&\left(-u(z(\alpha,\rho,t),t) + z_{t}(\alpha,\rho,t )\right)\cdot z_{\alpha}^{\perp}(\alpha,\rho,t) \frac{f_\rho(\rho)}{z_{\alpha}^{\perp} \cdot z_{\rho}(\alpha,\rho,t)}.
\end{align*}
This last expression is the equation that the level sets $z(\alpha,\rho,t)$ of the solutions $\theta(x,t)$ satisfy.
Notice that since $u(x,t)=R^\perp \theta(x,t)$ we can also write $u(x,t)=-\Lambda^{-1}\nabla^\perp \theta(x,t)$ and therefore
\begin{align*}
u(z(\alpha,\rho,t),t) & = -\frac{1}{2\pi} \int_{\mathbb{R}^{2}} \frac{1}{|z(\alpha,\rho,t)-y|} \nabla^{\perp} \theta(y,t) dy \\
& = -\frac{1}{2\pi} \int_0^\infty\int_{-\pi}^\pi \frac{1}{|z(\alpha,\rho,t)-z(\alpha',\rho',t)|} f_\rho(\rho') z_{\alpha}(\alpha',\rho') d\alpha' d\rho'.\\
\end{align*}
where we made the change of variables $y=z(\alpha',\rho',t)$ and used \eqref{gradiente}.
Conversely, if $z(\alpha,\rho,t)$ satisfies the equation
\begin{align}\label{leveleq}
&\left(-u(z(\alpha,\rho,t),t) + z_{t}(\alpha,\rho,t )\right)\cdot z_{\alpha}^{\perp}(\alpha,\rho,t) \frac{f_\rho(\rho)}{z_{\alpha}^{\perp} \cdot z_{\rho}(\alpha,\rho,t)}= 0 \\
&z(\alpha,\rho,0)= z_0(\alpha,\rho)\nonumber
\end{align}
with
\begin{align}
u(z(\alpha,\rho,t),t)=-\frac{1}{2\pi} \int_0^\infty\int_{-\pi}^\pi \frac{1}{|z(\alpha,\rho,t)-z(\alpha',\rho',t)|} f_\rho(\rho') z_{\alpha}(\alpha',\rho') d\alpha' d\rho',\label{ulevel}
\end{align}
we can prove that the function $\theta \,:\, \mathbb{R}^2\times \mathbb{R}\to \mathbb{R} $ defined by \eqref{teta}
is a solution of the equation \eqref{sqg}.
Let us assume now that ${\rm supp}\thinspace(f_\rho) \subset [1-a,1]$, where $0 < a < 1$ will be chosen later. Then, in order to find a solution for SQG, we can solve the equation
\begin{align*}
\left(z_{t}(\alpha,\rho,t) -u(z(\alpha,\rho,t),t)\right) \cdot z_{\alpha}^{\perp}(\alpha,\rho,t) & = 0,
\end{align*}
with $u(z(\alpha, \rho,t),t)$ given by \eqref{ulevel}, in the domain $\rho \in (1-a,1)$, $\alpha\in \mathbb{T}$. After that we extend the solution $z(\alpha,\rho,t)$ to the domain $0\leq \rho<\infty$, $-\pi\leq \alpha <\pi$ in a smooth way and finally define $\theta(x,t)$ through equation \eqref{teta}.
In addition, we will assume that our solution rotates with angular velocity $\Lambda$, counterclockwise. Hence
\begin{align*}
z(\alpha,\rho,t) = \mathcal{O}(t)x(\alpha, \rho),
\quad
\mathcal{O}(t) =
\left(
\begin{array}{cc}
\cos(\Lambda t) & -\sin(\Lambda t) \\
\sin(\Lambda t) & \cos(\Lambda t)
\end{array}
\right)
\end{align*}
This implies on one hand
\begin{align*}
z_t(\alpha,\rho,t) = \mathcal{O}_t(t) x(\alpha,\rho) = \Lambda
\left(
\begin{array}{cc}
-\sin(\Lambda t) & -\cos(\Lambda t) \\
\cos(\Lambda t) & -\sin(\Lambda t)
\end{array}
\right)x(\alpha,\rho), \quad
z_{\alpha}(\alpha,\rho,t) = \mathcal{O}(t) x_{\alpha}(\alpha,\rho),
\end{align*}
and
\begin{align*}
&z_{t} \cdot z_{\alpha}^{\perp}(\alpha,\rho,t) = \langle \mathcal{O}_t(t) x(\alpha,\rho), (\mathcal{O}(t) x_{\alpha}(\alpha,\rho))^{\perp} \rangle\\
&= \langle \mathcal{O}_{t}(t) x(\alpha,\rho), \mathcal{O}(t)x_{\alpha}^{\perp}(\alpha,\rho) \rangle = \langle \mathcal{O}^{T} \mathcal{O}_{t}(t) x(\alpha,\rho), x_{\alpha}^{\perp}(\alpha,\rho) \rangle
= -\Lambda \langle x(\alpha,\rho), x_{\alpha}(\alpha,\rho) \rangle.
\end{align*}
On the other hand from \eqref{ulevel} we see that
\begin{align*}
u(z(\alpha,\rho,t),t)=-\mathcal{O}(t)\frac{1}{2\pi} \int_0^\infty\int_{-\pi}^\pi \frac{1}{|x(\alpha,\rho)-x(\alpha',\rho')|} f_\rho(\rho') x_{\alpha}(\alpha',\rho')\, d\alpha'\, d\rho'\equiv \mathcal{O}(t)\overline{u}(x(\alpha,\rho)).
\end{align*}
Thus
\begin{align*}
\langle u(z(\alpha,\rho,t),t), z_{\alpha}^{\perp}(\alpha,\rho,t) \rangle = \langle \mathcal{O}(t)\overline{u}(x(\alpha,\rho)), \mathcal{O}(t) x_{\alpha}^{\perp}(\alpha,\rho) \rangle = \langle \overline{u}(x(\alpha,\rho)), x_{\alpha}^{\perp}(\alpha,\rho) \rangle,
\end{align*}
which leads to the following equation for the pair $(x(\alpha,\rho), \Lambda)$:
\begin{align}\label{equationx}
-\Lambda x (\alpha,\rho)\cdot x_{\alpha}(\alpha,\rho) + \frac{1}{2\pi} x_{\alpha}^{\perp}(\alpha,\rho) \cdot \int_{0}^\infty \int_{-\pi}^\pi \frac{f_\rho(\rho')}{|x(\alpha,\rho)-x(\alpha',\rho')|} x_{\alpha}(\alpha',\rho')\, d\alpha'\, d\rho'=0.
\end{align}
We now write $x(\alpha,\rho)$ in polar coordinates
\begin{align}
x(\alpha,\rho) & = r(\alpha,\rho)(\cos(\alpha),\sin(\alpha)). \label{x}
\end{align}
This choice restricts the class of functions we are considering. However, this restriction is not too severe, and we will still be able to find a solution.
We have the following relations:
\begin{align*}
x_{\alpha}(\alpha,\rho) & = r_{\alpha}(\alpha,\rho)(\cos(\alpha),\sin(\alpha)) + r(\alpha,\rho)(-\sin(\alpha),\cos(\alpha)) = r_{\alpha}(\alpha,\rho) n(\alpha) + r(\alpha,\rho) t(\alpha) \\
x_{\alpha}^{\perp}(\alpha,\rho) & = r_{\alpha}(\alpha,\rho) n^{\perp}(\alpha) + r(\alpha,\rho) t^{\perp}(\alpha) = r_{\alpha}(\alpha,\rho) t(\alpha) - r(\alpha,\rho) n(\alpha)\\
x_{\alpha}(\alpha',\rho') \cdot x_{\alpha}^{\perp}(\alpha,\rho) & = r_{\alpha}(\alpha,\rho)r_{\alpha}(\alpha',\rho') n(\alpha') \cdot t(\alpha) - r_{\alpha}(\alpha',\rho')r(\alpha,\rho) n(\alpha) \cdot n(\alpha') \\
& + r_{\alpha}(\alpha,\rho)r(\alpha',\rho') t(\alpha') \cdot t(\alpha) - r(\alpha,\rho)r(\alpha',\rho') n(\alpha) \cdot t(\alpha'),
\end{align*}
where
\begin{align*}
n(\alpha) \cdot n(\alpha') & = \cos(\alpha-\alpha') \\
t(\alpha) \cdot t(\alpha') & = \cos(\alpha-\alpha') \\
n(\alpha') \cdot t(\alpha) & = -\sin(\alpha-\alpha') \\
n(\alpha) \cdot t(\alpha') & = \sin(\alpha-\alpha').
\end{align*}
Moreover, we have that
\begin{align*}
x(\alpha,\rho) \cdot x_{\alpha}(\alpha,\rho) = r(\alpha,\rho) n(\alpha) \cdot (r_{\alpha}(\alpha,\rho)n(\alpha) + r(\alpha,\rho) t(\alpha)) = r(\alpha,\rho) r_{\alpha}(\alpha,\rho).
\end{align*}
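Although these identities are elementary, they are used repeatedly below, so the following minimal numerical sketch (ours, not part of the argument; the helper names are hypothetical) double-checks them for random angles and radii.
\begin{verbatim}
import numpy as np

def n(a):  # radial unit vector (cos a, sin a)
    return np.array([np.cos(a), np.sin(a)])

def t(a):  # tangential unit vector (-sin a, cos a)
    return np.array([-np.sin(a), np.cos(a)])

rng = np.random.default_rng(0)
for _ in range(1000):
    a, ap = rng.uniform(-np.pi, np.pi, 2)
    assert np.isclose(n(a) @ n(ap), np.cos(a - ap))
    assert np.isclose(t(a) @ t(ap), np.cos(a - ap))
    assert np.isclose(n(ap) @ t(a), -np.sin(a - ap))
    assert np.isclose(n(a) @ t(ap), np.sin(a - ap))
    # x = r n(a), x_alpha = r_alpha n(a) + r t(a)  =>  x . x_alpha = r r_alpha
    r, ra = rng.uniform(0.5, 1.5), rng.uniform(-1.0, 1.0)
    assert np.isclose((r * n(a)) @ (ra * n(a) + r * t(a)), r * ra)
print("frame identities verified")
\end{verbatim}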
Therefore, equation \eqref{equationx} reads
\begin{align}\label{rotequation}
F[r,\,\Lambda]=0 \quad \text{in $\rho\in (1-a,1)$, $\alpha\in\mathbb{T}$},
\end{align}
with
\begin{align}\label{functionalF}
&F[r,\,\Lambda] \\&\equiv
\Lambda r_{\alpha}(\alpha,\rho) - \frac{1}{2\pi} \int_0^\infty \int_{-\pi}^\pi \frac{f_\rho(\rho')}{|x(\alpha,\rho)-x(\alpha',\rho')|}\cos(\alpha-\alpha')(r_{\alpha}(\alpha',\rho')-r_{\alpha}(\alpha,\rho))\, d\alpha'\, d\rho' \nonumber\\
& + \frac{r_{\alpha}(\alpha,\rho)}{2\pi r(\alpha,\rho)} \int_0^\infty \int_{-\pi}^\pi \frac{f_\rho(\rho')}{|x(\alpha,\rho)-x(\alpha',\rho')|}\cos(\alpha-\alpha')(r(\alpha',\rho')-r(\alpha,\rho))\, d\alpha'\, d\rho'\nonumber \\
& - \frac{1}{2\pi r(\alpha,\rho)} \int_0^\infty \int_{-\pi}^\pi \frac{f_\rho(\rho')}{|x(\alpha,\rho)-x(\alpha',\rho')|}\sin(\alpha-\alpha')(r(\alpha,\rho)r(\alpha',\rho') + r_{\alpha}(\alpha,\rho)r_{\alpha}(\alpha',\rho'))\, d\alpha'\, d\rho',\nonumber
\end{align}
where $x(\alpha,\rho)$ is given by \eqref{x} and we have added and subtracted $$\frac{r_{\alpha}(\alpha,\rho)}{2\pi r(\alpha,\rho)} \int_0^\infty \int_{-\pi}^\pi \frac{f_\rho(\rho')}{|x(\alpha,\rho)-x(\alpha',\rho')|}\cos(\alpha-\alpha')r(\alpha,\rho)\, d\alpha'\, d\rho'$$ for cosmetic reasons.
The rest of the paper consists in finding a nontrivial solution $(r(\alpha,\rho),\Lambda)$ of \eqref{rotequation} with $r_\rho(\alpha,\rho)>c>0.$
\section{Main theorem and Crandall-Rabinowitz (C-R) theorem}\label{CR}
This section is devoted to stating the main theorem of the paper. But first, we will fix the function $f(\rho)$. The derivative of this function will be given by the expression
\begin{align}\label{definitiondef}
&\frac{a}{2}f_\rho\left(\rho\right) \\&=
\left\{
\scriptsize
\begin{array}{cc}
-\frac{1}{2\beta^{9}}(126 \beta^4 - 420 \beta^3 (1 + \tilde{\rho}) + 540 \beta^2 (1 + \tilde{\rho})^2 - 315 \beta (1 + \tilde{\rho})^3 + 70 (1 + \tilde{\rho})^4)(1 + \tilde{\rho})^5 & \text{ if } -1 \leq \tilde{\rho} \leq -1 + \beta \\
-0.5 & \text{ if } -1 + \beta \leq \tilde{\rho} \leq 1 - \beta \\
\frac{1}{2\beta^{9}}(126 \beta^4 + 420 \beta^3 (-1 + \tilde{\rho}) + 540 \beta^2 (-1 + \tilde{\rho})^2 + 315 \beta (-1 + \tilde{\rho})^3 + 70 (-1 + \tilde{\rho})^4) (-1 + \tilde{\rho})^5 & \text{ if } 1 - \beta \leq \tilde{\rho} \leq 1 \\
\end{array}
\right.\nonumber
\end{align}
where $\beta$ will be chosen later and $\tilde{\rho} = \frac{2}{a}(\rho-1)+1$. We take $f(1)=0$. This expression will be used in order to compute some integrals where $f_\rho(\rho)$ arises. Next we describe the main properties of this function:
\begin{enumerate}
\item The function $f(\rho)$ defined in $(1-a,1)$ admits a $C^4$-extension (we still call it $f$) to $\mathbb{R}$. This extension is given by
\begin{align*}
f(\rho)=\left\{\begin{array}{ccc} 1 & \rho\leq 1-a\\ f(\rho) & 1-a< \rho <1 \\ 0 & \rho \geq 1\end{array}\right\}.
\end{align*}
\item It is strictly decreasing in $(1-a,1)$.
\item The derivative $f_\rho(\rho)$ is constant for $1-a+\frac{a\beta}{2}\leq \rho \leq 1-\frac{a\beta}{2}$.
\end{enumerate}
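As a quick sanity check of the explicit gluing in \eqref{definitiondef} (this sketch is ours and purely illustrative, not part of the computer-assisted proof), one can evaluate the two polynomial pieces of $\frac{a}{2}f_\rho$ at the matching points $\tilde\rho=-1+\beta$ and $\tilde\rho=1-\beta$ and confirm that they attain the constant value $-0.5$ there and vanish at $\tilde\rho=\mp 1$; derivatives up to fourth order can be checked in the same way, e.g.\ with \texttt{sympy}.
\begin{verbatim}
import numpy as np

beta = 2.0 ** -8  # the value used in Theorem \ref{main}

def left(t):   # piece for -1 <= t <= -1 + beta
    return -(1.0 / (2 * beta**9)) * (126*beta**4 - 420*beta**3*(1+t)
            + 540*beta**2*(1+t)**2 - 315*beta*(1+t)**3
            + 70*(1+t)**4) * (1+t)**5

def right(t):  # piece for 1 - beta <= t <= 1
    return (1.0 / (2 * beta**9)) * (126*beta**4 + 420*beta**3*(-1+t)
            + 540*beta**2*(-1+t)**2 + 315*beta*(-1+t)**3
            + 70*(-1+t)**4) * (-1+t)**5

print(left(-1.0), left(-1.0 + beta))   # expected: 0.0 and -0.5
print(right(1.0 - beta), right(1.0))   # expected: -0.5 and 0.0
assert np.isclose(left(-1.0 + beta), -0.5)
assert np.isclose(right(1.0 - beta), -0.5)
\end{verbatim}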
\begin{rem}
Due to the computer-assisted nature of some parts of the proof, the choice of $f(\rho)$ and all the parameters of the problem $(n,a,\beta)$ need to be explicit. A similar strategy works for other $n$-fold symmetric solutions, and more regular solutions can be obtained by choosing more regular (explicit) profiles $f$ if the computer-assisted parts of the proof yield suitable numbers.
\end{rem}
\begin{theorem}\label{main} Let $a = 0.05$ and $\beta= \frac{2}{512} = 2^{-8}$ and consider the domain $$\Omega_a\equiv \{ (\alpha,\, \rho)\,:\,\alpha\in \mathbb{T},\, 1-a<\rho<1\}$$ and $f\in C^{4}([1-a,1])$ as in \eqref{definitiondef}. Then there exists a branch of nontrivial smooth solutions, with $3-$fold symmetry, of equation \eqref{rotequation}, in $H^{4,3}(\Omega_a)$, bifurcating from $r(\alpha,\rho)=\rho$ and $\Lambda=\Lambda_3$ for some $\Lambda_3\in \mathbb{R}$.
\end{theorem}
\begin{rem} Nontrivial solution means that the function $r(\alpha, \rho)$ depends on $\alpha$ in a nontrivial way. See Definition \ref{Hkl} for a precise definition of the space $H^{k,l}({\Omega_a})$.
\end{rem}
From Section \ref{sectionequations} it is clear that Theorem \ref{globalsqg} follows from Theorem \ref{main}.
The proof of Theorem \ref{main} relies on the Crandall-Rabinowitz theorem. We recall here the statement of this theorem from \cite{Crandall-Rabinowitz:bifurcation-simple-eigenvalues} for expository purposes.
\begin{theorem}[Crandall-Rabinowitz] Let $X$, $Y$ be Banach spaces, $V$ a neighborhood of
0 in $X$ and
\begin{align*}
F \,:\, & V\times (-1,\,1) \rightarrow Y\\
& \,\qquad (r,\,\mu)\,\,\,\,\rightarrow F[r,\,\mu]
\end{align*}
have the properties
\begin{align}
\emph{$F[0,\, \mu] = 0$ for any $|\mu| < 1$.} \\
\emph{The partial derivatives $\partial_\mu F$, $\partial_r F$ and $\partial^2 _{\mu r}F$ exist and are continuous.} \\
\emph{$\mathcal{N}(\partial_r F[0, 0])$ and $Y/\mathcal{R}(\partial_r F[0, 0])$ are one-dimensional.} \\
\emph{$\partial^2_{\mu r} F[0, 0]r_0\not\in \mathcal {R}(\partial_r F[0, 0])$, where $\mathcal{N}(\partial_r F[0, 0]) = \text{span }r_0.$} \label{condTransversality}
\end{align}
(Here $\mathcal{N}$ and $\mathcal{R}$ denote the kernel and range respectively.) If $Z$ is any complement of $\mathcal{N}(\partial_r F(0, 0))$ in $X$, then there is a neighborhood $U$ of $(0, 0)$ in
$X \times \mathbb{R}$, an interval $(-b, b)$, and continuous functions
\begin{align*}
\phi\, : \, (-b, b) \rightarrow \mathbb{R} && \psi\, :\, (-b, b) \rightarrow Z\end{align*}
such that $\phi(0) = 0, \psi(0) = 0$ and
$$F^{-1}(0)\cap U=\{ (\xi r_0+\xi\psi(\xi), \phi(\xi))\,:\, |\xi|<b\}\cup \{(0,t)\,:\, (0,t)\in U\}.$$
\end{theorem}
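To keep the structure of the theorem in mind, it may help to look at the textbook toy example $F[r,\mu]=\mu r - r^{3}$ with $X=Y=\mathbb{R}$: all four hypotheses hold at $(0,0)$ with $r_0=1$, and the nontrivial branch is $\phi(\xi)=\xi^2$, $\psi(\xi)=0$. The following minimal \texttt{sympy} sketch (ours, purely illustrative) checks the transversality condition \eqref{condTransversality} and the branch in this toy case.
\begin{verbatim}
import sympy as sp

r, mu, xi = sp.symbols('r mu xi', real=True)
F = mu * r - r**3                      # toy functional F[r, mu]

Fr = sp.diff(F, r)                     # partial_r F
Frmu = sp.diff(F, r, mu)               # partial^2_{mu r} F

# At (r, mu) = (0, 0): the kernel of partial_r F is all of R (one-dimensional),
# its range is {0}, so Y / range is one-dimensional as well.
assert Fr.subs({r: 0, mu: 0}) == 0
# Transversality: partial^2_{mu r} F[0,0] * r0 = 1 is not in the range {0}.
assert Frmu.subs({r: 0, mu: 0}) == 1

# Nontrivial branch r = xi*r0 + xi*psi(xi), mu = phi(xi) with r0 = 1,
# psi = 0 and phi(xi) = xi^2:
assert sp.simplify(F.subs({r: xi, mu: xi**2})) == 0
print("toy Crandall-Rabinowitz example checked")
\end{verbatim}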
We will check in the following section the hypotheses of the C-R theorem.
\section{Checking the hypotheses of the C-R theorem for the equation \eqref{rotequation}}\label{checking}
In this section we will check the hypotheses of the C-R theorem in suitable Banach spaces $X$ and $Y$ in order to find a nontrivial branch of solutions $(r(\alpha,\rho), \Lambda)$ of \eqref{rotequation}. In order to be able to apply this theorem in the way that it is written in section \ref{CR} we will define new variables:
\begin{align}
\overline{r}(\alpha,\rho)& \equiv r(\alpha,\rho)-\rho\\
\mu & \equiv \Lambda-\Lambda_3\\
\overline{F}[\overline{r},\mu]& \equiv F[\overline{r}+\rho,\Lambda_3+\mu]\label{functionalfb}
\end{align}
with $\Lambda_3$ to be fixed later. Thus we understand \eqref{rotequation} as an equation for $(\overline{r},\mu)$ rather than for $(r,\Lambda)$. In fact, we look for solutions of
\begin{align}\label{rbarra}
\overline{F}[\overline{r},\mu]=0 \quad \text{in $\Omega_a$}.
\end{align}
Let us also define the spaces $H^{k,l}(\Omega_a)$ for $k,\,l \in \mathbb{N}$ and $k\geq l$ as follows:
\begin{align}\label{Hkl}
\left\{ r\in L^2(\Omega_a) \, : \, ||r||^2_{L^2(\Omega_a)}+ ||\partial^{l}_\rho r||_{L^2(\Omega_a)}^2+\sum_{j=0}^l||\partial^{k-j}_\alpha \partial_\rho^{j} r||^{2}_{L^2(\Omega_a)}<\infty\right\}.
\end{align}
We will work in the space $H^{4,3}$ and notice that $H^{4,3} \subset C^2$. This is shown below. The reason why we work with this space is that the functional $\overline{F}$ takes 1 derivative in $\alpha$ but no derivatives in $\rho$. Due to this anisotropy we would not be able to apply the C-R theorem by using standard (isotropic) Sobolev spaces alone (see \cite{Lannes:water-waves-book} for additional information).
\begin{lemma}
Let $\Omega = [0,1] \times [-\pi,\pi]$ and let $f: \Omega \rightarrow \mathbb{R}$ belong to $H^{4,3}(\Omega)$. Then:
\begin{align*}
\|f\|_{L^{\infty}(\Omega)} + \|\partial_{x}^{2} f\|_{L^{\infty}(\Omega)} + \|\partial_{\alpha}^{2} f\|_{L^{\infty}(\Omega)} \leq C \|f\|_{H^{4,3}(\Omega)}.
\end{align*}
\end{lemma}
\begin{proof}
If $f \in H^{4,3}(\Omega)$, then:
\begin{align*}
\|f\|_{L^{2}(\Omega)}^{2} + \|\partial_{x}^{3}f \|_{L^{2}(\Omega)}^{2} + \|\partial_{\alpha}^{4}f \|_{L^{2}(\Omega)}^{2} + \|\partial_{\alpha}^{3} \partial_{x}f \|_{L^{2}(\Omega)}^{2} + \|\partial_{\alpha}^{2} \partial_{x}^{2}f \|_{L^{2}(\Omega)}^{2}
+ \|\partial_{\alpha}\partial_{x}^{3}f \|_{L^{2}(\Omega)}^{2} < C.
\end{align*}
On one hand, $f_{\alpha} \in H^{3}(\Omega)$, since
\begin{align*}
\|f_{\alpha}\|_{H^{3}(\Omega)}^{2} & = \|f_{\alpha}\|_{L^{2}(\Omega)}^{2} + \|\partial_{x}^{3}f_{\alpha} \|_{L^{2}(\Omega)}^{2} + \|\partial_{\alpha}^{3}f_{\alpha} \|_{L^{2}(\Omega)}^{2} \\
& \lesssim \|f\|_{L^{2}(\Omega)}^{2} + \|\partial_{\alpha}^{4} f\|_{L^{2}(\Omega)}^{2} + \|\partial_{x}^{3}\partial_{\alpha} f \|_{L^{2}(\Omega)}^{2} + \|\partial_{\alpha}^{4}f \|_{L^{2}(\Omega)}^{2} \lesssim \|f\|^{2}_{H^{4,3}(\Omega)}.
\end{align*}
This implies that $f_{\alpha} \in C^{1+\gamma}(\overline{\Omega})$, yielding $\|f_{\alpha \alpha}\|_{L^{\infty}(\overline{\Omega})} \leq C$ and $\|f_{\alpha x}\|_{L^{\infty}(\overline{\Omega})} \leq C$ for some constant $C$.
On the other hand, if we define
\begin{align*}
g(\alpha) = \int_{0}^{1} \partial_{x}^{2} f(x,\alpha)\, dx,
\end{align*}
we claim that $g \in H^{1}([-\pi,\pi])$. In order to see this, we can compute
\begin{align*}
\|g\|_{L^{2}([-\pi,\pi])}^{2} = \int_{-\pi}^{\pi} \left(\int_{0}^{1} \partial_{x}^{2} f(x,\alpha)\, dx\right)^{2} d\alpha \leq \int_{-\pi}^{\pi} \int_{0}^{1} |\partial_{x}^{2} f(x,\alpha)|^{2}\, dx\, d\alpha \leq \|f\|^{2}_{H^{4,3}(\Omega)}.
\end{align*}
In addition
\begin{align*}
\|g_{\alpha}\|_{L^{2}([-\pi,\pi])}^{2} = \int_{-\pi}^{\pi} \left(\int_{0}^{1} \partial_{x}^{2} \partial_{\alpha} f(x,\alpha)\, dx\right)^{2} d\alpha \leq \int_{-\pi}^{\pi} \int_{0}^{1} |\partial_{x}^{2} \partial_{\alpha} f(x,\alpha)|^{2}\, dx\, d\alpha \leq \|f\|^{2}_{H^{4,3}(\Omega)}.
\end{align*}
Therefore
\begin{align*}
\|g\|_{L^{\infty}(\Omega)} \leq \|g\|_{L^{\infty}([-\pi,\pi])} \leq \|g\|_{H^{1}([-\pi,\pi])} \leq C \|f\|_{H^{4,3}(\Omega)}.
\end{align*}
We have that
\begin{align*}
\partial_{x}^{2} f(x,\alpha) = \partial_{x}^{2} f(x,\alpha) - \int_{0}^{1} \partial_{x}^{2} f(x',\alpha)\, dx' + g(\alpha),
\end{align*}
which we can bound in the following way:
\begin{align*}
\|\partial_{x}^{2} f\|_{L^{\infty}(\Omega)} \leq \left\|\int_{0}^{1} \partial_{x}^{2} f(x,\alpha) - \partial_{x}^{2} f(x',\alpha)\, dx'\right\|_{L^{\infty}(\Omega)} + \|g\|_{L^{\infty}(\Omega)}.
\end{align*}
In addition:
\begin{align*}
\int_{0}^{1} \partial_{x}^{2} f(x,\alpha) - \partial_{x}^{2} f(x',\alpha)\, dx'
= \int_{0}^{1} \int_{x'}^{x} \partial_{x}^{3} f(x'',\alpha)\, dx''\, dx' = h(\alpha,x).
\end{align*}
We now fix $x$ and show that $\|h(\alpha,x)\|_{H^{1}([-\pi,\pi])}$ is uniformly bounded. We achieve that by using the following estimate:
\begin{align*}
\|h(\alpha,x)\|_{L^{2}([-\pi,\pi])} & \leq \int_{0}^{1} \int_{x'}^{x}\left(\int_{-\pi}^{\pi} |\partial_{x}^{3} f(x'',\alpha)|^{2}d\alpha\right)^{\frac12} dx'' dx' \\
& \leq \int_{0}^{1}|x-x'|^{\frac12}\left(\int_{x'}^{x} \int_{-\pi}^{\pi}|\partial_{x}^{3}f(x'',\alpha)|^{2}\text{sign}(x-x')\, d\alpha\, dx''\right)^{\frac12} dx' \\
& \leq C\left(\int_{0}^{1}\left(\int_{0}^{1} \int_{-\pi}^{\pi}|\partial_{x}^{3}f(x'',\alpha)|^{2}\, d\alpha\, dx''\right)^{\frac12} dx'\right) \\
& \leq C \|f\|_{H^{4,3}(\Omega)},
\end{align*}
where $C$ is independent of $x$. We can do the same procedure with $\partial_{\alpha} h(\alpha,x)$, getting
\begin{align*}
\|h(\alpha,x)\|_{L^{\infty}([-\pi,\pi])} \leq \|h(\alpha,x)\|_{H^{1}([-\pi,\pi])} \leq C\|f\|_{H^{4,3}(\Omega)}.
\end{align*}
Taking the supremum over $x$ yields the desired result.
\end{proof}
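The key one-dimensional ingredient in the proof above is the embedding $\|g\|_{L^\infty([-\pi,\pi])}\leq C\|g\|_{H^{1}([-\pi,\pi])}$, applied to $g$ and to $h(\cdot,x)$. The short sketch below (ours; finite differences and a trapezoidal rule, so only an illustration, not a proof) compares both sides on a sample function.
\begin{verbatim}
import numpy as np

alpha = np.linspace(-np.pi, np.pi, 20001)
g = np.sin(alpha) + 0.3 * np.cos(5 * alpha) + 0.1 * alpha**2
g_prime = np.gradient(g, alpha)         # finite-difference derivative

def l2(u):                               # trapezoidal L^2([-pi,pi]) norm
    return np.sqrt(np.sum(0.5 * (u[:-1]**2 + u[1:]**2) * np.diff(alpha)))

sup_norm = np.max(np.abs(g))
h1_norm = np.sqrt(l2(g)**2 + l2(g_prime)**2)
print("sup |g|  =", sup_norm)            # smaller than the H^1 norm below
print("H^1 norm =", h1_norm)
\end{verbatim}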
The theorem we will prove is the following:
\begin{theorem}\label{thm3} Let $f$ and $a$ be as in Theorem \ref{main}. Then there exist $(\overline{r}_0(\alpha,\rho), \Lambda_3)\in H^{4,3}(\Omega_a)\times \mathbb{R}$,
an interval $(-b, b)$, and continuous functions
\begin{align*}
\phi\, : \, (-b, b) \rightarrow \mathbb{R} && \psi\, :\, (-b, b) \rightarrow Z\end{align*}
with $\phi(0) = 0, \psi(0) = 0$, such that, if $Z$ is any complement of $\text{span}\{\overline{r}_0\}$ in $H^{4,3}(\Omega_a)$,
$$\overline{F}[\xi \overline{r}_0+ \xi \psi(\xi),\phi(\xi)]=0,$$
for $|\xi|<b$.
In addition these solutions will have $3-$fold symmetry.
\end{theorem}
Here it is important to remark that this theorem provides a nontrivial solution $r(\alpha,\rho)=\rho +\xi \overline{r}_0+\xi \psi(\xi)$ of \eqref{rotequation} with $\Lambda=\Lambda_3+\phi(\xi)$ satisfying $|r_\rho(\alpha,\rho)|>c>0$ if we take $\xi$ small enough. Theorem \ref{main} follows from Theorem \ref{thm3}.
\subsection{Step 1. The functional setting and the hypothesis 1}
Our first step is to define the spaces we will work with in order to apply the C-R theorem. The spaces $H^{k,l}_{3, \text{even}}(\Omega_a)$ and $H^{k,l}_{3, \text{odd}}(\Omega_a)$ will be given by
\begin{align*}
\left\{ \overline{r}\in H^{k,l}(\Omega_a) \,:\, \overline{r}(\alpha,\rho)=\sum_{m=1}^\infty \hat{r}_m(\rho)\cos(3m\alpha)\right\},\end{align*} and \begin{align*}
\left\{ \overline{r}\in H^{k,l}(\Omega_a) \,:\, \overline{r}(\alpha,\rho)=\sum_{m=1}^\infty \hat{r}_m(\rho)\sin(3m\alpha)\right\},
\end{align*}
respectively.
One of the purposes of introducing these spaces, which only contain frequencies that are multiples of $3$, is to be able to show the $3-$fold symmetry of the solution. Our starting space $X$ will be $H^{4,3}_{3, \text{even}}(\Omega_a)$.
The target space $Y$ will be $H^{3,3}_{3, \text{odd}}(\Omega_a)$. Notice that a function in $H^{4,3}(\Omega_a)$ belongs to $C^{2,2}(\overline{\Omega_a})\subset C^1(\overline{\Omega_a})$. Finally we take the neighbourhood $V$ of $0$ in $X$ to be
\begin{align*}
V \equiv \left\{ \overline{r}\in H^{4,3}_{3, \text{even}}(\Omega_a)\, : \, ||\overline{r}||_{H^{4,3}_{3, \text{even}}(\Omega_a)}<\delta \right\}
\end{align*}
for $\delta>0$. The parameter $\delta$ will be fixed later (small enough).
Given these definitions we need to show the following lemma:
\begin{lemma}\label{lema42}Let $\overline{F}[\overline{r},\mu]$ be as in \eqref{functionalfb}. Then, for fixed $a\in (0,1)$, there exists $\delta>0$ small enough so that
\begin{align*}
\overline{F} \, : \, V\times [-1, 1]\to H^{3,3}_{3,\, \emph{odd}}(\Omega_a).
\end{align*}
\end{lemma}
\begin{proof}
Here we recall the definition of the functional
\begin{align*}
&F[r,\,\Lambda] \\&\equiv
\Lambda r_{\alpha}(\alpha,\rho) - \frac{1}{2\pi} \int_0^\infty \int_{-\pi}^\pi \frac{f_\rho(\rho')}{|x(\alpha,\rho)-x(\alpha',\rho')|}\cos(\alpha-\alpha')(r_{\alpha}(\alpha',\rho')-r_{\alpha}(\alpha,\rho))\, d\alpha'\, d\rho' \nonumber\\
& + \frac{r_{\alpha}(\alpha,\rho)}{2\pi r(\alpha,\rho)} \int_0^\infty \int_{-\pi}^\pi \frac{f_\rho(\rho')}{|x(\alpha,\rho)-x(\alpha',\rho')|}\cos(\alpha-\alpha')(r(\alpha',\rho')-r(\alpha,\rho))\, d\alpha'\, d\rho'\nonumber \\
& - \frac{1}{2\pi r(\alpha,\rho)} \int_0^\infty \int_{-\pi}^\pi \frac{f_\rho(\rho')}{|x(\alpha,\rho)-x(\alpha',\rho')|}\sin(\alpha-\alpha')(r(\alpha,\rho)r(\alpha',\rho') + r_{\alpha}(\alpha,\rho)r_{\alpha}(\alpha',\rho'))\, d\alpha'\, d\rho'\\
&\equiv F_1[r,\Lambda]+F_2[r]+F_3[r]+F_4[r],
\end{align*}
with $r=\rho+\overline{r}$ and $x=r(\cos(\alpha),\sin(\alpha))$.
It is easy to check that $F_1[r,\Lambda]=\Lambda \partial_\alpha r$ belongs to $H^{3,3}_{3,\, \text{odd}}(\Omega_a)$.
Next we show that \begin{align}\label{Fenh3} F_i[r]\in H^{3,3}(\Omega_a),\quad i=2,3,4.\end{align} In order to do it we notice that, since $||\cdot||_{C^2(\overline{\Omega_a})}\leq C||\cdot ||_{H^{4,3}(\Omega_a)}$,
we can choose, for fixed $a\in (0,1)$, $\delta$ small enough to have that $r(\alpha,\rho)>c_0(a,\delta)>0$ and $r_\rho(\alpha,\rho)>c_1(a,\delta)>0$, for every $(\alpha,\rho)\in \Omega_a$. By comparing equations \eqref{equationx} and \eqref{rotequation} we see that
\begin{align*}
\left(F_2[r]+F_3[r]+F_4[r]\right)(\alpha,\rho)=&-\frac{x_\alpha^\perp(\alpha,\rho)}{2\pi r(\alpha,\rho)}\cdot \int_{0}^\infty\int_{-\pi}^\pi f_\rho(\rho')\frac{x_\alpha(\alpha',\rho')}{|x(\alpha,\rho)-x(\alpha',\rho')|}\,d\alpha'\, d\rho'\\
=& \frac{x_\alpha^\perp(\alpha,\rho)}{2\pi r(\alpha,\rho)}\cdot \int_{0}^\infty\int_{-\pi}^\pi f_\rho(\rho')\frac{x_\alpha(\alpha,\rho)-x_\alpha(\alpha',\rho')}{|x(\alpha,\rho)-x(\alpha',\rho')|}\,d\alpha'\, d\rho',
\end{align*}
with $x(\alpha,\rho)=r(\alpha,\rho)(\cos(\alpha),\sin(\alpha))$ and $r(\alpha, \rho)=\rho+\overline{r}(\alpha,\rho)$. We will extend the restriction of the function $f_\rho$ to the positive real axis, $f_\rho|_{\mathbb{R}^+}$, to $\mathbb{R}$ by zero. We still call this extension $f_\rho$. Thus, a change of variables yields
\begin{align*}
\left(F_2[r]+F_3[r]+F_4[r]\right)(\alpha,\rho)& =\underbrace{\frac{x_\alpha^\perp(\alpha,\rho)}{2\pi r(\alpha,\rho)}}_{g(\alpha,\rho)}\cdot \underbrace{\int_{-\infty}^\infty\int_{-\pi}^\pi f_\rho(\rho-\rho')\frac{x_\alpha(\alpha,\rho)-x_\alpha(\alpha-\alpha',\rho-\rho')}{|x(\alpha,\rho)-x(\alpha-\alpha',\rho-\rho')|}\,d\alpha'\, d\rho'}_{B[f,x](\alpha,\rho)}.
\end{align*}
We will use the following notation. For a general function $h(\alpha,\rho)$ we define \begin{align*} h=&h(\alpha,\rho)\\ h'= & h(\alpha',\rho')\\h''=& h(\alpha-\alpha',\rho-\rho')\\ \Delta h =&h-h'\\ \Delta h'=&h-h''\end{align*}
Thus we can write
\begin{align*}
B[f,x]=\int_{-\infty}^\infty \int_{-\pi}^\pi f_\rho''\frac{\Delta x'_\alpha}{|\Delta x'|}\,d\alpha'\,d\rho'.
\end{align*}
Next we look at \begin{align}\label{deriv}\sum_{j=0}^3 \partial^{3-j}_\alpha\partial^{j}_\rho \left(g(\alpha,\rho)B[f,x](\alpha,\rho)\right).\end{align}
We will consider two groups of terms. Group 1 consists of the terms
\begin{align*}
g \partial^{3}_\alpha B,\,\, \,\,\partial_\alpha g \partial_\alpha^{2}B,\,\,\,\, \partial_\alpha g \partial^{2}_{\alpha\rho}B, \,\,\,\, \partial_\rho g \partial^{2}_{\alpha} B,\,\,\,\, g \partial^{2}_\alpha\partial_\rho B,\,\,\,\, \partial_\alpha g\partial^{2}_\rho B,
\,\,\,\, \partial_\rho g \partial^{2}_{\alpha\rho}B,\,\,\,\, g\partial^{2}_\rho\partial_\alpha B,\,\,\,\, \partial_\rho g \partial^{2}_\rho B, \,\,\,\, g\partial^{3}_\rho B.
\end{align*}
Group 2 consists of the terms
\begin{align*}
\partial^{3}_{\alpha}g B,\,\, \,\, \partial^{2}_\alpha g\partial_\alpha B,\,\, \,\, \partial^{2}_\alpha g\partial_\rho B,\,\, \,\, \partial^{2}_{\alpha\rho}g\partial_\alpha B,\,\, \,\, \partial^{2}_\rho\partial_\alpha g B, \,\, \,\, \partial^{2}_\rho g \partial_\alpha B,\,\, \,\, \partial^{2}_{\alpha\rho}g\partial_\rho B,\,\, \,\, \partial^{2}_\rho g\partial_\rho B,\,\, \,\, \partial^{3}_\rho g B.
\end{align*}
It is easy to check that expression \eqref{deriv} is given by a linear combination of the terms in group 1 and group 2. On one hand, since in group 1 at most one derivative acts on $g$ and
$$||g||_{L^\infty(\overline{\Omega_a})},\quad ||\partial_\alpha g||_{L^\infty(\overline{\Omega_a})}, \quad ||\partial_\rho g||_{L^\infty(\overline{\Omega_a})} \leq C(\delta, a),$$
in order to bound its terms we need to estimate
\begin{align*}
\partial^{3}_\alpha B,\,\, \,\, \partial_\alpha^{2}B,\,\,\,\, \partial^{2}_{\alpha\rho}B, \,\,\,\, \partial^{2}_\alpha\partial_\rho B,\,\,\,\, \partial^{2}_\rho B,\,\,\,\, \partial^{2}_\rho\partial_\alpha B,\,\,\,\, \partial^{3}_\rho B.
\end{align*}
On the other hand, since
\begin{align*}
\partial^{2}_\alpha g,\quad \partial^{2}_\rho g,\quad \partial^{2}_{\alpha\rho}g, \quad \partial^{3}_\alpha g,\quad \partial^{3}_\rho g,\quad \partial^{2}_\alpha\partial_\rho g,\quad \partial^{2}_\rho \partial_\alpha g,
\end{align*}
have $L^2$-norms bounded by some constant depending on $\delta$ and $a$, in order to estimate the terms in group 2 we just need to control the $L^\infty$ norms of $B$, $\partial_\alpha B$, $\partial_\rho B$. These norms are bounded by $||B||_{H^3(\overline{\Omega_a})}$.
In addition, since the $L^2$-norm of $B[f,x]$ is easy to control, we just have to estimate the $L^2$-norms of the derivatives of order 3 of $B[f,x]$.
\begin{lemma}\label{d3B} Let $r = \rho + \overline{r}$, where $\overline{r} \in V$, and $x = r(\cos(\alpha),\sin(\alpha))$. Then the derivatives $\partial^{3}_{\sigma_1\sigma_2\sigma_3}B[f,x]$, where each $\sigma_i$ is either $\alpha$ or $\rho$, $i=1,2,3$, are in $L^2$ with norm bounded by a constant $C$ depending only on $\delta$, $a$ and $||f||_{C^4}$.
\end{lemma}
\begin{proof}
This lemma will be proved by using the following lemma:
\begin{lemma}\label{cuerdaarco} Let $r = \rho + \overline{r}$, where $\overline{r} \in V$, and $x = r(\cos(\alpha),\sin(\alpha))$. Then there exists a constant $c(a,\delta)>0$ such that
\begin{align*}
|x(\alpha,\rho)-x(\alpha-\alpha',\rho-\rho')|^2\geq c(a,\delta)\left(\alpha'^2+\rho'^2\right).
\end{align*}
\end{lemma}
\begin{proof}
By definition we have that
\begin{align*}
|\Delta x'|^2=r^2+r''^2-2rr''\cos(\alpha')=(\Delta r')^2+4 r r''\sin^2\left(\frac{\alpha'}{2}\right).
\end{align*}
Now we notice that $r=\rho+\overline{r}$ and then, for $\rho\in (1-a,1)$, there exists $c_0(a,\delta)>0$ such that $r\geq c_0(a,\delta)$, where $c_0(a,\delta)$ is increasing with $a$ and decreasing with $\delta$. Since $\rho-\rho'$ belongs to $(1-a,1)$, $r''\geq c_0(a,\delta)$ too. Both inequalities together yield
\begin{align*}
|\Delta x'|^2\geq (r-r'')^2+ c_0(a,\delta)\sin^2\left(\frac{\alpha'}{2}\right).
\end{align*}
In addition, $r-r''=\rho'+ \Delta\overline{r}'$, and $|\Delta\overline{r}'|\leq C(\delta)(|\alpha'|+|\rho'|)$ where $C(\delta)\to 0$ when $\delta\to 0$. Then
\begin{align*}
(r-r'')^2\geq\rho'^2-C(\delta)\left(\alpha'^2+\rho'^2\right),
\end{align*}
where $C(\delta)\to 0$ when $\delta\to 0$. Therefore, by taking $\delta$ small enough, we obtain that
\begin{align*}
|\Delta x'|^2\geq c(a,\delta)\left(\alpha'^2+\rho'^2\right).
\end{align*}
\end{proof}
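The identity $|\Delta x'|^{2}=(\Delta r')^{2}+4rr''\sin^{2}(\alpha'/2)$ used at the beginning of the proof is just the law of cosines rearranged; the following tiny sketch (ours) verifies it numerically for random radii and angles.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(1)
for _ in range(1000):
    r1, r2 = rng.uniform(0.5, 1.5, 2)    # r and r''
    da = rng.uniform(-np.pi, np.pi)       # alpha'
    lhs = r1**2 + r2**2 - 2 * r1 * r2 * np.cos(da)      # |Delta x'|^2
    rhs = (r1 - r2)**2 + 4 * r1 * r2 * np.sin(da / 2)**2
    assert np.isclose(lhs, rhs)
print("chord identity verified")
\end{verbatim}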
Let $\partial$ denote differentiation with respect to either $\alpha$ or $\rho$. Then the derivatives $\partial^{3} \left(f_\rho''\frac{\Delta x'_\alpha}{|\Delta x'|}\right)$ consist of terms of the form
\begin{align*}
&\partial^{3}f_\rho''\frac{\Delta x'_\alpha}{|\Delta x'|}, \quad f''_\rho \partial^{3}\Delta x_\alpha'\frac{1}{|\Delta x'|},\quad f_\rho''\Delta x_\alpha' \partial^{3}\frac{1}{|\Delta x'|},\\
& \partial^{2}f''_\rho \partial \Delta x'_\alpha \frac{1}{|\Delta x'|},\quad \partial^{2}f''_\rho \Delta x_\alpha'\partial \frac{1}{|\Delta x'|},\quad \partial f''_\rho \partial^{2}\Delta x'_\alpha\frac{1}{|\Delta x'|},\\
& f''_\rho\partial^2 \Delta x'_\alpha \partial\frac{1}{|\Delta x'|}, \quad \partial f''_\rho \Delta x'_\alpha \partial^{2}\frac{1}{|\Delta x'|},\quad f''_\rho \partial \Delta x'_\alpha \partial^{2}\frac{1}{|\Delta x'|},\\
&\partial f''_\rho \partial \Delta x'_\alpha \partial \frac{1}{|\Delta x'|}.
\end{align*}
Using that $f\in C^4$, Lemma \ref{cuerdaarco}, the fact that
$$\int_{\rho-1}^{\rho-(1-a)}\int_{-\pi}^\pi\frac{1}{\sqrt{\alpha'^2+\rho'^2}}\,d\alpha'\,d\rho'\leq C $$
and that $||\overline{r}||_{C^2}\leq C(\delta)$, we obtain that the terms in $\partial^{3}B$ coming from $\partial^{3} f''_\rho\frac{\Delta x_\alpha'}{|\Delta x'|}$ and $\partial^{2} f_\rho'' \partial \Delta x_\alpha' \frac{1}{|\Delta x'|}$ are in $L^\infty$ with $L^\infty-$norm bounded by some constant $C\left(a,\delta,||f||_{C^4(\overline{\Omega_a})}\right)$.
The terms in $\partial^{3}B$ coming from $f_\rho''\partial^{3}\Delta x_\alpha'\frac{1}{|\Delta x'|}$ and $\partial f_\rho''\partial^{2}\Delta x'_\alpha \frac{1}{|\Delta x'|}$ can all be bounded in $L^2$ in the following way. Again we will use the bound for $f$ in $C^4$ and Lemma \ref{cuerdaarco}. Let us focus on
\begin{align}\label{i1i2}
\int_{-\infty}^\infty \int_{-\pi}^\pi f''_\rho \frac{\partial^3\Delta x'_\alpha}{|\Delta x'|}\,d\alpha'\, d\rho'=\partial^{3}x_\alpha\int_{\rho-1}^{\rho-(1-a)}\int_{-\pi}^\pi f''_\rho \frac{1}{|\Delta x'|}\,d\alpha'\,d\rho'-\int_{-\infty}^{\infty}\int_{-\pi}^\pi f''_\rho \frac{\partial^{3}x''_\alpha}{|\Delta x'|}\,d\alpha'\,d\rho'\equiv I_1+I_2.
\end{align}
Since
\begin{align*}
\left|\int_{\rho-1}^{\rho-(1-a)}\int_{-\pi}^\pi f''_\rho \frac{1}{|\Delta x'|}\,d\alpha'\,d\rho'\right|\leq C\left(a,\delta, ||f||_{C^4}\right)
\end{align*}
we have that $||I_1||_{L^2(\overline{\Omega_a})}\leq C\left(a,\delta, ||f||_{C^4}\right) ||\partial^{3}x_\alpha||_{L^2(\overline{\Omega_a})}$.
In order to bound $I_2$ we notice that, after a change of variables,
\begin{align*}
|I_2|=\left|\int_{1-a}^1\int_{-\pi}^\pi \frac{f'_\rho}{|\Delta x|}\partial^{3}x_\alpha'\, d\alpha'\,d\rho'\right|\leq C(a,\delta)\int_{1-a}^1\int_{-\pi}^\pi \frac{|f'_\rho|}{\sqrt{(\alpha-\alpha')^2+(\rho-\rho')^2}}|\partial^{3}x_\alpha'|\, d\alpha'\,d\rho',
\end{align*}
and therefore Young's inequality applies to yield $$||I_2||_{L^2(\overline{\Omega_a})}\leq C\left(a,\delta, ||f||_{C^4}\right) ||\partial^{3}x_\alpha||_{L^2(\overline{\Omega_a})}.$$
It remains to bound the terms with derivatives acting on the factor $\frac{1}{|\Delta x'|}$.
We will first deal with the terms in $\partial^{3}B$ with a factor $\partial\frac{1}{|\Delta x'|}$, i.e., the terms coming from $\partial^{2}_\rho f''_\rho\Delta x_\alpha' \partial \frac{1}{|\Delta x'|}$, $f''_\rho\partial^{2}\Delta x_\alpha'\partial \frac{1}{|\Delta x'|}$ and $\partial f''_\rho \partial \Delta x'_\alpha \partial \frac{1}{|\Delta x'|}$. A direct computation shows that
\begin{align*}
\partial\frac{1}{|\Delta x'|}=-\frac{\partial\Delta x'\cdot \Delta x'}{|\Delta x'|^3},
\end{align*}
and therefore by Lemma \ref{cuerdaarco} and because $x\in C^2$ we have that
\begin{align*}
\left|\partial\frac{1}{|\Delta x'|}\right|\leq C(a,\delta)\frac{1}{\sqrt{\alpha'^2+\rho'^2}}.
\end{align*}
Therefore the terms in $\partial^{3}B$ coming from $\partial^{2}_\rho f''_\rho \Delta x_\alpha' \partial \frac{1}{|\Delta x'|}$ and $\partial f''_\rho \partial \Delta x'_\alpha \partial \frac{1}{|\Delta x'|}$ are actually bounded in $L^\infty$. The term coming from $f''_\rho\partial^{2}\Delta x_\alpha'\partial \frac{1}{|\Delta x'|}$ is bounded as we did before for $I_1+I_2$ in \eqref{i1i2}.
The term with two derivatives of the factor $\frac{1}{|\Delta x'|}$ which causes more difficulties is $f_\rho''\,\partial \Delta x'_\alpha\, \partial^{2}\frac{1}{|\Delta x'|}$. We will use the following embedding: since $x_\alpha\in H^{3,3}(\Omega_a)=H^3(\Omega_a)$ we know that $x_\alpha\in C^{1+\gamma}(\overline{\Omega_a})$ with $C^{1+\gamma}(\overline{\Omega_a})-$norm bounded by some constant $C(\delta)$. Then, since
\begin{align*}
\partial^{2}\frac{1}{|\Delta x'|}=-\frac{\partial^2\Delta x'\cdot \Delta x' }{|\Delta x'|^3}-\frac{\partial \Delta x' \cdot \partial \Delta x' }{|\Delta x'|^3}+3\frac{\partial\Delta x' \cdot \Delta x' \,\partial\Delta x'\cdot\Delta x'}{|\Delta x'|^5},
\end{align*}
we can use the previous embedding to estimate
\begin{align*}
\left|\partial\Delta x'_\alpha\,\partial^{2}\frac{1}{|\Delta x'|}\right|\leq C(a,\delta)\left(\frac{1}{\left(\alpha'^2+\rho'^2\right)^{1-\frac{\gamma}{2}}}|\partial^{2}\Delta x'|+\frac{1}{\sqrt{\alpha'^2+\rho'^2}}|\partial\Delta x'_\alpha|\right).
\end{align*}
Therefore, by using Young's inequality again, we bound the term $$\int_{-\infty}^\infty \int_{-\pi}^\pi f_\rho''\,\partial\Delta x_\alpha'\, \partial^{2}\frac{1}{|\Delta x'|}\,d\alpha'\, d\rho'\leq C(a,\delta,||f||_{C^4}).$$
Finally we compute three derivatives of the factor $\frac{1}{|\Delta x'|}$. The terms arising from these derivatives are linear combinations of terms with the structures
\begin{align*}
& \frac{\partial^{3}\Delta x'\cdot \Delta x'}{|\Delta x'|^3},\quad\frac{\partial^{2}\Delta x'\cdot \partial\Delta x'}{|\Delta x'|^3}, \quad \frac{\partial^{2}\Delta x'\cdot \Delta x'\,\partial\Delta x'\cdot \Delta x'}{|\Delta x'|^5},\\ & \frac{\partial\Delta x'\cdot \partial \Delta x'\,\partial\Delta x'\cdot \Delta x'}{|\Delta x'|^5},\quad
\frac{\partial\Delta x'\cdot \Delta x'\,\partial\Delta x'\cdot \Delta x'\,\partial\Delta x'\cdot \Delta x'}{|\Delta x'|^7},
\end{align*}
and then an analysis similar to the one above shows that
\begin{align*}
\int_{-\infty}^\infty\int_{-\pi}^\pi f_\rho''\,\Delta x'_\alpha\, \partial^{3}\frac{1}{|\Delta x'|}\, d\alpha'\, d\rho'
\end{align*}
is bounded in $L^2$ by a constant $C(a,\delta,||f||_{C^4})$. This concludes the proof of Lemma \ref{d3B}.\end{proof}
Thus we have proven that \eqref{Fenh3} holds. This finishes the proof of Lemma \ref{lema42}.
\end{proof}
Therefore, in order to prove that $$\overline{F}\,:\, V\times (-1,1)\to H^{3,3}_{3, \text{odd}}(\Omega_a),$$
we just need to show that if \begin{align*} \overline{r}(-\alpha,\rho) = &\overline{r}(\alpha,\rho)\end{align*}
and
\begin{align*} \overline{r}\left(\alpha+\frac{2n\pi}{3},\rho\right) = &\overline{r}(\alpha,\rho)\end{align*} for $n\in \mathbb{N}$, then
\begin{align*} \overline{F}(-\alpha,\rho) = -&\overline{F}(\alpha,\rho)\end{align*}
and
\begin{align*} \overline{F}\left(\alpha+\frac{2n\pi}{3},\rho\right) = &\overline{F}(\alpha,\rho)\end{align*} for $n\in \mathbb{N}$. These two properties are easy to check.
The last part of this section will be to check that hypothesis 1 in the C-R theorem holds. This fact is a consequence of radial functions being stationary solutions of the SQG equation, but let us check it on \eqref{functionalF}. If we take $\overline{r}=0$, i.e., $r=\rho$, the only term in \eqref{functionalF} that is not trivially zero is the last integral. In order to check that this integral is zero we just notice that the integrand is odd in $\alpha-\alpha'$.
\subsection{Step 2. The partial derivatives of the functional $F$}
We need to prove the existence and the continuity of the Gateaux derivatives $\partial_{\overline{r}} \overline{F}[\overline{r},\Lambda]$, $\partial_\Lambda \overline{F}[\overline{r},\Lambda]$ and $\partial^{2}_{\overline{r}\Lambda}\overline{F}[\overline{r},\Lambda]$. We have the following lemma:
\begin{lemma}\label{partialderivatives} For all $\overline{r}\in V$ and $\mu\in \mathbb{R}$ the partial derivatives $\partial_{\overline{r}} \overline{F}[\overline{r},\Lambda]$, $\partial_\Lambda \overline{F}[\overline{r},\Lambda]$ and $\partial^{2}_{\overline{r}\Lambda}\overline{F}[\overline{r},\Lambda]$ exist and are continuous. In addition
\begin{align*}
&\partial_r\overline{F}[0,\mu]\tilde{r}(\alpha,\rho)=\partial_rF[\rho,\Lambda]\tilde{r}(\alpha,\rho)\\&=\Lambda \tilde{r}_{\alpha}(\alpha,\rho) - \frac{1}{2\pi} \int \int \frac{f_\rho(\rho')\cos(\alpha-\alpha')(\tilde{r}_{\alpha}(\alpha',\rho')-\tilde{r}_{\alpha}(\alpha,\rho))}{\sqrt{\rho^2+(\rho')^2-2\rho\rho'\cos(\alpha-\alpha')}}\, d\alpha'\, d\rho' \\
& + \frac{\tilde{r}_{\alpha}(\alpha,\rho)}{2\pi} \int \int \frac{f_\rho(\rho')\cos(\alpha-\alpha')(\rho'-\rho)}{\rho\sqrt{\rho^2+(\rho')^2-2\rho\rho'\cos(\alpha-\alpha')}}\, d\alpha'\, d\rho' \\
& - \frac{1}{2\pi} \int \int \frac{f_\rho(\rho')(\rho-\rho'\cos(\alpha-\alpha'))\sin(\alpha-\alpha')(\rho\tilde{r}(\alpha',\rho') - \rho'\tilde{r}(\alpha,\rho))}{(\rho^2+(\rho')^2-2\rho\rho'\cos(\alpha-\alpha'))^{\frac32}}\, d\alpha'\, d\rho' \\
&\equiv I_{0}[\tilde{r},\Lambda](\alpha,\rho) + I_{1}[\tilde{r}](\alpha,\rho) + I_{2}[\tilde{r}](\alpha,\rho) + I_{3}[\tilde{r}](\alpha,\rho).
\end{align*}
\end{lemma}
\begin{proof} The lemma is trivial for the derivatives involving $\Lambda$. The continuity of the derivative with respect to $\overline{r}$ also follows since $f\in C^4$ and is compactly supported.
\end{proof}
\subsection{Step 3. Analysis of the linear operator} \label{checking3}
Now we have to study the dimension of both the kernel and the image of the operator $\partial_r F[\rho,\Lambda]$. We will first show that for a certain value of $\Lambda$, which we will call $\Lambda_3$, the kernel of the operator $\partial_r F[\rho,\Lambda_3]$ is one-dimensional. After that we will show that the image of $\partial_r F[\rho,\Lambda_3]$ has codimension one. This will finish the checking of hypothesis 3 in the C-R theorem. Propositions \ref{kernel} and \ref{codimension} are the main results of this section.
\begin{prop}\label{kernel}There exists a pair $(\tilde{r}_0,\Lambda_3)\in H^{4,3}_{3,\text{even}}(\Omega_a)\times \mathbb{R}$, with $\tilde{r}_0$ not identically zero, such that
\begin{align}\label{kerneleq}
\partial_r F[\rho,\Lambda_3]\tilde{r}_0(\alpha,\rho)=0.
\end{align}
Moreover $\tilde{r}_0$ is unique modulo multiplication by a constant.
\end{prop>
\begin{proof} The proof of Proposition \ref{kernel} consists of the following steps:
\begin{enumerate}
\item \textbf{The equation for the radial part.} We introduce in \eqref{kerneleq} the $m$-fold ansatz
\begin{align*}
\tilde{r}(\alpha,\rho) = \rho B(\rho) \cos(m\alpha)
\end{align*}
and we obtain an equation for the pair $(B(\rho),\Lambda)$, which we will write in the following form:
\begin{align}\label{lineal}\Theta^{m} B-\Lambda B=0\quad \text{in $(1-a,1)$}.\end{align}(See the equation \eqref{simplificada} below.)
\item \textbf{Existence of solutions of equation \eqref{kerneleq}.} We solve the equation \eqref{lineal} for $m=3$ and find a solution $(B^3, \Lambda_3)\in H^3((1-a,1))\times\mathbb{R}$ of \eqref{lineal}. Therefore $\tilde{r}_0=\rho B^3(\rho)\cos(3\alpha)\in H^{4,3}_{3,\text{even}}(\Omega_a)$ satisfies \eqref{kerneleq}.
\item \textbf{Uniqueness for the equation \eqref{kerneleq}.} We notice that we still need to show uniqueness for \eqref{kerneleq}, since, until now, we have that, given $\Lambda_3$, there is a unique $B^3$ such that \eqref{lineal} holds. But this fact does not imply that there is only one solution (modulo multiplication by constants), $\tilde{r}_0$, to \eqref{kerneleq}. Indeed, we need to show that the equation $$\partial_r F[\rho,\Lambda_3]\left( b^{3m}(\rho)\cos(3m\alpha)\right)=0\quad \text{for $m>1$}$$
implies $b^{3m}(\rho)=0$ for $m>1$.
\end{enumerate}
\subsubsection{The equation for the radial part}
Taking $\tilde{r}(\alpha, \rho)=\rho B(\rho)\cos(m\alpha)$ we have that $\partial_r F[\rho,\Lambda]\tilde{r}(\alpha, \rho)$ is given by the following terms:
\begin{align*}
I_{0}[\tilde{r},\Lambda](\alpha,\rho) & = -\Lambda m \rho B(\rho) \sin(m\alpha) \\
I_{2}[\tilde{r}](\alpha,\rho) & = -\frac{m}{2\pi} B(\rho)\sin(m\alpha) \int_{-\infty}^\infty \int_{-\pi}^\pi \frac{f_\rho(\rho')\cos(\alpha-\alpha')(\rho'-\rho)}{\sqrt{\rho^2+(\rho')^2-2\rho\rho'\cos(\alpha-\alpha')}}\, d\alpha'\, d\rho' \\
& = -\frac{m}{2\pi} B(\rho)\sin(m\alpha) \int_{-\infty}^\infty f_\rho(\rho')(\rho'-\rho) \left(\int_{-\pi}^\pi \frac{\cos(x)}{\sqrt{\rho^2+(\rho')^2-2\rho\rho'\cos(x)}}\, dx\right) d\rho'
\end{align*}
We move on to $I_1[\tilde{r}](\alpha,\rho)$. We have that
\begin{align*}
\tilde{r}_{\alpha}(\alpha',\rho') - \tilde{r}_{\alpha}(\alpha,\rho) & = -m(\rho'B(\rho')\sin(m\alpha') - \rho B(\rho) \sin(m\alpha)) \\
& = -m(\rho'B(\rho')\sin(m\alpha)\cos(m(\alpha-\alpha'))\\& - \underbrace{\rho'B(\rho')\cos(m\alpha)\sin(m(\alpha-\alpha'))}_{\text{will integrate to } 0} - \rho B(\rho) \sin(m\alpha)).
\end{align*}
Therefore
\begin{align*}
I_1[\tilde{r}](\alpha,\rho) & = -\frac{m}{2\pi}\sin(m\alpha) \int_{-\infty}^\infty \int_{-\pi}^\pi f_\rho(\rho') \frac{(\rho B(\rho) - \rho' B(\rho') \cos(mx))\cos(x)}{\sqrt{\rho^2+(\rho')^2-2\rho\rho'\cos(x)}}\,dx\, d\rho'.
\end{align*}
Finally, we develop $I_3[\tilde{r}](\alpha,\rho)$. Using
\begin{align*}
\rho \tilde{r}(\alpha',\rho') - \rho' \tilde{r}(\alpha,\rho) & = \rho \rho'(B(\rho')\cos(m\alpha') - B(\rho)\cos(m\alpha)) \\
& = \rho \rho'(B(\rho')\sin(m(\alpha-\alpha'))\sin(m\alpha)\\ & + \underbrace{B(\rho')\cos(m(\alpha-\alpha'))\cos(m\alpha) - B(\rho)\cos(m\alpha)}_{\text{will integrate to } 0}),
\end{align*}
this implies that
\begin{align*}
I_3[\tilde{r}](\alpha,\rho) & = -\frac{1}{2\pi} \sin(m\alpha) \int_{-\infty}^\infty f_\rho(\rho')B(\rho') \left(\int_{-\pi}^\pi \frac{\sin(mx)(\rho-\rho'\cos(x))\rho\rho'\sin(x)}{(\rho^2+(\rho')^2-2\rho\rho'\cos(x))^{\frac32}}\, dx\right) d\rho'.
\end{align*}
Integrating by parts, using that
\begin{align*}
\frac{\rho\rho'\sin(x)}{(\rho^2+(\rho')^2-2\rho\rho'\cos(x))^{\frac32}} = - \partial_{x}\left(\frac{1}{(\rho^2+(\rho')^2-2\rho\rho'\cos(x))^{\frac12}}\right),
\end{align*}
we get that $I_3[\tilde{r}](\alpha,\rho)$ is given by:
\begin{align*}
I_3[\tilde{r}](\alpha,\rho) & = -\frac{m}{2\pi} \sin(m\alpha) \int_{-\infty}^\infty f_\rho(\rho')B(\rho') \left(\int_{-\pi}^\pi \frac{\cos(mx)(\rho-\rho'\cos(x))}{\sqrt{\rho^2+(\rho')^2-2\rho\rho'\cos(x)}}\, dx\right) d\rho' \\
& -\frac{1}{2\pi} \sin(m\alpha) \int_{-\infty}^\infty f_\rho(\rho')\rho'B(\rho') \left(\int_{-\pi}^\pi \frac{\sin(mx)\sin(x)}{\sqrt{\rho^2+(\rho')^2-2\rho\rho'\cos(x)}}\, dx\right) d\rho'
\end{align*}
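The integration by parts above rests only on the pointwise identity displayed before it; a short symbolic verification (ours, with \texttt{sympy}) is:
\begin{verbatim}
import sympy as sp

x = sp.symbols('x', real=True)
rho, rhop = sp.symbols('rho rhop', positive=True)
D = rho**2 + rhop**2 - 2 * rho * rhop * sp.cos(x)     # squared distance
lhs = rho * rhop * sp.sin(x) / D**sp.Rational(3, 2)
rhs = -sp.diff(1 / sp.sqrt(D), x)
assert sp.simplify(lhs - rhs) == 0
print("integration-by-parts identity verified")
\end{verbatim}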
Putting all the pieces together and dividing by $\sin(m\alpha)$, the equation we want to solve is:
\begin{align}\label{sinsimplificar}
& B(\rho)\left( -\Lambda m \rho -\frac{m}{2\pi} \int_{-\infty}^\infty f_\rho(\rho')\rho' \left(\int_{-\pi}^\pi \frac{\cos(x)}{\sqrt{\rho^2+(\rho')^2-2\rho\rho'\cos(x)}}\, dx\right) d\rho'\right) \\
& + \frac{2m}{2\pi} \int_{-\infty}^\infty f_\rho(\rho')\rho'B(\rho') \left(\int_{-\pi}^\pi \frac{\cos(mx)\cos(x)}{\sqrt{\rho^2+(\rho')^2-2\rho\rho'\cos(x)}}\, dx\right) d\rho'\nonumber \\
& -\frac{m}{2\pi} \int_{-\infty}^\infty f_\rho(\rho')\rho B(\rho') \left(\int_{-\pi}^\pi \frac{\cos(mx)}{\sqrt{\rho^2+(\rho')^2-2\rho\rho'\cos(x)}}\, dx\right) d\rho'\nonumber \\
& -\frac{1}{2\pi} \int_{-\infty}^\infty f_\rho(\rho')\rho'B(\rho') \left(\int_{-\pi}^\pi \frac{\sin(mx)\sin(x)}{\sqrt{\rho^2+(\rho')^2-2\rho\rho'\cos(x)}}\, dx\right) d\rho' = 0.\nonumber
\end{align}
The inner integrals can be explicitly calculated in terms of the complete elliptic integrals EllipticE and EllipticK for any $m$. We can simplify the equation \eqref{sinsimplificar} in the following way. Letting $s(\rho,\rho') = \frac{\rho}{\rho'}$, we obtain
\begin{align*}
\frac{1}{\sqrt{\rho^2 + (\rho')^{2} - 2\rho \rho' \cos(x)}} = \frac{1}{\rho'} \frac{1}{\sqrt{1+\left(\frac{\rho}{\rho'}\right)^{2}-2\frac{\rho}{\rho'}\cos(x)}} = \frac{1}{\rho'} \frac{1}{\sqrt{1+s^2-2s\cos(x)}},
\end{align*}
thus equation \eqref{sinsimplificar} reads:
\begin{align}\label{sinsimplificar2}
& B(\rho)\left( -\Lambda \rho -\frac{1}{2\pi} \int_{-\infty}^\infty f_\rho(\rho')\left(\int_{-\pi}^\pi \frac{\cos(x)}{\sqrt{1+s^2-2s\cos(x)}}\, dx\right) d\rho'\right) \\
& + \frac{2}{2\pi} \int_{-\infty}^\infty f_\rho(\rho')B(\rho') \left(\int_{-\pi}^\pi \frac{\cos(mx)\cos(x)}{\sqrt{1+s^2-2s\cos(x)}}\, dx\right) d\rho'\nonumber \\
& -\frac{1}{2\pi} \int_{-\infty}^\infty f_\rho(\rho')\frac{\rho}{\rho'} B(\rho') \left(\int_{-\pi}^\pi \frac{\cos(mx)}{\sqrt{1+s^2-2s\cos(x)}}\, dx\right) d\rho'\nonumber \\
& -\frac{1}{2m\pi} \int_{-\infty}^\infty f_\rho(\rho')B(\rho') \left(\int_{-\pi}^\pi \frac{\sin(mx)\sin(x)}{\sqrt{1+s^2-2s\cos(x)}}\, dx\right) d\rho' = 0.\nonumber
\end{align}
We focus on the term
\begin{align*}
\int_{-\pi}^{\pi} \frac{2\cos(mx)\cos(x)-s\cos(mx)-\frac{1}{m}\sin(mx)\sin(x)}{\sqrt{1+s^2-2s\cos(x)}}\,dx \equiv T(s).
\end{align*}
We remark that $\cos(mx) = \frac{1}{m} \partial_{x} \sin(mx)$. This implies, on the one hand:
\begin{align*}
\int_{-\pi}^{\pi} \frac{2\cos(mx)\cos(x)}{\sqrt{1+s^2-2s\cos(x)}}\,dx
& = \frac{1}{m} \int_{-\pi}^{\pi} \frac{2\cos(x)\partial_{x}\sin(mx)}{\sqrt{1+s^2-2s\cos(x)}}\,dx \\
& = \frac{1}{m} \int_{-\pi}^{\pi} \frac{2\sin(x)\sin(mx)}{\sqrt{1+s^2-2s\cos(x)}}\,dx
+ \frac{1}{m} \int_{-\pi}^{\pi} \frac{2s\cos(x)\sin(mx)\sin(x)}{(1+s^2-2s\cos(x))^{3/2}}\,dx.
\end{align*}
On the other hand,
\begin{align*}
s\int_{-\pi}^{\pi} \frac{\cos(mx)}{\sqrt{1+s^2-2s\cos(x)}}\,dx
= \frac{s^2}{m}\int_{-\pi}^{\pi} \frac{\sin(mx)\sin(x)}{(1+s^2-2s\cos(x))^{3/2}}\,dx.
\end{align*}
Therefore, $T(s)$ can be transformed into
\begin{align*}
T(s) & = \frac{1}{m} \int_{-\pi}^{\pi} \frac{\sin(x)\sin(mx)}{\sqrt{1+s^2-2s\cos(x)}}\,dx
+ \frac{1}{m} \int_{-\pi}^{\pi} \frac{2s\cos(x)\sin(mx)\sin(x)}{(1+s^2-2s\cos(x))^{3/2}}\,dx
\\ & -\frac{s^2}{m}\int_{-\pi}^{\pi} \frac{\sin(mx)\sin(x)}{(1+s^2-2s\cos(x))^{3/2}}\,dx
\\ & = \frac{1}{m} \int_{-\pi}^{\pi} \frac{\sin(mx)\sin(x)}{(1+s^2-2s\cos(x))^{3/2}}\,dx
= \frac{1}{s}\int_{-\pi}^{\pi} \frac{\cos(mx)}{\sqrt{1+s^2-2s\cos(x)}}\,dx.
\end{align*}
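The chain of identities leading to the last expression for $T(s)$ can be cross-checked numerically. The sketch below (ours; plain adaptive quadrature with \texttt{scipy}) compares the defining combination of $T(s)$ with the reduced expression $\frac1s\int_{-\pi}^{\pi}\frac{\cos(mx)}{\sqrt{1+s^2-2s\cos(x)}}\,dx$ for several values of $m$ and $s$.
\begin{verbatim}
import numpy as np
from scipy.integrate import quad

def T_original(s, m):
    f = lambda x: (2*np.cos(m*x)*np.cos(x) - s*np.cos(m*x)
                   - np.sin(m*x)*np.sin(x)/m) / np.sqrt(1 + s**2 - 2*s*np.cos(x))
    return quad(f, -np.pi, np.pi, limit=200)[0]

def T_reduced(s, m):
    f = lambda x: np.cos(m*x) / np.sqrt(1 + s**2 - 2*s*np.cos(x))
    return quad(f, -np.pi, np.pi, limit=200)[0] / s

for m in (1, 2, 3):
    for s in (0.3, 0.7, 0.95):
        assert np.isclose(T_original(s, m), T_reduced(s, m), rtol=1e-6)
print("reduction of T(s) verified")
\end{verbatim}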
Substituting into \eqref{sinsimplificar2}, we have to solve:
\begin{align}\label{sinsimplificar3}
& B(\rho)\left( -\Lambda \rho -\frac{1}{2\pi} \int_{-\infty}^\infty f_\rho(\rho')\left(\int_{-\pi}^\pi \frac{\cos(x)}{\sqrt{1+s^2-2s\cos(x)}}\, dx\right) d\rho'\right) \\
& + \frac{1}{2\pi} \int_{-\infty}^\infty f_\rho(\rho')B(\rho') \frac{\rho'}{\rho}\int_{-\pi}^{\pi} \frac{\cos(mx)}{\sqrt{1+s^2-2s\cos(x)}}\,dx\, d\rho' = 0.\nonumber
\end{align}
From now on, we will call
\begin{align*}
I(\rho) & = -\frac{1}{2\pi} \int_{-\infty}^\infty f_\rho(\rho')\left(\int_{-\pi}^\pi \frac{\cos(x)}{\sqrt{1+\left(\frac{\rho}{\rho'}\right)^2-2\left(\frac{\rho}{\rho'}\right)\cos(x)}}\, dx\right) d\rho', \\
T^mB(\rho) & = \frac{1}{2\pi} \int_{-\infty}^\infty f_\rho(\rho')B(\rho') \frac{\rho'}{\rho}\int_{-\pi}^{\pi} \frac{\cos(mx)}{\sqrt{1+\left(\frac{\rho}{\rho'}\right)^2-2\left(\frac{\rho}{\rho'}\right)\cos(x)}}\,dx\, d\rho'.
\end{align*}
We will also define
\begin{align*}
K^{m}(s) & = \frac{1}{2\pi s} \int_{-\pi}^{\pi} \frac{\cos(mx)}{\sqrt{1+s^2-2s\cos(x)}}\,dx, \\
T^{m} B(\rho) & = \int_{-\infty}^{\infty} f_\rho(\rho') B(\rho')K^{m}\left(\frac{\rho}{\rho'}\right) d\rho'.
\end{align*}
This allows us to write \eqref{sinsimplificar3} as:
\begin{align}
\label{simplificada}
\tilde{I}(\rho)B(\rho) + \tilde{T}^mB(\rho) = \Lambda B(\rho),\quad \text{in $(1-a,1)$,}
\end{align}
where $\tilde{T}^m = \frac{1}{\rho} T^m$ and $\tilde{I}=\frac{1}{\rho}I(\rho)$. Thus, using the notation of \eqref{lineal},
$$\Theta^m B(\rho)\equiv \tilde{I}(\rho)B(\rho) + \tilde{T}^mB(\rho).$$
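The remainder of the section studies the spectral problem \eqref{simplificada}. Purely as an illustration (and in no way a substitute for the rigorous computer-assisted computations of the paper), one can discretize $\Theta^m=\tilde I+\tilde T^m$ by a crude Nystr\"om scheme and look at the eigenvalues of the resulting matrix; in the sketch below (ours) the explicit profile $f$ of \eqref{definitiondef} is even replaced by a simple smooth decreasing stand-in, so the numbers produced are only indicative.
\begin{verbatim}
import numpy as np

# Crude Nystrom discretization of Theta^m B = tilde{I} B + tilde{T}^m B on (1-a,1).

a = 0.05
N, M, NX = 40, 80, 4000
rho  = 1 - a + (np.arange(N) + 0.5) * (a / N)    # collocation points
rhop = 1 - a + (np.arange(M) + 0.25) * (a / M)   # quadrature nodes (never equal to rho)
wq = a / M
x = np.linspace(-np.pi, np.pi, NX, endpoint=False) + np.pi / NX
wx = 2 * np.pi / NX

def f_rho(r):   # stand-in for f_rho: negative, vanishing at the endpoints, integral -1
    return -(np.pi / (2 * a)) * np.sin(np.pi * (r - (1 - a)) / a)

def inner(s, m):   # int_{-pi}^{pi} cos(mx)/sqrt(1+s^2-2s cos x) dx, midpoint rule
    return wx * np.sum(np.cos(m * x) / np.sqrt(1 + s**2 - 2 * s * np.cos(x)))

def theta_matrix(m):
    cell = np.minimum(((rhop - (1 - a)) // (a / N)).astype(int), N - 1)
    A = np.zeros((N, N))
    for i, r in enumerate(rho):
        I_i = 0.0
        for j, rp in enumerate(rhop):
            s = r / rp
            I_i += -wq * f_rho(rp) * inner(s, 1) / (2 * np.pi)   # I(rho_i)
            Km = inner(s, m) / (2 * np.pi * s)                   # K^m(s)
            A[i, cell[j]] += wq * f_rho(rp) * Km / r             # tilde{T}^m part
        A[i, i] += I_i / r                                       # tilde{I}(rho_i)
    return A

eigs = np.sort(np.linalg.eigvals(theta_matrix(3)).real)
print("largest eigenvalues of the discretized Theta^3:", eigs[-3:])
\end{verbatim}
The largest eigenvalues of the discretized $\Theta^{3}$ only give a rough idea of where the bifurcation value $\Lambda_3$ should be looked for; the proof below locates $\Lambda_3$ rigorously.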
\subsubsection{Existence of an element in the kernel of $\partial_rF[\rho,\Lambda_3]$}
In this part we will study the equation \eqref{simplificada} in order to obtain an element in the kernel of $\partial_r F[\rho,\Lambda_3]$ for some value $\Lambda_3\in \mathbb{R}$. We shall show the following proposition:
\begin{prop} \label{btres} There exists a solution $(B^3,\Lambda_3)\in H^3((1-a,1))\times \mathbb{R}$ to the equation \eqref{simplificada}. In addition, $\Lambda_3$ is simple.
\end{prop}
We remark that this proposition yields the next corollary:
\begin{corollary}The function $\tilde{r}_0(\alpha,\rho)=\rho B^3(\rho)\cos(3\alpha)$ belongs to $H^{4,3}_{3,\text{even}}(\Omega_a)$ and solves \eqref{kerneleq}.
\end{corollary}
\begin{proof}
The proof of Proposition \ref{btres} is divided in two parts. In the first one we deal with the operator $\tilde{T}^m$ in \eqref{simplificada}. In the second one we show the existence of a pair $(B^3,\Lambda_3)\in H^3((1-a,1))\times \mathbb{R}$ solving \eqref{simplificada} and that $\Lambda_3$ is simple.
\begin{enumerate}
\item \textbf{Study of the operator $\tilde{T}^m$.}
This part is devoted to studying the operator $\tilde{T}^m$ and its derivatives up to order 3. Here we recall its definition:
\begin{align*}
\tilde{T}^m B(\rho)=\frac{1}{\rho}\int_{-\infty}^{\infty} f_\rho(\rho') B(\rho')K^{m}\left(\frac{\rho}{\rho'}\right) d\rho'
\end{align*}
with
\begin{align*}
K^{m}(s) & = \frac{1}{2\pi s} \int_{-\pi}^{\pi} \frac{\cos(mx)}{\sqrt{1+s^2-2s\cos(x)}}\,dx .
\end{align*}
The main results here are Corollary \ref{lemacompacidad} and Lemma \ref{Tstarcompacidad}, which state that the operator $\tilde{T}^m$ and its adjoint $\tilde{T}^{m*}$ are compact operators acting from $H^k$ to $H^{k+1}$ for $k=0,1,2$.
Let us compute the derivatives of $\tilde{T}^{m}$.
\begin{lemma}\label{Tderivadas}Let $B\in C^3$. Then the following equalities hold:
\begin{align*}
\partial_\rho \tilde{T}^{m}B(\rho)=&\tilde{T}^{m}_1\partial_\rho B (\rho)+\frac{1}{\rho}\int_{-\infty}^\infty \partial^{2}_\rho f(\rho') B(\rho')\frac{\rho'}{\rho}K^{m}\left(\frac{\rho}{\rho'}\right)d\rho',\\
\partial^{2}_\rho \tilde{T}^{m}B(\rho)= & \tilde{T}^{m}_2\partial^{2}_\rho B(\rho)+\frac{1}{\rho}\sum_{j=1}^2\int_{-\infty}^\infty \partial^{j+1}_\rho f(\rho')\partial^{2-j}_\rho B(\rho')\left(\frac{\rho'}{\rho}\right)^2K^{m}\left(\frac{\rho}{\rho'}\right)d\rho',\\
\partial^{3}_\rho \tilde{T}^{m}B(\rho)= & \tilde{T}^{m}_3\partial^{3}_\rho B(\rho)+\frac{1}{\rho}\sum_{j=1}^3\int_{-\infty}^\infty \partial^{j+1}_\rho f(\rho')\partial^{3-j}_\rho B(\rho')\left(\frac{\rho'}{\rho}\right)^3K^{m}\left(\frac{\rho}{\rho'}\right)d\rho',
\end{align*}
where
\begin{align}
\tilde{T}^{m}_1 B(\rho)=&\frac{1}{\rho}\int_{-\infty}^\infty f_\rho(\rho')B(\rho')\frac{\rho'}{\rho}K^{m}\left(\frac{\rho}{\rho'}\right)d\rho',\\
\tilde{T}^{m}_2B(\rho)=&\frac{1}{\rho}\int_{-\infty}^\infty f_\rho(\rho')B(\rho')\left(\frac{\rho'}{\rho}\right)^2K^{m}\left(\frac{\rho}{\rho'}\right)d\rho',\\
\tilde{T}^{m}_3B(\rho)=&\frac{1}{\rho}\int_{-\infty}^\infty f_\rho(\rho')B(\rho')\left(\frac{\rho'}{\rho}\right)^3K^{m}\left(\frac{\rho}{\rho'}\right)d\rho'.
\end{align}
\end{lemma}
\betagin{proof}
We notice that after the change of variable $\gammaamma=\frac{\rho'}{\rho}$ we have that
\betagin{align*}
\tilde{T}^{m}B(\rho)=\int_{-\infty}^\infty g(\rho\gammaamma) K^{m}\left(\frac{1}{\gammaamma}\right)d\gammaamma
\end{align*}
where $g=f_\rho B$. Taking one derivative we have that
\betagin{align*}
\partial_\rho \tilde{T}^{m}B(\rho)= & \int_{-\infty}^\infty (\partial_\rho g)(\gammaamma \rho)\gammaamma K^{m}\left(\frac{1}{\gammaamma}\right)d\gammaamma\\
= & \frac{1}{\rho}\int_{-\infty}^\infty \partial_\rho g (\rho')\frac{\rho'}{\rho}K^{m}\left(\frac{1}{\gammaamma}\right)d\rho'\\
=&\frac{1}{\rho}\int_{-\infty}^\infty \partial^{2}_\rho f(\rho') B(\rho')\frac{\rho'}{\rho}K^{m}\left(\frac{1}{\gammaamma}\right)d\rho+\frac{1}{\rho}\int_{-\infty}^\infty f_\rho(\rho') \partial_\rho B(\rho')\frac{\rho'}{\rho}K^{m}\left(\frac{1}{\gammaamma}\right)d\rho',
\end{align*}
so that
\betagin{align*}
\partial_\rho \tilde{T}^{m}B(\rho)=\tilde{T}^{m}_1\partial_\rho B (\rho)+\frac{1}{\rho}\int_{-\infty}^\infty \partial^{2}_\rho f(\rho') B(\rho')\frac{\rho'}{\rho}K^{m}\left(\frac{1}{\gammaamma}\right)d\rho'.
\end{align*}
Computing in a similar way, by taking two derivatives, we have that
\betagin{align*}
\partial_\rho^{2}\tilde{T}^{m}B(\rho)=&\frac{1}{\rho}\int_{-\infty}^\infty \partial_\rho ^{2}g(\rho')\left(\frac{\rho'}{\rho}\right)^2K^{m}\left(\frac{\rho}{\rho'}\right)d\rho'\\
\partial_\rho^{3}\tilde{T}^{m}B(\rho)=&\frac{1}{\rho}\int_{-\infty}^\infty \partial_\rho ^{3}g(\rho')\left(\frac{\rho'}{\rho}\right)^3K^{m}\left(\frac{\rho}{\rho'}\right)d\rho'.
\end{align*}
And we can write
\betagin{align*}
\partial^{2}_\rho \tilde{T}^{m}B(\rho)= & \tilde{T}^{m}_2\partial^{2}_\rho B(\rho)+\frac{1}{\rho}\sum_{j=1}^2\int_{-\infty}^\infty \partial^{j+1}_\rho f(\rho')\partial^{2-j}_\rho B(\rho')\left(\frac{\rho'}{\rho}\right)^2K^{m}\left(\frac{\rho}{\rho'}\right)d\rho'\\
\partial^{3}_\rho \tilde{T}^{m}B(\rho)= & \tilde{T}^{m}_3\partial^{3}_\rho B(\rho)+\sum_{j=1}^3\int_{-\infty}^\infty \partial^{j+1}_\rho f(\rho')\partial^{3-j}_\rho B(\rho')\left(\frac{\rho'}{\rho}\right)^3K^{m}\left(\frac{\rho}{\rho'}\right)d\rho'.
\end{align*}
\end{proof}
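As a plain sanity check (not part of the proofs), the first identity of Lemma \ref{Tderivadas} can be tested numerically with toy choices of $f$, $B$ and of the kernel; the snippet below is a minimal sketch under these assumptions, comparing a centered finite difference of $\tilde{T}B$ with $\tilde{T}_1\partial_\rho B$ plus the extra integral term.
\begin{verbatim}
# Hypothetical stand-ins for f, B and the kernel (NOT the actual kernel K^m).
import numpy as np

K   = lambda t: np.exp(-(t - 1.0) ** 2)
f   = lambda r: np.exp(-r ** 2)
fp  = lambda r: -2 * r * np.exp(-r ** 2)            # f'
fpp = lambda r: (4 * r ** 2 - 2) * np.exp(-r ** 2)  # f''
B   = lambda r: np.sin(r)
Bp  = lambda r: np.cos(r)

rp = np.linspace(-8, 8, 20000)  # quadrature nodes (0 is excluded)

def Tw(rho, g, p):
    # (1/rho) * integral of g(r') (r'/rho)^p K(rho/r') dr'
    return np.trapz(g(rp) * (rp / rho) ** p * K(rho / rp), rp) / rho

rho, h = 1.3, 1e-4
lhs = (Tw(rho + h, lambda r: fp(r) * B(r), 0)
       - Tw(rho - h, lambda r: fp(r) * B(r), 0)) / (2 * h)
rhs = Tw(rho, lambda r: fp(r) * Bp(r), 1) + Tw(rho, lambda r: fpp(r) * B(r), 1)
print(lhs, rhs)  # the two values should agree up to discretization error
\end{verbatim}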
Some of the properties of the operator $\tilde{T}^m$ come from the sign of its kernel. We study this sign in the following lemma.
\begin{lemma}\label{positivo}
Let $T^{m}\left(\frac{\rho}{\rho'}\right)$ be defined as
\begin{align*}
T^{m}\left(\frac{\rho}{\rho'}\right) = \frac{1}{2\pi}\int_{-\pi}^{\pi} \frac{\cos(mx)}{\sqrt{1+\left(\frac{\rho}{\rho'}\right)^2-2\left(\frac{\rho}{\rho'}\right)\cos(x)}}dx.
\end{align*}
Then, for every $(\rho,\rho') \in \mathbb{R}^{2}$ with $\rho \neq \rho'$, we have:
\betagin{enumerate}
\item $T^{m}\left(\frac{\rho}{\rho'}\right) > 0$
\item $T^{m+1}\left(\frac{\rho}{\rho'}\right) < T^{m}\left(\frac{\rho}{\rho'}\right)$.
\end{enumerate}
\end{lemma}
\begin{proof}
Let $r = \frac{\rho}{\rho'}$. We have that
\begin{align*}
T^{m}\left(\frac{\rho}{\rho'}\right) = \frac{1}{2\pi}\int_{-\pi}^{\pi} \frac{\cos(mx)}{(1+r^2-2r\cos(x))^{\frac12}}dx.
\end{align*}
We first treat the case $r < 1$; the case $r > 1$ then follows from the property
\begin{align*}
T^{m}\left(\frac{\rho}{\rho'}\right) = \left(\frac{\rho}{\rho'}\right)^{3}T^{m}\left(\frac{\rho'}{\rho}\right).
\end{align*}
For $r<1$ we compute
\begin{align*}
T^{m}\left(\frac{\rho}{\rho'}\right) & = \frac{1}{2\pi}\frac{1}{1+r} \int_{-\pi}^{\pi} \frac{\cos(mx)}{\left(1 - \frac{4r}{(1+r)^2}\cos^{2}\left(\frac{x}{2}\right)\right)^{\frac12}}dx
= \frac{1}{2\pi}\frac{2}{1+r} \int_{-\frac{\pi}{2}}^{\frac{\pi}{2}} \frac{\cos(2mx)}{\left(1 - \frac{4r}{(1+r)^2}\cos^{2}\left(x\right)\right)^{\frac12}}dx \\
& = \frac{1}{2\pi}\frac{4}{1+r} \sum_{k=0}^{\infty}\int_{0}^{\frac{\pi}{2}} \cos(2mx)\left(\frac{4r}{(1+r)^2}\right)^{k} \cos^{2k}\left(x\right)\frac{1}{k!}\left(1/2\right)_{k} dx \\
& = \frac{1}{2\pi}\frac{4}{1+r} \sum_{k=0}^{\infty}\left(\frac{4r}{(1+r)^2}\right)^{k} \frac{1}{k!}\left(1/2\right)_{k} \int_{0}^{\frac{\pi}{2}} \cos(2mx) \cos^{2k}\left(x\right) dx \\
& = \frac{1}{2\pi} \frac{4}{1+r} \sum_{k=m}^{\infty}\left(\frac{4r}{(1+r)^2}\right)^{k} \frac{1}{k!}\left(1/2\right)_{k} \frac{\pi}{2^{2k+1}} \frac{\Gamma(2k+1)}{\Gamma(1+k+m)\Gamma(1+k-m)} \\
& = \sum_{k=m}^{\infty}A_{k} \frac{1}{\Gamma(1+k+m)\Gamma(1+k-m)}. \\
\end{align*}
Since $A_{k} > 0$ for every $k$, this shows the first item. Next, we compute for $r < 1$:
\begin{align*}
T^{m}\left(\frac{\rho}{\rho'}\right) - T^{m+1}\left(\frac{\rho}{\rho'}\right) & = \frac{A_{m}}{\Gamma(1+2m)}\\ & + \sum_{k=m+1}^{\infty}A_{k} \left(\frac{1}{\Gamma(1+k+m)\Gamma(1+k-m)} - \frac{1}{\Gamma(2+k+m)\Gamma(k-m)}\right) \\
& = \frac{A_{m}}{\Gamma(1+2m)} + \sum_{k=m+1}^{\infty}\frac{A_k}{\Gamma(1+k+m)\Gamma(1+k-m)} \left( \frac{1+2m}{1+k+m}\right) > 0.\\
\end{align*}
This completes the proof of the lemma.
\end{proof}
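For orientation only, both properties of Lemma \ref{positivo} are easy to observe numerically from the defining integral; the following minimal sketch evaluates $T^m$ by quadrature for a few sample ratios $r$.
\begin{verbatim}
import numpy as np

def T(m, r, n=200001):
    x = np.linspace(-np.pi, np.pi, n)
    y = np.cos(m * x) / np.sqrt(1 + r**2 - 2 * r * np.cos(x))
    return np.trapz(y, x) / (2 * np.pi)

for r in (0.3, 0.9, 1.7):
    print(r, [round(T(m, r), 6) for m in range(1, 6)])
# each row is positive and strictly decreasing in m
\end{verbatim}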
In order to prove the compactness of the operator $\tilde{T}^m$ we will use the following decomposition:
\begin{lemma}\label{log+c1}The function $T^m(s)$ satisfies
$$T^{m}(s)=-\frac{2}{\sqrt{s}}\log(|1-s|)+E^m(s),$$
where $E^m(s)$ is a $C^1$ function
with $$||E^m||_{C^1}\leq C(m).$$
\end{lemma}
\begin{proof}
We will split $T^m(s)$ into two parts,
\begin{align*}
T^{m}(s)=&\int_{-\pi}^\pi \frac{1}{\sqrt{(1-s)^2+4s\sin^2\left(\frac{x}{2}\right)}}dx +\int_{-\pi}^\pi \frac{\cos(mx)-1}{\sqrt{(1-s)^2+4s\sin^2\left(\frac{x}{2}\right)}}dx\\
\equiv & \ T_1(s)+T^{m}_2(s).
\end{align*}
We now focus on the term $T_1(s)$. Making the change of variables $y=\sin\left(\frac{x}{2}\right)$ yields
\begin{align*}
T_1(s)=&4\int_{0}^1 \frac{1}{\sqrt{(1-s)^2+4sy^2}}\frac{dy}{\sqrt{1-y^2}}\\
=&\frac{2}{\sqrt{s}}\int_{0}^\frac{1}{\varepsilon}\frac{1}{\sqrt{1+z^2}}\frac{dz}{\sqrt{1-\varepsilon^2z^2}},
\end{align*}
with $\varepsilon=\frac{|1-s|}{2\sqrt{s}}$. We will break $T_1(s)$ into two parts,
\begin{align*}
T_1(s)=&\frac{2}{\sqrt{s}}\int_{0}^\frac{1}{\varepsilon}\frac{1}{\sqrt{1+z^2}}\left(\frac{1}{\sqrt{1-\varepsilon^2z^2}}-1\right)dz\\
+& \frac{2}{\sqrt{s}}\int_{0}^\frac{1}{\varepsilon}\frac{1}{\sqrt{1+z^2}}dz\\
\equiv & \ T_{11}(s)+T_{12}(s).
\end{align*}
The integral in the term $T_{12}(s)$ can be computed exactly. We obtain that
\begin{align}\label{T12}
T_{12}(s)=\frac{2}{\sqrt{s}}\arcsinh\left(\frac{1}{\varpirepsilon}\right),
\end{align}
where we recall that
\betagin{align*}
\arcsinh(x)=\log\left(x+\sqrt{x^2+1}\right)
\end{align*}
and then,
\betagin{align*}
\arcsinh\left(\frac{1}{\varpirepsilon}\right)=\log\left(\frac{2\sqrt{s}}{|1-s|}+\sqrt{\frac{4s}{|1-s|^2}+1}\right)=-\log\left(|1-s|\right)+2\log(1+\sqrt{s}).
\end{align*}
Finally,
\begin{align*}
T_{12}(s)=&-\frac{2}{\sqrt{s}}\log(|1-s|)+\frac{4}{\sqrt{s}}\log(1+\sqrt{s})=-\frac{2}{\sqrt{s}}\log(|1-s|)+S(s),
\end{align*}
where $S(s)$ is a smooth function. Next we will show that the first derivative of the function $T_{11}(s)$ is continuous. We notice that undoing the change of variable we can write
\begin{align*}
T_{11}(s)&=4\int_0^{1}\frac{1}{\sqrt{(1-s)^2+4sy^2}}\left(\frac{1}{\sqrt{1-y^2}}-1\right)dy\\&=4\int_0^{1}\frac{1}{\sqrt{(1-s)^2+4sy^2}}\left(\frac{y^2}{\sqrt{1-y^2}
\left(1+\sqrt{1-y^2}\right)}\right)dy
\end{align*}
Since the function $$\frac{y^2}{\sqrt{(s-1)^2+4sy^2}}$$
is in $L^\infty$, by the dominated convergence theorem (DCT), $T_{11}(s)$ is continuous at $s=1$. In addition, for $s\neq 1$, we can differentiate to get that in a weak sense
\betagin{align*}
\partial_s T_{11}(s)=&-4\int_{0}^1\frac{(s-1)+2y^2}{\left((1-s)^2+4sy^2\right)^\frac{3}{2}}\left(\frac{y^2}{\sqrt{1-y^2}
\left(1+\sqrt{1-y^2}\right)}\right)dy\\
&=T_{111}(s)+T_{112}(s)
\end{align*}
where
\betagin{align*}
T_{111}(s)=-4\int_{0}^1 \frac{(s-1)}{\left((1-s)^2+4sy^2\right)^\frac{3}{2}}\left(\frac{y^2}{\sqrt{1-y^2}
\left(1+\sqrt{1-y^2}\right)}\right)dy
\end{align*}
and
\betagin{align*}
T_{112}(s)=-8\int_{0}^1\frac{y^4}{\left((1-s)^2+4sy^2\right)^\frac{3}{2}}\left(\frac{1}{\sqrt{1-y^2}
\left(1+\sqrt{1-y^2}\right)}\right)dy
\end{align*}
where we can prove that $T_{112}(s)$ is a continuous function at $s=1$ by DCT. To analyze $T_{111}(s)$ we split this term into two parts
\betagin{align*}
T_{111}(s)=&-2\int_0^1 \frac{(s-1)y^2}{\left((1-s)^2+4sy^2\right)^\frac{3}{2}}dy\\& -4\int_{0}^1 \frac{(s-1)y^2}{\left((1-s)^2+4sy^2\right)^\frac{3}{2}}\left(\frac{1}{\sqrt{1-y^2}
\left(1+\sqrt{1-y^2}\right)}-\frac{1}{2}\right)dy.
\end{align*}
The second integral is continuous at $s=1$ again by DCT. The first integral can be computed analytically. We obtain that
\betagin{align*}
-2\int_0^1 \frac{(s-1)y^2}{\left((1-s)^2+4sy^2\right)^\frac{3}{2}}dy=\frac{1-s}{3s}\left(-\frac{2\sqrt{s}}{1+s}+\arcsinh\left(\frac{2\sqrt{s}}{|1-s|}\right)\right),
\end{align*}
which shows that $T_{111}(s)$ is also a continuous function at $s=1$. Therefore we have that $$T_1(s)=-\frac{2}{\sqrt{s}}\log(|1-s|)+E(s)$$ where $E(s)$ is a $C^1$ function.
For $T^m_2(s)$ we use Taylor's integral remainder formula to show that $$\cos(mx)-1=-\frac{m^2x^2}{2}+m^4x^4\int_{0}^1\gamma^3\int_{0}^1\mu^2\int_{0}^1\nu\int_{0}^1 \cos(m\gamma\mu\nu\tau x)d\nu d\mu d\gamma d\tau \equiv -\frac{m^2x^2}{2}
+ m^4x^4 R^m(x),$$
and that
\begin{align*}
2\sin^2\left(\frac{x}{2}\right)=1-\cos(x)=\frac{x^2}{2}-x^4R^1(x).
\end{align*}
Therefore
\begin{align*}
\cos(mx)-1=-2m^2\sin^2\left(\frac{x}{2}\right)+x^4\tilde{R}^m(x),
\end{align*}
where $\tilde{R}^m(x)$ is a bounded function. We can now write $T^m_2(s)$ as follows
\begin{align*}
T^m_2(s)=&-\int_{-\pi}^\pi \frac{2m^2\sin^2\left(\frac{x}{2}\right)}{\sqrt{(1-s)^2+4s\sin^2\left(\frac{x}{2}\right)}}dx+\int_{-\pi}^\pi \frac{x^4\tilde{R}^m(x)}{\sqrt{(1-s)^2+4s\sin^2\left(\frac{x}{2}\right)}}dx.
\end{align*}
By the DCT, $T^m_2(s)$ is continuous at $s=1$. For $s\neq 1$ we differentiate to get
\begin{align*}
\partial_s T^m_2(s)=&2m^2\int_{-\pi}^\pi \frac{(s-1)+2\sin^2\left(\frac{x}{2}\right)}{\left((1-s)^2+4s\sin^2\left(\frac{x}{2}\right)\right)^\frac{3}{2}}\sin^2\left(\frac{x}{2}\right)dx\\&
-\int_{-\pi}^\pi \frac{(s-1)+2\sin^2\left(\frac{x}{2}\right)}{\left((1-s)^2+4s\sin^2\left(\frac{x}{2}\right)\right)^\frac{3}{2}}x^4\tilde{R}^m(x)dx.
\end{align*}
The second integral on the right hand side is a continuous function. In the first one we make the change of variables $y=\sigman\left(\frac{x}{2}\right)$ to get that
\betagin{align*}
\int_{-\pi}^\pi \frac{(s-1)+2\sigman^2\left(\frac{x}{2}\right)}{\left((1-s)^2+4s\sigman^2\left(\frac{x}{2}\right)\right)^\frac{3}{2}}\sigman^2\left(\frac{x}{2}\right)dx=4\int_0^1 \frac{(s-1)+2y^2}{\left((1-s)^2+4sy^2\right)^\frac{3}{2}}\frac{y^2}{\sqrt{1-y^2}}dy.
\end{align*}
Here, the difficulty in showing continuity comes from the integral
\begin{align*}
\int_0^1 \frac{(s-1)}{\left((1-s)^2+4sy^2\right)^\frac{3}{2}}\frac{y^2}{\sqrt{1-y^2}}dy,
\end{align*}
but this integral has already been shown to be continuous in the analysis of $T_{111}(s)$. This concludes the proof of the lemma.
\end{proof}
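As a quick numerical illustration of Lemma \ref{log+c1} (with the normalization used in this proof, i.e. without the $\frac{1}{2\pi}$ prefactor), one can check that $T^m(s)+\frac{2}{\sqrt{s}}\log|1-s|$ stabilises as $s\to 1$ while $T^m(s)$ itself diverges logarithmically; this is an orientation check only, not part of the argument.
\begin{verbatim}
import numpy as np

def T(m, s, n=2_000_001):
    x = np.linspace(-np.pi, np.pi, n)
    y = np.cos(m * x) / np.sqrt((1 - s)**2 + 4 * s * np.sin(x / 2)**2)
    return np.trapz(y, x)

for s in (0.9, 0.99, 0.999):
    print(s, T(1, s), T(1, s) + 2 / np.sqrt(s) * np.log(abs(1 - s)))
# the last column remains of order one as s -> 1
\end{verbatim}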
We also need to study the derivatives of the function $\tilde{T}^{3*}B(\rho)$. We start with the following lemma:
\begin{lemma}\label{c2}The function $(1-s)^3T^3(s)$ is $C^2$ in a neighbourhood of $s=1$.
\end{lemma}
\begin{proof}
In order to prove the lemma we will split $T^3(s)$ into several parts and deal with each of them separately. We first notice that we can write
\begin{align*}
T^{3}(s)&=\int_{-\pi}^{\pi} \frac{\cos(3x)}{\sqrt{(1-s)^2+4s\sin^2\left(\frac{x}{2}\right)}}dx.
\end{align*}
We will use the formula
\begin{align*}
\cos(3x)=1-18\sin^2\left(\frac{x}{2}\right)+48\sin^4\left(\frac{x}{2}\right)-32\sin^6\left(\frac{x}{2}\right)
\end{align*}
and make the change $y=\sin\left(\frac{x}{2}\right)$ to get
\begin{align*}
T^{3}(s)=&2\int_{-1}^{1}\frac{1-18y^2+48y^4-32y^6}{\sqrt{(1-s)^2+4sy^2}}\frac{dy}{\sqrt{1-y^2}}\\
=& 4\int_{0}^{1}\frac{1-18y^2+48y^4-32y^6}{\sqrt{(1-s)^2+4sy^2}}\frac{dy}{\sqrt{1-y^2}}
\end{align*}
In addition we change variables again by setting $z=\frac{2\sqrt{s}}{|1-s|}y$, so that
\begin{align*}
T^{3}(s)=&\frac{2}{\sqrt{s}}\int_{0}^\frac{1}{\varepsilon}\frac{1-18\varepsilon^2z^2+48\varepsilon^4z^4-32\varepsilon^6z^6}{\sqrt{1+z^2}}\frac{dz}{\sqrt{1-\varepsilon^2z^2}},
\end{align*}
where $\varepsilon=\frac{|1-s|}{2\sqrt{s}}$. Now we define
\begin{align*}
T_1(s)=&\frac{2}{\sqrt{s}}\int_{0}^\frac{1}{\varepsilon}\frac{1}{\sqrt{1+z^2}}\frac{dz}{\sqrt{1-\varepsilon^2z^2}}\\
T_2^{3}(s)=&\frac{2}{\sqrt{s}}\int_{0}^\frac{1}{\varepsilon}\frac{-18\varepsilon^2z^2+48\varepsilon^4z^4-32\varepsilon^6z^6}{\sqrt{1+z^2}}\frac{dz}{\sqrt{1-\varepsilon^2z^2}}.
\end{align*}
Thus $T^{3}(s)=T_1(s)+T^3_2(s)$, and $T_1(s)$ is the same function as in the proof of Lemma \ref{log+c1}. Then we know that $T_1(s)=T_{11}(s)+T_{12}(s)$, where
$$T_{12}(s)=-\frac{2}{\sqrt{s}}\log(|1-s|)+S(s),$$ with $S(s)$ a smooth function. Therefore $(1-s)^3T_{12}(s)$ is a $C^2$ function.
We also know that $T_{11}(s)$ is a $C^1$ function. In order to analyze two derivatives of $T_{11}(s)$ we differentiate for $s\neq 1$ to get
\begin{align*}
\partial^2_s T_{11}(s)=&-4\int_{0}^1\frac{1}{\left((1-s)^2+4sy^2\right)^\frac{3}{2}}\left(\frac{y^2}{\sqrt{1-y^2}
\left(1+\sqrt{1-y^2}\right)}\right)dy
\\&+12\int_{0}^1\frac{\left((s-1)+2y^2\right)^2}{\left((1-s)^2+4sy^2\right)^\frac{5}{2}}\left(\frac{y^2}{\sqrt{1-y^2}
\left(1+\sqrt{1-y^2}\right)}\right)dy.
\end{align*}
Multiplying by $(1-s)$ we have that
\begin{align*}
(1-s)\partial^{2}_{s} T_{11}(s)=-T_{111}(s)+12\int_{0}^1(1-s)\frac{\left((s-1)+2y^2\right)^2}{\left((1-s)^2+4sy^2\right)^\frac{5}{2}}\left(\frac{y^2}{\sqrt{1-y^2}
\left(1+\sqrt{1-y^2}\right)}\right)dy,
\end{align*}
where we have already checked that $T_{111}(s)$ is continuous. Next we check that the remaining integral is also continuous. This will be a consequence of the continuity of the following terms
\betagin{align*}
\int_{0}^1(1-s)\frac{(s-1)^2}{\left((1-s)^2+4sy^2\right)^\frac{5}{2}}\left(\frac{y^2}{\sqrt{1-y^2}
\left(1+\sqrt{1-y^2}\right)}\right)dy\\
\int_{0}^1(1-s)\frac{(s-1)y^2}{\left((1-s)^2+4sy^2\right)^\frac{5}{2}}\left(\frac{y^2}{\sqrt{1-y^2}
\left(1+\sqrt{1-y^2}\right)}\right)dy\\
\int_{0}^1(1-s)\frac{y^4}{\left((1-s)^2+4sy^2\right)^\frac{5}{2}}\left(\frac{y^2}{\sqrt{1-y^2}
\left(1+\sqrt{1-y^2}\right)}\right)dy.
\end{align*}
The last term is continuous just by applying the DCT. The other two terms can be treated in a similar way as before to show continuity.
In addition, the analysis of $T^3_{2}(s)$ does not introduce any new difficulties and we will not give the details here. This concludes the proof of the lemma.
\end{proof}
\begin{lemma}\label{commutator}The function $(1-s)T^m(s)$ satisfies
\begin{align*}
\partial_s \left((1-s)T^m(s)\right)=-T^m(s)+E^{m}(s)
\end{align*}
where $E^{m}(s)$ is a continuous function with $L^\infty$-norm independent of $m$.
\end{lemma}
\begin{proof}
By Lemma \ref{log+c1}, $(1-s)T^m(s)$ is a continuous function. For $s\neq 1$ we can differentiate to get
\begin{align*}
\partial_s \left((1-s)T^m(s)\right)=-T^m(s)+E^{m}(s)
\end{align*}
with
\begin{align*}
E^m(s)=\int_{-\pi}^\pi\frac{(1-s)^2-2(1-s)\sin^2\left(\frac{x}{2}\right)}{\left((1-s)^2+4s\sin^2\left(\frac{x}{2}\right)\right)^\frac{3}{2}}\cos(mx)dx.
\end{align*}
In addition we have that
\begin{align*}
\left|E^m(s)\right|\leq \int_{-\pi}^{\pi} \frac{(1-s)^2+2|1-s|\sin^2\left(\frac{x}{2}\right)}{\left((1-s)^2+4s\sin^2\left(\frac{x}{2}\right)\right)^\frac{3}{2}}dx.
\end{align*}
For the first integral we have that
\begin{align}\label{a1}
\int_{-\pi}^{\pi} &\frac{(1-s)^2}{\left((1-s)^2+4s\sin^2\left(\frac{x}{2}\right)\right)^\frac{3}{2}}dx=\frac{2}{\sqrt{s}}\int_{0}^\frac{2\sqrt{s}}{|1-s|}\frac{1}{\left(1+y^2\right)^\frac{3}{2}}
\frac{dy}{\sqrt{1-\frac{|1-s|^2}{4s}y^2}}\nonumber\\
&=\frac{2}{\sqrt{s}}\int_{0}^\frac{2\sqrt{s}}{|1-s|}\frac{1}{\left(1+y^2\right)^\frac{3}{2}}
dy +\frac{2}{\sqrt{s}}\int_{0}^\frac{2\sqrt{s}}{|1-s|}\frac{1}{\left(1+y^2\right)^\frac{3}{2}}
\left(\frac{1}{\sqrt{1-\frac{|1-s|^2}{4s}y^2}}-1\right)dy\nonumber\\
&=\frac{2}{\sqrt{s}}\int_{0}^\frac{2\sqrt{s}}{|1-s|}\frac{1}{\left(1+y^2\right)^\frac{3}{2}}
dy +4\int_{0}^1\frac{|1-s|}{\left((1-s)^2+4sy^2\right)^\frac{3}{2}}
\left(\frac{1}{\sqrt{1-y^2}}-1\right)dy.
\end{align}
In addition, the second integral can be written as
\begin{align}\label{a2}
&\int_{-\pi}^{\pi} \frac{2|1-s|\sin^2\left(\frac{x}{2}\right)}{\left((1-s)^2+4s\sin^2\left(\frac{x}{2}\right)\right)^\frac{3}{2}}dx=
8\int_{0}^1\frac{|1-s|y^2}{\left((1-s)^2+4sy^2\right)^\frac{3}{2}}\frac{dy}{\sqrt{1-y^2}}\nonumber\\
&=\frac{|1-s|}{s^\frac{3}{2}}\int_{0}^\frac{2\sqrt{s}}{|1-s|}\frac{y^2}{(1+y^2)^\frac{3}{2}}\frac{dy}{\sqrt{1-\frac{|1-s|^2}{4s}y^2}} =\frac{|1-s|}{s^\frac{3}{2}}\int_{0}^\frac{2\sqrt{s}}{|1-s|}\frac{y^2}{(1+y^2)^\frac{3}{2}}dy\nonumber\\
&+\frac{|1-s|}{s^\frac{3}{2}}\int_{0}^\frac{2\sqrt{s}}{|1-s|}\frac{y^2}{(1+y^2)^\frac{3}{2}}\left(\frac{1}{\sqrt{1-\frac{|1-s|^2}{4s}y^2}}-1\right)dy\nonumber\\
&=\frac{|1-s|}{s^\frac{3}{2}}\int_{0}^\frac{2\sqrt{s}}{|1-s|}\frac{y^2}{(1+y^2)^\frac{3}{2}}dy+8\int_{0}^1\frac{|1-s|y^2}{\left((1-s)^2+4sy^2\right)^\frac{3}{2}}\left(\frac{1}{\sqrt{1-y^2}}-1\right)dy.
\end{align}
From expressions \eqref{a1} and \eqref{a2} and the DCT it is easy to reach the conclusion of the lemma.
\end{proof}
Now we can state and prove the main lemmas of this section.
\begin{lemma}\label{lemacompacidad}
Let $f \in C^{4}(\mathbb{R}^{+})$ and $m \geq 1$. Then $\tilde{T}^{m}$ and $\tilde{T}^m_i$, with $i=1,2,3$, are compact operators from $L^{2}$ to $H^{1}$, with
\begin{align*}
&||\tilde{T}_i^m v||_{L^2}\leq C ||v||_{L^2},\\
&\|\tilde{T}_i^{m} v\|_{H^{1}} \leq C(m)\|v\|_{L^{2}},
\end{align*}
where the constant $C$ in the first inequality only depends on $\|f\|_{C^{4}}$ and $\Omega_a$, and the constant $C(m)$ in the second one also depends on $m$.
\end{lemma}
\begin{proof}
Because of Lemma \ref{log+c1}, $\tilde{T}^3_i$ is bounded from $L^2$ to $L^2$. Then, by Lemma \ref{positivo} and the monotonicity of $f(\rho)$, every $\tilde{T}^m_i$ is also bounded from $L^2$ to $L^2$, by the same constant as $\tilde{T}^3_i$. In order to show the boundedness from $L^2$ to $H^1$ we first show the estimate for smooth functions. This is done just using Lemma \ref{log+c1} and the $L^2$-boundedness of the Hilbert transform. Finally we proceed by a density argument.
\end{proof}
\begin{corollary}\label{H2H3} The operator $\tilde{T}^{m}$ is bounded from $H^{k}$ to $H^{k+1}$, for $k=0,1,2$, with norm depending only on $m$, $||f||_{C^4}$ and $a$.
\end{corollary}
\begin{proof} We first prove the bound for smooth functions. This can be done by using Lemmas \ref{Tderivadas} and \ref{lemacompacidad}. Then we proceed by a density argument.
\end{proof}
Finally, we will study the adjoint operator of $\tilde{T}^3$, given by the expression
\begin{align*}
\tilde{T}^{3*}B(\rho)=f_\rho(\rho)\int_{1-a}^1B(\rho')K^3\left(\frac{\rho'}{\rho}\right)\frac{d\rho'}{\rho'}.
\end{align*}
\begin{lemma}\label{Tstarderivadas}Let $B\in C^3$. Then
the following equalities hold:
\begin{enumerate}
\item
\begin{align*}
\partial_\rho \tilde{T}^{3*}B(\rho)=\partial^{2}_\rho f(\rho)\int_{1-a}^1B(\rho')K^3\left(\frac{\rho'}{\rho}\right)\frac{d\rho'}{\rho'}+\frac{f_\rho(\rho)}{\rho}\,P.V.\int_{1-a}^1B(\rho')
\partial_{\rho'}\left(K^3\left(\frac{\rho'}{\rho}\right)\right)d\rho',\end{align*}
\item
\betagin{align*}
&\partial^2_\rho \tilde{T}^{3*}B(\rho)\\&=\partial^3_\rho f(\rho)\int_{1-a}^1B(\rho')K^3\left(\frac{\rho'}{\rho}\right)\frac{d\rho'}{\rho'}\\
&-\frac{\partial^2_\rho f(\rho)}{\rho}P.V.\int_{1-a}^1B(\rho')\partial_{\rho'} K^3\left(\frac{\rho'}{\rho}\right)d\rho'-\partial_\rho\left(B(1)\frac{f_\rho(\rho)K^3\left(\frac{1}{\rho}\right)}{\rho}-B(1-a)\frac{f_\rho(\rho)K^3\left(\frac{1-a}{\rho}\right)}{\rho}\right)\\
&-\frac{\partial^2_\rho f(\rho)}{\rho}\int_{1-a}^1 B'(\rho') K^3\left(\frac{\rho'}{\rho}\right)+\frac{f_\rho(\rho)}{\rho}P.V.\int_{1-a}^1 B'(\rho')\partial_{\rho'}\left(\rho'K^3\left(\frac{\rho'}{\rho}\right)\right)d\rho',\end{align*}
\item
\betagin{align*}
&\partial^3_\rho \tilde{T}^{3*}(s)\\&=\partial^4_\rho f(\rho)\int_{1-a}^1B(\rho')K^{3}\left(\frac{\rho'}{\rho}\right)\frac{d\rho'}{\rho'}+\frac{\partial^3_\rho f(\rho)}{\rho}\left(-B(1)K^{3}\left(\frac{1}{\rho}\right)+B(1-a)K^{3}\left(\frac{1-a}{\rho}\right)\right)\\&+\partial^3_\rho f(\rho)\int_{1-a}^1B'(\rho')K^{3}\left(\frac{\rho'}{\rho}\right)\frac{d\rho'}{\rho}\\
&+\partial_\rho\left(\frac{\partial^2_\rho f(\rho)}{\rho}\left(-B(1)K^3\left(\frac{1}{\rho}\right)+B(1-a)K^3\left(\frac{1-a}{\rho}\right)\right)\right)\\
&+2\partial^3_\rho f(\rho)\int_{1-a}^1B'(\rho')K^3\left(\frac{\rho'}{\rho}\right)\frac{d\rho'}{\rho}+2\frac{\partial^2_\rho f(\rho)}{\rho^2}\left(-B'(1)K^3\left(\frac{1}{\rho}\right)+(1-a)B'(1-a)K^3\left(\frac{1-a}{\rho}\right)\right)\\
&+2\partial^2_\rho f(\rho)\int_{1-a}^1B''(\rho')K^3\left(\frac{\rho'}{\rho}\right)\frac{\rho'}{\rho}\frac{d\rho'}{\rho}
-\partial^2_\rho\left(B(1)\frac{f_\rho(\rho)K^3\left(\frac{1}{\rho}\right)}{\rho}-B(1-a)\frac{f_\rho(\rho)K^3\left(\frac{1-a}{\rho}\right)}{\rho}\right)\\
&\partial_\rho^2f(\rho)\int_{1-a}^1 B''(\rho')K^3\left(\frac{\rho'}{\rho}\right)\frac{\rho'}{\rho}\frac{d\rho'}{\rho}-f_\rho(\rho)P.V.\int_{1-a}^1 \partial_{\rho'}\left(\rho'^2K^3\left(\frac{\rho'}{\rho}\right)\right)\frac{d\rho'}{\rho}.
\end{align*}
\end{enumerate}
\end{lemma}
\betagin{proof}
First we notice that, after a change of variables,
\betagin{align*}
\tilde{T}^{3*}B(\rho)=f_\rho(\rho)\int_\frac{{1-a}}{\rho}^\frac{1}{\rho} B(s\rho)K^3\left(s\right)\frac{ds}{s}.
\end{align*}
Then, taking a derivative yields,
\betagin{align}\Lambdabel{luego1}
&\partial_\rho \tilde{T}^{3*}B(\rho)=\partial^2_\rho f(\rho)\int_\frac{{1-a}}{\rho}^\frac{1}{\rho}B(\rho s)K^3(s)\frac{ds}{s}+\frac{1}{\rho}f_\rho(\rho)
\left(B(1)K^3\left(\frac{1}{\rho}\right)-B(1-a)K^3\left(\frac{1-a}{\rho}\right)\right)\nonumber\\
&+f_\rho(\rho)\int_\frac{{1-a}}{\rho}^\frac{1}{\rho}K^3(s)B'(\rho s)ds
\end{align}
and changing variable we have that
\betagin{align*}
&\partial_\rho \tilde{T}^{3*}B(\rho)=\partial^2_\rho f(\rho)\int_{1-a}^1 B(\rho')K^3\left(\frac{\rho'}{\rho}\right)\frac{d\rho'}{\rho'}+\frac{1}{\rho}f_\rho(\rho)
\left(B(1)K^3\left(\frac{1}{\rho}\right)-B(1-a)K^3\left(\frac{1-a}{\rho}\right)\right)\\
&-f_\rho(\rho)\int_{1-a}^1K^3\left(\frac{\rho'}{\rho}\right)B'(\rho')\frac{d\rho'}{\rho}.
\end{align*}
Integration by parts in the last integral then yields
\begin{align*}
\partial_\rho \tilde{T}^{3*}B(\rho)=\partial^2_\rho f(\rho)\int_{1-a}^1 B(\rho') K^3\left(\frac{\rho'}{\rho}\right)\frac{d\rho'}{\rho'}+f_\rho(\rho)\,P.V.\int_{1-a}^1\partial_{\rho'}K^3\left(\frac{\rho'}{\rho}\right)B(\rho')\frac{d\rho'}{\rho}.
\end{align*}
Taking a derivative on the equation \eqref{luego1} we obtain
\betagin{align}\Lambdabel{luego2}
&\partial^2_\rho\tilde{T}^{3*}B(\rho)=\partial^3_\rho f(\rho)\int_\frac{{1-a}}{\rho}^\frac{1}{\rho}B(\rho s)K^3(s)\frac{ds}{s}+\partial^2_\rho f(\rho)\frac{1}{\rho}\left(-B(1)K^3\left(\frac{1}{\rho}\right)+B(1-a)K^3\left(\frac{1-a}{\rho}\right)\right)\nonumber\\
&+\partial^2_\rho f(\rho)\int_\frac{{1-a}}{\rho}^\frac{1}{\rho}B'(\rho s)K^3(s) ds-\partial_\rho\left(B(1)\frac{f_\rho(\rho)K^3\left(\frac{1}{\rho}\right)}{\rho}-B(1-a)\frac{f_\rho(\rho)K^3\left(\frac{1-a}{\rho}\right)}{\rho}\right)\nonumber\\
& +\partial^2_\rho f(\rho)\int_{\frac{1-a}{\rho}}^\frac{1}{\rho}B'(\rho s)K^3(s)ds+\frac{f_\rho(\rho)}{\rho^2}\left(B'(1)K^3\left(\frac{1}{\rho}\right)-B'(1-a)K^3\left(\frac{1-a}{\rho}\right)\right)\nonumber\\
&+f_\rho(\rho)\int_\frac{{1-a}}{\rho}^\frac{1}{\rho}K^3(s)B''(\rho s)s ds
\end{align}
A change of variable and integration by parts then yield
\begin{align*}
&\partial^2_\rho \tilde{T}^{3*}B(\rho)=\partial^3_\rho f(\rho)\int_{1-a}^1B(\rho')K^3\left(\frac{\rho'}{\rho}\right)\frac{d\rho'}{\rho'}\\
&-\frac{\partial^2_\rho f(\rho)}{\rho}P.V.\int_{1-a}^1B(\rho')\partial_{\rho'} K^3\left(\frac{\rho'}{\rho}\right)d\rho'-\partial_\rho\left(B(1)\frac{f_\rho(\rho)K^3\left(\frac{1}{\rho}\right)}{\rho}-B(1-a)\frac{f_\rho(\rho)K^3\left(\frac{1-a}{\rho}\right)}{\rho}\right)\\
&+\frac{\partial^2_\rho f(\rho)}{\rho}\int_{1-a}^1 B'(\rho') K^3\left(\frac{\rho'}{\rho}\right)+\frac{f_\rho(\rho)}{\rho}P.V.\int_{1-a}^1 B'(\rho')\partial_{\rho'}\left(\rho'K^3\left(\frac{\rho'}{\rho}\right)\right)d\rho'.
\end{align*}
Finally we take a derivative of equation \eqref{luego2}. We have that
\begin{align*}
&\partial^{3}_\rho \tilde{T}^{3*}=\partial^4_\rho f(\rho)\int_{\frac{1-a}{\rho}}^\frac{1}{\rho} B(\rho s)K^3(s)\frac{ds}{s} +\frac{\partial^3_\rho f(\rho)}{\rho}\left(-B(1)K^{3}\left(\frac{1}{\rho}\right)+B(1-a)K^{3}\left(\frac{1-a}{\rho}\right)\right)\\&+\partial^3_\rho f(\rho)\int_\frac{1-a}{\rho}^\frac{1}{\rho}B'(\rho s) K^3(s)ds\\
&+\partial_\rho\left(\frac{\partial^2_\rho f(\rho)}{\rho}\left(-B(1)K^3\left(\frac{1}{\rho}\right)+B(1-a)K^3\left(\frac{1-a}{\rho}\right)\right)\right)\\
&+2\partial^3_\rho f(\rho)\int_\frac{1-a}{\rho}^\frac{1}{\rho}B'(\rho s)K^3(s)ds +2\frac{\partial^2_\rho f(\rho)}{\rho^2}\left(-B'(1)K^3\left(\frac{1}{\rho}\right)+(1-a)K^3\left(\frac{1-a}{\rho}\right)\right)\\
&+2\partial^2_\rho f(\rho)\int_\frac{1-a}{\rho}^\frac{1}{\rho}B''(\rho s)K^3(s)s ds -\partial^2_\rho\left(B(1)\frac{f_\rho(\rho)K^3\left(\frac{1}{\rho}\right)}{\rho}-B(1-a)\frac{f_\rho(\rho)K^3\left(\frac{1-a}{\rho}\right)}{\rho}\right)\\
&+\partial^2_\rho f(\rho)\int_\frac{1-a}{\rho}^\frac{1}{\rho}K^3(s)B''(\rho s)s ds-f_\rho(\rho)\left(-\frac{1}{\rho^3}B''(1)K^3\left(\frac{1}{\rho}\right)+\frac{(1-a)^2}{\rho^3}B''(1-a)K^3\left(\frac{1-a}{\rho}\right)\right)\\
&+f_\rho(\rho)\int_\frac{1-a}{\rho}^\frac{1}{\rho}K^3(s)B'''(\rho s)s^2 ds.
\end{align*}
And a change of variable and an integration by parts yield
\begin{align*}
&\partial^3_\rho \tilde{T}^{3*}(s)=\partial^4_\rho f(\rho)\int_{1-a}^1B(\rho')K^{3}\left(\frac{\rho'}{\rho}\right)\frac{d\rho'}{\rho'}+\frac{\partial^3_\rho f(\rho)}{\rho}\left(-B(1)K^{3}\left(\frac{1}{\rho}\right)+B(1-a)K^{3}\left(\frac{1-a}{\rho}\right)\right)\\&+\partial^3_\rho f(\rho)\int_{1-a}^1B'(\rho')K^{3}\left(\frac{\rho'}{\rho}\right)\frac{d\rho'}{\rho}\\
&+\partial_\rho\left(\frac{\partial^2_\rho f(\rho)}{\rho}\left(-B(1)K^3\left(\frac{1}{\rho}\right)+B(1-a)K^3\left(\frac{1-a}{\rho}\right)\right)\right)\\
&+2\partial^3_\rho f(\rho)\int_{1-a}^1B'(\rho')K^3\left(\frac{\rho'}{\rho}\right)\frac{d\rho'}{\rho}+2\frac{\partial^2_\rho f(\rho)}{\rho^2}\left(-B'(1)K^3\left(\frac{1}{\rho}\right)+(1-a)B'(1-a)K^3\left(\frac{1-a}{\rho}\right)\right)\\
&+2\partial^2_\rho f(\rho)\int_{1-a}^1B''(\rho')K^3\left(\frac{\rho'}{\rho}\right)\frac{\rho'}{\rho}\frac{d\rho'}{\rho}
-\partial^2_\rho\left(B(1)\frac{f_\rho(\rho)K^3\left(\frac{1}{\rho}\right)}{\rho}-B(1-a)\frac{f_\rho(\rho)K^3\left(\frac{1-a}{\rho}\right)}{\rho}\right)\\
&\partial_\rho^2f(\rho)\int_{1-a}^1 B''(\rho')K^3\left(\frac{\rho'}{\rho}\right)\frac{\rho'}{\rho}\frac{d\rho'}{\rho}-f_\rho(\rho)P.V.\int_{1-a}^1 \partial_{\rho'}\left(\rho'^2K^3\left(\frac{\rho'}{\rho}\right)\right)\frac{d\rho'}{\rho}.
\end{align*}
\end{proof}
\begin{lemma}\label{Tstarcompacidad} The operator $\tilde{T}^{3*}$ is bounded from $H^{k}$ to $H^{k+1}$, for $k=0,1,2$, with norm depending only on $||f||_{C^4}$ and $a$.
\end{lemma}
\begin{proof}
In order to prove this lemma we prove the estimates for smooth functions. This can be done by using Lemma \ref{Tstarderivadas}. Indeed, in order to control the evaluations of $B$ and its derivatives we use the standard Sobolev embedding $H^k\subset C^{l}$, for $k>l+ \frac{1}{2}$ in one dimension ($H^3\subset C^2$). In addition we use Lemmas \ref{log+c1} and \ref{c2}, together with the $L^2$-boundedness of the Hilbert transform, to control the terms involving the derivative of the kernel $K^3$. Finally we proceed by a density argument.
\end{proof}
Finally, we finish this section by studying the regularity of $\tilde{I}(\rho)$.
\begin{lemma}\label{regularidadI}
Let $f \in C^{4}(\mathbb{R}^{+})$. Then $\tilde{I}(\rho)\in C^{3}(1-a,1)$.
\end{lemma}
\begin{proof}
The proof follows easily from the fact that $f \in C^{4}$ and Lemma \ref{log+c1}.
\end{proof}
\item \textbf{Existence of a solution for the equation \eqref{simplificada}.}
We will regard our operator $\Theta^3$ as a perturbation of its symmetric part $\Theta_S^3$ (which we can do by taking $a$ and $\beta$ small enough). Then we estimate the first and second eigenvalues (and the first eigenfunction) and see that there is a gap. If the antisymmetric perturbation is small enough, then the gap persists and the argument goes through. The strategy is to use a computer-assisted proof for the estimation of the eigenvalue-eigenvector pair and the norms of the different operators that appear.
Let $(B_{sj}, \Lambda^*)$ be the approximate eigenvector-eigenvalue pair for the symmetric operator $\Theta^3_{S}$, satisfying
\begin{align}\label{casi}
\Theta_{S}^3 B_{sj} = \Lambda^{*} B_{sj} + e,
\end{align}
where $e$ is small (see the next lemma for an explicit bound on $e$).
\begin{lemma}\label{lemacotae}
Let $B_{sj} = \frac{1}{\sqrt{a-a\beta}}{1}_{[1-a+\frac{a\beta}{2},1-\frac{a\beta}{2}]}$ and $\Lambda^{*} = 0.3482$. Then:
\begin{align*}
\|e\|_{L^{2}} < 0.0905, \\
|\langle e, B_{sj} \rangle | < 0.0101.
\end{align*}
\end{lemma}
\betagin{proof}
The proof is computer-assisted and the code can be found in the supplementary material. We refer to the appendix for details about the implementation.
\end{proof}
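For the reader's convenience we indicate, with a heavily simplified and purely illustrative sketch, the kind of rigorous enclosure that underlies these computer-assisted bounds (the actual implementation is the one in the supplementary material): every quantity is an interval, and an integral such as the kernel integral of Lemma \ref{positivo} is enclosed by summing, over a partition, the subinterval length times an interval evaluation of the integrand.
\begin{verbatim}
# Illustrative only: rigorous enclosure of T^m(r) via mpmath interval arithmetic.
from mpmath import iv
iv.dps = 30

def enclose_T(m, r, N=2000):
    r, total = iv.mpf(r), iv.mpf(0)
    a, h = -iv.pi, 2 * iv.pi / N
    for k in range(N):
        x = a + iv.mpf([k, k + 1]) * h          # encloses the k-th subinterval
        total += h * (iv.cos(m * x) /
                      iv.sqrt(1 + r**2 - 2 * r * iv.cos(x)))
    return total / (2 * iv.pi)

print(enclose_T(3, 0.5))  # an interval certified to contain T^3(0.5)
\end{verbatim}
Bounds such as those in Lemma \ref{lemacotae} can be organized around enclosures of this type; we refer to the appendix for the actual procedure.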
The symbol $B_{sj}^\perp$ will denote the orthogonal complement of $B_{sj}$ in $L^2((1-a,1))$, i.e.
$$B_{sj}^\perp\equiv \{ v\in L^2((1-a,1))\,:\, \langle B_{sj}, v\rangle_{L^2((1-a,1))}=0\}.$$
Let us find a solution $(B^{3},\Lambda)$ such that
\begin{align}\label{eqnB3}
\Theta^3 B^{3} = \Lambda B^{3}, \quad B^{3}= B_{sj} + v, \quad v \in B_{sj}^{\perp}.
\end{align}
We expect $v$ to be small if $(B_{sj},\Lambda^{*})$ is an accurate enough approximation of the true eigenpair. Plugging this ansatz for $B^{3}$ into the previous equation and using equation \eqref{casi}, we obtain
\begin{align*}
\Theta^3 v & = \Theta^3 B^{3} - \Theta^3 B_{sj} \\
& = \Lambda B_{sj} + \Lambda v - \Lambda^{*} B_{sj} - e - \Theta^3_{A} B_{sj} \\
& = (\Lambda - \Lambda^{*})B_{sj} + \Lambda v - e - \Theta^3_{A} B_{sj},
\end{align*}
where $\Theta^{3}_{A} = \Theta^{3} - \Theta^{3}_{S}$ is the antisymmetric part of $\Theta^{3}$. From now on, the pairing $\langle \cdot, \cdot \rangle $ is assumed to be taken in $L^2((1-a,1))$. Let us consider the functional equation
\begin{align*}
\langle \Theta^3 v - \Lambda v, u\rangle = - \langle e + \Theta^3_{A} B_{sj}, u \rangle \quad \forall u \in B_{sj}^{\perp},
\end{align*}
and let $c^{*}$ be defined by
\begin{align*}
c^{*} = \inf_{v \in B_{sj}^{\perp}} \frac{\langle \Theta^3 v, v\rangle }{\|v\|_{L^{2}}^{2}}.
\end{align*}
A rigorous bound for $c^{*}$ will be given in the next lemma:
\begin{lemma} \label{lemacotascestrella}
Let $B_{sj} = \frac{1}{\sqrt{a-a\beta}}{1}_{[1-a+\frac{a\beta}{2},1-\frac{a\beta}{2}]}$. Then:
\begin{align*}
c^{*} \geq 0.8526.
\end{align*}
\end{lemma}
\begin{proof}
The proof is computer-assisted and the code can be found in the supplementary material. We refer to the appendix for details about the implementation.
\end{proof}
Then, if $\Lambda < c^{*}$, the operator $\Theta^3 - \Lambda$ is coercive in $B_{sj}^{\perp}$. By the Lax--Milgram theorem, for every $\Lambda < c^{*}$ there exists a function $v^{\Lambda} \in B_{sj}^{\perp}$ such that
\begin{align}\label{lm}
\langle \Theta^3 v^{\Lambda} - \Lambda v^{\Lambda}, u\rangle = - \langle e + \Theta^3_{A} B_{sj}, u \rangle \quad \forall u \in B_{sj}^{\perp}.
\end{align}
Thus, there exists a function $d(\Lambda)$ such that
\begin{align}
\label{ecuacionalphadelambda}
\Theta^{3} v^{\Lambda} & = (\Lambda - \Lambda^{*})B_{sj} + \Lambda v^{\Lambda} - e - \Theta^{3}_{A} B_{sj} + d(\Lambda) B_{sj}.
\end{align}
By computing the scalar product of \eqref{ecuacionalphadelambda} with $v^{\Lambdambda}$ we obtain
\betagin{align*}
(c^{*} - \Lambdambda) \|v^{\Lambdambda}\|_{L^{2}}^{2} \leq \Lambdangle (\mathbb{T}heta^3 - \Lambdambda) v^{\Lambdambda}, v^{\Lambdambda} \rightarrowngle = - \Lambdangle \mathbb{T}heta^3_{A} B_{sj}, v^{\Lambdambda} \rightarrowngle - \Lambdangle e, v^{\Lambdambda} \rightarrowngle
\leq |\Lambdangle \mathbb{T}heta^3_{A} B_{sj} + e, v^{\Lambdambda}\rightarrowngle|
\leq \|\mathbb{T}heta^3_{A} B_{sj} + e\|_{L^{2}} \|v^{\Lambdambda}\|_{L^{2}}.
\end{align*}
This inequality implies
\begin{align}\label{cotav}
\|v^{\Lambda}\|_{L^{2}} \leq \frac{\|\Theta^3_{A} B_{sj} + e\|_{L^{2}}}{c^{*} - \Lambda}.
\end{align}
In addition, using that
\betagin{align*}
\Lambdangle \mathbb{T}heta^3 v^{\Lambdambda}, B_{sj} \rightarrowngle & = \Lambdangle \mathbb{T}heta^3_{S} v^{\Lambdambda}, B_{sj} \rightarrowngle + \Lambdangle \mathbb{T}heta^3_{A} v^{\Lambdambda}, B_{sj} \rightarrowngle = \Lambdangle v^{\Lambdambda}, \mathbb{T}heta^3_{S} B_{sj} \rightarrowngle - \Lambdangle v^{\Lambdambda}, \mathbb{T}heta^3_{A} B_{sj} \rightarrowngle = \Lambdangle v^{\Lambdambda},e \rightarrowngle - \Lambdangle v^{\Lambdambda}, \mathbb{T}heta^3_{A} B_{sj} \rightarrowngle,
\end{align*}
taking the scalar product with $B_{sj}$ of the equation \eqref{ecuacionalphadelambda} yields
\betagin{align*}
d(\Lambdambda) \|B_{sj}\|_{L^{2}}^{2} & = \Lambdangle \mathbb{T}heta^3 v^{\Lambdambda}, B_{sj} \rightarrowngle + (\Lambdambda^{*} - \Lambdambda) \|B_{sj}\|_{L^{2}}^{2} + \Lambdangle e, B_{sj} \rightarrowngle \\
& = \Lambdangle v^{\Lambdambda},e \rightarrowngle - \Lambdangle v^{\Lambdambda}, \mathbb{T}heta^3_{A} B_{sj} \rightarrowngle + (\Lambdambda^{*} - \Lambdambda) \|B_{sj}\|_{L^{2}}^{2} + \Lambdangle e, B_{sj} \rightarrowngle.
\end{align*}
An important fact, that will be used, is the continuity of this function $d(\Lambdambda)$:
\betagin{lemma}\Lambdabel{ddl}The function $d(\Lambdambda)$ defined in \eqref{ecuacionalphadelambda}
is continuous.
\end{lemma}
\betagin{proof}
Let $\Lambdambda,\gammaamma <c^*$. By definition of $d(\Lambdambda)$ we have that
\betagin{align*}
|d(\Lambdambda)-d(\gammaamma)|\leq C\left(||B_{sj}||_{L^2},||e||_{L^2},||\mathbb{T}heta^3||_{L^2\to L^2}\right)||v^\Lambdambda-v^\gammaamma||_{L^2}+||B_{sj}||_{L^2}^2|\Lambdambda-\gammaamma|.
\end{align*}
For every $u\in B^\perp_{sj}$ we know from \eqref{lm} that
\betagin{align*}
\Lambdangle \mathbb{T}heta^3 v^{\Lambdambda}-\Lambdambda v^{\Lambdambda}, u\rightarrowngle = & \Lambdangle f, u \rightarrowngle\\
\Lambdangle \mathbb{T}heta^3 v^{\gammaamma}-\gammaamma v^{\gammaamma}, u\rightarrowngle = & \Lambdangle f, u \rightarrowngle,
\end{align*}
where $f\in L^2$. And then
\betagin{align*}
\Lambdangle \mathbb{T}heta^{3}(v^\Lambdambda-v^\gammaamma)-\Lambdambda (v^\Lambdambda-v^\gammaamma),u\rightarrowngle=(\gammaamma-\Lambdambda)\Lambdangle v^\gammaamma, u \rightarrowngle.
\end{align*}
Taking $u=v^\Lambdambda-v^\gammaamma\in B^\perp_{sj}$ yields
\betagin{align*}
(c^*-\Lambdambda)||v^\Lambdambda-v^\gammaamma||_{L^2}^2\leq |\gammaamma-\Lambdambda|\,||v^\gammaamma||_{L^{2}}\,||v^\Lambdambda-v^\gammaamma||_{L^2}.
\end{align*}
Thus
\betagin{align*}
||v^\Lambdambda-v^\gammaamma||_{L^2}\leq \frac{C}{c^*-\Lambdambda}|\gammaamma-\Lambdambda|\,||v^\gammaamma||_{L^2}.
\end{align*}
We achieve the conclusion of the lemma from the inequality \eqref{cotav}.
\end{proof}
Our purpose is now to show that there exists a value of $\Lambda =\Lambda^{**} < c^{*}$ such that $d(\Lambda^{**})=0$. We have the following lower bound for $d(\Lambda)$:
\begin{align}\label{ddlambda}
d(\Lambda) & \geq -\frac{\|v^{\Lambda}\|_{L^{2}} \|e - \Theta^3_{A} B_{sj}\|_{L^{2}}}{\|B_{sj}\|_{L^{2}}^{2}} + (\Lambda^{*} - \Lambda) - \frac{|\langle e, B_{sj} \rangle|}{\|B_{sj}\|_{L^{2}}^{2}} \\
& \geq -\frac{\|\Theta^3_{A} B_{sj} + e\|_{L^{2}}}{c^{*} - \Lambda}\frac{\|e - \Theta^3_{A} B_{sj}\|_{L^{2}}}{\|B_{sj}\|_{L^{2}}^{2}} + (\Lambda^{*} - \Lambda) - \frac{|\langle e, B_{sj} \rangle|}{\|B_{sj}\|_{L^{2}}^{2}}\nonumber.
\end{align}
We observe that $\lim_{\Lambda \to -\infty}d(\Lambda) = +\infty$. Hence, there exists a $\Lambda$ sufficiently small for which $d(\Lambda) > 0$. Similarly:
\begin{align}\label{cotad}
d(\Lambda) & \leq \frac{\|\Theta^3_{A} B_{sj} + e\|_{L^{2}}}{c^{*} - \Lambda}\frac{\|e - \Theta^3_{A} B_{sj}\|_{L^{2}}}{\|B_{sj}\|_{L^{2}}^{2}} + (\Lambda^{*} - \Lambda) + \frac{|\langle e, B_{sj} \rangle|}{\|B_{sj}\|_{L^{2}}^{2}}.
\end{align}
The objective is to use the previous inequality to show that there exists a value of $\Lambda$, smaller than $c^*$, such that $d(\Lambda)<0$. We will use the following lemma.
\begin{lemma}
\label{lemavertice}
Let $h(x) = \mathcal{A} - x + \frac{\mathcal{B}}{\mathcal{C}-x}$, where $\mathcal{A},\mathcal{B},\mathcal{C} > 0$. If
\betagin{align*}
\mathcal{C} > \mathcal{A} + 2\sqrt{\mathcal{B}},
\end{align*}
then there exists an $x < \mathcal{C}$ such that $h(x) < 0$.
\end{lemma}
\betagin{proof}
It is easy to notice that $h(x)$ is concave up since $h''(x) > 0$ for $x < \mathcal{C}$. Thus, it is enough to check that the minimum of $h$ is negative. We calculate the point $x_{m}$ where the minimum is attained.
\betagin{align*}
h'(x_{m}) = 0 \Leftrightarrow (\mathcal{C}-x_{m})^{2} = \mathcal{B} \Leftrightarrow x_{m} = \mathcal{C} - \sqrt{\mathcal{B}}.
\end{align*}
Evaluating at $x = x_{m}$:
\betagin{align*}
h(x_{m}) = \mathcal{A} - \mathcal{C} + 2\sqrt{\mathcal{B}} < 0,
\end{align*}
by hypothesis.
\end{proof}
\begin{lemma} \label{lemacotasABC}
Let $B_{sj} = \frac{1}{\sqrt{a-a\beta}}{1}_{[1-a+\frac{a\beta}{2},1-\frac{a\beta}{2}]}$, $\Lambda^{*} = 0.3482$ and let us define:
\begin{align*}
\mathcal{A} = \Lambda^{*} + \frac{|\langle e, B_{sj} \rangle|}{\|B_{sj}\|_{L^{2}}^{2}}, \quad \mathcal{B} = \frac{\|\Theta^3_{A} B_{sj} + e\|_{L^{2}}\|e - \Theta^{3}_{A} B_{sj}\|_{L^{2}}}{\|B_{sj}\|_{L^{2}}^{2}}.
\end{align*}
Then, we have the following bounds:
\betagin{align*}
\mathcal{A} < 0.3583 \\
\sqrt{\mathcal{B}} < 0.1534 \\
\end{align*}
\end{lemma}
\betagin{proof}
The proof is computer-assisted and the code can be found in the supplementary material. We refer to the appendix for details about the implementation.
\end{proof}
Finally, by applying Lemma \ref{lemavertice} to the right-hand side of \eqref{cotad} with
\begin{align*}
\mathcal{A} = \Lambda^{*} + \frac{|\langle e, B_{sj} \rangle|}{\|B_{sj}\|_{L^{2}}^{2}}, \quad \mathcal{B} = \frac{\|\Theta^3_{A} B_{sj} + e\|_{L^{2}}\|e - \Theta^{3}_{A} B_{sj}\|_{L^{2}}}{\|B_{sj}\|_{L^{2}}^{2}}, \quad \mathcal{C} = c^{*},
\end{align*}
and by the bounds given in Lemma \ref{lemacotasABC}, we obtain that there exists a $\Lambda_0$ for which $d(\Lambda_0) < 0$. More precisely, we have that
\betagin{align*}
\Lambdambda_{0} & \leq \frac{\mathcal{C}+\mathcal{A}}{2} - \sqrt{\left(\frac{\mathcal{C}-\mathcal{A}}{2}\right)^{2} - \mathcal{B}} = \frac{\mathcal{A}\mathcal{C}+\mathcal{B}}{\frac{\mathcal{A}+\mathcal{C}}{2} + \sqrt{\left(\frac{\mathcal{A}-\mathcal{C}}{2}\right)^{2} - \mathcal{B}}} \\
& = \frac{c^{*} + \Lambdambda^{*} + \frac{|\Lambdangle e, B_{sj} \rightarrowngle|}{\|B_{sj}\|_{L^{2}}^{2}}}{2} - \sqrt{\left(\frac{c^{*} - \Lambdambda^{*} - \frac{|\Lambdangle e, B_{sj} \rightarrowngle|}{\|B_{sj}\|_{L^{2}}^{2}}}{2}\right)^{2}-\frac{\|\mathbb{T}heta^3_{A} B_{sj} + e\|_{L^{2}}\|e - \mathbb{T}heta^{3}_{A} B_{sj}\|_{L^{2}}}{\|B_{sj}\|_{L^{2}}^{2}}}
\end{align*}
By the continuity of $d(\Lambdambda)$ proved in Lemma \rho_{\varpirepsilonsilonilon}f{ddl}, there has to be a $\Lambdambda^{**} \leq \Lambdambda_{0} < c^{*}$
for which $d(\Lambdambda^{**}) = 0$ and therefore
\betagin{align*}
\mathbb{T}heta v^{\Lambdambda^{**}} & = (\Lambdambda^{**} - \Lambdambda^{*})B_{sj} + \Lambdambda^{**} v^{\Lambdambda^{**}} - e - \mathbb{T}heta_{A} B_{sj}.
\end{align*}
which means that $( B_{sj} + v^{\Lambdambda^{**}}, \Lambdambda^{**})$ is the eigenvalue-eigenvector pair, $(B^3,\Lambdambda_3)\in L^2((1-a,1))\times \mathbb{R}$, we were looking for.
\begin{corollary}
\label{corollarylambda0}
We have $\Lambda_3 \leq \Lambda_{0} < 0.4117$.
\end{corollary}
\begin{proof}
This follows from the numerical bounds obtained in Lemma \ref{lemacotasABC} and Lemma \ref{lemavertice}.
\end{proof}
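The arithmetic behind the corollary can be reproduced directly from the stated bounds (this is only a floating-point check of the final step; the certified computation is the one in the supplementary material):
\begin{verbatim}
# Plugging A < 0.3583, sqrt(B) < 0.1534, C = c* >= 0.8526 into the vertex value
# of Lemma lemavertice gives the bound on Lambda_0.
A, sqrtB, C = 0.3583, 0.1534, 0.8526
B = sqrtB ** 2
print(C > A + 2 * sqrtB)                                  # hypothesis: True
print((C + A) / 2 - (((C - A) / 2) ** 2 - B) ** 0.5)      # about 0.4117
\end{verbatim}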
Next we show that $B^3$ is unique modulo multiplication by constants. In order to prove it, let us assume that $w\in L^2((1-a,1))$ satisfies
\begin{align*}
\Theta^3 w=\Lambda_3 w.
\end{align*}
Now we write $w=B_{sj}+(w-B_{sj})$ and we notice that we can decompose $w-B_{sj}=\alpha B_{sj}+v$ where $\alpha\in \mathbb{R}$ and $v\in B_{sj}^\perp$. Then $w=(1+\alpha)B_{sj}+ v$ and by linearity we have that $\frac{w}{1+\alpha}=B_{sj}+ u$, with $u=\frac{v}{1+\alpha}\in B_{sj}^\perp$, is also a solution. Then the uniqueness of the solutions in the Lax--Milgram theorem implies $\frac{w}{1+\alpha}=B^3$. If $\alpha=-1$ the previous argument fails. But, in this case, $w\in B_{sj}^\perp$ and then $c^*\leq \Lambda_3$, which has already been proven to be false.
In conclusion, we have shown that $\dim\mathcal{N}(\Theta^{3} - \Lambda_{3}) = 1$.
\betagin{lemma}
\Lambdabel{lemmaImayorquelambda}
We have that $\tilde{I}(\rho)-\Lambdambda_3 > 0$. In particular, we have the following bounds:
\betagin{align*}
\tilde{I}(\rho)-\Lambdambda_3 > 0.8526
\end{align*}
\end{lemma}
\betagin{proof}
The proof is computer-assisted and the code can be found in the supplementary material. We refer to the appendix for details about the implementation.
\end{proof}
Then it remains to prove the regularity of $B^3$, the solution of equation \eqref{eqnB3}. To do this we will bootstrap using Lemma \ref{lemacompacidad}. Since
\begin{align}\label{lades}
\tilde{I}(\rho)B^3(\rho)-\Lambda_3 B^3(\rho)=-\tilde{T}^3B^3(\rho),
\end{align}
by Lemma \ref{lemacompacidad} the function $(\tilde{I}(\rho)-\Lambda_3) B^3(\rho)\in H^1((1-a,1))$. Since $\tilde{I}\in C^3$, by Lemma \ref{lemmaImayorquelambda}
we have that $B^3(\rho)$ is in $H^1$. Let us take two derivatives of equation \eqref{lades}; by Lemma \ref{Tderivadas} we have that
\begin{align*}
\tilde{I}(\rho)\partial^{2}_\rho B^3(\rho)-\Lambda_3\partial^{2}_\rho B^3(\rho)=&-\sum_{j=1}^2\partial_\rho^{j} \tilde{I}(\rho)\partial^{2-j}_\rho B^3(\rho)-\partial_\rho \tilde{T}^{3}_1\partial_\rho B^3(\rho)\\&-\partial_\rho\left(\frac{1}{\rho}\int_{-\infty}^\infty \partial^{2}_\rho f(\rho') B^3(\rho')\frac{\rho'}{\rho}K^{3}\left(\frac{\rho}{\rho'}\right)d\rho'\right).
\end{align*}
Then, using that $\tilde{I}(\rho)\in C^3$, $f\in C^4$, $\tilde{I}-\Lambda_3>0$, Lemma \ref{lemacompacidad} and $B^3\in H^1$, we have that $B^3\in H^2$. Finally, taking three derivatives yields
\begin{align*}
\tilde{I}(\rho)\partial^{3}_\rho B^3(\rho)-\Lambda_3\partial^{3}_\rho B^3(\rho)=&-\sum_{j=1}^3\partial_\rho^{j} \tilde{I}(\rho)\partial^{3-j}_\rho B^3(\rho)-\partial_\rho \tilde{T}^{3}_2\partial^2_\rho B^3(\rho)\\&-\partial_\rho\left(\sum_{j=1}^2\frac{1}{\rho}\int_{-\infty}^\infty \partial^{j+1}_\rho f(\rho') \partial^{2-j}_\rho B^3(\rho')\left(\frac{\rho'}{\rho}\right)^2 K^{3}\left(\frac{\rho}{\rho'}\right)d\rho'\right).
\end{align*}
Then, using again that $\tilde{I}(\rho)\in C^3$, $f\in C^4$, $\tilde{I}-\Lambda_3>0$, Lemma \ref{lemacompacidad} and $B^3\in H^2$, we have that $B^3\in H^3.$
\end{enumerate}
This concludes the proof of Proposition \ref{btres}.
\end{proof}
A similar proof as in Proposition \ref{btres} also works to show:
\begin{prop} \label{btresadj} There exists a solution $(B^3_*,\Lambda_3)\in H^3((1-a,1))\times \mathbb{R}$ to the equation
\begin{align*}
\Theta^{3*}B^3_*=\Lambda_3B^3_*,
\end{align*}
where $\Lambda_3$ is the same eigenvalue as in Proposition \ref{btres} and $\Theta^{3*}$ is the adjoint operator of $\Theta^{3}$.
In addition, $\Lambda_3$ is simple and we can decompose $$B_*^3=B_{sj}+ v_*^{\Lambda_3},$$ where $v_*^{\Lambda_3}\in \left(B_{sj}\right)^\perp$
and
\betagin{align*}
||v_*^{\Lambdambda_3}||_{L^2}\leq \frac{||-\mathbb{T}heta^3_A B_{sj}+e||_{L^{2}}}{c^*-\Lambdambda_3}
\end{align*}
\end{prop}
\begin{proof} The proof that there exists a pair $(B^3_*,\Lambda_3^*)\in L^2\times \mathbb{R}$ satisfying
\begin{align*}
\Theta^{3*}B^3_*=\Lambda_3^*B^3_*
\end{align*}
follows the same steps as the proof of Proposition \ref{btres}. The only modification is the change $\Theta^3_A\to -\Theta^3_A$. In order to check that, in fact, $\Lambda_3^*=\Lambda_3$, we notice that
\begin{align*}
\Lambda_3\langle B^3,B^3_*\rangle=\langle \Theta^3 B^3, B^3_*\rangle =\langle B^3, \Theta^{3*}B^3_*\rangle=\Lambda_3^* \langle B^3,B^3_*\rangle.
\end{align*}
Therefore it is enough to show that $\langle B^3,B^3_*\rangle \neq 0$. We prove this result in the following lemma:
\begin{lemma}\label{BBstar}The following inequality holds:
\begin{align*}\langle B^3,B^3_*\rangle >0.\end{align*}
\end{lemma}
\begin{proof}
We can decompose $B^3$ and $B^3_*$ in the following way
\begin{align*}
B^3= & B_{sj}+v^{\Lambda_3}\\
B^3_*= & B_{sj}+v^{\Lambda_3}_*
\end{align*}
where $v^{\Lambda_3},\, v^{\Lambda_3}_*\in B_{sj}^\perp$. Thus
\begin{align}\label{eqB3B3star}\langle B^3,B^3_* \rangle = ||B_{sj}||_{L^2}^2+\langle v^{\Lambda_3}, v^{\Lambda_3}_*\rangle\geq ||B_{sj}||_{L^2}^2-\frac{||-\Theta_A^3B_{sj}+e||_{L^2}||\Theta_A^3B_{sj}+e||_{L^2}}{(c^*-\Lambda_3)^2}.\end{align}
Using the notation of Lemma \ref{lemavertice}, we can bound
\begin{align*}
c^{*} - \Lambda_{3} \geq c^{*} - \Lambda_{0} = \frac{\mathcal{C}-\mathcal{A}}{2} + \sqrt{\left(\frac{\mathcal{C}-\mathcal{A}}{2}\right)^{2} - \mathcal{B}}.
\end{align*}
This implies that the RHS of \eqref{eqB3B3star} is bounded below by
\begin{align*}
||B_{sj}||_{L^2}^2\left(1 - \frac{\mathcal{B}}{\left(\frac{\mathcal{C}-\mathcal{A}}{2} + \sqrt{\left(\frac{\mathcal{C}-\mathcal{A}}{2}\right)^{2} - \mathcal{B}}\right)^{2}}\right) > 0,
\end{align*}
where in the last inequality we have used the already checked condition that
\betagin{align*}
\frac{C-A}{2} > \sqrt{B}.
\end{align*}
\end{proof}
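With the bounds of Lemma \ref{lemacotasABC} and Lemma \ref{lemacotascestrella}, the last two displays can be checked numerically (again, a plain floating-point check of the arithmetic, not the certified computation):
\begin{verbatim}
A, sqrtB, C = 0.3583, 0.1534, 0.8526
B = sqrtB ** 2
half = (C - A) / 2
print(half > sqrtB)                              # True, so the square root is real
print(1 - B / (half + (half**2 - B)**0.5)**2)    # about 0.879 > 0
\end{verbatim}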
Finally, we prove that $B^3_*$ is in $H^3$ in the same way as we did for $B^3$, by using Lemma \ref{Tstarcompacidad}.
\end{proof}
\subsubsection{One-dimensionality of the kernel of $\partial_r F[\rho,\Lambda_3]$}\label{uniqueness}
So far we have proven that there exists an element in the kernel of the operator $\partial_r F[\rho,\Lambda_3]$. In this section we will prove that this kernel is the span of this element. As commented in point 3 at the beginning of the proof of Proposition \ref{kernel}, it is enough to prove that the equation
$$\Theta^{3n} u=\Lambda_3u$$
implies that $u=0$ for $n>1$.
\begin{lemma}\label{cotalambda1}
Let $m>1$ and
\begin{align*}\Lambda_{3m}^{s}=\inf_{\substack{u\in L^2 \\ ||u||_{L^2}=1}} \langle \Theta^{3m}u,u\rangle.\end{align*}
Then, if the pair $(u_{3m},\Lambda_{3m})\in L^2 \times \mathbb{R}$ (with $u_{3m}$ not identically zero) satisfies
\begin{align*}
\Theta^{3m}u_{3m}=\Lambda_{3m}u_{3m},
\end{align*}
we have that
\begin{align*}
\Lambda_{3m}^{s} \leq \Lambda_{3m}.
\end{align*}
\end{lemma}
\betagin{proof}
Since we can take $u_{3m}$ with norm 1
we have that
\betagin{align*}
\Lambdambda_{3m} = \Lambdangle \mathbb{T}heta^{3m} u_{3m}, u_{3m} \rightarrowngle \gammaeq \Lambdambda_{3m}^{s},
\end{align*}
by definition of $\Lambdambda_{3m}^{s}$.
\end{proof}
\betagin{lemma}\Lambdabel{mn}
Let $m > n$ and let $\Lambdambda_{3m}^{s}, \Lambdambda_{3n}^{s}$ be given by
\betagin{align*}
\Lambdambda_{3m}^{s}=\inf_{\substack{u\in L^2 \\ ||u||_{L^2}=1}} \Lambdangle \mathbb{T}heta^{3m} u,u\rightarrowngle\quad \thetaxt{and}\quad \Lambdambda_{3n}^{s}=\inf_{\substack{u\in L^2 \\ ||u||_{L^2}=1}} \Lambdangle \mathbb{T}heta^{3n}u,u\rightarrowngle .
\end{align*}
Then
\betagin{align*}
\Lambdambda_{3m}^{s} \gammaeq \Lambdambda_{3n}^{s}
\end{align*}
\end{lemma}
\begin{proof}
First we will show that for every $m\geq 1$ and $u\in L^2$ the following inequality holds: $$\langle \Theta^{3m}u,u\rangle \geq \langle\Theta^{3m}|u|,|u|\rangle.$$
Using the positivity of $T^{3m}(\rho,\rho')$ proved in Lemma \ref{positivo}, we have that
\betagin{align*}
\Lambdangle \mathbb{T}heta^{3m}|u|,|u| \rightarrowngle & = \int \tilde{I}(\rho)|u(\rho)|^{2} d\rho +\int \int \frac{1}{\rho}\left(\frac{\rho'}{\rho}\right)T^{3m}(\rho,\rho')f_\rho(\rho')|u(\rho)||u(\rho')| d\rho d\rho' \\
& = \int I(\rho)|u(\rho)|^{2} d\rho + \int \int \frac{1}{\rho}\left(\frac{\rho'}{\rho}\right)T^{3m}(\rho,\rho')f_\rho(\rho')u^{+}(\rho)u^{+}(\rho') d\rho d\rho'\\ & + \int \int \frac{1}{\rho}\left(\frac{\rho'}{\rho}\right)T^{3m}(\rho,\rho')f_\rho(\rho')u^{-}(\rho)u^{-}(\rho') d\rho d\rho' \\
& + \int \int \frac{1}{\rho}\left(\frac{\rho'}{\rho}\right)T^{3m}(\rho,\rho')f_\rho(\rho')u^{+}(\rho)u^{-}(\rho') d\rho d\rho'\\& + \int \int \frac{1}{\rho}\left(\frac{\rho'}{\rho}\right)T^{3m}(\rho,\rho')f_\rho(\rho')u^{-}(\rho)u^{+}(\rho') d\rho d\rho' \\
& \leq \int I(\rho)|u(\rho)|^{2} d\rho + \int \int \frac{1}{\rho}\left(\frac{\rho'}{\rho}\right)T^{3m}(\rho,\rho')f_\rho(\rho')u^{+}(\rho)u^{+}(\rho') d\rho d\rho' \\ & + \int \int \frac{1}{\rho}\left(\frac{\rho'}{\rho}\right)T^{3m}(\rho,\rho')f_\rho(\rho')u^{-}(\rho)u^{-}(\rho') d\rho d\rho' \\
& - \int \int\frac{1}{\rho}\left(\frac{\rho'}{\rho}\right) T^{3m}(\rho,\rho')f_\rho(\rho')u^{+}(\rho)u^{-}(\rho') d\rho d\rho' \\ &- \int \int \frac{1}{\rho}\left(\frac{\rho'}{\rho}\right)T^{3m}(\rho,\rho')f_\rho(\rho')u^{-}(\rho)u^{+}(\rho') d\rho d\rho' \\
& = \int I(\rho)u(\rho)^{2} d\rho + \int \int \frac{1}{\rho}\left(\frac{\rho'}{\rho}\right)T^{3m}(\rho,\rho')f_\rho(\rho')u(\rho)u(\rho') d\rho d\rho' \\
& = \Lambdangle \mathbb{T}heta^{3m}u, u \rightarrowngle
\end{align*}
Then $$\inf_{u\in L^2\atop ||u||_{L^2}=1}\langle \Theta^{3m} u , u\rangle=\inf_{u\in L^2\atop ||u||_{L^2}=1,\, u\geq 0}\langle \Theta^{3m} u , u\rangle.$$
Now, for a positive $u\in L^2$, we write
\begin{align*}
\langle \Theta^{3m}u,u \rangle = \langle \left(\Theta^{3m}-\Theta^{3n}\right)u,u\rangle + \langle \Theta^{3n}u,u \rangle,
\end{align*}
where
\betagin{align*}
\left(\mathbb{T}heta^{3m}-\mathbb{T}heta^{3n}\right)u=\frac{1}{\rho}\int_{-\infty}^\infty f_\rho(\rho')\left(K^{3m}-K^{3n}\right)\left(\frac{\rho}{\rho'}\right) u(\rho')d\rho'
\end{align*}
and notice that
\betagin{align*}
\frac{1}{\rho}\int_{-\infty}^\infty f_\rho(\rho')\left(K^{3m}-K^{3n}\right)\left(\frac{\rho}{\rho'}\right) u(\rho')d\rho'\gammaeq 0
\end{align*}
by Lemma \ref{positivo}.
We then obtain that
\betagin{align*}
\Lambdangle \mathbb{T}heta^{3m}u,u \rightarrowngle \gammaeq \Lambdangle \mathbb{T}heta^{3n}u,u \rightarrowngle,
\end{align*}
for every positive $u\in L^2$.
This concludes the proof of the lemma.
\end{proof}
Now our purpose is to find a lower bound for the number $\Lambda_6^{s}$ defined by $$\Lambda_6^{s} =\inf_{u\in L^2 \atop ||u||_{L^2}=1} \langle \Theta^6 u, u\rangle.$$
In order to do this we will need Lemmas \ref{lemacotae6} and \ref{lemacotascestrella6} below.
\begin{lemma}\label{lemacotae6} Let $B^{6\text{aprox}}_s= \frac{1}{\sqrt{a-a\beta}}{1}_{[1-a+\frac{a\beta}{2},1-\frac{a\beta}{2}]}$ and $\Lambda^{s}_{6\text{aprox}}= 0.573$. Then
\begin{align*}
\Theta^6_S B^{6\text{aprox}}_s-\Lambda^{s}_{6\text{aprox}}B^{6\text{aprox}}_s=e^6
\end{align*}
\end{align*}
with
$$||e^6||_{L^2} < 0.0893.$$
\end{lemma}
\betagin{proof} The proof is computer-assisted and the codes can be found in the supplementary material. We refer to the appendix for the implementation.
\end{proof}
\betagin{lemma}\Lambdabel{lemacotascestrella6} Let $B^{6\thetaxt{aprox}}_s$ be the approximation in Lemma \rho_{\varpirepsilonsilonilon}f{lemacotae6}. Then, if we define the number $c^{6*}$ by
\betagin{align*}
c^{6*}=\inf_{v\in \left(B^{6\thetaxt{aprox}}_s\right)^\perp\atop ||v||_{L^2}=1} \Lambdangle \mathbb{T}heta^6 v,v\rightarrowngle,
\end{align*}
the following bound holds
\betagin{align*}
c^{6*}> 0.8355.
\end{align*}
\end{lemma}
\begin{proof}
The proof is computer-assisted and the code can be found in the supplementary material. We refer to the appendix for details on the implementation.
\end{proof}
\begin{lemma}\label{lemacotalambda6}
Let $\Lambda_{6}^{s}\in\mathbb{R}$ be given by \begin{align*}
\Lambda_6^{s} =\inf_{u\in L^2 \atop ||u||_{L^2}=1} \langle \Theta^6 u, u\rangle.
\end{align*} Then, we have the following bound:
\begin{align*}
\Lambda_{6}^{s} > 0.4837.
\end{align*}
\end{lemma}
\betagin{proof} For a generic $B\in L^2$ with $||B||_{L^2}=1$ we can decompose $B=\alphapha B^{6\thetaxt{aprox}}_{s}+\betata v$, where $v\in \left(B^{6\thetaxt{aprox}}_{s}\right)^\perp$, $||v||_{L^2}=1$ and $\alphapha^2+\betata^2=1$. Therefore
\betagin{align*}
\Lambdangle \mathbb{T}heta^6_{S}B, B\rightarrowngle &=\alphapha^2\Lambdambda^{s}_{6\thetaxt{aprox}}+ \betata^2\Lambdangle \mathbb{T}heta^6_{S}v,v \rightarrowngle+ 2\alphapha \betata\Lambdangle e^6, v\rightarrowngle \\
& \gammaeq \alphapha^2\Lambdambda^{s}_{6\thetaxt{aprox}}+ \betata^2\Lambdangle \mathbb{T}heta^6_{S}v,v \rightarrowngle - 2\alphapha \sqrt{1-\alphapha^2}\Lambdangle e^6, v\rightarrowngle.
\end{align*}
Finally we obtain that
\betagin{align*}
\Lambdangle \mathbb{T}heta^6_{S}B, B\rightarrowngle \gammaeq \Lambdambda^{s}_{6\thetaxt{aprox}}-||e^6||_{L^2},
\end{align*}
since $$\inf_{v\in\left(B^{6\thetaxt{aprox}}_{s}\right)^\perp\atop ||v||_{L^2}=1 }\Lambdangle \mathbb{T}heta^6_{S} v,v\rightarrowngle\gammaeq c^{6*}>\Lambdambda^{s}_{6\thetaxt{aprox}}$$ by Lemmas \rho_{\varpirepsilonsilonilon}f{lemacotae6} and \rho_{\varpirepsilonsilonilon}f{lemacotascestrella6}.
\end{proof}
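The number in the statement is just the difference of the two constants above, as the following trivial check records:
\begin{verbatim}
lam_aprox, err, c6 = 0.573, 0.0893, 0.8355
print(lam_aprox - err)   # 0.4837
print(c6 > lam_aprox)    # True: the gap used in the last display
\end{verbatim}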
Using Lemmas \ref{cotalambda1}, \ref{mn} and \ref{lemacotalambda6} we can prove the following proposition:
\begin{prop} \label{thetaicoer}The bilinear forms $\langle (\Theta^{3m}-\Lambda_3)v,v\rangle$ are coercive in $L^2$ for $m>1$. In addition,
\begin{align*}
\langle (\Theta^{3m}-\Lambda_3)v,v\rangle\geq (\Lambda^{s}_6-\Lambda_3)||v||_{L^2}^2.
\end{align*}
Therefore, if $u\in L^2$ satisfies
\begin{align*}
\Theta^{3m} u = \Lambda_3 u
\end{align*}
for $m>1$, then $u = 0$.
\end{prop}
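The coercivity margin is explicit from the bounds already obtained: $\Lambda_6^s>0.4837$ while $\Lambda_3\leq\Lambda_0<0.4117$, so $\Lambda_6^s-\Lambda_3>0.072$.
\begin{verbatim}
lam6, lam3 = 0.4837, 0.4117
print(lam6 - lam3)   # 0.072: a strictly positive coercivity constant
\end{verbatim}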
Proposition \ref{kernel} is then proven.
\end{proof}
To finish this section we study the codimension of the image of the operator $\partial_rF[\rho,\Lambdambda_3]$.
\betagin{prop}\Lambdabel{codimension} The space $H^{3,3}_{3,\thetaxt{odd}}(\Omega_a)/\mathcal{R}\left(\partial_rF[\rho,\Lambdambda_3]\right)$ has dimension one.
\end{prop}
\betagin{proof}
In order to prove this proposition we will study the range of $\partial_r F[\rho,\Lambdambda_3]$. Let $G(\alphapha,\rho)\in H^{3,3}_{3,\thetaxt{odd}}(\Omega_a)$. We shall try to find $g(\alphapha,\rho)\in H^{4,3}_{3,\thetaxt{even}}(\Omega_a)$ such that
\betagin{align}\Lambdabel{codeq}
\partial_{r}F[\rho,\Lambdambda_3]g(\alphapha,\rho)=G(\alphapha,\rho)\end{align}
By using the expansions
\betagin{align*}
g(\alphapha,\rho)= &\sum_{m=1}^\infty \rho g^{3m}(\rho)\phi_{\delta} * \phi_{\delta} *s(3m\alphapha)\\
G(\alphapha,\rho)= &\sum_{m=1}^\infty \rho G^{3m}(\rho)\sigman(3m\alphapha)
\end{align*} in \eqref{codeq} we have that
\betagin{align*}
\sum_{m=1}^\infty 3m\rho\left(\mathbb{T}heta^{3m}g^{3m}(\rho)-\Lambdambda_3 g^{3m}(\rho)\right)\sigman(3m\alphapha)=\sum_{m=1}^{\infty}\rho G^{3m}(\rho)\sigman(3m\alphapha).
\end{align*}
Taking the projection onto the $3m-$mode yields
\betagin{align}\Lambdabel{gG}
3m\left(\mathbb{T}heta^{3m}g^{3m}-\Lambdambda_3 g^{3m}(\rho)\right)=G^{3m}(\rho)\quad \thetaxt{for $m=1,2,3,...$}
\end{align}
Next we shall study the existence of solutions for the equation \eqref{gG} in $L^2$ and after that the $H^3$-regularity.
\betagin{enumerate}
\item \thetaxtbf{Existence in $L^2$.} We will deal separately with two cases: in Lemma \rho_{\varpirepsilonsilonilon}f{emm1} we take $m>1$ and in Lemma \rho_{\varpirepsilonsilonilon}f{emi1} $m=1$.
\betagin{lemma}\Lambdabel{emm1} For $m>1$ there exists an inverse operator
\betagin{align*}
\left(\mathbb{T}heta^{3m}-\Lambdambda_3\right)^{-1} \,:\, L^2\to L^2
\end{align*}
with norm bounded independently of $m$.
\end{lemma}
\begin{proof}
By Lemmas \ref{cotalambda1} and \ref{mn} and the explicit bound for $\Lambda^s_6$ in Lemma \ref{lemacotalambda6}, we have that the bilinear form $\langle(\Theta^{3m}-\Lambda_3)v,v\rangle$ is coercive in $L^2$ with $$\langle(\Theta^{3m}-\Lambda_3)v,v\rangle\geq (\Lambda_6^s-\Lambda_3)||v||_{L^2}^2\quad \text{for $m>1$}.$$ Since $G^{3m}\in H^3\subset L^2$ we can apply the Lax--Milgram theorem in order to obtain the existence of an inverse operator $$(\Theta^{3m}-\Lambda_3)^{-1} \,:\, L^2\to L^2.$$
\end{proof}
\betagin{lemma}\Lambdabel{emi1} Let $B^{3}_*$ be defined by Proposition \rho_{\varpirepsilonsilonilon}f{btresadj}. Then, if $G^3\in (B^{3}_*)^\perp$, there exists a solution, $g^{3}\in L^2$, to the equation \eqref{gG} with $m=1$. However, if $G^3\in \thetaxt{span}\left\{B_*^3\right\}$ there is no function in $L^2$ satisfying the equation \eqref{gG} with $m=1$.
\end{lemma}
\begin{proof}
First we notice that we have already checked that $\langle (\Theta^3-\Lambda_3)v,v\rangle$ is coercive on the space $B_{sj}^\perp$. In the next lemma we prove that it is also coercive on $(B^3_*)^\perp$.
\begin{lemma}\label{c}There exists a constant $c>\Lambda_3$ such that
\begin{align*}
\inf_{v\in (B^3_*)^\perp\atop \|v\|_{L^2}=1}\langle (\Theta^3-\Lambda_3)v,v\rangle\geq c-\Lambda_3.
\end{align*}
\end{lemma}
\begin{proof}
We take $v\in (B_*^3)^\perp$ with $L^2$-norm equal to 1. We can decompose $v$ as $v=\alpha B_{sj}+\beta h$, where $h\in B^\perp_{sj}$ and $\|h\|_{L^2}=1$. Since $B^3_*=B_{sj}+v^{\Lambda_3}_*$ we have that
\begin{align*}
\alpha \|B_{sj}\|^2_{L^2}+\beta \langle h, v^{\Lambda_3}_*\rangle&=0,\\
\alpha^2 \|B_{sj}\|_{L^2}^2+\beta^2&=1.
\end{align*}
Therefore, we can write
\begin{align*}
\langle (\Theta^3-\Lambda_3)v,v\rangle =\langle (\Theta^3_S-\Lambda_3)v,v\rangle,
\end{align*}
and, using that $\Theta^3_S B_{sj}=\Lambda^* B_{sj}+e$,
\begin{align*}
&\langle (\Theta^3_S-\Lambda_3)v,v\rangle=\alpha^2\langle(\Theta^3_S-\Lambda_3)B_{sj},B_{sj}\rangle+ 2\alpha\beta\langle(\Theta^3_S-\Lambda_3)B_{sj},h \rangle+\beta^2\langle (\Theta^3_S-\Lambda_3)h,h \rangle \\
&=\alpha^2\langle (\Lambda^*-\Lambda_3)B_{sj}+e,B_{sj}\rangle+2\alpha\beta\langle (\Lambda^*-\Lambda_3)B_{sj}+e,h \rangle+\beta^2\langle (\Theta^3_S-\Lambda_3)h,h \rangle.
\end{align*}
Substituting
\begin{align*}
\alpha=-\beta \frac{\langle h,v_*^{\Lambda_3} \rangle}{\|B_{sj}\|_{L^2}^2}
\end{align*}
and using that $\langle B_{sj},h\rangle=0$ yields
\begin{align}
\langle (\Theta^3_S-\Lambda_3)v,v\rangle& = \beta^2\left(\frac{\langle h,v_*^{\Lambda_3} \rangle^2}{\|B_{sj}\|_{L^2}^4}\left((\Lambda^*-\Lambda_3)\|B_{sj}\|_{L^{2}}^{2} + \langle e, B_{sj} \rangle\right) +\langle (\Theta^3_S-\Lambda_3)h,h\rangle- 2\frac{\langle h,v_*^{\Lambda_3} \rangle}{\|B_{sj}\|_{L^2}^2}\left\langle e,h\right\rangle\right). \label{cuadraticahv}
\end{align}
Here we recall that
\begin{align*}
\|v_*^{\Lambda_3}\|_{L^2}\leq & \frac{\|-\Theta^3_A B_{sj}+e\|_{L^2}}{c^*-\Lambda_3},\\
\langle (\Theta_S^3-\Lambda_3)h,h\rangle\geq & \ c^*-\Lambda_3,
\end{align*}
and notice that
\begin{align*}
\beta^2\geq \frac{1}{1+\frac{\|-\Theta_A^3B_{sj}+e\|^2_{L^2}}{(c^*-\Lambda_3)^2\|B_{sj}\|_{L^2}^2}} > 0.
\end{align*}
Therefore, it is enough to show that
\begin{align*}
\frac{\langle h,v_*^{\Lambda_3} \rangle^2}{\|B_{sj}\|_{L^2}^4}\left((\Lambda^*-\Lambda_3)\|B_{sj}\|_{L^{2}}^{2} + \langle e, B_{sj} \rangle\right) +\langle (\Theta^3_S-\Lambda_3)h,h\rangle- 2\frac{\langle h,v_*^{\Lambda_3} \rangle}{\|B_{sj}\|_{L^2}^2}\left\langle e,h\right\rangle > 0.
\end{align*}
However, the left-hand side can be bounded from below by
\begin{align*}
c^* - \Lambda_0 + \left(\frac{\|-\Theta^3_A B_{sj}+e\|_{L^2}}{\|B_{sj}\|_{L^{2}}^{2}(c^*-\Lambda_0)}\right)^{2}\left((\Lambda^{*} - \Lambda_{0})\|B_{sj}\|_{L^{2}}^{2} -|\langle e, B_{sj} \rangle |\right)
- 2\frac{\|-\Theta^3_A B_{sj}+e\|_{L^2}}{\|B_{sj}\|_{L^{2}}^{2}(c^*-\Lambda_0)}\|e\|_{L^{2}},
\end{align*}
since
\begin{align*}
\langle (\Theta_S^3-\Lambda_3)h,h\rangle & \geq c^*-\Lambda_3 \geq c^{*} - \Lambda_0, \\
\frac{\langle h,v_*^{\Lambda_3} \rangle^2}{\|B_{sj}\|_{L^2}^4}(\Lambda^*-\Lambda_3)\|B_{sj}\|_{L^{2}}^{2} & \geq
\left(\frac{\|-\Theta^3_A B_{sj}+e\|_{L^2}}{(c^*-\Lambda_3)\|B_{sj}\|_{L^{2}}^{2}}\right)^{2}(\Lambda^*-\Lambda_3)\|B_{sj}\|_{L^{2}}^{2} \\
& \geq
\left(\frac{\|-\Theta^3_A B_{sj}+e\|_{L^2}}{(c^*-\Lambda_0)\|B_{sj}\|_{L^{2}}^{2}}\right)^{2}(\Lambda^*-\Lambda_0)\|B_{sj}\|_{L^{2}}^{2}, \\
\frac{\langle h,v_*^{\Lambda_3} \rangle^2}{\|B_{sj}\|_{L^2}^4}\langle e, B_{sj} \rangle & \geq
-\left(\frac{\|-\Theta^3_A B_{sj}+e\|_{L^2}}{(c^*-\Lambda_3)\|B_{sj}\|_{L^{2}}^{2}}\right)^{2}\left|\langle e, B_{sj} \rangle\right| \\
& \geq
-\left(\frac{\|-\Theta^3_A B_{sj}+e\|_{L^2}}{(c^*-\Lambda_0)\|B_{sj}\|_{L^{2}}^{2}}\right)^{2}\left|\langle e, B_{sj} \rangle\right|, \\
- 2\frac{\langle h,v_*^{\Lambda_3} \rangle}{\|B_{sj}\|_{L^2}^2}\left\langle e,h\right\rangle & \geq - 2\frac{\|-\Theta^3_A B_{sj}+e\|_{L^2}}{\|B_{sj}\|_{L^{2}}^{2}(c^*-\Lambda_3)}\|e\|_{L^{2}} \geq - 2\frac{\|-\Theta^3_A B_{sj}+e\|_{L^2}}{\|B_{sj}\|_{L^{2}}^{2}(c^*-\Lambda_0)}\|e\|_{L^{2}}.
\end{align*}
Using Lemma \ref{lemacotascestrella} and Corollary \ref{corollarylambda0}, we get
\begin{align*}
c^{*} - \Lambda_{0} \geq 0.8526 - 0.4117 = 0.4409.
\end{align*}
Via Lemmas \ref{lemacotae} and \ref{lemacotasABC} we obtain
\begin{align*}
\left(\frac{\|-\Theta^3_A B_{sj}+e\|_{L^2}}{(c^*-\Lambda_0)\|B_{sj}\|_{L^{2}}^{2}}\right)^{2}(\Lambda^*-\Lambda_0)\|B_{sj}\|_{L^{2}}^{2}
\geq \left(\frac{0.1534}{0.4409}\right)^{2}(0.3482-0.4117) \geq -0.01
\end{align*}
and similarly
\begin{align*}
-\left(\frac{\|-\Theta^3_A B_{sj}+e\|_{L^2}}{(c^*-\Lambda_0)\|B_{sj}\|_{L^{2}}^{2}}\right)^{2}\left|\langle e, B_{sj} \rangle\right|
\geq -\left(\frac{0.1534}{0.4409}\right)^{2}\, 0.0101 \geq -0.002.
\end{align*}
Finally, the last term can be bounded by
\begin{align*}
- 2\frac{\|-\Theta^3_A B_{sj}+e\|_{L^2}}{\|B_{sj}\|_{L^{2}}^{2}(c^*-\Lambda_0)}\|e\|_{L^{2}} \geq (-2)\, \frac{0.1534}{0.4409}\, 0.0905 \geq -0.063.
\end{align*}
Adding up these three bounds and the term $c^*-\Lambda_0\geq 0.4409$ gives the desired positivity. This finishes the proof.
\end{proof}
By Lemma \ref{c} and the Lax--Milgram theorem we find $g^3\in (B_*^3)^\perp$ such that
\begin{align*}
\langle (\Theta^3-\Lambda_3)g^3, v\rangle =\left\langle \frac{1}{3}G^3, v\right\rangle
\end{align*}
for all $v\in (B_*^3)^\perp$, and hence there exists $\gamma \in \mathbb{R}$ such that
\begin{align*}
(\Theta^3-\Lambda_3)g^3=\frac{1}{3} G^3+\gamma B_*^3.
\end{align*}
Taking the scalar product with $B_*^3$ we have that
\begin{align*}
\|B_*^3\|_{L^2}^2\,\gamma= \langle (\Theta^3-\Lambda_3)g^3, B^3_* \rangle =\langle g^3, (\Theta^{3*}-\Lambda_3)B^3_*\rangle=0.
\end{align*}
This last equality implies that $\gamma=0$ and therefore
\begin{align*}
(\Theta^3-\Lambda_3)g^3= \frac{1}{3}G^3.
\end{align*}
However, the equation
\begin{align}\label{notiene}
(\Theta^3-\Lambda_3)g^3=B_*^3
\end{align}
does not have any solution in $L^2$. To check this, assume that there exists $g^3\in L^2$ satisfying \eqref{notiene}. Multiplying \eqref{notiene} by $B^3_*$ and integrating yields
\begin{align*}
\langle (\Theta^3-\Lambda_3)g^3, B_*^3 \rangle =\|B_*^3\|_{L^2}^2=\langle g^3,(\Theta^{3*}-\Lambda_3)B_*^3\rangle=0,
\end{align*}
which is a contradiction.
\end{proof}
\item \textbf{$H^3$-regularity.} We again deal separately with two cases: in Lemma \ref{rmm1} we take $m>1$ and in Lemma \ref{rmi1} $m=1$.
\begin{lemma}\label{rmm1} The solution $g^{3m}\in L^2$ of equation \eqref{gG} given by Lemma \ref{emm1} is actually in $H^3$, with the bound $$\|g^{3m}\|_{H^3}\leq \frac{C}{3m}\|G^{3m}\|_{H^3},$$ where $C$ does not depend on $m$.
\end{lemma}
\begin{proof}
For $m>1$ let us consider equation \eqref{gG}. We split the proof into two steps: in the first one we show that $g^{3m}\in H^3$, but with an $H^3$-norm that may depend on $m$; in the second one we prove that the $H^3$-norm is actually independent of $m$.
\begin{enumerate}
\item Step 1. Since $\Theta^{3m}g^{3m}-\Lambda_3 g^{3m}=\frac{1}{3m}G^{3m}$ and $\frac{1}{3m}G^{3m}$ belongs to $H^1$, we can take a derivative on both sides to obtain
\begin{align*}
(\tilde{I}(\rho)-\Lambda_3)\partial_\rho g^{3m}(\rho)=\frac{1}{3m}\partial_\rho G^{3m}(\rho)-\partial_\rho\tilde{I}(\rho)g^{3m}(\rho)-\partial_\rho \tilde{T}^{3m}g^{3m},
\end{align*}
where we remark that $\partial_\rho \tilde{T}^{3m}g^{3m}\in L^2$ since we know that $g^{3m}\in L^2$ and $\tilde{T}^{3m}\,:\, L^2\to H^1$. The problem here is that $\|\tilde{T}^{3m}\|_{L^2\to H^1}$ depends on $m$. In addition, since $\tilde{I}(\rho)\in C^3$ and $\tilde{I}-\Lambda_3>0$, we have that $\partial_\rho g^{3m}$ is in $L^2$ with norm bounded by a constant depending on $m$. Taking two derivatives in equation \eqref{gG} we have that
\begin{align*}
(\tilde{I}(\rho)-\Lambda_3)\partial^{2}_\rho g^{3m}(\rho)=\frac{1}{3m}\partial^{2}_\rho G^{3m}(\rho)-\sum_{j=1}^2\partial^{j)}_\rho\tilde{I}(\rho)\partial^{2-j)}_\rho g^{3m}-\partial^{2}_{\rho}\tilde{T}^{3m}g^{3m}(\rho).
\end{align*}
By Lemma \ref{Tderivadas} we know that
\begin{align*}
\partial_\rho \tilde{T}^{3m}g^{3m}(\rho)=&\tilde{T}^{3m}_1\partial_\rho g^{3m} (\rho)+\frac{1}{\rho}\int_{-\infty}^\infty \partial^{2}_\rho f(\rho') g^{3m}(\rho')\frac{\rho'}{\rho}K^{3m}\left(\frac{1}{\gamma}\right)d\rho'.
\end{align*}
Thus we can write
\begin{align*}
\partial^{2}_\rho \tilde{T}^{3m}g^{3m}(\rho)=&\partial_\rho\tilde{T}^{3m}_1\partial_\rho g^{3m} (\rho)+\partial_\rho \frac{1}{\rho}\int_{-\infty}^\infty \partial^{2}_\rho f(\rho') g^{3m}(\rho')\frac{\rho'}{\rho}K^{3m}\left(\frac{1}{\gamma}\right)d\rho'.
\end{align*}
Then, by Lemma \ref{lemacompacidad} we know that $\partial_\rho\tilde{T}^{3m}_1\partial_\rho g^{3m}\in H^3$ (with norm depending on $m$) and therefore $\partial^{2}_\rho \tilde{T}^{3m}g^{3m}(\rho)\in L^2$. Again, using that $\tilde{I}\in C^3$, $\tilde{I}(\rho)-\Lambda_3>0$ and $g^{3m}\in H^2$, we have that $\partial^{2}_\rho g^{3m}\in L^2$ with $L^2$-norm depending on $m$. Finally, since
\begin{align*}
(\tilde{I}(\rho)-\Lambda_3)\partial^{3}_\rho g^{3m}=\frac{1}{3m}\partial^{3}_\rho G^{3m}(\rho)-\sum_{j=1}^3 \partial^{j)}_\rho \tilde{I}(\rho)\partial^{3-j)}_\rho g^{3m}(\rho)-\partial^{3}_\rho \tilde{T}^{3m}g^{3m}(\rho)
\end{align*}
and
\begin{align*}
\partial^{2}_\rho \tilde{T}^{3m}g^{3m}(\rho)= & \tilde{T}^{3m}_2\partial^{2}_\rho g^{3m}(\rho)+\frac{1}{\rho}\sum_{j=1}^2\int_{-\infty}^\infty \partial^{j+1)}_\rho f(\rho')\partial^{2-j)}_\rho g^{3m}(\rho')\left(\frac{\rho'}{\rho}\right)^2K^{3m}\left(\frac{\rho}{\rho'}\right)d\rho',
\end{align*}
with $\tilde{T}^{3m}_2$ compact from $L^2$ to $H^1$, a similar argument yields that $\partial^{3}_\rho g^{3m}\in L^2$ with $L^2$-norm depending on $m$.
\item Step 2.
Now, taking one, two and three derivatives of equation \eqref{gG} and applying Lemma \ref{Tderivadas}, we have that
\begin{align}
(\Theta^{3m}-\Lambda_3)\partial_\rho g^{3m}(\rho)=&\frac{1}{3m}\partial_\rho G^{3m}(\rho)\nonumber-\partial_\rho \tilde{I}(\rho) g^{3m}(\rho)-\frac{1}{\rho}\int_{-\infty}^\infty \partial^{2}_\rho f(\rho') g^{3m}(\rho')\frac{\rho'}{\rho}K^{3m}\left(\frac{1}{\gamma}\right)d\rho'\nonumber\\ &+\frac{1}{\rho}\int_{-\infty}^\infty f_\rho(\rho')\partial_\rho g^{3m}(\rho')\left(1-\frac{\rho'}{\rho}\right)K^{m}\left(\frac{1}{\gamma}\right)d\rho', \label{primera}\\
(\Theta^{3m}-\Lambda_3)\partial_\rho^{2} g^{3m}(\rho)=&\frac{1}{3m}\partial^{2}_\rho G^{3m}(\rho)-\sum_{j=1}^2 \partial^{j)}_\rho \tilde{I}(\rho)\partial^{2-j)}_\rho g^{3m}(\rho)\nonumber\\
&-\frac{1}{\rho}\sum_{j=1}^2\int_{-\infty}^\infty \partial^{j+1)}_\rho f(\rho')\partial^{2-j)}_\rho g^{3m}(\rho')\left(\frac{\rho'}{\rho}\right)^2K^{3m}\left(\frac{\rho}{\rho'}\right)d\rho'\nonumber\\
&+\frac{1}{\rho}\int_{-\infty}^\infty f_\rho(\rho')\partial_\rho^2 g^{3m}(\rho')\left(1-\left(\frac{\rho'}{\rho}\right)^2\right)K^{m}\left(\frac{\rho}{\rho'}\right)d\rho',\label{segunda}\\
(\Theta^{3m}-\Lambda_3)\partial_\rho^{3} g^{3m}(\rho)=&\frac{1}{3m}\partial^{3}_\rho G^{3m}(\rho)-\sum_{j=1}^3 \partial^{j)}_\rho \tilde{I}(\rho)\partial^{3-j)}_\rho g^{3m}(\rho)\nonumber\\
&-\frac{1}{\rho}\sum_{j=1}^3\int_{-\infty}^\infty \partial^{j+1)}_\rho f(\rho')\partial^{3-j)}_\rho g^{3m}(\rho')\left(\frac{\rho'}{\rho}\right)^3K^{3m}\left(\frac{\rho}{\rho'}\right)d\rho'\nonumber\\
& +\frac{1}{\rho}\int_{-\infty}^\infty f_\rho(\rho')\partial_\rho^3 g^{3m}(\rho')\left(1-\left(\frac{\rho'}{\rho}\right)^3\right)K^{m}\left(\frac{\rho}{\rho'}\right)d\rho'.\label{tercera}
\end{align}
Since $g^{3m}\in H^3$ by Step 1 above, the coercivity property in Proposition \ref{thetaicoer} applies. We apply it first to \eqref{primera}, then to \eqref{segunda} and \eqref{tercera}, yielding the bound
$$\|g^{3m}\|_{H^3} \leq \frac{C}{m}\|G^{3m}\|_{H^3}$$
with $C$ independent of $m$. The only problematic contributions are the last terms in \eqref{primera}, \eqref{segunda} and \eqref{tercera}. In order to bound these terms we apply an integration by parts, Lemma \ref{commutator} and the same argument that we used in Lemma \ref{lemacompacidad} to control the $L^2$-norm.
\end{enumerate}
\end{proof}
\begin{lemma}\label{rmi1}
The solution $g^3\in L^2$ of the equation $(\Theta^3-\Lambda_3)g^3=\frac{1}{3}G^3$ with $G^{3}\in H^3\cap (B_*^3)^\perp$, given by Lemma \ref{emi1}, is actually in $H^3$.
\end{lemma}
\begin{proof}
We can show that $g^3\in H^3$ in the same way as in the proof of the first part of Lemma \ref{rmm1}.
\end{proof}
\end{enumerate}
This concludes the proof of Proposition \ref{codimension}.
\end{proof}
\subsection{Step 4. The transversality property}
In this section we prove the transversality condition \eqref{condTransversality} of the C-R theorem, i.e., the fourth hypothesis. In order to do this it is enough to show that
\begin{align}
\label{eqtransversal}
(\Theta^{3} - \Lambda_3)b^{3} = B^{3}
\end{align}
has no solution $b^{3}\in L^2$.
Suppose that there exists $b^3\in L^2$ such that \eqref{eqtransversal} holds. Then, taking the scalar product in $L^2$ with $B^3_*$, we have that
\begin{align*}
\langle B^3, B^3_*\rangle =0.
\end{align*}
This is impossible, as was proved in Lemma \ref{BBstar}.
This concludes the proof of Theorem \ref{main}.
\section{Asymptotics}
\label{sectionasymptotics}
Part of the computer-assisted proof involves computing the kernels $T^{1}, T^{3}$ and $T^{6}$, which are given by elliptic integrals. We are not aware of any rigorous implementation of them in any library. One possibility would be to leave the (singular) integrals as they are and integrate over a domain of one extra dimension, but this would be very costly in terms of computer time. Instead, we carry out the laborious work of deriving explicit approximations (to order 0) of the kernels by hand, with computable error bounds of order greater than 1. Once this is done, whenever we have to code $T^{1}, T^{3}$ or $T^{6}$ we substitute it by the explicit expression found here.
We start with the elliptic integral
\begin{align*}
I & = \int_{-\pi}^{\pi} \frac{\cos(x)}{\sqrt{1+r^2-2r\cos(x)}}\,dx.
\end{align*}
Taking $\displaystyle u = \frac{1-r}{1+r}$ and using that $1+r^2-2r\cos(x)=(1+r)^2\left(\sin^{2}\left(\frac{x}{2}\right)+u^{2}\cos^{2}\left(\frac{x}{2}\right)\right)$:
\begin{align*}
I & = \frac{1}{1+r} \int_{-\pi}^{\pi} \frac{\cos(x)\,dx}{\sqrt{\sin^{2}\left(\frac{x}{2}\right) + u^{2}\cos^{2}\left(\frac{x}{2}\right)}}= \frac{4}{1+r}\int_{0}^{\frac{\pi}{2}} \frac{\cos(2y)\,dy}{\sqrt{\sin^{2}(y)+u^{2}\cos^{2}(y)}} \\& = \frac{4}{1+r}\int_{0}^{\frac{\pi}{2}} \frac{1-2\sin^{2}(y)}{\sqrt{\sin^{2}(y)+u^{2}\cos^{2}(y)}}\,dy = \left\{
\betagin{array}{ccc}
z & = & \tan(y) \\
\frac{dz}{1+z^{2}} & = & dy
\end{array}
\right\} = \frac{4}{1+r}\int_{0}^{\infty} \frac{1-z^{2}}{(1+z^{2})^{3/2}} \frac{dz}{\sqrt{z^{2}+u^{2}}}.
\end{align*}
We remark that $u$ will be close to zero. We need to derive the asymptotics in powers of $u$ as $u \rightarrow 0$ of
\begin{align*}
I & = \frac{4}{1+r} \int_{0}^{\infty} \frac{1-z^2}{(1+z^2)^{3/2}} \frac{dz}{\sqrt{z^2+u^2}} \\
& = \frac{4}{1+r} \int_{0}^{|u|} \frac{1-z^2}{(1+z^2)^{3/2}} \frac{dz}{\sqrt{z^2+u^2}}
+ \frac{4}{1+r} \int_{|u|}^{1} \frac{1-z^2}{(1+z^2)^{3/2}} \frac{dz}{\sqrt{z^2+u^2}}
+ \frac{4}{1+r} \int_{1}^{\infty} \frac{1-z^2}{(1+z^2)^{3/2}} \frac{dz}{\sqrt{z^2+u^2}} \\
& = I_1 + I_2 + I_3.
\end{align*}
We start with $I_1$:
\begin{align*}
I_1 & = \frac{4}{1+r} \int_{0}^{1} \frac{1-u^2 w^2}{(1+u^2 w^2)^{3/2}} \frac{dw}{\sqrt{1+w^2}}.
\end{align*}
We expand $(1+u^2 w^2)^{-3/2}$ as a power series around $w = 0$:
\begin{align*}
(1+u^2 w^2)^{-3/2} = 1 - \frac32 u^2w^2 + \frac{1}{2}\frac{15}{4}(u^2 w^2)^2E^{1}_{2},
\end{align*}
where $(1 + u^2)^{-7/2} \leq E^{1}_2 \leq 1$. A naive integration and bounding then yields:
\begin{align*}
I_1 & = \frac{4}{1+r}\left(\int_{0}^{1} \frac{1-u^2 w^2}{\sqrt{1+w^2}} dw - \frac32 u^2 \int_{0}^{1} \frac{1-u^2 w^2}{\sqrt{1+w^2}} w^2 dw + \frac{15}{8} u^4 \tilde{E}^{1}_2\right) \\
& = \frac{4}{1+r}\left(\arcsinh(1) + u^2 \left(\frac{5}{4} (-\sqrt{2} + \arcsinh(1))\right) + u^4 \left(-\frac{3}{16}\left(\sqrt{2}-3\arcsinh(1)\right) + \frac{15}{8}\tilde{E}^{1}_2\right)\right),
\end{align*}
where
\begin{align*}
\frac{1}{\sqrt{2}}\frac{1-u^2}{(1+u^2)^{7/2}}\leq \tilde{E}^{1}_{2} \leq 1.
\end{align*}
Next, we proceed with $I_3$. We can write it as:
\begin{align*}
I_3 & = \frac{4}{1+r} \int_{0}^{1} \frac{w^2-1}{(w^2+1)^{3/2}} \frac{dw}{\sqrt{1+w^2u^2}}.
\end{align*}
Expanding $(1+w^2u^2)^{-1/2}$ in a power series:
\begin{align*}
(1+u^2 w^2)^{-1/2} = 1 - \frac12 u^2w^2 + \frac{1}{2}\frac{3}{4}(u^2 w^2)^2E^{2}_{2},
\end{align*}
where $\frac{1}{(1+u^2)^{5/2}} \leq E^{2}_2 \leq 1$. A naive integration and bounding then yields:
\begin{align*}
I_3 & = \frac{4}{1+r}\left(\int_{0}^{1} \frac{w^2-1}{(w^2+1)^{3/2}}dw - \frac12 u^{2} \int_{0}^{1} w^2\frac{w^2-1}{(w^2+1)^{3/2}}dw + \frac{3}{8} u^4 \tilde{E}^{2}_{2}\right) \\
& = \frac{4}{1+r}\left(\arcsinh(1)-\sqrt{2} + \frac{1}{4} u^2\left(-3\sqrt{2}+5\arcsinh(1)\right) + \frac{3}{8} u^4 \tilde{E}^{2}_{2}\right),
\end{align*}
and
\begin{align*}
-1 \leq \tilde{E}^{2}_{2} \leq 0.
\end{align*}
Finally, we derive the asymptotics for $I_2$. This is the most delicate part. We start by writing
\begin{align*}
I_2 & = \frac{4}{1+r} \frac12 \int_{u^{2}}^{1} \frac{1}{w} \frac{1-w}{(1+w)^{3/2}} \frac{dw}{\sqrt{1+\frac{u^{2}}{w}}} \\
& = \frac{4}{1+r} \frac12 \int_{u^{2}}^{1} \frac{1}{w} \frac{1}{(1+w)^{3/2}} \frac{dw}{\sqrt{1+\frac{u^{2}}{w}}} - \frac{4}{1+r} \frac12 \int_{u^{2}}^{1} \frac{1}{w} \frac{w}{(1+w)^{3/2}} \frac{dw}{\sqrt{1+\frac{u^{2}}{w}}}\\ &
= I_{21} + I_{22}.
\end{align*}
Next, we expand the term $\left(1+\frac{u^{2}}{w}\right)^{-1/2}$, but we do not truncate. Hence
\begin{align*}
I_{21} & = \frac{2}{1+r}\left(\sum_{k \geq 0}{-1/2 \choose k}\underbrace{ u^{2k} \int_{u^{2}}^{1} \frac{1}{w^{1+k}} \frac{dw}{(1+w)^{3/2}}}_{F_k}\right).
\end{align*}
Using the following integration by parts formula for $k > 0$:
\begin{align*}
\int_{u^{2}}^{1} w^{-k}(1+w)^{-3/2} dw & = \left.(-2)(1+w)^{-1/2}w^{-k}\right|_{w=u^{2}}^{w=1}
-2k\int_{u^{2}}^{1} w^{-k-1}(1+w)^{-1/2} dw \\
& = \left.(-2)(1+w)^{-1/2}w^{-k}\right|_{w=u^{2}}^{w=1}
-2k\int_{u^{2}}^{1} w^{-k-1}(1+w)^{-3/2} dw \\ &-2k\int_{u^{2}}^{1} w^{-k}(1+w)^{-3/2} dw,
\end{align*}
which implies:
\begin{align*}
\int_{u^{2}}^{1} w^{-k-1}(1+w)^{-3/2} dw & = \left.-\frac{1}{k}(1+w)^{-1/2}w^{-k}\right|_{w=u^{2}}^{w=1}
- \frac{2k+1}{2k} \int_{u^{2}}^{1}w^{-k}(1+w)^{-3/2} dw,
\end{align*}
yielding
\begin{align*}
F_{k} & = \frac{1}{k}\left(\frac{1}{\sqrt{1+u^{2}}} - \frac{u^{2k}}{\sqrt{2}}\right) - u^{2}\left(\frac{2k+1}{2k}\right)F_{k-1}.
\end{align*}
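For instance, the first step of the recursion ($k=1$) reads
\begin{align*}
F_{1} = \frac{1}{\sqrt{1+u^{2}}} - \frac{u^{2}}{\sqrt{2}} - \frac{3}{2}\,u^{2}F_{0},
\end{align*}
so that each $F_{k}$ consists of an explicit part plus $u^{2}$ times the previous term; this is the structure exploited below.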
We now get back to $I_{21}$. We have to compute
\begin{align*}
I_{21} & = \frac{2}{1+r}\left(\sum_{k \geq 0} {-1/2 \choose k} F_{k}\right) = \\ & \frac{2}{1+r}\left(F_{0} + \sum_{k \geq 1}{-1/2 \choose k} \frac{1}{k}\left(\frac{1}{\sqrt{1+u^{2}}} - \frac{u^{2k}}{\sqrt{2}}\right) - u^{2} \sum_{k\geq 1}{-1/2 \choose k} \left(\frac{2k+1}{2k}\right)F_{k-1}\right).
\end{align*}
We calculate the following explicit expressions:
\begin{align*}
F_0 & = \sqrt{2} - \frac{2}{\sqrt{1+u^{2}}} - 2\arcsinh(1) + 2\arcsinh\left(\frac{1}{|u|}\right). \\
\sum_{k \gammaeq 1}{-1/2 \choose k} \frac{1}{k}\left(\frac{1}{\sqrt{1+u^{2}}}-\frac{u^{2k}}{\sqrt{2}}\right) & = \frac{-2\arcsinh(1)+\log(4)}{\sqrt{1+u^{2}}} + \sqrt{2}\log\left(\frac{1}{2}(1+\sqrt{1+u^{2}})\right).
\end{align*}
We treat the rightmost sum as an error term. Using the fact that the terms are alternating (since $\displaystyle {-1/2 \choose k}$ alternates in sign and the other factors are positive), and that, for $k \geq 1$:
\begin{align*}
F_{k-1} = \frac{1}{u^{2}} \int_{u^{2}}^{1} \left(\frac{u^{2}}{w}\right)^{k} \frac{dw}{(1+w)^{3/2}} \geq \frac{1}{u^{2}} \int_{u^{2}}^{1} \left(\frac{u^{2}}{w}\right)^{k+1} \frac{dw}{(1+w)^{3/2}} = F_{k} \quad \forall \quad 0 < u^{2} < 1,
\end{align*}
\begin{align*}
\left| {-1/2 \choose k} \right| \frac{2k+1}{2k} = \frac{2 (1 + k)^2}{k (3 + 2 k)} \left| {-1/2 \choose k+1} \right| \frac{2(k+1)+1}{2(k+1)}
\geq \left| {-1/2 \choose k+1} \right| \frac{2(k+1)+1}{2(k+1)},
\end{align*}
we can bound the absolute value of the sum by the absolute value of its first term, yielding:
\begin{align*}
\left| u^{2} \sum_{k\geq 1}{-1/2 \choose k} \left(\frac{2k+1}{2k}\right)F_{k-1}\right| \leq \frac{3}{4} u^{2}|F_{0}|.
\end{align*}
We move on to $I_{22}$. We can write it as:
\begin{align*}
I_{22} & = \frac{2}{1+r}\left(-u^{2}F_{-1} - u^{2}\sum_{k \geq 1} {-1/2 \choose k} F_{k-1}\right).
\end{align*}
Together with the explicit calculation
\begin{align*}
-u^{2}F_{-1} & = \sqrt{2} - \frac{2}{\sqrt{1+u^{2}}},
\end{align*}
and the bound on the last term, which follows by the same reasoning as above,
\begin{align*}
\left|u^{2}\sum_{k \geq 1} {-1/2 \choose k} F_{k-1}\right| \leq \frac12 u^{2}|F_{0}|,
\end{align*}
we can conclude, after gathering all the contributions, that $I$ splits in the following way:
\begin{align*}
I & = I_{ho,ns} + I_{ho,s} + I_{e,s} + I_{e,ns},
\end{align*}
where
\begin{align*}
I_{ho,ns} & = \frac{4}{1+r}\left(\arcsinh(1) + u^2 \left(\frac{5}{4} (-\sqrt{2} + \arcsinh(1))\right) + u^4 \left(-\frac{3}{16}\left(\sqrt{2}-3\arcsinh(1)\right) \right)\right)\\
& + \frac{4}{1+r}\left(\arcsinh(1)-\sqrt{2} + \frac{1}{4} u^2\left(-3\sqrt{2}+5\arcsinh(1)\right)\right) \\
& + \frac{2}{1+r}\left(\sqrt{2} - \frac{2}{\sqrt{1+u^{2}}} - 2\arcsinh(1) \right) \\
& + \frac{2}{1+r}\left( \frac{-2\arcsinh(1)+\log(4)}{\sqrt{1+u^{2}}} + \sqrt{2}\log\left(\frac{1}{2}(1+\sqrt{1+u^{2}})\right) \right) \\
& + \frac{2}{1+r}\left(\sqrt{2} - \frac{2}{\sqrt{1+u^{2}}} \right) \\
& = \frac{4}{1+r}\left(\frac{1}{\sqrt{2}}\log\left(\frac{1}{2}(1+\sqrt{1+u^{2}})\right) + \frac{1}{\sqrt{1+u^{2}}}\left(-2+\left(-1+\sqrt{1+u^{2}}\right)\arcsinh(1)+\log(2)\right)\right) \\
& + \frac{4}{1+r}\left(u^2 \left(-2 \sqrt{2} + \frac{5}{2}\arcsinh(1)\right) - \frac{3}{16} u^4 (\sqrt{2} - 3 \arcsinh(1)) \right) \\
I_{ho,s} & = \frac{4}{1+r} \arcsinh\left(\frac{1}{|u|}\right)
\end{align*}
and
\begin{align*}
|I_{e,ns}| & \leq \frac{4}{1+r}\frac{15}{8} u^{4} + \frac{4}{1+r}\frac{3}{8} u^4 + \frac{2}{1+r}\frac{5}{4}u^{2}\left|\sqrt{2} - \frac{2}{\sqrt{1+u^{2}}} - 2\arcsinh(1)\right| \\
& = \frac{4}{1+r}\frac{9}{4} u^{4} + \frac{4}{1+r}\frac{5}{8}u^{2}\left|\sqrt{2} - \frac{2}{\sqrt{1+u^{2}}} - 2\arcsinh(1)\right| \\
|I_{e,s}| & \leq \frac{4}{1+r}\frac{5}{4}u^{2} \arcsinh\left(\frac{1}{|u|}\right).
\end{align*}
Note that in the computer implementation any error term $I_{e}$ with $|I_{e}| \leq M$ will be enclosed by the interval $[-M,M]$.
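As an illustration of how this decomposition is consumed, a minimal sketch (not the actual code in the supplementary material) could look as follows; the toy \texttt{Interval} type stands in for the C-XSC \texttt{interval} class, and the names \texttt{symmetric\_enclosure} and \texttt{assemble\_I} are illustrative only:
\begin{verbatim}
// Toy interval type standing in for the C-XSC interval class (sketch only).
struct Interval {
    double lo, hi;
    Interval operator+(const Interval& o) const { return {lo + o.lo, hi + o.hi}; }
};

// An error term known to satisfy |I_e| <= M is enclosed by [-M, M].
Interval symmetric_enclosure(double M) { return {-M, M}; }

// Assemble I = I_{ho,ns} + I_{ho,s} + I_{e,ns} + I_{e,s}: the explicit parts are
// evaluated in interval arithmetic, the error parts only through their bounds.
Interval assemble_I(const Interval& I_ho_ns, const Interval& I_ho_s,
                    double M_e_ns, double M_e_s) {
    return I_ho_ns + I_ho_s + symmetric_enclosure(M_e_ns) + symmetric_enclosure(M_e_s);
}
\end{verbatim}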
We do the same for the other elliptic integral:
\begin{align*}
J = \int_{-\pi}^{\pi} \frac{\cos(3x)}{\sqrt{1+r^2-2r\cos(x)}}\,dx.
\end{align*}
Taking $\displaystyle u = \frac{1-r}{1+r}$:
\begin{align*}
J & = \frac{1}{1+r} \int_{-\pi}^{\pi} \frac{\cos(3x)\,dx}{\sqrt{\sin^{2}\left(\frac{x}{2}\right) + u^{2}\cos^{2}\left(\frac{x}{2}\right)}} = \frac{4}{1+r}\int_{0}^{\frac{\pi}{2}} \frac{\cos(6y)\,dy}{\sqrt{\sin^{2}(y)+u^{2}\cos^{2}(y)}} \\
& = \left\{
\betagin{array}{ccc}
z & = & \tan(y) \\
\frac{dz}{1+z^{2}} & = & dy
\end{array}
\right\} = \frac{4}{1+r}\int_{0}^{\infty} \frac{1-15z^2+15z^4-z^6}{(1+z^{2})^{7/2}} \frac{dz}{\sqrt{z^{2}+u^{2}}}.
\end{align*}
We repeat the splitting that we did for $I$, this time for $J$:
\betagin{align*}
J & = \frac{4}{1+r} \int_{0}^{\infty} \frac{1-15z^2+15z^4-z^6}{(1+z^2)^{7/2}} \frac{dz}{\sqrt{z^2+u^2}} \\
& = \frac{4}{1+r} \int_{0}^{|u|} \frac{1-15z^2+15z^4-z^6}{(1+z^2)^{7/2}} \frac{dz}{\sqrt{z^2+u^2}}
+ \frac{4}{1+r} \int_{|u|}^{1} \frac{1-15z^2+15z^4-z^6}{(1+z^2)^{7/2}} \frac{dz}{\sqrt{z^2+u^2}} \\
& + \frac{4}{1+r} \int_{1}^{\infty} \frac{1-15z^2+15z^4-z^6}{(1+z^2)^{7/2}} \frac{dz}{\sqrt{z^2+u^2}}
= J_1 + J_2 + J_3.
\end{align*}
We start with $J_1$:
\betagin{align*}
J_1 & = \frac{4}{1+r} \int_{0}^{1} \frac{1-15 u^2 w^2+15 u^4 w^4- u^6w^6}{(1+u^2 w^2)^{7/2}} \frac{dw}{\sqrt{1+w^2}}.
\end{align*}
We expand $(1+u^2 w^2)^{-7/2}$ as a power series around $w = 0$:
\betagin{align*}
(1+u^2 w^2)^{-7/2} = 1 - \frac{7}{2} u^2w^2 F^{1}_{2},
\end{align*}
where $(1 + u^2)^{-9/2} \leq F^{1}_2 \leq 1$. A naive integration and bounding then yields:
\betagin{align*}
J_1 & = \frac{4}{1+r}\left(\int_{0}^{1} \frac{1-15 u^2 w^2+15 u^4 w^4- u^6w^6}{\sqrt{1+w^2}} dw - \frac{7}{2} u^2 \tilde{F}^{1}_2\right) \\
& = \frac{4}{1+r}\left(\arcsinh(1) - u^2 \left(\frac{15}{2} (\sqrt{2} - \arcsinh(1))\right) - u^4 \left(\frac{15}{8}\left(\sqrt{2}-3\arcsinh(1)\right)\right)\right. \\
& \left. - \frac{1}{48}u^{6}\left(13 \sqrt{2} - 15 \arcsinh(1)\right) - \frac{7}{2}u^{2}\tilde{F}^{1}_2\right),
\end{align*}
where
\betagin{align*}
0 \leq \tilde{F}^{1}_{2} \leq 1,
\end{align*}
and we have used that $|u|$ is small enough to guarantee the positiveness of the integrand.
Next, we proceed with $J_3$. We can write it as:
\betagin{align*}
J_3 & = \frac{4}{1+r} \int_{0}^{1} \frac{w^6-15w^4+15w^2-1}{(w^2+1)^{7/2}} \frac{dw}{\sqrt{1+w^2u^2}}.
\end{align*}
Expanding in series $(1+w^2u^2)^{-1/2}$:
\betagin{align*}
(1+u^2 w^2)^{-1/2} = 1 - \frac12 u^2w^2 F^{2}_{2},
\end{align*}
where $\frac{1}{(1+u^2)^{3/2}} \leq F^{2}_2 \leq 1$. We use the fact that
\betagin{align*}
\max_{w \in [0,1]} |w^6-15w^4+15w^2-1| = 80 \sqrt{5} - 176 < 3,
\end{align*}
to obtain, via integration and bounding:
\betagin{align*}
J_3 & = \frac{4}{1+r}\left(\int_{0}^{1} \frac{w^6-15w^4+15w^2-1}{(w^2+1)^{7/2}}dw - \frac{1}{2} u^2 \tilde{F}^{2}_{2}\right) \\
& = \frac{4}{1+r}\left(\arcsinh(1)-\frac{7}{15}\sqrt{2} - \frac{1}{2} u^2 \tilde{F}^{2}_{2}\right),
\end{align*}
and
\betagin{align*}
|\tilde{F}^{2}_{2}| \leq 3.
\end{align*}
We finally move on to $J_2$. As before, we have the following formula obtained by integration by parts:
\betagin{align*}
\int_{u^{2}}^{1} w^{-k-1}(1+w)^{-7/2}dw = \left.-\frac{1}{k}w^{-k}(1+w)^{-5/2}\right|_{w=u^{2}}^{w=1} - \left(\frac{2k+5}{2k}\right)\int_{u^{2}}^{1} w^{-k}(1+w)^{-7/2}dw.
\end{align*}
Defining now
\betagin{align*}
G_{k} & = u^{2k} \int_{u^{2}}^{1} \frac{1}{w^{1+k}} \frac{dw}{(1+w)^{7/2}}
\end{align*}
we arrive to
\betagin{align}
\Lambdabel{formulaJ}
G_{k} & = \frac{1}{k}\left(\frac{1}{(1+u^{2})^{5/2}} - \frac{u^{2k}}{\sqrt{32}}\right) - u^{2}\left(\frac{2k+5}{2k}\right)G_{k-1}.
\end{align}
We can write
\betagin{align*}
J_2 & = \frac{2}{1+r}\left(\sum_{k\gammaeq 0} {-1/2 \choose k} G_{k} - 15 u^{2} \sum_{k\gammaeq 0} {-1/2 \choose k} G_{k-1} + 15 u^{4}\sum_{k\gammaeq 0} {-1/2 \choose k} G_{k-2} - u^{6} \sum_{k\gammaeq 0} {-1/2 \choose k} G_{k-3}\right) \\
& = J_{21} + J_{22} + J_{23} + J_{24}.
\end{align*}
The last three terms are easier. We deal with them first.
\betagin{align*}
J_{24} & = \frac{2}{1+r}\left(-u^{6}G_{-3} - u^{6}\sum_{k\gammaeq 1} {-1/2 \choose k} G_{k-3}\right).
\end{align*}
The first term can be explicitly calculated and it amounts to
\betagin{align*}
-u^{6}G_{-3} & = -\frac{1}{60}\frac{64 + 160u^{2} + 120 u^{4} - 43 \sqrt{2}(1+u^{2})^{5/2}}{(1+u^{2})^{5/2}}.
\end{align*}
The last series is alternating and convergent and we can get the following bound:
\betagin{align*}
\left| u^{6}\sum_{k\gammaeq 1} {-1/2 \choose k} G_{k-3}\right| \leq u^{6}\frac{1}{2}G_{-2}
= \frac{1}{120}u^{2}\left|-7\sqrt{2} + \frac{40}{(1+u^{2})^{3/2}} - \frac{24}{(1+u^{2})^{5/2}}\right|.
\end{align*}
We move on to $J_{23}$. By the same reasoning:
\betagin{align*}
J_{23} & = \frac{2}{1+r}\left(15u^{4}G_{-2} + 15 u^{4}\sum_{k\gammaeq 1} {-1/2 \choose k} G_{k-2}\right) \\
& = \frac{2}{1+r}\left(-\frac{7}{2\sqrt{2}} + \frac{10}{(1+u^{2})^{3/2}} -\frac{6}{(1+u^{2})^{5/2}} + 15 u^{4}\sum_{k\gammaeq 1} {-1/2 \choose k} G_{k-2}\right), \\
\end{align*}
and
\betagin{align*}
\left|15 u^{4}\sum_{k\gammaeq 1} {-1/2 \choose k} G_{k-2}\right| \leq u^{4}\frac{15}{2}G_{-1}
= \frac{3}{8}u^{2}\left|-\sqrt{2} + \frac{8}{(1+u^{2})^{5/2}}\right|.
\end{align*}
The same applies to $J_{22}$. We have:
\betagin{align*}
J_{22} & = \frac{2}{1+r}\left(-15u^{2}G_{-1} - 15 u^{2}\sum_{k\gammaeq 1} {-1/2 \choose k} G_{k-1}\right) \\
& = \frac{2}{1+r}\left(\frac{3}{4}\sqrt{2} -\frac{6}{(1+u^{2})^{5/2}} - 15 u^{2}\sum_{k\gammaeq 1} {-1/2 \choose k} G_{k-1}\right) \\
\end{align*}
and
\betagin{align*}
&\left|-15 u^{2}\sum_{k\gammaeq 1} {-1/2 \choose k} G_{k-1}\right| \leq u^{2}\frac{15}{2}G_{0} \\
& = \frac{1}{8}u^{2}\left|73\sqrt{2} - \frac{24}{(1+u^{2})^{5/2}} - \frac{40}{(1+u^{2})^{3/2}} - \frac{120}{(1+u^{2})^{1/2}} - 120 \arcsinh(1) + 120\arcsinh\left(\frac{1}{|u|}\right)\right|.
\end{align*}
In order to compute $J_{21}$ we use formula \eqref{formulaJ}:
\betagin{align*}
\sum_{k\gammaeq 0} {-1/2 \choose k} G_{k} & = G_{0} + \sum_{k\gammaeq 1} {-1/2 \choose k} G_{k} \\
& = G_{0} + \sum_{k\gammaeq 1} {-1/2 \choose k} \frac{1}{k}\left(\frac{1}{(1+u^{2})^{5/2}} - \frac{u^{2k}}{\sqrt{32}}\right) - u^{2} \sum_{k\gammaeq 1} {-1/2 \choose k}\left(\frac{2k+5}{2k}\right)G_{k-1}\\
& = \frac{73}{30\sqrt{2}} - \frac{2}{\sqrt{1+u^{2}}} - \frac{2}{3}\frac{1}{(1+u^2)^{3/2}} - \frac{2}{5} \frac{1}{(1+u^2)^{5/2}} - 2 \arcsinh(1) + 2 \arcsinh\left(\frac{1}{|u|}\right) \\
& + \frac{1}{8}\left(\frac{16(\log(2) - \arcsinh(1))}{(1+u^2)^{5/2}} - \sqrt{2}\log(4) + 2\sqrt{2}\log(1+\sqrt{1+u^2})\right) \\
& - u^{2} \sum_{k\gammaeq 1} {-1/2 \choose k}\left(\frac{2k+5}{2k}\right)G_{k-1}.
\end{align*}
The last sum can be bounded as usual by
\betagin{align*}
& \left|u^{2} \sum_{k\gammaeq 1} {-1/2 \choose k}\left(\frac{2k+5}{2k}\right)G_{k-1}\right| \leq \frac{1}{2}\frac{7}{2}u^{2}G_{0} \\
& = \frac{7}{4}u^{2}\left|\frac{73}{30\sqrt{2}} - \frac{2}{\sqrt{1+u^{2}}} - \frac{2}{3}\frac{1}{(1+u^2)^{3/2}} - \frac{2}{5} \frac{1}{(1+u^2)^{5/2}} - 2 \arcsinh(1) + 2 \arcsinh\left(\frac{1}{|u|}\right)\right|.
\end{align*}
We finally add everything together to write $J$ as
\betagin{align*}
J & = J_{ho,ns} + J_{ho,s} + J_{e,ns} + J_{e,s}
\end{align*}
where
\betagin{align*}
J_{ho,ns}
& =
\frac{4}{1+r}\left(\arcsinh(1) - u^2 \left(\frac{15}{2} (\sqrt{2} - \arcsinh(1))\right)\right.\\ & \quad\qquad\qquad \left. - u^4 \left(\frac{15}{8}\left(\sqrt{2}-3\arcsinh(1)\right)\right)- \frac{1}{48}u^{6}\left(13 \sqrt{2} - 15 \arcsinh(1)\right)\right) \\
& +\frac{4}{1+r}\left(\arcsinh(1)-\frac{7}{15}\sqrt{2} \right) \\
& + \frac{2}{1+r}\left(-\frac{1}{60}\frac{64 + 160 u^2 + 120 u^4 - 43 \sqrt{2}(1 + u^2)^{5/2}}{(1+u^2)^{5/2}}\right) \\
& + \frac{2}{1+r}\left(-\frac{7}{2\sqrt{2}} - \frac{6}{(1+u^2)^{5/2}} + \frac{10}{(1+u^2)^{3/2}}\right) \\
& + \frac{2}{1+r}\left(\frac{3}{4}\left(\sqrt{2} - \frac{8}{(1+u^2)^{5/2}}\right)\right) \\
& + \frac{2}{1+r}\left(\frac{1}{8}\left(\frac{16(\log(2) - \arcsinh(1))}{(1+u^2)^{5/2}} - \sqrt{2}\log(4) + 2\sqrt{2}\log(1+\sqrt{1+u^2})\right)\right) \\
& + \frac{2}{1+r}\left(\frac{73}{30\sqrt{2}} - \frac{2}{\sqrt{1+u^{2}}} - \frac{2}{3}\frac{1}{(1+u^2)^{3/2}} - \frac{2}{5} \frac{1}{(1+u^2)^{5/2}} - 2 \arcsinh(1) \right) \\
& = \frac{4}{1+r}\left(-\frac{46}{15(1+u^2)^{5/2}} - \frac{1}{48}u^{6}(13\sqrt{2}-15\arcsinh(1)) + \arcsinh(1)\right) \\
& + \frac{4}{1+r}\left(-\frac{\arcsinh(1)}{(1+u^2)^{5/2}} + u^{4}\left(-\frac{15}{4\sqrt{2}} - \frac{2}{(1+u^2)^{5/2}} + \frac{45}{8}\arcsinh(1)\right)\right) \\
& + \frac{4}{1+r}\left(\frac{1}{6}u^{2}\left(-45\sqrt{2} + \frac{8}{(1+u^{2})^{5/2}} + 45 \arcsinh(1)\right)\right) \\
& + \frac{4}{1+r}\left(\frac{\log(2)}{(1+u^2)^{5/2}} + \frac{\log\left(\frac{1}{2}\left(1+\sqrt{1+u^2}\right)\right)}{4\sqrt{2}}\right) \\
J_{ho,s} & = \frac{4}{1+r} \arcsinh\left(\frac{1}{|u|}\right)
\end{align*}
and
\betagin{align*}
|J_{e,ns}| & \leq \frac{4}{1+r}\left(\frac{7}{2}u^2 + \frac{3}{2} u^2\right)\\
& + \frac{4}{1+r}\left(\frac{1}{240}u^2\left|-7 \sqrt{2} - \frac{24}{(1+u^2)^{5/2}} + \frac{40}{(1+u^2)^{3/2}}\right|\right) \\
& + \frac{4}{1+r}\left(\frac{3}{16}u^2\left|-\sqrt{2} + \frac{8}{(1+u^2)^{5/2}}\right|\right) \\
& + \frac{4}{1+r}\left(\frac{37}{8}u^2\left|\frac{73}{30\sqrt{2}} - \frac{2}{\sqrt{1+u^{2}}} - \frac{2}{3}\frac{1}{(1+u^2)^{3/2}} - \frac{2}{5} \frac{1}{(1+u^2)^{5/2}} - 2 \arcsinh(1)\right|\right) \\
|J_{e,s}| & \leq \frac{4}{1+r}\frac{37}{4}u^{2} \arcsinh\left(\frac{1}{|u|}\right).
\end{align*}
Finally, we do $K_6$, corresponding to:
\begin{align*}
L = \int_{-\pi}^{\pi} \frac{\cos(6x)}{\sqrt{1+r^2-2r\cos(x)}}\,dx.
\end{align*}
Taking $\displaystyle u = \frac{1-r}{1+r}$:
\begin{align*}
L & = \frac{1}{1+r} \int_{-\pi}^{\pi} \frac{\cos(6x)\,dx}{\sqrt{\sin^{2}\left(\frac{x}{2}\right) + u^{2}\cos^{2}\left(\frac{x}{2}\right)}} = \frac{4}{1+r}\int_{0}^{\frac{\pi}{2}} \frac{\cos(12y)\,dy}{\sqrt{\sin^{2}(y)+u^{2}\cos^{2}(y)}} \\
& = \left\{
\betagin{array}{ccc}
z & = & \tan(y) \\
\frac{dz}{1+z^{2}} & = & dy
\end{array}
\right\} = \frac{4}{1+r}\int_{0}^{\infty} \frac{1 - 66 z^2 + 495 z^4 - 924 z^6 + 495 z^8 - 66 z^{10} + z^{12}}{(1+z^{2})^{13/2}} \frac{dz}{\sqrt{z^{2}+u^{2}}}.
\end{align*}
We repeat the splitting again:
\betagin{align*}
L & = \frac{4}{1+r} \int_{0}^{\infty} \frac{1 - 66 z^2 + 495 z^4 - 924 z^6 + 495 z^8 - 66 z^{10} + z^{12}}{(1+z^2)^{13/2}} \frac{dz}{\sqrt{z^2+u^2}} \\
& = \frac{4}{1+r} \int_{0}^{|u|} \frac{1 - 66 z^2 + 495 z^4 - 924 z^6 + 495 z^8 - 66 z^{10} + z^{12}}{(1+z^2)^{13/2}} \frac{dz}{\sqrt{z^2+u^2}} \\
& + \frac{4}{1+r} \int_{|u|}^{1} \frac{1 - 66 z^2 + 495 z^4 - 924 z^6 + 495 z^8 - 66 z^{10} + z^{12}}{(1+z^2)^{13/2}} \frac{dz}{\sqrt{z^2+u^2}} \\
& + \frac{4}{1+r} \int_{1}^{\infty} \frac{1 - 66 z^2 + 495 z^4 - 924 z^6 + 495 z^8 - 66 z^{10} + z^{12}}{(1+z^2)^{13/2}} \frac{dz}{\sqrt{z^2+u^2}}
= L_1 + L_2 + L_3.
\end{align*}
We start with $L_1$:
\betagin{align*}
L_1 & = \frac{4}{1+r} \int_{0}^{1} \frac{1 - 66 u^2w^2 + 495 u^4w^4 - 924 u^6w^6 + 495 u^8w^8 - 66 u^{10}w^{10} + u^{12}w^{12}}{(1+u^2 w^2)^{13/2}} \frac{dw}{\sqrt{1+w^2}}.
\end{align*}
We expand $(1+u^2 w^2)^{-13/2}$ as a power series around $w = 0$:
\betagin{align*}
(1+u^2 w^2)^{-13/2} = 1 - \frac{13}{2} u^2w^2 G^{1}_{2},
\end{align*}
where $(1 + u^2)^{-15/2} \leq F^{1}_2 \leq 1$. A naive integration and bounding then yields:
\betagin{align*}
L_1 & = \frac{4}{1+r}\left(\int_{0}^{1} \frac{1 - 66 u^2w^2 + 495 u^4w^4 - 924 u^6w^6 + 495 u^8w^8 - 66 u^{10}w^{10} + u^{12}w^{12}}{\sqrt{1+w^2}} dw - \frac{13}{2} u^2 \tilde{G}^{1}_2\right) \\
& = \frac{4}{1+r}\left(\arcsinh(1) - 33 u^2 (\sqrt{2} - \arcsinh(1)) - u^4 \left(\frac{495}{8}\left(\sqrt{2}-3\arcsinh(1)\right)\right)\right. \\
& \left. - \frac{77}{4}u^{6}\left(13 \sqrt{2} - 15 \arcsinh(1)\right) -\frac{165}{128} u^8 (43 \sqrt{2} - 105 \arcsinh(1)) - \frac{33}{640} u^{10} (257 \sqrt{2} - 315 \arcsinh(1)) \right. \\
& \left.-\frac{7}{15360}u^{12}(221 \sqrt{2} - 495 \arcsinh(1)) - \frac{13}{2}u^{2}\tilde{G}^{1}_2\right),
\end{align*}
where
\betagin{align*}
0 \leq \tilde{G}^{1}_{2} \leq 1,
\end{align*}
and we have used that $|u|$ is small enough to guarantee the positiveness of the integrand.
Next, we proceed with $L_3$. We can write it as:
\betagin{align*}
L_3 & = \frac{4}{1+r} \int_{0}^{1} \frac{1 - 66 w^2 + 495 w^4 - 924w^6 + 495 w^8 - 66 w^{10} + w^{12}}{(w^2+1)^{13/2}} \frac{dw}{\sqrt{1+w^2u^2}}.
\end{align*}
Expanding in series $(1+w^2u^2)^{-1/2}$:
\betagin{align*}
(1+u^2 w^2)^{-1/2} = 1 - \frac12 u^2w^2 G^{2}_{2},
\end{align*}
where $\frac{1}{(1+u^2)^{3/2}} \leq G^{2}_2 \leq 1$. We use the fact that
\betagin{align*}
\max_{w \in [0,1]} |1 - 66 w^2 + 495 w^4 - 924 w^6 + 495 w^8 - 66 w^{10} + w^{12}| = 64, \quad \text{attained at $w=1$,}
\end{align*}
to obtain, via integration and bounding:
\betagin{align*}
L_3 & = \frac{4}{1+r}\left(\int_{0}^{1} \frac{1 - 66 w^2 + 495 w^4 - 924 w^6 + 495 w^8 - 66 w^{10} + w^{12}}{(w^2+1)^{13/2}}dw - \frac{1}{2} u^2 \tilde{G}^{2}_{2}\right) \\
& = \frac{4}{1+r}\left(\arcsinh(1)-\frac{2182}{3465}\sqrt{2} - \frac{1}{2} u^2 \tilde{G}^{2}_{2}\right),
\end{align*}
and
\betagin{align*}
|\tilde{G}^{2}_{2}| \leq 64.
\end{align*}
We finally move on to $L_2$. As before, we have the following formula obtained by integration by parts:
\betagin{align*}
\int_{u^{2}}^{1} w^{-k-1}(1+w)^{-13/2}dw = \left.-\frac{1}{k}w^{-k}(1+w)^{-11/2}\right|_{w=u^{2}}^{w=1} - \left(\frac{2k+11}{2k}\right)\int_{u^{2}}^{1} w^{-k}(1+w)^{-11/2}dw.
\end{align*}
Defining now
\betagin{align*}
H_{k} & = u^{2k} \int_{u^{2}}^{1} \frac{1}{w^{1+k}} \frac{dw}{(1+w)^{13/2}}
\end{align*}
we arrive to
\betagin{align}
\Lambdabel{formulaL}
H_{k} & = \frac{1}{k}\left(\frac{1}{(1+u^{2})^{11/2}} - \frac{u^{2k}}{\sqrt{2^{11}}}\right) - u^{2}\left(\frac{2k+11}{2k}\right)H_{k-1}.
\end{align}
We can write
\betagin{align*}
L_2 & = \frac{2}{1+r}\left(\sum_{k\gammaeq 0} {-1/2 \choose k} H_{k} - 66 u^{2} \sum_{k\gammaeq 0} {-1/2 \choose k} H_{k-1} + 495 u^{4}\sum_{k\gammaeq 0} {-1/2 \choose k} H_{k-2} - 924 u^{6} \sum_{k\gammaeq 0} {-1/2 \choose k} H_{k-3}\right) \\
& + \frac{2}{1+r}\left(495 u^{8}\sum_{k\gammaeq 0} {-1/2 \choose k} H_{k-4} - 66 u^{10} \sum_{k\gammaeq 0} {-1/2 \choose k} H_{k-5} + u^{12}\sum_{k\gammaeq 0} {-1/2 \choose k} H_{k-6}\right) \\
& = L_{21} + L_{22} + L_{23} + L_{24} + L_{25} + L_{26} + L_{27}.
\end{align*}
The last six terms are easier. We deal with them first.
\betagin{align*}
L_{27} & = \frac{2}{1+r}\left(u^{12}H_{-6} + u^{12}\sum_{k\gammaeq 1} {-1/2 \choose k} H_{k-6}\right).
\end{align*}
The first term can be explicitly calculated and it amounts to
\betagin{align*}
u^{12}H_{-6} & = \frac{1}{22176}\frac{16384 + 90112 u^2 + 202752 u^4 + 236544 u^6 + 147840 u^8 + 44352 u^{10} - 11531 \sqrt{2} (1 + u^2)^{11/2}}{(1+u^{2})^{11/2}}.
\end{align*}
The last series is alternating and convergent and we can get the following bound:
\betagin{align*}
\left| u^{12}\sum_{k\gammaeq 1} {-1/2 \choose k} H_{k-6}\right| \leq u^{12}\frac{1}{2}H_{-5}
& = \frac{1}{221760(1+u^2)^{11/2}}u^{2} \\
& \times \left|8192 + 45056 u^2 + 101376 u^4 + 118272 u^6 + 73920 u^8 - 5419 \sqrt{2}(1 + u^2)^{11/2}\right|.
\end{align*}
We move on to $L_{26}$. By the same reasoning:
\betagin{align*}
L_{26} & = \frac{2}{1+r}\left(-66u^{10}H_{-5} - 66 u^{10}\sum_{k\gammaeq 1} {-1/2 \choose k} H_{k-5}\right) \\
& = \frac{2}{1+r}\left(-\frac{8192 + 45056 u^2 + 101376 u^4 + 118272 u^6 + 73920 u^8 - 5419 \sqrt{2} (1 + u^2)^{11/2}}{1680(1+u^2)^{11/2}}\right) \\
& + \frac{2}{1+r}\left(- 66 u^{10}\sum_{k\gammaeq 1} {-1/2 \choose k} H_{k-5}\right),
\end{align*}
\betagin{align*}
\left|66 u^{10}\sum_{k\gammaeq 1} {-1/2 \choose k} H_{k-5}\right| \leq 33u^{10}H_{-4}
= \frac{1}{1120}u^{2}\left|\frac{1024 + 5632 u^2 + 12672 u^4 + 14784 u^6 - 533 \sqrt{2} (1 + u^2)^{11/2}}{(1+u^2)^{11/2}}\right|.
\end{align*}
The same applies to $L_{25}$. We have:
\betagin{align*}
L_{25} & = \frac{2}{1+r}\left(495 u^{8} H_{-4} + 495 u^{8}\sum_{k\gammaeq 1} {-1/2 \choose k} H_{k-4}\right) \\
& = \frac{2}{1+r}\left(\frac{3(1024 + 5632 u^2 + 12672 u^4 + 14784 u^6 - 533 \sqrt{2} (1 + u^2)^{11/2})}{224(1+u^{2})^{11/2}} + 495 u^{8}\sum_{k\gammaeq 1} {-1/2 \choose k} H_{k-4}\right) \\
\end{align*}
and
\betagin{align*}
&\left|495 u^{8}\sum_{k\gammaeq 1} {-1/2 \choose k} H_{k-4}\right| \leq u^{8}\frac{495}{2}H_{-3} \\
& = \frac{5}{448}u^{2}\left|\frac{512 + 2816 u^2 + 6336 u^4 - 151 \sqrt{2} (1 + u^2)^{11/2}}{(1+u^2)^{11/2}}\right|.
\end{align*}
The next term is $L_{24}$. We have:
\betagin{align*}
L_{24} & = \frac{2}{1+r}\left(-924 u^{6} H_{-3} -924 u^{6}\sum_{k\gammaeq 1} {-1/2 \choose k} H_{k-3}\right) \\
& = \frac{2}{1+r}\left(\frac{-512 - 2816 u^2 - 6336 u^4 + 151 \sqrt{2} (1 + u^2)^{11/2})}{24(1+u^{2})^{11/2}} - 924 u^{6}\sum_{k\gammaeq 1} {-1/2 \choose k} H_{k-3}\right) \\
\end{align*}
and
\betagin{align*}
&\left|924 u^{6}\sum_{k\gammaeq 1} {-1/2 \choose k} H_{k-3}\right| \leq 462u^{6}H_{-2} = \frac{7}{48}u^{2}\left|-13 \sqrt{2} - \frac{576}{(1+u^2)^{11/2}} + \frac{704}{(1+u^2)^{9/2}}\right|.
\end{align*}
The term $L_{23}$ can be decomposed as:
\betagin{align*}
L_{23} & = \frac{2}{1+r}\left(495 u^{4} H_{-2} + 495 u^{4}\sum_{k\gammaeq 1} {-1/2 \choose k} H_{k-2}\right) \\
& = \frac{2}{1+r}\left(- \frac{65}{16 \sqrt{2}} - \frac{90}{(1+u^2)^{11/2}} + \frac{110}{(1+u^2)^{9/2}} + 495 u^{4}\sum_{k\gammaeq 1} {-1/2 \choose k} H_{k-2}\right) \\
\end{align*}
and we have the bound
\betagin{align*}
&\left|495 u^{4}\sum_{k\gammaeq 1} {-1/2 \choose k} H_{k-2}\right| \leq \frac{495}{2}u^{4}H_{-1} =
\frac{45}{64} u^2 \left(-\sqrt{2} +\frac{64}{(1+u^2)^{11/2}}\right).
\end{align*}
We can split $L_{22}$ into:
\betagin{align*}
L_{22} & = \frac{2}{1+r}\left(-66 u^{2} H_{-1} - 66 u^{2}\sum_{k\gammaeq 1} {-1/2 \choose k} H_{k-1}\right) \\
& = \frac{2}{1+r}\left(\frac{3}{8 \sqrt{2}} - \frac{12}{(1+u^2)^{11/2}} - 66 u^{2}\sum_{k\gammaeq 1} {-1/2 \choose k} H_{k-1}\right) \\
\end{align*}
and bound the second sum by
\betagin{align*}
&\left|66 u^{2}\sum_{k\gammaeq 1} {-1/2 \choose k} H_{k-1}\right| \leq 33u^{2}H_{0} \\
& = 33u^{2}\left(\frac{137969}{55440 \sqrt{2}} - \frac{2}{11(1+u^2)^{11/2}} - \frac{2}{9(1+u^2)^{9/2}}\right. \\
& \left.-\frac{2}{7(1+u^2)^{7/2}} - \frac{2}{5(1+u^2)^{5/2}} - \frac{2}{3(1+u^2)^{3/2}}\right. \\
& \left. - \frac{2}{\sqrt{1+u^{2}}} - 2 \arcsinh(1) + 2 \arcsinh\left(\frac{1}{|u|}\right)\right).
\end{align*}
In order to compute $L_{21}$ we use formula \eqref{formulaL}:
\betagin{align*}
\sum_{k\gammaeq 0} {-1/2 \choose k} H_{k} & = H_{0} + \sum_{k\gammaeq 1} {-1/2 \choose k} H_{k} \\
& = H_{0} + \sum_{k\gammaeq 1} {-1/2 \choose k} \frac{1}{k}\left(\frac{1}{(1+u^{2})^{11/2}} - \frac{u^{2k}}{\sqrt{2^{11}}}\right) - u^{2} \sum_{k\gammaeq 1} {-1/2 \choose k}\left(\frac{2k+11}{2k}\right)H_{k-1}\\
& = \frac{137969}{55440 \sqrt{2}} - \frac{2}{11(1+u^2)^{11/2}} - \frac{2}{9(1+u^2)^{9/2}} -\frac{2}{7(1+u^2)^{7/2}} - \frac{2}{5(1+u^2)^{5/2}} \\
& - \frac{2}{3(1+u^2)^{3/2}} - \frac{2}{\sqrt{1+u^{2}}} - 2 \arcsinh(1) + 2 \arcsinh\left(\frac{1}{|u|}\right) \\
& + \frac{1}{64}\left(\frac{128(\log(2) - \arcsinh(1))}{(1+u^2)^{5/2}} - \sqrt{2}\log(4) + 2\sqrt{2}\log(1+\sqrt{1+u^2})\right) \\
& - u^{2} \sum_{k\gammaeq 1} {-1/2 \choose k}\left(\frac{2k+11}{2k}\right)H_{k-1}.
\end{align*}
The last sum can be bounded as usual by
\betagin{align*}
& \left|u^{2} \sum_{k\gammaeq 1} {-1/2 \choose k}\left(\frac{2k+11}{2k}\right)H_{k-1}\right| \leq \frac{1}{2}\frac{11}{2}u^{2}H_{0} \\
& = \frac{11}{4}u^{2}\left|\frac{137969}{55440 \sqrt{2}} - \frac{2}{11(1+u^2)^{11/2}} - \frac{2}{9(1+u^2)^{9/2}} -\frac{2}{7(1+u^2)^{7/2}} - \frac{2}{5(1+u^2)^{5/2}}\right. \\
& \left.- \frac{2}{3(1+u^2)^{3/2}} - \frac{2}{\sqrt{1+u^{2}}} - 2 \arcsinh(1) + 2 \arcsinh\left(\frac{1}{|u|}\right)\right|.
\end{align*}
We finally add everything together to write $L$ as
\betagin{align*}
L & = L_{ho,ns} + L_{ho,s} + L_{e,ns} + L_{e,s}
\end{align*}
where
\betagin{align*}
L_{ho,ns}
& = \frac{4}{1+r}\left(\arcsinh(1) - 33 u^2 (\sqrt{2} - \arcsinh(1)) - u^4 \left(\frac{495}{8}\left(\sqrt{2}-3\arcsinh(1)\right)\right)\right) \\
& + \frac{4}{1+r}\left(- \frac{77}{4}u^{6}\left(13 \sqrt{2} - 15 \arcsinh(1)\right) -\frac{165}{128} u^8 (43 \sqrt{2} - 105 \arcsinh(1)) \right.\\ &\left. \qquad\qquad - \frac{33}{640} u^{10} (257 \sqrt{2} - 315 \arcsinh(1))\right)\\
& + \frac{4}{1+r}\left( -\frac{7}{15360}u^{12}(221 \sqrt{2} - 495 \arcsinh(1)) \right) \\
& + \frac{4}{1+r}\left(\arcsinh(1)-\frac{2182}{3465}\sqrt{2}\right) \\
& + \frac{2}{1+r}\left(u^{12}H(-6) - 66 u^{10}H(-5) + 495u^8H(-4) - 924u^6H(-3) +
495u^4H(-2) - 66u^2H(-1)\right) \\
& + \frac{2}{1+r}\left( H(0) - 2 \arcsinh\left(\frac{1}{|u|}\right)+\sum_{k\gammaeq 1} {-1/2 \choose k} \frac{1}{k}\left(\frac{1}{(1+u^{2})^{11/2}} - \frac{u^{2k}}{\sqrt{2^{11}}}\right)\right) \\
& = \frac{4}{1+r}\left(\arcsinh(1) + 33u^2(\arcsinh(1)-\sqrt{2}) - \frac{495}{8}u^4\left(\sqrt{2}-3\arcsinh(1)\right)\right. \\ &\left. \qquad\qquad - \frac{77}{4}u^6\left(13\sqrt{2} - 15 \arcsinh(1)\right)\right) \\
& + \frac{4}{1+r}\left(-\frac{165}{128}u^8\left(43\sqrt{2} - 105 \arcsinh(1)\right) - \frac{33}{640}u^{10}\left(257\sqrt{2} - 315 \arcsinh(1)\right)\right. \\ & \left. \qquad\qquad - \frac{7 u^{12}}{15360}\left(221\sqrt{2} - 495\arcsinh(1)\right)\right) \\
& + \frac{4}{1+r}\left(-\frac{8}{3465(1+u^2)^{11/2}}(1627+11u^2(-604 + 3366 u^2 - 2268 u^4 + 945 u^6))
\right) \\
& + \frac{4}{1+r}\left(\frac{\log(2)-\arcsinh(1)}{(1+u^2)^{11/2}} + \frac{\log\left(\frac{1}{2}\left(1+\sqrt{1+u^2}\right)\right)}{32\sqrt{2}}\right) \\
L_{ho,s} & = \frac{4}{1+r} \arcsinh\left(\frac{1}{|u|}\right)
\end{align*}
and
\betagin{align*}
|L_{e,ns}| & \leq \frac{4}{1+r}\left(\frac{13}{2}u^2 + 32 u^2\right)\\
& + \frac{4}{1+r}\left|\frac{u^{2}}{443520}\frac{8192 + 45056 u^2 + 101376 u^4 + 118272 u^6 + 73920 u^8 - 5419 \sqrt{2} (1 + u^2)^{11/2}}{(1+u^2)^{11/2}}\right| \\
& + \frac{4}{1+r}\left|\frac{u^{2}}{2240}\frac{1024 + 5632 u^2 + 12672 u^4 + 14784 u^6 - 533 \sqrt{2} (1 + u^2)^{11/2}}{(1+u^2)^{11/2}}\right| \\
& + \frac{4}{1+r}\left|\frac{5u^{2}}{896}\frac{512 + 2816 u^2 + 6336 u^4 - 151 \sqrt{2}(1 + u^2)^{11/2}}{(1+u^2)^{11/2}}\right| \\
& + \frac{4}{1+r}\left|\frac{7}{96} u^2 \left(-13 \sqrt{2} - \frac{576}{(1+u^2)^{11/2}} + \frac{704}{(1+u^2)^{9/2}}\right)\right| \\
& + \frac{4}{1+r}\left|\frac{45 u^{2}}{128}\left(-\sqrt{2} + \frac{64}{(1+u^2)^{11/2}}\right)\right| \\
& + \frac{4}{1+r}\left|\frac{143}{8}u^{2}\left(\frac{137969}{55440 \sqrt{2}} - \frac{2}{11(1+u^2)^{11/2}} - \frac{2}{9(1+u^2)^{9/2}} -\frac{2}{7(1+u^2)^{7/2}} - \frac{2}{5(1+u^2)^{5/2}}\right.\right. \\
& \left.\left.- \frac{2}{3(1+u^2)^{3/2}} - \frac{2}{\sqrt{1+u^{2}}} - 2 \arcsinh(1)\right)\right| \\
|L_{e,s}| & \leq \frac{4}{1+r}\frac{143}{4}u^{2} \arcsinh\left(\frac{1}{|u|}\right).
\end{align*}
\section{Implementation of the computer-assisted part and rigorous numerical results}
\label{sectioncomputerassisted}
In this section we discuss the technical details of the implementation of the different integrals that appear in the proofs. We remark that we are computing explicit (but complicated) functions over a one-dimensional domain. In order to perform the rigorous computations we used the C-XSC library \cite{CXSC}. The code can be found in the supplementary material.
The implementation is split into several files, and many of the function headers (such as the integration methods) contain pointers to functions (the integrands) so that they can be reused for an arbitrary number of integrals with minimal changes and easy, safe debugging. For the sake of clarity, and at the cost of numerical performance and some duplication in the code, we decided to treat many simple integrals instead of a single big one.
We only integrate in $\rho'$ (or, more precisely, in $\tilde{\rho}'$: see below for the change of variables). We outline the computation of $\tilde{I}(\rho)$ here; the other parts of the functions are calculated in the same way.
In order to minimize the impact of $a$ and $\beta$ being too small, we transform the original domain $[1-a,1]$ into a reference one, $[-1,1]$:
\begin{align*}
\tilde{I}(\rho) & = -\frac{1}{2\pi \rho} \int_{1-a}^{1} f'(\rho') K^{1}\left(\frac{\rho}{\rho'}\right) d\rho' \\
& = -\frac{1}{2\pi \left(\frac{a}{2}(\tilde{\rho}-1)+1\right)} \int_{-1}^{1} \frac{a}{2}f_{\rho}\left(\frac{a}{2}(\tilde{\rho}'-1)+1\right)K^{1}\left(\frac{(\tilde{\rho}-1)+\frac{2}{a}}{(\tilde{\rho}'-1)+\frac{2}{a}}\right) d\tilde{\rho}' \\
& = -\frac{1}{2\pi \left(\frac{a}{2}(\tilde{\rho}-1)+1\right)} \int_{-1}^{1} \frac{a}{2}f_{\rho}\left(\frac{a}{2}(\tilde{\rho}'-1)+1\right)(I_{ho,s} + I_{ho,ns} + I_{e,s} + I_{e,ns})\left(\frac{(\tilde{\rho}-1)+\frac{2}{a}}{(\tilde{\rho}'-1)+\frac{2}{a}}\right) d\tilde{\rho}' \\
& \equiv \tilde{I}(\tilde{\rho}),
\end{align*}
where $\tilde{\rho} = \frac{2}{a}(\rho-1)+1$.
There are two basic classes in the programs that contain all the information used throughout the computations. The first one is called \texttt{ParameterSet} and has the following members: two doubles, \texttt{abs\_tol} and \texttt{rel\_tol}, providing the tolerances used to accept or reject the enclosure of an integral in the adaptive integration scheme described below; two intervals, \texttt{a} and \texttt{beta}, which are parameters of the system (we take them to be intervals since the actual value of $a$ is not representable on a computer); two intervals, \texttt{Left} and \texttt{Right}, describing the boundaries of the integration region; two integers, \texttt{region\_rho} and \texttt{region\_rhop}, indicating whether $\tilde{\rho}$ and $\tilde{\rho}'$ lie in $[-1,-1+\beta]$, $[-1+\beta,1-\beta]$ or $[1-\beta,1]$, respectively; and finally an interval \texttt{rho\_normalized} containing the value of $\tilde{\rho}$ (we recall that we integrate in $\tilde{\rho}'$). The second data structure is called \texttt{IntegrationResult} and is composed of a \texttt{ParameterSet}; an interval \texttt{result} containing the result of the integration; an ivector (vector of intervals) \texttt{error\_by\_coordinate}, which carries information about the error in the different directions; and an integer \texttt{flag}, which is set to 1 if we ever encounter an error in the program (e.g. a division by zero due to overestimation).
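Schematically, and leaving aside the C-XSC specific types (the placeholder \texttt{Interval} and \texttt{std::vector<Interval>} below stand for the \texttt{interval} and \texttt{ivector} classes), the two data structures can be summarized as follows:
\begin{verbatim}
#include <vector>

struct Interval { double lo, hi; };        // placeholder for a C-XSC interval

struct ParameterSet {
    double abs_tol, rel_tol;      // tolerances to accept/reject an enclosure
    Interval a, beta;             // parameters of the system (enclosed by intervals)
    Interval Left, Right;         // boundaries of the integration region
    int region_rho, region_rhop;  // region of rho~ and rho~' among the three subintervals
    Interval rho_normalized;      // value of rho~ (the integration runs in rho~')
};

struct IntegrationResult {
    ParameterSet params;                        // parameters used for this piece
    Interval result;                            // enclosure of the integral
    std::vector<Interval> error_by_coordinate;  // error information per direction
    int flag;                                   // 1 if an error occurred (e.g. division by zero)
};
\end{verbatim}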
We now explain how the integrals are calculated. For technical reasons explained below, we split the integration region $[-1,1]$ into smaller pieces and sum the contributions over each piece. Regardless of the domain, the integration is done adaptively unless specified otherwise. We keep track of the regions over which we still need to integrate in a Standard Template Library \texttt{priority\_queue}, which keeps the \texttt{IntegrationResult}s sorted by the absolute width of their member \texttt{result}. We operate by taking the topmost element (i.e. the one with the largest absolute width) and deciding whether to accept or reject its result. This decision is based on the width of the result in an absolute and a relative (to the length of the integration region) sense: it has to be smaller than \texttt{abs\_tol} and \texttt{rel\_tol}, respectively. If the result is rejected, we split the region at its midpoint and recompute the integral on both subregions. In order to avoid infinite loops -- which could potentially happen since there is uncertainty in the value of $\tilde{\rho}$ -- we repeat this step at most \texttt{MAX\_ELEMENTS\_EVALUATED} times. In our code, \texttt{MAX\_ELEMENTS\_EVALUATED} $= 100{,}000$. All integrations are done using a Gauss--Legendre quadrature of order 2, given by:
\begin{align*}
\int_{a}^{b} f(x)\, dx \in \frac{b-a}{2}\left(f\left(\frac{b-a}{2}\frac{\sqrt{3}}{3} + \frac{b+a}{2}\right)+f\left(-\frac{b-a}{2}\frac{\sqrt{3}}{3} + \frac{b+a}{2}\right)\right)
+\frac{1}{4320}(b-a)^{5}f^{(4)}([a,b]).
\end{align*}
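The following is a minimal, simplified sketch of the quadrature step and of the adaptive loop just described (it is not the code of the supplementary material: the toy \texttt{Interval} type replaces the C-XSC class, \texttt{f} and \texttt{f4} are placeholders for interval evaluations of the integrand and of an enclosure of its fourth derivative on $[a,b]$, and reading \texttt{rel\_tol} as relative to the length of each piece is our interpretation of the description above):
\begin{verbatim}
#include <cmath>
#include <functional>
#include <queue>
#include <vector>

struct Interval { double lo, hi; };
Interval operator+(Interval x, Interval y) { return {x.lo + y.lo, x.hi + y.hi}; }
Interval scale(double c, Interval x) { return {c * x.lo, c * x.hi}; }  // assumes c >= 0
double width(Interval x) { return x.hi - x.lo; }

// Order-2 Gauss-Legendre enclosure on [a,b] with remainder (b-a)^5 f^(4)([a,b]) / 4320.
Interval gauss_legendre2(double a, double b,
                         const std::function<Interval(double)>& f,
                         const std::function<Interval(double, double)>& f4) {
    double h = (b - a) / 2.0, m = (b + a) / 2.0, s = h * std::sqrt(3.0) / 3.0;
    Interval remainder = scale(std::pow(b - a, 5) / 4320.0, f4(a, b));
    return scale(h, f(m + s) + f(m - s)) + remainder;
}

// Adaptive bisection: widest enclosure first, accept it or split at the midpoint.
Interval integrate_adaptive(double a, double b,
                            const std::function<Interval(double)>& f,
                            const std::function<Interval(double, double)>& f4,
                            double abs_tol, double rel_tol, int max_steps) {
    struct Piece { double a, b; Interval val; };
    auto narrower = [](const Piece& p, const Piece& q) { return width(p.val) < width(q.val); };
    std::priority_queue<Piece, std::vector<Piece>, decltype(narrower)> pending(narrower);
    pending.push({a, b, gauss_legendre2(a, b, f, f4)});
    Interval accepted = {0.0, 0.0};
    for (int step = 0; step < max_steps && !pending.empty(); ++step) {
        Piece p = pending.top();
        pending.pop();
        if (width(p.val) <= abs_tol && width(p.val) <= rel_tol * (p.b - p.a)) {
            accepted = accepted + p.val;   // piece accepted
            continue;
        }
        double mid = 0.5 * (p.a + p.b);    // piece rejected: split at the midpoint
        pending.push({p.a, mid, gauss_legendre2(p.a, mid, f, f4)});
        pending.push({mid, p.b, gauss_legendre2(mid, p.b, f, f4)});
    }
    // Whatever remains after max_steps is still a valid enclosure and is added.
    while (!pending.empty()) { accepted = accepted + pending.top().val; pending.pop(); }
    return accepted;
}
\end{verbatim}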
Once we have defined the basic classes and explained the integration method, we now turn to the splitting of the interval $[-1,1]$. We compute 4 different integrals depending on whether we are integrating $I_{ho,ns}$, $I_{ho,s}$, $I_{e,ns}$ or $I_{e,s}$ (see Appendix \ref{sectionasymptotics}). On the one hand, the integrals of $I_{ho,ns}$ and $I_{e,ns}$ are performed on the full interval $[-1,1]$, taking care of adjusting the regions of $\tilde{\rho}$ and $\tilde{\rho}'$ (see Figure \ref{regionsF} for a depiction of the different regions) so that the correct expression of $f_{\rho}$ is used in each region. We remark that, because of the monotonicity of $f_{\rho}$, whenever we want to evaluate $f_{\rho}$ on an interval it is enough to compute it at the endpoints and take the hull of the two results. On the other hand, the integrals of $I_{ho,s}$ and $I_{e,s}$ are split into a staircase domain and a singularity region depending on $(\rho,\rho')$. The staircase domain is shown in Figure \ref{staircase} for $N = 20$, $\beta = \frac{2}{N}$.
\begin{figure}[ht]
\centering
\includegraphics[width=0.6\textwidth]{regionsF.eps}
\caption{The 9 different possibilities in $(\rho,\rho')$ leading to different values of $f'$.}
\label{regionsF}
\end{figure}
\begin{figure}[ht]
\centering
\includegraphics[width=0.6\textwidth]{staircase.eps}
\caption{Different integration regions. Colored: singular region; white: staircase region.}
\label{staircase}
\end{figure}
In order to integrate over the singularity region, we factor everything out of the singular factor $\arcsinh\left(\frac{1}{|u|}\right)$ and integrate the latter explicitly.
For example, if we want to integrate
\begin{align}\label{integralarcsinh}
\int A(\rho,\rho') \arcsinh\left(\frac{1}{|u|}\right) d\rho'
\end{align}
and we have uniform bounds on $A$ of the type
\begin{align*}
\underline{a} \leq A(\rho,\rho') \leq \overline{a},
\end{align*}
then the integral \eqref{integralarcsinh} can be bounded by
\begin{align*}
\underline{a}\int \arcsinh\left(\frac{1}{|u|}\right) d\rho' \leq \int A(\rho,\rho') \arcsinh\left(\frac{1}{|u|}\right) d\rho' \leq \overline{a}\int \arcsinh\left(\frac{1}{|u|}\right) d\rho',
\end{align*}
yielding the enclosure
\begin{align*}
\int A(\rho,\rho') \arcsinh\left(\frac{1}{|u|}\right) d\rho' \in \left[\underline{a} \int \arcsinh\left(\frac{1}{|u|}\right)d\rho' ,\overline{a} \int \arcsinh\left(\frac{1}{|u|}\right)d\rho'\right].
\end{align*}
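In code this becomes a one-line product of enclosures; the sketch below (with the toy \texttt{Interval} type again standing in for the C-XSC class) assumes that \texttt{a\_lower} and \texttt{a\_upper} are uniform bounds for $A$ on the piece and that \texttt{S} encloses $\int \arcsinh\left(\frac{1}{|u|}\right) d\rho'$, computed from the explicit formula of the lemma below:
\begin{verbatim}
#include <algorithm>

struct Interval { double lo, hi; };

// Enclosure of the integral of A * arcsinh(1/|u|) from a_lower <= A <= a_upper
// and an enclosure S of the arcsinh integral; taking the hull of the four
// products is safe even if a_lower happens to be negative.
Interval singular_enclosure(double a_lower, double a_upper, Interval S) {
    double c[4] = {a_lower * S.lo, a_lower * S.hi, a_upper * S.lo, a_upper * S.hi};
    return {*std::min_element(c, c + 4), *std::max_element(c, c + 4)};
}
\end{verbatim}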
A straightforward but long calculation yields the following lemma, which is useful for that purpose:
\begin{lemma}
Let $-1 \leq \tilde{\rho},\tilde{\rho}' \leq 1$, $0 < a < 1$. We have that:
\begin{align*}
& \int_{c}^{d} \arcsinh\left(\frac{|\tilde{\rho} + \tilde{\rho}' - 2 + \frac{4}{a}|}{|\tilde{\rho} - \tilde{\rho}'|}\right) d\tilde{\rho}' \\
& = -c \arcsinh\left(\frac{4+a(-2+c+\tilde{\rho})}{a|c-\tilde{\rho}|}\right) + d \arcsinh\left(\frac{4+a(-2+d+\tilde{\rho})}{a|d-\tilde{\rho}|}\right) + \tilde{\rho}\log\left(\frac{|d-\tilde{\rho}|}{|c-\tilde{\rho}|}\right) \\
& + \frac{\sqrt{2}}{a}(-2+a-a\tilde{\rho})\log\left(\frac{2+a(-1+c)+\sqrt{8+4a(-2+c+\tilde{\rho})+a^2(2+(-2+c)c+(-2+\tilde{\rho})\tilde{\rho})}}{2+a(-1+d)+\sqrt{8+4a(-2+d+\tilde{\rho})+a^2(2+(-2+d)d+(-2+\tilde{\rho})\tilde{\rho})}}\right) \\
& + \tilde{\rho} \log\left(\frac{4+a(-2+c+\tilde{\rho})+\sqrt{2}\sqrt{8+4a(-2+c+\tilde{\rho})+a^2(2+(-2+c)c+(-2+\tilde{\rho})\tilde{\rho})}}{4+a(-2+d+\tilde{\rho})+\sqrt{2}\sqrt{8+4a(-2+d+\tilde{\rho})+a^2(2+(-2+d)d+(-2+\tilde{\rho})\tilde{\rho})}}\right) \\
& = -c \log\left(\frac{4}{a}+(-2+c+\tilde{\rho}) + \sqrt{(c-\tilde{\rho})^2 + \left(\frac{4}{a}+(-2+c+\tilde{\rho})\right)^2}\right) + (c-\tilde{\rho})\log\left(|c-\tilde{\rho}|\right) \\
& +d \log\left(\frac{4}{a}+(-2+d+\tilde{\rho}) + \sqrt{(d-\tilde{\rho})^2 + \left(\frac{4}{a}+(-2+d+\tilde{\rho})\right)^2}\right) - (d-\tilde{\rho})\log\left(|d-\tilde{\rho}|\right) \\
& + \frac{\sqrt{2}}{a}(-2+a-a\tilde{\rho})\log\left(\frac{2+a(-1+c)+\sqrt{8+4a(-2+c+\tilde{\rho})+a^2(2+(-2+c)c+(-2+\tilde{\rho})\tilde{\rho})}}{2+a(-1+d)+\sqrt{8+4a(-2+d+\tilde{\rho})+a^2(2+(-2+d)d+(-2+\tilde{\rho})\tilde{\rho})}}\right) \\
& + \tilde{\rho} \log\left(\frac{4+a(-2+c+\tilde{\rho})+\sqrt{2}\sqrt{8+4a(-2+c+\tilde{\rho})+a^2(2+(-2+c)c+(-2+\tilde{\rho})\tilde{\rho})}}{4+a(-2+d+\tilde{\rho})+\sqrt{2}\sqrt{8+4a(-2+d+\tilde{\rho})+a^2(2+(-2+d)d+(-2+\tilde{\rho})\tilde{\rho})}}\right). \\
\end{align*}
\end{lemma}
The computations were run on a desktop with 8 cores at 3.10 GHz and 8 GB of RAM. The runtimes are reported separately in each of the lemmas below.
We will explain the algorithms and procedures of the different lemmas from easiest to hardest, irrespective of the order in which the lemmas appear in the main sections of the paper.
\begin{proofsth}{Lemma \ref{lemmaImayorquelambda}}
We compute the hull (an enclosure of the range) of $\tilde{I}\left(\frac{2}{a}(\tilde{\rho}-1)+1\right)$ for $\tilde{\rho} \in [-1,1]$. To do so, we split the interval $[-1,1]$ into $N = 512$ intervals $I_{j}$, $j=0,\ldots,N-1$, of equal size and compute an enclosure $II_{j}$ of $\tilde{I}\left(\frac{2}{a}(\tilde{\rho}-1)+1\right)$ for $\tilde{\rho} \in I_{j}$. We can prove the following estimate:
\begin{align*}
\min_{\tilde{\rho} \in [-1,1]} \tilde{I}\left(\frac{2}{a}(\tilde{\rho}-1)+1\right) \geq \min_{j} II_{j} > 1.2655.
\end{align*}
The minimum is attained in the last region, $\tilde{\rho} \in \left[1-\frac{2}{N},1\right]$. A nonrigorous computation shows that this minimum is indeed attained at $\tilde{\rho} = 1$. The detailed breakdown of the regions can be found in the file \texttt{output/output\_Min\_I\_512.out} in the supplementary material. The tolerances \texttt{abs\_tol} and \texttt{rel\_tol} were set to $10^{-5}$. The computation took approximately 242 minutes, giving an average time of around 7 seconds per integral.
\end{proofsth}
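A schematic version of the subdivision used in the previous proof is the following Python sketch: the interval $[-1,1]$ is cut into $N$ pieces, a per-interval enclosure of the target function is requested on each piece, and the minimum of the lower endpoints bounds the global minimum from below. The toy function, its endpoint-based enclosure and the plain floating point arithmetic are assumptions for illustration only.
\begin{verbatim}
# Sketch (illustration only): lower-bounding the minimum of a function on
# [-1, 1] by subdividing into N subintervals and combining per-interval
# enclosures.  In the actual proof the enclosures come from validated
# integration; here a placeholder enclosure routine stands in.

def lower_bound_min(enclose, N=512):
    """enclose(lo, hi) must return (inf, sup) containing the range of the
    target function on [lo, hi]; the result bounds the global minimum."""
    lows = []
    for j in range(N):
        lo = -1.0 + 2.0 * j / N
        hi = -1.0 + 2.0 * (j + 1) / N
        lows.append(enclose(lo, hi)[0])
    return min(lows)

# Placeholder: a monotone toy function enclosed by its endpoint values
# (not the I_tilde of the paper).
toy = lambda r: 1.3 + 0.05 * r
enclose_toy = lambda lo, hi: (min(toy(lo), toy(hi)), max(toy(lo), toy(hi)))
print(lower_bound_min(enclose_toy))
\end{verbatim}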
\begin{proofsth}{Lemma \ref{lemacotae}}
For every $\tilde{\rho} \in [-1,1]$, we compute an enclosure of $e\left(\frac{2}{a}(\tilde{\rho}-1)+1\right)$. To do so, we split the interval $[-1,1]$ into $N = 512$ intervals $I_{j}$, $j=0,\ldots,N-1$, of equal size and compute an enclosure $\mathcal{E}^{3}_{j}$ of $e\left(\frac{2}{a}(\tilde{\rho}-1)+1\right)$ for $\tilde{\rho} \in I_{j}$. Finally, we can compute an enclosure of the $L^{2}$ norm of $e$ and of the scalar product with $B_{sj}$ by
\begin{align*}
\|e\|_{L^{2}} & \in \left(\frac{a}{N}\sum_{j=0}^{N-1}(\mathcal{E}^{3}_{j})^{2}\right)^{\frac12}, \\
|\langle e, B_{sj} \rangle| & \in \left|\frac{a}{N}\sum_{j=0}^{N-1}\mathcal{E}^{3}_{j}B_{sj}(I_{j})\right| = \left|\frac{a}{N}\sum_{j=1}^{N-2}\mathcal{E}^{3}_{j}\right|,
\end{align*}
where we have used that $B_{sj}$ is piecewise constant on each of the $I_{j}$: it is zero if $j = 0$ or $j = N-1$ and 1 otherwise. In this particular case this yields
\begin{align*}
\|e\|_{L^{2}} & < 0.0905, \\
|\langle e,B_{sj} \rangle | & < 0.0101.
\end{align*}
The detailed breakdown of the regions can be found in the file \texttt{output/output\_E\_3.out} in the supplementary material. The tolerances \texttt{AbsTol} and \texttt{RelTol} were set to $10^{-5}$. The computation took approximately 563 minutes, giving an average time of around 16.5 seconds per integral.
\end{proofsth}
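The passage from per-interval enclosures to the $L^{2}$ bound in the previous proof can be mimicked as in the Python sketch below, assuming each $\mathcal{E}^{3}_{j}$ is given as an interval $[\ell_j,u_j]$. The sample data and the value of $a$ are invented, and the arithmetic is plain floating point rather than outward rounded.
\begin{verbatim}
import math

def square_interval(lo, hi):
    """Enclosure of {x^2 : x in [lo, hi]}."""
    if lo <= 0.0 <= hi:
        return (0.0, max(lo * lo, hi * hi))
    return (min(lo * lo, hi * hi), max(lo * lo, hi * hi))

def l2_norm_enclosure(enclosures, a):
    """Given enclosures [l_j, u_j] of e on N equal subintervals of an
    interval of length a, return an enclosure of the L^2 norm of e."""
    N = len(enclosures)
    s_lo = sum(square_interval(l, u)[0] for (l, u) in enclosures) * a / N
    s_hi = sum(square_interval(l, u)[1] for (l, u) in enclosures) * a / N
    return (math.sqrt(s_lo), math.sqrt(s_hi))

# Invented sample enclosures, only to show the mechanics:
sample = [(-0.09 + 0.0001 * j, -0.08 + 0.0001 * j) for j in range(512)]
print(l2_norm_enclosure(sample, a=0.1))
\end{verbatim}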
\begin{proofsth}{Lemma \ref{lemacotasABC}}
For every $\tilde{\rho} \in [-1,1]$, we compute an enclosure of $\Theta^{3}_{A}B_{sj}\left(\frac{2}{a}(\tilde{\rho}-1)+1\right)$. To do so, we split the interval $[-1,1]$ into $N = 512$ intervals $I_{j}$, $j=0,\ldots,N-1$, of equal size and compute an enclosure $T^{3}_{A,j}$ of $\Theta^{3}_{A}B_{sj}\left(\frac{2}{a}(\tilde{\rho}-1)+1\right)$ for $\tilde{\rho} \in I_{j}$. Finally, we can compute an enclosure of the $L^{2}$ norm of $\Theta^{3}_{A}B_{sj}$ by
\begin{align*}
\|\Theta^{3}_{A}B_{sj}\|_{L^{2}} \in \left(\frac{a}{N}\sum_{j=0}^{N-1}(T^{3}_{A,j})^{2}\right)^{\frac12},
\end{align*}
yielding in this particular case
\begin{align*}
\|\Theta^{3}_{A}B_{sj}\|_{L^{2}} < 0.0629.
\end{align*}
The detailed breakdown of the regions can be found in the file \texttt{output/output\_Theta\_A\_N\_512.out} in the supplementary material. The tolerances \texttt{abs\_tol} and \texttt{rel\_tol} were set to $10^{-5}$. The computation took approximately 405 minutes, giving an average time of around 12 seconds per integral.
\end{proofsth}
\begin{proofsth}{Lemma \ref{lemacotascestrella}}
To obtain a bound on $c^{*}$ we will employ the following strategy. First, we can bound $c^{*}$ in the following way:
\begin{align*}
c^{*} \geq \min \tilde{I}(\rho) + \min_{\|u\|_{L^{2}} = 1, u \in (B_{sj})^{\perp}} \langle T^{3}_{S} u, u\rangle,
\end{align*}
where $T_{S}^{3}$ is the symmetric part of $\Theta^{3} - \tilde{I}$, given by
\begin{align*}
T_{S}^{3} u(\rho) & = \frac12 \int_{1-a}^{1}\left(f_\rho(\rho')K^3\left(\frac{\rho}{\rho'}\right)\frac{1}{\rho} +
f_\rho(\rho)K^3\left(\frac{\rho'}{\rho}\right)\frac{1}{\rho'}\right)u(\rho') d\rho'.
\end{align*}
We know that $T_{S}^{3}$ is symmetric and compact. We approximate it by a finite rank operator. For any $\tilde{\rho} \in [-1,1]$:
\begin{align*}
(T_{S}^{3}u)\left(\frac{a}{2}(\tilde{\rho}-1)+1\right) = T_{S}^{3}u(\tilde{\rho}) \approx T_{fin}^{3}u(\tilde{\rho}) = \int_{-1}^{1}\frac{a}{2}\sum_{i,j=0}^{23} (T_{fin}^{3})_{ij}u_i(\tilde{\rho})u_j(\tilde{\rho}') u\left(\frac{a}{2}(\tilde{\rho}'-1)+1\right) d\tilde{\rho}'.
\end{align*}
The matrix $T_{fin}^{3}$ is symmetric and given explicitly in Appendix \ref{appendixprojections}. The functions $u_i$ are an orthonormal basis chosen in the following way:
\begin{align*}
u_i(\tilde{\rho}) & =
\left\{
\begin{array}{cc}
\sqrt{\frac{2(2[i/3]+1)}{a\beta}}\text{Leg}([i/3],\frac{2}{\beta}(\tilde{\rho}+1)-1)1_{\{-1 \leq \tilde{\rho} \leq -1+\beta\}}& \text{ if } i \equiv 0 \pmod 3 \\
\sqrt{\frac{2[i/3]+1}{a-a\beta}}\text{Leg}([i/3],\frac{\tilde{\rho}}{1-\beta})1_{\{-1+\beta \leq \tilde{\rho} \leq 1-\beta\}} & \text{ if } i \equiv 1 \pmod 3 \\
\sqrt{\frac{2(2[i/3]+1)}{a\beta}}\text{Leg}([i/3],\frac{2}{\beta}(\tilde{\rho}-1)+1)1_{\{1-\beta \leq \tilde{\rho} \leq 1\}} & \text{ if } i \equiv 2 \pmod 3 \\
\end{array}
\right.
\end{align*}
where $\text{Leg}(n,x)$ stands for the standard Legendre polynomial of degree $n$, defined for $x \in [-1,1]$ by
\begin{align*}
\text{Leg}(0,x) = 1, & \quad \text{Leg}(1,x) = x, \\
(n+1)\text{Leg}(n+1,x) & = (2n+1)x\text{Leg}(n,x) - n\text{Leg}(n-1,x), \quad n \geq 1.
\end{align*}
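For reference, the three-term recurrence just stated can be implemented directly, as in the following short Python sketch (plain floating point, illustration only).
\begin{verbatim}
def legendre(n, x):
    """Standard Legendre polynomial Leg(n, x) on [-1, 1] via the
    recurrence (n+1) P_{n+1} = (2n+1) x P_n - n P_{n-1}."""
    if n == 0:
        return 1.0
    p_prev, p_curr = 1.0, x            # Leg(0, x), Leg(1, x)
    for k in range(1, n):
        p_prev, p_curr = p_curr, ((2 * k + 1) * x * p_curr - k * p_prev) / (k + 1)
    return p_curr

# Quick check against Leg(2, x) = (3x^2 - 1)/2:
print(legendre(2, 0.5), (3 * 0.25 - 1) / 2)
\end{verbatim}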
Note that $B_{sj}$ corresponds to $u_1$. We now split $T_{S}^{3} = T_{fin}^{3} + (T_{S}^{3} - T_{fin}^{3})$ and estimate
\begin{align*}
\min_{\|u\|_{L^{2}} = 1, u \in (B_{sj})^{\perp}} \langle T^{3}_{S} u, u\rangle
\geq \min_{\|u\|_{L^{2}} = 1, u \in (B_{sj})^{\perp}} \langle T^{3}_{fin} u, u\rangle
+ \min_{\|u\|_{L^{2}} = 1, u \in (B_{sj})^{\perp}} \langle (T^{3}_{S} - T^{3}_{fin})u, u\rangle.
\end{align*}
The first term in the sum is simply the smallest eigenvalue of the matrix $T_{fin}^{3}$ without the second row and the second column. By Gershgorin's theorem \cite{Gerschgorin:eigenvalues-theorem}, the eigenvalues of an $n \times n$ matrix $A$ lie inside the union of the disks
\begin{align*}
D_{i} = \left\{z \in \mathbb{C} : |z-A_{ii}| \leq \sum_{\substack{j=1 \\ j\neq i}}^{n} |A_{ij}|\right\}, \quad i =1, \ldots, n.
\end{align*}
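A direct translation of this statement into a lower bound for the eigenvalues of a real matrix is sketched below in Python. The sample matrix is invented, and any rigorous use of such a bound would evaluate the row sums with outward rounding.
\begin{verbatim}
def gershgorin_lower_bound(A):
    """Lower bound on the eigenvalues of a square matrix A:
    min over i of A[i][i] - sum_{j != i} |A[i][j]| (Gershgorin disks)."""
    n = len(A)
    return min(A[i][i] - sum(abs(A[i][j]) for j in range(n) if j != i)
               for i in range(n))

# Invented symmetric example (not the matrix of the paper):
A = [[-0.30, 0.01, 0.00],
     [ 0.01, 0.10, 0.02],
     [ 0.00, 0.02, 0.20]]
print(gershgorin_lower_bound(A))   # -> -0.31
\end{verbatim}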
In our particular case, Gershgorin's theorem implies that
\begin{align*}
\min_{\|u\|_{L^{2}} = 1, u \in (B_{sj})^{\perp}} \langle T^{3}_{fin} u, u\rangle > -0.3125,
\end{align*}
where the leftmost disk of $T_{fin}^{3}$ is $D_{5}$. The second term can be bounded via the operator norm:
\begin{align*}
\left|\min_{\|u\|_{L^{2}} = 1, u \in (B_{sj})^{\perp}} \langle (T^{3}_{S} - T^{3}_{fin})u, u\rangle\right|
\leq \|T^{3}_{S} - T^{3}_{fin}\|_{L^{2} \rightarrow L^{2}} \\
\leq
\max_{\tilde{\rho} \in [-1,1]} \int_{-1}^{1} \left|\frac{a}{2}\frac12 \left(f_\rho\left(\frac{a}{2}(\tilde{\rho}'-1)+1\right)K^3\left(\frac{\frac{a}{2}(\tilde{\rho}-1)+1}{\frac{a}{2}(\tilde{\rho}'-1)+1}\right)\frac{1}{(\frac{a}{2}(\tilde{\rho}-1)+1)} \right.\right. \\
\left.\left.+
f_\rho\left(\frac{a}{2}(\tilde{\rho}-1)+1\right)K^3\left(\frac{\frac{a}{2}(\tilde{\rho}'-1)+1}{\frac{a}{2}(\tilde{\rho}-1)+1}\right)\frac{1}{(\frac{a}{2}(\tilde{\rho}'-1)+1)}\right)\right. \\
\left. - \frac{a}{2}\sum_{i,j=0}^{23} (T_{fin}^{3})_{ij}u_i(\tilde{\rho})u_j(\tilde{\rho}')\right|
d\tilde{\rho}',
\end{align*}
where we have used that the operator is symmetric together with the generalized Young inequality. We computed the following bound:
\begin{align*}
\|T^{3}_{S} - T^{3}_{fin}\|_{L^{2}} \leq 0.1004.
\end{align*}
To do so, we split the subdomains $[-1,-1+\beta]$, $[-1+\beta,1-\beta]$ and $[1-\beta,1]$ for $\tilde{\rho}'$ into uniform meshes of $N_1 = 512$, $N_2 = 510 \cdot 16$ and $N_3 = 512$ subintervals respectively. Since we expect bounds of the order of the width of the integration interval and cannot do better (the integrand is not $C^{1}$), we simply evaluate the integrand on the full subinterval (a quadrature of order 0). We are careful with the singular part of the integrand: whenever $\tilde{\rho} \in I_j$, $\tilde{\rho}' \in I_k$ and $|j-k| < 2$, all the terms that multiply the $\arcsinh$ factor are bounded first, and the $\arcsinh$ factor is then integrated explicitly and separately. Otherwise we evaluate the full integrand. For every $\tilde{\rho} \in I_i$ we sum over all $j$ such that $\tilde{\rho}' \in I_j$ to obtain the $L^1$ bound. Finally, we take the maximum over every $\tilde{\rho}$.
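The mesh-based procedure just described amounts to a Schur-type bound: for each $\tilde{\rho}$-cell one sums, over the $\tilde{\rho}'$-cells, an upper bound of the absolute value of the kernel difference times the cell width, and then maximizes over the $\tilde{\rho}$-cells. A schematic Python version, with an invented stand-in for the cell-wise kernel bound, is given below.
\begin{verbatim}
def operator_norm_bound(kernel_abs_bound, rho_cells, rhop_cells):
    """Schur-type bound: max over rho-cells of the L^1 (in rho') sum of
    cell-wise upper bounds of the absolute kernel difference.
    kernel_abs_bound(c1, c2) must return an upper bound of |K - K_fin|
    on the cell c1 x c2 (here a stand-in)."""
    best = 0.0
    for c1 in rho_cells:
        row = sum(kernel_abs_bound(c1, c2) * (c2[1] - c2[0]) for c2 in rhop_cells)
        best = max(best, row)
    return best

# Invented stand-in for the cell-wise kernel bound:
def toy_bound(c1, c2):
    m1, m2 = 0.5 * (c1[0] + c1[1]), 0.5 * (c2[0] + c2[1])
    return 1.0 / (1.0 + abs(m1 - m2))

cells = [(-1 + 2 * j / 64, -1 + 2 * (j + 1) / 64) for j in range(64)]
print(operator_norm_bound(toy_bound, cells, cells))
\end{verbatim}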
Putting everything together we obtain
\begin{align*}
c^{*} > 1.2655 - 0.3125 - 0.1004 = 0.8526.
\end{align*}
The detailed breakdown of the regions can be found in the file \texttt{output/out\_L1\_Estimates\_T3\_N\_512.out} in the supplementary material. The computation took approximately 12 hours and 53 minutes, giving an average time of around 5 seconds per subinterval in $\tilde{\rho}$.
\end{proofsth}
\begin{proofsth}{Lemma \ref{lemacotae6}}
The proof follows the same strategy as the proof of Lemma \ref{lemacotae}. We get the following:
\begin{align*}
\|e^6\|_{L^2} < 0.0893.
\end{align*}
The detailed breakdown of the regions can be found in the file \texttt{output/output\_E\_6.out} in the supplementary material. The tolerances \texttt{abs\_tol} and \texttt{rel\_tol} were set to $10^{-5}$. The computation took approximately 604 minutes, giving an average time of around 17.5 seconds per integral.
\end{proofsth}
\begin{proofsth}{Lemma \ref{lemacotascestrella6}}
The proof follows the same strategy as the proof of Lemma \ref{lemacotascestrella}. We get the following figures:
\begin{align*}
\min_{\|u\|_{L^{2}} = 1, u \in (B_{s}^{6\text{aprox}})^{\perp}} \langle T^{6}_{fin} u, u\rangle & > -0.3121, \\
\|T^{6}_{S} - T^{6}_{fin}\|_{L^{2}} & \leq 0.1179, \\
c^{6*} & > 1.2655 - 0.3121 - 0.1179 = 0.8355.
\end{align*}
As before, the leftmost disk is $D_{5}$. The detailed breakdown of the regions can be found in the file \texttt{output/out\_L1\_Estimates\_T6\_N\_512.out} in the supplementary material. The computation took approximately 13 hours and 3 minutes, giving an average time of around 5 seconds per subinterval in $\tilde{\rho}$.
\end{proofsth}
In summary, we have proved the following bounds (we are using that $\|B_{sj}\|_{L^{2}} = \|B_{s}^{6\text{aprox}}\|_{L^{2}} = 1$):
\begin{itemize}
\item $c^{*} > 0.8526$
\item $\mathcal{A} = \Lambda^{*} + |\langle e, B_{sj} \rangle| < 0.3583$
\item $\sqrt{\mathcal{B}} \leq \|e\|_{L^{2}} + \|\Theta_{A}^{3} B_{sj}\|_{L^{2}} < 0.1534$
\item $\Lambda_{3} \leq \Lambda_{0} < 0.4117$
\item $\tilde{I} - \Lambda_{3} > 0.8538$
\item $c^{6*} > 0.8355$
\end{itemize}
\section{Finite projections}
\label{appendixprojections}
In this section we provide the two matrices of size $24 \times 24$, $T^{3}_{fin}$ and $T^{6}_{fin}$, used to approximate the finite dimensional projections of $T^{3}$ and $T^{6}$ respectively. The matrices were computed using nonrigorous integration. The matrices (in a slightly different format) can be found in the files \texttt{input/good\_projection\_N\_512.out} and \texttt{input/good\_projection\_T6\_N\_512.out}. In order to write the matrices, because of spacing issues, we will decompose $T^{3}_{fin}$ and $T^{6}_{fin}$ into the following blocks:
\begin{align*}
T^{3}_{fin} & =
\left(
\begin{array}{cccc}
T^{3}_{fin,1} & T^{3}_{fin,2} & T^{3}_{fin,3} & T^{3}_{fin,4}
\end{array}
\right) \\
T^{6}_{fin} & =
\left(
\begin{array}{cccc}
T^{6}_{fin,1} & T^{6}_{fin,2} & T^{6}_{fin,3} & T^{6}_{fin,4}
\end{array}
\right),
where every block is $24 \times 6$. The exact expressions are:
\tiny
\begin{align*}
T^{3}_{fin,1} & = \left(
\begin{array}{cccccc}
-0.00313914631 & -0.03306228140 & -0.00063757204 & -0.00130670573 & 0.00933189622 & 0.00027670636 \\
-0.03306228140 & -1.12901165788 & -0.03145756256 & -0.00822746089 & 0.00729039148 & 0.00876971358 \\
-0.00063757204 & -0.03145756256 & -0.00299812421 & -0.00022549348 & -0.00911506433 & 0.00124776818 \\
-0.00130670573 & -0.00822746089 & -0.00022549348 & -0.00021202888 & 0.00270210646 & 0.00000013750 \\
0.00933189622 & 0.00729039148 & -0.00911506433 & 0.00270210646 & -0.23717252091 & 0.00223673972 \\
0.00027670636 & 0.00876971358 & 0.00124776818 & 0.00000013750 & 0.00223673972 & -0.00020166430 \\
0.00006084465 & -0.00000985095 & -5.18229665685\cdot 10^{-8} & -0.00005068073 & 0.00001298234 & 4.63109155689\cdot 10^{-11} \\
-0.00402764479 & 0.06018656936 & -0.00384397048 & -0.00114915910 & 0.00170736716 & 0.00102743751 \\
-4.43314493090\cdot 10^{-8} & -0.00000908783 & 0.00005786424 & -4.60911625268\cdot 10^{-11} & -0.00001219458 & 0.00004810665 \\
0.00033343322 & 0.00192283641 & 0.00005295158 & 0.00004143653 & -0.00062130092 & -1.82960948332\cdot 10^{-8} \\
0.00233871666 & -0.00072531006 & -0.00222568341 & 0.00067663595 & 0.04134607065 & 0.00062743044 \\
-0.00006498459 & -0.00205039765 & -0.00031825067 & -1.39917284412\cdot 10^{-8} & -0.00051248314 & 0.00003940755 \\
0.00000533557 & 0.00000206570 & 1.15781378363\cdot 10^{-8} & 0.00003985325 & -0.00000267070 & -6.84832886920\cdot 10^{-12} \\
-0.00155761794 & 0.00542593789 & -0.00148129715 & -0.00046479429 & -0.00053855113 & 0.00043588867 \\
9.90200697265\cdot 10^{-9} & 0.00000190393 & 0.00000505660 & 5.62487168223\cdot 10^{-12} & 0.00000250964 & -0.00003785447 \\
-0.00007592114 & -0.00048164344 & -0.00001327566 & 0.00000498929 & 0.00015501214 & 4.58706059629\cdot 10^{-9} \\
0.00112127249 & -0.00008203811 & -0.00106597793 & 0.00034694746 & 0.00518713553 & 0.00032698003 \\
0.00001629250 & 0.00051364377 & 0.00007249718 & 3.50789736574\cdot 10^{-9} & 0.00012776307 & 0.00000473106 \\
0.00000125611 & 8.65982848231\cdot 10^{-8} & -3.19226015591\cdot 10^{-9} & -0.00000437207 & -0.00000039251 & 1.95786950003\cdot 10^{-12} \\
-0.00084878958 & 0.00138998159 & -0.00080678323 & -0.00027291240 & -0.00008220557 & 0.00025788023 \\
-2.73010821736\cdot 10^{-9} & 1.02554282905\cdot 10^{-7} & 0.00000119690 & -1.62202100706\cdot 10^{-12} & 0.00000038596 & 0.00000415121 \\
0.00001432963 & 0.00008343141 & 0.00000227980 & 0.00000129772 & -0.00002784648 & -7.87753818442\cdot 10^{-10} \\
0.00066557241 & -0.00002363432 & -0.00063256170 & 0.00022255360 & 0.00154868510 & 0.00021062293 \\
-0.00000279787 & -0.00008889372 & -0.00001367678 & -6.02460704593\cdot 10^{-10} & -0.00002311176 & 0.00000123395 \\
\end{array}
\right)
\end{align*}
\begin{align*}
T^{3}_{fin,2} & = \left(
\begin{array}{cccccc}
0.00006084465 & -0.00402764479 & -4.43314493090\cdot 10^{-8} & 0.00033343322 & 0.00233871666 & -0.00006498459 \\
-0.00000985095 & 0.06018656936 & -0.00000908783 & 0.00192283641 & -0.00072531006 & -0.00205039765 \\
-5.18229665685\cdot 10^{-8} & -0.00384397048 & 0.00005786424 & 0.00005295158 & -0.00222568341 & -0.00031825067 \\
-0.00005068073 & -0.00114915910 & -4.60911625268\cdot 10^{-11} & 0.00004143653 & 0.00067663595 & -1.39917284412\cdot 10^{-8} \\
0.00001298234 & 0.00170736716 & -0.00001219458 & -0.00062130092 & 0.04134607065 & -0.00051248314 \\
4.63109155689\cdot 10^{-11} & 0.00102743751 & 0.00004810665 & -1.82960948332\cdot 10^{-8} & 0.00062743044 & 0.00003940755 \\
-0.00010321148 & -0.00001414574 & -2.80290333759\cdot 10^{-14} & -0.00003848684 & 0.00001470528 & -3.67487307816\cdot 10^{-12} \\
-0.00001414574 & -0.12947879697 & -0.00001337930 & 0.00025555410 & 0.00095216359 & -0.00022761700 \\
-2.80290333759\cdot 10^{-14} & -0.00001337930 & -0.00009797657 & 4.90797112113\cdot 10^{-12} & -0.00001393579 & 0.00003651136 \\
-0.00003848684 & 0.00025555410 & 4.90797112113\cdot 10^{-12} & -0.00006239992 & -0.00014435698 & -3.49966215326\cdot 10^{-15} \\
0.00001470528 & 0.00095216359 & -0.00001393579 & -0.00014435698 & -0.08886148634 & -0.00013349522 \\
-3.67487307816\cdot 10^{-12} & -0.00022761700 & 0.00003651136 & -3.49966215326\cdot 10^{-15} & -0.00013349522 & -0.00005919817 \\
0.00003020315 & 0.00000287744 & 2.98104624843\cdot 10^{-15} & -0.00001908529 & -0.00000297094 & 1.50361388430\cdot 10^{-14} \\
-0.00001495138 & 0.03027233689 & -0.00001418074 & 0.00009471142 & 0.00066179035 & -0.00008862582 \\
3.14813570358\cdot 10^{-15} & 0.00000272492 & 0.00002869680 & -1.48184820399\cdot 10^{-14} & 0.00000282037 & 0.00001807875 \\
0.00002961061 & -0.00006309436 & -1.21506679426\cdot 10^{-12} & 0.00002355898 & 0.00003503703 & -7.01151105584\cdot 10^{-16} \\
0.00001500747 & -0.00041014040 & -0.00001423998 & -0.00006730898 & 0.02375644464 & -0.00006331650 \\
9.04914568531\cdot 10^{-13} & 0.00005613296 & -0.00002812073 & -6.97673484952\cdot 10^{-15} & 0.00003236439 & 0.00002238336 \\
0.00000406644 & 0.00000066295 & -5.79173351464\cdot 10^{-14} & 0.00002138146 & -0.00000090330 & -5.68804673351\cdot 10^{-14} \\
-0.00001493806 & 0.00436069767 & -0.00001417756 & 0.00005029006 & -0.00032965875 & -0.00004744310 \\
-7.69293477455\cdot 10^{-14} & 0.00000063871 & 0.00000386310 & 5.43227769034\cdot 10^{-14} & 0.00000086668 & -0.00002030190 \\
-0.00000252131 & 0.00001241650 & 7.34844588938\cdot 10^{-14} & 0.00000330787 & -0.00000788818 & 8.17156504431\cdot 10^{-14} \\
0.00001478127 & -0.00007108881 & -0.00001403083 & -0.00003888592 & 0.00368849025 & -0.00003674960 \\
-4.37312250174\cdot 10^{-14} & -0.00001114858 & 0.00000239297 & 1.14001332420\cdot 10^{-13} & -0.00000734374 & 0.00000314218 \\
\end{array}
\right)
\end{align*}
\begin{align*}
T^{3}_{fin,3} & =
\left(
\begin{array}{cccccc}
0.00000533557 & -0.00155761794 & 9.90200697265\cdot 10^{-9} & -0.00007592114 & 0.00112127249 & 0.00001629250 \\
0.00000206570 & 0.00542593789 & 0.00000190393 & -0.00048164344 & -0.00008203811 & 0.00051364377 \\
1.15781378363\cdot 10^{-8} & -0.00148129715 & 0.00000505660 & -0.00001327566 & -0.00106597793 & 0.00007249718 \\
0.00003985325 & -0.00046479429 & 5.62487168223\cdot 10^{-12} & 0.00000498929 & 0.00034694746 & 3.50789736574\cdot 10^{-9} \\
-0.00000267070 & -0.00053855113 & 0.00000250964 & 0.00015501214 & 0.00518713553 & 0.00012776307 \\
-6.84832886920\cdot 10^{-12} & 0.00043588867 & -0.00003785447 & 4.58706059629\cdot 10^{-9} & 0.00032698003 & 0.00000473106 \\
0.00003020315 & -0.00001495138 & 3.14813570358\cdot 10^{-15} & 0.00002961061 & 0.00001500747 & 9.04914568531\cdot 10^{-13} \\
0.00000287744 & 0.03027233689 & 0.00000272492 & -0.00006309436 & -0.00041014040 & 0.00005613296 \\
2.98104624843\cdot 10^{-15} & -0.00001418074 & 0.00002869680 & -1.21506679426\cdot 10^{-12} & -0.00001423998 & -0.00002812073 \\
-0.00001908529 & 0.00009471142 & -1.48184820399\cdot 10^{-14} & 0.00002355898 & -0.00006730898 & -6.97673484952\cdot 10^{-15} \\
-0.00000297094 & 0.00066179035 & 0.00000282037 & 0.00003503703 & 0.02375644464 & 0.00003236439 \\
1.50361388430\cdot 10^{-14} & -0.00008862582 & 0.00001807875 & -7.01151105584\cdot 10^{-16} & -0.00006331650 & 0.00002238336 \\
-0.00004067472 & 0.00000301020 & -3.70438406451\cdot 10^{-15} & -0.00001108514 & -0.00000301927 & 3.33415541431\cdot 10^{-14} \\
0.00000301020 & -0.06718915938 & 0.00000286072 & -0.00002243646 & 0.00050351668 & 0.00002096805 \\
-3.70438406451\cdot 10^{-15} & 0.00000286072 & -0.00003855497 & -4.36948352896\cdot 10^{-14} & 0.00000287165 & 0.00001047733 \\
-0.00001108514 & -0.00002243646 & -4.36948352896\cdot 10^{-14} & -0.00002712959 & 0.00001542937 & 5.10048792691\cdot 10^{-14} \\
-0.00000301927 & 0.00050351668 & 0.00000287165 & 0.00001542937 & -0.05364315620 & 0.00001449165 \\
3.33415541431\cdot 10^{-14} & 0.00002096805 & 0.00001047733 & 5.10048792691\cdot 10^{-14} & 0.00001449165 & -0.00002568508 \\
0.00001920997 & 0.00000111905 & -2.13045262845\cdot 10^{-15} & -0.00000572563 & -0.00000131717 & -1.45197814251\cdot 10^{-14} \\
0.00000301060 & 0.01951726310 & 0.00000286489 & -0.00001103849 & 0.00040280165 & 0.00001039344 \\
2.27927040204\cdot 10^{-14} & 0.00000107185 & 0.00001825080 & 3.89829629231\cdot 10^{-14} & 0.00000126051 & 0.00000538519 \\
0.00001662305 & 0.00000597029 & 1.21914579746\cdot 10^{-14} & 0.00001614121 & -0.00000498558 & -7.83391499939\cdot 10^{-14} \\
-0.00000299111 & -0.00027523003 & 0.00000284750 & 0.00000806555 & 0.01654806391 & 0.00000760366 \\
1.26943093273\cdot 10^{-14} & -0.00000562167 & -0.00001578051 & -3.69080589571\cdot 10^{-14} & -0.00000471758 & 0.00001533483 \\
\end{array}
\right)
\end{align*}
\begin{align*}
T^{3}_{fin,4} & =
\left(
\begin{array}{cccccc}
0.00000125611 & -0.00084878958 & -2.73010821736\cdot 10^{-9} & 0.00001432963 & 0.00066557241 & -0.00000279787 \\
8.65982848231\cdot 10^{-8} & 0.00138998159 & 1.02554282905\cdot 10^{-7} & 0.00008343141 & -0.00002363432 & -0.00008889372 \\
-3.19226015591\cdot 10^{-9} & -0.00080678323 & 0.00000119690 & 0.00000227980 & -0.00063256170 & -0.00001367678 \\
-0.00000437207 & -0.00027291240 & -1.62202100706\cdot 10^{-12} & 0.00000129772 & 0.00022255360 & -6.02460704593\cdot 10^{-10} \\
-0.00000039251 & -0.00008220557 & 0.00000038596 & -0.00002784648 & 0.00154868510 & -0.00002311176 \\
1.95786950003\cdot 10^{-12} & 0.00025788023 & 0.00000415121 & -7.87753818442\cdot 10^{-10} & 0.00021062293 & 0.00000123395 \\
0.00000406644 & -0.00001493806 & -7.69293477455\cdot 10^{-14} & -0.00000252131 & 0.00001478127 & -4.37312250174\cdot 10^{-14} \\
0.00000066295 & 0.00436069767 & 0.00000063871 & 0.00001241650 & -0.00007108881 & -0.00001114858 \\
-5.79173351464\cdot 10^{-14} & -0.00001417756 & 0.00000386310 & 7.34844588938\cdot 10^{-14} & -0.00001403083 & 0.00000239297 \\
0.00002138146 & 0.00005029006 & 5.43227769034\cdot 10^{-14} & 0.00000330787 & -0.00003888592 & 1.14001332420\cdot 10^{-13} \\
-0.00000090330 & -0.00032965875 & 0.00000086668 & -0.00000788818 & 0.00368849025 & -0.00000734374 \\
-5.68804673351\cdot 10^{-14} & -0.00004744310 & -0.00002030190 & 8.17156504431\cdot 10^{-14} & -0.00003674960 & 0.00000314218 \\
0.00001920997 & 0.00000301060 & 2.27927040204\cdot 10^{-14} & 0.00001662305 & -0.00000299111 & 1.26943093273\cdot 10^{-14} \\
0.00000111905 & 0.01951726310 & 0.00000107185 & 0.00000597029 & -0.00027523003 & -0.00000562167 \\
-2.13045262845\cdot 10^{-15} & 0.00000286489 & 0.00001825080 & 1.21914579746\cdot 10^{-14} & 0.00000284750 & -0.00001578051 \\
-0.00000572563 & -0.00001103849 & 3.89829629231\cdot 10^{-14} & 0.00001614121 & 0.00000806555 & -3.69080589571\cdot 10^{-14} \\
-0.00000131717 & 0.00040280165 & 0.00000126051 & -0.00000498558 & 0.01654806391 & -0.00000471758 \\
-1.45197814251\cdot 10^{-14} & 0.00001039344 & 0.00000538519 & -7.83391499939\cdot 10^{-14} & 0.00000760366 & 0.00001533483 \\
-0.00001787015 & 0.00000149962 & 1.79534093854\cdot 10^{-14} & -0.00000204184 & -0.00000166924 & -2.11654961276\cdot 10^{-13} \\
0.00000149962 & -0.04435474412 & 0.00000143467 & 0.00000442712 & 0.00033271878 & -0.00000420077 \\
1.79534093854\cdot 10^{-14} & 0.00000143467 & -0.00001688740 & 2.11077743505\cdot 10^{-13} & 0.00000159584 & 0.00000188523 \\
-0.00000204184 & 0.00000442712 & 2.11077743505\cdot 10^{-13} & -0.00001114387 & -0.00000409665 & -2.57393155040\cdot 10^{-13} \\
-0.00000166924 & 0.00033271878 & 0.00000159584 & -0.00000409665 & -0.03758247246 & -0.00000389363 \\
-2.11654961276\cdot 10^{-13} & -0.00000420077 & 0.00000188523 & -2.57393155040\cdot 10^{-13} & -0.00000389363 & -0.00001049667 \\
\end{array}
\right)
\end{align*}
\begin{align*}
T_{fin,1}^{6} & =
\left(
\begin{array}{cccccc}
-0.00291345537 & -0.02540749127 & -0.00041941419 & -0.00121786943 & 0.00933076659 & 0.00018206274 \\
-0.02540749127 & -0.90497888764 & -0.02424571525 & -0.00635382205 & 0.00563881494 & 0.00674636418 \\
-0.00041941419 & -0.02424571525 & -0.00278371098 & -0.00014838001 & -0.00907915550 & 0.00116335903 \\
-0.00121786943 & -0.00635382205 & -0.00014838001 & -0.00021204206 & 0.00262575863 & 0.00000013645 \\
0.00933076659 & 0.00563881494 & -0.00907915550 & 0.00262575863 & -0.23685105510 & 0.00230342523 \\
0.00018206274 & 0.00674636418 & 0.00116335903 & 0.00000013645 & 0.00230342523 & -0.00020165240 \\
0.00006084737 & -0.00000978359 & -4.86286767418\cdot 10^{-8} & -0.00005068073 & 0.00001298540 & 4.66620681740\cdot 10^{-11} \\
-0.00403278487 & 0.06006461031 & -0.00384880693 & -0.00114914679 & 0.00170420893 & 0.00102804953 \\
-4.61137477934\cdot 10^{-8} & -0.00000913619 & 0.00005786179 & -4.64964261953\cdot 10^{-11} & -0.00001219898 & 0.00004810665 \\
0.00031255555 & 0.00148247120 & 0.00003482935 & 0.00004143807 & -0.00060335343 & -1.67484077527\cdot 10^{-8} \\
0.00233877011 & -0.00072279657 & -0.00222565755 & 0.00067668948 & 0.04134592791 & 0.00062737193 \\
-0.00004274417 & -0.00157496654 & -0.00029841490 & -1.52922857114\cdot 10^{-8} & -0.00052815010 & 0.00003940615 \\
0.00000533496 & 0.00000205063 & 1.08637530482\cdot 10^{-8} & 0.00003985325 & -0.00000267139 & -6.88851803831\cdot 10^{-12} \\
-0.00155761678 & 0.00542592858 & -0.00148129586 & -0.00046479549 & -0.00053855101 & 0.00043588952 \\
1.03004567414\cdot 10^{-8} & 0.00000191475 & 0.00000505715 & 5.72582288790\cdot 10^{-12} & 0.00000251061 & -0.00003785447 \\
-0.00007068683 & -0.00037123808 & -0.00000873218 & 0.00000498890 & 0.00015051244 & 4.19903510403\cdot 10^{-9} \\
0.00112127247 & -0.00008204165 & -0.00106597795 & 0.00034694746 & 0.00518713748 & 0.00032698004 \\
0.00001071653 & 0.00039444690 & 0.00006752409 & 3.83396386458\cdot 10^{-9} & 0.00013169100 & 0.00000473141 \\
0.00000125628 & 9.07808750600\cdot 10^{-8} & -2.99529358660\cdot 10^{-9} & -0.00000437207 & -0.00000039232 & 1.97070264514\cdot 10^{-12} \\
-0.00084878958 & 0.00138995868 & -0.00080678324 & -0.00027291241 & -0.00008219826 & 0.00025788023 \\
-2.83996989618\cdot 10^{-9} & 9.96003237305\cdot 10^{-8} & 0.00000119675 & -1.65191684991\cdot 10^{-12} & 0.00000038570 & 0.00000415121 \\
0.00001343075 & 0.00006447176 & 0.00000149956 & 0.00000129779 & -0.00002707375 & -7.21116613265\cdot 10^{-10} \\
0.00066557241 & -0.00002363043 & -0.00063256170 & 0.00022255360 & 0.00154868413 & 0.00021062293 \\
-0.00000184033 & -0.00006842433 & -0.00001282276 & -6.58453704807\cdot 10^{-10} & -0.00002378631 & 0.00000123389 \\
\end{array}
\right)
\end{align*}
\begin{align*}
T_{fin,2}^{6} & =
\left(
\begin{array}{cccccc}
0.00006084737 & -0.00403278487 & -4.61137477934\cdot 10^{-8} & 0.00031255555 & 0.00233877011 & -0.00004274417 \\
-0.00000978359 & 0.06006461031 & -0.00000913619 & 0.00148247120 & -0.00072279657 & -0.00157496654 \\
-4.86286767418\cdot 10^{-8} & -0.00384880693 & 0.00005786179 & 0.00003482935 & -0.00222565755 & -0.00029841490 \\
-0.00005068073 & -0.00114914679 & -4.64964261953\cdot 10^{-11} & 0.00004143807 & 0.00067668948 & -1.52922857114\cdot 10^{-8} \\
0.00001298540 & 0.00170420893 & -0.00001219898 & -0.00060335343 & 0.04134592791 & -0.00052815010 \\
4.66620681740\cdot 10^{-11} & 0.00102804953 & 0.00004810665 & -1.67484077527\cdot 10^{-8} & 0.00062737193 & 0.00003940615 \\
-0.00010321148 & -0.00001414600 & -2.73005911389\cdot 10^{-14} & -0.00003848684 & 0.00001470528 & -3.71351392015\cdot 10^{-12} \\
-0.00001414600 & -0.12947857817 & -0.00001337906 & 0.00025555125 & 0.00095215812 & -0.00022776080 \\
-2.73005911389\cdot 10^{-14} & -0.00001337906 & -0.00009797657 & 4.89450772014\cdot 10^{-12} & -0.00001393580 & 0.00003651136 \\
-0.00003848684 & 0.00025555125 & 4.89450772014\cdot 10^{-12} & -0.00006239992 & -0.00014436957 & -4.92646031077\cdot 10^{-15} \\
0.00001470528 & 0.00095215812 & -0.00001393580 & -0.00014436957 & -0.08886148567 & -0.00013348148 \\
-3.71351392015\cdot 10^{-12} & -0.00022776080 & 0.00003651136 & -4.92646031077\cdot 10^{-15} & -0.00013348148 & -0.00005919817 \\
0.00003020315 & 0.00000287750 & -6.11527496408\cdot 10^{-16} & -0.00001908529 & -0.00000297095 & 1.88925224729\cdot 10^{-14} \\
-0.00001495138 & 0.03027233185 & -0.00001418074 & 0.00009471169 & 0.00066178980 & -0.00008862601 \\
-3.31589309321\cdot 10^{-16} & 0.00000272488 & 0.00002869680 & -1.82627377477\cdot 10^{-14} & 0.00000282036 & 0.00001807875 \\
0.00002961061 & -0.00006309364 & -1.20807810807\cdot 10^{-12} & 0.00002355898 & 0.00003504019 & 1.06789333275\cdot 10^{-14} \\
0.00001500747 & -0.00041014029 & -0.00001423998 & -0.00006730898 & 0.02375644411 & -0.00006331651 \\
9.12420912576\cdot 10^{-13} & 0.00005616901 & -0.00002812073 & 1.93046228742\cdot 10^{-15} & 0.00003236094 & 0.00002238336 \\
0.00000406644 & 0.00000066293 & -6.41351673174\cdot 10^{-14} & 0.00002138146 & -0.00000090328 & -5.54519037512\cdot 10^{-14} \\
-0.00001493806 & 0.00436069008 & -0.00001417757 & 0.00005029007 & -0.00032965896 & -0.00004744310 \\
-8.14811085733\cdot 10^{-14} & 0.00000063872 & 0.00000386310 & 5.45200014040\cdot 10^{-14} & 0.00000086669 & -0.00002030190 \\
-0.00000252131 & 0.00001241640 & 7.05782109673\cdot 10^{-14} & 0.00000330787 & -0.00000788873 & 8.42888401493\cdot 10^{-14} \\
0.00001478127 & -0.00007109159 & -0.00001403083 & -0.00003888592 & 0.00368849050 & -0.00003674960 \\
-4.67295111503\cdot 10^{-14} & -0.00001115478 & 0.00000239297 & 1.14232496671\cdot 10^{-13} & -0.00000734314 & 0.00000314218 \\
\end{array}
\right)
\end{align*}
\begin{align*}
T_{fin,3}^{6} & =
\left(
\begin{array}{cccccc}
0.00000533496 & -0.00155761678 & 1.03004567414\cdot 10^{-8} & -0.00007068683 & 0.00112127247 & 0.00001071653 \\
0.00000205063 & 0.00542592858 & 0.00000191475 & -0.00037123808 & -0.00008204165 & 0.00039444690 \\
1.08637530482\cdot 10^{-8} & -0.00148129586 & 0.00000505715 & -0.00000873218 & -0.00106597795 & 0.00006752409 \\
0.00003985325 & -0.00046479549 & 5.72582288790\cdot 10^{-12} & 0.00000498890 & 0.00034694746 & 3.83396386458\cdot 10^{-9} \\
-0.00000267139 & -0.00053855101 & 0.00000251061 & 0.00015051244 & 0.00518713748 & 0.00013169100 \\
-6.88851803831\cdot 10^{-12} & 0.00043588952 & -0.00003785447 & 4.19903510403\cdot 10^{-9} & 0.00032698004 & 0.00000473141 \\
0.00003020315 & -0.00001495138 & -3.31589309321\cdot 10^{-16} & 0.00002961061 & 0.00001500747 & 9.12420912576\cdot 10^{-13} \\
0.00000287750 & 0.03027233185 & 0.00000272488 & -0.00006309364 & -0.00041014029 & 0.00005616901 \\
-6.11527496408\cdot 10^{-16} & -0.00001418074 & 0.00002869680 & -1.20807810807\cdot 10^{-12} & -0.00001423998 & -0.00002812073 \\
-0.00001908529 & 0.00009471169 & -1.82627377477\cdot 10^{-14} & 0.00002355898 & -0.00006730898 & 1.93046228742\cdot 10^{-15} \\
-0.00000297095 & 0.00066178980 & 0.00000282036 & 0.00003504019 & 0.02375644411 & 0.00003236094 \\
1.88925224729\cdot 10^{-14} & -0.00008862601 & 0.00001807875 & 1.06789333275\cdot 10^{-14} & -0.00006331651 & 0.00002238336 \\
-0.00004067472 & 0.00000301019 & -9.94013687038\cdot 10^{-15} & -0.00001108514 & -0.00000301927 & 3.09891987571\cdot 10^{-14} \\
0.00000301019 & -0.06718916229 & 0.00000286072 & -0.00002243655 & 0.00050351656 & 0.00002096811 \\
-9.94013687038\cdot 10^{-15} & 0.00000286072 & -0.00003855497 & -3.90779130797\cdot 10^{-14} & 0.00000287165 & 0.00001047733 \\
-0.00001108514 & -0.00002243655 & -3.90779130797\cdot 10^{-14} & -0.00002712959 & 0.00001542937 & 4.95308748195\cdot 10^{-14} \\
-0.00000301927 & 0.00050351656 & 0.00000287165 & 0.00001542937 & -0.05364315613 & 0.00001449165 \\
3.09891987571\cdot 10^{-14} & 0.00002096811 & 0.00001047733 & 4.95308748195\cdot 10^{-14} & 0.00001449165 & -0.00002568508 \\
0.00001920997 & 0.00000111906 & 2.94183176863\cdot 10^{-15} & -0.00000572563 & -0.00000131717 & -1.48589541887\cdot 10^{-14} \\
0.00000301060 & 0.01951725986 & 0.00000286489 & -0.00001103849 & 0.00040280094 & 0.00001039344 \\
2.46715880694\cdot 10^{-14} & 0.00000107186 & 0.00001825080 & 4.12671067396\cdot 10^{-14} & 0.00000126051 & 0.00000538519 \\
0.00001662305 & 0.00000597031 & 6.81443231533\cdot 10^{-15} & 0.00001614121 & -0.00000498558 & -8.56313692173\cdot 10^{-14} \\
-0.00000299111 & -0.00027522999 & 0.00000284750 & 0.00000806555 & 0.01654806366 & 0.00000760366 \\
1.62019892263\cdot 10^{-14} & -0.00000562168 & -0.00001578051 & -4.16453813480\cdot 10^{-14} & -0.00000471758 & 0.00001533483 \\
\end{array}
\right)
\end{align*}
\begin{align*}
T_{fin,4}^{6} & =
\left(
\begin{array}{cccccc}
0.00000125628 & -0.00084878958 & -2.83996989618\cdot 10^{-9} & 0.00001343075 & 0.00066557241 & -0.00000184033 \\
9.07808750600\cdot 10^{-8} & 0.00138995868 & 9.96003237305\cdot 10^{-8} & 0.00006447176 & -0.00002363043 & -0.00006842433 \\
-2.99529358660\cdot 10^{-9} & -0.00080678324 & 0.00000119675 & 0.00000149956 & -0.00063256170 & -0.00001282276 \\
-0.00000437207 & -0.00027291241 & -1.65191684991\cdot 10^{-12} & 0.00000129779 & 0.00022255360 & -6.58453704807\cdot 10^{-10} \\
-0.00000039232 & -0.00008219826 & 0.00000038570 & -0.00002707375 & 0.00154868413 & -0.00002378631 \\
1.97070264514\cdot 10^{-12} & 0.00025788023 & 0.00000415121 & -7.21116613265\cdot 10^{-10} & 0.00021062293 & 0.00000123389 \\
0.00000406644 & -0.00001493806 & -8.14811085733\cdot 10^{-14} & -0.00000252131 & 0.00001478127 & -4.67295111503\cdot 10^{-14} \\
0.00000066293 & 0.00436069008 & 0.00000063872 & 0.00001241640 & -0.00007109159 & -0.00001115478 \\
-6.41351673174\cdot 10^{-14} & -0.00001417757 & 0.00000386310 & 7.05782109673\cdot 10^{-14} & -0.00001403083 & 0.00000239297 \\
0.00002138146 & 0.00005029007 & 5.45200014040\cdot 10^{-14} & 0.00000330787 & -0.00003888592 & 1.14232496671\cdot 10^{-13} \\
-0.00000090328 & -0.00032965896 & 0.00000086669 & -0.00000788873 & 0.00368849050 & -0.00000734314 \\
-5.54519037512\cdot 10^{-14} & -0.00004744310 & -0.00002030190 & 8.42888401493\cdot 10^{-14} & -0.00003674960 & 0.00000314218 \\
0.00001920997 & 0.00000301060 & 2.46715880694\cdot 10^{-14} & 0.00001662305 & -0.00000299111 & 1.62019892263\cdot 10^{-14} \\
0.00000111906 & 0.01951725986 & 0.00000107186 & 0.00000597031 & -0.00027522999 & -0.00000562168 \\
2.94183176863\cdot 10^{-15} & 0.00000286489 & 0.00001825080 & 6.81443231533\cdot 10^{-15} & 0.00000284750 & -0.00001578051 \\
-0.00000572563 & -0.00001103849 & 4.12671067396\cdot 10^{-14} & 0.00001614121 & 0.00000806555 & -4.16453813480\cdot 10^{-14} \\
-0.00000131717 & 0.00040280094 & 0.00000126051 & -0.00000498558 & 0.01654806366 & -0.00000471758 \\
-1.48589541887\cdot 10^{-14} & 0.00001039344 & 0.00000538519 & -8.56313692173\cdot 10^{-14} & 0.00000760366 & 0.00001533483 \\
-0.00001787015 & 0.00000149962 & 2.77623635774\cdot 10^{-14} & -0.00000204184 & -0.00000166924 & -1.91830702198\cdot 10^{-13} \\
0.00000149962 & -0.04435472907 & 0.00000143468 & 0.00000442712 & 0.00033271917 & -0.00000420077 \\
2.77623635774\cdot 10^{-14} & 0.00000143468 & -0.00001688740 & 2.06623856601\cdot 10^{-13} & 0.00000159584 & 0.00000188523 \\
-0.00000204184 & 0.00000442712 & 2.06623856601\cdot 10^{-13} & -0.00001114387 & -0.00000409665 & -2.62477024413\cdot 10^{-13} \\
-0.00000166924 & 0.00033271917 & 0.00000159584 & -0.00000409665 & -0.03758247285 & -0.00000389363 \\
-1.91830702198\cdot 10^{-13} & -0.00000420077 & 0.00000188523 & -2.62477024413\cdot 10^{-13} & -0.00000389363 & -0.00001049667 \\
\end{array}
\right)
\end{align*}
\normalsize
\begin{tabular}{l}
\textbf{Angel Castro} \\
{\small Departamento de Matem\'aticas} \\
{\small Universidad Aut\'onoma de Madrid} \\
{\small Instituto de Ciencias Matem\'aticas-CSIC}\\
{\small C/ Nicolas Cabrera, 13-15, 28049 Madrid, Spain} \\
{\small Email: angel\[email protected]} \\
\\
\textbf{Diego C\'ordoba} \\
{\small Instituto de Ciencias Matem\'aticas} \\
{\small Consejo Superior de Investigaciones Cient\'ificas} \\
{\small C/ Nicolas Cabrera, 13-15, 28049 Madrid, Spain} \\
{\small Email: [email protected]} \\
\\
\textbf{Javier G\'omez-Serrano} \\
{\small Department of Mathematics} \\
{\small Princeton University}\\
{\small 610 Fine Hall, Washington Rd,}\\
{\small Princeton, NJ 08544, USA}\\
{\small Email: [email protected]} \\
\\
\end{tabular}
\end{document}